diff --git a/go.mod b/go.mod
index af656b8284c..ab1d803ba72 100644
--- a/go.mod
+++ b/go.mod
@@ -19,9 +19,9 @@ require (
github.com/onsi/ginkgo/v2 v2.20.2
github.com/onsi/gomega v1.34.1
github.com/pkg/errors v0.9.1
- github.com/rancher/yip v1.9.2
+ github.com/rancher/yip v1.9.3
github.com/sanity-io/litter v1.5.5
- github.com/sirupsen/logrus v1.9.3
+ github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af
github.com/spf13/cobra v1.8.1
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.19.0
@@ -31,67 +31,52 @@ require (
)
require (
- dario.cat/mergo v1.0.0 // indirect
+ dario.cat/mergo v1.0.1 // indirect
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
- github.com/Masterminds/semver/v3 v3.2.1 // indirect
- github.com/Masterminds/sprig/v3 v3.2.3 // indirect
- github.com/ProtonMail/go-crypto v1.1.0-alpha.2 // indirect
- github.com/cloudflare/circl v1.3.8 // indirect
+ github.com/Masterminds/semver/v3 v3.3.0 // indirect
+ github.com/Masterminds/sprig/v3 v3.3.0 // indirect
github.com/containerd/errdefs v0.1.0 // indirect
github.com/containerd/log v0.1.0 // indirect
- github.com/cyphar/filepath-securejoin v0.2.5 // indirect
github.com/denisbrodbeck/machineid v1.0.1 // indirect
- github.com/diskfs/go-diskfs v1.4.0 // indirect
- github.com/elliotwutingfeng/asciiset v0.0.0-20230602022725-51bbb787efab // indirect
- github.com/emirpasic/gods v1.18.1 // indirect
- github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
- github.com/go-git/go-billy/v5 v5.5.0 // indirect
- github.com/go-git/go-git/v5 v5.12.0 // indirect
+ github.com/diskfs/go-diskfs v1.4.1 // indirect
+ github.com/djherbis/times v1.6.0 // indirect
+ github.com/elliotwutingfeng/asciiset v0.0.0-20240214025120-24af97c84155 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
- github.com/gofrs/flock v0.8.1 // indirect
+ github.com/gofrs/flock v0.12.1 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/google/uuid v1.6.0 // indirect
- github.com/huandu/xstrings v1.4.0 // indirect
- github.com/imdario/mergo v0.3.13 // indirect
+ github.com/huandu/xstrings v1.5.0 // indirect
github.com/itchyny/gojq v0.12.16 // indirect
github.com/itchyny/timefmt-go v0.1.6 // indirect
- github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
- github.com/kendru/darwin/go/depgraph v0.0.0-20221105232959-877d6a81060c // indirect
- github.com/kevinburke/ssh_config v1.2.0 // indirect
+ github.com/kendru/darwin/go/depgraph v0.0.0-20230809052043-4d1c7e9d1767 // indirect
github.com/mauromorales/xpasswd v0.4.0 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/moby/sys/mountinfo v0.7.1 // indirect
- github.com/mudler/entities v0.8.0 // indirect
+ github.com/mudler/entities v0.8.1 // indirect
github.com/packethost/packngo v0.31.0 // indirect
github.com/phayes/permbits v0.0.0-20190612203442-39d7c581d2ee // indirect
github.com/pierrec/lz4/v4 v4.1.21 // indirect
- github.com/pjbgf/sha1cd v0.3.0 // indirect
- github.com/pkg/xattr v0.4.9 // indirect
+ github.com/pkg/xattr v0.4.10 // indirect
github.com/rancher-sandbox/linuxkit v1.0.2 // indirect
github.com/sagikazarmark/locafero v0.4.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/satori/go.uuid v1.2.0 // indirect
- github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect
github.com/shopspring/decimal v1.4.0 // indirect
- github.com/skeema/knownhosts v1.2.2 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/tredoe/osutil v1.5.0 // indirect
github.com/ulikunitz/xz v0.5.12 // indirect
- github.com/vishvananda/netlink v1.2.1-beta.2 // indirect
+ github.com/vishvananda/netlink v1.3.0 // indirect
github.com/vishvananda/netns v0.0.4 // indirect
github.com/vmware/vmw-guestinfo v0.0.0-20220317130741-510905f0efa3 // indirect
- github.com/xanzy/ssh-agent v0.3.3 // indirect
- github.com/zcalusic/sysinfo v1.0.2 // indirect
+ github.com/zcalusic/sysinfo v1.1.0 // indirect
go.uber.org/atomic v1.9.0 // indirect
go.uber.org/multierr v1.9.0 // indirect
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c // indirect
google.golang.org/grpc v1.62.1 // indirect
- gopkg.in/djherbis/times.v1 v1.3.0 // indirect
- gopkg.in/warnings.v0 v0.1.2 // indirect
)
require (
@@ -120,7 +105,7 @@ require (
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jaypipes/pcidb v1.0.0 // indirect
- github.com/klauspost/compress v1.17.4 // indirect
+ github.com/klauspost/compress v1.17.9 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/moby/sys/sequential v0.5.0 // indirect
@@ -129,14 +114,14 @@ require (
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/spf13/afero v1.11.0 // indirect
- github.com/spf13/cast v1.6.0 // indirect
+ github.com/spf13/cast v1.7.0 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/twpayne/go-vfs/v4 v4.3.0
github.com/vbatts/tar-split v0.11.3 // indirect
go.opencensus.io v0.24.0 // indirect
golang.org/x/net v0.28.0 // indirect
golang.org/x/sync v0.8.0 // indirect
- golang.org/x/sys v0.24.0 // indirect
+ golang.org/x/sys v0.25.0 // indirect
golang.org/x/text v0.17.0 // indirect
golang.org/x/tools v0.24.0 // indirect
google.golang.org/protobuf v1.34.1 // indirect
diff --git a/go.sum b/go.sum
index 704c8d38f6f..fd4c0db2623 100644
--- a/go.sum
+++ b/go.sum
@@ -1,34 +1,26 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
-dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
+dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
+dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
-github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
-github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0=
-github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
-github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA=
-github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM=
-github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
+github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0=
+github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
+github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs=
+github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/Microsoft/hcsshim v0.11.7 h1:vl/nj3Bar/CvJSYo7gIQPyRWc9f3c6IeSNavBTSZNZQ=
github.com/Microsoft/hcsshim v0.11.7/go.mod h1:MV8xMfmECjl5HdO7U/3/hFVnkmSBjAjmA09d4bExKcU=
-github.com/ProtonMail/go-crypto v1.1.0-alpha.2 h1:bkyFVUP+ROOARdgCiJzNQo2V2kiB97LyUpzH9P6Hrlg=
-github.com/ProtonMail/go-crypto v1.1.0-alpha.2/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE=
github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA=
github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
github.com/alecthomas/assert/v2 v2.3.0 h1:mAsH2wmvjsuvyBvAmCtm7zFsBlb8mIHx5ySLVdDZXL0=
github.com/alecthomas/assert/v2 v2.3.0/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ=
github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk=
github.com/alecthomas/repr v0.2.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4=
-github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
-github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
-github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
-github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/bramvdbogaerde/go-scp v1.5.0 h1:a9BinAjTfQh273eh7vd3qUgmBC+bx+3TRDtkZWmIpzM=
github.com/bramvdbogaerde/go-scp v1.5.0/go.mod h1:on2aH5AxaFb2G0N5Vsdy6B0Ml7k9HuHSwfo1y0QzAbQ=
github.com/canonical/go-efilib v1.2.0 h1:+fvJdkj3oVyURFtfk8gSft6pdKyVzzdzNn9GC1kMJw8=
@@ -39,8 +31,6 @@ github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK3
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cloudflare/circl v1.3.8 h1:j+V8jJt09PoeMFIu2uh5JUyEaIHTXVOHslFoLNAKqwI=
-github.com/cloudflare/circl v1.3.8/go.mod h1:PDRU+oXvdD7KCtgKxW95M5Z8BpSCJXQORiZFnBQS5QU=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
@@ -57,8 +47,6 @@ github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNA
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/cyphar/filepath-securejoin v0.2.5 h1:6iR5tXJ/e6tJZzzdMc1km3Sa7RRIVBKAK32O2s7AYfo=
-github.com/cyphar/filepath-securejoin v0.2.5/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -66,10 +54,12 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/denisbrodbeck/machineid v1.0.1 h1:geKr9qtkB876mXguW2X6TU4ZynleN6ezuMSRhl4D7AQ=
github.com/denisbrodbeck/machineid v1.0.1/go.mod h1:dJUwb7PTidGDeYyUBmXZ2GphQBbjJCrnectwCyxcUSI=
-github.com/diskfs/go-diskfs v1.4.0 h1:MAybY6TPD+fmhY+a2qFhmdvMeIKvCqlgh4QIc1uCmBs=
-github.com/diskfs/go-diskfs v1.4.0/go.mod h1:G8cyy+ngM+3yKlqjweMmtqvE+TxsnIo1xumbJX1AeLg=
+github.com/diskfs/go-diskfs v1.4.1 h1:iODgkzHLmvXS+1VDztpW53T+dQm8GQzi20y9yUd5UCA=
+github.com/diskfs/go-diskfs v1.4.1/go.mod h1:+tOkQs8CMMog6Nvljg8DGIxEXrgL48iyT3OM3IlSz74=
github.com/distribution/distribution v2.8.1+incompatible h1:8iXUoOqRPx30bhzIEPUmNIqlmBlWdrieW1bqr6LrX30=
github.com/distribution/distribution v2.8.1+incompatible/go.mod h1:EgLm2NgWtdKgzF9NpMzUKgzmR7AMmb0VQi2B+ZzDRjc=
+github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c=
+github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/docker/cli v27.1.1+incompatible h1:goaZxOqs4QKxznZjjBWKONQci/MywhtRv2oNn0GkeZE=
github.com/docker/cli v27.1.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
@@ -83,12 +73,8 @@ github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKoh
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU=
-github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM=
-github.com/elliotwutingfeng/asciiset v0.0.0-20230602022725-51bbb787efab h1:h1UgjJdAAhj+uPL68n7XASS6bU+07ZX1WJvVS2eyoeY=
-github.com/elliotwutingfeng/asciiset v0.0.0-20230602022725-51bbb787efab/go.mod h1:GLo/8fDswSAniFG+BFIaiSPcK610jyzgEhWYPQwuQdw=
-github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
-github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
+github.com/elliotwutingfeng/asciiset v0.0.0-20240214025120-24af97c84155 h1:seguMDM4tY+VtOu8pITTC/8fCGlMdYB01B/k07k/cr0=
+github.com/elliotwutingfeng/asciiset v0.0.0-20240214025120-24af97c84155/go.mod h1:GLo/8fDswSAniFG+BFIaiSPcK610jyzgEhWYPQwuQdw=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
@@ -99,16 +85,6 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/gliderlabs/ssh v0.3.7 h1:iV3Bqi942d9huXnzEF2Mt+CY9gLu8DNM4Obd+8bODRE=
-github.com/gliderlabs/ssh v0.3.7/go.mod h1:zpHEXBstFnQYtGnB8k8kQLol82umzn/2/snG7alWVD8=
-github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=
-github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=
-github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU=
-github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow=
-github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4=
-github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII=
-github.com/go-git/go-git/v5 v5.12.0 h1:7Md+ndsjrzZxbddRDZjF14qK+NN56sy6wkqaVrjZtys=
-github.com/go-git/go-git/v5 v5.12.0/go.mod h1:FTM9VKtnI2m65hNI/TenDDDnUf2Q9FHnXYjuz9i5OEY=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
@@ -118,8 +94,8 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM=
github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
-github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
-github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
+github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E=
+github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
@@ -153,7 +129,6 @@ github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 h1:5iH8iuqE5apketRbSF
github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
-github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -168,12 +143,8 @@ github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUq
github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
-github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU=
-github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
-github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
-github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
+github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI=
+github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/itchyny/gojq v0.12.16 h1:yLfgLxhIr/6sJNVmYfQjTIv0jGctu6/DgDoivmxTr7g=
@@ -184,38 +155,29 @@ github.com/jaypipes/ghw v0.12.0 h1:xU2/MDJfWmBhJnujHY9qwXQLs3DBsf0/Xa9vECY0Tho=
github.com/jaypipes/ghw v0.12.0/go.mod h1:jeJGbkRB2lL3/gxYzNYzEDETV1ZJ56OKr+CSeSEym+g=
github.com/jaypipes/pcidb v1.0.0 h1:vtZIfkiCUE42oYbJS0TAq9XSfSmcsgo9IdxSm9qzYU8=
github.com/jaypipes/pcidb v1.0.0/go.mod h1:TnYUvqhPBzCKnH34KrIX22kAeEbDCSRJ9cqLRCuNDfk=
-github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
-github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
-github.com/kendru/darwin/go/depgraph v0.0.0-20221105232959-877d6a81060c h1:eKb4PqwAMhlqwXw0W3atpKaYaPGlXE/Fwh+xpCEYaPk=
-github.com/kendru/darwin/go/depgraph v0.0.0-20221105232959-877d6a81060c/go.mod h1:VOfm8h1NySetVlpHDSnbpCMsvCgYaU+YDn4XezUy2+4=
-github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=
-github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
+github.com/kendru/darwin/go/depgraph v0.0.0-20230809052043-4d1c7e9d1767 h1:Ds6xHRvL0yjG4kZD05leRKt70mM18Fjt0+B5gIqqe1g=
+github.com/kendru/darwin/go/depgraph v0.0.0-20230809052043-4d1c7e9d1767/go.mod h1:VOfm8h1NySetVlpHDSnbpCMsvCgYaU+YDn4XezUy2+4=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
-github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
+github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
github.com/mauromorales/xpasswd v0.4.0 h1:Jf6mfA8lwQsYzwgfQADPDGV7l/liAvRrnG+nQTPy0j8=
github.com/mauromorales/xpasswd v0.4.0/go.mod h1:Z3+aY19mhNfcGi3st0+RAVSz2vC+pyoju2S/FPN8kEg=
-github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g=
@@ -227,8 +189,8 @@ github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbD
github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
-github.com/mudler/entities v0.8.0 h1:U+KD0hn1v7MEkiOv60zjLLUj3kEsgj8IMFMN+No3D0Q=
-github.com/mudler/entities v0.8.0/go.mod h1:exnXZF6qVnu4b9dEiH3sLEyxYBTknfkcJ3UCxyc/dwE=
+github.com/mudler/entities v0.8.1 h1:/iZ3VrhZy8bSVr39IqoSwL4jphna2rgSYnJCUZakZ3s=
+github.com/mudler/entities v0.8.1/go.mod h1:exnXZF6qVnu4b9dEiH3sLEyxYBTknfkcJ3UCxyc/dwE=
github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU=
github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
github.com/onsi/ginkgo/v2 v2.20.2 h1:7NVCeyIWROIAheY21RLS+3j2bb52W0W82tkberYytp4=
@@ -247,12 +209,10 @@ github.com/phayes/permbits v0.0.0-20190612203442-39d7c581d2ee h1:P6U24L02WMfj9ym
github.com/phayes/permbits v0.0.0-20190612203442-39d7c581d2ee/go.mod h1:3uODdxMgOaPYeWU7RzZLxVtJHZ/x1f/iHkBZuKJDzuY=
github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
-github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4=
-github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/xattr v0.4.9 h1:5883YPCtkSd8LFbs13nXplj9g9tlrwoJRjgpgMu1/fE=
-github.com/pkg/xattr v0.4.9/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU=
+github.com/pkg/xattr v0.4.10 h1:Qe0mtiNFHQZ296vRgUjRCoPHPqH7VdTOrZx3g0T+pGA=
+github.com/pkg/xattr v0.4.10/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU=
github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
@@ -260,8 +220,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/rancher-sandbox/linuxkit v1.0.2 h1:mUFPL2Mgl1XZ5H82ABR57t5H2G2Qd+lu3gMYvUGmeZo=
github.com/rancher-sandbox/linuxkit v1.0.2/go.mod h1:n6Fkjc5qoMeWrnLSA5oqUF8ZzFKMrM960CtBwfvH1ZM=
-github.com/rancher/yip v1.9.2 h1:AddaE7/J5eUgeMeW5mlB7JtNCafqVt78aZgg1CE+cfk=
-github.com/rancher/yip v1.9.2/go.mod h1:acOitsP+8zVDhxM36mDePxpT+SXhK66GxW5Vgg3lw2Y=
+github.com/rancher/yip v1.9.3 h1:0+k3kWZPV8bg5wsTEYG1qNyD69/rTDYRMlABSMLI4+U=
+github.com/rancher/yip v1.9.3/go.mod h1:UFoCEbbxsQXZ59G5xbQBlQuJSfyELzDzrB8es7/VWLY=
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
@@ -274,24 +234,17 @@ github.com/sanity-io/litter v1.5.5 h1:iE+sBxPBzoK6uaEP5Lt3fHNgpKcHXc/A2HGETy0uJQ
github.com/sanity-io/litter v1.5.5/go.mod h1:9gzJgR2i4ZpjZHsKvUXIRQVk7P+yM3e+jAF7bU2UI5U=
github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
-github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8=
-github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
-github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k=
github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME=
-github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
-github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
-github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
-github.com/skeema/knownhosts v1.2.2 h1:Iug2P4fLmDw9f41PB6thxUkNUkJzB5i+1/exaj40L3A=
-github.com/skeema/knownhosts v1.2.2/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo=
+github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af h1:Sp5TG9f7K39yfB+If0vjp97vuT74F72r8hfRpP8jLU0=
+github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
-github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
-github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
+github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w=
+github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
@@ -303,10 +256,7 @@ github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSS
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
@@ -326,20 +276,16 @@ github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0o
github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8=
github.com/vbatts/tar-split v0.11.3 h1:hLFqsOLQ1SsppQNTMpkpPXClLDfC2A3Zgy9OUU+RVck=
github.com/vbatts/tar-split v0.11.3/go.mod h1:9QlHN18E+fEH7RdG+QAJJcuya3rqT7eXSTY7wGrAokY=
-github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs=
-github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
-github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
+github.com/vishvananda/netlink v1.3.0 h1:X7l42GfcV4S6E4vHTsw48qbrV+9PVojNfIhZcwQdrZk=
+github.com/vishvananda/netlink v1.3.0/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs=
github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
github.com/vmware/vmw-guestinfo v0.0.0-20220317130741-510905f0efa3 h1:v6jG/tdl4O07LNVp74Nt7/OyL+1JsIW1M2f/nSvQheY=
github.com/vmware/vmw-guestinfo v0.0.0-20220317130741-510905f0efa3/go.mod h1:CSBTxrhePCm0cmXNKDGeu+6bOQzpaEklfCqEpn89JWk=
-github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
-github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-github.com/zcalusic/sysinfo v1.0.2 h1:nwTTo2a+WQ0NXwo0BGRojOJvJ/5XKvQih+2RrtWqfxc=
-github.com/zcalusic/sysinfo v1.0.2/go.mod h1:kluzTYflRWo6/tXVMJPdEjShsbPpsFRyy+p1mBQPC30=
+github.com/zcalusic/sysinfo v1.1.0 h1:79Hqn8h4poVz6T57/4ezXbT5ZkZbZm7u1YU1C4paMyk=
+github.com/zcalusic/sysinfo v1.1.0/go.mod h1:NX+qYnWGtJVPV0yWldff9uppNKU4h40hJIRPf/pGLv4=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
@@ -350,9 +296,6 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200420201142-3c4aac89819a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -363,7 +306,6 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -373,10 +315,6 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -385,40 +323,27 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220906165534-d0df966e6959/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg=
-golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
+golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34=
+golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU=
golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
@@ -431,7 +356,6 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24=
golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -464,27 +388,19 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
-gopkg.in/djherbis/times.v1 v1.3.0 h1:uxMS4iMtH6Pwsxog094W0FYldiNnfY/xba00vq6C2+o=
-gopkg.in/djherbis/times.v1 v1.3.0/go.mod h1:AQlg6unIsrsCEdQYhTzERy542dz6SFdQFZFv6mUY0P8=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
-gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
-gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
diff --git a/pkg/cloudinit/cloudinit.go b/pkg/cloudinit/cloudinit.go
index 3aa04a66504..0880a9140f7 100644
--- a/pkg/cloudinit/cloudinit.go
+++ b/pkg/cloudinit/cloudinit.go
@@ -53,7 +53,6 @@ func NewYipCloudInitRunner(l types.Logger, r types.Runner, fs vfs.FS) *YipCloudI
// Note, the plugin execution order depends on the order passed here
plugins.DNS,
plugins.Download,
- plugins.Git,
plugins.Entities,
plugins.EnsureDirectories,
plugins.EnsureFiles,
diff --git a/vendor/dario.cat/mergo/.gitignore b/vendor/dario.cat/mergo/.gitignore
index 529c3412ba9..45ad0f1ae30 100644
--- a/vendor/dario.cat/mergo/.gitignore
+++ b/vendor/dario.cat/mergo/.gitignore
@@ -13,6 +13,9 @@
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
+# Golang/Intellij
+.idea
+
# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
.glide/
diff --git a/vendor/dario.cat/mergo/README.md b/vendor/dario.cat/mergo/README.md
index 7d0cf9f32af..0b3c488893b 100644
--- a/vendor/dario.cat/mergo/README.md
+++ b/vendor/dario.cat/mergo/README.md
@@ -44,13 +44,21 @@ Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the
## Status
-It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
+Mergo is stable and frozen, ready for production. Check a short list of the projects using at large scale it [here](https://github.com/imdario/mergo#mergo-in-the-wild).
+
+No new features are accepted. They will be considered for a future v2 that improves the implementation and fixes bugs for corner cases.
### Important notes
#### 1.0.0
-In [1.0.0](//github.com/imdario/mergo/releases/tag/1.0.0) Mergo moves to a vanity URL `dario.cat/mergo`.
+In [1.0.0](//github.com/imdario/mergo/releases/tag/1.0.0) Mergo moves to a vanity URL `dario.cat/mergo`. No more v1 versions will be released.
+
+If the vanity URL is causing issues in your project due to a dependency pulling Mergo - it isn't a direct dependency in your project - it is recommended to use [replace](https://github.com/golang/go/wiki/Modules#when-should-i-use-the-replace-directive) to pin the version to the last one with the old import URL:
+
+```
+replace github.com/imdario/mergo => github.com/imdario/mergo v0.3.16
+```
#### 0.3.9
@@ -64,55 +72,24 @@ If you were using Mergo before April 6th, 2015, please check your project works
If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes:
-
### Mergo in the wild
-- [moby/moby](https://github.com/moby/moby)
-- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
-- [vmware/dispatch](https://github.com/vmware/dispatch)
-- [Shopify/themekit](https://github.com/Shopify/themekit)
-- [imdario/zas](https://github.com/imdario/zas)
-- [matcornic/hermes](https://github.com/matcornic/hermes)
-- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go)
-- [kataras/iris](https://github.com/kataras/iris)
-- [michaelsauter/crane](https://github.com/michaelsauter/crane)
-- [go-task/task](https://github.com/go-task/task)
-- [sensu/uchiwa](https://github.com/sensu/uchiwa)
-- [ory/hydra](https://github.com/ory/hydra)
-- [sisatech/vcli](https://github.com/sisatech/vcli)
-- [dairycart/dairycart](https://github.com/dairycart/dairycart)
-- [projectcalico/felix](https://github.com/projectcalico/felix)
-- [resin-os/balena](https://github.com/resin-os/balena)
-- [go-kivik/kivik](https://github.com/go-kivik/kivik)
-- [Telefonica/govice](https://github.com/Telefonica/govice)
-- [supergiant/supergiant](supergiant/supergiant)
-- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce)
-- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy)
-- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel)
-- [EagerIO/Stout](https://github.com/EagerIO/Stout)
-- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api)
-- [russross/canvasassignments](https://github.com/russross/canvasassignments)
-- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api)
-- [casualjim/exeggutor](https://github.com/casualjim/exeggutor)
-- [divshot/gitling](https://github.com/divshot/gitling)
-- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl)
-- [andrerocker/deploy42](https://github.com/andrerocker/deploy42)
-- [elwinar/rambler](https://github.com/elwinar/rambler)
-- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman)
-- [jfbus/impressionist](https://github.com/jfbus/impressionist)
-- [Jmeyering/zealot](https://github.com/Jmeyering/zealot)
-- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host)
-- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go)
-- [thoas/picfit](https://github.com/thoas/picfit)
-- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server)
-- [jnuthong/item_search](https://github.com/jnuthong/item_search)
-- [bukalapak/snowboard](https://github.com/bukalapak/snowboard)
-- [containerssh/containerssh](https://github.com/containerssh/containerssh)
-- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser)
-- [tjpnz/structbot](https://github.com/tjpnz/structbot)
+Mergo is used by [thousands](https://deps.dev/go/dario.cat%2Fmergo/v1.0.0/dependents) [of](https://deps.dev/go/github.com%2Fimdario%2Fmergo/v0.3.16/dependents) [projects](https://deps.dev/go/github.com%2Fimdario%2Fmergo/v0.3.12), including:
+
+* [containerd/containerd](https://github.com/containerd/containerd)
+* [datadog/datadog-agent](https://github.com/datadog/datadog-agent)
+* [docker/cli/](https://github.com/docker/cli/)
+* [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser)
+* [go-micro/go-micro](https://github.com/go-micro/go-micro)
+* [grafana/loki](https://github.com/grafana/loki)
+* [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
+* [masterminds/sprig](github.com/Masterminds/sprig)
+* [moby/moby](https://github.com/moby/moby)
+* [slackhq/nebula](https://github.com/slackhq/nebula)
+* [volcano-sh/volcano](https://github.com/volcano-sh/volcano)
## Install
@@ -141,6 +118,39 @@ if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
}
```
+If you need to override pointers, so the source pointer's value is assigned to the destination's pointer, you must use `WithoutDereference`:
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "dario.cat/mergo"
+)
+
+type Foo struct {
+ A *string
+ B int64
+}
+
+func main() {
+ first := "first"
+ second := "second"
+ src := Foo{
+ A: &first,
+ B: 2,
+ }
+
+ dest := Foo{
+ A: &second,
+ B: 1,
+ }
+
+ mergo.Merge(&dest, src, mergo.WithOverride, mergo.WithoutDereference)
+}
+```
+
Additionally, you can map a `map[string]interface{}` to a struct (and otherwise, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field.
```go
diff --git a/vendor/dario.cat/mergo/map.go b/vendor/dario.cat/mergo/map.go
index b50d5c2a4e7..759b4f74fd5 100644
--- a/vendor/dario.cat/mergo/map.go
+++ b/vendor/dario.cat/mergo/map.go
@@ -58,7 +58,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf
}
fieldName := field.Name
fieldName = changeInitialCase(fieldName, unicode.ToLower)
- if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v), !config.ShouldNotDereference) || overwrite) {
+ if _, ok := dstMap[fieldName]; !ok || (!isEmptyValue(reflect.ValueOf(src.Field(i).Interface()), !config.ShouldNotDereference) && overwrite) || config.overwriteWithEmptyValue {
dstMap[fieldName] = src.Field(i).Interface()
}
}
diff --git a/vendor/dario.cat/mergo/merge.go b/vendor/dario.cat/mergo/merge.go
index 0ef9b2138c1..fd47c95b2b8 100644
--- a/vendor/dario.cat/mergo/merge.go
+++ b/vendor/dario.cat/mergo/merge.go
@@ -269,7 +269,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
return
}
- } else {
+ } else if src.Elem().Kind() != reflect.Struct {
if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() {
dst.Set(src)
}
diff --git a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md
index f12626423a3..f95a504fe70 100644
--- a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md
+++ b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md
@@ -1,5 +1,33 @@
# Changelog
+## 3.3.0 (2024-08-27)
+
+### Added
+
+- #238: Add LessThanEqual and GreaterThanEqual functions (thanks @grosser)
+- #213: nil version equality checking (thanks @KnutZuidema)
+
+### Changed
+
+- #241: Simplify StrictNewVersion parsing (thanks @grosser)
+- Testing support up through Go 1.23
+- Minimum version set to 1.21 as this is what's tested now
+- Fuzz testing now supports caching
+
+## 3.2.1 (2023-04-10)
+
+### Changed
+
+- #198: Improved testing around pre-release names
+- #200: Improved code scanning with addition of CodeQL
+- #201: Testing now includes Go 1.20. Go 1.17 has been dropped
+- #202: Migrated Fuzz testing to Go built-in Fuzzing. CI runs daily
+- #203: Docs updated for security details
+
+### Fixed
+
+- #199: Fixed issue with range transformations
+
## 3.2.0 (2022-11-28)
### Added
diff --git a/vendor/github.com/Masterminds/semver/v3/Makefile b/vendor/github.com/Masterminds/semver/v3/Makefile
index 0e7b5c7138e..9ca87a2c79e 100644
--- a/vendor/github.com/Masterminds/semver/v3/Makefile
+++ b/vendor/github.com/Masterminds/semver/v3/Makefile
@@ -19,6 +19,7 @@ test-cover:
.PHONY: fuzz
fuzz:
@echo "==> Running Fuzz Tests"
+ go env GOCACHE
go test -fuzz=FuzzNewVersion -fuzztime=15s .
go test -fuzz=FuzzStrictNewVersion -fuzztime=15s .
go test -fuzz=FuzzNewConstraint -fuzztime=15s .
@@ -27,4 +28,4 @@ $(GOLANGCI_LINT):
# Install golangci-lint. The configuration for it is in the .golangci.yml
# file in the root of the repository
echo ${GOPATH}
- curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.17.1
+ curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.56.2
diff --git a/vendor/github.com/Masterminds/semver/v3/README.md b/vendor/github.com/Masterminds/semver/v3/README.md
index eab8cac3b7f..ed56936084b 100644
--- a/vendor/github.com/Masterminds/semver/v3/README.md
+++ b/vendor/github.com/Masterminds/semver/v3/README.md
@@ -13,12 +13,9 @@ Active](https://masterminds.github.io/stability/active.svg)](https://masterminds
[](https://pkg.go.dev/github.com/Masterminds/semver/v3)
[](https://goreportcard.com/report/github.com/Masterminds/semver)
-If you are looking for a command line tool for version comparisons please see
-[vert](https://github.com/Masterminds/vert) which uses this library.
-
## Package Versions
-Note, import `github.com/github.com/Masterminds/semver/v3` to use the latest version.
+Note, import `github.com/Masterminds/semver/v3` to use the latest version.
There are three major versions fo the `semver` package.
@@ -80,12 +77,12 @@ There are two methods for comparing versions. One uses comparison methods on
differences to notes between these two methods of comparison.
1. When two versions are compared using functions such as `Compare`, `LessThan`,
- and others it will follow the specification and always include prereleases
+ and others it will follow the specification and always include pre-releases
within the comparison. It will provide an answer that is valid with the
comparison section of the spec at https://semver.org/#spec-item-11
2. When constraint checking is used for checks or validation it will follow a
different set of rules that are common for ranges with tools like npm/js
- and Rust/Cargo. This includes considering prereleases to be invalid if the
+ and Rust/Cargo. This includes considering pre-releases to be invalid if the
ranges does not include one. If you want to have it include pre-releases a
simple solution is to include `-0` in your range.
3. Constraint ranges can have some complex rules including the shorthand use of
@@ -113,7 +110,7 @@ v, err := semver.NewVersion("1.3")
if err != nil {
// Handle version not being parsable.
}
-// Check if the version meets the constraints. The a variable will be true.
+// Check if the version meets the constraints. The variable a will be true.
a := c.Check(v)
```
@@ -137,20 +134,20 @@ The basic comparisons are:
### Working With Prerelease Versions
Pre-releases, for those not familiar with them, are used for software releases
-prior to stable or generally available releases. Examples of prereleases include
-development, alpha, beta, and release candidate releases. A prerelease may be
+prior to stable or generally available releases. Examples of pre-releases include
+development, alpha, beta, and release candidate releases. A pre-release may be
a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the
-order of precedence, prereleases come before their associated releases. In this
+order of precedence, pre-releases come before their associated releases. In this
example `1.2.3-beta.1 < 1.2.3`.
-According to the Semantic Version specification prereleases may not be
+According to the Semantic Version specification, pre-releases may not be
API compliant with their release counterpart. It says,
> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version.
-SemVer comparisons using constraints without a prerelease comparator will skip
-prerelease versions. For example, `>=1.2.3` will skip prereleases when looking
-at a list of releases while `>=1.2.3-0` will evaluate and find prereleases.
+SemVer's comparisons using constraints without a pre-release comparator will skip
+pre-release versions. For example, `>=1.2.3` will skip pre-releases when looking
+at a list of releases while `>=1.2.3-0` will evaluate and find pre-releases.
The reason for the `0` as a pre-release version in the example comparison is
because pre-releases can only contain ASCII alphanumerics and hyphens (along with
@@ -171,6 +168,9 @@ These look like:
* `1.2 - 1.4.5` which is equivalent to `>= 1.2 <= 1.4.5`
* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5`
+Note that `1.2-1.4.5` without whitespace is parsed completely differently; it's
+parsed as a single constraint `1.2.0` with _prerelease_ `1.4.5`.
+
### Wildcards In Comparisons
The `x`, `X`, and `*` characters can be used as a wildcard character. This works
diff --git a/vendor/github.com/Masterminds/semver/v3/version.go b/vendor/github.com/Masterminds/semver/v3/version.go
index 7c4bed33474..ff499fb6640 100644
--- a/vendor/github.com/Masterminds/semver/v3/version.go
+++ b/vendor/github.com/Masterminds/semver/v3/version.go
@@ -83,22 +83,23 @@ func StrictNewVersion(v string) (*Version, error) {
original: v,
}
- // check for prerelease or build metadata
- var extra []string
- if strings.ContainsAny(parts[2], "-+") {
- // Start with the build metadata first as it needs to be on the right
- extra = strings.SplitN(parts[2], "+", 2)
- if len(extra) > 1 {
- // build metadata found
- sv.metadata = extra[1]
- parts[2] = extra[0]
+ // Extract build metadata
+ if strings.Contains(parts[2], "+") {
+ extra := strings.SplitN(parts[2], "+", 2)
+ sv.metadata = extra[1]
+ parts[2] = extra[0]
+ if err := validateMetadata(sv.metadata); err != nil {
+ return nil, err
}
+ }
- extra = strings.SplitN(parts[2], "-", 2)
- if len(extra) > 1 {
- // prerelease found
- sv.pre = extra[1]
- parts[2] = extra[0]
+ // Extract build prerelease
+ if strings.Contains(parts[2], "-") {
+ extra := strings.SplitN(parts[2], "-", 2)
+ sv.pre = extra[1]
+ parts[2] = extra[0]
+ if err := validatePrerelease(sv.pre); err != nil {
+ return nil, err
}
}
@@ -114,7 +115,7 @@ func StrictNewVersion(v string) (*Version, error) {
}
}
- // Extract the major, minor, and patch elements onto the returned Version
+ // Extract major, minor, and patch
var err error
sv.major, err = strconv.ParseUint(parts[0], 10, 64)
if err != nil {
@@ -131,23 +132,6 @@ func StrictNewVersion(v string) (*Version, error) {
return nil, err
}
- // No prerelease or build metadata found so returning now as a fastpath.
- if sv.pre == "" && sv.metadata == "" {
- return sv, nil
- }
-
- if sv.pre != "" {
- if err = validatePrerelease(sv.pre); err != nil {
- return nil, err
- }
- }
-
- if sv.metadata != "" {
- if err = validateMetadata(sv.metadata); err != nil {
- return nil, err
- }
- }
-
return sv, nil
}
@@ -381,15 +365,31 @@ func (v *Version) LessThan(o *Version) bool {
return v.Compare(o) < 0
}
+// LessThanEqual tests if one version is less or equal than another one.
+func (v *Version) LessThanEqual(o *Version) bool {
+ return v.Compare(o) <= 0
+}
+
// GreaterThan tests if one version is greater than another one.
func (v *Version) GreaterThan(o *Version) bool {
return v.Compare(o) > 0
}
+// GreaterThanEqual tests if one version is greater or equal than another one.
+func (v *Version) GreaterThanEqual(o *Version) bool {
+ return v.Compare(o) >= 0
+}
+
// Equal tests if two versions are equal to each other.
// Note, versions can be equal with different metadata since metadata
// is not considered part of the comparable version.
func (v *Version) Equal(o *Version) bool {
+ if v == o {
+ return true
+ }
+ if v == nil || o == nil {
+ return false
+ }
return v.Compare(o) == 0
}
diff --git a/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md b/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md
index 2ce45dd4eca..b5ef766a7af 100644
--- a/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md
+++ b/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md
@@ -1,5 +1,23 @@
# Changelog
+## Release 3.3.0 (2024-08-29)
+
+### Added
+
+- #400: added sha512sum function (thanks @itzik-elayev)
+
+### Changed
+
+- #407: Removed duplicate documentation (functions were documentated in 2 places)
+- #290: Corrected copy/paster oops in math documentation (thanks @zzhu41)
+- #369: Corrected template reference in docs (thanks @chey)
+- #375: Added link to URL documenation (thanks @carlpett)
+- #406: Updated the mergo dependency which had a breaking change (which was accounted for)
+- #376: Fixed documentation error (thanks @jheyduk)
+- #404: Updated dependency tree
+- #391: Fixed misspelling (thanks @chrishalbert)
+- #405: Updated Go versions used in testing
+
## Release 3.2.3 (2022-11-29)
### Changed
@@ -307,7 +325,7 @@ This release adds new functions, including:
- Added `semver` and `semverCompare` for Semantic Versions
- `list` replaces `tuple`
- Fixed issue with `join`
-- Added `first`, `last`, `intial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without`
+- Added `first`, `last`, `initial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without`
## Release 2.9.0 (2017-02-23)
@@ -361,7 +379,7 @@ Because we switched from `int` to `int64` as the return value for all integer ma
- `min` complements `max` (formerly `biggest`)
- `empty` indicates that a value is the empty value for its type
- `tuple` creates a tuple inside of a template: `{{$t := tuple "a", "b" "c"}}`
-- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}`
+- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}`
- Date formatters have been added for HTML dates (as used in `date` input fields)
- Integer math functions can convert from a number of types, including `string` (via `strconv.ParseInt`).
diff --git a/vendor/github.com/Masterminds/sprig/v3/crypto.go b/vendor/github.com/Masterminds/sprig/v3/crypto.go
index 13a5cd55934..75fe027e4d3 100644
--- a/vendor/github.com/Masterminds/sprig/v3/crypto.go
+++ b/vendor/github.com/Masterminds/sprig/v3/crypto.go
@@ -14,6 +14,7 @@ import (
"crypto/rsa"
"crypto/sha1"
"crypto/sha256"
+ "crypto/sha512"
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
@@ -36,6 +37,11 @@ import (
"golang.org/x/crypto/scrypt"
)
+func sha512sum(input string) string {
+ hash := sha512.Sum512([]byte(input))
+ return hex.EncodeToString(hash[:])
+}
+
func sha256sum(input string) string {
hash := sha256.Sum256([]byte(input))
return hex.EncodeToString(hash[:])
diff --git a/vendor/github.com/Masterminds/sprig/v3/dict.go b/vendor/github.com/Masterminds/sprig/v3/dict.go
index ade88969840..4315b3542ad 100644
--- a/vendor/github.com/Masterminds/sprig/v3/dict.go
+++ b/vendor/github.com/Masterminds/sprig/v3/dict.go
@@ -1,7 +1,7 @@
package sprig
import (
- "github.com/imdario/mergo"
+ "dario.cat/mergo"
"github.com/mitchellh/copystructure"
)
diff --git a/vendor/github.com/Masterminds/sprig/v3/doc.go b/vendor/github.com/Masterminds/sprig/v3/doc.go
index aabb9d4489f..91031d6d197 100644
--- a/vendor/github.com/Masterminds/sprig/v3/doc.go
+++ b/vendor/github.com/Masterminds/sprig/v3/doc.go
@@ -6,7 +6,7 @@ inside of Go `html/template` and `text/template` files.
To add these functions, use the `template.Funcs()` method:
- t := templates.New("foo").Funcs(sprig.FuncMap())
+ t := template.New("foo").Funcs(sprig.FuncMap())
Note that you should add the function map before you parse any template files.
diff --git a/vendor/github.com/Masterminds/sprig/v3/functions.go b/vendor/github.com/Masterminds/sprig/v3/functions.go
index 57fcec1d9ea..cda47d26f27 100644
--- a/vendor/github.com/Masterminds/sprig/v3/functions.go
+++ b/vendor/github.com/Masterminds/sprig/v3/functions.go
@@ -22,8 +22,7 @@ import (
//
// Use this to pass the functions into the template engine:
//
-// tpl := template.New("foo").Funcs(sprig.FuncMap()))
-//
+// tpl := template.New("foo").Funcs(sprig.FuncMap()))
func FuncMap() template.FuncMap {
return HtmlFuncMap()
}
@@ -142,10 +141,13 @@ var genericMap = map[string]interface{}{
"swapcase": util.SwapCase,
"shuffle": xstrings.Shuffle,
"snakecase": xstrings.ToSnakeCase,
- "camelcase": xstrings.ToCamelCase,
- "kebabcase": xstrings.ToKebabCase,
- "wrap": func(l int, s string) string { return util.Wrap(s, l) },
- "wrapWith": func(l int, sep, str string) string { return util.WrapCustom(str, l, sep, true) },
+ // camelcase used to call xstrings.ToCamelCase, but that function had a breaking change in version
+ // 1.5 that moved it from upper camel case to lower camel case. This is a breaking change for sprig.
+ // A new xstrings.ToPascalCase function was added that provided upper camel case.
+ "camelcase": xstrings.ToPascalCase,
+ "kebabcase": xstrings.ToKebabCase,
+ "wrap": func(l int, s string) string { return util.Wrap(s, l) },
+ "wrapWith": func(l int, sep, str string) string { return util.WrapCustom(str, l, sep, true) },
// Switch order so that "foobar" | contains "foo"
"contains": func(substr string, str string) bool { return strings.Contains(str, substr) },
"hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) },
@@ -159,6 +161,7 @@ var genericMap = map[string]interface{}{
"plural": plural,
"sha1sum": sha1sum,
"sha256sum": sha256sum,
+ "sha512sum": sha512sum,
"adler32sum": adler32sum,
"toString": strval,
@@ -336,20 +339,20 @@ var genericMap = map[string]interface{}{
"mustChunk": mustChunk,
// Crypto:
- "bcrypt": bcrypt,
- "htpasswd": htpasswd,
- "genPrivateKey": generatePrivateKey,
- "derivePassword": derivePassword,
- "buildCustomCert": buildCustomCertificate,
- "genCA": generateCertificateAuthority,
- "genCAWithKey": generateCertificateAuthorityWithPEMKey,
- "genSelfSignedCert": generateSelfSignedCertificate,
+ "bcrypt": bcrypt,
+ "htpasswd": htpasswd,
+ "genPrivateKey": generatePrivateKey,
+ "derivePassword": derivePassword,
+ "buildCustomCert": buildCustomCertificate,
+ "genCA": generateCertificateAuthority,
+ "genCAWithKey": generateCertificateAuthorityWithPEMKey,
+ "genSelfSignedCert": generateSelfSignedCertificate,
"genSelfSignedCertWithKey": generateSelfSignedCertificateWithPEMKey,
- "genSignedCert": generateSignedCertificate,
- "genSignedCertWithKey": generateSignedCertificateWithPEMKey,
- "encryptAES": encryptAES,
- "decryptAES": decryptAES,
- "randBytes": randBytes,
+ "genSignedCert": generateSignedCertificate,
+ "genSignedCertWithKey": generateSignedCertificateWithPEMKey,
+ "encryptAES": encryptAES,
+ "decryptAES": decryptAES,
+ "randBytes": randBytes,
// UUIDs:
"uuidv4": uuidv4,
diff --git a/vendor/github.com/ProtonMail/go-crypto/AUTHORS b/vendor/github.com/ProtonMail/go-crypto/AUTHORS
deleted file mode 100644
index 2b00ddba0df..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/AUTHORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code refers to The Go Authors for copyright purposes.
-# The master list of authors is in the main Go distribution,
-# visible at https://tip.golang.org/AUTHORS.
diff --git a/vendor/github.com/ProtonMail/go-crypto/CONTRIBUTORS b/vendor/github.com/ProtonMail/go-crypto/CONTRIBUTORS
deleted file mode 100644
index 1fbd3e976fa..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/CONTRIBUTORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code was written by the Go contributors.
-# The master list of contributors is in the main Go distribution,
-# visible at https://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/github.com/ProtonMail/go-crypto/LICENSE b/vendor/github.com/ProtonMail/go-crypto/LICENSE
deleted file mode 100644
index 6a66aea5eaf..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/ProtonMail/go-crypto/PATENTS b/vendor/github.com/ProtonMail/go-crypto/PATENTS
deleted file mode 100644
index 733099041f8..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/PATENTS
+++ /dev/null
@@ -1,22 +0,0 @@
-Additional IP Rights Grant (Patents)
-
-"This implementation" means the copyrightable works distributed by
-Google as part of the Go project.
-
-Google hereby grants to You a perpetual, worldwide, non-exclusive,
-no-charge, royalty-free, irrevocable (except as stated in this section)
-patent license to make, have made, use, offer to sell, sell, import,
-transfer and otherwise run, modify and propagate the contents of this
-implementation of Go, where such license applies only to those patent
-claims, both currently owned or controlled by Google and acquired in
-the future, licensable by Google that are necessarily infringed by this
-implementation of Go. This grant does not include claims that would be
-infringed only as a consequence of further modification of this
-implementation. If you or your agent or exclusive licensee institute or
-order or agree to the institution of patent litigation against any
-entity (including a cross-claim or counterclaim in a lawsuit) alleging
-that this implementation of Go or any code incorporated within this
-implementation of Go constitutes direct or contributory patent
-infringement, or inducement of patent infringement, then any patent
-rights granted to you under this License for this implementation of Go
-shall terminate as of the date such litigation is filed.
diff --git a/vendor/github.com/ProtonMail/go-crypto/bitcurves/bitcurve.go b/vendor/github.com/ProtonMail/go-crypto/bitcurves/bitcurve.go
deleted file mode 100644
index c85e6befeca..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/bitcurves/bitcurve.go
+++ /dev/null
@@ -1,381 +0,0 @@
-package bitcurves
-
-// Copyright 2010 The Go Authors. All rights reserved.
-// Copyright 2011 ThePiachu. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package bitelliptic implements several Koblitz elliptic curves over prime
-// fields.
-
-// This package operates, internally, on Jacobian coordinates. For a given
-// (x, y) position on the curve, the Jacobian coordinates are (x1, y1, z1)
-// where x = x1/z1² and y = y1/z1³. The greatest speedups come when the whole
-// calculation can be performed within the transform (as in ScalarMult and
-// ScalarBaseMult). But even for Add and Double, it's faster to apply and
-// reverse the transform than to operate in affine coordinates.
-
-import (
- "crypto/elliptic"
- "io"
- "math/big"
- "sync"
-)
-
-// A BitCurve represents a Koblitz Curve with a=0.
-// See http://www.hyperelliptic.org/EFD/g1p/auto-shortw.html
-type BitCurve struct {
- Name string
- P *big.Int // the order of the underlying field
- N *big.Int // the order of the base point
- B *big.Int // the constant of the BitCurve equation
- Gx, Gy *big.Int // (x,y) of the base point
- BitSize int // the size of the underlying field
-}
-
-// Params returns the parameters of the given BitCurve (see BitCurve struct)
-func (bitCurve *BitCurve) Params() (cp *elliptic.CurveParams) {
- cp = new(elliptic.CurveParams)
- cp.Name = bitCurve.Name
- cp.P = bitCurve.P
- cp.N = bitCurve.N
- cp.Gx = bitCurve.Gx
- cp.Gy = bitCurve.Gy
- cp.BitSize = bitCurve.BitSize
- return cp
-}
-
-// IsOnCurve returns true if the given (x,y) lies on the BitCurve.
-func (bitCurve *BitCurve) IsOnCurve(x, y *big.Int) bool {
- // y² = x³ + b
- y2 := new(big.Int).Mul(y, y) //y²
- y2.Mod(y2, bitCurve.P) //y²%P
-
- x3 := new(big.Int).Mul(x, x) //x²
- x3.Mul(x3, x) //x³
-
- x3.Add(x3, bitCurve.B) //x³+B
- x3.Mod(x3, bitCurve.P) //(x³+B)%P
-
- return x3.Cmp(y2) == 0
-}
-
-// affineFromJacobian reverses the Jacobian transform. See the comment at the
-// top of the file.
-func (bitCurve *BitCurve) affineFromJacobian(x, y, z *big.Int) (xOut, yOut *big.Int) {
- if z.Cmp(big.NewInt(0)) == 0 {
- panic("bitcurve: Can't convert to affine with Jacobian Z = 0")
- }
- // x = YZ^2 mod P
- zinv := new(big.Int).ModInverse(z, bitCurve.P)
- zinvsq := new(big.Int).Mul(zinv, zinv)
-
- xOut = new(big.Int).Mul(x, zinvsq)
- xOut.Mod(xOut, bitCurve.P)
- // y = YZ^3 mod P
- zinvsq.Mul(zinvsq, zinv)
- yOut = new(big.Int).Mul(y, zinvsq)
- yOut.Mod(yOut, bitCurve.P)
- return xOut, yOut
-}
-
-// Add returns the sum of (x1,y1) and (x2,y2)
-func (bitCurve *BitCurve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) {
- z := new(big.Int).SetInt64(1)
- x, y, z := bitCurve.addJacobian(x1, y1, z, x2, y2, z)
- return bitCurve.affineFromJacobian(x, y, z)
-}
-
-// addJacobian takes two points in Jacobian coordinates, (x1, y1, z1) and
-// (x2, y2, z2) and returns their sum, also in Jacobian form.
-func (bitCurve *BitCurve) addJacobian(x1, y1, z1, x2, y2, z2 *big.Int) (*big.Int, *big.Int, *big.Int) {
- // See http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-add-2007-bl
- z1z1 := new(big.Int).Mul(z1, z1)
- z1z1.Mod(z1z1, bitCurve.P)
- z2z2 := new(big.Int).Mul(z2, z2)
- z2z2.Mod(z2z2, bitCurve.P)
-
- u1 := new(big.Int).Mul(x1, z2z2)
- u1.Mod(u1, bitCurve.P)
- u2 := new(big.Int).Mul(x2, z1z1)
- u2.Mod(u2, bitCurve.P)
- h := new(big.Int).Sub(u2, u1)
- if h.Sign() == -1 {
- h.Add(h, bitCurve.P)
- }
- i := new(big.Int).Lsh(h, 1)
- i.Mul(i, i)
- j := new(big.Int).Mul(h, i)
-
- s1 := new(big.Int).Mul(y1, z2)
- s1.Mul(s1, z2z2)
- s1.Mod(s1, bitCurve.P)
- s2 := new(big.Int).Mul(y2, z1)
- s2.Mul(s2, z1z1)
- s2.Mod(s2, bitCurve.P)
- r := new(big.Int).Sub(s2, s1)
- if r.Sign() == -1 {
- r.Add(r, bitCurve.P)
- }
- r.Lsh(r, 1)
- v := new(big.Int).Mul(u1, i)
-
- x3 := new(big.Int).Set(r)
- x3.Mul(x3, x3)
- x3.Sub(x3, j)
- x3.Sub(x3, v)
- x3.Sub(x3, v)
- x3.Mod(x3, bitCurve.P)
-
- y3 := new(big.Int).Set(r)
- v.Sub(v, x3)
- y3.Mul(y3, v)
- s1.Mul(s1, j)
- s1.Lsh(s1, 1)
- y3.Sub(y3, s1)
- y3.Mod(y3, bitCurve.P)
-
- z3 := new(big.Int).Add(z1, z2)
- z3.Mul(z3, z3)
- z3.Sub(z3, z1z1)
- if z3.Sign() == -1 {
- z3.Add(z3, bitCurve.P)
- }
- z3.Sub(z3, z2z2)
- if z3.Sign() == -1 {
- z3.Add(z3, bitCurve.P)
- }
- z3.Mul(z3, h)
- z3.Mod(z3, bitCurve.P)
-
- return x3, y3, z3
-}
-
-// Double returns 2*(x,y)
-func (bitCurve *BitCurve) Double(x1, y1 *big.Int) (*big.Int, *big.Int) {
- z1 := new(big.Int).SetInt64(1)
- return bitCurve.affineFromJacobian(bitCurve.doubleJacobian(x1, y1, z1))
-}
-
-// doubleJacobian takes a point in Jacobian coordinates, (x, y, z), and
-// returns its double, also in Jacobian form.
-func (bitCurve *BitCurve) doubleJacobian(x, y, z *big.Int) (*big.Int, *big.Int, *big.Int) {
- // See http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l
-
- a := new(big.Int).Mul(x, x) //X1²
- b := new(big.Int).Mul(y, y) //Y1²
- c := new(big.Int).Mul(b, b) //B²
-
- d := new(big.Int).Add(x, b) //X1+B
- d.Mul(d, d) //(X1+B)²
- d.Sub(d, a) //(X1+B)²-A
- d.Sub(d, c) //(X1+B)²-A-C
- d.Mul(d, big.NewInt(2)) //2*((X1+B)²-A-C)
-
- e := new(big.Int).Mul(big.NewInt(3), a) //3*A
- f := new(big.Int).Mul(e, e) //E²
-
- x3 := new(big.Int).Mul(big.NewInt(2), d) //2*D
- x3.Sub(f, x3) //F-2*D
- x3.Mod(x3, bitCurve.P)
-
- y3 := new(big.Int).Sub(d, x3) //D-X3
- y3.Mul(e, y3) //E*(D-X3)
- y3.Sub(y3, new(big.Int).Mul(big.NewInt(8), c)) //E*(D-X3)-8*C
- y3.Mod(y3, bitCurve.P)
-
- z3 := new(big.Int).Mul(y, z) //Y1*Z1
- z3.Mul(big.NewInt(2), z3) //3*Y1*Z1
- z3.Mod(z3, bitCurve.P)
-
- return x3, y3, z3
-}
-
-// TODO: double check if it is okay
-// ScalarMult returns k*(Bx,By) where k is a number in big-endian form.
-func (bitCurve *BitCurve) ScalarMult(Bx, By *big.Int, k []byte) (*big.Int, *big.Int) {
- // We have a slight problem in that the identity of the group (the
- // point at infinity) cannot be represented in (x, y) form on a finite
- // machine. Thus the standard add/double algorithm has to be tweaked
- // slightly: our initial state is not the identity, but x, and we
- // ignore the first true bit in |k|. If we don't find any true bits in
- // |k|, then we return nil, nil, because we cannot return the identity
- // element.
-
- Bz := new(big.Int).SetInt64(1)
- x := Bx
- y := By
- z := Bz
-
- seenFirstTrue := false
- for _, byte := range k {
- for bitNum := 0; bitNum < 8; bitNum++ {
- if seenFirstTrue {
- x, y, z = bitCurve.doubleJacobian(x, y, z)
- }
- if byte&0x80 == 0x80 {
- if !seenFirstTrue {
- seenFirstTrue = true
- } else {
- x, y, z = bitCurve.addJacobian(Bx, By, Bz, x, y, z)
- }
- }
- byte <<= 1
- }
- }
-
- if !seenFirstTrue {
- return nil, nil
- }
-
- return bitCurve.affineFromJacobian(x, y, z)
-}
-
-// ScalarBaseMult returns k*G, where G is the base point of the group and k is
-// an integer in big-endian form.
-func (bitCurve *BitCurve) ScalarBaseMult(k []byte) (*big.Int, *big.Int) {
- return bitCurve.ScalarMult(bitCurve.Gx, bitCurve.Gy, k)
-}
-
-var mask = []byte{0xff, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f}
-
-// TODO: double check if it is okay
-// GenerateKey returns a public/private key pair. The private key is generated
-// using the given reader, which must return random data.
-func (bitCurve *BitCurve) GenerateKey(rand io.Reader) (priv []byte, x, y *big.Int, err error) {
- byteLen := (bitCurve.BitSize + 7) >> 3
- priv = make([]byte, byteLen)
-
- for x == nil {
- _, err = io.ReadFull(rand, priv)
- if err != nil {
- return
- }
- // We have to mask off any excess bits in the case that the size of the
- // underlying field is not a whole number of bytes.
- priv[0] &= mask[bitCurve.BitSize%8]
- // This is because, in tests, rand will return all zeros and we don't
- // want to get the point at infinity and loop forever.
- priv[1] ^= 0x42
- x, y = bitCurve.ScalarBaseMult(priv)
- }
- return
-}
-
-// Marshal converts a point into the form specified in section 4.3.6 of ANSI
-// X9.62.
-func (bitCurve *BitCurve) Marshal(x, y *big.Int) []byte {
- byteLen := (bitCurve.BitSize + 7) >> 3
-
- ret := make([]byte, 1+2*byteLen)
- ret[0] = 4 // uncompressed point
-
- xBytes := x.Bytes()
- copy(ret[1+byteLen-len(xBytes):], xBytes)
- yBytes := y.Bytes()
- copy(ret[1+2*byteLen-len(yBytes):], yBytes)
- return ret
-}
-
-// Unmarshal converts a point, serialised by Marshal, into an x, y pair. On
-// error, x = nil.
-func (bitCurve *BitCurve) Unmarshal(data []byte) (x, y *big.Int) {
- byteLen := (bitCurve.BitSize + 7) >> 3
- if len(data) != 1+2*byteLen {
- return
- }
- if data[0] != 4 { // uncompressed form
- return
- }
- x = new(big.Int).SetBytes(data[1 : 1+byteLen])
- y = new(big.Int).SetBytes(data[1+byteLen:])
- return
-}
-
-//curve parameters taken from:
-//http://www.secg.org/collateral/sec2_final.pdf
-
-var initonce sync.Once
-var secp160k1 *BitCurve
-var secp192k1 *BitCurve
-var secp224k1 *BitCurve
-var secp256k1 *BitCurve
-
-func initAll() {
- initS160()
- initS192()
- initS224()
- initS256()
-}
-
-func initS160() {
- // See SEC 2 section 2.4.1
- secp160k1 = new(BitCurve)
- secp160k1.Name = "secp160k1"
- secp160k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFAC73", 16)
- secp160k1.N, _ = new(big.Int).SetString("0100000000000000000001B8FA16DFAB9ACA16B6B3", 16)
- secp160k1.B, _ = new(big.Int).SetString("0000000000000000000000000000000000000007", 16)
- secp160k1.Gx, _ = new(big.Int).SetString("3B4C382CE37AA192A4019E763036F4F5DD4D7EBB", 16)
- secp160k1.Gy, _ = new(big.Int).SetString("938CF935318FDCED6BC28286531733C3F03C4FEE", 16)
- secp160k1.BitSize = 160
-}
-
-func initS192() {
- // See SEC 2 section 2.5.1
- secp192k1 = new(BitCurve)
- secp192k1.Name = "secp192k1"
- secp192k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFEE37", 16)
- secp192k1.N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFE26F2FC170F69466A74DEFD8D", 16)
- secp192k1.B, _ = new(big.Int).SetString("000000000000000000000000000000000000000000000003", 16)
- secp192k1.Gx, _ = new(big.Int).SetString("DB4FF10EC057E9AE26B07D0280B7F4341DA5D1B1EAE06C7D", 16)
- secp192k1.Gy, _ = new(big.Int).SetString("9B2F2F6D9C5628A7844163D015BE86344082AA88D95E2F9D", 16)
- secp192k1.BitSize = 192
-}
-
-func initS224() {
- // See SEC 2 section 2.6.1
- secp224k1 = new(BitCurve)
- secp224k1.Name = "secp224k1"
- secp224k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFE56D", 16)
- secp224k1.N, _ = new(big.Int).SetString("010000000000000000000000000001DCE8D2EC6184CAF0A971769FB1F7", 16)
- secp224k1.B, _ = new(big.Int).SetString("00000000000000000000000000000000000000000000000000000005", 16)
- secp224k1.Gx, _ = new(big.Int).SetString("A1455B334DF099DF30FC28A169A467E9E47075A90F7E650EB6B7A45C", 16)
- secp224k1.Gy, _ = new(big.Int).SetString("7E089FED7FBA344282CAFBD6F7E319F7C0B0BD59E2CA4BDB556D61A5", 16)
- secp224k1.BitSize = 224
-}
-
-func initS256() {
- // See SEC 2 section 2.7.1
- secp256k1 = new(BitCurve)
- secp256k1.Name = "secp256k1"
- secp256k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", 16)
- secp256k1.N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16)
- secp256k1.B, _ = new(big.Int).SetString("0000000000000000000000000000000000000000000000000000000000000007", 16)
- secp256k1.Gx, _ = new(big.Int).SetString("79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798", 16)
- secp256k1.Gy, _ = new(big.Int).SetString("483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8", 16)
- secp256k1.BitSize = 256
-}
-
-// S160 returns a BitCurve which implements secp160k1 (see SEC 2 section 2.4.1)
-func S160() *BitCurve {
- initonce.Do(initAll)
- return secp160k1
-}
-
-// S192 returns a BitCurve which implements secp192k1 (see SEC 2 section 2.5.1)
-func S192() *BitCurve {
- initonce.Do(initAll)
- return secp192k1
-}
-
-// S224 returns a BitCurve which implements secp224k1 (see SEC 2 section 2.6.1)
-func S224() *BitCurve {
- initonce.Do(initAll)
- return secp224k1
-}
-
-// S256 returns a BitCurve which implements bitcurves (see SEC 2 section 2.7.1)
-func S256() *BitCurve {
- initonce.Do(initAll)
- return secp256k1
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/brainpool/brainpool.go b/vendor/github.com/ProtonMail/go-crypto/brainpool/brainpool.go
deleted file mode 100644
index cb6676de24b..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/brainpool/brainpool.go
+++ /dev/null
@@ -1,134 +0,0 @@
-// Package brainpool implements Brainpool elliptic curves.
-// Implementation of rcurves is from github.com/ebfe/brainpool
-// Note that these curves are implemented with naive, non-constant time operations
-// and are likely not suitable for environments where timing attacks are a concern.
-package brainpool
-
-import (
- "crypto/elliptic"
- "math/big"
- "sync"
-)
-
-var (
- once sync.Once
- p256t1, p384t1, p512t1 *elliptic.CurveParams
- p256r1, p384r1, p512r1 *rcurve
-)
-
-func initAll() {
- initP256t1()
- initP384t1()
- initP512t1()
- initP256r1()
- initP384r1()
- initP512r1()
-}
-
-func initP256t1() {
- p256t1 = &elliptic.CurveParams{Name: "brainpoolP256t1"}
- p256t1.P, _ = new(big.Int).SetString("A9FB57DBA1EEA9BC3E660A909D838D726E3BF623D52620282013481D1F6E5377", 16)
- p256t1.N, _ = new(big.Int).SetString("A9FB57DBA1EEA9BC3E660A909D838D718C397AA3B561A6F7901E0E82974856A7", 16)
- p256t1.B, _ = new(big.Int).SetString("662C61C430D84EA4FE66A7733D0B76B7BF93EBC4AF2F49256AE58101FEE92B04", 16)
- p256t1.Gx, _ = new(big.Int).SetString("A3E8EB3CC1CFE7B7732213B23A656149AFA142C47AAFBC2B79A191562E1305F4", 16)
- p256t1.Gy, _ = new(big.Int).SetString("2D996C823439C56D7F7B22E14644417E69BCB6DE39D027001DABE8F35B25C9BE", 16)
- p256t1.BitSize = 256
-}
-
-func initP256r1() {
- twisted := p256t1
- params := &elliptic.CurveParams{
- Name: "brainpoolP256r1",
- P: twisted.P,
- N: twisted.N,
- BitSize: twisted.BitSize,
- }
- params.Gx, _ = new(big.Int).SetString("8BD2AEB9CB7E57CB2C4B482FFC81B7AFB9DE27E1E3BD23C23A4453BD9ACE3262", 16)
- params.Gy, _ = new(big.Int).SetString("547EF835C3DAC4FD97F8461A14611DC9C27745132DED8E545C1D54C72F046997", 16)
- z, _ := new(big.Int).SetString("3E2D4BD9597B58639AE7AA669CAB9837CF5CF20A2C852D10F655668DFC150EF0", 16)
- p256r1 = newrcurve(twisted, params, z)
-}
-
-func initP384t1() {
- p384t1 = &elliptic.CurveParams{Name: "brainpoolP384t1"}
- p384t1.P, _ = new(big.Int).SetString("8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B412B1DA197FB71123ACD3A729901D1A71874700133107EC53", 16)
- p384t1.N, _ = new(big.Int).SetString("8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B31F166E6CAC0425A7CF3AB6AF6B7FC3103B883202E9046565", 16)
- p384t1.B, _ = new(big.Int).SetString("7F519EADA7BDA81BD826DBA647910F8C4B9346ED8CCDC64E4B1ABD11756DCE1D2074AA263B88805CED70355A33B471EE", 16)
- p384t1.Gx, _ = new(big.Int).SetString("18DE98B02DB9A306F2AFCD7235F72A819B80AB12EBD653172476FECD462AABFFC4FF191B946A5F54D8D0AA2F418808CC", 16)
- p384t1.Gy, _ = new(big.Int).SetString("25AB056962D30651A114AFD2755AD336747F93475B7A1FCA3B88F2B6A208CCFE469408584DC2B2912675BF5B9E582928", 16)
- p384t1.BitSize = 384
-}
-
-func initP384r1() {
- twisted := p384t1
- params := &elliptic.CurveParams{
- Name: "brainpoolP384r1",
- P: twisted.P,
- N: twisted.N,
- BitSize: twisted.BitSize,
- }
- params.Gx, _ = new(big.Int).SetString("1D1C64F068CF45FFA2A63A81B7C13F6B8847A3E77EF14FE3DB7FCAFE0CBD10E8E826E03436D646AAEF87B2E247D4AF1E", 16)
- params.Gy, _ = new(big.Int).SetString("8ABE1D7520F9C2A45CB1EB8E95CFD55262B70B29FEEC5864E19C054FF99129280E4646217791811142820341263C5315", 16)
- z, _ := new(big.Int).SetString("41DFE8DD399331F7166A66076734A89CD0D2BCDB7D068E44E1F378F41ECBAE97D2D63DBC87BCCDDCCC5DA39E8589291C", 16)
- p384r1 = newrcurve(twisted, params, z)
-}
-
-func initP512t1() {
- p512t1 = &elliptic.CurveParams{Name: "brainpoolP512t1"}
- p512t1.P, _ = new(big.Int).SetString("AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA703308717D4D9B009BC66842AECDA12AE6A380E62881FF2F2D82C68528AA6056583A48F3", 16)
- p512t1.N, _ = new(big.Int).SetString("AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA70330870553E5C414CA92619418661197FAC10471DB1D381085DDADDB58796829CA90069", 16)
- p512t1.B, _ = new(big.Int).SetString("7CBBBCF9441CFAB76E1890E46884EAE321F70C0BCB4981527897504BEC3E36A62BCDFA2304976540F6450085F2DAE145C22553B465763689180EA2571867423E", 16)
- p512t1.Gx, _ = new(big.Int).SetString("640ECE5C12788717B9C1BA06CBC2A6FEBA85842458C56DDE9DB1758D39C0313D82BA51735CDB3EA499AA77A7D6943A64F7A3F25FE26F06B51BAA2696FA9035DA", 16)
- p512t1.Gy, _ = new(big.Int).SetString("5B534BD595F5AF0FA2C892376C84ACE1BB4E3019B71634C01131159CAE03CEE9D9932184BEEF216BD71DF2DADF86A627306ECFF96DBB8BACE198B61E00F8B332", 16)
- p512t1.BitSize = 512
-}
-
-func initP512r1() {
- twisted := p512t1
- params := &elliptic.CurveParams{
- Name: "brainpoolP512r1",
- P: twisted.P,
- N: twisted.N,
- BitSize: twisted.BitSize,
- }
- params.Gx, _ = new(big.Int).SetString("81AEE4BDD82ED9645A21322E9C4C6A9385ED9F70B5D916C1B43B62EEF4D0098EFF3B1F78E2D0D48D50D1687B93B97D5F7C6D5047406A5E688B352209BCB9F822", 16)
- params.Gy, _ = new(big.Int).SetString("7DDE385D566332ECC0EABFA9CF7822FDF209F70024A57B1AA000C55B881F8111B2DCDE494A5F485E5BCA4BD88A2763AED1CA2B2FA8F0540678CD1E0F3AD80892", 16)
- z, _ := new(big.Int).SetString("12EE58E6764838B69782136F0F2D3BA06E27695716054092E60A80BEDB212B64E585D90BCE13761F85C3F1D2A64E3BE8FEA2220F01EBA5EEB0F35DBD29D922AB", 16)
- p512r1 = newrcurve(twisted, params, z)
-}
-
-// P256t1 returns a Curve which implements Brainpool P256t1 (see RFC 5639, section 3.4)
-func P256t1() elliptic.Curve {
- once.Do(initAll)
- return p256t1
-}
-
-// P256r1 returns a Curve which implements Brainpool P256r1 (see RFC 5639, section 3.4)
-func P256r1() elliptic.Curve {
- once.Do(initAll)
- return p256r1
-}
-
-// P384t1 returns a Curve which implements Brainpool P384t1 (see RFC 5639, section 3.6)
-func P384t1() elliptic.Curve {
- once.Do(initAll)
- return p384t1
-}
-
-// P384r1 returns a Curve which implements Brainpool P384r1 (see RFC 5639, section 3.6)
-func P384r1() elliptic.Curve {
- once.Do(initAll)
- return p384r1
-}
-
-// P512t1 returns a Curve which implements Brainpool P512t1 (see RFC 5639, section 3.7)
-func P512t1() elliptic.Curve {
- once.Do(initAll)
- return p512t1
-}
-
-// P512r1 returns a Curve which implements Brainpool P512r1 (see RFC 5639, section 3.7)
-func P512r1() elliptic.Curve {
- once.Do(initAll)
- return p512r1
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/brainpool/rcurve.go b/vendor/github.com/ProtonMail/go-crypto/brainpool/rcurve.go
deleted file mode 100644
index 7e291d6aa4e..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/brainpool/rcurve.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package brainpool
-
-import (
- "crypto/elliptic"
- "math/big"
-)
-
-var _ elliptic.Curve = (*rcurve)(nil)
-
-type rcurve struct {
- twisted elliptic.Curve
- params *elliptic.CurveParams
- z *big.Int
- zinv *big.Int
- z2 *big.Int
- z3 *big.Int
- zinv2 *big.Int
- zinv3 *big.Int
-}
-
-var (
- two = big.NewInt(2)
- three = big.NewInt(3)
-)
-
-func newrcurve(twisted elliptic.Curve, params *elliptic.CurveParams, z *big.Int) *rcurve {
- zinv := new(big.Int).ModInverse(z, params.P)
- return &rcurve{
- twisted: twisted,
- params: params,
- z: z,
- zinv: zinv,
- z2: new(big.Int).Exp(z, two, params.P),
- z3: new(big.Int).Exp(z, three, params.P),
- zinv2: new(big.Int).Exp(zinv, two, params.P),
- zinv3: new(big.Int).Exp(zinv, three, params.P),
- }
-}
-
-func (curve *rcurve) toTwisted(x, y *big.Int) (*big.Int, *big.Int) {
- var tx, ty big.Int
- tx.Mul(x, curve.z2)
- tx.Mod(&tx, curve.params.P)
- ty.Mul(y, curve.z3)
- ty.Mod(&ty, curve.params.P)
- return &tx, &ty
-}
-
-func (curve *rcurve) fromTwisted(tx, ty *big.Int) (*big.Int, *big.Int) {
- var x, y big.Int
- x.Mul(tx, curve.zinv2)
- x.Mod(&x, curve.params.P)
- y.Mul(ty, curve.zinv3)
- y.Mod(&y, curve.params.P)
- return &x, &y
-}
-
-func (curve *rcurve) Params() *elliptic.CurveParams {
- return curve.params
-}
-
-func (curve *rcurve) IsOnCurve(x, y *big.Int) bool {
- return curve.twisted.IsOnCurve(curve.toTwisted(x, y))
-}
-
-func (curve *rcurve) Add(x1, y1, x2, y2 *big.Int) (x, y *big.Int) {
- tx1, ty1 := curve.toTwisted(x1, y1)
- tx2, ty2 := curve.toTwisted(x2, y2)
- return curve.fromTwisted(curve.twisted.Add(tx1, ty1, tx2, ty2))
-}
-
-func (curve *rcurve) Double(x1, y1 *big.Int) (x, y *big.Int) {
- return curve.fromTwisted(curve.twisted.Double(curve.toTwisted(x1, y1)))
-}
-
-func (curve *rcurve) ScalarMult(x1, y1 *big.Int, scalar []byte) (x, y *big.Int) {
- tx1, ty1 := curve.toTwisted(x1, y1)
- return curve.fromTwisted(curve.twisted.ScalarMult(tx1, ty1, scalar))
-}
-
-func (curve *rcurve) ScalarBaseMult(scalar []byte) (x, y *big.Int) {
- return curve.fromTwisted(curve.twisted.ScalarBaseMult(scalar))
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/eax/eax.go b/vendor/github.com/ProtonMail/go-crypto/eax/eax.go
deleted file mode 100644
index 3ae91d594cd..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/eax/eax.go
+++ /dev/null
@@ -1,162 +0,0 @@
-// Copyright (C) 2019 ProtonTech AG
-
-// Package eax provides an implementation of the EAX
-// (encrypt-authenticate-translate) mode of operation, as described in
-// Bellare, Rogaway, and Wagner "THE EAX MODE OF OPERATION: A TWO-PASS
-// AUTHENTICATED-ENCRYPTION SCHEME OPTIMIZED FOR SIMPLICITY AND EFFICIENCY."
-// In FSE'04, volume 3017 of LNCS, 2004
-package eax
-
-import (
- "crypto/cipher"
- "crypto/subtle"
- "errors"
- "github.com/ProtonMail/go-crypto/internal/byteutil"
-)
-
-const (
- defaultTagSize = 16
- defaultNonceSize = 16
-)
-
-type eax struct {
- block cipher.Block // Only AES-{128, 192, 256} supported
- tagSize int // At least 12 bytes recommended
- nonceSize int
-}
-
-func (e *eax) NonceSize() int {
- return e.nonceSize
-}
-
-func (e *eax) Overhead() int {
- return e.tagSize
-}
-
-// NewEAX returns an EAX instance with AES-{KEYLENGTH} and default nonce and
-// tag lengths. Supports {128, 192, 256}- bit key length.
-func NewEAX(block cipher.Block) (cipher.AEAD, error) {
- return NewEAXWithNonceAndTagSize(block, defaultNonceSize, defaultTagSize)
-}
-
-// NewEAXWithNonceAndTagSize returns an EAX instance with AES-{keyLength} and
-// given nonce and tag lengths in bytes. Panics on zero nonceSize and
-// exceedingly long tags.
-//
-// It is recommended to use at least 12 bytes as tag length (see, for instance,
-// NIST SP 800-38D).
-//
-// Only to be used for compatibility with existing cryptosystems with
-// non-standard parameters. For all other cases, prefer NewEAX.
-func NewEAXWithNonceAndTagSize(
- block cipher.Block, nonceSize, tagSize int) (cipher.AEAD, error) {
- if nonceSize < 1 {
- return nil, eaxError("Cannot initialize EAX with nonceSize = 0")
- }
- if tagSize > block.BlockSize() {
- return nil, eaxError("Custom tag length exceeds blocksize")
- }
- return &eax{
- block: block,
- tagSize: tagSize,
- nonceSize: nonceSize,
- }, nil
-}
-
-func (e *eax) Seal(dst, nonce, plaintext, adata []byte) []byte {
- if len(nonce) > e.nonceSize {
- panic("crypto/eax: Nonce too long for this instance")
- }
- ret, out := byteutil.SliceForAppend(dst, len(plaintext)+e.tagSize)
- omacNonce := e.omacT(0, nonce)
- omacAdata := e.omacT(1, adata)
-
- // Encrypt message using CTR mode and omacNonce as IV
- ctr := cipher.NewCTR(e.block, omacNonce)
- ciphertextData := out[:len(plaintext)]
- ctr.XORKeyStream(ciphertextData, plaintext)
-
- omacCiphertext := e.omacT(2, ciphertextData)
-
- tag := out[len(plaintext):]
- for i := 0; i < e.tagSize; i++ {
- tag[i] = omacCiphertext[i] ^ omacNonce[i] ^ omacAdata[i]
- }
- return ret
-}
-
-func (e *eax) Open(dst, nonce, ciphertext, adata []byte) ([]byte, error) {
- if len(nonce) > e.nonceSize {
- panic("crypto/eax: Nonce too long for this instance")
- }
- if len(ciphertext) < e.tagSize {
- return nil, eaxError("Ciphertext shorter than tag length")
- }
- sep := len(ciphertext) - e.tagSize
-
- // Compute tag
- omacNonce := e.omacT(0, nonce)
- omacAdata := e.omacT(1, adata)
- omacCiphertext := e.omacT(2, ciphertext[:sep])
-
- tag := make([]byte, e.tagSize)
- for i := 0; i < e.tagSize; i++ {
- tag[i] = omacCiphertext[i] ^ omacNonce[i] ^ omacAdata[i]
- }
-
- // Compare tags
- if subtle.ConstantTimeCompare(ciphertext[sep:], tag) != 1 {
- return nil, eaxError("Tag authentication failed")
- }
-
- // Decrypt ciphertext
- ret, out := byteutil.SliceForAppend(dst, len(ciphertext))
- ctr := cipher.NewCTR(e.block, omacNonce)
- ctr.XORKeyStream(out, ciphertext[:sep])
-
- return ret[:sep], nil
-}
-
-// Tweakable OMAC - Calls OMAC_K([t]_n || plaintext)
-func (e *eax) omacT(t byte, plaintext []byte) []byte {
- blockSize := e.block.BlockSize()
- byteT := make([]byte, blockSize)
- byteT[blockSize-1] = t
- concat := append(byteT, plaintext...)
- return e.omac(concat)
-}
-
-func (e *eax) omac(plaintext []byte) []byte {
- blockSize := e.block.BlockSize()
- // L ← E_K(0^n); B ← 2L; P ← 4L
- L := make([]byte, blockSize)
- e.block.Encrypt(L, L)
- B := byteutil.GfnDouble(L)
- P := byteutil.GfnDouble(B)
-
- // CBC with IV = 0
- cbc := cipher.NewCBCEncrypter(e.block, make([]byte, blockSize))
- padded := e.pad(plaintext, B, P)
- cbcCiphertext := make([]byte, len(padded))
- cbc.CryptBlocks(cbcCiphertext, padded)
-
- return cbcCiphertext[len(cbcCiphertext)-blockSize:]
-}
-
-func (e *eax) pad(plaintext, B, P []byte) []byte {
- // if |M| in {n, 2n, 3n, ...}
- blockSize := e.block.BlockSize()
- if len(plaintext) != 0 && len(plaintext)%blockSize == 0 {
- return byteutil.RightXor(plaintext, B)
- }
-
- // else return (M || 1 || 0^(n−1−(|M| % n))) xor→ P
- ending := make([]byte, blockSize-len(plaintext)%blockSize)
- ending[0] = 0x80
- padded := append(plaintext, ending...)
- return byteutil.RightXor(padded, P)
-}
-
-func eaxError(err string) error {
- return errors.New("crypto/eax: " + err)
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/eax/eax_test_vectors.go b/vendor/github.com/ProtonMail/go-crypto/eax/eax_test_vectors.go
deleted file mode 100644
index ddb53d07905..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/eax/eax_test_vectors.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package eax
-
-// Test vectors from
-// https://web.cs.ucdavis.edu/~rogaway/papers/eax.pdf
-var testVectors = []struct {
- msg, key, nonce, header, ciphertext string
-}{
- {"",
- "233952DEE4D5ED5F9B9C6D6FF80FF478",
- "62EC67F9C3A4A407FCB2A8C49031A8B3",
- "6BFB914FD07EAE6B",
- "E037830E8389F27B025A2D6527E79D01"},
- {"F7FB",
- "91945D3F4DCBEE0BF45EF52255F095A4",
- "BECAF043B0A23D843194BA972C66DEBD",
- "FA3BFD4806EB53FA",
- "19DD5C4C9331049D0BDAB0277408F67967E5"},
- {"1A47CB4933",
- "01F74AD64077F2E704C0F60ADA3DD523",
- "70C3DB4F0D26368400A10ED05D2BFF5E",
- "234A3463C1264AC6",
- "D851D5BAE03A59F238A23E39199DC9266626C40F80"},
- {"481C9E39B1",
- "D07CF6CBB7F313BDDE66B727AFD3C5E8",
- "8408DFFF3C1A2B1292DC199E46B7D617",
- "33CCE2EABFF5A79D",
- "632A9D131AD4C168A4225D8E1FF755939974A7BEDE"},
- {"40D0C07DA5E4",
- "35B6D0580005BBC12B0587124557D2C2",
- "FDB6B06676EEDC5C61D74276E1F8E816",
- "AEB96EAEBE2970E9",
- "071DFE16C675CB0677E536F73AFE6A14B74EE49844DD"},
- {"4DE3B35C3FC039245BD1FB7D",
- "BD8E6E11475E60B268784C38C62FEB22",
- "6EAC5C93072D8E8513F750935E46DA1B",
- "D4482D1CA78DCE0F",
- "835BB4F15D743E350E728414ABB8644FD6CCB86947C5E10590210A4F"},
- {"8B0A79306C9CE7ED99DAE4F87F8DD61636",
- "7C77D6E813BED5AC98BAA417477A2E7D",
- "1A8C98DCD73D38393B2BF1569DEEFC19",
- "65D2017990D62528",
- "02083E3979DA014812F59F11D52630DA30137327D10649B0AA6E1C181DB617D7F2"},
- {"1BDA122BCE8A8DBAF1877D962B8592DD2D56",
- "5FFF20CAFAB119CA2FC73549E20F5B0D",
- "DDE59B97D722156D4D9AFF2BC7559826",
- "54B9F04E6A09189A",
- "2EC47B2C4954A489AFC7BA4897EDCDAE8CC33B60450599BD02C96382902AEF7F832A"},
- {"6CF36720872B8513F6EAB1A8A44438D5EF11",
- "A4A4782BCFFD3EC5E7EF6D8C34A56123",
- "B781FCF2F75FA5A8DE97A9CA48E522EC",
- "899A175897561D7E",
- "0DE18FD0FDD91E7AF19F1D8EE8733938B1E8E7F6D2231618102FDB7FE55FF1991700"},
- {"CA40D7446E545FFAED3BD12A740A659FFBBB3CEAB7",
- "8395FCF1E95BEBD697BD010BC766AAC3",
- "22E7ADD93CFC6393C57EC0B3C17D6B44",
- "126735FCC320D25A",
- "CB8920F87A6C75CFF39627B56E3ED197C552D295A7CFC46AFC253B4652B1AF3795B124AB6E"},
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/eax/random_vectors.go b/vendor/github.com/ProtonMail/go-crypto/eax/random_vectors.go
deleted file mode 100644
index 4eb19f28d9c..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/eax/random_vectors.go
+++ /dev/null
@@ -1,131 +0,0 @@
-// These vectors include key length in {128, 192, 256}, tag size 128, and
-// random nonce, header, and plaintext lengths.
-
-// This file was automatically generated.
-
-package eax
-
-var randomVectors = []struct {
- key, nonce, header, plaintext, ciphertext string
-}{
- {"DFDE093F36B0356E5A81F609786982E3",
- "1D8AC604419001816905BA72B14CED7E",
- "152A1517A998D7A24163FCDD146DE81AC347C8B97088F502093C1ABB8F6E33D9A219C34D7603A18B1F5ABE02E56661B7D7F67E81EC08C1302EF38D80A859486D450E94A4F26AD9E68EEBBC0C857A0FC5CF9E641D63D565A7E361BC8908F5A8DC8FD6",
- "1C8EAAB71077FE18B39730A3156ADE29C5EE824C7EE86ED2A253B775603FB237116E654F6FEC588DD27F523A0E01246FE73FE348491F2A8E9ABC6CA58D663F71CDBCF4AD798BE46C42AE6EE8B599DB44A1A48D7BBBBA0F7D2750181E1C5E66967F7D57CBD30AFBDA5727",
- "79E7E150934BBEBF7013F61C60462A14D8B15AF7A248AFB8A344EF021C1500E16666891D6E973D8BB56B71A371F12CA34660C4410C016982B20F547E3762A58B7BF4F20236CADCF559E2BE7D783B13723B2741FC7CDC8997D839E39A3DDD2BADB96743DD7049F1BDB0516A262869915B3F70498AFB7B191BF960"},
- {"F10619EF02E5D94D7550EB84ED364A21",
- "8DC0D4F2F745BBAE835CC5574B942D20",
- "FE561358F2E8DF7E1024FF1AE9A8D36EBD01352214505CB99D644777A8A1F6027FA2BDBFC529A9B91136D5F2416CFC5F0F4EC3A1AFD32BDDA23CA504C5A5CB451785FABF4DFE4CD50D817491991A60615B30286361C100A95D1712F2A45F8E374461F4CA2B",
- "D7B5A971FC219631D30EFC3664AE3127D9CF3097DAD9C24AC7905D15E8D9B25B026B31D68CAE00975CDB81EB1FD96FD5E1A12E2BB83FA25F1B1D91363457657FC03875C27F2946C5",
- "2F336ED42D3CC38FC61660C4CD60BA4BD438B05F5965D8B7B399D2E7167F5D34F792D318F94DB15D67463AC449E13D568CC09BFCE32A35EE3EE96A041927680AE329811811E27F2D1E8E657707AF99BA96D13A478D695D59"},
- {"429F514EFC64D98A698A9247274CFF45",
- "976AA5EB072F912D126ACEBC954FEC38",
- "A71D89DC5B6CEDBB7451A27C3C2CAE09126DB4C421",
- "5632FE62AB1DC549D54D3BC3FC868ACCEDEFD9ECF5E9F8",
- "848AE4306CA8C7F416F8707625B7F55881C0AB430353A5C967CDA2DA787F581A70E34DBEBB2385"},
- {"398138F309085F47F8457CDF53895A63",
- "F8A8A7F2D28E5FFF7BBC2F24353F7A36",
- "5D633C21BA7764B8855CAB586F3746E236AD486039C83C6B56EFA9C651D38A41D6B20DAEE3418BFEA44B8BD6",
- "A3BBAA91920AF5E10659818B1B3B300AC79BFC129C8329E75251F73A66D3AE0128EB91D5031E0A65C329DB7D1E9C0493E268",
- "D078097267606E5FB07CFB7E2B4B718172A82C6A4CEE65D549A4DFB9838003BD2FBF64A7A66988AC1A632FD88F9E9FBB57C5A78AD2E086EACBA3DB68511D81C2970A"},
- {"7A4151EBD3901B42CBA45DAFB2E931BA",
- "0FC88ACEE74DD538040321C330974EB8",
- "250464FB04733BAB934C59E6AD2D6AE8D662CBCFEFBE61E5A308D4211E58C4C25935B72C69107722E946BFCBF416796600542D76AEB73F2B25BF53BAF97BDEB36ED3A7A51C31E7F170EB897457E7C17571D1BA0A908954E9",
- "88C41F3EBEC23FAB8A362D969CAC810FAD4F7CA6A7F7D0D44F060F92E37E1183768DD4A8C733F71C96058D362A39876D183B86C103DE",
- "74A25B2182C51096D48A870D80F18E1CE15867778E34FCBA6BD7BFB3739FDCD42AD0F2D9F4EBA29085285C6048C15BCE5E5166F1F962D3337AA88E6062F05523029D0A7F0BF9"},
- {"BFB147E1CD5459424F8C0271FC0E0DC5",
- "EABCC126442BF373969EA3015988CC45",
- "4C0880E1D71AA2C7",
- "BE1B5EC78FBF73E7A6682B21BA7E0E5D2D1C7ABE",
- "5660D7C1380E2F306895B1402CB2D6C37876504276B414D120F4CF92FDDDBB293A238EA0"},
- {"595DD6F52D18BC2CA8EB4EDAA18D9FA3",
- "0F84B5D36CF4BC3B863313AF3B4D2E97",
- "30AE6CC5F99580F12A779D98BD379A60948020C0B6FBD5746B30BA3A15C6CD33DAF376C70A9F15B6C0EB410A93161F7958AE23",
- "8EF3687A1642B070970B0B91462229D1D76ABC154D18211F7152AA9FF368",
- "317C1DDB11417E5A9CC4DDE7FDFF6659A5AC4B31DE025212580A05CDAC6024D3E4AE7C2966E52B9129E9ECDBED86"},
- {"44E6F2DC8FDC778AD007137D11410F50",
- "270A237AD977F7187AA6C158A0BAB24F",
- "509B0F0EB12E2AA5C5BA2DE553C07FAF4CE0C9E926531AA709A3D6224FCB783ACCF1559E10B1123EBB7D52E8AB54E6B5352A9ED0D04124BF0E9D9BACFD7E32B817B2E625F5EE94A64EDE9E470DE7FE6886C19B294F9F828209FE257A78",
- "8B3D7815DF25618A5D0C55A601711881483878F113A12EC36CF64900549A3199555528559DC118F789788A55FAFD944E6E99A9CA3F72F238CD3F4D88223F7A745992B3FAED1848",
- "1CC00D79F7AD82FDA71B58D286E5F34D0CC4CEF30704E771CC1E50746BDF83E182B078DB27149A42BAE619DF0F85B0B1090AD55D3B4471B0D6F6ECCD09C8F876B30081F0E7537A9624F8AAF29DA85E324122EFB4D68A56"},
- {"BB7BC352A03044B4428D8DBB4B0701FDEC4649FD17B81452",
- "8B4BBE26CCD9859DCD84884159D6B0A4",
- "2212BEB0E78E0F044A86944CF33C8D5C80D9DBE1034BF3BCF73611835C7D3A52F5BD2D81B68FD681B68540A496EE5DA16FD8AC8824E60E1EC2042BE28FB0BFAD4E4B03596446BDD8C37D936D9B3D5295BE19F19CF5ACE1D33A46C952CE4DE5C12F92C1DD051E04AEED",
- "9037234CC44FFF828FABED3A7084AF40FA7ABFF8E0C0EFB57A1CC361E18FC4FAC1AB54F3ABFE9FF77263ACE16C3A",
- "A9391B805CCD956081E0B63D282BEA46E7025126F1C1631239C33E92AA6F92CD56E5A4C56F00FF9658E93D48AF4EF0EF81628E34AD4DB0CDAEDCD2A17EE7"},
- {"99C0AD703196D2F60A74E6B378B838B31F82EA861F06FC4E",
- "92745C018AA708ECFEB1667E9F3F1B01",
- "828C69F376C0C0EC651C67749C69577D589EE39E51404D80EBF70C8660A8F5FD375473F4A7C611D59CB546A605D67446CE2AA844135FCD78BB5FBC90222A00D42920BB1D7EEDFB0C4672554F583EF23184F89063CDECBE482367B5F9AF3ACBC3AF61392BD94CBCD9B64677",
- "A879214658FD0A5B0E09836639BF82E05EC7A5EF71D4701934BDA228435C68AC3D5CEB54997878B06A655EEACEFB1345C15867E7FE6C6423660C8B88DF128EBD6BCD85118DBAE16E9252FFB204324E5C8F38CA97759BDBF3CB0083",
- "51FE87996F194A2585E438B023B345439EA60D1AEBED4650CDAF48A4D4EEC4FC77DC71CC4B09D3BEEF8B7B7AF716CE2B4EFFB3AC9E6323C18AC35E0AA6E2BBBC8889490EB6226C896B0D105EAB42BFE7053CCF00ED66BA94C1BA09A792AA873F0C3B26C5C5F9A936E57B25"},
- {"7086816D00D648FB8304AA8C9E552E1B69A9955FB59B25D1",
- "0F45CF7F0BF31CCEB85D9DA10F4D749F",
- "93F27C60A417D9F0669E86ACC784FC8917B502DAF30A6338F11B30B94D74FEFE2F8BE1BBE2EAD10FAB7EED3C6F72B7C3ECEE1937C32ED4970A6404E139209C05",
- "877F046601F3CBE4FB1491943FA29487E738F94B99AF206262A1D6FF856C9AA0B8D4D08A54370C98F8E88FA3DCC2B14C1F76D71B2A4C7963AEE8AF960464C5BEC8357AD00DC8",
- "FE96906B895CE6A8E72BC72344E2C8BB3C63113D70EAFA26C299BAFE77A8A6568172EB447FB3E86648A0AF3512DEB1AAC0819F3EC553903BF28A9FB0F43411237A774BF9EE03E445D280FBB9CD12B9BAAB6EF5E52691"},
- {"062F65A896D5BF1401BADFF70E91B458E1F9BD4888CB2E4D",
- "5B11EA1D6008EBB41CF892FCA5B943D1",
- "BAF4FF5C8242",
- "A8870E091238355984EB2F7D61A865B9170F440BFF999A5993DD41A10F4440D21FF948DDA2BF663B2E03AC3324492DC5E40262ECC6A65C07672353BE23E7FB3A9D79FF6AA38D97960905A38DECC312CB6A59E5467ECF06C311CD43ADC0B543EDF34FE8BE611F176460D5627CA51F8F8D9FED71F55C",
- "B10E127A632172CF8AA7539B140D2C9C2590E6F28C3CB892FC498FCE56A34F732FBFF32E79C7B9747D9094E8635A0C084D6F0247F9768FB5FF83493799A9BEC6C39572120C40E9292C8C947AE8573462A9108C36D9D7112E6995AE5867E6C8BB387D1C5D4BEF524F391B9FD9F0A3B4BFA079E915BCD920185CFD38D114C558928BD7D47877"},
- {"38A8E45D6D705A11AF58AED5A1344896998EACF359F2E26A",
- "FD82B5B31804FF47D44199B533D0CF84",
- "DE454D4E62FE879F2050EE3E25853623D3E9AC52EEC1A1779A48CFAF5ECA0BFDE44749391866D1",
- "B804",
- "164BB965C05EBE0931A1A63293EDF9C38C27"},
- {"34C33C97C6D7A0850DA94D78A58DC61EC717CD7574833068",
- "343BE00DA9483F05C14F2E9EB8EA6AE8",
- "78312A43EFDE3CAE34A65796FF059A3FE15304EEA5CF1D9306949FE5BF3349D4977D4EBE76C040FE894C5949E4E4D6681153DA87FB9AC5062063CA2EA183566343362370944CE0362D25FC195E124FD60E8682E665D13F2229DDA3E4B2CB1DCA",
- "CC11BB284B1153578E4A5ED9D937B869DAF00F5B1960C23455CA9CC43F486A3BE0B66254F1041F04FDF459C8640465B6E1D2CF899A381451E8E7FCB50CF87823BE77E24B132BBEEDC72E53369B275E1D8F49ECE59F4F215230AC4FE133FC80E4F634EE80BA4682B62C86",
- "E7F703DC31A95E3A4919FF957836CB76C063D81702AEA4703E1C2BF30831E58C4609D626EC6810E12EAA5B930F049FF9EFC22C3E3F1EBD4A1FB285CB02A1AC5AD46B425199FC0A85670A5C4E3DAA9636C8F64C199F42F18AAC8EA7457FD377F322DD7752D7D01B946C8F0A97E6113F0D50106F319AFD291AAACE"},
- {"C6ECF7F053573E403E61B83052A343D93CBCC179D1E835BE",
- "E280E13D7367042E3AA09A80111B6184",
- "21486C9D7A9647",
- "5F2639AFA6F17931853791CD8C92382BBB677FD72D0AB1A080D0E49BFAA21810E963E4FACD422E92F65CBFAD5884A60CD94740DF31AF02F95AA57DA0C4401B0ED906",
- "5C51DB20755302070C45F52E50128A67C8B2E4ED0EACB7E29998CCE2E8C289DD5655913EC1A51CC3AABE5CDC2402B2BE7D6D4BF6945F266FBD70BA9F37109067157AE7530678B45F64475D4EBFCB5FFF46A5"},
- {"5EC6CF7401BC57B18EF154E8C38ACCA8959E57D2F3975FF5",
- "656B41CB3F9CF8C08BAD7EBFC80BD225",
- "6B817C2906E2AF425861A7EF59BA5801F143EE2A139EE72697CDE168B4",
- "2C0E1DDC9B1E5389BA63845B18B1F8A1DB062037151BCC56EF7C21C0BB4DAE366636BBA975685D7CC5A94AFBE89C769016388C56FB7B57CE750A12B718A8BDCF70E80E8659A8330EFC8F86640F21735E8C80E23FE43ABF23507CE3F964AE4EC99D",
- "ED780CF911E6D1AA8C979B889B0B9DC1ABE261832980BDBFB576901D9EF5AB8048998E31A15BE54B3E5845A4D136AD24D0BDA1C3006168DF2F8AC06729CB0818867398150020131D8F04EDF1923758C9EABB5F735DE5EA1758D4BC0ACFCA98AFD202E9839B8720253693B874C65586C6F0"},
- {"C92F678EB2208662F5BCF3403EC05F5961E957908A3E79421E1D25FC19054153",
- "DA0F3A40983D92F2D4C01FED33C7A192",
- "2B6E9D26DB406A0FAB47608657AA10EFC2B4AA5F459B29FF85AC9A40BFFE7AEB04F77E9A11FAAA116D7F6D4DA417671A9AB02C588E0EF59CB1BFB4B1CC931B63A3B3A159FCEC97A04D1E6F0C7E6A9CEF6B0ABB04758A69F1FE754DF4C2610E8C46B6CF413BDB31351D55BEDCB7B4A13A1C98E10984475E0F2F957853",
- "F37326A80E08",
- "83519E53E321D334F7C10B568183775C0E9AAE55F806"},
- {"6847E0491BE57E72995D186D50094B0B3593957A5146798FCE68B287B2FB37B5",
- "3EE1182AEBB19A02B128F28E1D5F7F99",
- "D9F35ABB16D776CE",
- "DB7566ED8EA95BDF837F23DB277BAFBC5E70D1105ADFD0D9EF15475051B1EF94709C67DCA9F8D5",
- "2CDCED0C9EBD6E2A508822A685F7DCD1CDD99E7A5FCA786C234E7F7F1D27EC49751AD5DCFA30C5EDA87C43CAE3B919B6BBCFE34C8EDA59"},
- {"82B019673642C08388D3E42075A4D5D587558C229E4AB8F660E37650C4C41A0A",
- "336F5D681E0410FAE7B607246092C6DC",
- "D430CBD8FE435B64214E9E9CDC5DE99D31CFCFB8C10AA0587A49DF276611",
- "998404153AD77003E1737EDE93ED79859EE6DCCA93CB40C4363AA817ABF2DBBD46E42A14A7183B6CC01E12A577888141363D0AE011EB6E8D28C0B235",
- "9BEF69EEB60BD3D6065707B7557F25292A8872857CFBD24F2F3C088E4450995333088DA50FD9121221C504DF1D0CD5EFE6A12666C5D5BB12282CF4C19906E9CFAB97E9BDF7F49DC17CFC384B"},
- {"747B2E269B1859F0622C15C8BAD6A725028B1F94B8DB7326948D1E6ED663A8BC",
- "AB91F7245DDCE3F1C747872D47BE0A8A",
- "3B03F786EF1DDD76E1D42646DA4CD2A5165DC5383CE86D1A0B5F13F910DC278A4E451EE0192CBA178E13B3BA27FDC7840DF73D2E104B",
- "6B803F4701114F3E5FE21718845F8416F70F626303F545BE197189E0A2BA396F37CE06D389EB2658BC7D56D67868708F6D0D32",
- "1570DDB0BCE75AA25D1957A287A2C36B1A5F2270186DA81BA6112B7F43B0F3D1D0ED072591DCF1F1C99BBB25621FC39B896FF9BD9413A2845363A9DCD310C32CF98E57"},
- {"02E59853FB29AEDA0FE1C5F19180AD99A12FF2F144670BB2B8BADF09AD812E0A",
- "C691294EF67CD04D1B9242AF83DD1421",
- "879334DAE3",
- "1E17F46A98FEF5CBB40759D95354",
- "FED8C3FF27DDF6313AED444A2985B36CBA268AAD6AAC563C0BA28F6DB5DB"},
- {"F6C1FB9B4188F2288FF03BD716023198C3582CF2A037FC2F29760916C2B7FCDB",
- "4228DA0678CA3534588859E77DFF014C",
- "D8153CAF35539A61DD8D05B3C9B44F01E564FB9348BCD09A1C23B84195171308861058F0A3CD2A55B912A3AAEE06FF4D356C77275828F2157C2FC7C115DA39E443210CCC56BEDB0CC99BBFB227ABD5CC454F4E7F547C7378A659EEB6A7E809101A84F866503CB18D4484E1FA09B3EC7FC75EB2E35270800AA7",
- "23B660A779AD285704B12EC1C580387A47BEC7B00D452C6570",
- "5AA642BBABA8E49849002A2FAF31DB8FC7773EFDD656E469CEC19B3206D4174C9A263D0A05484261F6"},
- {"8FF6086F1FADB9A3FBE245EAC52640C43B39D43F89526BB5A6EBA47710931446",
- "943188480C99437495958B0AE4831AA9",
- "AD5CD0BDA426F6EBA23C8EB23DC73FF9FEC173355EDBD6C9344C4C4383F211888F7CE6B29899A6801DF6B38651A7C77150941A",
- "80CD5EA8D7F81DDF5070B934937912E8F541A5301877528EB41AB60C020968D459960ED8FB73083329841A",
- "ABAE8EB7F36FCA2362551E72DAC890BA1BB6794797E0FC3B67426EC9372726ED4725D379EA0AC9147E48DCD0005C502863C2C5358A38817C8264B5"},
- {"A083B54E6B1FE01B65D42FCD248F97BB477A41462BBFE6FD591006C022C8FD84",
- "B0490F5BD68A52459556B3749ACDF40E",
- "8892E047DA5CFBBDF7F3CFCBD1BD21C6D4C80774B1826999234394BD3E513CC7C222BB40E1E3140A152F19B3802F0D036C24A590512AD0E8",
- "D7B15752789DC94ED0F36778A5C7BBB207BEC32BAC66E702B39966F06E381E090C6757653C3D26A81EC6AD6C364D66867A334C91BB0B8A8A4B6EACDF0783D09010AEBA2DD2062308FE99CC1F",
- "C071280A732ADC93DF272BF1E613B2BB7D46FC6665EF2DC1671F3E211D6BDE1D6ADDD28DF3AA2E47053FC8BB8AE9271EC8BC8B2CFFA320D225B451685B6D23ACEFDD241FE284F8ADC8DB07F456985B14330BBB66E0FB212213E05B3E"},
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go b/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go
deleted file mode 100644
index affb74a764d..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright (C) 2019 ProtonTech AG
-// This file contains necessary tools for the aex and ocb packages.
-//
-// These functions SHOULD NOT be used elsewhere, since they are optimized for
-// specific input nature in the EAX and OCB modes of operation.
-
-package byteutil
-
-// GfnDouble computes 2 * input in the field of 2^n elements.
-// The irreducible polynomial in the finite field for n=128 is
-// x^128 + x^7 + x^2 + x + 1 (equals 0x87)
-// Constant-time execution in order to avoid side-channel attacks
-func GfnDouble(input []byte) []byte {
- if len(input) != 16 {
- panic("Doubling in GFn only implemented for n = 128")
- }
- // If the first bit is zero, return 2L = L << 1
- // Else return (L << 1) xor 0^120 10000111
- shifted := ShiftBytesLeft(input)
- shifted[15] ^= ((input[0] >> 7) * 0x87)
- return shifted
-}
-
-// ShiftBytesLeft outputs the byte array corresponding to x << 1 in binary.
-func ShiftBytesLeft(x []byte) []byte {
- l := len(x)
- dst := make([]byte, l)
- for i := 0; i < l-1; i++ {
- dst[i] = (x[i] << 1) | (x[i+1] >> 7)
- }
- dst[l-1] = x[l-1] << 1
- return dst
-}
-
-// ShiftNBytesLeft puts in dst the byte array corresponding to x << n in binary.
-func ShiftNBytesLeft(dst, x []byte, n int) {
- // Erase first n / 8 bytes
- copy(dst, x[n/8:])
-
- // Shift the remaining n % 8 bits
- bits := uint(n % 8)
- l := len(dst)
- for i := 0; i < l-1; i++ {
- dst[i] = (dst[i] << bits) | (dst[i+1] >> uint(8-bits))
- }
- dst[l-1] = dst[l-1] << bits
-
- // Append trailing zeroes
- dst = append(dst, make([]byte, n/8)...)
-}
-
-// XorBytesMut assumes equal input length, replaces X with X XOR Y
-func XorBytesMut(X, Y []byte) {
- for i := 0; i < len(X); i++ {
- X[i] ^= Y[i]
- }
-}
-
-// XorBytes assumes equal input length, puts X XOR Y into Z
-func XorBytes(Z, X, Y []byte) {
- for i := 0; i < len(X); i++ {
- Z[i] = X[i] ^ Y[i]
- }
-}
-
-// RightXor XORs smaller input (assumed Y) at the right of the larger input (assumed X)
-func RightXor(X, Y []byte) []byte {
- offset := len(X) - len(Y)
- xored := make([]byte, len(X))
- copy(xored, X)
- for i := 0; i < len(Y); i++ {
- xored[offset+i] ^= Y[i]
- }
- return xored
-}
-
-// SliceForAppend takes a slice and a requested number of bytes. It returns a
-// slice with the contents of the given slice followed by that many bytes and a
-// second slice that aliases into it and contains only the extra bytes. If the
-// original slice has sufficient capacity then no allocation is performed.
-func SliceForAppend(in []byte, n int) (head, tail []byte) {
- if total := len(in) + n; cap(in) >= total {
- head = in[:total]
- } else {
- head = make([]byte, total)
- copy(head, in)
- }
- tail = head[len(in):]
- return
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go b/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go
deleted file mode 100644
index 5022285b441..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go
+++ /dev/null
@@ -1,318 +0,0 @@
-// Copyright (C) 2019 ProtonTech AG
-
-// Package ocb provides an implementation of the OCB (offset codebook) mode of
-// operation, as described in RFC-7253 of the IRTF and in Rogaway, Bellare,
-// Black and Krovetz - OCB: A BLOCK-CIPHER MODE OF OPERATION FOR EFFICIENT
-// AUTHENTICATED ENCRYPTION (2003).
-// Security considerations (from RFC-7253): A private key MUST NOT be used to
-// encrypt more than 2^48 blocks. Tag length should be at least 12 bytes (a
-// brute-force forging adversary succeeds after 2^{tag length} attempts). A
-// single key SHOULD NOT be used to decrypt ciphertext with different tag
-// lengths. Nonces need not be secret, but MUST NOT be reused.
-// This package only supports underlying block ciphers with 128-bit blocks,
-// such as AES-{128, 192, 256}, but may be extended to other sizes.
-package ocb
-
-import (
- "bytes"
- "crypto/cipher"
- "crypto/subtle"
- "errors"
- "math/bits"
-
- "github.com/ProtonMail/go-crypto/internal/byteutil"
-)
-
-type ocb struct {
- block cipher.Block
- tagSize int
- nonceSize int
- mask mask
- // Optimized en/decrypt: For each nonce N used to en/decrypt, the 'Ktop'
- // internal variable can be reused for en/decrypting with nonces sharing
- // all but the last 6 bits with N. The prefix of the first nonce used to
- // compute the new Ktop, and the Ktop value itself, are stored in
- // reusableKtop. If using incremental nonces, this saves one block cipher
- // call every 63 out of 64 OCB encryptions, and stores one nonce and one
- // output of the block cipher in memory only.
- reusableKtop reusableKtop
-}
-
-type mask struct {
- // L_*, L_$, (L_i)_{i ∈ N}
- lAst []byte
- lDol []byte
- L [][]byte
-}
-
-type reusableKtop struct {
- noncePrefix []byte
- Ktop []byte
-}
-
-const (
- defaultTagSize = 16
- defaultNonceSize = 15
-)
-
-const (
- enc = iota
- dec
-)
-
-func (o *ocb) NonceSize() int {
- return o.nonceSize
-}
-
-func (o *ocb) Overhead() int {
- return o.tagSize
-}
-
-// NewOCB returns an OCB instance with the given block cipher and default
-// tag and nonce sizes.
-func NewOCB(block cipher.Block) (cipher.AEAD, error) {
- return NewOCBWithNonceAndTagSize(block, defaultNonceSize, defaultTagSize)
-}
-
-// NewOCBWithNonceAndTagSize returns an OCB instance with the given block
-// cipher, nonce length, and tag length. Panics on zero nonceSize and
-// exceedingly long tag size.
-//
-// It is recommended to use at least 12 bytes as tag length.
-func NewOCBWithNonceAndTagSize(
- block cipher.Block, nonceSize, tagSize int) (cipher.AEAD, error) {
- if block.BlockSize() != 16 {
- return nil, ocbError("Block cipher must have 128-bit blocks")
- }
- if nonceSize < 1 {
- return nil, ocbError("Incorrect nonce length")
- }
- if nonceSize >= block.BlockSize() {
- return nil, ocbError("Nonce length exceeds blocksize - 1")
- }
- if tagSize > block.BlockSize() {
- return nil, ocbError("Custom tag length exceeds blocksize")
- }
- return &ocb{
- block: block,
- tagSize: tagSize,
- nonceSize: nonceSize,
- mask: initializeMaskTable(block),
- reusableKtop: reusableKtop{
- noncePrefix: nil,
- Ktop: nil,
- },
- }, nil
-}
-
-func (o *ocb) Seal(dst, nonce, plaintext, adata []byte) []byte {
- if len(nonce) > o.nonceSize {
- panic("crypto/ocb: Incorrect nonce length given to OCB")
- }
- ret, out := byteutil.SliceForAppend(dst, len(plaintext)+o.tagSize)
- o.crypt(enc, out, nonce, adata, plaintext)
- return ret
-}
-
-func (o *ocb) Open(dst, nonce, ciphertext, adata []byte) ([]byte, error) {
- if len(nonce) > o.nonceSize {
- panic("Nonce too long for this instance")
- }
- if len(ciphertext) < o.tagSize {
- return nil, ocbError("Ciphertext shorter than tag length")
- }
- sep := len(ciphertext) - o.tagSize
- ret, out := byteutil.SliceForAppend(dst, len(ciphertext))
- ciphertextData := ciphertext[:sep]
- tag := ciphertext[sep:]
- o.crypt(dec, out, nonce, adata, ciphertextData)
- if subtle.ConstantTimeCompare(ret[sep:], tag) == 1 {
- ret = ret[:sep]
- return ret, nil
- }
- for i := range out {
- out[i] = 0
- }
- return nil, ocbError("Tag authentication failed")
-}
-
-// On instruction enc (resp. dec), crypt is the encrypt (resp. decrypt)
-// function. It returns the resulting plain/ciphertext with the tag appended.
-func (o *ocb) crypt(instruction int, Y, nonce, adata, X []byte) []byte {
- //
- // Consider X as a sequence of 128-bit blocks
- //
- // Note: For encryption (resp. decryption), X is the plaintext (resp., the
- // ciphertext without the tag).
- blockSize := o.block.BlockSize()
-
- //
- // Nonce-dependent and per-encryption variables
- //
- // Zero out the last 6 bits of the nonce into truncatedNonce to see if Ktop
- // is already computed.
- truncatedNonce := make([]byte, len(nonce))
- copy(truncatedNonce, nonce)
- truncatedNonce[len(truncatedNonce)-1] &= 192
- var Ktop []byte
- if bytes.Equal(truncatedNonce, o.reusableKtop.noncePrefix) {
- Ktop = o.reusableKtop.Ktop
- } else {
- // Nonce = num2str(TAGLEN mod 128, 7) || zeros(120 - bitlen(N)) || 1 || N
- paddedNonce := append(make([]byte, blockSize-1-len(nonce)), 1)
- paddedNonce = append(paddedNonce, truncatedNonce...)
- paddedNonce[0] |= byte(((8 * o.tagSize) % (8 * blockSize)) << 1)
- // Last 6 bits of paddedNonce are already zero. Encrypt into Ktop
- paddedNonce[blockSize-1] &= 192
- Ktop = paddedNonce
- o.block.Encrypt(Ktop, Ktop)
- o.reusableKtop.noncePrefix = truncatedNonce
- o.reusableKtop.Ktop = Ktop
- }
-
- // Stretch = Ktop || ((lower half of Ktop) XOR (lower half of Ktop << 8))
- xorHalves := make([]byte, blockSize/2)
- byteutil.XorBytes(xorHalves, Ktop[:blockSize/2], Ktop[1:1+blockSize/2])
- stretch := append(Ktop, xorHalves...)
- bottom := int(nonce[len(nonce)-1] & 63)
- offset := make([]byte, len(stretch))
- byteutil.ShiftNBytesLeft(offset, stretch, bottom)
- offset = offset[:blockSize]
-
- //
- // Process any whole blocks
- //
- // Note: For encryption Y is ciphertext || tag, for decryption Y is
- // plaintext || tag.
- checksum := make([]byte, blockSize)
- m := len(X) / blockSize
- for i := 0; i < m; i++ {
- index := bits.TrailingZeros(uint(i + 1))
- if len(o.mask.L)-1 < index {
- o.mask.extendTable(index)
- }
- byteutil.XorBytesMut(offset, o.mask.L[bits.TrailingZeros(uint(i+1))])
- blockX := X[i*blockSize : (i+1)*blockSize]
- blockY := Y[i*blockSize : (i+1)*blockSize]
- byteutil.XorBytes(blockY, blockX, offset)
- switch instruction {
- case enc:
- o.block.Encrypt(blockY, blockY)
- byteutil.XorBytesMut(blockY, offset)
- byteutil.XorBytesMut(checksum, blockX)
- case dec:
- o.block.Decrypt(blockY, blockY)
- byteutil.XorBytesMut(blockY, offset)
- byteutil.XorBytesMut(checksum, blockY)
- }
- }
- //
- // Process any final partial block and compute raw tag
- //
- tag := make([]byte, blockSize)
- if len(X)%blockSize != 0 {
- byteutil.XorBytesMut(offset, o.mask.lAst)
- pad := make([]byte, blockSize)
- o.block.Encrypt(pad, offset)
- chunkX := X[blockSize*m:]
- chunkY := Y[blockSize*m : len(X)]
- byteutil.XorBytes(chunkY, chunkX, pad[:len(chunkX)])
- // P_* || bit(1) || zeroes(127) - len(P_*)
- switch instruction {
- case enc:
- paddedY := append(chunkX, byte(128))
- paddedY = append(paddedY, make([]byte, blockSize-len(chunkX)-1)...)
- byteutil.XorBytesMut(checksum, paddedY)
- case dec:
- paddedX := append(chunkY, byte(128))
- paddedX = append(paddedX, make([]byte, blockSize-len(chunkY)-1)...)
- byteutil.XorBytesMut(checksum, paddedX)
- }
- byteutil.XorBytes(tag, checksum, offset)
- byteutil.XorBytesMut(tag, o.mask.lDol)
- o.block.Encrypt(tag, tag)
- byteutil.XorBytesMut(tag, o.hash(adata))
- copy(Y[blockSize*m+len(chunkY):], tag[:o.tagSize])
- } else {
- byteutil.XorBytes(tag, checksum, offset)
- byteutil.XorBytesMut(tag, o.mask.lDol)
- o.block.Encrypt(tag, tag)
- byteutil.XorBytesMut(tag, o.hash(adata))
- copy(Y[blockSize*m:], tag[:o.tagSize])
- }
- return Y
-}
-
-// This hash function is used to compute the tag. Per design, on empty input it
-// returns a slice of zeros, of the same length as the underlying block cipher
-// block size.
-func (o *ocb) hash(adata []byte) []byte {
- //
- // Consider A as a sequence of 128-bit blocks
- //
- A := make([]byte, len(adata))
- copy(A, adata)
- blockSize := o.block.BlockSize()
-
- //
- // Process any whole blocks
- //
- sum := make([]byte, blockSize)
- offset := make([]byte, blockSize)
- m := len(A) / blockSize
- for i := 0; i < m; i++ {
- chunk := A[blockSize*i : blockSize*(i+1)]
- index := bits.TrailingZeros(uint(i + 1))
- // If the mask table is too short
- if len(o.mask.L)-1 < index {
- o.mask.extendTable(index)
- }
- byteutil.XorBytesMut(offset, o.mask.L[index])
- byteutil.XorBytesMut(chunk, offset)
- o.block.Encrypt(chunk, chunk)
- byteutil.XorBytesMut(sum, chunk)
- }
-
- //
- // Process any final partial block; compute final hash value
- //
- if len(A)%blockSize != 0 {
- byteutil.XorBytesMut(offset, o.mask.lAst)
- // Pad block with 1 || 0 ^ 127 - bitlength(a)
- ending := make([]byte, blockSize-len(A)%blockSize)
- ending[0] = 0x80
- encrypted := append(A[blockSize*m:], ending...)
- byteutil.XorBytesMut(encrypted, offset)
- o.block.Encrypt(encrypted, encrypted)
- byteutil.XorBytesMut(sum, encrypted)
- }
- return sum
-}
-
-func initializeMaskTable(block cipher.Block) mask {
- //
- // Key-dependent variables
- //
- lAst := make([]byte, block.BlockSize())
- block.Encrypt(lAst, lAst)
- lDol := byteutil.GfnDouble(lAst)
- L := make([][]byte, 1)
- L[0] = byteutil.GfnDouble(lDol)
-
- return mask{
- lAst: lAst,
- lDol: lDol,
- L: L,
- }
-}
-
-// Extends the L array of mask m up to L[limit], with L[i] = GfnDouble(L[i-1])
-func (m *mask) extendTable(limit int) {
- for i := len(m.L); i <= limit; i++ {
- m.L = append(m.L, byteutil.GfnDouble(m.L[i-1]))
- }
-}
-
-func ocbError(err string) error {
- return errors.New("crypto/ocb: " + err)
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/random_vectors.go b/vendor/github.com/ProtonMail/go-crypto/ocb/random_vectors.go
deleted file mode 100644
index 0efaf344fd5..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/ocb/random_vectors.go
+++ /dev/null
@@ -1,136 +0,0 @@
-// In the test vectors provided by RFC 7253, the "bottom"
-// internal variable, which defines "offset" for the first time, does not
-// exceed 15. However, it can attain values up to 63.
-
-// These vectors include key length in {128, 192, 256}, tag size 128, and
-// random nonce, header, and plaintext lengths.
-
-// This file was automatically generated.
-
-package ocb
-
-var randomVectors = []struct {
- key, nonce, header, plaintext, ciphertext string
-}{
-
- {"9438C5D599308EAF13F800D2D31EA7F0",
- "C38EE4801BEBFFA1CD8635BE",
- "0E507B7DADD8A98CDFE272D3CB6B3E8332B56AE583FB049C0874D4200BED16BD1A044182434E9DA0E841F182DFD5B3016B34641CED0784F1745F63AB3D0DA22D3351C9EF9A658B8081E24498EBF61FCE40DA6D8E184536",
- "962D227786FB8913A8BAD5DC3250",
- "EEDEF5FFA5986D1E3BF86DDD33EF9ADC79DCA06E215FA772CCBA814F63AD"},
- {"BA7DE631C7D6712167C6724F5B9A2B1D",
- "35263EBDA05765DC0E71F1F5",
- "0103257B4224507C0242FEFE821EA7FA42E0A82863E5F8B68F7D881B4B44FA428A2B6B21D2F591260802D8AB6D83",
- "9D6D1FC93AE8A64E7889B7B2E3521EFA9B920A8DDB692E6F833DDC4A38AFA535E5E2A3ED82CB7E26404AB86C54D01C4668F28398C2DF33D5D561CBA1C8DCFA7A912F5048E545B59483C0E3221F54B14DAA2E4EB657B3BEF9554F34CAD69B2724AE962D3D8A",
- "E93852D1985C5E775655E937FA79CE5BF28A585F2AF53A5018853B9634BE3C84499AC0081918FDCE0624494D60E25F76ACD6853AC7576E3C350F332249BFCABD4E73CEABC36BE4EDDA40914E598AE74174A0D7442149B26990899491BDDFE8FC54D6C18E83AE9E9A6FFBF5D376565633862EEAD88D"},
- {"2E74B25289F6FD3E578C24866E9C72A5",
- "FD912F15025AF8414642BA1D1D",
- "FB5FB8C26F365EEDAB5FE260C6E3CCD27806729C8335F146063A7F9EA93290E56CF84576EB446350D22AD730547C267B1F0BBB97EB34E1E2C41A",
- "6C092EBF78F76EE8C1C6E592277D9545BA16EDB67BC7D8480B9827702DC2F8A129E2B08A2CE710CA7E1DA45CE162BB6CD4B512E632116E2211D3C90871EFB06B8D4B902681C7FB",
- "6AC0A77F26531BF4F354A1737F99E49BE32ECD909A7A71AD69352906F54B08A9CE9B8CA5D724CBFFC5673437F23F630697F3B84117A1431D6FA8CC13A974FB4AD360300522E09511B99E71065D5AC4BBCB1D791E864EF4"},
- {"E7EC507C802528F790AFF5303A017B17",
- "4B97A7A568940A9E3CE7A99E93031E",
- "28349BDC5A09390C480F9B8AA3EDEA3DDB8B9D64BCA322C570B8225DF0E31190DAB25A4014BA39519E02ABFB12B89AA28BBFD29E486E7FB28734258C817B63CED9912DBAFEBB93E2798AB2890DE3B0ACFCFF906AB15563EF7823CE83D27CDB251195E22BD1337BCBDE65E7C2C427321C463C2777BFE5AEAA",
- "9455B3EA706B74",
- "7F33BA3EA848D48A96B9530E26888F43EBD4463C9399B6"},
- {"6C928AA3224736F28EE7378DE0090191",
- "8936138E2E4C6A13280017A1622D",
- "6202717F2631565BDCDC57C6584543E72A7C8BD444D0D108ED35069819633C",
- "DA0691439E5F035F3E455269D14FE5C201C8C9B0A3FE2D3F86BCC59387C868FE65733D388360B31E3CE28B4BF6A8BE636706B536D5720DB66B47CF1C7A5AFD6F61E0EF90F1726D6B0E169F9A768B2B7AE4EE00A17F630AC905FCAAA1B707FFF25B3A1AAE83B504837C64A5639B2A34002B300EC035C9B43654DA55",
- "B8804D182AB0F0EEB464FA7BD1329AD6154F982013F3765FEDFE09E26DAC078C9C1439BFC1159D6C02A25E3FF83EF852570117B315852AD5EE20E0FA3AA0A626B0E43BC0CEA38B44579DD36803455FB46989B90E6D229F513FD727AF8372517E9488384C515D6067704119C931299A0982EDDFB9C2E86A90C450C077EB222511EC9CCABC9FCFDB19F70088"},
- {"ECEA315CA4B3F425B0C9957A17805EA4",
- "664CDAE18403F4F9BA13015A44FC",
- "642AFB090D6C6DB46783F08B01A3EF2A8FEB5736B531EAC226E7888FCC8505F396818F83105065FACB3267485B9E5E4A0261F621041C08FCCB2A809A49AB5252A91D0971BCC620B9D614BD77E57A0EED2FA5",
- "6852C31F8083E20E364CEA21BB7854D67CEE812FE1C9ED2425C0932A90D3780728D1BB",
- "2ECEF962A9695A463ADABB275BDA9FF8B2BA57AEC2F52EFFB700CD9271A74D2A011C24AEA946051BD6291776429B7E681BA33E"},
- {"4EE616C4A58AAA380878F71A373461F6",
- "91B8C9C176D9C385E9C47E52",
- "CDA440B7F9762C572A718AC754EDEECC119E5EE0CCB9FEA4FFB22EEE75087C032EBF3DA9CDD8A28CC010B99ED45143B41A4BA50EA2A005473F89639237838867A57F23B0F0ED3BF22490E4501DAC9C658A9B9F",
- "D6E645FA9AE410D15B8123FD757FA356A8DBE9258DDB5BE88832E615910993F497EC",
- "B70ED7BF959FB2AAED4F36174A2A99BFB16992C8CDF369C782C4DB9C73DE78C5DB8E0615F647243B97ACDB24503BC9CADC48"},
- {"DCD475773136C830D5E3D0C5FE05B7FF",
- "BB8E1FBB483BE7616A922C4A",
- "36FEF2E1CB29E76A6EA663FC3AF66ECD7404F466382F7B040AABED62293302B56E8783EF7EBC21B4A16C3E78A7483A0A403F253A2CDC5BBF79DC3DAE6C73F39A961D8FBBE8D41B",
- "441E886EA38322B2437ECA7DEB5282518865A66780A454E510878E61BFEC3106A3CD93D2A02052E6F9E1832F9791053E3B76BF4C07EFDD6D4106E3027FABB752E60C1AA425416A87D53938163817A1051EBA1D1DEEB4B9B25C7E97368B52E5911A31810B0EC5AF547559B6142D9F4C4A6EF24A4CF75271BF9D48F62B",
- "1BE4DD2F4E25A6512C2CC71D24BBB07368589A94C2714962CD0ACE5605688F06342587521E75F0ACAFFD86212FB5C34327D238DB36CF2B787794B9A4412E7CD1410EA5DDD2450C265F29CF96013CD213FD2880657694D718558964BC189B4A84AFCF47EB012935483052399DBA5B088B0A0477F20DFE0E85DCB735E21F22A439FB837DD365A93116D063E607"},
- {"3FBA2B3D30177FFE15C1C59ED2148BB2C091F5615FBA7C07",
- "FACF804A4BEBF998505FF9DE",
- "8213B9263B2971A5BDA18DBD02208EE1",
- "15B323926993B326EA19F892D704439FC478828322AF72118748284A1FD8A6D814E641F70512FD706980337379F31DC63355974738D7FEA87AD2858C0C2EBBFBE74371C21450072373C7B651B334D7C4D43260B9D7CCD3AF9EDB",
- "6D35DC1469B26E6AAB26272A41B46916397C24C485B61162E640A062D9275BC33DDCFD3D9E1A53B6C8F51AC89B66A41D59B3574197A40D9B6DCF8A4E2A001409C8112F16B9C389E0096179DB914E05D6D11ED0005AD17E1CE105A2F0BAB8F6B1540DEB968B7A5428FF44"},
- {"53B52B8D4D748BCDF1DDE68857832FA46227FA6E2F32EFA1",
- "0B0EF53D4606B28D1398355F",
- "F23882436349094AF98BCACA8218E81581A043B19009E28EFBF2DE37883E04864148CC01D240552CA8844EC1456F42034653067DA67E80F87105FD06E14FF771246C9612867BE4D215F6D761",
- "F15030679BD4088D42CAC9BF2E9606EAD4798782FA3ED8C57EBE7F84A53236F51B25967C6489D0CD20C9EEA752F9BC",
- "67B96E2D67C3729C96DAEAEDF821D61C17E648643A2134C5621FEC621186915AD80864BFD1EB5B238BF526A679385E012A457F583AFA78134242E9D9C1B4E4"},
- {"0272DD80F23399F49BFC320381A5CD8225867245A49A7D41",
- "5C83F4896D0738E1366B1836",
- "69B0337289B19F73A12BAEEA857CCAF396C11113715D9500CCCF48BA08CFF12BC8B4BADB3084E63B85719DB5058FA7C2C11DEB096D7943CFA7CAF5",
- "C01AD10FC8B562CD17C7BC2FAB3E26CBDFF8D7F4DEA816794BBCC12336991712972F52816AABAB244EB43B0137E2BAC1DD413CE79531E78BEF782E6B439612BB3AEF154DE3502784F287958EBC159419F9EBA27916A28D6307324129F506B1DE80C1755A929F87",
- "FEFE52DD7159C8DD6E8EC2D3D3C0F37AB6CB471A75A071D17EC4ACDD8F3AA4D7D4F7BB559F3C09099E3D9003E5E8AA1F556B79CECDE66F85B08FA5955E6976BF2695EA076388A62D2AD5BAB7CBF1A7F3F4C8D5CDF37CDE99BD3E30B685D9E5EEE48C7C89118EF4878EB89747F28271FA2CC45F8E9E7601"},
- {"3EEAED04A455D6E5E5AB53CFD5AFD2F2BC625C7BF4BE49A5",
- "36B88F63ADBB5668588181D774",
- "D367E3CB3703E762D23C6533188EF7028EFF9D935A3977150361997EC9DEAF1E4794BDE26AA8B53C124980B1362EC86FCDDFC7A90073171C1BAEE351A53234B86C66E8AB92FAE99EC6967A6D3428892D80",
- "573454C719A9A55E04437BF7CBAAF27563CCCD92ADD5E515CD63305DFF0687E5EEF790C5DCA5C0033E9AB129505E2775438D92B38F08F3B0356BA142C6F694",
- "E9F79A5B432D9E682C9AAA5661CFC2E49A0FCB81A431E54B42EB73DD3BED3F377FEC556ABA81624BA64A5D739AD41467460088F8D4F442180A9382CA635745473794C382FCDDC49BA4EB6D8A44AE3C"},
- {"B695C691538F8CBD60F039D0E28894E3693CC7C36D92D79D",
- "BC099AEB637361BAC536B57618",
- "BFFF1A65AE38D1DC142C71637319F5F6508E2CB33C9DCB94202B359ED5A5ED8042E7F4F09231D32A7242976677E6F4C549BF65FADC99E5AF43F7A46FD95E16C2",
- "081DF3FD85B415D803F0BE5AC58CFF0023FDDED99788296C3731D8",
- "E50C64E3614D94FE69C47092E46ACC9957C6FEA2CCBF96BC62FBABE7424753C75F9C147C42AE26FE171531"},
- {"C9ACBD2718F0689A1BE9802A551B6B8D9CF5614DAF5E65ED",
- "B1B0AAF373B8B026EB80422051D8",
- "6648C0E61AC733C76119D23FB24548D637751387AA2EAE9D80E912B7BD486CAAD9EAF4D7A5FE2B54AAD481E8EC94BB4D558000896E2010462B70C9FED1E7273080D1",
- "189F591F6CB6D59AFEDD14C341741A8F1037DC0DF00FC57CE65C30F49E860255CEA5DC6019380CC0FE8880BC1A9E685F41C239C38F36E3F2A1388865C5C311059C0A",
- "922A5E949B61D03BE34AB5F4E58607D4504EA14017BB363DAE3C873059EA7A1C77A746FB78981671D26C2CF6D9F24952D510044CE02A10177E9DB42D0145211DFE6E84369C5E3BC2669EAB4147B2822895F9"},
- {"7A832BD2CF5BF4919F353CE2A8C86A5E406DA2D52BE16A72",
- "2F2F17CECF7E5A756D10785A3CB9DB",
- "61DA05E3788CC2D8405DBA70C7A28E5AF699863C9F72E6C6770126929F5D6FA267F005EBCF49495CB46400958A3AE80D1289D1C671",
- "44E91121195A41AF14E8CFDBD39A4B517BE0DF1A72977ED8A3EEF8EEDA1166B2EB6DB2C4AE2E74FA0F0C74537F659BFBD141E5DDEC67E64EDA85AABD3F52C85A785B9FB3CECD70E7DF",
- "BEDF596EA21288D2B84901E188F6EE1468B14D5161D3802DBFE00D60203A24E2AB62714BF272A45551489838C3A7FEAADC177B591836E73684867CCF4E12901DCF2064058726BBA554E84ADC5136F507E961188D4AF06943D3"},
- {"1508E8AE9079AA15F1CEC4F776B4D11BCCB061B58AA56C18",
- "BCA625674F41D1E3AB47672DC0C3",
- "8B12CF84F16360F0EAD2A41BC021530FFCEC7F3579CAE658E10E2D3D81870F65AFCED0C77C6C4C6E6BA424FF23088C796BA6195ABA35094BF1829E089662E7A95FC90750AE16D0C8AFA55DAC789D7735B970B58D4BE7CEC7341DA82A0179A01929C27A59C5063215B859EA43",
- "E525422519ECE070E82C",
- "B47BC07C3ED1C0A43BA52C43CBACBCDBB29CAF1001E09FDF7107"},
- {"7550C2761644E911FE9ADD119BAC07376BEA442845FEAD876D7E7AC1B713E464",
- "36D2EC25ADD33CDEDF495205BBC923",
- "7FCFE81A3790DE97FFC3DE160C470847EA7E841177C2F759571CBD837EA004A6CA8C6F4AEBFF2E9FD552D73EB8A30705D58D70C0B67AEEA280CBBF0A477358ACEF1E7508F2735CD9A0E4F9AC92B8C008F575D3B6278F1C18BD01227E3502E5255F3AB1893632AD00C717C588EF652A51A43209E7EE90",
- "2B1A62F8FDFAA3C16470A21AD307C9A7D03ADE8EF72C69B06F8D738CDE578D7AEFD0D40BD9C022FB9F580DF5394C998ACCCEFC5471A3996FB8F1045A81FDC6F32D13502EA65A211390C8D882B8E0BEFD8DD8CBEF51D1597B124E9F7F",
- "C873E02A22DB89EB0787DB6A60B99F7E4A0A085D5C4232A81ADCE2D60AA36F92DDC33F93DD8640AC0E08416B187FB382B3EC3EE85A64B0E6EE41C1366A5AD2A282F66605E87031CCBA2FA7B2DA201D975994AADE3DD1EE122AE09604AD489B84BF0C1AB7129EE16C6934850E"},
- {"A51300285E554FDBDE7F771A9A9A80955639DD87129FAEF74987C91FB9687C71",
- "81691D5D20EC818FCFF24B33DECC",
- "C948093218AA9EB2A8E44A87EEA73FC8B6B75A196819A14BD83709EA323E8DF8B491045220E1D88729A38DBCFFB60D3056DAD4564498FD6574F74512945DEB34B69329ACED9FFC05D5D59DFCD5B973E2ACAFE6AD1EF8BBBC49351A2DD12508ED89ED",
- "EB861165DAF7625F827C6B574ED703F03215",
- "C6CD1CE76D2B3679C1B5AA1CFD67CCB55444B6BFD3E22C81CBC9BB738796B83E54E3"},
- {"8CE0156D26FAEB7E0B9B800BBB2E9D4075B5EAC5C62358B0E7F6FCE610223282",
- "D2A7B94DD12CDACA909D3AD7",
- "E021A78F374FC271389AB9A3E97077D755",
- "7C26000B58929F5095E1CEE154F76C2A299248E299F9B5ADE6C403AA1FD4A67FD4E0232F214CE7B919EE7A1027D2B76C57475715CD078461",
- "C556FB38DF069B56F337B5FF5775CE6EAA16824DFA754F20B78819028EA635C3BB7AA731DE8776B2DCB67DCA2D33EEDF3C7E52EA450013722A41755A0752433ED17BDD5991AAE77A"},
- {"1E8000A2CE00A561C9920A30BF0D7B983FEF8A1014C8F04C35CA6970E6BA02BD",
- "65ED3D63F79F90BBFD19775E",
- "336A8C0B7243582A46B221AA677647FCAE91",
- "134A8B34824A290E7B",
- "914FBEF80D0E6E17F8BDBB6097EBF5FBB0554952DC2B9E5151"},
- {"53D5607BBE690B6E8D8F6D97F3DF2BA853B682597A214B8AA0EA6E598650AF15",
- "C391A856B9FE234E14BA1AC7BB40FF",
- "479682BC21349C4BE1641D5E78FE2C79EC1B9CF5470936DCAD9967A4DCD7C4EFADA593BC9EDE71E6A08829B8580901B61E274227E9D918502DE3",
- "EAD154DC09C5E26C5D26FF33ED148B27120C7F2C23225CC0D0631B03E1F6C6D96FEB88C1A4052ACB4CE746B884B6502931F407021126C6AAB8C514C077A5A38438AE88EE",
- "938821286EBB671D999B87C032E1D6055392EB564E57970D55E545FC5E8BAB90E6E3E3C0913F6320995FC636D72CD9919657CC38BD51552F4A502D8D1FE56DB33EBAC5092630E69EBB986F0E15CEE9FC8C052501"},
- {"294362FCC984F440CEA3E9F7D2C06AF20C53AAC1B3738CA2186C914A6E193ABB",
- "B15B61C8BB39261A8F55AB178EC3",
- "D0729B6B75BB",
- "2BD089ADCE9F334BAE3B065996C7D616DD0C27DF4218DCEEA0FBCA0F968837CE26B0876083327E25681FDDD620A32EC0DA12F73FAE826CC94BFF2B90A54D2651",
- "AC94B25E4E21DE2437B806966CCD5D9385EF0CD4A51AB9FA6DE675C7B8952D67802E9FEC1FDE9F5D1EAB06057498BC0EEA454804FC9D2068982A3E24182D9AC2E7AB9994DDC899A604264583F63D066B"},
- {"959DBFEB039B1A5B8CE6A44649B602AAA5F98A906DB96143D202CD2024F749D9",
- "01D7BDB1133E9C347486C1EFA6",
- "F3843955BD741F379DD750585EDC55E2CDA05CCBA8C1F4622AC2FE35214BC3A019B8BD12C4CC42D9213D1E1556941E8D8450830287FFB3B763A13722DD4140ED9846FB5FFF745D7B0B967D810A068222E10B259AF1D392035B0D83DC1498A6830B11B2418A840212599171E0258A1C203B05362978",
- "A21811232C950FA8B12237C2EBD6A7CD2C3A155905E9E0C7C120",
- "63C1CE397B22F1A03F1FA549B43178BC405B152D3C95E977426D519B3DFCA28498823240592B6EEE7A14"},
- {"096AE499F5294173F34FF2B375F0E5D5AB79D0D03B33B1A74D7D576826345DF4",
- "0C52B3D11D636E5910A4DD76D32C",
- "229E9ECA3053789E937447BC719467075B6138A142DA528DA8F0CF8DDF022FD9AF8E74779BA3AC306609",
- "8B7A00038783E8BAF6EDEAE0C4EAB48FC8FD501A588C7E4A4DB71E3604F2155A97687D3D2FFF8569261375A513CF4398CE0F87CA1658A1050F6EF6C4EA3E25",
- "C20B6CF8D3C8241825FD90B2EDAC7593600646E579A8D8DAAE9E2E40C3835FE801B2BE4379131452BC5182C90307B176DFBE2049544222FE7783147B690774F6D9D7CEF52A91E61E298E9AA15464AC"},
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_a.go b/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_a.go
deleted file mode 100644
index 330309ff5f8..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_a.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package ocb
-
-import (
- "encoding/hex"
-)
-
-// Test vectors from https://tools.ietf.org/html/rfc7253. Note that key is
-// shared across tests.
-var testKey, _ = hex.DecodeString("000102030405060708090A0B0C0D0E0F")
-
-var rfc7253testVectors = []struct {
- nonce, header, plaintext, ciphertext string
-}{
- {"BBAA99887766554433221100",
- "",
- "",
- "785407BFFFC8AD9EDCC5520AC9111EE6"},
- {"BBAA99887766554433221101",
- "0001020304050607",
- "0001020304050607",
- "6820B3657B6F615A5725BDA0D3B4EB3A257C9AF1F8F03009"},
- {"BBAA99887766554433221102",
- "0001020304050607",
- "",
- "81017F8203F081277152FADE694A0A00"},
- {"BBAA99887766554433221103",
- "",
- "0001020304050607",
- "45DD69F8F5AAE72414054CD1F35D82760B2CD00D2F99BFA9"},
- {"BBAA99887766554433221104",
- "000102030405060708090A0B0C0D0E0F",
- "000102030405060708090A0B0C0D0E0F",
- "571D535B60B277188BE5147170A9A22C3AD7A4FF3835B8C5701C1CCEC8FC3358"},
- {"BBAA99887766554433221105",
- "000102030405060708090A0B0C0D0E0F",
- "",
- "8CF761B6902EF764462AD86498CA6B97"},
- {"BBAA99887766554433221106",
- "",
- "000102030405060708090A0B0C0D0E0F",
- "5CE88EC2E0692706A915C00AEB8B2396F40E1C743F52436BDF06D8FA1ECA343D"},
- {"BBAA99887766554433221107",
- "000102030405060708090A0B0C0D0E0F1011121314151617",
- "000102030405060708090A0B0C0D0E0F1011121314151617",
- "1CA2207308C87C010756104D8840CE1952F09673A448A122C92C62241051F57356D7F3C90BB0E07F"},
- {"BBAA99887766554433221108",
- "000102030405060708090A0B0C0D0E0F1011121314151617",
- "",
- "6DC225A071FC1B9F7C69F93B0F1E10DE"},
- {"BBAA99887766554433221109",
- "",
- "000102030405060708090A0B0C0D0E0F1011121314151617",
- "221BD0DE7FA6FE993ECCD769460A0AF2D6CDED0C395B1C3CE725F32494B9F914D85C0B1EB38357FF"},
- {"BBAA9988776655443322110A",
- "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F",
- "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F",
- "BD6F6C496201C69296C11EFD138A467ABD3C707924B964DEAFFC40319AF5A48540FBBA186C5553C68AD9F592A79A4240"},
- {"BBAA9988776655443322110B",
- "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F",
- "",
- "FE80690BEE8A485D11F32965BC9D2A32"},
- {"BBAA9988776655443322110C",
- "",
- "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F",
- "2942BFC773BDA23CABC6ACFD9BFD5835BD300F0973792EF46040C53F1432BCDFB5E1DDE3BC18A5F840B52E653444D5DF"},
- {"BBAA9988776655443322110D",
- "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627",
- "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627",
- "D5CA91748410C1751FF8A2F618255B68A0A12E093FF454606E59F9C1D0DDC54B65E8628E568BAD7AED07BA06A4A69483A7035490C5769E60"},
- {"BBAA9988776655443322110E",
- "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627",
- "",
- "C5CD9D1850C141E358649994EE701B68"},
- {"BBAA9988776655443322110F",
- "",
- "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627",
- "4412923493C57D5DE0D700F753CCE0D1D2D95060122E9F15A5DDBFC5787E50B5CC55EE507BCB084E479AD363AC366B95A98CA5F3000B1479"},
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_b.go b/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_b.go
deleted file mode 100644
index 14a3c336fbc..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_b.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package ocb
-
-// Second set of test vectors from https://tools.ietf.org/html/rfc7253
-var rfc7253TestVectorTaglen96 = struct {
- key, nonce, header, plaintext, ciphertext string
-}{"0F0E0D0C0B0A09080706050403020100",
- "BBAA9988776655443322110D",
- "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627",
- "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627",
- "1792A4E31E0755FB03E31B22116E6C2DDF9EFD6E33D536F1A0124B0A55BAE884ED93481529C76B6AD0C515F4D1CDD4FDAC4F02AA"}
-
-var rfc7253AlgorithmTest = []struct {
- KEYLEN, TAGLEN int
- OUTPUT string
-}{
- {128, 128, "67E944D23256C5E0B6C61FA22FDF1EA2"},
- {192, 128, "F673F2C3E7174AAE7BAE986CA9F29E17"},
- {256, 128, "D90EB8E9C977C88B79DD793D7FFA161C"},
- {128, 96, "77A3D8E73589158D25D01209"},
- {192, 96, "05D56EAD2752C86BE6932C5E"},
- {256, 96, "5458359AC23B0CBA9E6330DD"},
- {128, 64, "192C9B7BD90BA06A"},
- {192, 64, "0066BC6E0EF34E24"},
- {256, 64, "7D4EA5D445501CBE"},
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/aes/keywrap/keywrap.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/aes/keywrap/keywrap.go
deleted file mode 100644
index 3c6251d1ce6..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/aes/keywrap/keywrap.go
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright 2014 Matthew Endsley
-// All rights reserved
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted providing that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-// ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
-// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
-// OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
-// IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-// POSSIBILITY OF SUCH DAMAGE.
-
-// Package keywrap is an implementation of the RFC 3394 AES key wrapping
-// algorithm. This is used in OpenPGP with elliptic curve keys.
-package keywrap
-
-import (
- "crypto/aes"
- "encoding/binary"
- "errors"
-)
-
-var (
- // ErrWrapPlaintext is returned if the plaintext is not a multiple
- // of 64 bits.
- ErrWrapPlaintext = errors.New("keywrap: plainText must be a multiple of 64 bits")
-
- // ErrUnwrapCiphertext is returned if the ciphertext is not a
- // multiple of 64 bits.
- ErrUnwrapCiphertext = errors.New("keywrap: cipherText must by a multiple of 64 bits")
-
- // ErrUnwrapFailed is returned if unwrapping a key fails.
- ErrUnwrapFailed = errors.New("keywrap: failed to unwrap key")
-
- // NB: the AES NewCipher call only fails if the key is an invalid length.
-
- // ErrInvalidKey is returned when the AES key is invalid.
- ErrInvalidKey = errors.New("keywrap: invalid AES key")
-)
-
-// Wrap a key using the RFC 3394 AES Key Wrap Algorithm.
-func Wrap(key, plainText []byte) ([]byte, error) {
- if len(plainText)%8 != 0 {
- return nil, ErrWrapPlaintext
- }
-
- c, err := aes.NewCipher(key)
- if err != nil {
- return nil, ErrInvalidKey
- }
-
- nblocks := len(plainText) / 8
-
- // 1) Initialize variables.
- var block [aes.BlockSize]byte
- // - Set A = IV, an initial value (see 2.2.3)
- for ii := 0; ii < 8; ii++ {
- block[ii] = 0xA6
- }
-
- // - For i = 1 to n
- // - Set R[i] = P[i]
- intermediate := make([]byte, len(plainText))
- copy(intermediate, plainText)
-
- // 2) Calculate intermediate values.
- for ii := 0; ii < 6; ii++ {
- for jj := 0; jj < nblocks; jj++ {
- // - B = AES(K, A | R[i])
- copy(block[8:], intermediate[jj*8:jj*8+8])
- c.Encrypt(block[:], block[:])
-
- // - A = MSB(64, B) ^ t where t = (n*j)+1
- t := uint64(ii*nblocks + jj + 1)
- val := binary.BigEndian.Uint64(block[:8]) ^ t
- binary.BigEndian.PutUint64(block[:8], val)
-
- // - R[i] = LSB(64, B)
- copy(intermediate[jj*8:jj*8+8], block[8:])
- }
- }
-
- // 3) Output results.
- // - Set C[0] = A
- // - For i = 1 to n
- // - C[i] = R[i]
- return append(block[:8], intermediate...), nil
-}
-
-// Unwrap a key using the RFC 3394 AES Key Wrap Algorithm.
-func Unwrap(key, cipherText []byte) ([]byte, error) {
- if len(cipherText)%8 != 0 {
- return nil, ErrUnwrapCiphertext
- }
-
- c, err := aes.NewCipher(key)
- if err != nil {
- return nil, ErrInvalidKey
- }
-
- nblocks := len(cipherText)/8 - 1
-
- // 1) Initialize variables.
- var block [aes.BlockSize]byte
- // - Set A = C[0]
- copy(block[:8], cipherText[:8])
-
- // - For i = 1 to n
- // - Set R[i] = C[i]
- intermediate := make([]byte, len(cipherText)-8)
- copy(intermediate, cipherText[8:])
-
- // 2) Compute intermediate values.
- for jj := 5; jj >= 0; jj-- {
- for ii := nblocks - 1; ii >= 0; ii-- {
- // - B = AES-1(K, (A ^ t) | R[i]) where t = n*j+1
- // - A = MSB(64, B)
- t := uint64(jj*nblocks + ii + 1)
- val := binary.BigEndian.Uint64(block[:8]) ^ t
- binary.BigEndian.PutUint64(block[:8], val)
-
- copy(block[8:], intermediate[ii*8:ii*8+8])
- c.Decrypt(block[:], block[:])
-
- // - R[i] = LSB(B, 64)
- copy(intermediate[ii*8:ii*8+8], block[8:])
- }
- }
-
- // 3) Output results.
- // - If A is an appropriate initial value (see 2.2.3),
- for ii := 0; ii < 8; ii++ {
- if block[ii] != 0xA6 {
- return nil, ErrUnwrapFailed
- }
- }
-
- // - For i = 1 to n
- // - P[i] = R[i]
- return intermediate, nil
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go
deleted file mode 100644
index e0a677f2843..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go
+++ /dev/null
@@ -1,183 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package armor implements OpenPGP ASCII Armor, see RFC 4880. OpenPGP Armor is
-// very similar to PEM except that it has an additional CRC checksum.
-package armor // import "github.com/ProtonMail/go-crypto/openpgp/armor"
-
-import (
- "bufio"
- "bytes"
- "encoding/base64"
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
-)
-
-// A Block represents an OpenPGP armored structure.
-//
-// The encoded form is:
-//
-// -----BEGIN Type-----
-// Headers
-//
-// base64-encoded Bytes
-// '=' base64 encoded checksum (optional) not checked anymore
-// -----END Type-----
-//
-// where Headers is a possibly empty sequence of Key: Value lines.
-//
-// Since the armored data can be very large, this package presents a streaming
-// interface.
-type Block struct {
- Type string // The type, taken from the preamble (i.e. "PGP SIGNATURE").
- Header map[string]string // Optional headers.
- Body io.Reader // A Reader from which the contents can be read
- lReader lineReader
- oReader openpgpReader
-}
-
-var ArmorCorrupt error = errors.StructuralError("armor invalid")
-
-var armorStart = []byte("-----BEGIN ")
-var armorEnd = []byte("-----END ")
-var armorEndOfLine = []byte("-----")
-
-// lineReader wraps a line based reader. It watches for the end of an armor block
-type lineReader struct {
- in *bufio.Reader
- buf []byte
- eof bool
-}
-
-func (l *lineReader) Read(p []byte) (n int, err error) {
- if l.eof {
- return 0, io.EOF
- }
-
- if len(l.buf) > 0 {
- n = copy(p, l.buf)
- l.buf = l.buf[n:]
- return
- }
-
- line, isPrefix, err := l.in.ReadLine()
- if err != nil {
- return
- }
- if isPrefix {
- return 0, ArmorCorrupt
- }
-
- if bytes.HasPrefix(line, armorEnd) {
- l.eof = true
- return 0, io.EOF
- }
-
- if len(line) == 5 && line[0] == '=' {
- // This is the checksum line
- // Don't check the checksum
-
- l.eof = true
- return 0, io.EOF
- }
-
- if len(line) > 96 {
- return 0, ArmorCorrupt
- }
-
- n = copy(p, line)
- bytesToSave := len(line) - n
- if bytesToSave > 0 {
- if cap(l.buf) < bytesToSave {
- l.buf = make([]byte, 0, bytesToSave)
- }
- l.buf = l.buf[0:bytesToSave]
- copy(l.buf, line[n:])
- }
-
- return
-}
-
-// openpgpReader passes Read calls to the underlying base64 decoder.
-type openpgpReader struct {
- lReader *lineReader
- b64Reader io.Reader
-}
-
-func (r *openpgpReader) Read(p []byte) (n int, err error) {
- n, err = r.b64Reader.Read(p)
- return
-}
-
-// Decode reads a PGP armored block from the given Reader. It will ignore
-// leading garbage. If it doesn't find a block, it will return nil, io.EOF. The
-// given Reader is not usable after calling this function: an arbitrary amount
-// of data may have been read past the end of the block.
-func Decode(in io.Reader) (p *Block, err error) {
- r := bufio.NewReaderSize(in, 100)
- var line []byte
- ignoreNext := false
-
-TryNextBlock:
- p = nil
-
- // Skip leading garbage
- for {
- ignoreThis := ignoreNext
- line, ignoreNext, err = r.ReadLine()
- if err != nil {
- return
- }
- if ignoreNext || ignoreThis {
- continue
- }
- line = bytes.TrimSpace(line)
- if len(line) > len(armorStart)+len(armorEndOfLine) && bytes.HasPrefix(line, armorStart) {
- break
- }
- }
-
- p = new(Block)
- p.Type = string(line[len(armorStart) : len(line)-len(armorEndOfLine)])
- p.Header = make(map[string]string)
- nextIsContinuation := false
- var lastKey string
-
- // Read headers
- for {
- isContinuation := nextIsContinuation
- line, nextIsContinuation, err = r.ReadLine()
- if err != nil {
- p = nil
- return
- }
- if isContinuation {
- p.Header[lastKey] += string(line)
- continue
- }
- line = bytes.TrimSpace(line)
- if len(line) == 0 {
- break
- }
-
- i := bytes.Index(line, []byte(":"))
- if i == -1 {
- goto TryNextBlock
- }
- lastKey = string(line[:i])
- var value string
- if len(line) > i+2 {
- value = string(line[i+2:])
- }
- p.Header[lastKey] = value
- }
-
- p.lReader.in = r
- p.oReader.lReader = &p.lReader
- p.oReader.b64Reader = base64.NewDecoder(base64.StdEncoding, &p.lReader)
- p.Body = &p.oReader
-
- return
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go
deleted file mode 100644
index 112f98b8351..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go
+++ /dev/null
@@ -1,198 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package armor
-
-import (
- "encoding/base64"
- "io"
-)
-
-var armorHeaderSep = []byte(": ")
-var blockEnd = []byte("\n=")
-var newline = []byte("\n")
-var armorEndOfLineOut = []byte("-----\n")
-
-const crc24Init = 0xb704ce
-const crc24Poly = 0x1864cfb
-
-// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1
-func crc24(crc uint32, d []byte) uint32 {
- for _, b := range d {
- crc ^= uint32(b) << 16
- for i := 0; i < 8; i++ {
- crc <<= 1
- if crc&0x1000000 != 0 {
- crc ^= crc24Poly
- }
- }
- }
- return crc
-}
-
-// writeSlices writes its arguments to the given Writer.
-func writeSlices(out io.Writer, slices ...[]byte) (err error) {
- for _, s := range slices {
- _, err = out.Write(s)
- if err != nil {
- return err
- }
- }
- return
-}
-
-// lineBreaker breaks data across several lines, all of the same byte length
-// (except possibly the last). Lines are broken with a single '\n'.
-type lineBreaker struct {
- lineLength int
- line []byte
- used int
- out io.Writer
- haveWritten bool
-}
-
-func newLineBreaker(out io.Writer, lineLength int) *lineBreaker {
- return &lineBreaker{
- lineLength: lineLength,
- line: make([]byte, lineLength),
- used: 0,
- out: out,
- }
-}
-
-func (l *lineBreaker) Write(b []byte) (n int, err error) {
- n = len(b)
-
- if n == 0 {
- return
- }
-
- if l.used == 0 && l.haveWritten {
- _, err = l.out.Write([]byte{'\n'})
- if err != nil {
- return
- }
- }
-
- if l.used+len(b) < l.lineLength {
- l.used += copy(l.line[l.used:], b)
- return
- }
-
- l.haveWritten = true
- _, err = l.out.Write(l.line[0:l.used])
- if err != nil {
- return
- }
- excess := l.lineLength - l.used
- l.used = 0
-
- _, err = l.out.Write(b[0:excess])
- if err != nil {
- return
- }
-
- _, err = l.Write(b[excess:])
- return
-}
-
-func (l *lineBreaker) Close() (err error) {
- if l.used > 0 {
- _, err = l.out.Write(l.line[0:l.used])
- if err != nil {
- return
- }
- }
-
- return
-}
-
-// encoding keeps track of a running CRC24 over the data which has been written
-// to it and outputs a OpenPGP checksum when closed, followed by an armor
-// trailer.
-//
-// It's built into a stack of io.Writers:
-//
-// encoding -> base64 encoder -> lineBreaker -> out
-type encoding struct {
- out io.Writer
- breaker *lineBreaker
- b64 io.WriteCloser
- crc uint32
- crcEnabled bool
- blockType []byte
-}
-
-func (e *encoding) Write(data []byte) (n int, err error) {
- if e.crcEnabled {
- e.crc = crc24(e.crc, data)
- }
- return e.b64.Write(data)
-}
-
-func (e *encoding) Close() (err error) {
- err = e.b64.Close()
- if err != nil {
- return
- }
- e.breaker.Close()
-
- if e.crcEnabled {
- var checksumBytes [3]byte
- checksumBytes[0] = byte(e.crc >> 16)
- checksumBytes[1] = byte(e.crc >> 8)
- checksumBytes[2] = byte(e.crc)
-
- var b64ChecksumBytes [4]byte
- base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:])
-
- return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine)
- }
- return writeSlices(e.out, newline, armorEnd, e.blockType, armorEndOfLine)
-}
-
-func encode(out io.Writer, blockType string, headers map[string]string, checksum bool) (w io.WriteCloser, err error) {
- bType := []byte(blockType)
- err = writeSlices(out, armorStart, bType, armorEndOfLineOut)
- if err != nil {
- return
- }
-
- for k, v := range headers {
- err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline)
- if err != nil {
- return
- }
- }
-
- _, err = out.Write(newline)
- if err != nil {
- return
- }
-
- e := &encoding{
- out: out,
- breaker: newLineBreaker(out, 64),
- blockType: bType,
- crc: crc24Init,
- crcEnabled: checksum,
- }
- e.b64 = base64.NewEncoder(base64.StdEncoding, e.breaker)
- return e, nil
-}
-
-// Encode returns a WriteCloser which will encode the data written to it in
-// OpenPGP armor.
-func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) {
- return encode(out, blockType, headers, true)
-}
-
-// EncodeWithChecksumOption returns a WriteCloser which will encode the data written to it in
-// OpenPGP armor and provides the option to include a checksum.
-// When forming ASCII Armor, the CRC24 footer SHOULD NOT be generated,
-// unless interoperability with implementations that require the CRC24 footer
-// to be present is a concern.
-func EncodeWithChecksumOption(out io.Writer, blockType string, headers map[string]string, doChecksum bool) (w io.WriteCloser, err error) {
- return encode(out, blockType, headers, doChecksum)
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go
deleted file mode 100644
index 5b40e1375de..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package openpgp
-
-import (
- "hash"
- "io"
-)
-
-// NewCanonicalTextHash reformats text written to it into the canonical
-// form and then applies the hash h. See RFC 4880, section 5.2.1.
-func NewCanonicalTextHash(h hash.Hash) hash.Hash {
- return &canonicalTextHash{h, 0}
-}
-
-type canonicalTextHash struct {
- h hash.Hash
- s int
-}
-
-var newline = []byte{'\r', '\n'}
-
-func writeCanonical(cw io.Writer, buf []byte, s *int) (int, error) {
- start := 0
- for i, c := range buf {
- switch *s {
- case 0:
- if c == '\r' {
- *s = 1
- } else if c == '\n' {
- if _, err := cw.Write(buf[start:i]); err != nil {
- return 0, err
- }
- if _, err := cw.Write(newline); err != nil {
- return 0, err
- }
- start = i + 1
- }
- case 1:
- *s = 0
- }
- }
-
- if _, err := cw.Write(buf[start:]); err != nil {
- return 0, err
- }
- return len(buf), nil
-}
-
-func (cth *canonicalTextHash) Write(buf []byte) (int, error) {
- return writeCanonical(cth.h, buf, &cth.s)
-}
-
-func (cth *canonicalTextHash) Sum(in []byte) []byte {
- return cth.h.Sum(in)
-}
-
-func (cth *canonicalTextHash) Reset() {
- cth.h.Reset()
- cth.s = 0
-}
-
-func (cth *canonicalTextHash) Size() int {
- return cth.h.Size()
-}
-
-func (cth *canonicalTextHash) BlockSize() int {
- return cth.h.BlockSize()
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go
deleted file mode 100644
index c895bad6bbc..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package ecdh implements ECDH encryption, suitable for OpenPGP,
-// as specified in RFC 6637, section 8.
-package ecdh
-
-import (
- "bytes"
- "errors"
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/aes/keywrap"
- "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
- "github.com/ProtonMail/go-crypto/openpgp/internal/ecc"
-)
-
-type KDF struct {
- Hash algorithm.Hash
- Cipher algorithm.Cipher
-}
-
-type PublicKey struct {
- curve ecc.ECDHCurve
- Point []byte
- KDF
-}
-
-type PrivateKey struct {
- PublicKey
- D []byte
-}
-
-func NewPublicKey(curve ecc.ECDHCurve, kdfHash algorithm.Hash, kdfCipher algorithm.Cipher) *PublicKey {
- return &PublicKey{
- curve: curve,
- KDF: KDF{
- Hash: kdfHash,
- Cipher: kdfCipher,
- },
- }
-}
-
-func NewPrivateKey(key PublicKey) *PrivateKey {
- return &PrivateKey{
- PublicKey: key,
- }
-}
-
-func (pk *PublicKey) GetCurve() ecc.ECDHCurve {
- return pk.curve
-}
-
-func (pk *PublicKey) MarshalPoint() []byte {
- return pk.curve.MarshalBytePoint(pk.Point)
-}
-
-func (pk *PublicKey) UnmarshalPoint(p []byte) error {
- pk.Point = pk.curve.UnmarshalBytePoint(p)
- if pk.Point == nil {
- return errors.New("ecdh: failed to parse EC point")
- }
- return nil
-}
-
-func (sk *PrivateKey) MarshalByteSecret() []byte {
- return sk.curve.MarshalByteSecret(sk.D)
-}
-
-func (sk *PrivateKey) UnmarshalByteSecret(d []byte) error {
- sk.D = sk.curve.UnmarshalByteSecret(d)
-
- if sk.D == nil {
- return errors.New("ecdh: failed to parse scalar")
- }
- return nil
-}
-
-func GenerateKey(rand io.Reader, c ecc.ECDHCurve, kdf KDF) (priv *PrivateKey, err error) {
- priv = new(PrivateKey)
- priv.PublicKey.curve = c
- priv.PublicKey.KDF = kdf
- priv.PublicKey.Point, priv.D, err = c.GenerateECDH(rand)
- return
-}
-
-func Encrypt(random io.Reader, pub *PublicKey, msg, curveOID, fingerprint []byte) (vsG, c []byte, err error) {
- if len(msg) > 40 {
- return nil, nil, errors.New("ecdh: message too long")
- }
- // the sender MAY use 21, 13, and 5 bytes of padding for AES-128,
- // AES-192, and AES-256, respectively, to provide the same number of
- // octets, 40 total, as an input to the key wrapping method.
- padding := make([]byte, 40-len(msg))
- for i := range padding {
- padding[i] = byte(40 - len(msg))
- }
- m := append(msg, padding...)
-
- ephemeral, zb, err := pub.curve.Encaps(random, pub.Point)
- if err != nil {
- return nil, nil, err
- }
-
- vsG = pub.curve.MarshalBytePoint(ephemeral)
-
- z, err := buildKey(pub, zb, curveOID, fingerprint, false, false)
- if err != nil {
- return nil, nil, err
- }
-
- if c, err = keywrap.Wrap(z, m); err != nil {
- return nil, nil, err
- }
-
- return vsG, c, nil
-
-}
-
-func Decrypt(priv *PrivateKey, vsG, c, curveOID, fingerprint []byte) (msg []byte, err error) {
- var m []byte
- zb, err := priv.PublicKey.curve.Decaps(priv.curve.UnmarshalBytePoint(vsG), priv.D)
-
- // Try buildKey three times to workaround an old bug, see comments in buildKey.
- for i := 0; i < 3; i++ {
- var z []byte
- // RFC6637 §8: "Compute Z = KDF( S, Z_len, Param );"
- z, err = buildKey(&priv.PublicKey, zb, curveOID, fingerprint, i == 1, i == 2)
- if err != nil {
- return nil, err
- }
-
- // RFC6637 §8: "Compute C = AESKeyWrap( Z, c ) as per [RFC3394]"
- m, err = keywrap.Unwrap(z, c)
- if err == nil {
- break
- }
- }
-
- // Only return an error after we've tried all (required) variants of buildKey.
- if err != nil {
- return nil, err
- }
-
- // RFC6637 §8: "m = symm_alg_ID || session key || checksum || pkcs5_padding"
- // The last byte should be the length of the padding, as per PKCS5; strip it off.
- return m[:len(m)-int(m[len(m)-1])], nil
-}
-
-func buildKey(pub *PublicKey, zb []byte, curveOID, fingerprint []byte, stripLeading, stripTrailing bool) ([]byte, error) {
- // Param = curve_OID_len || curve_OID || public_key_alg_ID || 03
- // || 01 || KDF_hash_ID || KEK_alg_ID for AESKeyWrap
- // || "Anonymous Sender " || recipient_fingerprint;
- param := new(bytes.Buffer)
- if _, err := param.Write(curveOID); err != nil {
- return nil, err
- }
- algKDF := []byte{18, 3, 1, pub.KDF.Hash.Id(), pub.KDF.Cipher.Id()}
- if _, err := param.Write(algKDF); err != nil {
- return nil, err
- }
- if _, err := param.Write([]byte("Anonymous Sender ")); err != nil {
- return nil, err
- }
- // For v5 keys, the 20 leftmost octets of the fingerprint are used.
- if _, err := param.Write(fingerprint[:20]); err != nil {
- return nil, err
- }
- if param.Len()-len(curveOID) != 45 {
- return nil, errors.New("ecdh: malformed KDF Param")
- }
-
- // MB = Hash ( 00 || 00 || 00 || 01 || ZB || Param );
- h := pub.KDF.Hash.New()
- if _, err := h.Write([]byte{0x0, 0x0, 0x0, 0x1}); err != nil {
- return nil, err
- }
- zbLen := len(zb)
- i := 0
- j := zbLen - 1
- if stripLeading {
- // Work around old go crypto bug where the leading zeros are missing.
- for i < zbLen && zb[i] == 0 {
- i++
- }
- }
- if stripTrailing {
- // Work around old OpenPGP.js bug where insignificant trailing zeros in
- // this little-endian number are missing.
- // (See https://github.com/openpgpjs/openpgpjs/pull/853.)
- for j >= 0 && zb[j] == 0 {
- j--
- }
- }
- if _, err := h.Write(zb[i : j+1]); err != nil {
- return nil, err
- }
- if _, err := h.Write(param.Bytes()); err != nil {
- return nil, err
- }
- mb := h.Sum(nil)
-
- return mb[:pub.KDF.Cipher.KeySize()], nil // return oBits leftmost bits of MB.
-
-}
-
-func Validate(priv *PrivateKey) error {
- return priv.curve.ValidateECDH(priv.Point, priv.D)
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdsa/ecdsa.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdsa/ecdsa.go
deleted file mode 100644
index f94ae1b2f50..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdsa/ecdsa.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Package ecdsa implements ECDSA signature, suitable for OpenPGP,
-// as specified in RFC 6637, section 5.
-package ecdsa
-
-import (
- "errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/ecc"
- "io"
- "math/big"
-)
-
-type PublicKey struct {
- X, Y *big.Int
- curve ecc.ECDSACurve
-}
-
-type PrivateKey struct {
- PublicKey
- D *big.Int
-}
-
-func NewPublicKey(curve ecc.ECDSACurve) *PublicKey {
- return &PublicKey{
- curve: curve,
- }
-}
-
-func NewPrivateKey(key PublicKey) *PrivateKey {
- return &PrivateKey{
- PublicKey: key,
- }
-}
-
-func (pk *PublicKey) GetCurve() ecc.ECDSACurve {
- return pk.curve
-}
-
-func (pk *PublicKey) MarshalPoint() []byte {
- return pk.curve.MarshalIntegerPoint(pk.X, pk.Y)
-}
-
-func (pk *PublicKey) UnmarshalPoint(p []byte) error {
- pk.X, pk.Y = pk.curve.UnmarshalIntegerPoint(p)
- if pk.X == nil {
- return errors.New("ecdsa: failed to parse EC point")
- }
- return nil
-}
-
-func (sk *PrivateKey) MarshalIntegerSecret() []byte {
- return sk.curve.MarshalIntegerSecret(sk.D)
-}
-
-func (sk *PrivateKey) UnmarshalIntegerSecret(d []byte) error {
- sk.D = sk.curve.UnmarshalIntegerSecret(d)
-
- if sk.D == nil {
- return errors.New("ecdsa: failed to parse scalar")
- }
- return nil
-}
-
-func GenerateKey(rand io.Reader, c ecc.ECDSACurve) (priv *PrivateKey, err error) {
- priv = new(PrivateKey)
- priv.PublicKey.curve = c
- priv.PublicKey.X, priv.PublicKey.Y, priv.D, err = c.GenerateECDSA(rand)
- return
-}
-
-func Sign(rand io.Reader, priv *PrivateKey, hash []byte) (r, s *big.Int, err error) {
- return priv.PublicKey.curve.Sign(rand, priv.X, priv.Y, priv.D, hash)
-}
-
-func Verify(pub *PublicKey, hash []byte, r, s *big.Int) bool {
- return pub.curve.Verify(pub.X, pub.Y, hash, r, s)
-}
-
-func Validate(priv *PrivateKey) error {
- return priv.curve.ValidateECDSA(priv.X, priv.Y, priv.D.Bytes())
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/ed25519/ed25519.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/ed25519/ed25519.go
deleted file mode 100644
index 6abdf7c4466..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/ed25519/ed25519.go
+++ /dev/null
@@ -1,115 +0,0 @@
-// Package ed25519 implements the ed25519 signature algorithm for OpenPGP
-// as defined in the Open PGP crypto refresh.
-package ed25519
-
-import (
- "crypto/subtle"
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- ed25519lib "github.com/cloudflare/circl/sign/ed25519"
-)
-
-const (
- // PublicKeySize is the size, in bytes, of public keys in this package.
- PublicKeySize = ed25519lib.PublicKeySize
- // SeedSize is the size, in bytes, of private key seeds.
- // The private key representation used by RFC 8032.
- SeedSize = ed25519lib.SeedSize
- // SignatureSize is the size, in bytes, of signatures generated and verified by this package.
- SignatureSize = ed25519lib.SignatureSize
-)
-
-type PublicKey struct {
- // Point represents the elliptic curve point of the public key.
- Point []byte
-}
-
-type PrivateKey struct {
- PublicKey
- // Key the private key representation by RFC 8032,
- // encoded as seed | pub key point.
- Key []byte
-}
-
-// NewPublicKey creates a new empty ed25519 public key.
-func NewPublicKey() *PublicKey {
- return &PublicKey{}
-}
-
-// NewPrivateKey creates a new empty private key referencing the public key.
-func NewPrivateKey(key PublicKey) *PrivateKey {
- return &PrivateKey{
- PublicKey: key,
- }
-}
-
-// Seed returns the ed25519 private key secret seed.
-// The private key representation by RFC 8032.
-func (pk *PrivateKey) Seed() []byte {
- return pk.Key[:SeedSize]
-}
-
-// MarshalByteSecret returns the underlying 32 byte seed of the private key.
-func (pk *PrivateKey) MarshalByteSecret() []byte {
- return pk.Seed()
-}
-
-// UnmarshalByteSecret computes the private key from the secret seed
-// and stores it in the private key object.
-func (sk *PrivateKey) UnmarshalByteSecret(seed []byte) error {
- sk.Key = ed25519lib.NewKeyFromSeed(seed)
- return nil
-}
-
-// GenerateKey generates a fresh private key with the provided randomness source.
-func GenerateKey(rand io.Reader) (*PrivateKey, error) {
- publicKey, privateKey, err := ed25519lib.GenerateKey(rand)
- if err != nil {
- return nil, err
- }
- privateKeyOut := new(PrivateKey)
- privateKeyOut.PublicKey.Point = publicKey[:]
- privateKeyOut.Key = privateKey[:]
- return privateKeyOut, nil
-}
-
-// Sign signs a message with the ed25519 algorithm.
-// priv MUST be a valid key! Check this with Validate() before use.
-func Sign(priv *PrivateKey, message []byte) ([]byte, error) {
- return ed25519lib.Sign(priv.Key, message), nil
-}
-
-// Verify verifies an ed25519 signature.
-func Verify(pub *PublicKey, message []byte, signature []byte) bool {
- return ed25519lib.Verify(pub.Point, message, signature)
-}
-
-// Validate checks if the ed25519 private key is valid.
-func Validate(priv *PrivateKey) error {
- expectedPrivateKey := ed25519lib.NewKeyFromSeed(priv.Seed())
- if subtle.ConstantTimeCompare(priv.Key, expectedPrivateKey) == 0 {
- return errors.KeyInvalidError("ed25519: invalid ed25519 secret")
- }
- if subtle.ConstantTimeCompare(priv.PublicKey.Point, expectedPrivateKey[SeedSize:]) == 0 {
- return errors.KeyInvalidError("ed25519: invalid ed25519 public key")
- }
- return nil
-}
-
-// ENCODING/DECODING signature:
-
-// WriteSignature encodes and writes an ed25519 signature to writer.
-func WriteSignature(writer io.Writer, signature []byte) error {
- _, err := writer.Write(signature)
- return err
-}
-
-// ReadSignature decodes an ed25519 signature from a reader.
-func ReadSignature(reader io.Reader) ([]byte, error) {
- signature := make([]byte, SignatureSize)
- if _, err := io.ReadFull(reader, signature); err != nil {
- return nil, err
- }
- return signature, nil
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/ed448/ed448.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/ed448/ed448.go
deleted file mode 100644
index b11fb4fb179..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/ed448/ed448.go
+++ /dev/null
@@ -1,119 +0,0 @@
-// Package ed448 implements the ed448 signature algorithm for OpenPGP
-// as defined in the Open PGP crypto refresh.
-package ed448
-
-import (
- "crypto/subtle"
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- ed448lib "github.com/cloudflare/circl/sign/ed448"
-)
-
-const (
- // PublicKeySize is the size, in bytes, of public keys in this package.
- PublicKeySize = ed448lib.PublicKeySize
- // SeedSize is the size, in bytes, of private key seeds.
- // The private key representation used by RFC 8032.
- SeedSize = ed448lib.SeedSize
- // SignatureSize is the size, in bytes, of signatures generated and verified by this package.
- SignatureSize = ed448lib.SignatureSize
-)
-
-type PublicKey struct {
- // Point represents the elliptic curve point of the public key.
- Point []byte
-}
-
-type PrivateKey struct {
- PublicKey
- // Key the private key representation by RFC 8032,
- // encoded as seed | public key point.
- Key []byte
-}
-
-// NewPublicKey creates a new empty ed448 public key.
-func NewPublicKey() *PublicKey {
- return &PublicKey{}
-}
-
-// NewPrivateKey creates a new empty private key referencing the public key.
-func NewPrivateKey(key PublicKey) *PrivateKey {
- return &PrivateKey{
- PublicKey: key,
- }
-}
-
-// Seed returns the ed448 private key secret seed.
-// The private key representation by RFC 8032.
-func (pk *PrivateKey) Seed() []byte {
- return pk.Key[:SeedSize]
-}
-
-// MarshalByteSecret returns the underlying seed of the private key.
-func (pk *PrivateKey) MarshalByteSecret() []byte {
- return pk.Seed()
-}
-
-// UnmarshalByteSecret computes the private key from the secret seed
-// and stores it in the private key object.
-func (sk *PrivateKey) UnmarshalByteSecret(seed []byte) error {
- sk.Key = ed448lib.NewKeyFromSeed(seed)
- return nil
-}
-
-// GenerateKey generates a fresh private key with the provided randomness source.
-func GenerateKey(rand io.Reader) (*PrivateKey, error) {
- publicKey, privateKey, err := ed448lib.GenerateKey(rand)
- if err != nil {
- return nil, err
- }
- privateKeyOut := new(PrivateKey)
- privateKeyOut.PublicKey.Point = publicKey[:]
- privateKeyOut.Key = privateKey[:]
- return privateKeyOut, nil
-}
-
-// Sign signs a message with the ed448 algorithm.
-// priv MUST be a valid key! Check this with Validate() before use.
-func Sign(priv *PrivateKey, message []byte) ([]byte, error) {
- // Ed448 is used with the empty string as a context string.
- // See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-08#section-13.7
- return ed448lib.Sign(priv.Key, message, ""), nil
-}
-
-// Verify verifies a ed448 signature
-func Verify(pub *PublicKey, message []byte, signature []byte) bool {
- // Ed448 is used with the empty string as a context string.
- // See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-08#section-13.7
- return ed448lib.Verify(pub.Point, message, signature, "")
-}
-
-// Validate checks if the ed448 private key is valid
-func Validate(priv *PrivateKey) error {
- expectedPrivateKey := ed448lib.NewKeyFromSeed(priv.Seed())
- if subtle.ConstantTimeCompare(priv.Key, expectedPrivateKey) == 0 {
- return errors.KeyInvalidError("ed448: invalid ed448 secret")
- }
- if subtle.ConstantTimeCompare(priv.PublicKey.Point, expectedPrivateKey[SeedSize:]) == 0 {
- return errors.KeyInvalidError("ed448: invalid ed448 public key")
- }
- return nil
-}
-
-// ENCODING/DECODING signature:
-
-// WriteSignature encodes and writes an ed448 signature to writer.
-func WriteSignature(writer io.Writer, signature []byte) error {
- _, err := writer.Write(signature)
- return err
-}
-
-// ReadSignature decodes an ed448 signature from a reader.
-func ReadSignature(reader io.Reader) ([]byte, error) {
- signature := make([]byte, SignatureSize)
- if _, err := io.ReadFull(reader, signature); err != nil {
- return nil, err
- }
- return signature, nil
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/eddsa/eddsa.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/eddsa/eddsa.go
deleted file mode 100644
index 99ecfc7f12d..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/eddsa/eddsa.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Package eddsa implements EdDSA signature, suitable for OpenPGP, as specified in
-// https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-13.7
-package eddsa
-
-import (
- "errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/ecc"
- "io"
-)
-
-type PublicKey struct {
- X []byte
- curve ecc.EdDSACurve
-}
-
-type PrivateKey struct {
- PublicKey
- D []byte
-}
-
-func NewPublicKey(curve ecc.EdDSACurve) *PublicKey {
- return &PublicKey{
- curve: curve,
- }
-}
-
-func NewPrivateKey(key PublicKey) *PrivateKey {
- return &PrivateKey{
- PublicKey: key,
- }
-}
-
-func (pk *PublicKey) GetCurve() ecc.EdDSACurve {
- return pk.curve
-}
-
-func (pk *PublicKey) MarshalPoint() []byte {
- return pk.curve.MarshalBytePoint(pk.X)
-}
-
-func (pk *PublicKey) UnmarshalPoint(x []byte) error {
- pk.X = pk.curve.UnmarshalBytePoint(x)
-
- if pk.X == nil {
- return errors.New("eddsa: failed to parse EC point")
- }
- return nil
-}
-
-func (sk *PrivateKey) MarshalByteSecret() []byte {
- return sk.curve.MarshalByteSecret(sk.D)
-}
-
-func (sk *PrivateKey) UnmarshalByteSecret(d []byte) error {
- sk.D = sk.curve.UnmarshalByteSecret(d)
-
- if sk.D == nil {
- return errors.New("eddsa: failed to parse scalar")
- }
- return nil
-}
-
-func GenerateKey(rand io.Reader, c ecc.EdDSACurve) (priv *PrivateKey, err error) {
- priv = new(PrivateKey)
- priv.PublicKey.curve = c
- priv.PublicKey.X, priv.D, err = c.GenerateEdDSA(rand)
- return
-}
-
-func Sign(priv *PrivateKey, message []byte) (r, s []byte, err error) {
- sig, err := priv.PublicKey.curve.Sign(priv.PublicKey.X, priv.D, message)
- if err != nil {
- return nil, nil, err
- }
-
- r, s = priv.PublicKey.curve.MarshalSignature(sig)
- return
-}
-
-func Verify(pub *PublicKey, message, r, s []byte) bool {
- sig := pub.curve.UnmarshalSignature(r, s)
- if sig == nil {
- return false
- }
-
- return pub.curve.Verify(pub.X, message, sig)
-}
-
-func Validate(priv *PrivateKey) error {
- return priv.curve.ValidateEdDSA(priv.PublicKey.X, priv.D)
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/elgamal/elgamal.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/elgamal/elgamal.go
deleted file mode 100644
index bad27743445..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/elgamal/elgamal.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package elgamal implements ElGamal encryption, suitable for OpenPGP,
-// as specified in "A Public-Key Cryptosystem and a Signature Scheme Based on
-// Discrete Logarithms," IEEE Transactions on Information Theory, v. IT-31,
-// n. 4, 1985, pp. 469-472.
-//
-// This form of ElGamal embeds PKCS#1 v1.5 padding, which may make it
-// unsuitable for other protocols. RSA should be used in preference in any
-// case.
-package elgamal // import "github.com/ProtonMail/go-crypto/openpgp/elgamal"
-
-import (
- "crypto/rand"
- "crypto/subtle"
- "errors"
- "io"
- "math/big"
-)
-
-// PublicKey represents an ElGamal public key.
-type PublicKey struct {
- G, P, Y *big.Int
-}
-
-// PrivateKey represents an ElGamal private key.
-type PrivateKey struct {
- PublicKey
- X *big.Int
-}
-
-// Encrypt encrypts the given message to the given public key. The result is a
-// pair of integers. Errors can result from reading random, or because msg is
-// too large to be encrypted to the public key.
-func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err error) {
- pLen := (pub.P.BitLen() + 7) / 8
- if len(msg) > pLen-11 {
- err = errors.New("elgamal: message too long")
- return
- }
-
- // EM = 0x02 || PS || 0x00 || M
- em := make([]byte, pLen-1)
- em[0] = 2
- ps, mm := em[1:len(em)-len(msg)-1], em[len(em)-len(msg):]
- err = nonZeroRandomBytes(ps, random)
- if err != nil {
- return
- }
- em[len(em)-len(msg)-1] = 0
- copy(mm, msg)
-
- m := new(big.Int).SetBytes(em)
-
- k, err := rand.Int(random, pub.P)
- if err != nil {
- return
- }
-
- c1 = new(big.Int).Exp(pub.G, k, pub.P)
- s := new(big.Int).Exp(pub.Y, k, pub.P)
- c2 = s.Mul(s, m)
- c2.Mod(c2, pub.P)
-
- return
-}
-
-// Decrypt takes two integers, resulting from an ElGamal encryption, and
-// returns the plaintext of the message. An error can result only if the
-// ciphertext is invalid. Users should keep in mind that this is a padding
-// oracle and thus, if exposed to an adaptive chosen ciphertext attack, can
-// be used to break the cryptosystem. See “Chosen Ciphertext Attacks
-// Against Protocols Based on the RSA Encryption Standard PKCS #1”, Daniel
-// Bleichenbacher, Advances in Cryptology (Crypto '98),
-func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) {
- s := new(big.Int).Exp(c1, priv.X, priv.P)
- if s.ModInverse(s, priv.P) == nil {
- return nil, errors.New("elgamal: invalid private key")
- }
- s.Mul(s, c2)
- s.Mod(s, priv.P)
- em := s.Bytes()
-
- firstByteIsTwo := subtle.ConstantTimeByteEq(em[0], 2)
-
- // The remainder of the plaintext must be a string of non-zero random
- // octets, followed by a 0, followed by the message.
- // lookingForIndex: 1 iff we are still looking for the zero.
- // index: the offset of the first zero byte.
- var lookingForIndex, index int
- lookingForIndex = 1
-
- for i := 1; i < len(em); i++ {
- equals0 := subtle.ConstantTimeByteEq(em[i], 0)
- index = subtle.ConstantTimeSelect(lookingForIndex&equals0, i, index)
- lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex)
- }
-
- if firstByteIsTwo != 1 || lookingForIndex != 0 || index < 9 {
- return nil, errors.New("elgamal: decryption error")
- }
- return em[index+1:], nil
-}
-
-// nonZeroRandomBytes fills the given slice with non-zero random octets.
-func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) {
- _, err = io.ReadFull(rand, s)
- if err != nil {
- return
- }
-
- for i := 0; i < len(s); i++ {
- for s[i] == 0 {
- _, err = io.ReadFull(rand, s[i:i+1])
- if err != nil {
- return
- }
- }
- }
-
- return
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go
deleted file mode 100644
index 8d6969c0bf8..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package errors contains common error types for the OpenPGP packages.
-package errors // import "github.com/ProtonMail/go-crypto/v2/openpgp/errors"
-
-import (
- "strconv"
-)
-
-// A StructuralError is returned when OpenPGP data is found to be syntactically
-// invalid.
-type StructuralError string
-
-func (s StructuralError) Error() string {
- return "openpgp: invalid data: " + string(s)
-}
-
-// UnsupportedError indicates that, although the OpenPGP data is valid, it
-// makes use of currently unimplemented features.
-type UnsupportedError string
-
-func (s UnsupportedError) Error() string {
- return "openpgp: unsupported feature: " + string(s)
-}
-
-// InvalidArgumentError indicates that the caller is in error and passed an
-// incorrect value.
-type InvalidArgumentError string
-
-func (i InvalidArgumentError) Error() string {
- return "openpgp: invalid argument: " + string(i)
-}
-
-// SignatureError indicates that a syntactically valid signature failed to
-// validate.
-type SignatureError string
-
-func (b SignatureError) Error() string {
- return "openpgp: invalid signature: " + string(b)
-}
-
-var ErrMDCHashMismatch error = SignatureError("MDC hash mismatch")
-var ErrMDCMissing error = SignatureError("MDC packet not found")
-
-type signatureExpiredError int
-
-func (se signatureExpiredError) Error() string {
- return "openpgp: signature expired"
-}
-
-var ErrSignatureExpired error = signatureExpiredError(0)
-
-type keyExpiredError int
-
-func (ke keyExpiredError) Error() string {
- return "openpgp: key expired"
-}
-
-var ErrSignatureOlderThanKey error = signatureOlderThanKeyError(0)
-
-type signatureOlderThanKeyError int
-
-func (ske signatureOlderThanKeyError) Error() string {
- return "openpgp: signature is older than the key"
-}
-
-var ErrKeyExpired error = keyExpiredError(0)
-
-type keyIncorrectError int
-
-func (ki keyIncorrectError) Error() string {
- return "openpgp: incorrect key"
-}
-
-var ErrKeyIncorrect error = keyIncorrectError(0)
-
-// KeyInvalidError indicates that the public key parameters are invalid
-// as they do not match the private ones
-type KeyInvalidError string
-
-func (e KeyInvalidError) Error() string {
- return "openpgp: invalid key: " + string(e)
-}
-
-type unknownIssuerError int
-
-func (unknownIssuerError) Error() string {
- return "openpgp: signature made by unknown entity"
-}
-
-var ErrUnknownIssuer error = unknownIssuerError(0)
-
-type keyRevokedError int
-
-func (keyRevokedError) Error() string {
- return "openpgp: signature made by revoked key"
-}
-
-var ErrKeyRevoked error = keyRevokedError(0)
-
-type WeakAlgorithmError string
-
-func (e WeakAlgorithmError) Error() string {
- return "openpgp: weak algorithms are rejected: " + string(e)
-}
-
-type UnknownPacketTypeError uint8
-
-func (upte UnknownPacketTypeError) Error() string {
- return "openpgp: unknown packet type: " + strconv.Itoa(int(upte))
-}
-
-type CriticalUnknownPacketTypeError uint8
-
-func (upte CriticalUnknownPacketTypeError) Error() string {
- return "openpgp: unknown critical packet type: " + strconv.Itoa(int(upte))
-}
-
-// AEADError indicates that there is a problem when initializing or using a
-// AEAD instance, configuration struct, nonces or index values.
-type AEADError string
-
-func (ae AEADError) Error() string {
- return "openpgp: aead error: " + string(ae)
-}
-
-// ErrDummyPrivateKey results when operations are attempted on a private key
-// that is just a dummy key. See
-// https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob;f=doc/DETAILS;h=fe55ae16ab4e26d8356dc574c9e8bc935e71aef1;hb=23191d7851eae2217ecdac6484349849a24fd94a#l1109
-type ErrDummyPrivateKey string
-
-func (dke ErrDummyPrivateKey) Error() string {
- return "openpgp: s2k GNU dummy key: " + string(dke)
-}
-
-// ErrMalformedMessage results when the packet sequence is incorrect
-type ErrMalformedMessage string
-
-func (dke ErrMalformedMessage) Error() string {
- return "openpgp: malformed message " + string(dke)
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/hash.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/hash.go
deleted file mode 100644
index 526bd7777f8..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/hash.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package openpgp
-
-import (
- "crypto"
-
- "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
-)
-
-// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP
-// hash id.
-func HashIdToHash(id byte) (h crypto.Hash, ok bool) {
- return algorithm.HashIdToHash(id)
-}
-
-// HashIdToString returns the name of the hash function corresponding to the
-// given OpenPGP hash id.
-func HashIdToString(id byte) (name string, ok bool) {
- return algorithm.HashIdToString(id)
-}
-
-// HashToHashId returns an OpenPGP hash id which corresponds the given Hash.
-func HashToHashId(h crypto.Hash) (id byte, ok bool) {
- return algorithm.HashToHashId(h)
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/aead.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/aead.go
deleted file mode 100644
index d0670651866..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/aead.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright (C) 2019 ProtonTech AG
-
-package algorithm
-
-import (
- "crypto/cipher"
- "github.com/ProtonMail/go-crypto/eax"
- "github.com/ProtonMail/go-crypto/ocb"
-)
-
-// AEADMode defines the Authenticated Encryption with Associated Data mode of
-// operation.
-type AEADMode uint8
-
-// Supported modes of operation (see RFC4880bis [EAX] and RFC7253)
-const (
- AEADModeEAX = AEADMode(1)
- AEADModeOCB = AEADMode(2)
- AEADModeGCM = AEADMode(3)
-)
-
-// TagLength returns the length in bytes of authentication tags.
-func (mode AEADMode) TagLength() int {
- switch mode {
- case AEADModeEAX:
- return 16
- case AEADModeOCB:
- return 16
- case AEADModeGCM:
- return 16
- default:
- return 0
- }
-}
-
-// NonceLength returns the length in bytes of nonces.
-func (mode AEADMode) NonceLength() int {
- switch mode {
- case AEADModeEAX:
- return 16
- case AEADModeOCB:
- return 15
- case AEADModeGCM:
- return 12
- default:
- return 0
- }
-}
-
-// New returns a fresh instance of the given mode
-func (mode AEADMode) New(block cipher.Block) (alg cipher.AEAD) {
- var err error
- switch mode {
- case AEADModeEAX:
- alg, err = eax.NewEAX(block)
- case AEADModeOCB:
- alg, err = ocb.NewOCB(block)
- case AEADModeGCM:
- alg, err = cipher.NewGCM(block)
- }
- if err != nil {
- panic(err.Error())
- }
- return alg
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go
deleted file mode 100644
index c76a75bcda5..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package algorithm
-
-import (
- "crypto/aes"
- "crypto/cipher"
- "crypto/des"
-
- "golang.org/x/crypto/cast5"
-)
-
-// Cipher is an official symmetric key cipher algorithm. See RFC 4880,
-// section 9.2.
-type Cipher interface {
- // Id returns the algorithm ID, as a byte, of the cipher.
- Id() uint8
- // KeySize returns the key size, in bytes, of the cipher.
- KeySize() int
- // BlockSize returns the block size, in bytes, of the cipher.
- BlockSize() int
- // New returns a fresh instance of the given cipher.
- New(key []byte) cipher.Block
-}
-
-// The following constants mirror the OpenPGP standard (RFC 4880).
-const (
- TripleDES = CipherFunction(2)
- CAST5 = CipherFunction(3)
- AES128 = CipherFunction(7)
- AES192 = CipherFunction(8)
- AES256 = CipherFunction(9)
-)
-
-// CipherById represents the different block ciphers specified for OpenPGP. See
-// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13
-var CipherById = map[uint8]Cipher{
- TripleDES.Id(): TripleDES,
- CAST5.Id(): CAST5,
- AES128.Id(): AES128,
- AES192.Id(): AES192,
- AES256.Id(): AES256,
-}
-
-type CipherFunction uint8
-
-// ID returns the algorithm Id, as a byte, of cipher.
-func (sk CipherFunction) Id() uint8 {
- return uint8(sk)
-}
-
-// KeySize returns the key size, in bytes, of cipher.
-func (cipher CipherFunction) KeySize() int {
- switch cipher {
- case CAST5:
- return cast5.KeySize
- case AES128:
- return 16
- case AES192, TripleDES:
- return 24
- case AES256:
- return 32
- }
- return 0
-}
-
-// BlockSize returns the block size, in bytes, of cipher.
-func (cipher CipherFunction) BlockSize() int {
- switch cipher {
- case TripleDES:
- return des.BlockSize
- case CAST5:
- return 8
- case AES128, AES192, AES256:
- return 16
- }
- return 0
-}
-
-// New returns a fresh instance of the given cipher.
-func (cipher CipherFunction) New(key []byte) (block cipher.Block) {
- var err error
- switch cipher {
- case TripleDES:
- block, err = des.NewTripleDESCipher(key)
- case CAST5:
- block, err = cast5.NewCipher(key)
- case AES128, AES192, AES256:
- block, err = aes.NewCipher(key)
- }
- if err != nil {
- panic(err.Error())
- }
- return
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/hash.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/hash.go
deleted file mode 100644
index d1a00fc7495..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/hash.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package algorithm
-
-import (
- "crypto"
- "fmt"
- "hash"
-)
-
-// Hash is an official hash function algorithm. See RFC 4880, section 9.4.
-type Hash interface {
- // Id returns the algorithm ID, as a byte, of Hash.
- Id() uint8
- // Available reports whether the given hash function is linked into the binary.
- Available() bool
- // HashFunc simply returns the value of h so that Hash implements SignerOpts.
- HashFunc() crypto.Hash
- // New returns a new hash.Hash calculating the given hash function. New
- // panics if the hash function is not linked into the binary.
- New() hash.Hash
- // Size returns the length, in bytes, of a digest resulting from the given
- // hash function. It doesn't require that the hash function in question be
- // linked into the program.
- Size() int
- // String is the name of the hash function corresponding to the given
- // OpenPGP hash id.
- String() string
-}
-
-// The following vars mirror the crypto/Hash supported hash functions.
-var (
- SHA1 Hash = cryptoHash{2, crypto.SHA1}
- SHA256 Hash = cryptoHash{8, crypto.SHA256}
- SHA384 Hash = cryptoHash{9, crypto.SHA384}
- SHA512 Hash = cryptoHash{10, crypto.SHA512}
- SHA224 Hash = cryptoHash{11, crypto.SHA224}
- SHA3_256 Hash = cryptoHash{12, crypto.SHA3_256}
- SHA3_512 Hash = cryptoHash{14, crypto.SHA3_512}
-)
-
-// HashById represents the different hash functions specified for OpenPGP. See
-// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-14
-var (
- HashById = map[uint8]Hash{
- SHA256.Id(): SHA256,
- SHA384.Id(): SHA384,
- SHA512.Id(): SHA512,
- SHA224.Id(): SHA224,
- SHA3_256.Id(): SHA3_256,
- SHA3_512.Id(): SHA3_512,
- }
-)
-
-// cryptoHash contains pairs relating OpenPGP's hash identifier with
-// Go's crypto.Hash type. See RFC 4880, section 9.4.
-type cryptoHash struct {
- id uint8
- crypto.Hash
-}
-
-// Id returns the algorithm ID, as a byte, of cryptoHash.
-func (h cryptoHash) Id() uint8 {
- return h.id
-}
-
-var hashNames = map[uint8]string{
- SHA256.Id(): "SHA256",
- SHA384.Id(): "SHA384",
- SHA512.Id(): "SHA512",
- SHA224.Id(): "SHA224",
- SHA3_256.Id(): "SHA3-256",
- SHA3_512.Id(): "SHA3-512",
-}
-
-func (h cryptoHash) String() string {
- s, ok := hashNames[h.id]
- if !ok {
- panic(fmt.Sprintf("Unsupported hash function %d", h.id))
- }
- return s
-}
-
-// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP
-// hash id.
-func HashIdToHash(id byte) (h crypto.Hash, ok bool) {
- if hash, ok := HashById[id]; ok {
- return hash.HashFunc(), true
- }
- return 0, false
-}
-
-// HashIdToHashWithSha1 returns a crypto.Hash which corresponds to the given OpenPGP
-// hash id, allowing sha1.
-func HashIdToHashWithSha1(id byte) (h crypto.Hash, ok bool) {
- if hash, ok := HashById[id]; ok {
- return hash.HashFunc(), true
- }
-
- if id == SHA1.Id() {
- return SHA1.HashFunc(), true
- }
-
- return 0, false
-}
-
-// HashIdToString returns the name of the hash function corresponding to the
-// given OpenPGP hash id.
-func HashIdToString(id byte) (name string, ok bool) {
- if hash, ok := HashById[id]; ok {
- return hash.String(), true
- }
- return "", false
-}
-
-// HashToHashId returns an OpenPGP hash id which corresponds the given Hash.
-func HashToHashId(h crypto.Hash) (id byte, ok bool) {
- for id, hash := range HashById {
- if hash.HashFunc() == h {
- return id, true
- }
- }
-
- return 0, false
-}
-
-// HashToHashIdWithSha1 returns an OpenPGP hash id which corresponds the given Hash,
-// allowing instances of SHA1
-func HashToHashIdWithSha1(h crypto.Hash) (id byte, ok bool) {
- for id, hash := range HashById {
- if hash.HashFunc() == h {
- return id, true
- }
- }
-
- if h == SHA1.HashFunc() {
- return SHA1.Id(), true
- }
-
- return 0, false
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519.go
deleted file mode 100644
index 888767c4e43..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519.go
+++ /dev/null
@@ -1,171 +0,0 @@
-// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA.
-package ecc
-
-import (
- "crypto/subtle"
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- x25519lib "github.com/cloudflare/circl/dh/x25519"
-)
-
-type curve25519 struct{}
-
-func NewCurve25519() *curve25519 {
- return &curve25519{}
-}
-
-func (c *curve25519) GetCurveName() string {
- return "curve25519"
-}
-
-// MarshalBytePoint encodes the public point from native format, adding the prefix.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6
-func (c *curve25519) MarshalBytePoint(point []byte) []byte {
- return append([]byte{0x40}, point...)
-}
-
-// UnmarshalBytePoint decodes the public point to native format, removing the prefix.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6
-func (c *curve25519) UnmarshalBytePoint(point []byte) []byte {
- if len(point) != x25519lib.Size+1 {
- return nil
- }
-
- // Remove prefix
- return point[1:]
-}
-
-// MarshalByteSecret encodes the secret scalar from native format.
-// Note that the EC secret scalar differs from the definition of public keys in
-// [Curve25519] in two ways: (1) the byte-ordering is big-endian, which is
-// more uniform with how big integers are represented in OpenPGP, and (2) the
-// leading zeros are truncated.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6.1.1
-// Note that leading zero bytes are stripped later when encoding as an MPI.
-func (c *curve25519) MarshalByteSecret(secret []byte) []byte {
- d := make([]byte, x25519lib.Size)
- copyReversed(d, secret)
-
- // The following ensures that the private key is a number of the form
- // 2^{254} + 8 * [0, 2^{251}), in order to avoid the small subgroup of
- // the curve.
- //
- // This masking is done internally in the underlying lib and so is unnecessary
- // for security, but OpenPGP implementations require that private keys be
- // pre-masked.
- d[0] &= 127
- d[0] |= 64
- d[31] &= 248
-
- return d
-}
-
-// UnmarshalByteSecret decodes the secret scalar from native format.
-// Note that the EC secret scalar differs from the definition of public keys in
-// [Curve25519] in two ways: (1) the byte-ordering is big-endian, which is
-// more uniform with how big integers are represented in OpenPGP, and (2) the
-// leading zeros are truncated.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6.1.1
-func (c *curve25519) UnmarshalByteSecret(d []byte) []byte {
- if len(d) > x25519lib.Size {
- return nil
- }
-
- // Ensure truncated leading bytes are re-added
- secret := make([]byte, x25519lib.Size)
- copyReversed(secret, d)
-
- return secret
-}
-
-// generateKeyPairBytes Generates a private-public key-pair.
-// 'priv' is a private key; a little-endian scalar belonging to the set
-// 2^{254} + 8 * [0, 2^{251}), in order to avoid the small subgroup of the
-// curve. 'pub' is simply 'priv' * G where G is the base point.
-// See https://cr.yp.to/ecdh.html and RFC7748, sec 5.
-func (c *curve25519) generateKeyPairBytes(rand io.Reader) (priv, pub x25519lib.Key, err error) {
- _, err = io.ReadFull(rand, priv[:])
- if err != nil {
- return
- }
-
- x25519lib.KeyGen(&pub, &priv)
- return
-}
-
-func (c *curve25519) GenerateECDH(rand io.Reader) (point []byte, secret []byte, err error) {
- priv, pub, err := c.generateKeyPairBytes(rand)
- if err != nil {
- return
- }
-
- return pub[:], priv[:], nil
-}
-
-func (c *genericCurve) MaskSecret(secret []byte) []byte {
- return secret
-}
-
-func (c *curve25519) Encaps(rand io.Reader, point []byte) (ephemeral, sharedSecret []byte, err error) {
- // RFC6637 §8: "Generate an ephemeral key pair {v, V=vG}"
- // ephemeralPrivate corresponds to `v`.
- // ephemeralPublic corresponds to `V`.
- ephemeralPrivate, ephemeralPublic, err := c.generateKeyPairBytes(rand)
- if err != nil {
- return nil, nil, err
- }
-
- // RFC6637 §8: "Obtain the authenticated recipient public key R"
- // pubKey corresponds to `R`.
- var pubKey x25519lib.Key
- copy(pubKey[:], point)
-
- // RFC6637 §8: "Compute the shared point S = vR"
- // "VB = convert point V to the octet string"
- // sharedPoint corresponds to `VB`.
- var sharedPoint x25519lib.Key
- x25519lib.Shared(&sharedPoint, &ephemeralPrivate, &pubKey)
-
- return ephemeralPublic[:], sharedPoint[:], nil
-}
-
-func (c *curve25519) Decaps(vsG, secret []byte) (sharedSecret []byte, err error) {
- var ephemeralPublic, decodedPrivate, sharedPoint x25519lib.Key
- // RFC6637 §8: "The decryption is the inverse of the method given."
- // All quoted descriptions in comments below describe encryption, and
- // the reverse is performed.
- // vsG corresponds to `VB` in RFC6637 §8 .
-
- // RFC6637 §8: "VB = convert point V to the octet string"
- copy(ephemeralPublic[:], vsG)
-
- // decodedPrivate corresponds to `r` in RFC6637 §8 .
- copy(decodedPrivate[:], secret)
-
- // RFC6637 §8: "Note that the recipient obtains the shared secret by calculating
- // S = rV = rvG, where (r,R) is the recipient's key pair."
- // sharedPoint corresponds to `S`.
- x25519lib.Shared(&sharedPoint, &decodedPrivate, &ephemeralPublic)
-
- return sharedPoint[:], nil
-}
-
-func (c *curve25519) ValidateECDH(point []byte, secret []byte) (err error) {
- var pk, sk x25519lib.Key
- copy(sk[:], secret)
- x25519lib.KeyGen(&pk, &sk)
-
- if subtle.ConstantTimeCompare(point, pk[:]) == 0 {
- return errors.KeyInvalidError("ecc: invalid curve25519 public point")
- }
-
- return nil
-}
-
-func copyReversed(out []byte, in []byte) {
- l := len(in)
- for i := 0; i < l; i++ {
- out[i] = in[l-i-1]
- }
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go
deleted file mode 100644
index 97f891ffc0f..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go
+++ /dev/null
@@ -1,141 +0,0 @@
-// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA.
-package ecc
-
-import (
- "bytes"
- "crypto/elliptic"
-
- "github.com/ProtonMail/go-crypto/bitcurves"
- "github.com/ProtonMail/go-crypto/brainpool"
- "github.com/ProtonMail/go-crypto/openpgp/internal/encoding"
-)
-
-type CurveInfo struct {
- GenName string
- Oid *encoding.OID
- Curve Curve
-}
-
-var Curves = []CurveInfo{
- {
- // NIST P-256
- GenName: "P256",
- Oid: encoding.NewOID([]byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07}),
- Curve: NewGenericCurve(elliptic.P256()),
- },
- {
- // NIST P-384
- GenName: "P384",
- Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x22}),
- Curve: NewGenericCurve(elliptic.P384()),
- },
- {
- // NIST P-521
- GenName: "P521",
- Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x23}),
- Curve: NewGenericCurve(elliptic.P521()),
- },
- {
- // SecP256k1
- GenName: "SecP256k1",
- Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x0A}),
- Curve: NewGenericCurve(bitcurves.S256()),
- },
- {
- // Curve25519
- GenName: "Curve25519",
- Oid: encoding.NewOID([]byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0x97, 0x55, 0x01, 0x05, 0x01}),
- Curve: NewCurve25519(),
- },
- {
- // x448
- GenName: "Curve448",
- Oid: encoding.NewOID([]byte{0x2B, 0x65, 0x6F}),
- Curve: NewX448(),
- },
- {
- // Ed25519
- GenName: "Curve25519",
- Oid: encoding.NewOID([]byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0xDA, 0x47, 0x0F, 0x01}),
- Curve: NewEd25519(),
- },
- {
- // Ed448
- GenName: "Curve448",
- Oid: encoding.NewOID([]byte{0x2B, 0x65, 0x71}),
- Curve: NewEd448(),
- },
- {
- // BrainpoolP256r1
- GenName: "BrainpoolP256",
- Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x07}),
- Curve: NewGenericCurve(brainpool.P256r1()),
- },
- {
- // BrainpoolP384r1
- GenName: "BrainpoolP384",
- Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0B}),
- Curve: NewGenericCurve(brainpool.P384r1()),
- },
- {
- // BrainpoolP512r1
- GenName: "BrainpoolP512",
- Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0D}),
- Curve: NewGenericCurve(brainpool.P512r1()),
- },
-}
-
-func FindByCurve(curve Curve) *CurveInfo {
- for _, curveInfo := range Curves {
- if curveInfo.Curve.GetCurveName() == curve.GetCurveName() {
- return &curveInfo
- }
- }
- return nil
-}
-
-func FindByOid(oid encoding.Field) *CurveInfo {
- var rawBytes = oid.Bytes()
- for _, curveInfo := range Curves {
- if bytes.Equal(curveInfo.Oid.Bytes(), rawBytes) {
- return &curveInfo
- }
- }
- return nil
-}
-
-func FindEdDSAByGenName(curveGenName string) EdDSACurve {
- for _, curveInfo := range Curves {
- if curveInfo.GenName == curveGenName {
- curve, ok := curveInfo.Curve.(EdDSACurve)
- if ok {
- return curve
- }
- }
- }
- return nil
-}
-
-func FindECDSAByGenName(curveGenName string) ECDSACurve {
- for _, curveInfo := range Curves {
- if curveInfo.GenName == curveGenName {
- curve, ok := curveInfo.Curve.(ECDSACurve)
- if ok {
- return curve
- }
- }
- }
- return nil
-}
-
-func FindECDHByGenName(curveGenName string) ECDHCurve {
- for _, curveInfo := range Curves {
- if curveInfo.GenName == curveGenName {
- curve, ok := curveInfo.Curve.(ECDHCurve)
- if ok {
- return curve
- }
- }
- }
- return nil
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curves.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curves.go
deleted file mode 100644
index 5ed9c93b3d6..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curves.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA.
-package ecc
-
-import (
- "io"
- "math/big"
-)
-
-type Curve interface {
- GetCurveName() string
-}
-
-type ECDSACurve interface {
- Curve
- MarshalIntegerPoint(x, y *big.Int) []byte
- UnmarshalIntegerPoint([]byte) (x, y *big.Int)
- MarshalIntegerSecret(d *big.Int) []byte
- UnmarshalIntegerSecret(d []byte) *big.Int
- GenerateECDSA(rand io.Reader) (x, y, secret *big.Int, err error)
- Sign(rand io.Reader, x, y, d *big.Int, hash []byte) (r, s *big.Int, err error)
- Verify(x, y *big.Int, hash []byte, r, s *big.Int) bool
- ValidateECDSA(x, y *big.Int, secret []byte) error
-}
-
-type EdDSACurve interface {
- Curve
- MarshalBytePoint(x []byte) []byte
- UnmarshalBytePoint([]byte) (x []byte)
- MarshalByteSecret(d []byte) []byte
- UnmarshalByteSecret(d []byte) []byte
- MarshalSignature(sig []byte) (r, s []byte)
- UnmarshalSignature(r, s []byte) (sig []byte)
- GenerateEdDSA(rand io.Reader) (pub, priv []byte, err error)
- Sign(publicKey, privateKey, message []byte) (sig []byte, err error)
- Verify(publicKey, message, sig []byte) bool
- ValidateEdDSA(publicKey, privateKey []byte) (err error)
-}
-type ECDHCurve interface {
- Curve
- MarshalBytePoint([]byte) (encoded []byte)
- UnmarshalBytePoint(encoded []byte) []byte
- MarshalByteSecret(d []byte) []byte
- UnmarshalByteSecret(d []byte) []byte
- GenerateECDH(rand io.Reader) (point []byte, secret []byte, err error)
- Encaps(rand io.Reader, point []byte) (ephemeral, sharedSecret []byte, err error)
- Decaps(ephemeral, secret []byte) (sharedSecret []byte, err error)
- ValidateECDH(public []byte, secret []byte) error
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed25519.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed25519.go
deleted file mode 100644
index 54a08a8a38f..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed25519.go
+++ /dev/null
@@ -1,112 +0,0 @@
-// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA.
-package ecc
-
-import (
- "crypto/subtle"
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- ed25519lib "github.com/cloudflare/circl/sign/ed25519"
-)
-
-const ed25519Size = 32
-
-type ed25519 struct{}
-
-func NewEd25519() *ed25519 {
- return &ed25519{}
-}
-
-func (c *ed25519) GetCurveName() string {
- return "ed25519"
-}
-
-// MarshalBytePoint encodes the public point from native format, adding the prefix.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5
-func (c *ed25519) MarshalBytePoint(x []byte) []byte {
- return append([]byte{0x40}, x...)
-}
-
-// UnmarshalBytePoint decodes a point from prefixed format to native.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5
-func (c *ed25519) UnmarshalBytePoint(point []byte) (x []byte) {
- if len(point) != ed25519lib.PublicKeySize+1 {
- return nil
- }
-
- // Return unprefixed
- return point[1:]
-}
-
-// MarshalByteSecret encodes a scalar in native format.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5
-func (c *ed25519) MarshalByteSecret(d []byte) []byte {
- return d
-}
-
-// UnmarshalByteSecret decodes a scalar in native format and re-adds the stripped leading zeroes
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5
-func (c *ed25519) UnmarshalByteSecret(s []byte) (d []byte) {
- if len(s) > ed25519lib.SeedSize {
- return nil
- }
-
- // Handle stripped leading zeroes
- d = make([]byte, ed25519lib.SeedSize)
- copy(d[ed25519lib.SeedSize-len(s):], s)
- return
-}
-
-// MarshalSignature splits a signature in R and S.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.2.3.3.1
-func (c *ed25519) MarshalSignature(sig []byte) (r, s []byte) {
- return sig[:ed25519Size], sig[ed25519Size:]
-}
-
-// UnmarshalSignature decodes R and S in the native format, re-adding the stripped leading zeroes
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.2.3.3.1
-func (c *ed25519) UnmarshalSignature(r, s []byte) (sig []byte) {
- // Check size
- if len(r) > 32 || len(s) > 32 {
- return nil
- }
-
- sig = make([]byte, ed25519lib.SignatureSize)
-
- // Handle stripped leading zeroes
- copy(sig[ed25519Size-len(r):ed25519Size], r)
- copy(sig[ed25519lib.SignatureSize-len(s):], s)
- return sig
-}
-
-func (c *ed25519) GenerateEdDSA(rand io.Reader) (pub, priv []byte, err error) {
- pk, sk, err := ed25519lib.GenerateKey(rand)
-
- if err != nil {
- return nil, nil, err
- }
-
- return pk, sk[:ed25519lib.SeedSize], nil
-}
-
-func getEd25519Sk(publicKey, privateKey []byte) ed25519lib.PrivateKey {
- return append(privateKey, publicKey...)
-}
-
-func (c *ed25519) Sign(publicKey, privateKey, message []byte) (sig []byte, err error) {
- sig = ed25519lib.Sign(getEd25519Sk(publicKey, privateKey), message)
- return sig, nil
-}
-
-func (c *ed25519) Verify(publicKey, message, sig []byte) bool {
- return ed25519lib.Verify(publicKey, message, sig)
-}
-
-func (c *ed25519) ValidateEdDSA(publicKey, privateKey []byte) (err error) {
- priv := getEd25519Sk(publicKey, privateKey)
- expectedPriv := ed25519lib.NewKeyFromSeed(priv.Seed())
- if subtle.ConstantTimeCompare(priv, expectedPriv) == 0 {
- return errors.KeyInvalidError("ecc: invalid ed25519 secret")
- }
- return nil
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed448.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed448.go
deleted file mode 100644
index 18cd80434b6..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed448.go
+++ /dev/null
@@ -1,111 +0,0 @@
-// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA.
-package ecc
-
-import (
- "crypto/subtle"
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- ed448lib "github.com/cloudflare/circl/sign/ed448"
-)
-
-type ed448 struct{}
-
-func NewEd448() *ed448 {
- return &ed448{}
-}
-
-func (c *ed448) GetCurveName() string {
- return "ed448"
-}
-
-// MarshalBytePoint encodes the public point from native format, adding the prefix.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5
-func (c *ed448) MarshalBytePoint(x []byte) []byte {
- // Return prefixed
- return append([]byte{0x40}, x...)
-}
-
-// UnmarshalBytePoint decodes a point from prefixed format to native.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5
-func (c *ed448) UnmarshalBytePoint(point []byte) (x []byte) {
- if len(point) != ed448lib.PublicKeySize+1 {
- return nil
- }
-
- // Strip prefix
- return point[1:]
-}
-
-// MarshalByteSecret encoded a scalar from native format to prefixed.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5
-func (c *ed448) MarshalByteSecret(d []byte) []byte {
- // Return prefixed
- return append([]byte{0x40}, d...)
-}
-
-// UnmarshalByteSecret decodes a scalar from prefixed format to native.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5
-func (c *ed448) UnmarshalByteSecret(s []byte) (d []byte) {
- // Check prefixed size
- if len(s) != ed448lib.SeedSize+1 {
- return nil
- }
-
- // Strip prefix
- return s[1:]
-}
-
-// MarshalSignature splits a signature in R and S, where R is in prefixed native format and
-// S is an MPI with value zero.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.2.3.3.2
-func (c *ed448) MarshalSignature(sig []byte) (r, s []byte) {
- return append([]byte{0x40}, sig...), []byte{}
-}
-
-// UnmarshalSignature decodes R and S in the native format. Only R is used, in prefixed native format.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.2.3.3.2
-func (c *ed448) UnmarshalSignature(r, s []byte) (sig []byte) {
- if len(r) != ed448lib.SignatureSize+1 {
- return nil
- }
-
- return r[1:]
-}
-
-func (c *ed448) GenerateEdDSA(rand io.Reader) (pub, priv []byte, err error) {
- pk, sk, err := ed448lib.GenerateKey(rand)
-
- if err != nil {
- return nil, nil, err
- }
-
- return pk, sk[:ed448lib.SeedSize], nil
-}
-
-func getEd448Sk(publicKey, privateKey []byte) ed448lib.PrivateKey {
- return append(privateKey, publicKey...)
-}
-
-func (c *ed448) Sign(publicKey, privateKey, message []byte) (sig []byte, err error) {
- // Ed448 is used with the empty string as a context string.
- // See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-13.7
- sig = ed448lib.Sign(getEd448Sk(publicKey, privateKey), message, "")
-
- return sig, nil
-}
-
-func (c *ed448) Verify(publicKey, message, sig []byte) bool {
- // Ed448 is used with the empty string as a context string.
- // See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-13.7
- return ed448lib.Verify(publicKey, message, sig, "")
-}
-
-func (c *ed448) ValidateEdDSA(publicKey, privateKey []byte) (err error) {
- priv := getEd448Sk(publicKey, privateKey)
- expectedPriv := ed448lib.NewKeyFromSeed(priv.Seed())
- if subtle.ConstantTimeCompare(priv, expectedPriv) == 0 {
- return errors.KeyInvalidError("ecc: invalid ed448 secret")
- }
- return nil
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/generic.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/generic.go
deleted file mode 100644
index e28d7c7106a..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/generic.go
+++ /dev/null
@@ -1,149 +0,0 @@
-// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA.
-package ecc
-
-import (
- "crypto/ecdsa"
- "crypto/elliptic"
- "fmt"
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "io"
- "math/big"
-)
-
-type genericCurve struct {
- Curve elliptic.Curve
-}
-
-func NewGenericCurve(c elliptic.Curve) *genericCurve {
- return &genericCurve{
- Curve: c,
- }
-}
-
-func (c *genericCurve) GetCurveName() string {
- return c.Curve.Params().Name
-}
-
-func (c *genericCurve) MarshalBytePoint(point []byte) []byte {
- return point
-}
-
-func (c *genericCurve) UnmarshalBytePoint(point []byte) []byte {
- return point
-}
-
-func (c *genericCurve) MarshalIntegerPoint(x, y *big.Int) []byte {
- return elliptic.Marshal(c.Curve, x, y)
-}
-
-func (c *genericCurve) UnmarshalIntegerPoint(point []byte) (x, y *big.Int) {
- return elliptic.Unmarshal(c.Curve, point)
-}
-
-func (c *genericCurve) MarshalByteSecret(d []byte) []byte {
- return d
-}
-
-func (c *genericCurve) UnmarshalByteSecret(d []byte) []byte {
- return d
-}
-
-func (c *genericCurve) MarshalIntegerSecret(d *big.Int) []byte {
- return d.Bytes()
-}
-
-func (c *genericCurve) UnmarshalIntegerSecret(d []byte) *big.Int {
- return new(big.Int).SetBytes(d)
-}
-
-func (c *genericCurve) GenerateECDH(rand io.Reader) (point, secret []byte, err error) {
- secret, x, y, err := elliptic.GenerateKey(c.Curve, rand)
- if err != nil {
- return nil, nil, err
- }
-
- point = elliptic.Marshal(c.Curve, x, y)
- return point, secret, nil
-}
-
-func (c *genericCurve) GenerateECDSA(rand io.Reader) (x, y, secret *big.Int, err error) {
- priv, err := ecdsa.GenerateKey(c.Curve, rand)
- if err != nil {
- return
- }
-
- return priv.X, priv.Y, priv.D, nil
-}
-
-func (c *genericCurve) Encaps(rand io.Reader, point []byte) (ephemeral, sharedSecret []byte, err error) {
- xP, yP := elliptic.Unmarshal(c.Curve, point)
- if xP == nil {
- panic("invalid point")
- }
-
- d, x, y, err := elliptic.GenerateKey(c.Curve, rand)
- if err != nil {
- return nil, nil, err
- }
-
- vsG := elliptic.Marshal(c.Curve, x, y)
- zbBig, _ := c.Curve.ScalarMult(xP, yP, d)
-
- byteLen := (c.Curve.Params().BitSize + 7) >> 3
- zb := make([]byte, byteLen)
- zbBytes := zbBig.Bytes()
- copy(zb[byteLen-len(zbBytes):], zbBytes)
-
- return vsG, zb, nil
-}
-
-func (c *genericCurve) Decaps(ephemeral, secret []byte) (sharedSecret []byte, err error) {
- x, y := elliptic.Unmarshal(c.Curve, ephemeral)
- zbBig, _ := c.Curve.ScalarMult(x, y, secret)
- byteLen := (c.Curve.Params().BitSize + 7) >> 3
- zb := make([]byte, byteLen)
- zbBytes := zbBig.Bytes()
- copy(zb[byteLen-len(zbBytes):], zbBytes)
-
- return zb, nil
-}
-
-func (c *genericCurve) Sign(rand io.Reader, x, y, d *big.Int, hash []byte) (r, s *big.Int, err error) {
- priv := &ecdsa.PrivateKey{D: d, PublicKey: ecdsa.PublicKey{X: x, Y: y, Curve: c.Curve}}
- return ecdsa.Sign(rand, priv, hash)
-}
-
-func (c *genericCurve) Verify(x, y *big.Int, hash []byte, r, s *big.Int) bool {
- pub := &ecdsa.PublicKey{X: x, Y: y, Curve: c.Curve}
- return ecdsa.Verify(pub, hash, r, s)
-}
-
-func (c *genericCurve) validate(xP, yP *big.Int, secret []byte) error {
- // the public point should not be at infinity (0,0)
- zero := new(big.Int)
- if xP.Cmp(zero) == 0 && yP.Cmp(zero) == 0 {
- return errors.KeyInvalidError(fmt.Sprintf("ecc (%s): infinity point", c.Curve.Params().Name))
- }
-
- // re-derive the public point Q' = (X,Y) = dG
- // to compare to declared Q in public key
- expectedX, expectedY := c.Curve.ScalarBaseMult(secret)
- if xP.Cmp(expectedX) != 0 || yP.Cmp(expectedY) != 0 {
- return errors.KeyInvalidError(fmt.Sprintf("ecc (%s): invalid point", c.Curve.Params().Name))
- }
-
- return nil
-}
-
-func (c *genericCurve) ValidateECDSA(xP, yP *big.Int, secret []byte) error {
- return c.validate(xP, yP, secret)
-}
-
-func (c *genericCurve) ValidateECDH(point []byte, secret []byte) error {
- xP, yP := elliptic.Unmarshal(c.Curve, point)
- if xP == nil {
- return errors.KeyInvalidError(fmt.Sprintf("ecc (%s): invalid point", c.Curve.Params().Name))
- }
-
- return c.validate(xP, yP, secret)
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/x448.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/x448.go
deleted file mode 100644
index df04262e9e6..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/x448.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA.
-package ecc
-
-import (
- "crypto/subtle"
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- x448lib "github.com/cloudflare/circl/dh/x448"
-)
-
-type x448 struct{}
-
-func NewX448() *x448 {
- return &x448{}
-}
-
-func (c *x448) GetCurveName() string {
- return "x448"
-}
-
-// MarshalBytePoint encodes the public point from native format, adding the prefix.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6
-func (c *x448) MarshalBytePoint(point []byte) []byte {
- return append([]byte{0x40}, point...)
-}
-
-// UnmarshalBytePoint decodes a point from prefixed format to native.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6
-func (c *x448) UnmarshalBytePoint(point []byte) []byte {
- if len(point) != x448lib.Size+1 {
- return nil
- }
-
- return point[1:]
-}
-
-// MarshalByteSecret encoded a scalar from native format to prefixed.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6.1.2
-func (c *x448) MarshalByteSecret(d []byte) []byte {
- return append([]byte{0x40}, d...)
-}
-
-// UnmarshalByteSecret decodes a scalar from prefixed format to native.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6.1.2
-func (c *x448) UnmarshalByteSecret(d []byte) []byte {
- if len(d) != x448lib.Size+1 {
- return nil
- }
-
- // Store without prefix
- return d[1:]
-}
-
-func (c *x448) generateKeyPairBytes(rand io.Reader) (sk, pk x448lib.Key, err error) {
- if _, err = rand.Read(sk[:]); err != nil {
- return
- }
-
- x448lib.KeyGen(&pk, &sk)
- return
-}
-
-func (c *x448) GenerateECDH(rand io.Reader) (point []byte, secret []byte, err error) {
- priv, pub, err := c.generateKeyPairBytes(rand)
- if err != nil {
- return
- }
-
- return pub[:], priv[:], nil
-}
-
-func (c *x448) Encaps(rand io.Reader, point []byte) (ephemeral, sharedSecret []byte, err error) {
- var pk, ss x448lib.Key
- seed, e, err := c.generateKeyPairBytes(rand)
- if err != nil {
- return nil, nil, err
- }
- copy(pk[:], point)
- x448lib.Shared(&ss, &seed, &pk)
-
- return e[:], ss[:], nil
-}
-
-func (c *x448) Decaps(ephemeral, secret []byte) (sharedSecret []byte, err error) {
- var ss, sk, e x448lib.Key
-
- copy(sk[:], secret)
- copy(e[:], ephemeral)
- x448lib.Shared(&ss, &sk, &e)
-
- return ss[:], nil
-}
-
-func (c *x448) ValidateECDH(point []byte, secret []byte) error {
- var sk, pk, expectedPk x448lib.Key
-
- copy(pk[:], point)
- copy(sk[:], secret)
- x448lib.KeyGen(&expectedPk, &sk)
-
- if subtle.ConstantTimeCompare(expectedPk[:], pk[:]) == 0 {
- return errors.KeyInvalidError("ecc: invalid curve25519 public point")
- }
-
- return nil
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/encoding.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/encoding.go
deleted file mode 100644
index 6c921481b7b..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/encoding.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package encoding implements openpgp packet field encodings as specified in
-// RFC 4880 and 6637.
-package encoding
-
-import "io"
-
-// Field is an encoded field of an openpgp packet.
-type Field interface {
- // Bytes returns the decoded data.
- Bytes() []byte
-
- // BitLength is the size in bits of the decoded data.
- BitLength() uint16
-
- // EncodedBytes returns the encoded data.
- EncodedBytes() []byte
-
- // EncodedLength is the size in bytes of the encoded data.
- EncodedLength() uint16
-
- // ReadFrom reads the next Field from r.
- ReadFrom(r io.Reader) (int64, error)
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/mpi.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/mpi.go
deleted file mode 100644
index 02e5e695c38..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/mpi.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package encoding
-
-import (
- "io"
- "math/big"
- "math/bits"
-)
-
-// An MPI is used to store the contents of a big integer, along with the bit
-// length that was specified in the original input. This allows the MPI to be
-// reserialized exactly.
-type MPI struct {
- bytes []byte
- bitLength uint16
-}
-
-// NewMPI returns a MPI initialized with bytes.
-func NewMPI(bytes []byte) *MPI {
- for len(bytes) != 0 && bytes[0] == 0 {
- bytes = bytes[1:]
- }
- if len(bytes) == 0 {
- bitLength := uint16(0)
- return &MPI{bytes, bitLength}
- }
- bitLength := 8*uint16(len(bytes)-1) + uint16(bits.Len8(bytes[0]))
- return &MPI{bytes, bitLength}
-}
-
-// Bytes returns the decoded data.
-func (m *MPI) Bytes() []byte {
- return m.bytes
-}
-
-// BitLength is the size in bits of the decoded data.
-func (m *MPI) BitLength() uint16 {
- return m.bitLength
-}
-
-// EncodedBytes returns the encoded data.
-func (m *MPI) EncodedBytes() []byte {
- return append([]byte{byte(m.bitLength >> 8), byte(m.bitLength)}, m.bytes...)
-}
-
-// EncodedLength is the size in bytes of the encoded data.
-func (m *MPI) EncodedLength() uint16 {
- return uint16(2 + len(m.bytes))
-}
-
-// ReadFrom reads into m the next MPI from r.
-func (m *MPI) ReadFrom(r io.Reader) (int64, error) {
- var buf [2]byte
- n, err := io.ReadFull(r, buf[0:])
- if err != nil {
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- return int64(n), err
- }
-
- m.bitLength = uint16(buf[0])<<8 | uint16(buf[1])
- m.bytes = make([]byte, (int(m.bitLength)+7)/8)
-
- nn, err := io.ReadFull(r, m.bytes)
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
-
- // remove leading zero bytes from malformed GnuPG encoded MPIs:
- // https://bugs.gnupg.org/gnupg/issue1853
- // for _, b := range m.bytes {
- // if b != 0 {
- // break
- // }
- // m.bytes = m.bytes[1:]
- // m.bitLength -= 8
- // }
-
- return int64(n) + int64(nn), err
-}
-
-// SetBig initializes m with the bits from n.
-func (m *MPI) SetBig(n *big.Int) *MPI {
- m.bytes = n.Bytes()
- m.bitLength = uint16(n.BitLen())
- return m
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/oid.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/oid.go
deleted file mode 100644
index c9df9fe2322..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/oid.go
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package encoding
-
-import (
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
-)
-
-// OID is used to store a variable-length field with a one-octet size
-// prefix. See https://tools.ietf.org/html/rfc6637#section-9.
-type OID struct {
- bytes []byte
-}
-
-const (
- // maxOID is the maximum number of bytes in a OID.
- maxOID = 254
- // reservedOIDLength1 and reservedOIDLength2 are OID lengths that the RFC
- // specifies are reserved.
- reservedOIDLength1 = 0
- reservedOIDLength2 = 0xff
-)
-
-// NewOID returns a OID initialized with bytes.
-func NewOID(bytes []byte) *OID {
- switch len(bytes) {
- case reservedOIDLength1, reservedOIDLength2:
- panic("encoding: NewOID argument length is reserved")
- default:
- if len(bytes) > maxOID {
- panic("encoding: NewOID argument too large")
- }
- }
-
- return &OID{
- bytes: bytes,
- }
-}
-
-// Bytes returns the decoded data.
-func (o *OID) Bytes() []byte {
- return o.bytes
-}
-
-// BitLength is the size in bits of the decoded data.
-func (o *OID) BitLength() uint16 {
- return uint16(len(o.bytes) * 8)
-}
-
-// EncodedBytes returns the encoded data.
-func (o *OID) EncodedBytes() []byte {
- return append([]byte{byte(len(o.bytes))}, o.bytes...)
-}
-
-// EncodedLength is the size in bytes of the encoded data.
-func (o *OID) EncodedLength() uint16 {
- return uint16(1 + len(o.bytes))
-}
-
-// ReadFrom reads into b the next OID from r.
-func (o *OID) ReadFrom(r io.Reader) (int64, error) {
- var buf [1]byte
- n, err := io.ReadFull(r, buf[:])
- if err != nil {
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- return int64(n), err
- }
-
- switch buf[0] {
- case reservedOIDLength1, reservedOIDLength2:
- return int64(n), errors.UnsupportedError("reserved for future extensions")
- }
-
- o.bytes = make([]byte, buf[0])
-
- nn, err := io.ReadFull(r, o.bytes)
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
-
- return int64(n) + int64(nn), err
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go
deleted file mode 100644
index a40e45beeef..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go
+++ /dev/null
@@ -1,445 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package openpgp
-
-import (
- "crypto"
- "crypto/rand"
- "crypto/rsa"
- goerrors "errors"
- "io"
- "math/big"
- "time"
-
- "github.com/ProtonMail/go-crypto/openpgp/ecdh"
- "github.com/ProtonMail/go-crypto/openpgp/ecdsa"
- "github.com/ProtonMail/go-crypto/openpgp/ed25519"
- "github.com/ProtonMail/go-crypto/openpgp/ed448"
- "github.com/ProtonMail/go-crypto/openpgp/eddsa"
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
- "github.com/ProtonMail/go-crypto/openpgp/internal/ecc"
- "github.com/ProtonMail/go-crypto/openpgp/packet"
- "github.com/ProtonMail/go-crypto/openpgp/x25519"
- "github.com/ProtonMail/go-crypto/openpgp/x448"
-)
-
-// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a
-// single identity composed of the given full name, comment and email, any of
-// which may be empty but must not contain any of "()<>\x00".
-// If config is nil, sensible defaults will be used.
-func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) {
- creationTime := config.Now()
- keyLifetimeSecs := config.KeyLifetime()
-
- // Generate a primary signing key
- primaryPrivRaw, err := newSigner(config)
- if err != nil {
- return nil, err
- }
- primary := packet.NewSignerPrivateKey(creationTime, primaryPrivRaw)
- if config.V6() {
- primary.UpgradeToV6()
- }
-
- e := &Entity{
- PrimaryKey: &primary.PublicKey,
- PrivateKey: primary,
- Identities: make(map[string]*Identity),
- Subkeys: []Subkey{},
- Signatures: []*packet.Signature{},
- }
-
- if config.V6() {
- // In v6 keys algorithm preferences should be stored in direct key signatures
- selfSignature := createSignaturePacket(&primary.PublicKey, packet.SigTypeDirectSignature, config)
- err = writeKeyProperties(selfSignature, creationTime, keyLifetimeSecs, config)
- if err != nil {
- return nil, err
- }
- err = selfSignature.SignDirectKeyBinding(&primary.PublicKey, primary, config)
- if err != nil {
- return nil, err
- }
- e.Signatures = append(e.Signatures, selfSignature)
- e.SelfSignature = selfSignature
- }
-
- err = e.addUserId(name, comment, email, config, creationTime, keyLifetimeSecs, !config.V6())
- if err != nil {
- return nil, err
- }
-
- // NOTE: No key expiry here, but we will not return this subkey in EncryptionKey()
- // if the primary/master key has expired.
- err = e.addEncryptionSubkey(config, creationTime, 0)
- if err != nil {
- return nil, err
- }
-
- return e, nil
-}
-
-func (t *Entity) AddUserId(name, comment, email string, config *packet.Config) error {
- creationTime := config.Now()
- keyLifetimeSecs := config.KeyLifetime()
- return t.addUserId(name, comment, email, config, creationTime, keyLifetimeSecs, !config.V6())
-}
-
-func writeKeyProperties(selfSignature *packet.Signature, creationTime time.Time, keyLifetimeSecs uint32, config *packet.Config) error {
- selfSignature.CreationTime = creationTime
- selfSignature.KeyLifetimeSecs = &keyLifetimeSecs
- selfSignature.FlagsValid = true
- selfSignature.FlagSign = true
- selfSignature.FlagCertify = true
- selfSignature.SEIPDv1 = true // true by default, see 5.8 vs. 5.14
- selfSignature.SEIPDv2 = config.AEAD() != nil
-
- // Set the PreferredHash for the SelfSignature from the packet.Config.
- // If it is not the must-implement algorithm from rfc4880bis, append that.
- hash, ok := algorithm.HashToHashId(config.Hash())
- if !ok {
- return errors.UnsupportedError("unsupported preferred hash function")
- }
-
- selfSignature.PreferredHash = []uint8{hash}
- if config.Hash() != crypto.SHA256 {
- selfSignature.PreferredHash = append(selfSignature.PreferredHash, hashToHashId(crypto.SHA256))
- }
-
- // Likewise for DefaultCipher.
- selfSignature.PreferredSymmetric = []uint8{uint8(config.Cipher())}
- if config.Cipher() != packet.CipherAES128 {
- selfSignature.PreferredSymmetric = append(selfSignature.PreferredSymmetric, uint8(packet.CipherAES128))
- }
-
- // We set CompressionNone as the preferred compression algorithm because
- // of compression side channel attacks, then append the configured
- // DefaultCompressionAlgo if any is set (to signal support for cases
- // where the application knows that using compression is safe).
- selfSignature.PreferredCompression = []uint8{uint8(packet.CompressionNone)}
- if config.Compression() != packet.CompressionNone {
- selfSignature.PreferredCompression = append(selfSignature.PreferredCompression, uint8(config.Compression()))
- }
-
- // And for DefaultMode.
- modes := []uint8{uint8(config.AEAD().Mode())}
- if config.AEAD().Mode() != packet.AEADModeOCB {
- modes = append(modes, uint8(packet.AEADModeOCB))
- }
-
- // For preferred (AES256, GCM), we'll generate (AES256, GCM), (AES256, OCB), (AES128, GCM), (AES128, OCB)
- for _, cipher := range selfSignature.PreferredSymmetric {
- for _, mode := range modes {
- selfSignature.PreferredCipherSuites = append(selfSignature.PreferredCipherSuites, [2]uint8{cipher, mode})
- }
- }
- return nil
-}
-
-func (t *Entity) addUserId(name, comment, email string, config *packet.Config, creationTime time.Time, keyLifetimeSecs uint32, writeProperties bool) error {
- uid := packet.NewUserId(name, comment, email)
- if uid == nil {
- return errors.InvalidArgumentError("user id field contained invalid characters")
- }
-
- if _, ok := t.Identities[uid.Id]; ok {
- return errors.InvalidArgumentError("user id exist")
- }
-
- primary := t.PrivateKey
- isPrimaryId := len(t.Identities) == 0
- selfSignature := createSignaturePacket(&primary.PublicKey, packet.SigTypePositiveCert, config)
- if writeProperties {
- err := writeKeyProperties(selfSignature, creationTime, keyLifetimeSecs, config)
- if err != nil {
- return err
- }
- }
- selfSignature.IsPrimaryId = &isPrimaryId
-
- // User ID binding signature
- err := selfSignature.SignUserId(uid.Id, &primary.PublicKey, primary, config)
- if err != nil {
- return err
- }
- t.Identities[uid.Id] = &Identity{
- Name: uid.Id,
- UserId: uid,
- SelfSignature: selfSignature,
- Signatures: []*packet.Signature{selfSignature},
- }
- return nil
-}
-
-// AddSigningSubkey adds a signing keypair as a subkey to the Entity.
-// If config is nil, sensible defaults will be used.
-func (e *Entity) AddSigningSubkey(config *packet.Config) error {
- creationTime := config.Now()
- keyLifetimeSecs := config.KeyLifetime()
-
- subPrivRaw, err := newSigner(config)
- if err != nil {
- return err
- }
- sub := packet.NewSignerPrivateKey(creationTime, subPrivRaw)
- sub.IsSubkey = true
- if config.V6() {
- sub.UpgradeToV6()
- }
-
- subkey := Subkey{
- PublicKey: &sub.PublicKey,
- PrivateKey: sub,
- }
- subkey.Sig = createSignaturePacket(e.PrimaryKey, packet.SigTypeSubkeyBinding, config)
- subkey.Sig.CreationTime = creationTime
- subkey.Sig.KeyLifetimeSecs = &keyLifetimeSecs
- subkey.Sig.FlagsValid = true
- subkey.Sig.FlagSign = true
- subkey.Sig.EmbeddedSignature = createSignaturePacket(subkey.PublicKey, packet.SigTypePrimaryKeyBinding, config)
- subkey.Sig.EmbeddedSignature.CreationTime = creationTime
-
- err = subkey.Sig.EmbeddedSignature.CrossSignKey(subkey.PublicKey, e.PrimaryKey, subkey.PrivateKey, config)
- if err != nil {
- return err
- }
-
- err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config)
- if err != nil {
- return err
- }
-
- e.Subkeys = append(e.Subkeys, subkey)
- return nil
-}
-
-// AddEncryptionSubkey adds an encryption keypair as a subkey to the Entity.
-// If config is nil, sensible defaults will be used.
-func (e *Entity) AddEncryptionSubkey(config *packet.Config) error {
- creationTime := config.Now()
- keyLifetimeSecs := config.KeyLifetime()
- return e.addEncryptionSubkey(config, creationTime, keyLifetimeSecs)
-}
-
-func (e *Entity) addEncryptionSubkey(config *packet.Config, creationTime time.Time, keyLifetimeSecs uint32) error {
- subPrivRaw, err := newDecrypter(config)
- if err != nil {
- return err
- }
- sub := packet.NewDecrypterPrivateKey(creationTime, subPrivRaw)
- sub.IsSubkey = true
- if config.V6() {
- sub.UpgradeToV6()
- }
-
- subkey := Subkey{
- PublicKey: &sub.PublicKey,
- PrivateKey: sub,
- }
- subkey.Sig = createSignaturePacket(e.PrimaryKey, packet.SigTypeSubkeyBinding, config)
- subkey.Sig.CreationTime = creationTime
- subkey.Sig.KeyLifetimeSecs = &keyLifetimeSecs
- subkey.Sig.FlagsValid = true
- subkey.Sig.FlagEncryptStorage = true
- subkey.Sig.FlagEncryptCommunications = true
-
- err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config)
- if err != nil {
- return err
- }
-
- e.Subkeys = append(e.Subkeys, subkey)
- return nil
-}
-
-// Generates a signing key
-func newSigner(config *packet.Config) (signer interface{}, err error) {
- switch config.PublicKeyAlgorithm() {
- case packet.PubKeyAlgoRSA:
- bits := config.RSAModulusBits()
- if bits < 1024 {
- return nil, errors.InvalidArgumentError("bits must be >= 1024")
- }
- if config != nil && len(config.RSAPrimes) >= 2 {
- primes := config.RSAPrimes[0:2]
- config.RSAPrimes = config.RSAPrimes[2:]
- return generateRSAKeyWithPrimes(config.Random(), 2, bits, primes)
- }
- return rsa.GenerateKey(config.Random(), bits)
- case packet.PubKeyAlgoEdDSA:
- if config.V6() {
- // Implementations MUST NOT accept or generate v6 key material
- // using the deprecated OIDs.
- return nil, errors.InvalidArgumentError("EdDSALegacy cannot be used for v6 keys")
- }
- curve := ecc.FindEdDSAByGenName(string(config.CurveName()))
- if curve == nil {
- return nil, errors.InvalidArgumentError("unsupported curve")
- }
-
- priv, err := eddsa.GenerateKey(config.Random(), curve)
- if err != nil {
- return nil, err
- }
- return priv, nil
- case packet.PubKeyAlgoECDSA:
- curve := ecc.FindECDSAByGenName(string(config.CurveName()))
- if curve == nil {
- return nil, errors.InvalidArgumentError("unsupported curve")
- }
-
- priv, err := ecdsa.GenerateKey(config.Random(), curve)
- if err != nil {
- return nil, err
- }
- return priv, nil
- case packet.PubKeyAlgoEd25519:
- priv, err := ed25519.GenerateKey(config.Random())
- if err != nil {
- return nil, err
- }
- return priv, nil
- case packet.PubKeyAlgoEd448:
- priv, err := ed448.GenerateKey(config.Random())
- if err != nil {
- return nil, err
- }
- return priv, nil
- default:
- return nil, errors.InvalidArgumentError("unsupported public key algorithm")
- }
-}
-
-// Generates an encryption/decryption key
-func newDecrypter(config *packet.Config) (decrypter interface{}, err error) {
- switch config.PublicKeyAlgorithm() {
- case packet.PubKeyAlgoRSA:
- bits := config.RSAModulusBits()
- if bits < 1024 {
- return nil, errors.InvalidArgumentError("bits must be >= 1024")
- }
- if config != nil && len(config.RSAPrimes) >= 2 {
- primes := config.RSAPrimes[0:2]
- config.RSAPrimes = config.RSAPrimes[2:]
- return generateRSAKeyWithPrimes(config.Random(), 2, bits, primes)
- }
- return rsa.GenerateKey(config.Random(), bits)
- case packet.PubKeyAlgoEdDSA, packet.PubKeyAlgoECDSA:
- fallthrough // When passing EdDSA or ECDSA, we generate an ECDH subkey
- case packet.PubKeyAlgoECDH:
- if config.V6() &&
- (config.CurveName() == packet.Curve25519 ||
- config.CurveName() == packet.Curve448) {
- // Implementations MUST NOT accept or generate v6 key material
- // using the deprecated OIDs.
- return nil, errors.InvalidArgumentError("ECDH with Curve25519/448 legacy cannot be used for v6 keys")
- }
- var kdf = ecdh.KDF{
- Hash: algorithm.SHA512,
- Cipher: algorithm.AES256,
- }
- curve := ecc.FindECDHByGenName(string(config.CurveName()))
- if curve == nil {
- return nil, errors.InvalidArgumentError("unsupported curve")
- }
- return ecdh.GenerateKey(config.Random(), curve, kdf)
- case packet.PubKeyAlgoEd25519, packet.PubKeyAlgoX25519: // When passing Ed25519, we generate an x25519 subkey
- return x25519.GenerateKey(config.Random())
- case packet.PubKeyAlgoEd448, packet.PubKeyAlgoX448: // When passing Ed448, we generate an x448 subkey
- return x448.GenerateKey(config.Random())
- default:
- return nil, errors.InvalidArgumentError("unsupported public key algorithm")
- }
-}
-
-var bigOne = big.NewInt(1)
-
-// generateRSAKeyWithPrimes generates a multi-prime RSA keypair of the
-// given bit size, using the given random source and pre-populated primes.
-func generateRSAKeyWithPrimes(random io.Reader, nprimes int, bits int, prepopulatedPrimes []*big.Int) (*rsa.PrivateKey, error) {
- priv := new(rsa.PrivateKey)
- priv.E = 65537
-
- if nprimes < 2 {
- return nil, goerrors.New("generateRSAKeyWithPrimes: nprimes must be >= 2")
- }
-
- if bits < 1024 {
- return nil, goerrors.New("generateRSAKeyWithPrimes: bits must be >= 1024")
- }
-
- primes := make([]*big.Int, nprimes)
-
-NextSetOfPrimes:
- for {
- todo := bits
- // crypto/rand should set the top two bits in each prime.
- // Thus each prime has the form
- // p_i = 2^bitlen(p_i) × 0.11... (in base 2).
- // And the product is:
- // P = 2^todo × α
- // where α is the product of nprimes numbers of the form 0.11...
- //
- // If α < 1/2 (which can happen for nprimes > 2), we need to
- // shift todo to compensate for lost bits: the mean value of 0.11...
- // is 7/8, so todo + shift - nprimes * log2(7/8) ~= bits - 1/2
- // will give good results.
- if nprimes >= 7 {
- todo += (nprimes - 2) / 5
- }
- for i := 0; i < nprimes; i++ {
- var err error
- if len(prepopulatedPrimes) == 0 {
- primes[i], err = rand.Prime(random, todo/(nprimes-i))
- if err != nil {
- return nil, err
- }
- } else {
- primes[i] = prepopulatedPrimes[0]
- prepopulatedPrimes = prepopulatedPrimes[1:]
- }
-
- todo -= primes[i].BitLen()
- }
-
- // Make sure that primes is pairwise unequal.
- for i, prime := range primes {
- for j := 0; j < i; j++ {
- if prime.Cmp(primes[j]) == 0 {
- continue NextSetOfPrimes
- }
- }
- }
-
- n := new(big.Int).Set(bigOne)
- totient := new(big.Int).Set(bigOne)
- pminus1 := new(big.Int)
- for _, prime := range primes {
- n.Mul(n, prime)
- pminus1.Sub(prime, bigOne)
- totient.Mul(totient, pminus1)
- }
- if n.BitLen() != bits {
- // This should never happen for nprimes == 2 because
- // crypto/rand should set the top two bits in each prime.
- // For nprimes > 2 we hope it does not happen often.
- continue NextSetOfPrimes
- }
-
- priv.D = new(big.Int)
- e := big.NewInt(int64(priv.E))
- ok := priv.D.ModInverse(e, totient)
-
- if ok != nil {
- priv.Primes = primes
- priv.N = n
- break
- }
- }
-
- priv.Precompute()
- return priv, nil
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go
deleted file mode 100644
index a071353e2ec..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go
+++ /dev/null
@@ -1,901 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package openpgp
-
-import (
- goerrors "errors"
- "fmt"
- "io"
- "time"
-
- "github.com/ProtonMail/go-crypto/openpgp/armor"
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/packet"
-)
-
-// PublicKeyType is the armor type for a PGP public key.
-var PublicKeyType = "PGP PUBLIC KEY BLOCK"
-
-// PrivateKeyType is the armor type for a PGP private key.
-var PrivateKeyType = "PGP PRIVATE KEY BLOCK"
-
-// An Entity represents the components of an OpenPGP key: a primary public key
-// (which must be a signing key), one or more identities claimed by that key,
-// and zero or more subkeys, which may be encryption keys.
-type Entity struct {
- PrimaryKey *packet.PublicKey
- PrivateKey *packet.PrivateKey
- Identities map[string]*Identity // indexed by Identity.Name
- Revocations []*packet.Signature
- Subkeys []Subkey
- SelfSignature *packet.Signature // Direct-key self signature of the PrimaryKey (contains primary key properties in v6)
- Signatures []*packet.Signature // all (potentially unverified) self-signatures, revocations, and third-party signatures
-}
-
-// An Identity represents an identity claimed by an Entity and zero or more
-// assertions by other entities about that claim.
-type Identity struct {
- Name string // by convention, has the form "Full Name (comment) "
- UserId *packet.UserId
- SelfSignature *packet.Signature
- Revocations []*packet.Signature
- Signatures []*packet.Signature // all (potentially unverified) self-signatures, revocations, and third-party signatures
-}
-
-// A Subkey is an additional public key in an Entity. Subkeys can be used for
-// encryption.
-type Subkey struct {
- PublicKey *packet.PublicKey
- PrivateKey *packet.PrivateKey
- Sig *packet.Signature
- Revocations []*packet.Signature
-}
-
-// A Key identifies a specific public key in an Entity. This is either the
-// Entity's primary key or a subkey.
-type Key struct {
- Entity *Entity
- PublicKey *packet.PublicKey
- PrivateKey *packet.PrivateKey
- SelfSignature *packet.Signature
- Revocations []*packet.Signature
-}
-
-// A KeyRing provides access to public and private keys.
-type KeyRing interface {
- // KeysById returns the set of keys that have the given key id.
- KeysById(id uint64) []Key
- // KeysByIdAndUsage returns the set of keys with the given id
- // that also meet the key usage given by requiredUsage.
- // The requiredUsage is expressed as the bitwise-OR of
- // packet.KeyFlag* values.
- KeysByIdUsage(id uint64, requiredUsage byte) []Key
- // DecryptionKeys returns all private keys that are valid for
- // decryption.
- DecryptionKeys() []Key
-}
-
-// PrimaryIdentity returns an Identity, preferring non-revoked identities,
-// identities marked as primary, or the latest-created identity, in that order.
-func (e *Entity) PrimaryIdentity() *Identity {
- var primaryIdentity *Identity
- for _, ident := range e.Identities {
- if shouldPreferIdentity(primaryIdentity, ident) {
- primaryIdentity = ident
- }
- }
- return primaryIdentity
-}
-
-func shouldPreferIdentity(existingId, potentialNewId *Identity) bool {
- if existingId == nil {
- return true
- }
-
- if len(existingId.Revocations) > len(potentialNewId.Revocations) {
- return true
- }
-
- if len(existingId.Revocations) < len(potentialNewId.Revocations) {
- return false
- }
-
- if existingId.SelfSignature == nil {
- return true
- }
-
- if existingId.SelfSignature.IsPrimaryId != nil && *existingId.SelfSignature.IsPrimaryId &&
- !(potentialNewId.SelfSignature.IsPrimaryId != nil && *potentialNewId.SelfSignature.IsPrimaryId) {
- return false
- }
-
- if !(existingId.SelfSignature.IsPrimaryId != nil && *existingId.SelfSignature.IsPrimaryId) &&
- potentialNewId.SelfSignature.IsPrimaryId != nil && *potentialNewId.SelfSignature.IsPrimaryId {
- return true
- }
-
- return potentialNewId.SelfSignature.CreationTime.After(existingId.SelfSignature.CreationTime)
-}
-
-// EncryptionKey returns the best candidate Key for encrypting a message to the
-// given Entity.
-func (e *Entity) EncryptionKey(now time.Time) (Key, bool) {
- // Fail to find any encryption key if the...
- primarySelfSignature, primaryIdentity := e.PrimarySelfSignature()
- if primarySelfSignature == nil || // no self-signature found
- e.PrimaryKey.KeyExpired(primarySelfSignature, now) || // primary key has expired
- e.Revoked(now) || // primary key has been revoked
- primarySelfSignature.SigExpired(now) || // user ID or or direct self-signature has expired
- (primaryIdentity != nil && primaryIdentity.Revoked(now)) { // user ID has been revoked (for v4 keys)
- return Key{}, false
- }
-
- // Iterate the keys to find the newest, unexpired one
- candidateSubkey := -1
- var maxTime time.Time
- for i, subkey := range e.Subkeys {
- if subkey.Sig.FlagsValid &&
- subkey.Sig.FlagEncryptCommunications &&
- subkey.PublicKey.PubKeyAlgo.CanEncrypt() &&
- !subkey.PublicKey.KeyExpired(subkey.Sig, now) &&
- !subkey.Sig.SigExpired(now) &&
- !subkey.Revoked(now) &&
- (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) {
- candidateSubkey = i
- maxTime = subkey.Sig.CreationTime
- }
- }
-
- if candidateSubkey != -1 {
- subkey := e.Subkeys[candidateSubkey]
- return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig, subkey.Revocations}, true
- }
-
- // If we don't have any subkeys for encryption and the primary key
- // is marked as OK to encrypt with, then we can use it.
- if primarySelfSignature.FlagsValid && primarySelfSignature.FlagEncryptCommunications &&
- e.PrimaryKey.PubKeyAlgo.CanEncrypt() {
- return Key{e, e.PrimaryKey, e.PrivateKey, primarySelfSignature, e.Revocations}, true
- }
-
- return Key{}, false
-}
-
-// CertificationKey return the best candidate Key for certifying a key with this
-// Entity.
-func (e *Entity) CertificationKey(now time.Time) (Key, bool) {
- return e.CertificationKeyById(now, 0)
-}
-
-// CertificationKeyById return the Key for key certification with this
-// Entity and keyID.
-func (e *Entity) CertificationKeyById(now time.Time, id uint64) (Key, bool) {
- return e.signingKeyByIdUsage(now, id, packet.KeyFlagCertify)
-}
-
-// SigningKey return the best candidate Key for signing a message with this
-// Entity.
-func (e *Entity) SigningKey(now time.Time) (Key, bool) {
- return e.SigningKeyById(now, 0)
-}
-
-// SigningKeyById return the Key for signing a message with this
-// Entity and keyID.
-func (e *Entity) SigningKeyById(now time.Time, id uint64) (Key, bool) {
- return e.signingKeyByIdUsage(now, id, packet.KeyFlagSign)
-}
-
-func (e *Entity) signingKeyByIdUsage(now time.Time, id uint64, flags int) (Key, bool) {
- // Fail to find any signing key if the...
- primarySelfSignature, primaryIdentity := e.PrimarySelfSignature()
- if primarySelfSignature == nil || // no self-signature found
- e.PrimaryKey.KeyExpired(primarySelfSignature, now) || // primary key has expired
- e.Revoked(now) || // primary key has been revoked
- primarySelfSignature.SigExpired(now) || // user ID or direct self-signature has expired
- (primaryIdentity != nil && primaryIdentity.Revoked(now)) { // user ID has been revoked (for v4 keys)
- return Key{}, false
- }
-
- // Iterate the keys to find the newest, unexpired one
- candidateSubkey := -1
- var maxTime time.Time
- for idx, subkey := range e.Subkeys {
- if subkey.Sig.FlagsValid &&
- (flags&packet.KeyFlagCertify == 0 || subkey.Sig.FlagCertify) &&
- (flags&packet.KeyFlagSign == 0 || subkey.Sig.FlagSign) &&
- subkey.PublicKey.PubKeyAlgo.CanSign() &&
- !subkey.PublicKey.KeyExpired(subkey.Sig, now) &&
- !subkey.Sig.SigExpired(now) &&
- !subkey.Revoked(now) &&
- (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) &&
- (id == 0 || subkey.PublicKey.KeyId == id) {
- candidateSubkey = idx
- maxTime = subkey.Sig.CreationTime
- }
- }
-
- if candidateSubkey != -1 {
- subkey := e.Subkeys[candidateSubkey]
- return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig, subkey.Revocations}, true
- }
-
- // If we don't have any subkeys for signing and the primary key
- // is marked as OK to sign with, then we can use it.
- if primarySelfSignature.FlagsValid &&
- (flags&packet.KeyFlagCertify == 0 || primarySelfSignature.FlagCertify) &&
- (flags&packet.KeyFlagSign == 0 || primarySelfSignature.FlagSign) &&
- e.PrimaryKey.PubKeyAlgo.CanSign() &&
- (id == 0 || e.PrimaryKey.KeyId == id) {
- return Key{e, e.PrimaryKey, e.PrivateKey, primarySelfSignature, e.Revocations}, true
- }
-
- // No keys with a valid Signing Flag or no keys matched the id passed in
- return Key{}, false
-}
-
-func revoked(revocations []*packet.Signature, now time.Time) bool {
- for _, revocation := range revocations {
- if revocation.RevocationReason != nil && *revocation.RevocationReason == packet.KeyCompromised {
- // If the key is compromised, the key is considered revoked even before the revocation date.
- return true
- }
- if !revocation.SigExpired(now) {
- return true
- }
- }
- return false
-}
-
-// Revoked returns whether the entity has any direct key revocation signatures.
-// Note that third-party revocation signatures are not supported.
-// Note also that Identity and Subkey revocation should be checked separately.
-func (e *Entity) Revoked(now time.Time) bool {
- return revoked(e.Revocations, now)
-}
-
-// EncryptPrivateKeys encrypts all non-encrypted keys in the entity with the same key
-// derived from the provided passphrase. Public keys and dummy keys are ignored,
-// and don't cause an error to be returned.
-func (e *Entity) EncryptPrivateKeys(passphrase []byte, config *packet.Config) error {
- var keysToEncrypt []*packet.PrivateKey
- // Add entity private key to encrypt.
- if e.PrivateKey != nil && !e.PrivateKey.Dummy() && !e.PrivateKey.Encrypted {
- keysToEncrypt = append(keysToEncrypt, e.PrivateKey)
- }
-
- // Add subkeys to encrypt.
- for _, sub := range e.Subkeys {
- if sub.PrivateKey != nil && !sub.PrivateKey.Dummy() && !sub.PrivateKey.Encrypted {
- keysToEncrypt = append(keysToEncrypt, sub.PrivateKey)
- }
- }
- return packet.EncryptPrivateKeys(keysToEncrypt, passphrase, config)
-}
-
-// DecryptPrivateKeys decrypts all encrypted keys in the entity with the given passphrase.
-// Avoids recomputation of similar s2k key derivations. Public keys and dummy keys are ignored,
-// and don't cause an error to be returned.
-func (e *Entity) DecryptPrivateKeys(passphrase []byte) error {
- var keysToDecrypt []*packet.PrivateKey
- // Add entity private key to decrypt.
- if e.PrivateKey != nil && !e.PrivateKey.Dummy() && e.PrivateKey.Encrypted {
- keysToDecrypt = append(keysToDecrypt, e.PrivateKey)
- }
-
- // Add subkeys to decrypt.
- for _, sub := range e.Subkeys {
- if sub.PrivateKey != nil && !sub.PrivateKey.Dummy() && sub.PrivateKey.Encrypted {
- keysToDecrypt = append(keysToDecrypt, sub.PrivateKey)
- }
- }
- return packet.DecryptPrivateKeys(keysToDecrypt, passphrase)
-}
-
-// Revoked returns whether the identity has been revoked by a self-signature.
-// Note that third-party revocation signatures are not supported.
-func (i *Identity) Revoked(now time.Time) bool {
- return revoked(i.Revocations, now)
-}
-
-// Revoked returns whether the subkey has been revoked by a self-signature.
-// Note that third-party revocation signatures are not supported.
-func (s *Subkey) Revoked(now time.Time) bool {
- return revoked(s.Revocations, now)
-}
-
-// Revoked returns whether the key or subkey has been revoked by a self-signature.
-// Note that third-party revocation signatures are not supported.
-// Note also that Identity revocation should be checked separately.
-// Normally, it's not necessary to call this function, except on keys returned by
-// KeysById or KeysByIdUsage.
-func (key *Key) Revoked(now time.Time) bool {
- return revoked(key.Revocations, now)
-}
-
-// An EntityList contains one or more Entities.
-type EntityList []*Entity
-
-// KeysById returns the set of keys that have the given key id.
-func (el EntityList) KeysById(id uint64) (keys []Key) {
- for _, e := range el {
- if e.PrimaryKey.KeyId == id {
- selfSig, _ := e.PrimarySelfSignature()
- keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig, e.Revocations})
- }
-
- for _, subKey := range e.Subkeys {
- if subKey.PublicKey.KeyId == id {
- keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig, subKey.Revocations})
- }
- }
- }
- return
-}
-
-// KeysByIdAndUsage returns the set of keys with the given id that also meet
-// the key usage given by requiredUsage. The requiredUsage is expressed as
-// the bitwise-OR of packet.KeyFlag* values.
-func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) {
- for _, key := range el.KeysById(id) {
- if requiredUsage != 0 {
- if key.SelfSignature == nil || !key.SelfSignature.FlagsValid {
- continue
- }
-
- var usage byte
- if key.SelfSignature.FlagCertify {
- usage |= packet.KeyFlagCertify
- }
- if key.SelfSignature.FlagSign {
- usage |= packet.KeyFlagSign
- }
- if key.SelfSignature.FlagEncryptCommunications {
- usage |= packet.KeyFlagEncryptCommunications
- }
- if key.SelfSignature.FlagEncryptStorage {
- usage |= packet.KeyFlagEncryptStorage
- }
- if usage&requiredUsage != requiredUsage {
- continue
- }
- }
-
- keys = append(keys, key)
- }
- return
-}
-
-// DecryptionKeys returns all private keys that are valid for decryption.
-func (el EntityList) DecryptionKeys() (keys []Key) {
- for _, e := range el {
- for _, subKey := range e.Subkeys {
- if subKey.PrivateKey != nil && subKey.Sig.FlagsValid && (subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) {
- keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig, subKey.Revocations})
- }
- }
- }
- return
-}
-
-// ReadArmoredKeyRing reads one or more public/private keys from an armor keyring file.
-func ReadArmoredKeyRing(r io.Reader) (EntityList, error) {
- block, err := armor.Decode(r)
- if err == io.EOF {
- return nil, errors.InvalidArgumentError("no armored data found")
- }
- if err != nil {
- return nil, err
- }
- if block.Type != PublicKeyType && block.Type != PrivateKeyType {
- return nil, errors.InvalidArgumentError("expected public or private key block, got: " + block.Type)
- }
-
- return ReadKeyRing(block.Body)
-}
-
-// ReadKeyRing reads one or more public/private keys. Unsupported keys are
-// ignored as long as at least a single valid key is found.
-func ReadKeyRing(r io.Reader) (el EntityList, err error) {
- packets := packet.NewReader(r)
- var lastUnsupportedError error
-
- for {
- var e *Entity
- e, err = ReadEntity(packets)
- if err != nil {
- // TODO: warn about skipped unsupported/unreadable keys
- if _, ok := err.(errors.UnsupportedError); ok {
- lastUnsupportedError = err
- err = readToNextPublicKey(packets)
- } else if _, ok := err.(errors.StructuralError); ok {
- // Skip unreadable, badly-formatted keys
- lastUnsupportedError = err
- err = readToNextPublicKey(packets)
- }
- if err == io.EOF {
- err = nil
- break
- }
- if err != nil {
- el = nil
- break
- }
- } else {
- el = append(el, e)
- }
- }
-
- if len(el) == 0 && err == nil {
- err = lastUnsupportedError
- }
- return
-}
-
-// readToNextPublicKey reads packets until the start of the entity and leaves
-// the first packet of the new entity in the Reader.
-func readToNextPublicKey(packets *packet.Reader) (err error) {
- var p packet.Packet
- for {
- p, err = packets.Next()
- if err == io.EOF {
- return
- } else if err != nil {
- if _, ok := err.(errors.UnsupportedError); ok {
- continue
- }
- return
- }
-
- if pk, ok := p.(*packet.PublicKey); ok && !pk.IsSubkey {
- packets.Unread(p)
- return
- }
- }
-}
-
-// ReadEntity reads an entity (public key, identities, subkeys etc) from the
-// given Reader.
-func ReadEntity(packets *packet.Reader) (*Entity, error) {
- e := new(Entity)
- e.Identities = make(map[string]*Identity)
-
- p, err := packets.Next()
- if err != nil {
- return nil, err
- }
-
- var ok bool
- if e.PrimaryKey, ok = p.(*packet.PublicKey); !ok {
- if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok {
- packets.Unread(p)
- return nil, errors.StructuralError("first packet was not a public/private key")
- }
- e.PrimaryKey = &e.PrivateKey.PublicKey
- }
-
- if !e.PrimaryKey.PubKeyAlgo.CanSign() {
- return nil, errors.StructuralError("primary key cannot be used for signatures")
- }
-
- var revocations []*packet.Signature
- var directSignatures []*packet.Signature
-EachPacket:
- for {
- p, err := packets.Next()
- if err == io.EOF {
- break
- } else if err != nil {
- return nil, err
- }
-
- switch pkt := p.(type) {
- case *packet.UserId:
- if err := addUserID(e, packets, pkt); err != nil {
- return nil, err
- }
- case *packet.Signature:
- if pkt.SigType == packet.SigTypeKeyRevocation {
- revocations = append(revocations, pkt)
- } else if pkt.SigType == packet.SigTypeDirectSignature {
- directSignatures = append(directSignatures, pkt)
- }
- // Else, ignoring the signature as it does not follow anything
- // we would know to attach it to.
- case *packet.PrivateKey:
- if !pkt.IsSubkey {
- packets.Unread(p)
- break EachPacket
- }
- err = addSubkey(e, packets, &pkt.PublicKey, pkt)
- if err != nil {
- return nil, err
- }
- case *packet.PublicKey:
- if !pkt.IsSubkey {
- packets.Unread(p)
- break EachPacket
- }
- err = addSubkey(e, packets, pkt, nil)
- if err != nil {
- return nil, err
- }
- default:
- // we ignore unknown packets.
- }
- }
-
- if len(e.Identities) == 0 && e.PrimaryKey.Version < 6 {
- return nil, errors.StructuralError(fmt.Sprintf("v%d entity without any identities", e.PrimaryKey.Version))
- }
-
- // An implementation MUST ensure that a valid direct-key signature is present before using a v6 key.
- if e.PrimaryKey.Version == 6 {
- if len(directSignatures) == 0 {
- return nil, errors.StructuralError("v6 entity without a valid direct-key signature")
- }
- // Select main direct key signature.
- var mainDirectKeySelfSignature *packet.Signature
- for _, directSignature := range directSignatures {
- if directSignature.SigType == packet.SigTypeDirectSignature &&
- directSignature.CheckKeyIdOrFingerprint(e.PrimaryKey) &&
- (mainDirectKeySelfSignature == nil ||
- directSignature.CreationTime.After(mainDirectKeySelfSignature.CreationTime)) {
- mainDirectKeySelfSignature = directSignature
- }
- }
- if mainDirectKeySelfSignature == nil {
- return nil, errors.StructuralError("no valid direct-key self-signature for v6 primary key found")
- }
- // Check that the main self-signature is valid.
- err = e.PrimaryKey.VerifyDirectKeySignature(mainDirectKeySelfSignature)
- if err != nil {
- return nil, errors.StructuralError("invalid direct-key self-signature for v6 primary key")
- }
- e.SelfSignature = mainDirectKeySelfSignature
- e.Signatures = directSignatures
- }
-
- for _, revocation := range revocations {
- err = e.PrimaryKey.VerifyRevocationSignature(revocation)
- if err == nil {
- e.Revocations = append(e.Revocations, revocation)
- } else {
- // TODO: RFC 4880 5.2.3.15 defines revocation keys.
- return nil, errors.StructuralError("revocation signature signed by alternate key")
- }
- }
-
- return e, nil
-}
-
-func addUserID(e *Entity, packets *packet.Reader, pkt *packet.UserId) error {
- // Make a new Identity object, that we might wind up throwing away.
- // We'll only add it if we get a valid self-signature over this
- // userID.
- identity := new(Identity)
- identity.Name = pkt.Id
- identity.UserId = pkt
-
- for {
- p, err := packets.Next()
- if err == io.EOF {
- break
- } else if err != nil {
- return err
- }
-
- sig, ok := p.(*packet.Signature)
- if !ok {
- packets.Unread(p)
- break
- }
-
- if sig.SigType != packet.SigTypeGenericCert &&
- sig.SigType != packet.SigTypePersonaCert &&
- sig.SigType != packet.SigTypeCasualCert &&
- sig.SigType != packet.SigTypePositiveCert &&
- sig.SigType != packet.SigTypeCertificationRevocation {
- return errors.StructuralError("user ID signature with wrong type")
- }
-
- if sig.CheckKeyIdOrFingerprint(e.PrimaryKey) {
- if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil {
- return errors.StructuralError("user ID self-signature invalid: " + err.Error())
- }
- if sig.SigType == packet.SigTypeCertificationRevocation {
- identity.Revocations = append(identity.Revocations, sig)
- } else if identity.SelfSignature == nil || sig.CreationTime.After(identity.SelfSignature.CreationTime) {
- identity.SelfSignature = sig
- }
- identity.Signatures = append(identity.Signatures, sig)
- e.Identities[pkt.Id] = identity
- } else {
- identity.Signatures = append(identity.Signatures, sig)
- }
- }
-
- return nil
-}
-
-func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error {
- var subKey Subkey
- subKey.PublicKey = pub
- subKey.PrivateKey = priv
-
- for {
- p, err := packets.Next()
- if err == io.EOF {
- break
- } else if err != nil {
- return errors.StructuralError("subkey signature invalid: " + err.Error())
- }
-
- sig, ok := p.(*packet.Signature)
- if !ok {
- packets.Unread(p)
- break
- }
-
- if sig.SigType != packet.SigTypeSubkeyBinding && sig.SigType != packet.SigTypeSubkeyRevocation {
- return errors.StructuralError("subkey signature with wrong type")
- }
-
- if err := e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, sig); err != nil {
- return errors.StructuralError("subkey signature invalid: " + err.Error())
- }
-
- switch sig.SigType {
- case packet.SigTypeSubkeyRevocation:
- subKey.Revocations = append(subKey.Revocations, sig)
- case packet.SigTypeSubkeyBinding:
- if subKey.Sig == nil || sig.CreationTime.After(subKey.Sig.CreationTime) {
- subKey.Sig = sig
- }
- }
- }
-
- if subKey.Sig == nil {
- return errors.StructuralError("subkey packet not followed by signature")
- }
-
- e.Subkeys = append(e.Subkeys, subKey)
-
- return nil
-}
-
-// SerializePrivate serializes an Entity, including private key material, but
-// excluding signatures from other entities, to the given Writer.
-// Identities and subkeys are re-signed in case they changed since NewEntry.
-// If config is nil, sensible defaults will be used.
-func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) {
- if e.PrivateKey.Dummy() {
- return errors.ErrDummyPrivateKey("dummy private key cannot re-sign identities")
- }
- return e.serializePrivate(w, config, true)
-}
-
-// SerializePrivateWithoutSigning serializes an Entity, including private key
-// material, but excluding signatures from other entities, to the given Writer.
-// Self-signatures of identities and subkeys are not re-signed. This is useful
-// when serializing GNU dummy keys, among other things.
-// If config is nil, sensible defaults will be used.
-func (e *Entity) SerializePrivateWithoutSigning(w io.Writer, config *packet.Config) (err error) {
- return e.serializePrivate(w, config, false)
-}
-
-func (e *Entity) serializePrivate(w io.Writer, config *packet.Config, reSign bool) (err error) {
- if e.PrivateKey == nil {
- return goerrors.New("openpgp: private key is missing")
- }
- err = e.PrivateKey.Serialize(w)
- if err != nil {
- return
- }
- for _, revocation := range e.Revocations {
- err := revocation.Serialize(w)
- if err != nil {
- return err
- }
- }
- for _, directSignature := range e.Signatures {
- err := directSignature.Serialize(w)
- if err != nil {
- return err
- }
- }
- for _, ident := range e.Identities {
- err = ident.UserId.Serialize(w)
- if err != nil {
- return
- }
- if reSign {
- if ident.SelfSignature == nil {
- return goerrors.New("openpgp: can't re-sign identity without valid self-signature")
- }
- err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config)
- if err != nil {
- return
- }
- }
- for _, sig := range ident.Signatures {
- err = sig.Serialize(w)
- if err != nil {
- return err
- }
- }
- }
- for _, subkey := range e.Subkeys {
- err = subkey.PrivateKey.Serialize(w)
- if err != nil {
- return
- }
- if reSign {
- err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config)
- if err != nil {
- return
- }
- if subkey.Sig.EmbeddedSignature != nil {
- err = subkey.Sig.EmbeddedSignature.CrossSignKey(subkey.PublicKey, e.PrimaryKey,
- subkey.PrivateKey, config)
- if err != nil {
- return
- }
- }
- }
- for _, revocation := range subkey.Revocations {
- err := revocation.Serialize(w)
- if err != nil {
- return err
- }
- }
- err = subkey.Sig.Serialize(w)
- if err != nil {
- return
- }
- }
- return nil
-}
-
-// Serialize writes the public part of the given Entity to w, including
-// signatures from other entities. No private key material will be output.
-func (e *Entity) Serialize(w io.Writer) error {
- err := e.PrimaryKey.Serialize(w)
- if err != nil {
- return err
- }
- for _, revocation := range e.Revocations {
- err := revocation.Serialize(w)
- if err != nil {
- return err
- }
- }
- for _, directSignature := range e.Signatures {
- err := directSignature.Serialize(w)
- if err != nil {
- return err
- }
- }
- for _, ident := range e.Identities {
- err = ident.UserId.Serialize(w)
- if err != nil {
- return err
- }
- for _, sig := range ident.Signatures {
- err = sig.Serialize(w)
- if err != nil {
- return err
- }
- }
- }
- for _, subkey := range e.Subkeys {
- err = subkey.PublicKey.Serialize(w)
- if err != nil {
- return err
- }
- for _, revocation := range subkey.Revocations {
- err := revocation.Serialize(w)
- if err != nil {
- return err
- }
- }
- err = subkey.Sig.Serialize(w)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-// SignIdentity adds a signature to e, from signer, attesting that identity is
-// associated with e. The provided identity must already be an element of
-// e.Identities and the private key of signer must have been decrypted if
-// necessary.
-// If config is nil, sensible defaults will be used.
-func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Config) error {
- certificationKey, ok := signer.CertificationKey(config.Now())
- if !ok {
- return errors.InvalidArgumentError("no valid certification key found")
- }
-
- if certificationKey.PrivateKey.Encrypted {
- return errors.InvalidArgumentError("signing Entity's private key must be decrypted")
- }
-
- ident, ok := e.Identities[identity]
- if !ok {
- return errors.InvalidArgumentError("given identity string not found in Entity")
- }
-
- sig := createSignaturePacket(certificationKey.PublicKey, packet.SigTypeGenericCert, config)
-
- signingUserID := config.SigningUserId()
- if signingUserID != "" {
- if _, ok := signer.Identities[signingUserID]; !ok {
- return errors.InvalidArgumentError("signer identity string not found in signer Entity")
- }
- sig.SignerUserId = &signingUserID
- }
-
- if err := sig.SignUserId(identity, e.PrimaryKey, certificationKey.PrivateKey, config); err != nil {
- return err
- }
- ident.Signatures = append(ident.Signatures, sig)
- return nil
-}
-
-// RevokeKey generates a key revocation signature (packet.SigTypeKeyRevocation) with the
-// specified reason code and text (RFC4880 section-5.2.3.23).
-// If config is nil, sensible defaults will be used.
-func (e *Entity) RevokeKey(reason packet.ReasonForRevocation, reasonText string, config *packet.Config) error {
- revSig := createSignaturePacket(e.PrimaryKey, packet.SigTypeKeyRevocation, config)
- revSig.RevocationReason = &reason
- revSig.RevocationReasonText = reasonText
-
- if err := revSig.RevokeKey(e.PrimaryKey, e.PrivateKey, config); err != nil {
- return err
- }
- e.Revocations = append(e.Revocations, revSig)
- return nil
-}
-
-// RevokeSubkey generates a subkey revocation signature (packet.SigTypeSubkeyRevocation) for
-// a subkey with the specified reason code and text (RFC4880 section-5.2.3.23).
-// If config is nil, sensible defaults will be used.
-func (e *Entity) RevokeSubkey(sk *Subkey, reason packet.ReasonForRevocation, reasonText string, config *packet.Config) error {
- if err := e.PrimaryKey.VerifyKeySignature(sk.PublicKey, sk.Sig); err != nil {
- return errors.InvalidArgumentError("given subkey is not associated with this key")
- }
-
- revSig := createSignaturePacket(e.PrimaryKey, packet.SigTypeSubkeyRevocation, config)
- revSig.RevocationReason = &reason
- revSig.RevocationReasonText = reasonText
-
- if err := revSig.RevokeSubkey(sk.PublicKey, e.PrivateKey, config); err != nil {
- return err
- }
-
- sk.Revocations = append(sk.Revocations, revSig)
- return nil
-}
-
-func (e *Entity) primaryDirectSignature() *packet.Signature {
- return e.SelfSignature
-}
-
-// PrimarySelfSignature searches the entity for the self-signature that stores key preferences.
-// For V4 keys, returns the self-signature of the primary identity, and the identity.
-// For V6 keys, returns the latest valid direct-key self-signature, and no identity (nil).
-// This self-signature is to be used to check the key expiration,
-// algorithm preferences, and so on.
-func (e *Entity) PrimarySelfSignature() (*packet.Signature, *Identity) {
- if e.PrimaryKey.Version == 6 {
- return e.primaryDirectSignature(), nil
- }
- primaryIdentity := e.PrimaryIdentity()
- if primaryIdentity == nil {
- return nil, nil
- }
- return primaryIdentity.SelfSignature, primaryIdentity
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go
deleted file mode 100644
index 108fd096f3c..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go
+++ /dev/null
@@ -1,538 +0,0 @@
-package openpgp
-
-const expiringKeyHex = "c6c04d0451d0c680010800abbb021fd03ffc4e96618901180c3fdcb060ee69eeead97b91256d11420d80b5f1b51930248044130bd300605cf8a05b7a40d3d8cfb0a910be2e3db50dcd50a9c54064c2a5550801daa834ff4480b33d3d3ca495ff8a4e84a886977d17d998f881241a874083d8b995beab555b6d22b8a4817ab17ac3e7304f7d4d2c05c495fb2218348d3bc13651db1d92732e368a9dd7dcefa6eddff30b94706a9aaee47e9d39321460b740c59c6fc3c2fd8ab6c0fb868cb87c0051f0321301fe0f0e1820b15e7fb7063395769b525005c7e30a7ce85984f5cac00504e7b4fdc45d74958de8388436fd5c7ba9ea121f1c851b5911dd1b47a14d81a09e92ef37721e2325b6790011010001cd00c2c07b041001080025050251d0c680050900278d00060b09070803020415080a0203160201021901021b03021e01000a0910e7b484133a890a35ae4b0800a1beb82e7f28eaf5273d6af9d3391314f6280b2b624eaca2851f89a9ebcaf80ac589ebd509f168bc4322106ca2e2ce77a76e071a3c7444787d65216b5f05e82c77928860b92aace3b7d0327db59492f422eb9dfab7249266d37429870b091a98aba8724c2259ebf8f85093f21255eafa75aa841e31d94f2ac891b9755fed455e539044ee69fc47950b80e003fc9f298d695660f28329eaa38037c367efde1727458e514faf990d439a21461b719edaddf9296d3d0647b43ca56cb8dbf63b4fcf8b9968e7928c463470fab3b98e44d0d95645062f94b2d04fe56bd52822b71934db8ce845622c40b92fcbe765a142e7f38b61a6aa9606c8e8858dcd3b6eb1894acec04d0451d1f06b01080088bea67444e1789390e7c0335c86775502d58ec783d99c8ef4e06de235ed3dd4b0467f6f358d818c7d8989d43ec6d69fcbc8c32632d5a1b605e3fa8e41d695fcdcaa535936cd0157f9040dce362519803b908eafe838bb13216c885c6f93e9e8d5745607f0d062322085d6bdc760969149a8ff8dd9f5c18d9bfe2e6f63a06e17694cf1f67587c6fb70e9aebf90ffc528ca3b615ac7c9d4a21ea4f7c06f2e98fbbd90a859b8608bf9ea638e3a54289ce44c283110d0c45fa458de6251cd6e7baf71f80f12c8978340490fd90c92b81736ae902ed958e478dceae2835953d189c45d182aff02ea2be61b81d8e94430f041d638647b43e2fcb45fd512fbf5068b810011010001c2c06504180108000f050251d1f06b050900081095021b0c000a0910e7b484133a890a35e63407fe2ec88d6d1e6c9ce7553ece0cb2524747217bad29f251d33df84599ffcc900141a355abd62126800744068a5e05dc167056aa9205273dc7765a2ed49db15c2a83b8d6e6429c902136f1e12229086c1c10c0053242c2a4ae1930db58163387a48cad64607ff2153c320e42843dec28e3fce90e7399d63ac0affa2fee1f0adc0953c89eb3f46ef1d6c04328ed13b491669d5120a3782e3ffb7c69575fb77eebd108794f4dda9d34be2bae57e8e59ec8ebfda2f6f06104b2321be408ea146e2db482b00c5055c8618de36ac9716f80da2617e225556d0fce61b01c8cea2d1e0ea982c31711060ca370f2739366e1e708f38405d784b49d16a26cf62d152eae734327cec04d0451d1f07b010800d5af91c5e7c2fd8951c8d254eab0c97cdcb66822f868b79b78c366255059a68fd74ebca9adb9b970cd9e586690e6e0756705432306878c897b10a4b4ca0005966f99ac8fa4e6f9caf54bf8e53844544beee9872a7ac64c119cf1393d96e674254b661f61ee975633d0e8a8672531edb6bb8e211204e7754a9efa802342118eee850beea742bac95a3f706cc2024cf6037a308bb68162b2f53b9a6346a96e6d31871a2456186e24a1c7a82b82ac04afdfd57cd7fb9ba77a9c760d40b76a170f7be525e5fb6a9848cc726e806187710d9b190387df28700f321f988a392899f93815cc937f309129eb94d5299c5547cb2c085898e6639496e70d746c9d3fb9881d0011010001c2c06504180108000f050251d1f07b050900266305021b0c000a0910e7b484133a890a35bff207fd10dfe8c4a6ea1dd30568012b6fd6891a763c87ad0f7a1d112aad9e8e3239378a3b85588c235865bac2e614348cb4f216d7217f53b3ef48c192e0a4d31d64d7bfa5faccf21155965fa156e887056db644a05ad08a85cc6152d1377d9e37b46f4ff462bbe68ace2dc586ef90070314576c985d8037c2ba63f0a7dc17a62e15bd77e88bc61d9d00858979709f12304264a4cf4225c5cf86f12c8e19486cb9cdcc69f18f027e5f16f4ca8b50e28b3115eaff3a345acd21f624aef81f6ede515c1b55b26b84c1e32264754eab672d5489b287e7277ea855e0a5ff2aa9e8b8c76d579a964ec225255f4d57bf66639ccb34b64798846943e162a41096a7002ca21c7f56"
-const subkeyUsageHex = "988d04533a52bc010400d26af43085558f65b9e7dbc90cb9238015259aed5e954637adcfa2181548b2d0b60c65f1f42ec5081cbf1bc0a8aa4900acfb77070837c58f26012fbce297d70afe96e759ad63531f0037538e70dbf8e384569b9720d99d8eb39d8d0a2947233ed242436cb6ac7dfe74123354b3d0119b5c235d3dd9c9d6c004f8ffaf67ad8583001101000188b7041f010200210502533b8552170c8001ce094aa433f7040bb2ddf0be3893cb843d0fe70c020700000a0910a42704b92866382aa98404009d63d916a27543da4221c60087c33f1c44bec9998c5438018ed370cca4962876c748e94b73eb39c58eb698063f3fd6346d58dd2a11c0247934c4a9d71f24754f7468f96fb24c3e791dd2392b62f626148ad724189498cbf993db2df7c0cdc2d677c35da0f16cb16c9ce7c33b4de65a4a91b1d21a130ae9cc26067718910ef8e2b417556d627261203c756d627261407379642e65642e61753e88b80413010200220502533a52bc021b03060b090807030206150802090a0b0416020301021e01021780000a0910a42704b92866382a47840400c0c2bd04f5fca586de408b395b3c280a278259c93eaaa8b79a53b97003f8ed502a8a00446dd9947fb462677e4fcac0dac2f0701847d15130aadb6cd9e0705ea0cf5f92f129136c7be21a718d46c8e641eb7f044f2adae573e11ae423a0a9ca51324f03a8a2f34b91fa40c3cc764bee4dccadedb54c768ba0469b683ea53f1c29b88d04533a52bc01040099c92a5d6f8b744224da27bc2369127c35269b58bec179de6bbc038f749344222f85a31933224f26b70243c4e4b2d242f0c4777eaef7b5502f9dad6d8bf3aaeb471210674b74de2d7078af497d55f5cdad97c7bedfbc1b41e8065a97c9c3d344b21fc81d27723af8e374bc595da26ea242dccb6ae497be26eea57e563ed517e90011010001889f0418010200090502533a52bc021b0c000a0910a42704b92866382afa1403ff70284c2de8a043ff51d8d29772602fa98009b7861c540535f874f2c230af8caf5638151a636b21f8255003997ccd29747fdd06777bb24f9593bd7d98a3e887689bf902f999915fcc94625ae487e5d13e6616f89090ebc4fdc7eb5cad8943e4056995bb61c6af37f8043016876a958ec7ebf39c43d20d53b7f546cfa83e8d2604b88d04533b8283010400c0b529316dbdf58b4c54461e7e669dc11c09eb7f73819f178ccd4177b9182b91d138605fcf1e463262fabefa73f94a52b5e15d1904635541c7ea540f07050ce0fb51b73e6f88644cec86e91107c957a114f69554548a85295d2b70bd0b203992f76eb5d493d86d9eabcaa7ef3fc7db7e458438db3fcdb0ca1cc97c638439a9170011010001889f0418010200090502533b8283021b0c000a0910a42704b92866382adc6d0400cfff6258485a21675adb7a811c3e19ebca18851533f75a7ba317950b9997fda8d1a4c8c76505c08c04b6c2cc31dc704d33da36a21273f2b388a1a706f7c3378b66d887197a525936ed9a69acb57fe7f718133da85ec742001c5d1864e9c6c8ea1b94f1c3759cebfd93b18606066c063a63be86085b7e37bdbc65f9a915bf084bb901a204533b85cd110400aed3d2c52af2b38b5b67904b0ef73d6dd7aef86adb770e2b153cd22489654dcc91730892087bb9856ae2d9f7ed1eb48f214243fe86bfe87b349ebd7c30e630e49c07b21fdabf78b7a95c8b7f969e97e3d33f2e074c63552ba64a2ded7badc05ce0ea2be6d53485f6900c7860c7aa76560376ce963d7271b9b54638a4028b573f00a0d8854bfcdb04986141568046202192263b9b67350400aaa1049dbc7943141ef590a70dcb028d730371d92ea4863de715f7f0f16d168bd3dc266c2450457d46dcbbf0b071547e5fbee7700a820c3750b236335d8d5848adb3c0da010e998908dfd93d961480084f3aea20b247034f8988eccb5546efaa35a92d0451df3aaf1aee5aa36a4c4d462c760ecd9cebcabfbe1412b1f21450f203fd126687cd486496e971a87fd9e1a8a765fe654baa219a6871ab97768596ab05c26c1aeea8f1a2c72395a58dbc12ef9640d2b95784e974a4d2d5a9b17c25fedacfe551bda52602de8f6d2e48443f5dd1a2a2a8e6a5e70ecdb88cd6e766ad9745c7ee91d78cc55c3d06536b49c3fee6c3d0b6ff0fb2bf13a314f57c953b8f4d93bf88e70418010200090502533b85cd021b0200520910a42704b92866382a47200419110200060502533b85cd000a091042ce2c64bc0ba99214b2009e26b26852c8b13b10c35768e40e78fbbb48bd084100a0c79d9ea0844fa5853dd3c85ff3ecae6f2c9dd6c557aa04008bbbc964cd65b9b8299d4ebf31f41cc7264b8cf33a00e82c5af022331fac79efc9563a822497ba012953cefe2629f1242fcdcb911dbb2315985bab060bfd58261ace3c654bdbbe2e8ed27a46e836490145c86dc7bae15c011f7e1ffc33730109b9338cd9f483e7cef3d2f396aab5bd80efb6646d7e778270ee99d934d187dd98"
-const revokedKeyHex = "988d045331ce82010400c4fdf7b40a5477f206e6ee278eaef888ca73bf9128a9eef9f2f1ddb8b7b71a4c07cfa241f028a04edb405e4d916c61d6beabc333813dc7b484d2b3c52ee233c6a79b1eea4e9cc51596ba9cd5ac5aeb9df62d86ea051055b79d03f8a4fa9f38386f5bd17529138f3325d46801514ea9047977e0829ed728e68636802796801be10011010001889f04200102000905025331d0e3021d03000a0910a401d9f09a34f7c042aa040086631196405b7e6af71026b88e98012eab44aa9849f6ef3fa930c7c9f23deaedba9db1538830f8652fb7648ec3fcade8dbcbf9eaf428e83c6cbcc272201bfe2fbb90d41963397a7c0637a1a9d9448ce695d9790db2dc95433ad7be19eb3de72dacf1d6db82c3644c13eae2a3d072b99bb341debba012c5ce4006a7d34a1f4b94b444526567205265766f6b657220283c52656727732022424d204261726973746122204b657920262530305c303e5c29203c72656740626d626172697374612e636f2e61753e88b704130102002205025331ce82021b03060b090807030206150802090a0b0416020301021e01021780000a0910a401d9f09a34f7c0019c03f75edfbeb6a73e7225ad3cc52724e2872e04260d7daf0d693c170d8c4b243b8767bc7785763533febc62ec2600c30603c433c095453ede59ff2fcabeb84ce32e0ed9d5cf15ffcbc816202b64370d4d77c1e9077d74e94a16fb4fa2e5bec23a56d7a73cf275f91691ae1801a976fcde09e981a2f6327ac27ea1fecf3185df0d56889c04100102000605025331cfb5000a0910fe9645554e8266b64b4303fc084075396674fb6f778d302ac07cef6bc0b5d07b66b2004c44aef711cbac79617ef06d836b4957522d8772dd94bf41a2f4ac8b1ee6d70c57503f837445a74765a076d07b829b8111fc2a918423ddb817ead7ca2a613ef0bfb9c6b3562aec6c3cf3c75ef3031d81d95f6563e4cdcc9960bcb386c5d757b104fcca5fe11fc709df884604101102000605025331cfe7000a09107b15a67f0b3ddc0317f6009e360beea58f29c1d963a22b962b80788c3fa6c84e009d148cfde6b351469b8eae91187eff07ad9d08fcaab88d045331ce820104009f25e20a42b904f3fa555530fe5c46737cf7bd076c35a2a0d22b11f7e0b61a69320b768f4a80fe13980ce380d1cfc4a0cd8fbe2d2e2ef85416668b77208baa65bf973fe8e500e78cc310d7c8705cdb34328bf80e24f0385fce5845c33bc7943cf6b11b02348a23da0bf6428e57c05135f2dc6bd7c1ce325d666d5a5fd2fd5e410011010001889f04180102000905025331ce82021b0c000a0910a401d9f09a34f7c0418003fe34feafcbeaef348a800a0d908a7a6809cc7304017d820f70f0474d5e23cb17e38b67dc6dca282c6ca00961f4ec9edf2738d0f087b1d81e4871ef08e1798010863afb4eac4c44a376cb343be929c5be66a78cfd4456ae9ec6a99d97f4e1c3ff3583351db2147a65c0acef5c003fb544ab3a2e2dc4d43646f58b811a6c3a369d1f"
-const revokedSubkeyHex = "988d04533121f6010400aefc803a3e4bb1a61c86e8a86d2726c6a43e0079e9f2713f1fa017e9854c83877f4aced8e331d675c67ea83ddab80aacbfa0b9040bb12d96f5a3d6be09455e2a76546cbd21677537db941cab710216b6d24ec277ee0bd65b910f416737ed120f6b93a9d3b306245c8cfd8394606fdb462e5cf43c551438d2864506c63367fc890011010001b41d416c696365203c616c69636540626d626172697374612e636f2e61753e88bb041301020025021b03060b090807030206150802090a0b0416020301021e01021780050253312798021901000a09104ef7e4beccde97f015a803ff5448437780f63263b0df8442a995e7f76c221351a51edd06f2063d8166cf3157aada4923dfc44aa0f2a6a4da5cf83b7fe722ba8ab416c976e77c6b5682e7f1069026673bd0de56ba06fd5d7a9f177607f277d9b55ff940a638c3e68525c67517e2b3d976899b93ca267f705b3e5efad7d61220e96b618a4497eab8d04403d23f8846041011020006050253312910000a09107b15a67f0b3ddc03d96e009f50b6365d86c4be5d5e9d0ea42d5e56f5794c617700a0ab274e19c2827780016d23417ce89e0a2c0d987d889c04100102000605025331cf7a000a0910a401d9f09a34f7c0ee970400aca292f213041c9f3b3fc49148cbda9d84afee6183c8dd6c5ff2600b29482db5fecd4303797be1ee6d544a20a858080fec43412061c9a71fae4039fd58013b4ae341273e6c66ad4c7cdd9e68245bedb260562e7b166f2461a1032f2b38c0e0e5715fb3d1656979e052b55ca827a76f872b78a9fdae64bc298170bfcebedc1271b41a416c696365203c616c696365407379646973702e6f722e61753e88b804130102002205025331278b021b03060b090807030206150802090a0b0416020301021e01021780000a09104ef7e4beccde97f06a7003fa03c3af68d272ebc1fa08aa72a03b02189c26496a2833d90450801c4e42c5b5f51ad96ce2d2c9cef4b7c02a6a2fcf1412d6a2d486098eb762f5010a201819c17fd2888aec8eda20c65a3b75744de7ee5cc8ac7bfc470cbe3cb982720405a27a3c6a8c229cfe36905f881b02ed5680f6a8f05866efb9d6c5844897e631deb949ca8846041011020006050253312910000a09107b15a67f0b3ddc0347bc009f7fa35db59147469eb6f2c5aaf6428accb138b22800a0caa2f5f0874bacc5909c652a57a31beda65eddd5889c04100102000605025331cf7a000a0910a401d9f09a34f7c0316403ff46f2a5c101256627f16384d34a38fb47a6c88ba60506843e532d91614339fccae5f884a5741e7582ffaf292ba38ee10a270a05f139bde3814b6a077e8cd2db0f105ebea2a83af70d385f13b507fac2ad93ff79d84950328bb86f3074745a8b7f9b64990fb142e2a12976e27e8d09a28dc5621f957ac49091116da410ac3cbde1b88d04533121f6010400cbd785b56905e4192e2fb62a720727d43c4fa487821203cf72138b884b78b701093243e1d8c92a0248a6c0203a5a88693da34af357499abacaf4b3309c640797d03093870a323b4b6f37865f6eaa2838148a67df4735d43a90ca87942554cdf1c4a751b1e75f9fd4ce4e97e278d6c1c7ed59d33441df7d084f3f02beb68896c70011010001889f0418010200090502533121f6021b0c000a09104ef7e4beccde97f0b98b03fc0a5ccf6a372995835a2f5da33b282a7d612c0ab2a97f59cf9fff73e9110981aac2858c41399afa29624a7fd8a0add11654e3d882c0fd199e161bdad65e5e2548f7b68a437ea64293db1246e3011cbb94dc1bcdeaf0f2539bd88ff16d95547144d97cead6a8c5927660a91e6db0d16eb36b7b49a3525b54d1644e65599b032b7eb901a204533127a0110400bd3edaa09eff9809c4edc2c2a0ebe52e53c50a19c1e49ab78e6167bf61473bb08f2050d78a5cbbc6ed66aff7b42cd503f16b4a0b99fa1609681fca9b7ce2bbb1a5b3864d6cdda4d7ef7849d156d534dea30fb0efb9e4cf8959a2b2ce623905882d5430b995a15c3b9fe92906086788b891002924f94abe139b42cbbfaaabe42f00a0b65dc1a1ad27d798adbcb5b5ad02d2688c89477b03ff4eebb6f7b15a73b96a96bed201c0e5e4ea27e4c6e2dd1005b94d4b90137a5b1cf5e01c6226c070c4cc999938101578877ee76d296b9aab8246d57049caacf489e80a3f40589cade790a020b1ac146d6f7a6241184b8c7fcde680eae3188f5dcbe846d7f7bdad34f6fcfca08413e19c1d5df83fc7c7c627d493492e009c2f52a80400a2fe82de87136fd2e8845888c4431b032ba29d9a29a804277e31002a8201fb8591a3e55c7a0d0881496caf8b9fb07544a5a4879291d0dc026a0ea9e5bd88eb4aa4947bbd694b25012e208a250d65ddc6f1eea59d3aed3b4ec15fcab85e2afaa23a40ab1ef9ce3e11e1bc1c34a0e758e7aa64deb8739276df0af7d4121f834a9b88e70418010200090502533127a0021b02005209104ef7e4beccde97f047200419110200060502533127a0000a0910dbce4ee19529437fe045009c0b32f5ead48ee8a7e98fac0dea3d3e6c0e2c552500a0ad71fadc5007cfaf842d9b7db3335a8cdad15d3d1a6404009b08e2c68fe8f3b45c1bb72a4b3278cdf3012aa0f229883ad74aa1f6000bb90b18301b2f85372ca5d6b9bf478d235b733b1b197d19ccca48e9daf8e890cb64546b4ce1b178faccfff07003c172a2d4f5ebaba9f57153955f3f61a9b80a4f5cb959908f8b211b03b7026a8a82fc612bfedd3794969bcf458c4ce92be215a1176ab88d045331d144010400a5063000c5aaf34953c1aa3bfc95045b3aab9882b9a8027fecfe2142dc6b47ba8aca667399990244d513dd0504716908c17d92c65e74219e004f7b83fc125e575dd58efec3ab6dd22e3580106998523dea42ec75bf9aa111734c82df54630bebdff20fe981cfc36c76f865eb1c2fb62c9e85bc3a6e5015a361a2eb1c8431578d0011010001889f04280102000905025331d433021d03000a09104ef7e4beccde97f02e5503ff5e0630d1b65291f4882b6d40a29da4616bb5088717d469fbcc3648b8276de04a04988b1f1b9f3e18f52265c1f8b6c85861691c1a6b8a3a25a1809a0b32ad330aec5667cb4262f4450649184e8113849b05e5ad06a316ea80c001e8e71838190339a6e48bbde30647bcf245134b9a97fa875c1d83a9862cae87ffd7e2c4ce3a1b89013d04180102000905025331d144021b0200a809104ef7e4beccde97f09d2004190102000605025331d144000a0910677815e371c2fd23522203fe22ab62b8e7a151383cea3edd3a12995693911426f8ccf125e1f6426388c0010f88d9ca7da2224aee8d1c12135998640c5e1813d55a93df472faae75bef858457248db41b4505827590aeccf6f9eb646da7f980655dd3050c6897feddddaca90676dee856d66db8923477d251712bb9b3186b4d0114daf7d6b59272b53218dd1da94a03ff64006fcbe71211e5daecd9961fba66cdb6de3f914882c58ba5beddeba7dcb950c1156d7fba18c19ea880dccc800eae335deec34e3b84ac75ffa24864f782f87815cda1c0f634b3dd2fa67cea30811d21723d21d9551fa12ccbcfa62b6d3a15d01307b99925707992556d50065505b090aadb8579083a20fe65bd2a270da9b011"
-
-const missingCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-Charset: UTF-8
-
-mQENBFMYynYBCACVOZ3/e8Bm2b9KH9QyIlHGo/i1bnkpqsgXj8tpJ2MIUOnXMMAY
-ztW7kKFLCmgVdLIC0vSoLA4yhaLcMojznh/2CcUglZeb6Ao8Gtelr//Rd5DRfPpG
-zqcfUo+m+eO1co2Orabw0tZDfGpg5p3AYl0hmxhUyYSc/xUq93xL1UJzBFgYXY54
-QsM8dgeQgFseSk/YvdP5SMx1ev+eraUyiiUtWzWrWC1TdyRa5p4UZg6Rkoppf+WJ
-QrW6BWrhAtqATHc8ozV7uJjeONjUEq24roRc/OFZdmQQGK6yrzKnnbA6MdHhqpdo
-9kWDcXYb7pSE63Lc+OBa5X2GUVvXJLS/3nrtABEBAAG0F2ludmFsaWQtc2lnbmlu
-Zy1zdWJrZXlziQEoBBMBAgASBQJTnKB5AhsBAgsHAhUIAh4BAAoJEO3UDQUIHpI/
-dN4H/idX4FQ1LIZCnpHS/oxoWQWfpRgdKAEM0qCqjMgiipJeEwSQbqjTCynuh5/R
-JlODDz85ABR06aoF4l5ebGLQWFCYifPnJZ/Yf5OYcMGtb7dIbqxWVFL9iLMO/oDL
-ioI3dotjPui5e+2hI9pVH1UHB/bZ/GvMGo6Zg0XxLPolKQODMVjpjLAQ0YJ3spew
-RAmOGre6tIvbDsMBnm8qREt7a07cBJ6XK7xjxYaZHQBiHVxyEWDa6gyANONx8duW
-/fhQ/zDTnyVM/ik6VO0Ty9BhPpcEYLFwh5c1ilFari1ta3e6qKo6ZGa9YMk/REhu
-yBHd9nTkI+0CiQUmbckUiVjDKKe5AQ0EUxjKdgEIAJcXQeP+NmuciE99YcJoffxv
-2gVLU4ZXBNHEaP0mgaJ1+tmMD089vUQAcyGRvw8jfsNsVZQIOAuRxY94aHQhIRHR
-bUzBN28ofo/AJJtfx62C15xt6fDKRV6HXYqAiygrHIpEoRLyiN69iScUsjIJeyFL
-C8wa72e8pSL6dkHoaV1N9ZH/xmrJ+k0vsgkQaAh9CzYufncDxcwkoP+aOlGtX1gP
-WwWoIbz0JwLEMPHBWvDDXQcQPQTYQyj+LGC9U6f9VZHN25E94subM1MjuT9OhN9Y
-MLfWaaIc5WyhLFyQKW2Upofn9wSFi8ubyBnv640Dfd0rVmaWv7LNTZpoZ/GbJAMA
-EQEAAYkBHwQYAQIACQUCU5ygeQIbAgAKCRDt1A0FCB6SP0zCB/sEzaVR38vpx+OQ
-MMynCBJrakiqDmUZv9xtplY7zsHSQjpd6xGflbU2n+iX99Q+nav0ETQZifNUEd4N
-1ljDGQejcTyKD6Pkg6wBL3x9/RJye7Zszazm4+toJXZ8xJ3800+BtaPoI39akYJm
-+ijzbskvN0v/j5GOFJwQO0pPRAFtdHqRs9Kf4YanxhedB4dIUblzlIJuKsxFit6N
-lgGRblagG3Vv2eBszbxzPbJjHCgVLR3RmrVezKOsZjr/2i7X+xLWIR0uD3IN1qOW
-CXQxLBizEEmSNVNxsp7KPGTLnqO3bPtqFirxS9PJLIMPTPLNBY7ZYuPNTMqVIUWF
-4artDmrG
-=7FfJ
------END PGP PUBLIC KEY BLOCK-----`
-
-const invalidCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-
-mQENBFMYynYBCACVOZ3/e8Bm2b9KH9QyIlHGo/i1bnkpqsgXj8tpJ2MIUOnXMMAY
-ztW7kKFLCmgVdLIC0vSoLA4yhaLcMojznh/2CcUglZeb6Ao8Gtelr//Rd5DRfPpG
-zqcfUo+m+eO1co2Orabw0tZDfGpg5p3AYl0hmxhUyYSc/xUq93xL1UJzBFgYXY54
-QsM8dgeQgFseSk/YvdP5SMx1ev+eraUyiiUtWzWrWC1TdyRa5p4UZg6Rkoppf+WJ
-QrW6BWrhAtqATHc8ozV7uJjeONjUEq24roRc/OFZdmQQGK6yrzKnnbA6MdHhqpdo
-9kWDcXYb7pSE63Lc+OBa5X2GUVvXJLS/3nrtABEBAAG0F2ludmFsaWQtc2lnbmlu
-Zy1zdWJrZXlziQEoBBMBAgASBQJTnKB5AhsBAgsHAhUIAh4BAAoJEO3UDQUIHpI/
-dN4H/idX4FQ1LIZCnpHS/oxoWQWfpRgdKAEM0qCqjMgiipJeEwSQbqjTCynuh5/R
-JlODDz85ABR06aoF4l5ebGLQWFCYifPnJZ/Yf5OYcMGtb7dIbqxWVFL9iLMO/oDL
-ioI3dotjPui5e+2hI9pVH1UHB/bZ/GvMGo6Zg0XxLPolKQODMVjpjLAQ0YJ3spew
-RAmOGre6tIvbDsMBnm8qREt7a07cBJ6XK7xjxYaZHQBiHVxyEWDa6gyANONx8duW
-/fhQ/zDTnyVM/ik6VO0Ty9BhPpcEYLFwh5c1ilFari1ta3e6qKo6ZGa9YMk/REhu
-yBHd9nTkI+0CiQUmbckUiVjDKKe5AQ0EUxjKdgEIAIINDqlj7X6jYKc6DjwrOkjQ
-UIRWbQQar0LwmNilehmt70g5DCL1SYm9q4LcgJJ2Nhxj0/5qqsYib50OSWMcKeEe
-iRXpXzv1ObpcQtI5ithp0gR53YPXBib80t3bUzomQ5UyZqAAHzMp3BKC54/vUrSK
-FeRaxDzNLrCeyI00+LHNUtwghAqHvdNcsIf8VRumK8oTm3RmDh0TyjASWYbrt9c8
-R1Um3zuoACOVy+mEIgIzsfHq0u7dwYwJB5+KeM7ZLx+HGIYdUYzHuUE1sLwVoELh
-+SHIGHI1HDicOjzqgajShuIjj5hZTyQySVprrsLKiXS6NEwHAP20+XjayJ/R3tEA
-EQEAAYkCPgQYAQIBKAUCU5ygeQIbAsBdIAQZAQIABgUCU5ygeQAKCRCpVlnFZmhO
-52RJB/9uD1MSa0wjY6tHOIgquZcP3bHBvHmrHNMw9HR2wRCMO91ZkhrpdS3ZHtgb
-u3/55etj0FdvDo1tb8P8FGSVtO5Vcwf5APM8sbbqoi8L951Q3i7qt847lfhu6sMl
-w0LWFvPTOLHrliZHItPRjOltS1WAWfr2jUYhsU9ytaDAJmvf9DujxEOsN5G1YJep
-54JCKVCkM/y585Zcnn+yxk/XwqoNQ0/iJUT9qRrZWvoeasxhl1PQcwihCwss44A+
-YXaAt3hbk+6LEQuZoYS73yR3WHj+42tfm7YxRGeubXfgCEz/brETEWXMh4pe0vCL
-bfWrmfSPq2rDegYcAybxRQz0lF8PAAoJEO3UDQUIHpI/exkH/0vQfdHA8g/N4T6E
-i6b1CUVBAkvtdJpCATZjWPhXmShOw62gkDw306vHPilL4SCvEEi4KzG72zkp6VsB
-DSRcpxCwT4mHue+duiy53/aRMtSJ+vDfiV1Vhq+3sWAck/yUtfDU9/u4eFaiNok1
-8/Gd7reyuZt5CiJnpdPpjCwelK21l2w7sHAnJF55ITXdOxI8oG3BRKufz0z5lyDY
-s2tXYmhhQIggdgelN8LbcMhWs/PBbtUr6uZlNJG2lW1yscD4aI529VjwJlCeo745
-U7pO4eF05VViUJ2mmfoivL3tkhoTUWhx8xs8xCUcCg8DoEoSIhxtOmoTPR22Z9BL
-6LCg2mg=
-=Dhm4
------END PGP PUBLIC KEY BLOCK-----`
-
-const goodCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-Version: GnuPG v1
-
-mI0EVUqeVwEEAMufHRrMPWK3gyvi0O0tABCs/oON9zV9KDZlr1a1M91ShCSFwCPo
-7r80PxdWVWcj0V5h50/CJYtpN3eE/mUIgW2z1uDYQF1OzrQ8ubrksfsJvpAhENom
-lTQEppv9mV8qhcM278teb7TX0pgrUHLYF5CfPdp1L957JLLXoQR/lwLVABEBAAG0
-E2dvb2Qtc2lnbmluZy1zdWJrZXmIuAQTAQIAIgUCVUqeVwIbAwYLCQgHAwIGFQgC
-CQoLBBYCAwECHgECF4AACgkQNRjL95IRWP69XQQAlH6+eyXJN4DZTLX78KGjHrsw
-6FCvxxClEPtPUjcJy/1KCRQmtLAt9PbbA78dvgzjDeZMZqRAwdjyJhjyg/fkU2OH
-7wq4ktjUu+dLcOBb+BFMEY+YjKZhf6EJuVfxoTVr5f82XNPbYHfTho9/OABKH6kv
-X70PaKZhbwnwij8Nts65AaIEVUqftREEAJ3WxZfqAX0bTDbQPf2CMT2IVMGDfhK7
-GyubOZgDFFjwUJQvHNvsrbeGLZ0xOBumLINyPO1amIfTgJNm1iiWFWfmnHReGcDl
-y5mpYG60Mb79Whdcer7CMm3AqYh/dW4g6IB02NwZMKoUHo3PXmFLxMKXnWyJ0clw
-R0LI/Qn509yXAKDh1SO20rqrBM+EAP2c5bfI98kyNwQAi3buu94qo3RR1ZbvfxgW
-CKXDVm6N99jdZGNK7FbRifXqzJJDLcXZKLnstnC4Sd3uyfyf1uFhmDLIQRryn5m+
-LBYHfDBPN3kdm7bsZDDq9GbTHiFZUfm/tChVKXWxkhpAmHhU/tH6GGzNSMXuIWSO
-aOz3Rqq0ED4NXyNKjdF9MiwD/i83S0ZBc0LmJYt4Z10jtH2B6tYdqnAK29uQaadx
-yZCX2scE09UIm32/w7pV77CKr1Cp/4OzAXS1tmFzQ+bX7DR+Gl8t4wxr57VeEMvl
-BGw4Vjh3X8//m3xynxycQU18Q1zJ6PkiMyPw2owZ/nss3hpSRKFJsxMLhW3fKmKr
-Ey2KiOcEGAECAAkFAlVKn7UCGwIAUgkQNRjL95IRWP5HIAQZEQIABgUCVUqftQAK
-CRD98VjDN10SqkWrAKDTpEY8D8HC02E/KVC5YUI01B30wgCgurpILm20kXEDCeHp
-C5pygfXw1DJrhAP+NyPJ4um/bU1I+rXaHHJYroYJs8YSweiNcwiHDQn0Engh/mVZ
-SqLHvbKh2dL/RXymC3+rjPvQf5cup9bPxNMa6WagdYBNAfzWGtkVISeaQW+cTEp/
-MtgVijRGXR/lGLGETPg2X3Afwn9N9bLMBkBprKgbBqU7lpaoPupxT61bL70=
-=vtbN
------END PGP PUBLIC KEY BLOCK-----`
-
-const revokedUserIDKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-
-mQENBFsgO5EBCADhREPmcjsPkXe1z7ctvyWL0S7oa9JaoGZ9oPDHFDlQxd0qlX2e
-DZJZDg0qYvVixmaULIulApq1puEsaJCn3lHUbHlb4PYKwLEywYXM28JN91KtLsz/
-uaEX2KC5WqeP40utmzkNLq+oRX/xnRMgwbO7yUNVG2UlEa6eI+xOXO3YtLdmJMBW
-ClQ066ZnOIzEo1JxnIwha1CDBMWLLfOLrg6l8InUqaXbtEBbnaIYO6fXVXELUjkx
-nmk7t/QOk0tXCy8muH9UDqJkwDUESY2l79XwBAcx9riX8vY7vwC34pm22fAUVLCJ
-x1SJx0J8bkeNp38jKM2Zd9SUQqSbfBopQ4pPABEBAAG0I0dvbGFuZyBHb3BoZXIg
-PG5vLXJlcGx5QGdvbGFuZy5jb20+iQFUBBMBCgA+FiEE5Ik5JLcNx6l6rZfw1oFy
-9I6cUoMFAlsgO5ECGwMFCQPCZwAFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AACgkQ
-1oFy9I6cUoMIkwf8DNPeD23i4jRwd/pylbvxwZintZl1fSwTJW1xcOa1emXaEtX2
-depuqhP04fjlRQGfsYAQh7X9jOJxAHjTmhqFBi5sD7QvKU00cPFYbJ/JTx0B41bl
-aXnSbGhRPh63QtEZL7ACAs+shwvvojJqysx7kyVRu0EW2wqjXdHwR/SJO6nhNBa2
-DXzSiOU/SUA42mmG+5kjF8Aabq9wPwT9wjraHShEweNerNMmOqJExBOy3yFeyDpa
-XwEZFzBfOKoxFNkIaVf5GSdIUGhFECkGvBMB935khftmgR8APxdU4BE7XrXexFJU
-8RCuPXonm4WQOwTWR0vQg64pb2WKAzZ8HhwTGbQiR29sYW5nIEdvcGhlciA8cmV2
-b2tlZEBnb2xhbmcuY29tPokBNgQwAQoAIBYhBOSJOSS3Dcepeq2X8NaBcvSOnFKD
-BQJbIDv3Ah0AAAoJENaBcvSOnFKDfWMIAKhI/Tvu3h8fSUxp/gSAcduT6bC1JttG
-0lYQ5ilKB/58lBUA5CO3ZrKDKlzW3M8VEcvohVaqeTMKeoQd5rCZq8KxHn/KvN6N
-s85REfXfniCKfAbnGgVXX3kDmZ1g63pkxrFu0fDZjVDXC6vy+I0sGyI/Inro0Pzb
-tvn0QCsxjapKK15BtmSrpgHgzVqVg0cUp8vqZeKFxarYbYB2idtGRci4b9tObOK0
-BSTVFy26+I/mrFGaPrySYiy2Kz5NMEcRhjmTxJ8jSwEr2O2sUR0yjbgUAXbTxDVE
-/jg5fQZ1ACvBRQnB7LvMHcInbzjyeTM3FazkkSYQD6b97+dkWwb1iWG5AQ0EWyA7
-kQEIALkg04REDZo1JgdYV4x8HJKFS4xAYWbIva1ZPqvDNmZRUbQZR2+gpJGEwn7z
-VofGvnOYiGW56AS5j31SFf5kro1+1bZQ5iOONBng08OOo58/l1hRseIIVGB5TGSa
-PCdChKKHreJI6hS3mShxH6hdfFtiZuB45rwoaArMMsYcjaezLwKeLc396cpUwwcZ
-snLUNd1Xu5EWEF2OdFkZ2a1qYdxBvAYdQf4+1Nr+NRIx1u1NS9c8jp3PuMOkrQEi
-bNtc1v6v0Jy52mKLG4y7mC/erIkvkQBYJdxPaP7LZVaPYc3/xskcyijrJ/5ufoD8
-K71/ShtsZUXSQn9jlRaYR0EbojMAEQEAAYkBPAQYAQoAJhYhBOSJOSS3Dcepeq2X
-8NaBcvSOnFKDBQJbIDuRAhsMBQkDwmcAAAoJENaBcvSOnFKDkFMIAIt64bVZ8x7+
-TitH1bR4pgcNkaKmgKoZz6FXu80+SnbuEt2NnDyf1cLOSimSTILpwLIuv9Uft5Pb
-OraQbYt3xi9yrqdKqGLv80bxqK0NuryNkvh9yyx5WoG1iKqMj9/FjGghuPrRaT4l
-QinNAghGVkEy1+aXGFrG2DsOC1FFI51CC2WVTzZ5RwR2GpiNRfESsU1rZAUqf/2V
-yJl9bD5R4SUNy8oQmhOxi+gbhD4Ao34e4W0ilibslI/uawvCiOwlu5NGd8zv5n+U
-heiQvzkApQup5c+BhH5zFDFdKJ2CBByxw9+7QjMFI/wgLixKuE0Ob2kAokXf7RlB
-7qTZOahrETw=
-=IKnw
------END PGP PUBLIC KEY BLOCK-----`
-
-const keyWithFirstUserIDRevoked = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-Version: OpenPGP.js v4.10.10
-Comment: https://openpgpjs.org
-
-xsBNBFsgO5EBCADhREPmcjsPkXe1z7ctvyWL0S7oa9JaoGZ9oPDHFDlQxd0q
-lX2eDZJZDg0qYvVixmaULIulApq1puEsaJCn3lHUbHlb4PYKwLEywYXM28JN
-91KtLsz/uaEX2KC5WqeP40utmzkNLq+oRX/xnRMgwbO7yUNVG2UlEa6eI+xO
-XO3YtLdmJMBWClQ066ZnOIzEo1JxnIwha1CDBMWLLfOLrg6l8InUqaXbtEBb
-naIYO6fXVXELUjkxnmk7t/QOk0tXCy8muH9UDqJkwDUESY2l79XwBAcx9riX
-8vY7vwC34pm22fAUVLCJx1SJx0J8bkeNp38jKM2Zd9SUQqSbfBopQ4pPABEB
-AAHNIkdvbGFuZyBHb3BoZXIgPHJldm9rZWRAZ29sYW5nLmNvbT7CwI0EMAEK
-ACAWIQTkiTkktw3HqXqtl/DWgXL0jpxSgwUCWyA79wIdAAAhCRDWgXL0jpxS
-gxYhBOSJOSS3Dcepeq2X8NaBcvSOnFKDfWMIAKhI/Tvu3h8fSUxp/gSAcduT
-6bC1JttG0lYQ5ilKB/58lBUA5CO3ZrKDKlzW3M8VEcvohVaqeTMKeoQd5rCZ
-q8KxHn/KvN6Ns85REfXfniCKfAbnGgVXX3kDmZ1g63pkxrFu0fDZjVDXC6vy
-+I0sGyI/Inro0Pzbtvn0QCsxjapKK15BtmSrpgHgzVqVg0cUp8vqZeKFxarY
-bYB2idtGRci4b9tObOK0BSTVFy26+I/mrFGaPrySYiy2Kz5NMEcRhjmTxJ8j
-SwEr2O2sUR0yjbgUAXbTxDVE/jg5fQZ1ACvBRQnB7LvMHcInbzjyeTM3Fazk
-kSYQD6b97+dkWwb1iWHNI0dvbGFuZyBHb3BoZXIgPG5vLXJlcGx5QGdvbGFu
-Zy5jb20+wsCrBBMBCgA+FiEE5Ik5JLcNx6l6rZfw1oFy9I6cUoMFAlsgO5EC
-GwMFCQPCZwAFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AAIQkQ1oFy9I6cUoMW
-IQTkiTkktw3HqXqtl/DWgXL0jpxSgwiTB/wM094PbeLiNHB3+nKVu/HBmKe1
-mXV9LBMlbXFw5rV6ZdoS1fZ16m6qE/Th+OVFAZ+xgBCHtf2M4nEAeNOaGoUG
-LmwPtC8pTTRw8Vhsn8lPHQHjVuVpedJsaFE+HrdC0RkvsAICz6yHC++iMmrK
-zHuTJVG7QRbbCqNd0fBH9Ik7qeE0FrYNfNKI5T9JQDjaaYb7mSMXwBpur3A/
-BP3COtodKETB416s0yY6okTEE7LfIV7IOlpfARkXMF84qjEU2QhpV/kZJ0hQ
-aEUQKQa8EwH3fmSF+2aBHwA/F1TgETtetd7EUlTxEK49eiebhZA7BNZHS9CD
-rilvZYoDNnweHBMZzsBNBFsgO5EBCAC5INOERA2aNSYHWFeMfByShUuMQGFm
-yL2tWT6rwzZmUVG0GUdvoKSRhMJ+81aHxr5zmIhluegEuY99UhX+ZK6NftW2
-UOYjjjQZ4NPDjqOfP5dYUbHiCFRgeUxkmjwnQoSih63iSOoUt5kocR+oXXxb
-YmbgeOa8KGgKzDLGHI2nsy8Cni3N/enKVMMHGbJy1DXdV7uRFhBdjnRZGdmt
-amHcQbwGHUH+PtTa/jUSMdbtTUvXPI6dz7jDpK0BImzbXNb+r9CcudpiixuM
-u5gv3qyJL5EAWCXcT2j+y2VWj2HN/8bJHMoo6yf+bn6A/Cu9f0obbGVF0kJ/
-Y5UWmEdBG6IzABEBAAHCwJMEGAEKACYWIQTkiTkktw3HqXqtl/DWgXL0jpxS
-gwUCWyA7kQIbDAUJA8JnAAAhCRDWgXL0jpxSgxYhBOSJOSS3Dcepeq2X8NaB
-cvSOnFKDkFMIAIt64bVZ8x7+TitH1bR4pgcNkaKmgKoZz6FXu80+SnbuEt2N
-nDyf1cLOSimSTILpwLIuv9Uft5PbOraQbYt3xi9yrqdKqGLv80bxqK0NuryN
-kvh9yyx5WoG1iKqMj9/FjGghuPrRaT4lQinNAghGVkEy1+aXGFrG2DsOC1FF
-I51CC2WVTzZ5RwR2GpiNRfESsU1rZAUqf/2VyJl9bD5R4SUNy8oQmhOxi+gb
-hD4Ao34e4W0ilibslI/uawvCiOwlu5NGd8zv5n+UheiQvzkApQup5c+BhH5z
-FDFdKJ2CBByxw9+7QjMFI/wgLixKuE0Ob2kAokXf7RlB7qTZOahrETw=
-=+2T8
------END PGP PUBLIC KEY BLOCK-----
-`
-
-const keyWithOnlyUserIDRevoked = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-
-mDMEYYwB7RYJKwYBBAHaRw8BAQdARimqhPPzyGAXmfQJjcqM1QVPzLtURJSzNVll
-JV4tEaW0KVJldm9rZWQgUHJpbWFyeSBVc2VyIElEIDxyZXZva2VkQGtleS5jb20+
-iHgEMBYIACAWIQSpyJZAXYqVEFkjyKutFcS0yeB0LQUCYYwCtgIdAAAKCRCtFcS0
-yeB0LbSsAQD8OYMaaBjrdzzpwIkP1stgmPd4/kzN/ZG28Ywl6a5F5QEA5Xg7aq4e
-/t6Fsb4F5iqB956kSPe6YJrikobD/tBbMwSIkAQTFggAOBYhBKnIlkBdipUQWSPI
-q60VxLTJ4HQtBQJhjAHtAhsDBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJEK0V
-xLTJ4HQtBaoBAPZL7luTCji+Tqhn7XNfFE/0QIahCt8k9wfO1cGlB3inAQDf8Tzw
-ZGR5fNluUcNoVxQT7bUSFStbaGo3k0BaOYPbCLg4BGGMAe0SCisGAQQBl1UBBQEB
-B0DLwSpveSrbIO/IVZD13yrs1XuB3FURZUnafGrRq7+jUAMBCAeIeAQYFggAIBYh
-BKnIlkBdipUQWSPIq60VxLTJ4HQtBQJhjAHtAhsMAAoJEK0VxLTJ4HQtZ1oA/j9u
-8+p3xTNzsmabTL6BkNbMeB/RUKCrlm6woM6AV+vxAQCcXTn3JC2sNoNrLoXuVzaA
-mcG3/TwG5GSQUUPkrDsGDA==
-=mFWy
------END PGP PUBLIC KEY BLOCK-----
-`
-
-const keyWithSubKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-
-mI0EWyKwKQEEALwXhKBnyaaNFeK3ljfc/qn9X/QFw+28EUfgZPHjRmHubuXLE2uR
-s3ZoSXY2z7Dkv+NyHYMt8p+X8q5fR7JvUjK2XbPyKoiJVnHINll83yl67DaWfKNL
-EjNoO0kIfbXfCkZ7EG6DL+iKtuxniGTcnGT47e+HJSqb/STpLMnWwXjBABEBAAG0
-I0dvbGFuZyBHb3BoZXIgPG5vLXJlcGx5QGdvbGFuZy5jb20+iM4EEwEKADgWIQQ/
-lRafP/p9PytHbwxMvYJsOQdOOAUCWyKwKQIbAwULCQgHAwUVCgkICwUWAgMBAAIe
-AQIXgAAKCRBMvYJsOQdOOOsFBAC62mXww8XuqvYLcVOvHkWLT6mhxrQOJXnlfpn7
-2uBV9CMhoG/Ycd43NONsJrB95Apr9TDIqWnVszNbqPCuBhZQSGLdbiDKjxnCWBk0
-69qv4RNtkpOhYB7jK4s8F5oQZqId6JasT/PmJTH92mhBYhhTQr0GYFuPX2UJdkw9
-Sn9C67iNBFsisDUBBAC3A+Yo9lgCnxi/pfskyLrweYif6kIXWLAtLTsM6g/6jt7b
-wTrknuCPyTv0QKGXsAEe/cK/Xq3HvX9WfXPGIHc/X56ZIsHQ+RLowbZV/Lhok1IW
-FAuQm8axr/by80cRwFnzhfPc/ukkAq2Qyj4hLsGblu6mxeAhzcp8aqmWOO2H9QAR
-AQABiLYEKAEKACAWIQQ/lRafP/p9PytHbwxMvYJsOQdOOAUCWyK16gIdAAAKCRBM
-vYJsOQdOOB1vA/4u4uLONsE+2GVOyBsHyy7uTdkuxaR9b54A/cz6jT/tzUbeIzgx
-22neWhgvIEghnUZd0vEyK9k1wy5vbDlEo6nKzHso32N1QExGr5upRERAxweDxGOj
-7luDwNypI7QcifE64lS/JmlnunwRCdRWMKc0Fp+7jtRc5mpwyHN/Suf5RokBagQY
-AQoAIBYhBD+VFp8/+n0/K0dvDEy9gmw5B044BQJbIrA1AhsCAL8JEEy9gmw5B044
-tCAEGQEKAB0WIQSNdnkaWY6t62iX336UXbGvYdhXJwUCWyKwNQAKCRCUXbGvYdhX
-JxJSA/9fCPHP6sUtGF1o3G1a3yvOUDGr1JWcct9U+QpbCt1mZoNopCNDDQAJvDWl
-mvDgHfuogmgNJRjOMznvahbF+wpTXmB7LS0SK412gJzl1fFIpK4bgnhu0TwxNsO1
-8UkCZWqxRMgcNUn9z6XWONK8dgt5JNvHSHrwF4CxxwjL23AAtK+FA/UUoi3U4kbC
-0XnSr1Sl+mrzQi1+H7xyMe7zjqe+gGANtskqexHzwWPUJCPZ5qpIa2l8ghiUim6b
-4ymJ+N8/T8Yva1FaPEqfMzzqJr8McYFm0URioXJPvOAlRxdHPteZ0qUopt/Jawxl
-Xt6B9h1YpeLoJwjwsvbi98UTRs0jXwoY
-=3fWu
------END PGP PUBLIC KEY BLOCK-----`
-
-const keyWithSubKeyAndBadSelfSigOrder = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-
-mI0EWyLLDQEEAOqIOpJ/ha1OYAGduu9tS3rBz5vyjbNgJO4sFveEM0mgsHQ0X9/L
-plonW+d0gRoO1dhJ8QICjDAc6+cna1DE3tEb5m6JtQ30teLZuqrR398Cf6w7NNVz
-r3lrlmnH9JaKRuXl7tZciwyovneBfZVCdtsRZjaLI1uMQCz/BToiYe3DABEBAAG0
-I0dvbGFuZyBHb3BoZXIgPG5vLXJlcGx5QGdvbGFuZy5jb20+iM4EEwEKADgWIQRZ
-sixZOfQcZdW0wUqmgmdsv1O9xgUCWyLLDQIbAwULCQgHAwUVCgkICwUWAgMBAAIe
-AQIXgAAKCRCmgmdsv1O9xql2A/4pix98NxjhdsXtazA9agpAKeADf9tG4Za27Gj+
-3DCww/E4iP2X35jZimSm/30QRB6j08uGCqd9vXkkJxtOt63y/IpVOtWX6vMWSTUm
-k8xKkaYMP0/IzKNJ1qC/qYEUYpwERBKg9Z+k99E2Ql4kRHdxXUHq6OzY79H18Y+s
-GdeM/riNBFsiyxsBBAC54Pxg/8ZWaZX1phGdwfe5mek27SOYpC0AxIDCSOdMeQ6G
-HPk38pywl1d+S+KmF/F4Tdi+kWro62O4eG2uc/T8JQuRDUhSjX0Qa51gPzJrUOVT
-CFyUkiZ/3ZDhtXkgfuso8ua2ChBgR9Ngr4v43tSqa9y6AK7v0qjxD1x+xMrjXQAR
-AQABiQFxBBgBCgAmAhsCFiEEWbIsWTn0HGXVtMFKpoJnbL9TvcYFAlsizTIFCQAN
-MRcAv7QgBBkBCgAdFiEEJcoVUVJIk5RWj1c/o62jUpRPICQFAlsiyxsACgkQo62j
-UpRPICQq5gQApoWIigZxXFoM0uw4uJBS5JFZtirTANvirZV5RhndwHeMN6JttaBS
-YnjyA4+n1D+zB2VqliD2QrsX12KJN6rGOehCtEIClQ1Hodo9nC6kMzzAwW1O8bZs
-nRJmXV+bsvD4sidLZLjdwOVa3Cxh6pvq4Uur6a7/UYx121hEY0Qx0s8JEKaCZ2y/
-U73GGi0D/i20VW8AWYAPACm2zMlzExKTOAV01YTQH/3vW0WLrOse53WcIVZga6es
-HuO4So0SOEAvxKMe5HpRIu2dJxTvd99Bo9xk9xJU0AoFrO0vNCRnL+5y68xMlODK
-lEw5/kl0jeaTBp6xX0HDQOEVOpPGUwWV4Ij2EnvfNDXaE1vK1kffiQFrBBgBCgAg
-AhsCFiEEWbIsWTn0HGXVtMFKpoJnbL9TvcYFAlsi0AYAv7QgBBkBCgAdFiEEJcoV
-UVJIk5RWj1c/o62jUpRPICQFAlsiyxsACgkQo62jUpRPICQq5gQApoWIigZxXFoM
-0uw4uJBS5JFZtirTANvirZV5RhndwHeMN6JttaBSYnjyA4+n1D+zB2VqliD2QrsX
-12KJN6rGOehCtEIClQ1Hodo9nC6kMzzAwW1O8bZsnRJmXV+bsvD4sidLZLjdwOVa
-3Cxh6pvq4Uur6a7/UYx121hEY0Qx0s8JEKaCZ2y/U73GRl0EAJokkXmy4zKDHWWi
-wvK9gi2gQgRkVnu2AiONxJb5vjeLhM/07BRmH6K1o+w3fOeEQp4FjXj1eQ5fPSM6
-Hhwx2CTl9SDnPSBMiKXsEFRkmwQ2AAsQZLmQZvKBkLZYeBiwf+IY621eYDhZfo+G
-1dh1WoUCyREZsJQg2YoIpWIcvw+a
-=bNRo
------END PGP PUBLIC KEY BLOCK-----
-`
-
-const onlySubkeyNoPrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK-----
-Version: GnuPG v1
-
-lQCVBFggvocBBAC7vBsHn7MKmS6IiiZNTXdciplVgS9cqVd+RTdIAoyNTcsiV1H0
-GQ3QtodOPeDlQDNoqinqaobd7R9g3m3hS53Nor7yBZkCWQ5x9v9JxRtoAq0sklh1
-I1X2zEqZk2l6YrfBF/64zWrhjnW3j23szkrAIVu0faQXbQ4z56tmZrw11wARAQAB
-/gdlAkdOVQG0CUdOVSBEdW1teYi4BBMBAgAiBQJYIL6HAhsDBgsJCAcDAgYVCAIJ
-CgsEFgIDAQIeAQIXgAAKCRCd1xxWp1CYAnjGA/9synn6ZXJUKAXQzySgmCZvCIbl
-rqBfEpxwLG4Q/lONhm5vthAE0z49I8hj5Gc5e2tLYUtq0o0OCRdCrYHa/efOYWpJ
-6RsK99bePOisVzmOABLIgZkcr022kHoMCmkPgv9CUGKP1yqbGl+zzAwQfUjRUmvD
-ZIcWLHi2ge4GzPMPi50B2ARYIL6cAQQAxWHnicKejAFcFcF1/3gUSgSH7eiwuBPX
-M7vDdgGzlve1o1jbV4tzrjN9jsCl6r0nJPDMfBSzgLr1auNTRG6HpJ4abcOx86ED
-Ad+avDcQPZb7z3dPhH/gb2lQejZsHh7bbeOS8WMSzHV3RqCLd8J/xwWPNR5zKn1f
-yp4IGfopidMAEQEAAQAD+wQOelnR82+dxyM2IFmZdOB9wSXQeCVOvxSaNMh6Y3lk
-UOOkO8Nlic4x0ungQRvjoRs4wBmCuwFK/MII6jKui0B7dn/NDf51i7rGdNGuJXDH
-e676By1sEY/NGkc74jr74T+5GWNU64W0vkpfgVmjSAzsUtpmhJMXsc7beBhJdnVl
-AgDKCb8hZqj1alcdmLoNvb7ibA3K/V8J462CPD7bMySPBa/uayoFhNxibpoXml2r
-oOtHa5izF3b0/9JY97F6rqkdAgD6GdTJ+xmlCoz1Sewoif1I6krq6xoa7gOYpIXo
-UL1Afr+LiJeyAnF/M34j/kjIVmPanZJjry0kkjHE5ILjH3uvAf4/6n9np+Th8ujS
-YDCIzKwR7639+H+qccOaddCep8Y6KGUMVdD/vTKEx1rMtK+hK/CDkkkxnFslifMJ
-kqoqv3WUqCWJAT0EGAECAAkFAlggvpwCGwIAqAkQndccVqdQmAKdIAQZAQIABgUC
-WCC+nAAKCRDmGUholQPwvQk+A/9latnSsR5s5/1A9TFki11GzSEnfLbx46FYOdkW
-n3YBxZoPQGxNA1vIn8GmouxZInw9CF4jdOJxEdzLlYQJ9YLTLtN5tQEMl/19/bR8
-/qLacAZ9IOezYRWxxZsyn6//jfl7A0Y+FV59d4YajKkEfItcIIlgVBSW6T+TNQT3
-R+EH5HJ/A/4/AN0CmBhhE2vGzTnVU0VPrE4V64pjn1rufFdclgpixNZCuuqpKpoE
-VVHn6mnBf4njKjZrAGPs5kfQ+H4NsM7v3Zz4yV6deu9FZc4O6E+V1WJ38rO8eBix
-7G2jko106CC6vtxsCPVIzY7aaG3H5pjRtomw+pX7SzrQ7FUg2PGumg==
-=F/T0
------END PGP PRIVATE KEY BLOCK-----`
-
-const ecdsaPrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK-----
-
-xaUEX1KsSRMIKoZIzj0DAQcCAwTpYqJsnJiFhKKh+8TulWD+lVmerBFNS+Ii
-B+nlG3T0xQQ4Sy5eIjJ0CExIQQzi3EElF/Z2l4F3WC5taFA11NgA/gkDCHSS
-PThf1M2K4LN8F1MRcvR+sb7i0nH55ojkwuVB1DE6jqIT9m9i+mX1tzjSAS+6
-lPQiweCJvG7xTC7Hs3AzRapf/r1At4TB+v+5G2/CKynNFEJpbGwgPGJpbGxA
-aG9tZS5jb20+wncEEBMIAB8FAl9SrEkGCwkHCAMCBBUICgIDFgIBAhkBAhsD
-Ah4BAAoJEMpwT3+q3+xqw5UBAMebZN9isEZ1ML+R/jWAAWMwa/knMugrEZ1v
-Bl9+ZwM0AQCZdf80/wYY4Nve01qSRFv8OmKswLli3TvDv6FKc4cLz8epBF9S
-rEkSCCqGSM49AwEHAgMEAjKnT9b5wY2bf9TpAV3d7OUfPOxKj9c4VzeVzSrH
-AtQgo/MuI1cdYVURicV4i76DNjFhQHQFTk7BrC+C2u1yqQMBCAf+CQMIHImA
-iYfzQtjgQWSFZYUkCFpbbwhNF0ch+3HNaZkaHCnZRIsWsRnc6FCb6lRQyK9+
-Dq59kHlduE5QgY40894jfmP2JdJHU6nBdYrivbEdbMJhBBgTCAAJBQJfUqxJ
-AhsMAAoJEMpwT3+q3+xqUI0BAMykhV08kQ4Ip9Qlbss6Jdufv7YrU0Vd5hou
-b5TmiPd0APoDBh3qIic+aLLUcAuG3+Gt1P1AbUlmqV61ozn1WfHxfw==
-=KLN8
------END PGP PRIVATE KEY BLOCK-----`
-
-const dsaPrivateKeyWithElGamalSubkey = `-----BEGIN PGP PRIVATE KEY BLOCK-----
-
-lQOBBF9/MLsRCACeaF6BI0jTgDAs86t8/kXPfwlPvR2MCYzB0BCqAdcq1hV/GTYd
-oNmJRna/ZJfsI/vf+d8Nv+EYOQkPheFS1MJVBitkAXjQPgm8i1tQWen1FCWZxqGk
-/vwZYF4yo8GhZ+Wxi3w09W9Cp9QM/CTmyE1Xe7wpPBGe+oD+me8Zxjyt8JBS4Qx+
-gvWbfHxfHnggh4pz7U8QkItlLsBNQEdX4R5+zwRN66g2ZSX/shaa/EkVnihUhD7r
-njP9I51ORWucTQD6OvgooaNQZCkQ/Se9TzdakwWKS2XSIFXiY/e2E5ZgKI/pfKDU
-iA/KessxddPb7nP/05OIJqg9AoDrD4vmehLzAQD+zsUS3LDU1m9/cG4LMsQbT2VK
-Te4HqbGIAle+eu/asQf8DDJMrbZpiJZvADum9j0TJ0oep6VdMbzo9RSDKvlLKT9m
-kG63H8oDWnCZm1a+HmGq9YIX+JHWmsLXXsFLeEouLzHO+mZo0X28eji3V2T87hyR
-MmUM0wFo4k7jK8uVmkDXv3XwNp2uByWxUKZd7EnWmcEZWqIiexJ7XpCS0Pg3tRaI
-zxve0SRe/dxfUPnTk/9KQ9hS6DWroBKquL182zx1Fggh4LIWWE2zq+UYn8BI0E8A
-rmIDFJdF8ymFQGRrEy6g79NnkPmkrZWsgMRYY65P6v4zLVmqohJKkpm3/Uxa6QAP
-CCoPh/JTOvPeCP2bOJH8z4Z9Py3ouMIjofQW8sXqRgf/RIHbh0KsINHrwwZ4gVIr
-MK3RofpaYxw1ztPIWb4cMWoWZHH1Pxh7ggTGSBpAhKXkiWw2Rxat8QF5aA7e962c
-bLvVv8dqsPrD/RnVJHag89cbPTzjn7gY9elE8EM8ithV3oQkwHTr4avYlpDZsgNd
-hUW3YgRwGo31tdzxoG04AcpV2t+07P8XMPr9hsfWs4rHohXPi38Hseu1Ji+dBoWQ
-3+1w/HH3o55s+jy4Ruaz78AIrjbmAJq+6rA2mIcCgrhw3DnzuwQAKeBvSeqn9zfS
-ZC812osMBVmkycwelpaIh64WZ0vWL3GvdXDctV2kXM+qVpDTLEny0LuiXxrwCKQL
-Ev4HAwK9uQBcreDEEud7pfRb8EYP5lzO2ZA7RaIvje6EWAGBvJGMRT0QQE5SGqc7
-Fw5geigBdt+vVyRuNNhg3c2fdn/OBQaYu0J/8AiOogG8EaM8tCFlbGdhbWFsQGRz
-YS5jb20gPGVsZ2FtYWxAZHNhLmNvbT6IkAQTEQgAOBYhBI+gnfiHQxB35/Dp0XAQ
-aE/rsWC5BQJffzC7AhsDBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJEHAQaE/r
-sWC5A4EA/0GcJmyPtN+Klc7b9sVT3JgKTRnB/URxOJfYJofP0hZLAQCkqyMO+adV
-JvbgDH0zaITQWZSSXPqpgMpCA6juTrDsd50CawRffzC7EAgAxFFFSAAEQzWTgKU5
-EBtpxxoPzHqcChawTHRxHxjcELXzmUBS5PzfA1HXSPnNqK/x3Ut5ycC3CsW41Fnt
-Gm3706Wu9VFbFZVn55F9lPiplUo61n5pqMvOr1gmuQsdXiTa0t5FRa4TZ2VSiHFw
-vdAVSPTUsT4ZxJ1rPyFYRtq1n3pQcvdZowd07r0JnzTMjLLMFYCKhwIowoOC4zqJ
-iB8enjwOlpaqBATRm9xpVF7SJkroPF6/B1vdhj7E3c1aJyHlo0PYBAg756sSHWHg
-UuLyUQ4TA0hcCVenn/L/aSY2LnbdZB1EBhlYjA7dTCgwIqsQhfQmPkjz6g64A7+Y
-HbbrLwADBQgAk14QIEQ+J/VHetpQV/jt2pNsFK1kVK7mXK0spTExaC2yj2sXlHjL
-Ie3bO5T/KqmIaBEB5db5fA5xK9cZt79qrQHDKsEqUetUeMUWLBx77zBsus3grIgy
-bwDZKseRzQ715pwxquxQlScGoDIBKEh08HpwHkq140eIj3w+MAIfndaZaSCNaxaP
-Snky7BQmJ7Wc7qrIwoQP6yrnUqyW2yNi81nJYUhxjChqaFSlwzLs/iNGryBKo0ic
-BqVIRjikKHBlwBng6WyrltQo/Vt9GG8w+lqaAVXbJRlaBZJUR+2NKi/YhP3qQse3
-v8fi4kns0gh5LK+2C01RvdX4T49QSExuIf4HAwLJqYIGwadA2uem5v7/765ZtFWV
-oL0iZ0ueTJDby4wTFDpLVzzDi/uVcB0ZRFrGOp7w6OYcNYTtV8n3xmli2Q5Trw0c
-wZVzvg+ABKWiv7faBjMczIFF8y6WZKOIeAQYEQgAIBYhBI+gnfiHQxB35/Dp0XAQ
-aE/rsWC5BQJffzC7AhsMAAoJEHAQaE/rsWC5ZmIA/jhS4r4lClbvjuPWt0Yqdn7R
-fss2SPMYvMrrDh42aE0OAQD8xn4G6CN8UtW9xihXOY6FpxiJ/sMc2VaneeUd34oa
-4g==
-=XZm8
------END PGP PRIVATE KEY BLOCK-----`
-
-// https://tests.sequoia-pgp.org/#Certificate_expiration
-// P _ U p
-const expiringPrimaryUIDKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-
-xsDNBF2lnPIBDAC5cL9PQoQLTMuhjbYvb4Ncuuo0bfmgPRFywX53jPhoFf4Zg6mv
-/seOXpgecTdOcVttfzC8ycIKrt3aQTiwOG/ctaR4Bk/t6ayNFfdUNxHWk4WCKzdz
-/56fW2O0F23qIRd8UUJp5IIlN4RDdRCtdhVQIAuzvp2oVy/LaS2kxQoKvph/5pQ/
-5whqsyroEWDJoSV0yOb25B/iwk/pLUFoyhDG9bj0kIzDxrEqW+7Ba8nocQlecMF3
-X5KMN5kp2zraLv9dlBBpWW43XktjcCZgMy20SouraVma8Je/ECwUWYUiAZxLIlMv
-9CurEOtxUw6N3RdOtLmYZS9uEnn5y1UkF88o8Nku890uk6BrewFzJyLAx5wRZ4F0
-qV/yq36UWQ0JB/AUGhHVPdFf6pl6eaxBwT5GXvbBUibtf8YI2og5RsgTWtXfU7eb
-SGXrl5ZMpbA6mbfhd0R8aPxWfmDWiIOhBufhMCvUHh1sApMKVZnvIff9/0Dca3wb
-vLIwa3T4CyshfT0AEQEAAc0hQm9iIEJhYmJhZ2UgPGJvYkBvcGVucGdwLmV4YW1w
-bGU+wsFcBBMBCgCQBYJhesp/BYkEWQPJBQsJCAcCCRD7/MgqAV5zMEcUAAAAAAAe
-ACBzYWx0QG5vdGF0aW9ucy5zZXF1b2lhLXBncC5vcmeEOQlNyTLFkc9I/elp+BpY
-495V7KatqtDmsyDr+zDAdwYVCgkICwIEFgIDAQIXgAIbAwIeARYhBNGmbhojsYLJ
-mA94jPv8yCoBXnMwAABSCQv/av8hKyynMtXVKFuWOGJw0mR8auDm84WdhMFRZg8t
-yTJ1L88+Ny4WUAFeqo2j7DU2yPGrm5rmuvzlEedFYFeOWt+A4adz+oumgRd0nsgG
-Lf3QYUWQhLWVlz+H7zubgKqSB2A2RqV65S7mTTVro42nb2Mng6rvGWiqeKG5nrXN
-/01p1mIBQGR/KnZSqYLzA2Pw2PiJoSkXT26PDz/kiEMXpjKMR6sicV4bKVlEdUvm
-pIImIPBHZq1EsKXEyWtWC41w/pc+FofGE+uSFs2aef1vvEHFkj3BHSK8gRcH3kfR
-eFroTET8C2q9V1AOELWm+Ys6PzGzF72URK1MKXlThuL4t4LjvXWGNA78IKW+/RQH
-DzK4U0jqSO0mL6qxqVS5Ij6jjL6OTrVEGdtDf5n0vI8tcUTBKtVqYAYk+t2YGT05
-ayxALtb7viVKo8f10WEcCuKshn0gdsEFMRZQzJ89uQIY3R3FbsdRCaE6OEaDgKMQ
-UTFROyfhthgzRKbRxfcplMUCzsDNBF2lnPIBDADWML9cbGMrp12CtF9b2P6z9TTT
-74S8iyBOzaSvdGDQY/sUtZXRg21HWamXnn9sSXvIDEINOQ6A9QxdxoqWdCHrOuW3
-ofneYXoG+zeKc4dC86wa1TR2q9vW+RMXSO4uImA+Uzula/6k1DogDf28qhCxMwG/
-i/m9g1c/0aApuDyKdQ1PXsHHNlgd/Dn6rrd5y2AObaifV7wIhEJnvqgFXDN2RXGj
-LeCOHV4Q2WTYPg/S4k1nMXVDwZXrvIsA0YwIMgIT86Rafp1qKlgPNbiIlC1g9RY/
-iFaGN2b4Ir6GDohBQSfZW2+LXoPZuVE/wGlQ01rh827KVZW4lXvqsge+wtnWlszc
-selGATyzqOK9LdHPdZGzROZYI2e8c+paLNDdVPL6vdRBUnkCaEkOtl1mr2JpQi5n
-TU+gTX4IeInC7E+1a9UDF/Y85ybUz8XV8rUnR76UqVC7KidNepdHbZjjXCt8/Zo+
-Tec9JNbYNQB/e9ExmDntmlHEsSEQzFwzj8sxH48AEQEAAcLA9gQYAQoAIBYhBNGm
-bhojsYLJmA94jPv8yCoBXnMwBQJdpZzyAhsMAAoJEPv8yCoBXnMw6f8L/26C34dk
-jBffTzMj5Bdzm8MtF67OYneJ4TQMw7+41IL4rVcSKhIhk/3Ud5knaRtP2ef1+5F6
-6h9/RPQOJ5+tvBwhBAcUWSupKnUrdVaZQanYmtSxcVV2PL9+QEiNN3tzluhaWO//
-rACxJ+K/ZXQlIzwQVTpNhfGzAaMVV9zpf3u0k14itcv6alKY8+rLZvO1wIIeRZLm
-U0tZDD5HtWDvUV7rIFI1WuoLb+KZgbYn3OWjCPHVdTrdZ2CqnZbG3SXw6awH9bzR
-LV9EXkbhIMez0deCVdeo+wFFklh8/5VK2b0vk/+wqMJxfpa1lHvJLobzOP9fvrsw
-sr92MA2+k901WeISR7qEzcI0Fdg8AyFAExaEK6VyjP7SXGLwvfisw34OxuZr3qmx
-1Sufu4toH3XrB7QJN8XyqqbsGxUCBqWif9RSK4xjzRTe56iPeiSJJOIciMP9i2ld
-I+KgLycyeDvGoBj0HCLO3gVaBe4ubVrj5KjhX2PVNEJd3XZRzaXZE2aAMQ==
-=AmgT
------END PGP PUBLIC KEY BLOCK-----`
-
-const rsa2048PrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK-----
-Comment: gpg (GnuPG) 2.2.27 with libgcrypt 1.9.4
-
-lQPGBGL07P0BCADL0etN8efyAXA6sL2WfQvHe5wEKYXPWeN2+jiqSppfeRZAOlzP
-kZ3U+cloeJriplYvVJwI3ID2aw52Z/TRn8iKRP5eOUFrEgcgl06lazLtOndK7o7p
-oBV5mLtHEirFHm6W61fNt10jzM0jx0PV6nseLhFB2J42F1cmU/aBgFo41wjLSZYr
-owR+v+O9S5sUXblQF6sEDcY01sBEu09zrIgT49VFwQ1Cvdh9XZEOTQBfdiugoj5a
-DS3fAqAka3r1VoQK4eR7/upnYSgSACGeaQ4pUelKku5rpm50gdWTY8ppq0k9e1eT
-y2x0OQcW3hWE+j4os1ca0ZEADMdqr/99MOxrABEBAAH+BwMCJWxU4VOZOJ7/I6vX
-FxdfBhIBEXlJ52FM3S/oYtXqLhkGyrtmZOeEazVvUtuCe3M3ScHI8xCthcmE8E0j
-bi+ZEHPS2NiBZtgHFF27BLn7zZuTc+oD5WKduZdK3463egnyThTqIIMl25WZBuab
-k5ycwYrWwBH0jfA4gwJ13ai4pufKC2RM8qIu6YAVPglYBKFLKGvvJHa5vI+LuA0E
-K+k35hIic7yVUcQneNnAF2598X5yWiieYnOZpmHlRw1zfbMwOJr3ZNj2v94u7b+L
-sTa/1Uv9887Vb6sJp0c2Sh4cwEccoPYkvMqFn3ZrJUr3UdDu1K2vWohPtswzhrYV
-+RdPZE5RLoCQufKvlPezk0Pzhzb3bBU7XjUbdGY1nH/EyQeBNp+Gw6qldKvzcBaB
-cyOK1c6hPSszpJX93m5UxCN55IeifmcNjmbDh8vGCCdajy6d56qV2n4F3k7vt1J1
-0UlxIGhqijJoaTCX66xjLMC6VXkSz6aHQ35rnXosm/cqPcQshsZTdlfSyWkorfdr
-4Hj8viBER26mjYurTMLBKDtUN724ZrR0Ev5jorX9uoKlgl87bDZHty2Ku2S+vR68
-VAvnj6Fi1BYNclnDoqxdRB2z5T9JbWE52HuG83/QsplhEqXxESDxriTyTHMbNxEe
-88soVCDh4tgflZFa2ucUr6gEKJKij7jgahARnyaXfPZlQBUAS1YUeILYmN+VR+M/
-sHENpwDWc7TInn8VN638nJV+ScZGMih3AwWZTIoiLju3MMt1K0YZ3NuiqwGH4Jwg
-/BbEdTWeCci9y3NEQHQ3uZZ5p6j2CwFVlK11idemCMvAiTVxF+gKdaLMkeCwKxru
-J3YzhKEo+iDVYbPYBYizx/EHBn2U5kITQ5SBXzjTaaFMNZJEf9JYsL1ybPB6HOFY
-VNVB2KT8CGVwtCJHb2xhbmcgR29waGVyIDxnb2xhbmdAZXhhbXBsZS5vcmc+iQFO
-BBMBCgA4FiEEC6K7U7f4qesybTnqSkra7gHusm0FAmL07P0CGwMFCwkIBwIGFQoJ
-CAsCBBYCAwECHgECF4AACgkQSkra7gHusm1MvwgAxpClWkeSqIhMQfbiuz0+lOkE
-89y1DCFw8bHjZoUf4/4K8hFA3dGkk+q72XFgiyaCpfXxMt6Gi+dN47t+tTv9NIqC
-sukbaoJBmJDhN6+djmJOgOYy+FWsW2LAk2LOwKYulpnBZdcA5rlMAhBg7gevQpF+
-ruSU69P7UUaFJl/DC7hDmaIcj+4cjBE/HO26SnVQjoTfjZT82rDh1Wsuf8LnkJUk
-b3wezBLpXKjDvdHikdv4gdlR4AputVM38aZntYYglh/EASo5TneyZ7ZscdLNRdcF
-r5O2fKqrOJLOdaoYRFZZWOvP5GtEVFDU7WGivOSVfiszBE0wZR3dgZRJipHCXJ0D
-xgRi9Oz9AQgAtMJcJqLLVANJHl90tWuoizDkm+Imcwq2ubQAjpclnNrODnDK+7o4
-pBsWmXbZSdkC4gY+LhOQA6bPDD0JEHM58DOnrm49BddxXAyK0HPsk4sGGt2SS86B
-OawWNdfJVyqw4bAiHWDmQg4PcjBbt3ocOIxAR6I5kBSiQVxuGQs9T+Zvg3G1r3Or
-fS6DzlgY3HFUML5YsGH4lOxNSOoKAP68GIH/WNdUZ+feiRg9knIib6I3Hgtf5eO8
-JRH7aWE/TD7eNu36bLLjT5TZPq5r6xaD2plbtPOyXbNPWs9qI1yG+VnErfaLY0w8
-Qo0aqzbgID+CTZVomXSOpOcQseaFKw8ZfQARAQAB/gcDArha6+/+d4OY/w9N32K9
-hFNYt4LufTETMQ+k/sBeaMuAVzmT47DlAXzkrZhGW4dZOtXMu1rXaUwHlqkhEyzL
-L4MYEWVXfD+LbZNEK3MEFss6RK+UAMeT/PTV9aA8cXQVPcSJYzfBXHQ1U1hnOgrO
-apn92MN8RmkhX8wJLyeWTMMuP4lXByJMmmGo8WvifeRD2kFY4y0WVBDAXJAV4Ljf
-Di/bBiwoc5a+gxHuZT2W9ZSxBQJNXdt4Un2IlyZuo58s5MLx2N0EaNJ8PwRUE6fM
-RZYO8aZCEPUtINE4njbvsWOMCtrblsMPwZ1B0SiIaWmLaNyGdCNKea+fCIW7kasC
-JYMhnLumpUTXg5HNexkCsl7ABWj0PYBflOE61h8EjWpnQ7JBBVKS2ua4lMjwHRX7
-5o5yxym9k5UZNFdGoXVL7xpizCcdGawxTJvwhs3vBqu1ZWYCegOAZWDrOkCyhUpq
-8uKMROZFbn+FwE+7tjt+v2ed62FVEvD6g4V3ThCA6mQqeOARfJWN8GZY8BDm8lht
-crOXriUkrx+FlrgGtm2CkwjW5/9Xd7AhFpHnQdFeozOHyq1asNSgJF9sNi9Lz94W
-skQSVRi0IExxSXYGI3Y0nnAZUe2BAQflYPJdEveSr3sKlUqXiETTA1VXsTPK3kOC
-92CbLzj/Hz199jZvywwyu53I+GKMpF42rMq7zxr2oa61YWY4YE/GDezwwys/wLx/
-QpCW4X3ppI7wJjCSSqEV0baYZSSli1ayheS6dxi8QnSpX1Bmpz6gU7m/M9Sns+hl
-J7ZvgpjCAiV7KJTjtclr5/S02zP78LTVkoTWoz/6MOTROwaP63VBUXX8pbJhf/vu
-DLmNnDk8joMJxoDXWeNU0EnNl4hP7Z/jExRBOEO4oAnUf/Sf6gCWQhL5qcajtg6w
-tGv7vx3f2IkBNgQYAQoAIBYhBAuiu1O3+KnrMm056kpK2u4B7rJtBQJi9Oz9AhsM
-AAoJEEpK2u4B7rJt6lgIAMBWqP4BCOGnQXBbgJ0+ACVghpkFUXZTb/tXJc8UUvTM
-8uov6k/RsqDGZrvhhufD7Wwt7j9v7dD7VPp7bPyjVWyimglQzWguTUUqLDGlstYH
-5uYv1pzma0ZsAGNqFeGlTLsKOSGKFMH4rB2KfN2n51L8POvtp1y7GKZQbWIWneaB
-cZr3BINU5GMvYYU7pAYcoR+mJPdJx5Up3Ocn+bn8Tu1sy9C/ArtCQucazGnoE9u1
-HhNLrh0CdzzX7TNH6TQ8LwPOvq0K5l/WqbN9lE0WBBhMv2HydxhluO8AhU+A5GqC
-C+wET7nVDnhoOm/fstIeb7/LN7OYejKPeHdFBJEL9GA=
-=u442
------END PGP PRIVATE KEY BLOCK-----`
-
-const curve25519PrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK-----
-Comment: gpg (GnuPG) 2.2.27 with libgcrypt 1.9.4
-
-lFgEYvTtQBYJKwYBBAHaRw8BAQdAxsNXLbrk5xOjpO24VhOMvQ0/F+JcyIkckMDH
-X3FIGxcAAQDFOlunZWYuPsCx5JLp78vKqUTfgef9TGG4oD6I/Sa0zBMstCJHb2xh
-bmcgR29waGVyIDxnb2xhbmdAZXhhbXBsZS5vcmc+iJAEExYIADgWIQSFQHEOazmo
-h1ldII4MvfnLQ4JBNwUCYvTtQAIbAwULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAK
-CRAMvfnLQ4JBN5yeAQCKdry8B5ScCPrev2+UByMCss7Sdu5RhomCFsHdNPLcKAEA
-8ugei+1owHsV+3cGwWWzKk6sLa8ZN87i3SKuOGp9DQycXQRi9O1AEgorBgEEAZdV
-AQUBAQdA5CubPp8l7lrVQ25h7Hx5XN2C8xanRnnpcjzEooCaEA0DAQgHAAD/Rpc+
-sOZUXrFk9HOWB1XU41LoWbDBoG8sP8RWAVYwD5AQRYh4BBgWCAAgFiEEhUBxDms5
-qIdZXSCODL35y0OCQTcFAmL07UACGwwACgkQDL35y0OCQTcvdwEA7lb5g/YisrEf
-iq660uwMGoepLUfvtqKzuQ6heYe83y0BAN65Ffg5HYOJzUEi0kZQRf7OhdtuL2kJ
-SRXn8DmCTfEB
-=cELM
------END PGP PRIVATE KEY BLOCK-----`
-
-const curve448PrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK-----
-Comment: C1DB 65D5 80D7 B922 7254 4B1E A699 9895 FABA CE52
-
-xYUEYV2UmRYDK2VxAc9AFyxgh5xnSbyt50TWl558mw9xdMN+/UBLr5+UMP8IsrvV
-MdXuTIE8CyaUQKSotHtH2RkYEXj5nsMAAAHPQIbTMSzjIWug8UFECzAex5FHgAgH
-gYF3RK+TS8D24wX8kOu2C/NoVxwGY+p+i0JHaB+7yljriSKAGxs6wsBEBB8WCgCD
-BYJhXZSZBYkFpI+9AwsJBwkQppmYlfq6zlJHFAAAAAAAHgAgc2FsdEBub3RhdGlv
-bnMuc2VxdW9pYS1wZ3Aub3Jn5wSpIutJ5HncJWk4ruUV8GzQF390rR5+qWEAnAoY
-akcDFQoIApsBAh4BFiEEwdtl1YDXuSJyVEseppmYlfq6zlIAALzdA5dA/fsgYg/J
-qaQriYKaPUkyHL7EB3BXhV2d1h/gk+qJLvXQuU2WEJ/XSs3GrsBRiiZwvPH4o+7b
-mleAxjy5wpS523vqrrBR2YZ5FwIku7WS4litSdn4AtVam/TlLdMNIf41CtFeZKBe
-c5R5VNdQy8y7qy8AAADNEUN1cnZlNDQ4IE9wdGlvbiA4wsBHBBMWCgCGBYJhXZSZ
-BYkFpI+9AwsJBwkQppmYlfq6zlJHFAAAAAAAHgAgc2FsdEBub3RhdGlvbnMuc2Vx
-dW9pYS1wZ3Aub3JnD55UsYMzE6OACP+mgw5zvT+BBgol8/uFQjHg4krjUCMDFQoI
-ApkBApsBAh4BFiEEwdtl1YDXuSJyVEseppmYlfq6zlIAAPQJA5dA0Xqwzn/0uwCq
-RlsOVCB3f5NOj1exKnlBvRw0xT1VBee1yxvlUt5eIAoCxWoRlWBJob3TTkhm9AEA
-8dyhwPmyGfWHzPw5NFG3xsXrZdNXNvit9WMVAPcmsyR7teXuDlJItxRAdJJc/qfJ
-YVbBFoaNrhYAAADHhQRhXZSZFgMrZXEBz0BL7THZ9MnCLfSPJ1FMLim9eGkQ3Bfn
-M3he5rOwO3t14QI1LjI96OjkeJipMgcFAmEP1Bq/ZHGO7oAAAc9AFnE8iNBaT3OU
-EFtxkmWHXtdaYMmGGRdopw9JPXr/UxuunDln5o9dxPxf7q7z26zXrZen+qed/Isa
-HsDCwSwEGBYKAWsFgmFdlJkFiQWkj70JEKaZmJX6us5SRxQAAAAAAB4AIHNhbHRA
-bm90YXRpb25zLnNlcXVvaWEtcGdwLm9yZxREUizdTcepBzgSMOv2VWQCWbl++3CZ
-EbgAWDryvSsyApsCwDGgBBkWCgBvBYJhXZSZCRBKo3SL4S5djkcUAAAAAAAeACBz
-YWx0QG5vdGF0aW9ucy5zZXF1b2lhLXBncC5vcmemoGTDjmNQiIzw6HOEddvS0OB7
-UZ/P07jM/EVmnYxTlBYhBAxsnkGpx1UCiH6gUUqjdIvhLl2OAAALYQOXQAMB1oKq
-OWxSFmvmgCKNcbAAyA3piF5ERIqs4z07oJvqDYrOWt75UsEIH/04gU/vHc4EmfG2
-JDLJgOLlyTUPkL/08f0ydGZPofFQBhn8HkuFFjnNtJ5oz3GIP4cdWMQFaUw0uvjb
-PM9Tm3ptENGd6Ts1AAAAFiEEwdtl1YDXuSJyVEseppmYlfq6zlIAAGpTA5dATR6i
-U2GrpUcQgpG+JqfAsGmF4yAOhgFxc1UfidFk3nTup3fLgjipkYY170WLRNbyKkVO
-Sodx93GAs58rizO1acDAWiLq3cyEPBFXbyFThbcNPcLl+/77Uk/mgkYrPQFAQWdK
-1kSRm4SizDBK37K8ChAAAADHhwRhXZSZEgMrZW8Bx0DMhzvhQo+OsXeqQ6QVw4sF
-CaexHh6rLohh7TzL3hQSjoJ27fV6JBkIWdn0LfrMlJIDbSv2SLdlgQMBCgkAAcdA
-MO7Dc1myF6Co1fAH+EuP+OxhxP/7V6ljuSCZENDfA49tQkzTta+PniG+pOVB2LHb
-huyaKBkqiaogo8LAOQQYFgoAeAWCYV2UmQWJBaSPvQkQppmYlfq6zlJHFAAAAAAA
-HgAgc2FsdEBub3RhdGlvbnMuc2VxdW9pYS1wZ3Aub3JnEjBMQAmc/2u45u5FQGmB
-QAytjSG2LM3JQN+PPVl5vEkCmwwWIQTB22XVgNe5InJUSx6mmZiV+rrOUgAASdYD
-l0DXEHQ9ykNP2rZP35ET1dmiFagFtTj/hLQcWlg16LqvJNGqOgYXuqTerbiOOt02
-XLCBln+wdewpU4ChEffMUDRBfqfQco/YsMqWV7bHJHAO0eC/DMKCjyU90xdH7R/d
-QgqsfguR1PqPuJxpXV4bSr6CGAAAAA==
-=MSvh
------END PGP PRIVATE KEY BLOCK-----`
-
-const keyWithNotation = `-----BEGIN PGP PRIVATE KEY BLOCK-----
-
-xVgEY9gIshYJKwYBBAHaRw8BAQdAF25fSM8OpFlXZhop4Qpqo5ywGZ4jgWlR
-ppjhIKDthREAAQC+LFpzFcMJYcjxGKzBGHN0Px2jU4d04YSRnFAik+lVVQ6u
-zRdUZXN0IDx0ZXN0QGV4YW1wbGUuY29tPsLACgQQFgoAfAUCY9gIsgQLCQcI
-CRD/utJOCym8pR0UgAAAAAAQAAR0ZXh0QGV4YW1wbGUuY29tdGVzdB8UAAAA
-AAASAARiaW5hcnlAZXhhbXBsZS5jb20AAQIDAxUICgQWAAIBAhkBAhsDAh4B
-FiEEEMCQTUVGKgCX5rDQ/7rSTgspvKUAAPl5AP9Npz90LxzrB97Qr2DrGwfG
-wuYn4FSYwtuPfZHHeoIabwD/QEbvpQJ/NBb9EAZuow4Rirlt1yv19mmnF+j5
-8yUzhQjHXQRj2AiyEgorBgEEAZdVAQUBAQdARXAo30DmKcyUg6co7OUm0RNT
-z9iqFbDBzA8A47JEt1MDAQgHAAD/XKK3lBm0SqMR558HLWdBrNG6NqKuqb5X
-joCML987ZNgRD8J4BBgWCAAqBQJj2AiyCRD/utJOCym8pQIbDBYhBBDAkE1F
-RioAl+aw0P+60k4LKbylAADRxgEAg7UfBDiDPp5LHcW9D+SgFHk6+GyEU4ev
-VppQxdtxPvAA/34snHBX7Twnip1nMt7P4e2hDiw/hwQ7oqioOvc6jMkP
-=Z8YJ
------END PGP PRIVATE KEY BLOCK-----
-`
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go
deleted file mode 100644
index fec41a0e73f..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright (C) 2019 ProtonTech AG
-
-package packet
-
-import "math/bits"
-
-// CipherSuite contains a combination of Cipher and Mode
-type CipherSuite struct {
- // The cipher function
- Cipher CipherFunction
- // The AEAD mode of operation.
- Mode AEADMode
-}
-
-// AEADConfig collects a number of AEAD parameters along with sensible defaults.
-// A nil AEADConfig is valid and results in all default values.
-type AEADConfig struct {
- // The AEAD mode of operation.
- DefaultMode AEADMode
- // Amount of octets in each chunk of data
- ChunkSize uint64
-}
-
-// Mode returns the AEAD mode of operation.
-func (conf *AEADConfig) Mode() AEADMode {
- // If no preference is specified, OCB is used (which is mandatory to implement).
- if conf == nil || conf.DefaultMode == 0 {
- return AEADModeOCB
- }
-
- mode := conf.DefaultMode
- if mode != AEADModeEAX && mode != AEADModeOCB && mode != AEADModeGCM {
- panic("AEAD mode unsupported")
- }
- return mode
-}
-
-// ChunkSizeByte returns the byte indicating the chunk size. The effective
-// chunk size is computed with the formula uint64(1) << (chunkSizeByte + 6)
-// limit to 16 = 4 MiB
-// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2
-func (conf *AEADConfig) ChunkSizeByte() byte {
- if conf == nil || conf.ChunkSize == 0 {
- return 12 // 1 << (12 + 6) == 262144 bytes
- }
-
- chunkSize := conf.ChunkSize
- exponent := bits.Len64(chunkSize) - 1
- switch {
- case exponent < 6:
- exponent = 6
- case exponent > 16:
- exponent = 16
- }
-
- return byte(exponent - 6)
-}
-
-// decodeAEADChunkSize returns the effective chunk size. In 32-bit systems, the
-// maximum returned value is 1 << 30.
-func decodeAEADChunkSize(c byte) int {
- size := uint64(1 << (c + 6))
- if size != uint64(int(size)) {
- return 1 << 30
- }
- return int(size)
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go
deleted file mode 100644
index 2d1aeed65c0..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go
+++ /dev/null
@@ -1,273 +0,0 @@
-// Copyright (C) 2019 ProtonTech AG
-
-package packet
-
-import (
- "bytes"
- "crypto/cipher"
- "encoding/binary"
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
-)
-
-// aeadCrypter is an AEAD opener/sealer, its configuration, and data for en/decryption.
-type aeadCrypter struct {
- aead cipher.AEAD
- chunkSize int
- initialNonce []byte
- associatedData []byte // Chunk-independent associated data
- chunkIndex []byte // Chunk counter
- packetTag packetType // SEIP packet (v2) or AEAD Encrypted Data packet
- bytesProcessed int // Amount of plaintext bytes encrypted/decrypted
- buffer bytes.Buffer // Buffered bytes across chunks
-}
-
-// computeNonce takes the incremental index and computes an eXclusive OR with
-// the least significant 8 bytes of the receivers' initial nonce (see sec.
-// 5.16.1 and 5.16.2). It returns the resulting nonce.
-func (wo *aeadCrypter) computeNextNonce() (nonce []byte) {
- if wo.packetTag == packetTypeSymmetricallyEncryptedIntegrityProtected {
- return append(wo.initialNonce, wo.chunkIndex...)
- }
-
- nonce = make([]byte, len(wo.initialNonce))
- copy(nonce, wo.initialNonce)
- offset := len(wo.initialNonce) - 8
- for i := 0; i < 8; i++ {
- nonce[i+offset] ^= wo.chunkIndex[i]
- }
- return
-}
-
-// incrementIndex performs an integer increment by 1 of the integer represented by the
-// slice, modifying it accordingly.
-func (wo *aeadCrypter) incrementIndex() error {
- index := wo.chunkIndex
- if len(index) == 0 {
- return errors.AEADError("Index has length 0")
- }
- for i := len(index) - 1; i >= 0; i-- {
- if index[i] < 255 {
- index[i]++
- return nil
- }
- index[i] = 0
- }
- return errors.AEADError("cannot further increment index")
-}
-
-// aeadDecrypter reads and decrypts bytes. It buffers extra decrypted bytes when
-// necessary, similar to aeadEncrypter.
-type aeadDecrypter struct {
- aeadCrypter // Embedded ciphertext opener
- reader io.Reader // 'reader' is a partialLengthReader
- peekedBytes []byte // Used to detect last chunk
- eof bool
-}
-
-// Read decrypts bytes and reads them into dst. It decrypts when necessary and
-// buffers extra decrypted bytes. It returns the number of bytes copied into dst
-// and an error.
-func (ar *aeadDecrypter) Read(dst []byte) (n int, err error) {
- // Return buffered plaintext bytes from previous calls
- if ar.buffer.Len() > 0 {
- return ar.buffer.Read(dst)
- }
-
- // Return EOF if we've previously validated the final tag
- if ar.eof {
- return 0, io.EOF
- }
-
- // Read a chunk
- tagLen := ar.aead.Overhead()
- cipherChunkBuf := new(bytes.Buffer)
- _, errRead := io.CopyN(cipherChunkBuf, ar.reader, int64(ar.chunkSize+tagLen))
- cipherChunk := cipherChunkBuf.Bytes()
- if errRead != nil && errRead != io.EOF {
- return 0, errRead
- }
-
- if len(cipherChunk) > 0 {
- decrypted, errChunk := ar.openChunk(cipherChunk)
- if errChunk != nil {
- return 0, errChunk
- }
-
- // Return decrypted bytes, buffering if necessary
- if len(dst) < len(decrypted) {
- n = copy(dst, decrypted[:len(dst)])
- ar.buffer.Write(decrypted[len(dst):])
- } else {
- n = copy(dst, decrypted)
- }
- }
-
- // Check final authentication tag
- if errRead == io.EOF {
- errChunk := ar.validateFinalTag(ar.peekedBytes)
- if errChunk != nil {
- return n, errChunk
- }
- ar.eof = true // Mark EOF for when we've returned all buffered data
- }
- return
-}
-
-// Close is noOp. The final authentication tag of the stream was already
-// checked in the last Read call. In the future, this function could be used to
-// wipe the reader and peeked, decrypted bytes, if necessary.
-func (ar *aeadDecrypter) Close() (err error) {
- if !ar.eof {
- errChunk := ar.validateFinalTag(ar.peekedBytes)
- if errChunk != nil {
- return errChunk
- }
- }
- return nil
-}
-
-// openChunk decrypts and checks integrity of an encrypted chunk, returning
-// the underlying plaintext and an error. It accesses peeked bytes from next
-// chunk, to identify the last chunk and decrypt/validate accordingly.
-func (ar *aeadDecrypter) openChunk(data []byte) ([]byte, error) {
- tagLen := ar.aead.Overhead()
- // Restore carried bytes from last call
- chunkExtra := append(ar.peekedBytes, data...)
- // 'chunk' contains encrypted bytes, followed by an authentication tag.
- chunk := chunkExtra[:len(chunkExtra)-tagLen]
- ar.peekedBytes = chunkExtra[len(chunkExtra)-tagLen:]
-
- adata := ar.associatedData
- if ar.aeadCrypter.packetTag == packetTypeAEADEncrypted {
- adata = append(ar.associatedData, ar.chunkIndex...)
- }
-
- nonce := ar.computeNextNonce()
- plainChunk, err := ar.aead.Open(nil, nonce, chunk, adata)
- if err != nil {
- return nil, err
- }
- ar.bytesProcessed += len(plainChunk)
- if err = ar.aeadCrypter.incrementIndex(); err != nil {
- return nil, err
- }
- return plainChunk, nil
-}
-
-// Checks the summary tag. It takes into account the total decrypted bytes into
-// the associated data. It returns an error, or nil if the tag is valid.
-func (ar *aeadDecrypter) validateFinalTag(tag []byte) error {
- // Associated: tag, version, cipher, aead, chunk size, ...
- amountBytes := make([]byte, 8)
- binary.BigEndian.PutUint64(amountBytes, uint64(ar.bytesProcessed))
-
- adata := ar.associatedData
- if ar.aeadCrypter.packetTag == packetTypeAEADEncrypted {
- // ... index ...
- adata = append(ar.associatedData, ar.chunkIndex...)
- }
-
- // ... and total number of encrypted octets
- adata = append(adata, amountBytes...)
- nonce := ar.computeNextNonce()
- _, err := ar.aead.Open(nil, nonce, tag, adata)
- if err != nil {
- return err
- }
- return nil
-}
-
-// aeadEncrypter encrypts and writes bytes. It encrypts when necessary according
-// to the AEAD block size, and buffers the extra encrypted bytes for next write.
-type aeadEncrypter struct {
- aeadCrypter // Embedded plaintext sealer
- writer io.WriteCloser // 'writer' is a partialLengthWriter
-}
-
-// Write encrypts and writes bytes. It encrypts when necessary and buffers extra
-// plaintext bytes for next call. When the stream is finished, Close() MUST be
-// called to append the final tag.
-func (aw *aeadEncrypter) Write(plaintextBytes []byte) (n int, err error) {
- // Append plaintextBytes to existing buffered bytes
- n, err = aw.buffer.Write(plaintextBytes)
- if err != nil {
- return n, err
- }
- // Encrypt and write chunks
- for aw.buffer.Len() >= aw.chunkSize {
- plainChunk := aw.buffer.Next(aw.chunkSize)
- encryptedChunk, err := aw.sealChunk(plainChunk)
- if err != nil {
- return n, err
- }
- _, err = aw.writer.Write(encryptedChunk)
- if err != nil {
- return n, err
- }
- }
- return
-}
-
-// Close encrypts and writes the remaining buffered plaintext if any, appends
-// the final authentication tag, and closes the embedded writer. This function
-// MUST be called at the end of a stream.
-func (aw *aeadEncrypter) Close() (err error) {
- // Encrypt and write a chunk if there's buffered data left, or if we haven't
- // written any chunks yet.
- if aw.buffer.Len() > 0 || aw.bytesProcessed == 0 {
- plainChunk := aw.buffer.Bytes()
- lastEncryptedChunk, err := aw.sealChunk(plainChunk)
- if err != nil {
- return err
- }
- _, err = aw.writer.Write(lastEncryptedChunk)
- if err != nil {
- return err
- }
- }
- // Compute final tag (associated data: packet tag, version, cipher, aead,
- // chunk size...
- adata := aw.associatedData
-
- if aw.aeadCrypter.packetTag == packetTypeAEADEncrypted {
- // ... index ...
- adata = append(aw.associatedData, aw.chunkIndex...)
- }
-
- // ... and total number of encrypted octets
- amountBytes := make([]byte, 8)
- binary.BigEndian.PutUint64(amountBytes, uint64(aw.bytesProcessed))
- adata = append(adata, amountBytes...)
-
- nonce := aw.computeNextNonce()
- finalTag := aw.aead.Seal(nil, nonce, nil, adata)
- _, err = aw.writer.Write(finalTag)
- if err != nil {
- return err
- }
- return aw.writer.Close()
-}
-
-// sealChunk Encrypts and authenticates the given chunk.
-func (aw *aeadEncrypter) sealChunk(data []byte) ([]byte, error) {
- if len(data) > aw.chunkSize {
- return nil, errors.AEADError("chunk exceeds maximum length")
- }
- if aw.associatedData == nil {
- return nil, errors.AEADError("can't seal without headers")
- }
- adata := aw.associatedData
- if aw.aeadCrypter.packetTag == packetTypeAEADEncrypted {
- adata = append(aw.associatedData, aw.chunkIndex...)
- }
-
- nonce := aw.computeNextNonce()
- encrypted := aw.aead.Seal(nil, nonce, data, adata)
- aw.bytesProcessed += len(data)
- if err := aw.aeadCrypter.incrementIndex(); err != nil {
- return nil, err
- }
- return encrypted, nil
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go
deleted file mode 100644
index 98bd876bf29..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright (C) 2019 ProtonTech AG
-
-package packet
-
-import (
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
-)
-
-// AEADEncrypted represents an AEAD Encrypted Packet.
-// See https://www.ietf.org/archive/id/draft-koch-openpgp-2015-rfc4880bis-00.html#name-aead-encrypted-data-packet-t
-type AEADEncrypted struct {
- cipher CipherFunction
- mode AEADMode
- chunkSizeByte byte
- Contents io.Reader // Encrypted chunks and tags
- initialNonce []byte // Referred to as IV in RFC4880-bis
-}
-
-// Only currently defined version
-const aeadEncryptedVersion = 1
-
-func (ae *AEADEncrypted) parse(buf io.Reader) error {
- headerData := make([]byte, 4)
- if n, err := io.ReadFull(buf, headerData); n < 4 {
- return errors.AEADError("could not read aead header:" + err.Error())
- }
- // Read initial nonce
- mode := AEADMode(headerData[2])
- nonceLen := mode.IvLength()
-
- // This packet supports only EAX and OCB
- // https://www.ietf.org/archive/id/draft-koch-openpgp-2015-rfc4880bis-00.html#name-aead-encrypted-data-packet-t
- if nonceLen == 0 || mode > AEADModeOCB {
- return errors.AEADError("unknown mode")
- }
-
- initialNonce := make([]byte, nonceLen)
- if n, err := io.ReadFull(buf, initialNonce); n < nonceLen {
- return errors.AEADError("could not read aead nonce:" + err.Error())
- }
- ae.Contents = buf
- ae.initialNonce = initialNonce
- c := headerData[1]
- if _, ok := algorithm.CipherById[c]; !ok {
- return errors.UnsupportedError("unknown cipher: " + string(c))
- }
- ae.cipher = CipherFunction(c)
- ae.mode = mode
- ae.chunkSizeByte = headerData[3]
- return nil
-}
-
-// Decrypt returns a io.ReadCloser from which decrypted bytes can be read, or
-// an error.
-func (ae *AEADEncrypted) Decrypt(ciph CipherFunction, key []byte) (io.ReadCloser, error) {
- return ae.decrypt(key)
-}
-
-// decrypt prepares an aeadCrypter and returns a ReadCloser from which
-// decrypted bytes can be read (see aeadDecrypter.Read()).
-func (ae *AEADEncrypted) decrypt(key []byte) (io.ReadCloser, error) {
- blockCipher := ae.cipher.new(key)
- aead := ae.mode.new(blockCipher)
- // Carry the first tagLen bytes
- tagLen := ae.mode.TagLength()
- peekedBytes := make([]byte, tagLen)
- n, err := io.ReadFull(ae.Contents, peekedBytes)
- if n < tagLen || (err != nil && err != io.EOF) {
- return nil, errors.AEADError("Not enough data to decrypt:" + err.Error())
- }
- chunkSize := decodeAEADChunkSize(ae.chunkSizeByte)
- return &aeadDecrypter{
- aeadCrypter: aeadCrypter{
- aead: aead,
- chunkSize: chunkSize,
- initialNonce: ae.initialNonce,
- associatedData: ae.associatedData(),
- chunkIndex: make([]byte, 8),
- packetTag: packetTypeAEADEncrypted,
- },
- reader: ae.Contents,
- peekedBytes: peekedBytes}, nil
-}
-
-// associatedData for chunks: tag, version, cipher, mode, chunk size byte
-func (ae *AEADEncrypted) associatedData() []byte {
- return []byte{
- 0xD4,
- aeadEncryptedVersion,
- byte(ae.cipher),
- byte(ae.mode),
- ae.chunkSizeByte}
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go
deleted file mode 100644
index 334de286b38..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go
+++ /dev/null
@@ -1,162 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "compress/bzip2"
- "compress/flate"
- "compress/zlib"
- "io"
- "io/ioutil"
- "strconv"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
-)
-
-// Compressed represents a compressed OpenPGP packet. The decompressed contents
-// will contain more OpenPGP packets. See RFC 4880, section 5.6.
-type Compressed struct {
- Body io.Reader
-}
-
-const (
- NoCompression = flate.NoCompression
- BestSpeed = flate.BestSpeed
- BestCompression = flate.BestCompression
- DefaultCompression = flate.DefaultCompression
-)
-
-// CompressionConfig contains compressor configuration settings.
-type CompressionConfig struct {
- // Level is the compression level to use. It must be set to
- // between -1 and 9, with -1 causing the compressor to use the
- // default compression level, 0 causing the compressor to use
- // no compression and 1 to 9 representing increasing (better,
- // slower) compression levels. If Level is less than -1 or
- // more then 9, a non-nil error will be returned during
- // encryption. See the constants above for convenient common
- // settings for Level.
- Level int
-}
-
-// decompressionReader ensures that the whole compression packet is read.
-type decompressionReader struct {
- compressed io.Reader
- decompressed io.ReadCloser
- readAll bool
-}
-
-func newDecompressionReader(r io.Reader, decompressor io.ReadCloser) *decompressionReader {
- return &decompressionReader{
- compressed: r,
- decompressed: decompressor,
- }
-}
-
-func (dr *decompressionReader) Read(data []byte) (n int, err error) {
- if dr.readAll {
- return 0, io.EOF
- }
- n, err = dr.decompressed.Read(data)
- if err == io.EOF {
- dr.readAll = true
- // Close the decompressor.
- if errDec := dr.decompressed.Close(); errDec != nil {
- return n, errDec
- }
- // Consume all remaining data from the compressed packet.
- consumeAll(dr.compressed)
- }
- return n, err
-}
-
-func (c *Compressed) parse(r io.Reader) error {
- var buf [1]byte
- _, err := readFull(r, buf[:])
- if err != nil {
- return err
- }
-
- switch buf[0] {
- case 0:
- c.Body = r
- case 1:
- c.Body = newDecompressionReader(r, flate.NewReader(r))
- case 2:
- decompressor, err := zlib.NewReader(r)
- if err != nil {
- return err
- }
- c.Body = newDecompressionReader(r, decompressor)
- case 3:
- c.Body = newDecompressionReader(r, ioutil.NopCloser(bzip2.NewReader(r)))
- default:
- err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0])))
- }
-
- return err
-}
-
-// compressedWriterCloser represents the serialized compression stream
-// header and the compressor. Its Close() method ensures that both the
-// compressor and serialized stream header are closed. Its Write()
-// method writes to the compressor.
-type compressedWriteCloser struct {
- sh io.Closer // Stream Header
- c io.WriteCloser // Compressor
-}
-
-func (cwc compressedWriteCloser) Write(p []byte) (int, error) {
- return cwc.c.Write(p)
-}
-
-func (cwc compressedWriteCloser) Close() (err error) {
- err = cwc.c.Close()
- if err != nil {
- return err
- }
-
- return cwc.sh.Close()
-}
-
-// SerializeCompressed serializes a compressed data packet to w and
-// returns a WriteCloser to which the literal data packets themselves
-// can be written and which MUST be closed on completion. If cc is
-// nil, sensible defaults will be used to configure the compression
-// algorithm.
-func SerializeCompressed(w io.WriteCloser, algo CompressionAlgo, cc *CompressionConfig) (literaldata io.WriteCloser, err error) {
- compressed, err := serializeStreamHeader(w, packetTypeCompressed)
- if err != nil {
- return
- }
-
- _, err = compressed.Write([]byte{uint8(algo)})
- if err != nil {
- return
- }
-
- level := DefaultCompression
- if cc != nil {
- level = cc.Level
- }
-
- var compressor io.WriteCloser
- switch algo {
- case CompressionZIP:
- compressor, err = flate.NewWriter(compressed, level)
- case CompressionZLIB:
- compressor, err = zlib.NewWriterLevel(compressed, level)
- default:
- s := strconv.Itoa(int(algo))
- err = errors.UnsupportedError("Unsupported compression algorithm: " + s)
- }
- if err != nil {
- return
- }
-
- literaldata = compressedWriteCloser{compressed, compressor}
-
- return
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go
deleted file mode 100644
index 181d5d344ec..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go
+++ /dev/null
@@ -1,352 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "crypto"
- "crypto/rand"
- "io"
- "math/big"
- "time"
-
- "github.com/ProtonMail/go-crypto/openpgp/s2k"
-)
-
-var (
- defaultRejectPublicKeyAlgorithms = map[PublicKeyAlgorithm]bool{
- PubKeyAlgoElGamal: true,
- PubKeyAlgoDSA: true,
- }
- defaultRejectMessageHashAlgorithms = map[crypto.Hash]bool{
- crypto.SHA1: true,
- crypto.MD5: true,
- crypto.RIPEMD160: true,
- }
- defaultRejectCurves = map[Curve]bool{
- CurveSecP256k1: true,
- }
-)
-
-// Config collects a number of parameters along with sensible defaults.
-// A nil *Config is valid and results in all default values.
-type Config struct {
- // Rand provides the source of entropy.
- // If nil, the crypto/rand Reader is used.
- Rand io.Reader
- // DefaultHash is the default hash function to be used.
- // If zero, SHA-256 is used.
- DefaultHash crypto.Hash
- // DefaultCipher is the cipher to be used.
- // If zero, AES-128 is used.
- DefaultCipher CipherFunction
- // Time returns the current time as the number of seconds since the
- // epoch. If Time is nil, time.Now is used.
- Time func() time.Time
- // DefaultCompressionAlgo is the compression algorithm to be
- // applied to the plaintext before encryption. If zero, no
- // compression is done.
- DefaultCompressionAlgo CompressionAlgo
- // CompressionConfig configures the compression settings.
- CompressionConfig *CompressionConfig
- // S2K (String to Key) config, used for key derivation in the context of secret key encryption
- // and password-encrypted data.
- // If nil, the default configuration is used
- S2KConfig *s2k.Config
- // Iteration count for Iterated S2K (String to Key).
- // Only used if sk2.Mode is nil.
- // This value is duplicated here from s2k.Config for backwards compatibility.
- // It determines the strength of the passphrase stretching when
- // the said passphrase is hashed to produce a key. S2KCount
- // should be between 65536 and 65011712, inclusive. If Config
- // is nil or S2KCount is 0, the value 16777216 used. Not all
- // values in the above range can be represented. S2KCount will
- // be rounded up to the next representable value if it cannot
- // be encoded exactly. When set, it is strongly encrouraged to
- // use a value that is at least 65536. See RFC 4880 Section
- // 3.7.1.3.
- //
- // Deprecated: SK2Count should be configured in S2KConfig instead.
- S2KCount int
- // RSABits is the number of bits in new RSA keys made with NewEntity.
- // If zero, then 2048 bit keys are created.
- RSABits int
- // The public key algorithm to use - will always create a signing primary
- // key and encryption subkey.
- Algorithm PublicKeyAlgorithm
- // Some known primes that are optionally prepopulated by the caller
- RSAPrimes []*big.Int
- // Curve configures the desired packet.Curve if the Algorithm is PubKeyAlgoECDSA,
- // PubKeyAlgoEdDSA, or PubKeyAlgoECDH. If empty Curve25519 is used.
- Curve Curve
- // AEADConfig configures the use of the new AEAD Encrypted Data Packet,
- // defined in the draft of the next version of the OpenPGP specification.
- // If a non-nil AEADConfig is passed, usage of this packet is enabled. By
- // default, it is disabled. See the documentation of AEADConfig for more
- // configuration options related to AEAD.
- // **Note: using this option may break compatibility with other OpenPGP
- // implementations, as well as future versions of this library.**
- AEADConfig *AEADConfig
- // V6Keys configures version 6 key generation. If false, this package still
- // supports version 6 keys, but produces version 4 keys.
- V6Keys bool
- // Minimum RSA key size allowed for key generation and message signing, verification and encryption.
- MinRSABits uint16
- // Reject insecure algorithms, only works with v2 api
- RejectPublicKeyAlgorithms map[PublicKeyAlgorithm]bool
- RejectMessageHashAlgorithms map[crypto.Hash]bool
- RejectCurves map[Curve]bool
- // "The validity period of the key. This is the number of seconds after
- // the key creation time that the key expires. If this is not present
- // or has a value of zero, the key never expires. This is found only on
- // a self-signature.""
- // https://tools.ietf.org/html/rfc4880#section-5.2.3.6
- KeyLifetimeSecs uint32
- // "The validity period of the signature. This is the number of seconds
- // after the signature creation time that the signature expires. If
- // this is not present or has a value of zero, it never expires."
- // https://tools.ietf.org/html/rfc4880#section-5.2.3.10
- SigLifetimeSecs uint32
- // SigningKeyId is used to specify the signing key to use (by Key ID).
- // By default, the signing key is selected automatically, preferring
- // signing subkeys if available.
- SigningKeyId uint64
- // SigningIdentity is used to specify a user ID (packet Signer's User ID, type 28)
- // when producing a generic certification signature onto an existing user ID.
- // The identity must be present in the signer Entity.
- SigningIdentity string
- // InsecureAllowUnauthenticatedMessages controls, whether it is tolerated to read
- // encrypted messages without Modification Detection Code (MDC).
- // MDC is mandated by the IETF OpenPGP Crypto Refresh draft and has long been implemented
- // in most OpenPGP implementations. Messages without MDC are considered unnecessarily
- // insecure and should be prevented whenever possible.
- // In case one needs to deal with messages from very old OpenPGP implementations, there
- // might be no other way than to tolerate the missing MDC. Setting this flag, allows this
- // mode of operation. It should be considered a measure of last resort.
- InsecureAllowUnauthenticatedMessages bool
- // KnownNotations is a map of Notation Data names to bools, which controls
- // the notation names that are allowed to be present in critical Notation Data
- // signature subpackets.
- KnownNotations map[string]bool
- // SignatureNotations is a list of Notations to be added to any signatures.
- SignatureNotations []*Notation
- // CheckIntendedRecipients controls, whether the OpenPGP Intended Recipient Fingerprint feature
- // should be enabled for encryption and decryption.
- // (See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-12.html#name-intended-recipient-fingerpr).
- // When the flag is set, encryption produces Intended Recipient Fingerprint signature sub-packets and decryption
- // checks whether the key it was encrypted to is one of the included fingerprints in the signature.
- // If the flag is disabled, no Intended Recipient Fingerprint sub-packets are created or checked.
- // The default behavior, when the config or flag is nil, is to enable the feature.
- CheckIntendedRecipients *bool
- // CacheSessionKey controls if decryption should return the session key used for decryption.
- // If the flag is set, the session key is cached in the message details struct.
- CacheSessionKey bool
- // CheckPacketSequence is a flag that controls if the pgp message reader should strictly check
- // that the packet sequence conforms with the grammar mandated by rfc4880.
- // The default behavior, when the config or flag is nil, is to check the packet sequence.
- CheckPacketSequence *bool
-}
-
-func (c *Config) Random() io.Reader {
- if c == nil || c.Rand == nil {
- return rand.Reader
- }
- return c.Rand
-}
-
-func (c *Config) Hash() crypto.Hash {
- if c == nil || uint(c.DefaultHash) == 0 {
- return crypto.SHA256
- }
- return c.DefaultHash
-}
-
-func (c *Config) Cipher() CipherFunction {
- if c == nil || uint8(c.DefaultCipher) == 0 {
- return CipherAES128
- }
- return c.DefaultCipher
-}
-
-func (c *Config) Now() time.Time {
- if c == nil || c.Time == nil {
- return time.Now().Truncate(time.Second)
- }
- return c.Time().Truncate(time.Second)
-}
-
-// KeyLifetime returns the validity period of the key.
-func (c *Config) KeyLifetime() uint32 {
- if c == nil {
- return 0
- }
- return c.KeyLifetimeSecs
-}
-
-// SigLifetime returns the validity period of the signature.
-func (c *Config) SigLifetime() uint32 {
- if c == nil {
- return 0
- }
- return c.SigLifetimeSecs
-}
-
-func (c *Config) Compression() CompressionAlgo {
- if c == nil {
- return CompressionNone
- }
- return c.DefaultCompressionAlgo
-}
-
-func (c *Config) RSAModulusBits() int {
- if c == nil || c.RSABits == 0 {
- return 2048
- }
- return c.RSABits
-}
-
-func (c *Config) PublicKeyAlgorithm() PublicKeyAlgorithm {
- if c == nil || c.Algorithm == 0 {
- return PubKeyAlgoRSA
- }
- return c.Algorithm
-}
-
-func (c *Config) CurveName() Curve {
- if c == nil || c.Curve == "" {
- return Curve25519
- }
- return c.Curve
-}
-
-// Deprecated: The hash iterations should now be queried via the S2K() method.
-func (c *Config) PasswordHashIterations() int {
- if c == nil || c.S2KCount == 0 {
- return 0
- }
- return c.S2KCount
-}
-
-func (c *Config) S2K() *s2k.Config {
- if c == nil {
- return nil
- }
- // for backwards compatibility
- if c != nil && c.S2KCount > 0 && c.S2KConfig == nil {
- return &s2k.Config{
- S2KCount: c.S2KCount,
- }
- }
- return c.S2KConfig
-}
-
-func (c *Config) AEAD() *AEADConfig {
- if c == nil {
- return nil
- }
- return c.AEADConfig
-}
-
-func (c *Config) SigningKey() uint64 {
- if c == nil {
- return 0
- }
- return c.SigningKeyId
-}
-
-func (c *Config) SigningUserId() string {
- if c == nil {
- return ""
- }
- return c.SigningIdentity
-}
-
-func (c *Config) AllowUnauthenticatedMessages() bool {
- if c == nil {
- return false
- }
- return c.InsecureAllowUnauthenticatedMessages
-}
-
-func (c *Config) KnownNotation(notationName string) bool {
- if c == nil {
- return false
- }
- return c.KnownNotations[notationName]
-}
-
-func (c *Config) Notations() []*Notation {
- if c == nil {
- return nil
- }
- return c.SignatureNotations
-}
-
-func (c *Config) V6() bool {
- if c == nil {
- return false
- }
- return c.V6Keys
-}
-
-func (c *Config) IntendedRecipients() bool {
- if c == nil || c.CheckIntendedRecipients == nil {
- return true
- }
- return *c.CheckIntendedRecipients
-}
-
-func (c *Config) RetrieveSessionKey() bool {
- if c == nil {
- return false
- }
- return c.CacheSessionKey
-}
-
-func (c *Config) MinimumRSABits() uint16 {
- if c == nil || c.MinRSABits == 0 {
- return 2047
- }
- return c.MinRSABits
-}
-
-func (c *Config) RejectPublicKeyAlgorithm(alg PublicKeyAlgorithm) bool {
- var rejectedAlgorithms map[PublicKeyAlgorithm]bool
- if c == nil || c.RejectPublicKeyAlgorithms == nil {
- // Default
- rejectedAlgorithms = defaultRejectPublicKeyAlgorithms
- } else {
- rejectedAlgorithms = c.RejectPublicKeyAlgorithms
- }
- return rejectedAlgorithms[alg]
-}
-
-func (c *Config) RejectMessageHashAlgorithm(hash crypto.Hash) bool {
- var rejectedAlgorithms map[crypto.Hash]bool
- if c == nil || c.RejectMessageHashAlgorithms == nil {
- // Default
- rejectedAlgorithms = defaultRejectMessageHashAlgorithms
- } else {
- rejectedAlgorithms = c.RejectMessageHashAlgorithms
- }
- return rejectedAlgorithms[hash]
-}
-
-func (c *Config) RejectCurve(curve Curve) bool {
- var rejectedCurve map[Curve]bool
- if c == nil || c.RejectCurves == nil {
- // Default
- rejectedCurve = defaultRejectCurves
- } else {
- rejectedCurve = c.RejectCurves
- }
- return rejectedCurve[curve]
-}
-
-func (c *Config) StrictPacketSequence() bool {
- if c == nil || c.CheckPacketSequence == nil {
- return true
- }
- return *c.CheckPacketSequence
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go
deleted file mode 100644
index e70f9d9411b..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go
+++ /dev/null
@@ -1,575 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "bytes"
- "crypto"
- "crypto/rsa"
- "encoding/binary"
- "encoding/hex"
- "io"
- "math/big"
- "strconv"
-
- "github.com/ProtonMail/go-crypto/openpgp/ecdh"
- "github.com/ProtonMail/go-crypto/openpgp/elgamal"
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/encoding"
- "github.com/ProtonMail/go-crypto/openpgp/x25519"
- "github.com/ProtonMail/go-crypto/openpgp/x448"
-)
-
-// EncryptedKey represents a public-key encrypted session key. See RFC 4880,
-// section 5.1.
-type EncryptedKey struct {
- Version int
- KeyId uint64
- KeyVersion int // v6
- KeyFingerprint []byte // v6
- Algo PublicKeyAlgorithm
- CipherFunc CipherFunction // only valid after a successful Decrypt for a v3 packet
- Key []byte // only valid after a successful Decrypt
-
- encryptedMPI1, encryptedMPI2 encoding.Field
- ephemeralPublicX25519 *x25519.PublicKey // used for x25519
- ephemeralPublicX448 *x448.PublicKey // used for x448
- encryptedSession []byte // used for x25519 and x448
-}
-
-func (e *EncryptedKey) parse(r io.Reader) (err error) {
- var buf [8]byte
- _, err = readFull(r, buf[:versionSize])
- if err != nil {
- return
- }
- e.Version = int(buf[0])
- if e.Version != 3 && e.Version != 6 {
- return errors.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0])))
- }
- if e.Version == 6 {
- //Read a one-octet size of the following two fields.
- if _, err = readFull(r, buf[:1]); err != nil {
- return
- }
- // The size may also be zero, and the key version and
- // fingerprint omitted for an "anonymous recipient"
- if buf[0] != 0 {
- // non-anonymous case
- _, err = readFull(r, buf[:versionSize])
- if err != nil {
- return
- }
- e.KeyVersion = int(buf[0])
- if e.KeyVersion != 4 && e.KeyVersion != 6 {
- return errors.UnsupportedError("unknown public key version " + strconv.Itoa(e.KeyVersion))
- }
- var fingerprint []byte
- if e.KeyVersion == 6 {
- fingerprint = make([]byte, fingerprintSizeV6)
- } else if e.KeyVersion == 4 {
- fingerprint = make([]byte, fingerprintSize)
- }
- _, err = readFull(r, fingerprint)
- if err != nil {
- return
- }
- e.KeyFingerprint = fingerprint
- if e.KeyVersion == 6 {
- e.KeyId = binary.BigEndian.Uint64(e.KeyFingerprint[:keyIdSize])
- } else if e.KeyVersion == 4 {
- e.KeyId = binary.BigEndian.Uint64(e.KeyFingerprint[fingerprintSize-keyIdSize : fingerprintSize])
- }
- }
- } else {
- _, err = readFull(r, buf[:8])
- if err != nil {
- return
- }
- e.KeyId = binary.BigEndian.Uint64(buf[:keyIdSize])
- }
-
- _, err = readFull(r, buf[:1])
- if err != nil {
- return
- }
- e.Algo = PublicKeyAlgorithm(buf[0])
- var cipherFunction byte
- switch e.Algo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
- e.encryptedMPI1 = new(encoding.MPI)
- if _, err = e.encryptedMPI1.ReadFrom(r); err != nil {
- return
- }
- case PubKeyAlgoElGamal:
- e.encryptedMPI1 = new(encoding.MPI)
- if _, err = e.encryptedMPI1.ReadFrom(r); err != nil {
- return
- }
-
- e.encryptedMPI2 = new(encoding.MPI)
- if _, err = e.encryptedMPI2.ReadFrom(r); err != nil {
- return
- }
- case PubKeyAlgoECDH:
- e.encryptedMPI1 = new(encoding.MPI)
- if _, err = e.encryptedMPI1.ReadFrom(r); err != nil {
- return
- }
-
- e.encryptedMPI2 = new(encoding.OID)
- if _, err = e.encryptedMPI2.ReadFrom(r); err != nil {
- return
- }
- case PubKeyAlgoX25519:
- e.ephemeralPublicX25519, e.encryptedSession, cipherFunction, err = x25519.DecodeFields(r, e.Version == 6)
- if err != nil {
- return
- }
- case PubKeyAlgoX448:
- e.ephemeralPublicX448, e.encryptedSession, cipherFunction, err = x448.DecodeFields(r, e.Version == 6)
- if err != nil {
- return
- }
- }
- if e.Version < 6 {
- switch e.Algo {
- case PubKeyAlgoX25519, PubKeyAlgoX448:
- e.CipherFunc = CipherFunction(cipherFunction)
- // Check for validiy is in the Decrypt method
- }
- }
-
- _, err = consumeAll(r)
- return
-}
-
-// Decrypt decrypts an encrypted session key with the given private key. The
-// private key must have been decrypted first.
-// If config is nil, sensible defaults will be used.
-func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error {
- if e.Version < 6 && e.KeyId != 0 && e.KeyId != priv.KeyId {
- return errors.InvalidArgumentError("cannot decrypt encrypted session key for key id " + strconv.FormatUint(e.KeyId, 16) + " with private key id " + strconv.FormatUint(priv.KeyId, 16))
- }
- if e.Version == 6 && e.KeyVersion != 0 && !bytes.Equal(e.KeyFingerprint, priv.Fingerprint) {
- return errors.InvalidArgumentError("cannot decrypt encrypted session key for key fingerprint " + hex.EncodeToString(e.KeyFingerprint) + " with private key fingerprint " + hex.EncodeToString(priv.Fingerprint))
- }
- if e.Algo != priv.PubKeyAlgo {
- return errors.InvalidArgumentError("cannot decrypt encrypted session key of type " + strconv.Itoa(int(e.Algo)) + " with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo)))
- }
- if priv.Dummy() {
- return errors.ErrDummyPrivateKey("dummy key found")
- }
-
- var err error
- var b []byte
-
- // TODO(agl): use session key decryption routines here to avoid
- // padding oracle attacks.
- switch priv.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
- // Supports both *rsa.PrivateKey and crypto.Decrypter
- k := priv.PrivateKey.(crypto.Decrypter)
- b, err = k.Decrypt(config.Random(), padToKeySize(k.Public().(*rsa.PublicKey), e.encryptedMPI1.Bytes()), nil)
- case PubKeyAlgoElGamal:
- c1 := new(big.Int).SetBytes(e.encryptedMPI1.Bytes())
- c2 := new(big.Int).SetBytes(e.encryptedMPI2.Bytes())
- b, err = elgamal.Decrypt(priv.PrivateKey.(*elgamal.PrivateKey), c1, c2)
- case PubKeyAlgoECDH:
- vsG := e.encryptedMPI1.Bytes()
- m := e.encryptedMPI2.Bytes()
- oid := priv.PublicKey.oid.EncodedBytes()
- b, err = ecdh.Decrypt(priv.PrivateKey.(*ecdh.PrivateKey), vsG, m, oid, priv.PublicKey.Fingerprint[:])
- case PubKeyAlgoX25519:
- b, err = x25519.Decrypt(priv.PrivateKey.(*x25519.PrivateKey), e.ephemeralPublicX25519, e.encryptedSession)
- case PubKeyAlgoX448:
- b, err = x448.Decrypt(priv.PrivateKey.(*x448.PrivateKey), e.ephemeralPublicX448, e.encryptedSession)
- default:
- err = errors.InvalidArgumentError("cannot decrypt encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo)))
- }
- if err != nil {
- return err
- }
-
- var key []byte
- switch priv.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal, PubKeyAlgoECDH:
- keyOffset := 0
- if e.Version < 6 {
- e.CipherFunc = CipherFunction(b[0])
- keyOffset = 1
- if !e.CipherFunc.IsSupported() {
- return errors.UnsupportedError("unsupported encryption function")
- }
- }
- key, err = decodeChecksumKey(b[keyOffset:])
- if err != nil {
- return err
- }
- case PubKeyAlgoX25519, PubKeyAlgoX448:
- if e.Version < 6 {
- switch e.CipherFunc {
- case CipherAES128, CipherAES192, CipherAES256:
- break
- default:
- return errors.StructuralError("v3 PKESK mandates AES as cipher function for x25519 and x448")
- }
- }
- key = b[:]
- default:
- return errors.UnsupportedError("unsupported algorithm for decryption")
- }
- e.Key = key
- return nil
-}
-
-// Serialize writes the encrypted key packet, e, to w.
-func (e *EncryptedKey) Serialize(w io.Writer) error {
- var encodedLength int
- switch e.Algo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
- encodedLength = int(e.encryptedMPI1.EncodedLength())
- case PubKeyAlgoElGamal:
- encodedLength = int(e.encryptedMPI1.EncodedLength()) + int(e.encryptedMPI2.EncodedLength())
- case PubKeyAlgoECDH:
- encodedLength = int(e.encryptedMPI1.EncodedLength()) + int(e.encryptedMPI2.EncodedLength())
- case PubKeyAlgoX25519:
- encodedLength = x25519.EncodedFieldsLength(e.encryptedSession, e.Version == 6)
- case PubKeyAlgoX448:
- encodedLength = x448.EncodedFieldsLength(e.encryptedSession, e.Version == 6)
- default:
- return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo)))
- }
-
- packetLen := versionSize /* version */ + keyIdSize /* key id */ + algorithmSize /* algo */ + encodedLength
- if e.Version == 6 {
- packetLen = versionSize /* version */ + algorithmSize /* algo */ + encodedLength + keyVersionSize /* key version */
- if e.KeyVersion == 6 {
- packetLen += fingerprintSizeV6
- } else if e.KeyVersion == 4 {
- packetLen += fingerprintSize
- }
- }
-
- err := serializeHeader(w, packetTypeEncryptedKey, packetLen)
- if err != nil {
- return err
- }
-
- _, err = w.Write([]byte{byte(e.Version)})
- if err != nil {
- return err
- }
- if e.Version == 6 {
- _, err = w.Write([]byte{byte(e.KeyVersion)})
- if err != nil {
- return err
- }
- // The key version number may also be zero,
- // and the fingerprint omitted
- if e.KeyVersion != 0 {
- _, err = w.Write(e.KeyFingerprint)
- if err != nil {
- return err
- }
- }
- } else {
- // Write KeyID
- err = binary.Write(w, binary.BigEndian, e.KeyId)
- if err != nil {
- return err
- }
- }
- _, err = w.Write([]byte{byte(e.Algo)})
- if err != nil {
- return err
- }
-
- switch e.Algo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
- _, err := w.Write(e.encryptedMPI1.EncodedBytes())
- return err
- case PubKeyAlgoElGamal:
- if _, err := w.Write(e.encryptedMPI1.EncodedBytes()); err != nil {
- return err
- }
- _, err := w.Write(e.encryptedMPI2.EncodedBytes())
- return err
- case PubKeyAlgoECDH:
- if _, err := w.Write(e.encryptedMPI1.EncodedBytes()); err != nil {
- return err
- }
- _, err := w.Write(e.encryptedMPI2.EncodedBytes())
- return err
- case PubKeyAlgoX25519:
- err := x25519.EncodeFields(w, e.ephemeralPublicX25519, e.encryptedSession, byte(e.CipherFunc), e.Version == 6)
- return err
- case PubKeyAlgoX448:
- err := x448.EncodeFields(w, e.ephemeralPublicX448, e.encryptedSession, byte(e.CipherFunc), e.Version == 6)
- return err
- default:
- panic("internal error")
- }
-}
-
-// SerializeEncryptedKeyAEAD serializes an encrypted key packet to w that contains
-// key, encrypted to pub.
-// If aeadSupported is set, PKESK v6 is used else v4.
-// If config is nil, sensible defaults will be used.
-func SerializeEncryptedKeyAEAD(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, aeadSupported bool, key []byte, config *Config) error {
- return SerializeEncryptedKeyAEADwithHiddenOption(w, pub, cipherFunc, aeadSupported, key, false, config)
-}
-
-// SerializeEncryptedKeyAEADwithHiddenOption serializes an encrypted key packet to w that contains
-// key, encrypted to pub.
-// Offers the hidden flag option to indicated if the PKESK packet should include a wildcard KeyID.
-// If aeadSupported is set, PKESK v6 is used else v4.
-// If config is nil, sensible defaults will be used.
-func SerializeEncryptedKeyAEADwithHiddenOption(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, aeadSupported bool, key []byte, hidden bool, config *Config) error {
- var buf [36]byte // max possible header size is v6
- lenHeaderWritten := versionSize
- version := 3
-
- if aeadSupported {
- version = 6
- }
- // An implementation MUST NOT generate ElGamal v6 PKESKs.
- if version == 6 && pub.PubKeyAlgo == PubKeyAlgoElGamal {
- return errors.InvalidArgumentError("ElGamal v6 PKESK are not allowed")
- }
- // In v3 PKESKs, for x25519 and x448, mandate using AES
- if version == 3 && (pub.PubKeyAlgo == PubKeyAlgoX25519 || pub.PubKeyAlgo == PubKeyAlgoX448) {
- switch cipherFunc {
- case CipherAES128, CipherAES192, CipherAES256:
- break
- default:
- return errors.InvalidArgumentError("v3 PKESK mandates AES for x25519 and x448")
- }
- }
-
- buf[0] = byte(version)
-
- // If hidden is set, the key should be hidden
- // An implementation MAY accept or use a Key ID of all zeros,
- // or a key version of zero and no key fingerprint, to hide the intended decryption key.
- // See Section 5.1.8. in the open pgp crypto refresh
- if version == 6 {
- if !hidden {
- // A one-octet size of the following two fields.
- buf[1] = byte(keyVersionSize + len(pub.Fingerprint))
- // A one octet key version number.
- buf[2] = byte(pub.Version)
- lenHeaderWritten += keyVersionSize + 1
- // The fingerprint of the public key
- copy(buf[lenHeaderWritten:lenHeaderWritten+len(pub.Fingerprint)], pub.Fingerprint)
- lenHeaderWritten += len(pub.Fingerprint)
- } else {
- // The size may also be zero, and the key version
- // and fingerprint omitted for an "anonymous recipient"
- buf[1] = 0
- lenHeaderWritten += 1
- }
- } else {
- if !hidden {
- binary.BigEndian.PutUint64(buf[versionSize:(versionSize+keyIdSize)], pub.KeyId)
- }
- lenHeaderWritten += keyIdSize
- }
- buf[lenHeaderWritten] = byte(pub.PubKeyAlgo)
- lenHeaderWritten += algorithmSize
-
- var keyBlock []byte
- switch pub.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal, PubKeyAlgoECDH:
- lenKeyBlock := len(key) + 2
- if version < 6 {
- lenKeyBlock += 1 // cipher type included
- }
- keyBlock = make([]byte, lenKeyBlock)
- keyOffset := 0
- if version < 6 {
- keyBlock[0] = byte(cipherFunc)
- keyOffset = 1
- }
- encodeChecksumKey(keyBlock[keyOffset:], key)
- case PubKeyAlgoX25519, PubKeyAlgoX448:
- // algorithm is added in plaintext below
- keyBlock = key
- }
-
- switch pub.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
- return serializeEncryptedKeyRSA(w, config.Random(), buf[:lenHeaderWritten], pub.PublicKey.(*rsa.PublicKey), keyBlock)
- case PubKeyAlgoElGamal:
- return serializeEncryptedKeyElGamal(w, config.Random(), buf[:lenHeaderWritten], pub.PublicKey.(*elgamal.PublicKey), keyBlock)
- case PubKeyAlgoECDH:
- return serializeEncryptedKeyECDH(w, config.Random(), buf[:lenHeaderWritten], pub.PublicKey.(*ecdh.PublicKey), keyBlock, pub.oid, pub.Fingerprint)
- case PubKeyAlgoX25519:
- return serializeEncryptedKeyX25519(w, config.Random(), buf[:lenHeaderWritten], pub.PublicKey.(*x25519.PublicKey), keyBlock, byte(cipherFunc), version)
- case PubKeyAlgoX448:
- return serializeEncryptedKeyX448(w, config.Random(), buf[:lenHeaderWritten], pub.PublicKey.(*x448.PublicKey), keyBlock, byte(cipherFunc), version)
- case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly:
- return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo)))
- }
-
- return errors.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo)))
-}
-
-// SerializeEncryptedKey serializes an encrypted key packet to w that contains
-// key, encrypted to pub.
-// PKESKv6 is used if config.AEAD() is not nil.
-// If config is nil, sensible defaults will be used.
-func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error {
- return SerializeEncryptedKeyAEAD(w, pub, cipherFunc, config.AEAD() != nil, key, config)
-}
-
-// SerializeEncryptedKeyWithHiddenOption serializes an encrypted key packet to w that contains
-// key, encrypted to pub. PKESKv6 is used if config.AEAD() is not nil.
-// The hidden option controls if the packet should be anonymous, i.e., omit key metadata.
-// If config is nil, sensible defaults will be used.
-func SerializeEncryptedKeyWithHiddenOption(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, hidden bool, config *Config) error {
- return SerializeEncryptedKeyAEADwithHiddenOption(w, pub, cipherFunc, config.AEAD() != nil, key, hidden, config)
-}
-
-func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header []byte, pub *rsa.PublicKey, keyBlock []byte) error {
- cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock)
- if err != nil {
- return errors.InvalidArgumentError("RSA encryption failed: " + err.Error())
- }
-
- cipherMPI := encoding.NewMPI(cipherText)
- packetLen := len(header) /* header length */ + int(cipherMPI.EncodedLength())
-
- err = serializeHeader(w, packetTypeEncryptedKey, packetLen)
- if err != nil {
- return err
- }
- _, err = w.Write(header[:])
- if err != nil {
- return err
- }
- _, err = w.Write(cipherMPI.EncodedBytes())
- return err
-}
-
-func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header []byte, pub *elgamal.PublicKey, keyBlock []byte) error {
- c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock)
- if err != nil {
- return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error())
- }
-
- packetLen := len(header) /* header length */
- packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8
- packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8
-
- err = serializeHeader(w, packetTypeEncryptedKey, packetLen)
- if err != nil {
- return err
- }
- _, err = w.Write(header[:])
- if err != nil {
- return err
- }
- if _, err = w.Write(new(encoding.MPI).SetBig(c1).EncodedBytes()); err != nil {
- return err
- }
- _, err = w.Write(new(encoding.MPI).SetBig(c2).EncodedBytes())
- return err
-}
-
-func serializeEncryptedKeyECDH(w io.Writer, rand io.Reader, header []byte, pub *ecdh.PublicKey, keyBlock []byte, oid encoding.Field, fingerprint []byte) error {
- vsG, c, err := ecdh.Encrypt(rand, pub, keyBlock, oid.EncodedBytes(), fingerprint)
- if err != nil {
- return errors.InvalidArgumentError("ECDH encryption failed: " + err.Error())
- }
-
- g := encoding.NewMPI(vsG)
- m := encoding.NewOID(c)
-
- packetLen := len(header) /* header length */
- packetLen += int(g.EncodedLength()) + int(m.EncodedLength())
-
- err = serializeHeader(w, packetTypeEncryptedKey, packetLen)
- if err != nil {
- return err
- }
-
- _, err = w.Write(header[:])
- if err != nil {
- return err
- }
- if _, err = w.Write(g.EncodedBytes()); err != nil {
- return err
- }
- _, err = w.Write(m.EncodedBytes())
- return err
-}
-
-func serializeEncryptedKeyX25519(w io.Writer, rand io.Reader, header []byte, pub *x25519.PublicKey, keyBlock []byte, cipherFunc byte, version int) error {
- ephemeralPublicX25519, ciphertext, err := x25519.Encrypt(rand, pub, keyBlock)
- if err != nil {
- return errors.InvalidArgumentError("x25519 encryption failed: " + err.Error())
- }
-
- packetLen := len(header) /* header length */
- packetLen += x25519.EncodedFieldsLength(ciphertext, version == 6)
-
- err = serializeHeader(w, packetTypeEncryptedKey, packetLen)
- if err != nil {
- return err
- }
-
- _, err = w.Write(header[:])
- if err != nil {
- return err
- }
- return x25519.EncodeFields(w, ephemeralPublicX25519, ciphertext, cipherFunc, version == 6)
-}
-
-func serializeEncryptedKeyX448(w io.Writer, rand io.Reader, header []byte, pub *x448.PublicKey, keyBlock []byte, cipherFunc byte, version int) error {
- ephemeralPublicX448, ciphertext, err := x448.Encrypt(rand, pub, keyBlock)
- if err != nil {
- return errors.InvalidArgumentError("x448 encryption failed: " + err.Error())
- }
-
- packetLen := len(header) /* header length */
- packetLen += x448.EncodedFieldsLength(ciphertext, version == 6)
-
- err = serializeHeader(w, packetTypeEncryptedKey, packetLen)
- if err != nil {
- return err
- }
-
- _, err = w.Write(header[:])
- if err != nil {
- return err
- }
- return x448.EncodeFields(w, ephemeralPublicX448, ciphertext, cipherFunc, version == 6)
-}
-
-func checksumKeyMaterial(key []byte) uint16 {
- var checksum uint16
- for _, v := range key {
- checksum += uint16(v)
- }
- return checksum
-}
-
-func decodeChecksumKey(msg []byte) (key []byte, err error) {
- key = msg[:len(msg)-2]
- expectedChecksum := uint16(msg[len(msg)-2])<<8 | uint16(msg[len(msg)-1])
- checksum := checksumKeyMaterial(key)
- if checksum != expectedChecksum {
- err = errors.StructuralError("session key checksum is incorrect")
- }
- return
-}
-
-func encodeChecksumKey(buffer []byte, key []byte) {
- copy(buffer, key)
- checksum := checksumKeyMaterial(key)
- buffer[len(key)] = byte(checksum >> 8)
- buffer[len(key)+1] = byte(checksum)
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go
deleted file mode 100644
index 8a028c8a171..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "encoding/binary"
- "io"
-)
-
-// LiteralData represents an encrypted file. See RFC 4880, section 5.9.
-type LiteralData struct {
- Format uint8
- IsBinary bool
- FileName string
- Time uint32 // Unix epoch time. Either creation time or modification time. 0 means undefined.
- Body io.Reader
-}
-
-// ForEyesOnly returns whether the contents of the LiteralData have been marked
-// as especially sensitive.
-func (l *LiteralData) ForEyesOnly() bool {
- return l.FileName == "_CONSOLE"
-}
-
-func (l *LiteralData) parse(r io.Reader) (err error) {
- var buf [256]byte
-
- _, err = readFull(r, buf[:2])
- if err != nil {
- return
- }
-
- l.Format = buf[0]
- l.IsBinary = l.Format == 'b'
- fileNameLen := int(buf[1])
-
- _, err = readFull(r, buf[:fileNameLen])
- if err != nil {
- return
- }
-
- l.FileName = string(buf[:fileNameLen])
-
- _, err = readFull(r, buf[:4])
- if err != nil {
- return
- }
-
- l.Time = binary.BigEndian.Uint32(buf[:4])
- l.Body = r
- return
-}
-
-// SerializeLiteral serializes a literal data packet to w and returns a
-// WriteCloser to which the data itself can be written and which MUST be closed
-// on completion. The fileName is truncated to 255 bytes.
-func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) {
- var buf [4]byte
- buf[0] = 'b'
- if !isBinary {
- buf[0] = 'u'
- }
- if len(fileName) > 255 {
- fileName = fileName[:255]
- }
- buf[1] = byte(len(fileName))
-
- inner, err := serializeStreamHeader(w, packetTypeLiteralData)
- if err != nil {
- return
- }
-
- _, err = inner.Write(buf[:2])
- if err != nil {
- return
- }
- _, err = inner.Write([]byte(fileName))
- if err != nil {
- return
- }
- binary.BigEndian.PutUint32(buf[:], time)
- _, err = inner.Write(buf[:])
- if err != nil {
- return
- }
-
- plaintext = inner
- return
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/marker.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/marker.go
deleted file mode 100644
index 1ee378ba3c1..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/marker.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package packet
-
-import (
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
-)
-
-type Marker struct{}
-
-const markerString = "PGP"
-
-// parse just checks if the packet contains "PGP".
-func (m *Marker) parse(reader io.Reader) error {
- var buffer [3]byte
- if _, err := io.ReadFull(reader, buffer[:]); err != nil {
- return err
- }
- if string(buffer[:]) != markerString {
- return errors.StructuralError("invalid marker packet")
- }
- return nil
-}
-
-// SerializeMarker writes a marker packet to writer.
-func SerializeMarker(writer io.Writer) error {
- err := serializeHeader(writer, packetTypeMarker, len(markerString))
- if err != nil {
- return err
- }
- _, err = writer.Write([]byte(markerString))
- return err
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/notation.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/notation.go
deleted file mode 100644
index 2c3e3f50b25..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/notation.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package packet
-
-// Notation type represents a Notation Data subpacket
-// see https://tools.ietf.org/html/rfc4880#section-5.2.3.16
-type Notation struct {
- Name string
- Value []byte
- IsCritical bool
- IsHumanReadable bool
-}
-
-func (notation *Notation) getData() []byte {
- nameData := []byte(notation.Name)
- nameLen := len(nameData)
- valueLen := len(notation.Value)
-
- data := make([]byte, 8+nameLen+valueLen)
- if notation.IsHumanReadable {
- data[0] = 0x80
- }
-
- data[4] = byte(nameLen >> 8)
- data[5] = byte(nameLen)
- data[6] = byte(valueLen >> 8)
- data[7] = byte(valueLen)
- copy(data[8:8+nameLen], nameData)
- copy(data[8+nameLen:], notation.Value)
- return data
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/ocfb.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/ocfb.go
deleted file mode 100644
index 4f26d0a00b7..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/ocfb.go
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// OpenPGP CFB Mode. http://tools.ietf.org/html/rfc4880#section-13.9
-
-package packet
-
-import (
- "crypto/cipher"
-)
-
-type ocfbEncrypter struct {
- b cipher.Block
- fre []byte
- outUsed int
-}
-
-// An OCFBResyncOption determines if the "resynchronization step" of OCFB is
-// performed.
-type OCFBResyncOption bool
-
-const (
- OCFBResync OCFBResyncOption = true
- OCFBNoResync OCFBResyncOption = false
-)
-
-// NewOCFBEncrypter returns a cipher.Stream which encrypts data with OpenPGP's
-// cipher feedback mode using the given cipher.Block, and an initial amount of
-// ciphertext. randData must be random bytes and be the same length as the
-// cipher.Block's block size. Resync determines if the "resynchronization step"
-// from RFC 4880, 13.9 step 7 is performed. Different parts of OpenPGP vary on
-// this point.
-func NewOCFBEncrypter(block cipher.Block, randData []byte, resync OCFBResyncOption) (cipher.Stream, []byte) {
- blockSize := block.BlockSize()
- if len(randData) != blockSize {
- return nil, nil
- }
-
- x := &ocfbEncrypter{
- b: block,
- fre: make([]byte, blockSize),
- outUsed: 0,
- }
- prefix := make([]byte, blockSize+2)
-
- block.Encrypt(x.fre, x.fre)
- for i := 0; i < blockSize; i++ {
- prefix[i] = randData[i] ^ x.fre[i]
- }
-
- block.Encrypt(x.fre, prefix[:blockSize])
- prefix[blockSize] = x.fre[0] ^ randData[blockSize-2]
- prefix[blockSize+1] = x.fre[1] ^ randData[blockSize-1]
-
- if resync {
- block.Encrypt(x.fre, prefix[2:])
- } else {
- x.fre[0] = prefix[blockSize]
- x.fre[1] = prefix[blockSize+1]
- x.outUsed = 2
- }
- return x, prefix
-}
-
-func (x *ocfbEncrypter) XORKeyStream(dst, src []byte) {
- for i := 0; i < len(src); i++ {
- if x.outUsed == len(x.fre) {
- x.b.Encrypt(x.fre, x.fre)
- x.outUsed = 0
- }
-
- x.fre[x.outUsed] ^= src[i]
- dst[i] = x.fre[x.outUsed]
- x.outUsed++
- }
-}
-
-type ocfbDecrypter struct {
- b cipher.Block
- fre []byte
- outUsed int
-}
-
-// NewOCFBDecrypter returns a cipher.Stream which decrypts data with OpenPGP's
-// cipher feedback mode using the given cipher.Block. Prefix must be the first
-// blockSize + 2 bytes of the ciphertext, where blockSize is the cipher.Block's
-// block size. On successful exit, blockSize+2 bytes of decrypted data are written into
-// prefix. Resync determines if the "resynchronization step" from RFC 4880,
-// 13.9 step 7 is performed. Different parts of OpenPGP vary on this point.
-func NewOCFBDecrypter(block cipher.Block, prefix []byte, resync OCFBResyncOption) cipher.Stream {
- blockSize := block.BlockSize()
- if len(prefix) != blockSize+2 {
- return nil
- }
-
- x := &ocfbDecrypter{
- b: block,
- fre: make([]byte, blockSize),
- outUsed: 0,
- }
- prefixCopy := make([]byte, len(prefix))
- copy(prefixCopy, prefix)
-
- block.Encrypt(x.fre, x.fre)
- for i := 0; i < blockSize; i++ {
- prefixCopy[i] ^= x.fre[i]
- }
-
- block.Encrypt(x.fre, prefix[:blockSize])
- prefixCopy[blockSize] ^= x.fre[0]
- prefixCopy[blockSize+1] ^= x.fre[1]
-
- if resync {
- block.Encrypt(x.fre, prefix[2:])
- } else {
- x.fre[0] = prefix[blockSize]
- x.fre[1] = prefix[blockSize+1]
- x.outUsed = 2
- }
- copy(prefix, prefixCopy)
- return x
-}
-
-func (x *ocfbDecrypter) XORKeyStream(dst, src []byte) {
- for i := 0; i < len(src); i++ {
- if x.outUsed == len(x.fre) {
- x.b.Encrypt(x.fre, x.fre)
- x.outUsed = 0
- }
-
- c := src[i]
- dst[i] = x.fre[x.outUsed] ^ src[i]
- x.fre[x.outUsed] = c
- x.outUsed++
- }
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go
deleted file mode 100644
index f393c4063b8..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "crypto"
- "encoding/binary"
- "io"
- "strconv"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
-)
-
-// OnePassSignature represents a one-pass signature packet. See RFC 4880,
-// section 5.4.
-type OnePassSignature struct {
- Version int
- SigType SignatureType
- Hash crypto.Hash
- PubKeyAlgo PublicKeyAlgorithm
- KeyId uint64
- IsLast bool
- Salt []byte // v6 only
- KeyFingerprint []byte // v6 only
-}
-
-func (ops *OnePassSignature) parse(r io.Reader) (err error) {
- var buf [8]byte
- // Read: version | signature type | hash algorithm | public-key algorithm
- _, err = readFull(r, buf[:4])
- if err != nil {
- return
- }
- if buf[0] != 3 && buf[0] != 6 {
- return errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0])))
- }
- ops.Version = int(buf[0])
-
- var ok bool
- ops.Hash, ok = algorithm.HashIdToHashWithSha1(buf[2])
- if !ok {
- return errors.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2])))
- }
-
- ops.SigType = SignatureType(buf[1])
- ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3])
-
- if ops.Version == 6 {
- // Only for v6, a variable-length field containing the salt
- _, err = readFull(r, buf[:1])
- if err != nil {
- return
- }
- saltLength := int(buf[0])
- var expectedSaltLength int
- expectedSaltLength, err = SaltLengthForHash(ops.Hash)
- if err != nil {
- return
- }
- if saltLength != expectedSaltLength {
- err = errors.StructuralError("unexpected salt size for the given hash algorithm")
- return
- }
- salt := make([]byte, expectedSaltLength)
- _, err = readFull(r, salt)
- if err != nil {
- return
- }
- ops.Salt = salt
-
- // Only for v6 packets, 32 octets of the fingerprint of the signing key.
- fingerprint := make([]byte, 32)
- _, err = readFull(r, fingerprint)
- if err != nil {
- return
- }
- ops.KeyFingerprint = fingerprint
- ops.KeyId = binary.BigEndian.Uint64(ops.KeyFingerprint[:8])
- } else {
- _, err = readFull(r, buf[:8])
- if err != nil {
- return
- }
- ops.KeyId = binary.BigEndian.Uint64(buf[:8])
- }
-
- _, err = readFull(r, buf[:1])
- if err != nil {
- return
- }
- ops.IsLast = buf[0] != 0
- return
-}
-
-// Serialize marshals the given OnePassSignature to w.
-func (ops *OnePassSignature) Serialize(w io.Writer) error {
- //v3 length 1+1+1+1+8+1 =
- packetLength := 13
- if ops.Version == 6 {
- // v6 length 1+1+1+1+1+len(salt)+32+1 =
- packetLength = 38 + len(ops.Salt)
- }
-
- if err := serializeHeader(w, packetTypeOnePassSignature, packetLength); err != nil {
- return err
- }
-
- var buf [8]byte
- buf[0] = byte(ops.Version)
- buf[1] = uint8(ops.SigType)
- var ok bool
- buf[2], ok = algorithm.HashToHashIdWithSha1(ops.Hash)
- if !ok {
- return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash)))
- }
- buf[3] = uint8(ops.PubKeyAlgo)
-
- _, err := w.Write(buf[:4])
- if err != nil {
- return err
- }
-
- if ops.Version == 6 {
- // write salt for v6 signatures
- _, err := w.Write([]byte{uint8(len(ops.Salt))})
- if err != nil {
- return err
- }
- _, err = w.Write(ops.Salt)
- if err != nil {
- return err
- }
-
- // write fingerprint v6 signatures
- _, err = w.Write(ops.KeyFingerprint)
- if err != nil {
- return err
- }
- } else {
- binary.BigEndian.PutUint64(buf[:8], ops.KeyId)
- _, err := w.Write(buf[:8])
- if err != nil {
- return err
- }
- }
-
- isLast := []byte{byte(0)}
- if ops.IsLast {
- isLast[0] = 1
- }
-
- _, err = w.Write(isLast)
- return err
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go
deleted file mode 100644
index cef7c661d3f..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go
+++ /dev/null
@@ -1,170 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "bytes"
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
-)
-
-// OpaquePacket represents an OpenPGP packet as raw, unparsed data. This is
-// useful for splitting and storing the original packet contents separately,
-// handling unsupported packet types or accessing parts of the packet not yet
-// implemented by this package.
-type OpaquePacket struct {
- // Packet type
- Tag uint8
- // Reason why the packet was parsed opaquely
- Reason error
- // Binary contents of the packet data
- Contents []byte
-}
-
-func (op *OpaquePacket) parse(r io.Reader) (err error) {
- op.Contents, err = io.ReadAll(r)
- return
-}
-
-// Serialize marshals the packet to a writer in its original form, including
-// the packet header.
-func (op *OpaquePacket) Serialize(w io.Writer) (err error) {
- err = serializeHeader(w, packetType(op.Tag), len(op.Contents))
- if err == nil {
- _, err = w.Write(op.Contents)
- }
- return
-}
-
-// Parse attempts to parse the opaque contents into a structure supported by
-// this package. If the packet is not known then the result will be another
-// OpaquePacket.
-func (op *OpaquePacket) Parse() (p Packet, err error) {
- hdr := bytes.NewBuffer(nil)
- err = serializeHeader(hdr, packetType(op.Tag), len(op.Contents))
- if err != nil {
- op.Reason = err
- return op, err
- }
- p, err = Read(io.MultiReader(hdr, bytes.NewBuffer(op.Contents)))
- if err != nil {
- op.Reason = err
- p = op
- }
- return
-}
-
-// OpaqueReader reads OpaquePackets from an io.Reader.
-type OpaqueReader struct {
- r io.Reader
-}
-
-func NewOpaqueReader(r io.Reader) *OpaqueReader {
- return &OpaqueReader{r: r}
-}
-
-// Read the next OpaquePacket.
-func (or *OpaqueReader) Next() (op *OpaquePacket, err error) {
- tag, _, contents, err := readHeader(or.r)
- if err != nil {
- return
- }
- op = &OpaquePacket{Tag: uint8(tag), Reason: err}
- err = op.parse(contents)
- if err != nil {
- consumeAll(contents)
- }
- return
-}
-
-// OpaqueSubpacket represents an unparsed OpenPGP subpacket,
-// as found in signature and user attribute packets.
-type OpaqueSubpacket struct {
- SubType uint8
- EncodedLength []byte // Store the original encoded length for signature verifications.
- Contents []byte
-}
-
-// OpaqueSubpackets extracts opaque, unparsed OpenPGP subpackets from
-// their byte representation.
-func OpaqueSubpackets(contents []byte) (result []*OpaqueSubpacket, err error) {
- var (
- subHeaderLen int
- subPacket *OpaqueSubpacket
- )
- for len(contents) > 0 {
- subHeaderLen, subPacket, err = nextSubpacket(contents)
- if err != nil {
- break
- }
- result = append(result, subPacket)
- contents = contents[subHeaderLen+len(subPacket.Contents):]
- }
- return
-}
-
-func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacket, err error) {
- // RFC 4880, section 5.2.3.1
- var subLen uint32
- var encodedLength []byte
- if len(contents) < 1 {
- goto Truncated
- }
- subPacket = &OpaqueSubpacket{}
- switch {
- case contents[0] < 192:
- subHeaderLen = 2 // 1 length byte, 1 subtype byte
- if len(contents) < subHeaderLen {
- goto Truncated
- }
- encodedLength = contents[0:1]
- subLen = uint32(contents[0])
- contents = contents[1:]
- case contents[0] < 255:
- subHeaderLen = 3 // 2 length bytes, 1 subtype
- if len(contents) < subHeaderLen {
- goto Truncated
- }
- encodedLength = contents[0:2]
- subLen = uint32(contents[0]-192)<<8 + uint32(contents[1]) + 192
- contents = contents[2:]
- default:
- subHeaderLen = 6 // 5 length bytes, 1 subtype
- if len(contents) < subHeaderLen {
- goto Truncated
- }
- encodedLength = contents[0:5]
- subLen = uint32(contents[1])<<24 |
- uint32(contents[2])<<16 |
- uint32(contents[3])<<8 |
- uint32(contents[4])
- contents = contents[5:]
-
- }
- if subLen > uint32(len(contents)) || subLen == 0 {
- goto Truncated
- }
- subPacket.SubType = contents[0]
- subPacket.EncodedLength = encodedLength
- subPacket.Contents = contents[1:subLen]
- return
-Truncated:
- err = errors.StructuralError("subpacket truncated")
- return
-}
-
-func (osp *OpaqueSubpacket) Serialize(w io.Writer) (err error) {
- buf := make([]byte, 6)
- copy(buf, osp.EncodedLength)
- n := len(osp.EncodedLength)
-
- buf[n] = osp.SubType
- if _, err = w.Write(buf[:n+1]); err != nil {
- return
- }
- _, err = w.Write(osp.Contents)
- return
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go
deleted file mode 100644
index da12fbce060..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go
+++ /dev/null
@@ -1,675 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package packet implements parsing and serialization of OpenPGP packets, as
-// specified in RFC 4880.
-package packet // import "github.com/ProtonMail/go-crypto/v2/openpgp/packet"
-
-import (
- "bytes"
- "crypto/cipher"
- "crypto/rsa"
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
-)
-
-// readFull is the same as io.ReadFull except that reading zero bytes returns
-// ErrUnexpectedEOF rather than EOF.
-func readFull(r io.Reader, buf []byte) (n int, err error) {
- n, err = io.ReadFull(r, buf)
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- return
-}
-
-// readLength reads an OpenPGP length from r. See RFC 4880, section 4.2.2.
-func readLength(r io.Reader) (length int64, isPartial bool, err error) {
- var buf [4]byte
- _, err = readFull(r, buf[:1])
- if err != nil {
- return
- }
- switch {
- case buf[0] < 192:
- length = int64(buf[0])
- case buf[0] < 224:
- length = int64(buf[0]-192) << 8
- _, err = readFull(r, buf[0:1])
- if err != nil {
- return
- }
- length += int64(buf[0]) + 192
- case buf[0] < 255:
- length = int64(1) << (buf[0] & 0x1f)
- isPartial = true
- default:
- _, err = readFull(r, buf[0:4])
- if err != nil {
- return
- }
- length = int64(buf[0])<<24 |
- int64(buf[1])<<16 |
- int64(buf[2])<<8 |
- int64(buf[3])
- }
- return
-}
-
-// partialLengthReader wraps an io.Reader and handles OpenPGP partial lengths.
-// The continuation lengths are parsed and removed from the stream and EOF is
-// returned at the end of the packet. See RFC 4880, section 4.2.2.4.
-type partialLengthReader struct {
- r io.Reader
- remaining int64
- isPartial bool
-}
-
-func (r *partialLengthReader) Read(p []byte) (n int, err error) {
- for r.remaining == 0 {
- if !r.isPartial {
- return 0, io.EOF
- }
- r.remaining, r.isPartial, err = readLength(r.r)
- if err != nil {
- return 0, err
- }
- }
-
- toRead := int64(len(p))
- if toRead > r.remaining {
- toRead = r.remaining
- }
-
- n, err = r.r.Read(p[:int(toRead)])
- r.remaining -= int64(n)
- if n < int(toRead) && err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- return
-}
-
-// partialLengthWriter writes a stream of data using OpenPGP partial lengths.
-// See RFC 4880, section 4.2.2.4.
-type partialLengthWriter struct {
- w io.WriteCloser
- buf bytes.Buffer
- lengthByte [1]byte
-}
-
-func (w *partialLengthWriter) Write(p []byte) (n int, err error) {
- bufLen := w.buf.Len()
- if bufLen > 512 {
- for power := uint(30); ; power-- {
- l := 1 << power
- if bufLen >= l {
- w.lengthByte[0] = 224 + uint8(power)
- _, err = w.w.Write(w.lengthByte[:])
- if err != nil {
- return
- }
- var m int
- m, err = w.w.Write(w.buf.Next(l))
- if err != nil {
- return
- }
- if m != l {
- return 0, io.ErrShortWrite
- }
- break
- }
- }
- }
- return w.buf.Write(p)
-}
-
-func (w *partialLengthWriter) Close() (err error) {
- len := w.buf.Len()
- err = serializeLength(w.w, len)
- if err != nil {
- return err
- }
- _, err = w.buf.WriteTo(w.w)
- if err != nil {
- return err
- }
- return w.w.Close()
-}
-
-// A spanReader is an io.LimitReader, but it returns ErrUnexpectedEOF if the
-// underlying Reader returns EOF before the limit has been reached.
-type spanReader struct {
- r io.Reader
- n int64
-}
-
-func (l *spanReader) Read(p []byte) (n int, err error) {
- if l.n <= 0 {
- return 0, io.EOF
- }
- if int64(len(p)) > l.n {
- p = p[0:l.n]
- }
- n, err = l.r.Read(p)
- l.n -= int64(n)
- if l.n > 0 && err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- return
-}
-
-// readHeader parses a packet header and returns an io.Reader which will return
-// the contents of the packet. See RFC 4880, section 4.2.
-func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader, err error) {
- var buf [4]byte
- _, err = io.ReadFull(r, buf[:1])
- if err != nil {
- return
- }
- if buf[0]&0x80 == 0 {
- err = errors.StructuralError("tag byte does not have MSB set")
- return
- }
- if buf[0]&0x40 == 0 {
- // Old format packet
- tag = packetType((buf[0] & 0x3f) >> 2)
- lengthType := buf[0] & 3
- if lengthType == 3 {
- length = -1
- contents = r
- return
- }
- lengthBytes := 1 << lengthType
- _, err = readFull(r, buf[0:lengthBytes])
- if err != nil {
- return
- }
- for i := 0; i < lengthBytes; i++ {
- length <<= 8
- length |= int64(buf[i])
- }
- contents = &spanReader{r, length}
- return
- }
-
- // New format packet
- tag = packetType(buf[0] & 0x3f)
- length, isPartial, err := readLength(r)
- if err != nil {
- return
- }
- if isPartial {
- contents = &partialLengthReader{
- remaining: length,
- isPartial: true,
- r: r,
- }
- length = -1
- } else {
- contents = &spanReader{r, length}
- }
- return
-}
-
-// serializeHeader writes an OpenPGP packet header to w. See RFC 4880, section
-// 4.2.
-func serializeHeader(w io.Writer, ptype packetType, length int) (err error) {
- err = serializeType(w, ptype)
- if err != nil {
- return
- }
- return serializeLength(w, length)
-}
-
-// serializeType writes an OpenPGP packet type to w. See RFC 4880, section
-// 4.2.
-func serializeType(w io.Writer, ptype packetType) (err error) {
- var buf [1]byte
- buf[0] = 0x80 | 0x40 | byte(ptype)
- _, err = w.Write(buf[:])
- return
-}
-
-// serializeLength writes an OpenPGP packet length to w. See RFC 4880, section
-// 4.2.2.
-func serializeLength(w io.Writer, length int) (err error) {
- var buf [5]byte
- var n int
-
- if length < 192 {
- buf[0] = byte(length)
- n = 1
- } else if length < 8384 {
- length -= 192
- buf[0] = 192 + byte(length>>8)
- buf[1] = byte(length)
- n = 2
- } else {
- buf[0] = 255
- buf[1] = byte(length >> 24)
- buf[2] = byte(length >> 16)
- buf[3] = byte(length >> 8)
- buf[4] = byte(length)
- n = 5
- }
-
- _, err = w.Write(buf[:n])
- return
-}
-
-// serializeStreamHeader writes an OpenPGP packet header to w where the
-// length of the packet is unknown. It returns a io.WriteCloser which can be
-// used to write the contents of the packet. See RFC 4880, section 4.2.
-func serializeStreamHeader(w io.WriteCloser, ptype packetType) (out io.WriteCloser, err error) {
- err = serializeType(w, ptype)
- if err != nil {
- return
- }
- out = &partialLengthWriter{w: w}
- return
-}
-
-// Packet represents an OpenPGP packet. Users are expected to try casting
-// instances of this interface to specific packet types.
-type Packet interface {
- parse(io.Reader) error
-}
-
-// consumeAll reads from the given Reader until error, returning the number of
-// bytes read.
-func consumeAll(r io.Reader) (n int64, err error) {
- var m int
- var buf [1024]byte
-
- for {
- m, err = r.Read(buf[:])
- n += int64(m)
- if err == io.EOF {
- err = nil
- return
- }
- if err != nil {
- return
- }
- }
-}
-
-// packetType represents the numeric ids of the different OpenPGP packet types. See
-// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-2
-type packetType uint8
-
-const (
- packetTypeEncryptedKey packetType = 1
- packetTypeSignature packetType = 2
- packetTypeSymmetricKeyEncrypted packetType = 3
- packetTypeOnePassSignature packetType = 4
- packetTypePrivateKey packetType = 5
- packetTypePublicKey packetType = 6
- packetTypePrivateSubkey packetType = 7
- packetTypeCompressed packetType = 8
- packetTypeSymmetricallyEncrypted packetType = 9
- packetTypeMarker packetType = 10
- packetTypeLiteralData packetType = 11
- packetTypeTrust packetType = 12
- packetTypeUserId packetType = 13
- packetTypePublicSubkey packetType = 14
- packetTypeUserAttribute packetType = 17
- packetTypeSymmetricallyEncryptedIntegrityProtected packetType = 18
- packetTypeAEADEncrypted packetType = 20
- packetPadding packetType = 21
-)
-
-// EncryptedDataPacket holds encrypted data. It is currently implemented by
-// SymmetricallyEncrypted and AEADEncrypted.
-type EncryptedDataPacket interface {
- Decrypt(CipherFunction, []byte) (io.ReadCloser, error)
-}
-
-// Read reads a single OpenPGP packet from the given io.Reader. If there is an
-// error parsing a packet, the whole packet is consumed from the input.
-func Read(r io.Reader) (p Packet, err error) {
- tag, len, contents, err := readHeader(r)
- if err != nil {
- return
- }
-
- switch tag {
- case packetTypeEncryptedKey:
- p = new(EncryptedKey)
- case packetTypeSignature:
- p = new(Signature)
- case packetTypeSymmetricKeyEncrypted:
- p = new(SymmetricKeyEncrypted)
- case packetTypeOnePassSignature:
- p = new(OnePassSignature)
- case packetTypePrivateKey, packetTypePrivateSubkey:
- pk := new(PrivateKey)
- if tag == packetTypePrivateSubkey {
- pk.IsSubkey = true
- }
- p = pk
- case packetTypePublicKey, packetTypePublicSubkey:
- isSubkey := tag == packetTypePublicSubkey
- p = &PublicKey{IsSubkey: isSubkey}
- case packetTypeCompressed:
- p = new(Compressed)
- case packetTypeSymmetricallyEncrypted:
- p = new(SymmetricallyEncrypted)
- case packetTypeLiteralData:
- p = new(LiteralData)
- case packetTypeUserId:
- p = new(UserId)
- case packetTypeUserAttribute:
- p = new(UserAttribute)
- case packetTypeSymmetricallyEncryptedIntegrityProtected:
- se := new(SymmetricallyEncrypted)
- se.IntegrityProtected = true
- p = se
- case packetTypeAEADEncrypted:
- p = new(AEADEncrypted)
- case packetPadding:
- p = Padding(len)
- case packetTypeMarker:
- p = new(Marker)
- case packetTypeTrust:
- // Not implemented, just consume
- err = errors.UnknownPacketTypeError(tag)
- default:
- // Packet Tags from 0 to 39 are critical.
- // Packet Tags from 40 to 63 are non-critical.
- if tag < 40 {
- err = errors.CriticalUnknownPacketTypeError(tag)
- } else {
- err = errors.UnknownPacketTypeError(tag)
- }
- }
- if p != nil {
- err = p.parse(contents)
- }
- if err != nil {
- consumeAll(contents)
- }
- return
-}
-
-// ReadWithCheck reads a single OpenPGP message packet from the given io.Reader. If there is an
-// error parsing a packet, the whole packet is consumed from the input.
-// ReadWithCheck additionally checks if the OpenPGP message packet sequence adheres
-// to the packet composition rules in rfc4880, if not throws an error.
-func ReadWithCheck(r io.Reader, sequence *SequenceVerifier) (p Packet, msgErr error, err error) {
- tag, len, contents, err := readHeader(r)
- if err != nil {
- return
- }
- switch tag {
- case packetTypeEncryptedKey:
- msgErr = sequence.Next(ESKSymbol)
- p = new(EncryptedKey)
- case packetTypeSignature:
- msgErr = sequence.Next(SigSymbol)
- p = new(Signature)
- case packetTypeSymmetricKeyEncrypted:
- msgErr = sequence.Next(ESKSymbol)
- p = new(SymmetricKeyEncrypted)
- case packetTypeOnePassSignature:
- msgErr = sequence.Next(OPSSymbol)
- p = new(OnePassSignature)
- case packetTypeCompressed:
- msgErr = sequence.Next(CompSymbol)
- p = new(Compressed)
- case packetTypeSymmetricallyEncrypted:
- msgErr = sequence.Next(EncSymbol)
- p = new(SymmetricallyEncrypted)
- case packetTypeLiteralData:
- msgErr = sequence.Next(LDSymbol)
- p = new(LiteralData)
- case packetTypeSymmetricallyEncryptedIntegrityProtected:
- msgErr = sequence.Next(EncSymbol)
- se := new(SymmetricallyEncrypted)
- se.IntegrityProtected = true
- p = se
- case packetTypeAEADEncrypted:
- msgErr = sequence.Next(EncSymbol)
- p = new(AEADEncrypted)
- case packetPadding:
- p = Padding(len)
- case packetTypeMarker:
- p = new(Marker)
- case packetTypeTrust:
- // Not implemented, just consume
- err = errors.UnknownPacketTypeError(tag)
- case packetTypePrivateKey,
- packetTypePrivateSubkey,
- packetTypePublicKey,
- packetTypePublicSubkey,
- packetTypeUserId,
- packetTypeUserAttribute:
- msgErr = sequence.Next(UnknownSymbol)
- consumeAll(contents)
- default:
- // Packet Tags from 0 to 39 are critical.
- // Packet Tags from 40 to 63 are non-critical.
- if tag < 40 {
- err = errors.CriticalUnknownPacketTypeError(tag)
- } else {
- err = errors.UnknownPacketTypeError(tag)
- }
- }
- if p != nil {
- err = p.parse(contents)
- }
- if err != nil {
- consumeAll(contents)
- }
- return
-}
-
-// SignatureType represents the different semantic meanings of an OpenPGP
-// signature. See RFC 4880, section 5.2.1.
-type SignatureType uint8
-
-const (
- SigTypeBinary SignatureType = 0x00
- SigTypeText SignatureType = 0x01
- SigTypeGenericCert SignatureType = 0x10
- SigTypePersonaCert SignatureType = 0x11
- SigTypeCasualCert SignatureType = 0x12
- SigTypePositiveCert SignatureType = 0x13
- SigTypeSubkeyBinding SignatureType = 0x18
- SigTypePrimaryKeyBinding SignatureType = 0x19
- SigTypeDirectSignature SignatureType = 0x1F
- SigTypeKeyRevocation SignatureType = 0x20
- SigTypeSubkeyRevocation SignatureType = 0x28
- SigTypeCertificationRevocation SignatureType = 0x30
-)
-
-// PublicKeyAlgorithm represents the different public key system specified for
-// OpenPGP. See
-// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-12
-type PublicKeyAlgorithm uint8
-
-const (
- PubKeyAlgoRSA PublicKeyAlgorithm = 1
- PubKeyAlgoElGamal PublicKeyAlgorithm = 16
- PubKeyAlgoDSA PublicKeyAlgorithm = 17
- // RFC 6637, Section 5.
- PubKeyAlgoECDH PublicKeyAlgorithm = 18
- PubKeyAlgoECDSA PublicKeyAlgorithm = 19
- // https://www.ietf.org/archive/id/draft-koch-eddsa-for-openpgp-04.txt
- PubKeyAlgoEdDSA PublicKeyAlgorithm = 22
- // https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh
- PubKeyAlgoX25519 PublicKeyAlgorithm = 25
- PubKeyAlgoX448 PublicKeyAlgorithm = 26
- PubKeyAlgoEd25519 PublicKeyAlgorithm = 27
- PubKeyAlgoEd448 PublicKeyAlgorithm = 28
-
- // Deprecated in RFC 4880, Section 13.5. Use key flags instead.
- PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2
- PubKeyAlgoRSASignOnly PublicKeyAlgorithm = 3
-)
-
-// CanEncrypt returns true if it's possible to encrypt a message to a public
-// key of the given type.
-func (pka PublicKeyAlgorithm) CanEncrypt() bool {
- switch pka {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal, PubKeyAlgoECDH, PubKeyAlgoX25519, PubKeyAlgoX448:
- return true
- }
- return false
-}
-
-// CanSign returns true if it's possible for a public key of the given type to
-// sign a message.
-func (pka PublicKeyAlgorithm) CanSign() bool {
- switch pka {
- case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA, PubKeyAlgoEd25519, PubKeyAlgoEd448:
- return true
- }
- return false
-}
-
-// CipherFunction represents the different block ciphers specified for OpenPGP. See
-// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13
-type CipherFunction algorithm.CipherFunction
-
-const (
- Cipher3DES CipherFunction = 2
- CipherCAST5 CipherFunction = 3
- CipherAES128 CipherFunction = 7
- CipherAES192 CipherFunction = 8
- CipherAES256 CipherFunction = 9
-)
-
-// KeySize returns the key size, in bytes, of cipher.
-func (cipher CipherFunction) KeySize() int {
- return algorithm.CipherFunction(cipher).KeySize()
-}
-
-// IsSupported returns true if the cipher is supported from the library
-func (cipher CipherFunction) IsSupported() bool {
- return algorithm.CipherFunction(cipher).KeySize() > 0
-}
-
-// blockSize returns the block size, in bytes, of cipher.
-func (cipher CipherFunction) blockSize() int {
- return algorithm.CipherFunction(cipher).BlockSize()
-}
-
-// new returns a fresh instance of the given cipher.
-func (cipher CipherFunction) new(key []byte) (block cipher.Block) {
- return algorithm.CipherFunction(cipher).New(key)
-}
-
-// padToKeySize left-pads a MPI with zeroes to match the length of the
-// specified RSA public.
-func padToKeySize(pub *rsa.PublicKey, b []byte) []byte {
- k := (pub.N.BitLen() + 7) / 8
- if len(b) >= k {
- return b
- }
- bb := make([]byte, k)
- copy(bb[len(bb)-len(b):], b)
- return bb
-}
-
-// CompressionAlgo Represents the different compression algorithms
-// supported by OpenPGP (except for BZIP2, which is not currently
-// supported). See Section 9.3 of RFC 4880.
-type CompressionAlgo uint8
-
-const (
- CompressionNone CompressionAlgo = 0
- CompressionZIP CompressionAlgo = 1
- CompressionZLIB CompressionAlgo = 2
-)
-
-// AEADMode represents the different Authenticated Encryption with Associated
-// Data specified for OpenPGP.
-// See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-9.6
-type AEADMode algorithm.AEADMode
-
-const (
- AEADModeEAX AEADMode = 1
- AEADModeOCB AEADMode = 2
- AEADModeGCM AEADMode = 3
-)
-
-func (mode AEADMode) IvLength() int {
- return algorithm.AEADMode(mode).NonceLength()
-}
-
-func (mode AEADMode) TagLength() int {
- return algorithm.AEADMode(mode).TagLength()
-}
-
-// IsSupported returns true if the aead mode is supported from the library
-func (mode AEADMode) IsSupported() bool {
- return algorithm.AEADMode(mode).TagLength() > 0
-}
-
-// new returns a fresh instance of the given mode.
-func (mode AEADMode) new(block cipher.Block) cipher.AEAD {
- return algorithm.AEADMode(mode).New(block)
-}
-
-// ReasonForRevocation represents a revocation reason code as per RFC4880
-// section 5.2.3.23.
-type ReasonForRevocation uint8
-
-const (
- NoReason ReasonForRevocation = 0
- KeySuperseded ReasonForRevocation = 1
- KeyCompromised ReasonForRevocation = 2
- KeyRetired ReasonForRevocation = 3
- UserIDNotValid ReasonForRevocation = 32
- Unknown ReasonForRevocation = 200
-)
-
-func NewReasonForRevocation(value byte) ReasonForRevocation {
- if value < 4 || value == 32 {
- return ReasonForRevocation(value)
- }
- return Unknown
-}
-
-// Curve is a mapping to supported ECC curves for key generation.
-// See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-06.html#name-curve-specific-wire-formats
-type Curve string
-
-const (
- Curve25519 Curve = "Curve25519"
- Curve448 Curve = "Curve448"
- CurveNistP256 Curve = "P256"
- CurveNistP384 Curve = "P384"
- CurveNistP521 Curve = "P521"
- CurveSecP256k1 Curve = "SecP256k1"
- CurveBrainpoolP256 Curve = "BrainpoolP256"
- CurveBrainpoolP384 Curve = "BrainpoolP384"
- CurveBrainpoolP512 Curve = "BrainpoolP512"
-)
-
-// TrustLevel represents a trust level per RFC4880 5.2.3.13
-type TrustLevel uint8
-
-// TrustAmount represents a trust amount per RFC4880 5.2.3.13
-type TrustAmount uint8
-
-const (
- // versionSize is the length in bytes of the version value.
- versionSize = 1
- // algorithmSize is the length in bytes of the key algorithm value.
- algorithmSize = 1
- // keyVersionSize is the length in bytes of the key version value
- keyVersionSize = 1
- // keyIdSize is the length in bytes of the key identifier value.
- keyIdSize = 8
- // timestampSize is the length in bytes of encoded timestamps.
- timestampSize = 4
- // fingerprintSizeV6 is the length in bytes of the key fingerprint in v6.
- fingerprintSizeV6 = 32
- // fingerprintSize is the length in bytes of the key fingerprint.
- fingerprintSize = 20
-)
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet_sequence.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet_sequence.go
deleted file mode 100644
index 55a8a56c2d1..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet_sequence.go
+++ /dev/null
@@ -1,222 +0,0 @@
-package packet
-
-// This file implements the pushdown automata (PDA) from PGPainless (Paul Schaub)
-// to verify pgp packet sequences. See Paul's blogpost for more details:
-// https://blog.jabberhead.tk/2022/10/26/implementing-packet-sequence-validation-using-pushdown-automata/
-import (
- "fmt"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
-)
-
-func NewErrMalformedMessage(from State, input InputSymbol, stackSymbol StackSymbol) errors.ErrMalformedMessage {
- return errors.ErrMalformedMessage(fmt.Sprintf("state %d, input symbol %d, stack symbol %d ", from, input, stackSymbol))
-}
-
-// InputSymbol defines the input alphabet of the PDA
-type InputSymbol uint8
-
-const (
- LDSymbol InputSymbol = iota
- SigSymbol
- OPSSymbol
- CompSymbol
- ESKSymbol
- EncSymbol
- EOSSymbol
- UnknownSymbol
-)
-
-// StackSymbol defines the stack alphabet of the PDA
-type StackSymbol int8
-
-const (
- MsgStackSymbol StackSymbol = iota
- OpsStackSymbol
- KeyStackSymbol
- EndStackSymbol
- EmptyStackSymbol
-)
-
-// State defines the states of the PDA
-type State int8
-
-const (
- OpenPGPMessage State = iota
- ESKMessage
- LiteralMessage
- CompressedMessage
- EncryptedMessage
- ValidMessage
-)
-
-// transition represents a state transition in the PDA
-type transition func(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error)
-
-// SequenceVerifier is a pushdown automata to verify
-// PGP messages packet sequences according to rfc4880.
-type SequenceVerifier struct {
- stack []StackSymbol
- state State
-}
-
-// Next performs a state transition with the given input symbol.
-// If the transition fails a ErrMalformedMessage is returned.
-func (sv *SequenceVerifier) Next(input InputSymbol) error {
- for {
- stackSymbol := sv.popStack()
- transitionFunc := getTransition(sv.state)
- nextState, newStackSymbols, redo, err := transitionFunc(input, stackSymbol)
- if err != nil {
- return err
- }
- if redo {
- sv.pushStack(stackSymbol)
- }
- for _, newStackSymbol := range newStackSymbols {
- sv.pushStack(newStackSymbol)
- }
- sv.state = nextState
- if !redo {
- break
- }
- }
- return nil
-}
-
-// Valid returns true if RDA is in a valid state.
-func (sv *SequenceVerifier) Valid() bool {
- return sv.state == ValidMessage && len(sv.stack) == 0
-}
-
-func (sv *SequenceVerifier) AssertValid() error {
- if !sv.Valid() {
- return errors.ErrMalformedMessage("invalid message")
- }
- return nil
-}
-
-func NewSequenceVerifier() *SequenceVerifier {
- return &SequenceVerifier{
- stack: []StackSymbol{EndStackSymbol, MsgStackSymbol},
- state: OpenPGPMessage,
- }
-}
-
-func (sv *SequenceVerifier) popStack() StackSymbol {
- if len(sv.stack) == 0 {
- return EmptyStackSymbol
- }
- elemIndex := len(sv.stack) - 1
- stackSymbol := sv.stack[elemIndex]
- sv.stack = sv.stack[:elemIndex]
- return stackSymbol
-}
-
-func (sv *SequenceVerifier) pushStack(stackSymbol StackSymbol) {
- sv.stack = append(sv.stack, stackSymbol)
-}
-
-func getTransition(from State) transition {
- switch from {
- case OpenPGPMessage:
- return fromOpenPGPMessage
- case LiteralMessage:
- return fromLiteralMessage
- case CompressedMessage:
- return fromCompressedMessage
- case EncryptedMessage:
- return fromEncryptedMessage
- case ESKMessage:
- return fromESKMessage
- case ValidMessage:
- return fromValidMessage
- }
- return nil
-}
-
-// fromOpenPGPMessage is the transition for the state OpenPGPMessage.
-func fromOpenPGPMessage(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error) {
- if stackSymbol != MsgStackSymbol {
- return 0, nil, false, NewErrMalformedMessage(OpenPGPMessage, input, stackSymbol)
- }
- switch input {
- case LDSymbol:
- return LiteralMessage, nil, false, nil
- case SigSymbol:
- return OpenPGPMessage, []StackSymbol{MsgStackSymbol}, false, nil
- case OPSSymbol:
- return OpenPGPMessage, []StackSymbol{OpsStackSymbol, MsgStackSymbol}, false, nil
- case CompSymbol:
- return CompressedMessage, nil, false, nil
- case ESKSymbol:
- return ESKMessage, []StackSymbol{KeyStackSymbol}, false, nil
- case EncSymbol:
- return EncryptedMessage, nil, false, nil
- }
- return 0, nil, false, NewErrMalformedMessage(OpenPGPMessage, input, stackSymbol)
-}
-
-// fromESKMessage is the transition for the state ESKMessage.
-func fromESKMessage(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error) {
- if stackSymbol != KeyStackSymbol {
- return 0, nil, false, NewErrMalformedMessage(ESKMessage, input, stackSymbol)
- }
- switch input {
- case ESKSymbol:
- return ESKMessage, []StackSymbol{KeyStackSymbol}, false, nil
- case EncSymbol:
- return EncryptedMessage, nil, false, nil
- }
- return 0, nil, false, NewErrMalformedMessage(ESKMessage, input, stackSymbol)
-}
-
-// fromLiteralMessage is the transition for the state LiteralMessage.
-func fromLiteralMessage(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error) {
- switch input {
- case SigSymbol:
- if stackSymbol == OpsStackSymbol {
- return LiteralMessage, nil, false, nil
- }
- case EOSSymbol:
- if stackSymbol == EndStackSymbol {
- return ValidMessage, nil, false, nil
- }
- }
- return 0, nil, false, NewErrMalformedMessage(LiteralMessage, input, stackSymbol)
-}
-
-// fromLiteralMessage is the transition for the state CompressedMessage.
-func fromCompressedMessage(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error) {
- switch input {
- case SigSymbol:
- if stackSymbol == OpsStackSymbol {
- return CompressedMessage, nil, false, nil
- }
- case EOSSymbol:
- if stackSymbol == EndStackSymbol {
- return ValidMessage, nil, false, nil
- }
- }
- return OpenPGPMessage, []StackSymbol{MsgStackSymbol}, true, nil
-}
-
-// fromEncryptedMessage is the transition for the state EncryptedMessage.
-func fromEncryptedMessage(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error) {
- switch input {
- case SigSymbol:
- if stackSymbol == OpsStackSymbol {
- return EncryptedMessage, nil, false, nil
- }
- case EOSSymbol:
- if stackSymbol == EndStackSymbol {
- return ValidMessage, nil, false, nil
- }
- }
- return OpenPGPMessage, []StackSymbol{MsgStackSymbol}, true, nil
-}
-
-// fromValidMessage is the transition for the state ValidMessage.
-func fromValidMessage(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error) {
- return 0, nil, false, NewErrMalformedMessage(ValidMessage, input, stackSymbol)
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet_unsupported.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet_unsupported.go
deleted file mode 100644
index 2d714723cf8..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet_unsupported.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package packet
-
-import (
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
-)
-
-// UnsupportedPackage represents a OpenPGP packet with a known packet type
-// but with unsupported content.
-type UnsupportedPacket struct {
- IncompletePacket Packet
- Error errors.UnsupportedError
-}
-
-// Implements the Packet interface
-func (up *UnsupportedPacket) parse(read io.Reader) error {
- err := up.IncompletePacket.parse(read)
- if castedErr, ok := err.(errors.UnsupportedError); ok {
- up.Error = castedErr
- return nil
- }
- return err
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/padding.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/padding.go
deleted file mode 100644
index 06fa83740d8..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/padding.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package packet
-
-import (
- "io"
- "io/ioutil"
-)
-
-// Padding type represents a Padding Packet (Tag 21).
-// The padding type is represented by the length of its padding.
-// see https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh#name-padding-packet-tag-21
-type Padding int
-
-// parse just ignores the padding content.
-func (pad Padding) parse(reader io.Reader) error {
- _, err := io.CopyN(ioutil.Discard, reader, int64(pad))
- return err
-}
-
-// SerializePadding writes the padding to writer.
-func (pad Padding) SerializePadding(writer io.Writer, rand io.Reader) error {
- err := serializeHeader(writer, packetPadding, int(pad))
- if err != nil {
- return err
- }
- _, err = io.CopyN(writer, rand, int64(pad))
- return err
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go
deleted file mode 100644
index 099b4d9ba08..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go
+++ /dev/null
@@ -1,1173 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "bytes"
- "crypto"
- "crypto/cipher"
- "crypto/dsa"
- "crypto/rsa"
- "crypto/sha1"
- "crypto/sha256"
- "crypto/subtle"
- "fmt"
- "io"
- "math/big"
- "strconv"
- "time"
-
- "github.com/ProtonMail/go-crypto/openpgp/ecdh"
- "github.com/ProtonMail/go-crypto/openpgp/ecdsa"
- "github.com/ProtonMail/go-crypto/openpgp/ed25519"
- "github.com/ProtonMail/go-crypto/openpgp/ed448"
- "github.com/ProtonMail/go-crypto/openpgp/eddsa"
- "github.com/ProtonMail/go-crypto/openpgp/elgamal"
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/encoding"
- "github.com/ProtonMail/go-crypto/openpgp/s2k"
- "github.com/ProtonMail/go-crypto/openpgp/x25519"
- "github.com/ProtonMail/go-crypto/openpgp/x448"
- "golang.org/x/crypto/hkdf"
-)
-
-// PrivateKey represents a possibly encrypted private key. See RFC 4880,
-// section 5.5.3.
-type PrivateKey struct {
- PublicKey
- Encrypted bool // if true then the private key is unavailable until Decrypt has been called.
- encryptedData []byte
- cipher CipherFunction
- s2k func(out, in []byte)
- aead AEADMode // only relevant if S2KAEAD is enabled
- // An *{rsa|dsa|elgamal|ecdh|ecdsa|ed25519|ed448}.PrivateKey or
- // crypto.Signer/crypto.Decrypter (Decryptor RSA only).
- PrivateKey interface{}
- iv []byte
-
- // Type of encryption of the S2K packet
- // Allowed values are 0 (Not encrypted), 253 (AEAD), 254 (SHA1), or
- // 255 (2-byte checksum)
- s2kType S2KType
- // Full parameters of the S2K packet
- s2kParams *s2k.Params
-}
-
-// S2KType s2k packet type
-type S2KType uint8
-
-const (
- // S2KNON unencrypt
- S2KNON S2KType = 0
- // S2KAEAD use authenticated encryption
- S2KAEAD S2KType = 253
- // S2KSHA1 sha1 sum check
- S2KSHA1 S2KType = 254
- // S2KCHECKSUM sum check
- S2KCHECKSUM S2KType = 255
-)
-
-func NewRSAPrivateKey(creationTime time.Time, priv *rsa.PrivateKey) *PrivateKey {
- pk := new(PrivateKey)
- pk.PublicKey = *NewRSAPublicKey(creationTime, &priv.PublicKey)
- pk.PrivateKey = priv
- return pk
-}
-
-func NewDSAPrivateKey(creationTime time.Time, priv *dsa.PrivateKey) *PrivateKey {
- pk := new(PrivateKey)
- pk.PublicKey = *NewDSAPublicKey(creationTime, &priv.PublicKey)
- pk.PrivateKey = priv
- return pk
-}
-
-func NewElGamalPrivateKey(creationTime time.Time, priv *elgamal.PrivateKey) *PrivateKey {
- pk := new(PrivateKey)
- pk.PublicKey = *NewElGamalPublicKey(creationTime, &priv.PublicKey)
- pk.PrivateKey = priv
- return pk
-}
-
-func NewECDSAPrivateKey(creationTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey {
- pk := new(PrivateKey)
- pk.PublicKey = *NewECDSAPublicKey(creationTime, &priv.PublicKey)
- pk.PrivateKey = priv
- return pk
-}
-
-func NewEdDSAPrivateKey(creationTime time.Time, priv *eddsa.PrivateKey) *PrivateKey {
- pk := new(PrivateKey)
- pk.PublicKey = *NewEdDSAPublicKey(creationTime, &priv.PublicKey)
- pk.PrivateKey = priv
- return pk
-}
-
-func NewECDHPrivateKey(creationTime time.Time, priv *ecdh.PrivateKey) *PrivateKey {
- pk := new(PrivateKey)
- pk.PublicKey = *NewECDHPublicKey(creationTime, &priv.PublicKey)
- pk.PrivateKey = priv
- return pk
-}
-
-func NewX25519PrivateKey(creationTime time.Time, priv *x25519.PrivateKey) *PrivateKey {
- pk := new(PrivateKey)
- pk.PublicKey = *NewX25519PublicKey(creationTime, &priv.PublicKey)
- pk.PrivateKey = priv
- return pk
-}
-
-func NewX448PrivateKey(creationTime time.Time, priv *x448.PrivateKey) *PrivateKey {
- pk := new(PrivateKey)
- pk.PublicKey = *NewX448PublicKey(creationTime, &priv.PublicKey)
- pk.PrivateKey = priv
- return pk
-}
-
-func NewEd25519PrivateKey(creationTime time.Time, priv *ed25519.PrivateKey) *PrivateKey {
- pk := new(PrivateKey)
- pk.PublicKey = *NewEd25519PublicKey(creationTime, &priv.PublicKey)
- pk.PrivateKey = priv
- return pk
-}
-
-func NewEd448PrivateKey(creationTime time.Time, priv *ed448.PrivateKey) *PrivateKey {
- pk := new(PrivateKey)
- pk.PublicKey = *NewEd448PublicKey(creationTime, &priv.PublicKey)
- pk.PrivateKey = priv
- return pk
-}
-
-// NewSignerPrivateKey creates a PrivateKey from a crypto.Signer that
-// implements RSA, ECDSA or EdDSA.
-func NewSignerPrivateKey(creationTime time.Time, signer interface{}) *PrivateKey {
- pk := new(PrivateKey)
- // In general, the public Keys should be used as pointers. We still
- // type-switch on the values, for backwards-compatibility.
- switch pubkey := signer.(type) {
- case *rsa.PrivateKey:
- pk.PublicKey = *NewRSAPublicKey(creationTime, &pubkey.PublicKey)
- case rsa.PrivateKey:
- pk.PublicKey = *NewRSAPublicKey(creationTime, &pubkey.PublicKey)
- case *ecdsa.PrivateKey:
- pk.PublicKey = *NewECDSAPublicKey(creationTime, &pubkey.PublicKey)
- case ecdsa.PrivateKey:
- pk.PublicKey = *NewECDSAPublicKey(creationTime, &pubkey.PublicKey)
- case *eddsa.PrivateKey:
- pk.PublicKey = *NewEdDSAPublicKey(creationTime, &pubkey.PublicKey)
- case eddsa.PrivateKey:
- pk.PublicKey = *NewEdDSAPublicKey(creationTime, &pubkey.PublicKey)
- case *ed25519.PrivateKey:
- pk.PublicKey = *NewEd25519PublicKey(creationTime, &pubkey.PublicKey)
- case ed25519.PrivateKey:
- pk.PublicKey = *NewEd25519PublicKey(creationTime, &pubkey.PublicKey)
- case *ed448.PrivateKey:
- pk.PublicKey = *NewEd448PublicKey(creationTime, &pubkey.PublicKey)
- case ed448.PrivateKey:
- pk.PublicKey = *NewEd448PublicKey(creationTime, &pubkey.PublicKey)
- default:
- panic("openpgp: unknown signer type in NewSignerPrivateKey")
- }
- pk.PrivateKey = signer
- return pk
-}
-
-// NewDecrypterPrivateKey creates a PrivateKey from a *{rsa|elgamal|ecdh|x25519|x448}.PrivateKey.
-func NewDecrypterPrivateKey(creationTime time.Time, decrypter interface{}) *PrivateKey {
- pk := new(PrivateKey)
- switch priv := decrypter.(type) {
- case *rsa.PrivateKey:
- pk.PublicKey = *NewRSAPublicKey(creationTime, &priv.PublicKey)
- case *elgamal.PrivateKey:
- pk.PublicKey = *NewElGamalPublicKey(creationTime, &priv.PublicKey)
- case *ecdh.PrivateKey:
- pk.PublicKey = *NewECDHPublicKey(creationTime, &priv.PublicKey)
- case *x25519.PrivateKey:
- pk.PublicKey = *NewX25519PublicKey(creationTime, &priv.PublicKey)
- case *x448.PrivateKey:
- pk.PublicKey = *NewX448PublicKey(creationTime, &priv.PublicKey)
- default:
- panic("openpgp: unknown decrypter type in NewDecrypterPrivateKey")
- }
- pk.PrivateKey = decrypter
- return pk
-}
-
-func (pk *PrivateKey) parse(r io.Reader) (err error) {
- err = (&pk.PublicKey).parse(r)
- if err != nil {
- return
- }
- v5 := pk.PublicKey.Version == 5
- v6 := pk.PublicKey.Version == 6
-
- var buf [1]byte
- _, err = readFull(r, buf[:])
- if err != nil {
- return
- }
- pk.s2kType = S2KType(buf[0])
- var optCount [1]byte
- if v5 || (v6 && pk.s2kType != S2KNON) {
- if _, err = readFull(r, optCount[:]); err != nil {
- return
- }
- }
-
- switch pk.s2kType {
- case S2KNON:
- pk.s2k = nil
- pk.Encrypted = false
- case S2KSHA1, S2KCHECKSUM, S2KAEAD:
- if (v5 || v6) && pk.s2kType == S2KCHECKSUM {
- return errors.StructuralError(fmt.Sprintf("wrong s2k identifier for version %d", pk.Version))
- }
- _, err = readFull(r, buf[:])
- if err != nil {
- return
- }
- pk.cipher = CipherFunction(buf[0])
- if pk.cipher != 0 && !pk.cipher.IsSupported() {
- return errors.UnsupportedError("unsupported cipher function in private key")
- }
- // [Optional] If string-to-key usage octet was 253,
- // a one-octet AEAD algorithm.
- if pk.s2kType == S2KAEAD {
- _, err = readFull(r, buf[:])
- if err != nil {
- return
- }
- pk.aead = AEADMode(buf[0])
- if !pk.aead.IsSupported() {
- return errors.UnsupportedError("unsupported aead mode in private key")
- }
- }
-
- // [Optional] Only for a version 6 packet,
- // and if string-to-key usage octet was 255, 254, or 253,
- // an one-octet count of the following field.
- if v6 {
- _, err = readFull(r, buf[:])
- if err != nil {
- return
- }
- }
-
- pk.s2kParams, err = s2k.ParseIntoParams(r)
- if err != nil {
- return
- }
- if pk.s2kParams.Dummy() {
- return
- }
- pk.s2k, err = pk.s2kParams.Function()
- if err != nil {
- return
- }
- pk.Encrypted = true
- default:
- return errors.UnsupportedError("deprecated s2k function in private key")
- }
-
- if pk.Encrypted {
- var ivSize int
- // If the S2K usage octet was 253, the IV is of the size expected by the AEAD mode,
- // unless it's a version 5 key, in which case it's the size of the symmetric cipher's block size.
- // For all other S2K modes, it's always the block size.
- if !v5 && pk.s2kType == S2KAEAD {
- ivSize = pk.aead.IvLength()
- } else {
- ivSize = pk.cipher.blockSize()
- }
-
- if ivSize == 0 {
- return errors.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher)))
- }
- pk.iv = make([]byte, ivSize)
- _, err = readFull(r, pk.iv)
- if err != nil {
- return
- }
- if v5 && pk.s2kType == S2KAEAD {
- pk.iv = pk.iv[:pk.aead.IvLength()]
- }
- }
-
- var privateKeyData []byte
- if v5 {
- var n [4]byte /* secret material four octet count */
- _, err = readFull(r, n[:])
- if err != nil {
- return
- }
- count := uint32(uint32(n[0])<<24 | uint32(n[1])<<16 | uint32(n[2])<<8 | uint32(n[3]))
- if !pk.Encrypted {
- count = count + 2 /* two octet checksum */
- }
- privateKeyData = make([]byte, count)
- _, err = readFull(r, privateKeyData)
- if err != nil {
- return
- }
- } else {
- privateKeyData, err = io.ReadAll(r)
- if err != nil {
- return
- }
- }
- if !pk.Encrypted {
- if len(privateKeyData) < 2 {
- return errors.StructuralError("truncated private key data")
- }
- if pk.Version != 6 {
- // checksum
- var sum uint16
- for i := 0; i < len(privateKeyData)-2; i++ {
- sum += uint16(privateKeyData[i])
- }
- if privateKeyData[len(privateKeyData)-2] != uint8(sum>>8) ||
- privateKeyData[len(privateKeyData)-1] != uint8(sum) {
- return errors.StructuralError("private key checksum failure")
- }
- privateKeyData = privateKeyData[:len(privateKeyData)-2]
- return pk.parsePrivateKey(privateKeyData)
- } else {
- // No checksum
- return pk.parsePrivateKey(privateKeyData)
- }
- }
-
- pk.encryptedData = privateKeyData
- return
-}
-
-// Dummy returns true if the private key is a dummy key. This is a GNU extension.
-func (pk *PrivateKey) Dummy() bool {
- return pk.s2kParams.Dummy()
-}
-
-func mod64kHash(d []byte) uint16 {
- var h uint16
- for _, b := range d {
- h += uint16(b)
- }
- return h
-}
-
-func (pk *PrivateKey) Serialize(w io.Writer) (err error) {
- contents := bytes.NewBuffer(nil)
- err = pk.PublicKey.serializeWithoutHeaders(contents)
- if err != nil {
- return
- }
- if _, err = contents.Write([]byte{uint8(pk.s2kType)}); err != nil {
- return
- }
-
- optional := bytes.NewBuffer(nil)
- if pk.Encrypted || pk.Dummy() {
- // [Optional] If string-to-key usage octet was 255, 254, or 253,
- // a one-octet symmetric encryption algorithm.
- if _, err = optional.Write([]byte{uint8(pk.cipher)}); err != nil {
- return
- }
- // [Optional] If string-to-key usage octet was 253,
- // a one-octet AEAD algorithm.
- if pk.s2kType == S2KAEAD {
- if _, err = optional.Write([]byte{uint8(pk.aead)}); err != nil {
- return
- }
- }
-
- s2kBuffer := bytes.NewBuffer(nil)
- if err := pk.s2kParams.Serialize(s2kBuffer); err != nil {
- return err
- }
- // [Optional] Only for a version 6 packet, and if string-to-key
- // usage octet was 255, 254, or 253, an one-octet
- // count of the following field.
- if pk.Version == 6 {
- if _, err = optional.Write([]byte{uint8(s2kBuffer.Len())}); err != nil {
- return
- }
- }
- // [Optional] If string-to-key usage octet was 255, 254, or 253,
- // a string-to-key (S2K) specifier. The length of the string-to-key specifier
- // depends on its type
- if _, err = io.Copy(optional, s2kBuffer); err != nil {
- return
- }
-
- // IV
- if pk.Encrypted {
- if _, err = optional.Write(pk.iv); err != nil {
- return
- }
- if pk.Version == 5 && pk.s2kType == S2KAEAD {
- // Add padding for version 5
- padding := make([]byte, pk.cipher.blockSize()-len(pk.iv))
- if _, err = optional.Write(padding); err != nil {
- return
- }
- }
- }
- }
- if pk.Version == 5 || (pk.Version == 6 && pk.s2kType != S2KNON) {
- contents.Write([]byte{uint8(optional.Len())})
- }
-
- if _, err := io.Copy(contents, optional); err != nil {
- return err
- }
-
- if !pk.Dummy() {
- l := 0
- var priv []byte
- if !pk.Encrypted {
- buf := bytes.NewBuffer(nil)
- err = pk.serializePrivateKey(buf)
- if err != nil {
- return err
- }
- l = buf.Len()
- if pk.Version != 6 {
- checksum := mod64kHash(buf.Bytes())
- buf.Write([]byte{byte(checksum >> 8), byte(checksum)})
- }
- priv = buf.Bytes()
- } else {
- priv, l = pk.encryptedData, len(pk.encryptedData)
- }
-
- if pk.Version == 5 {
- contents.Write([]byte{byte(l >> 24), byte(l >> 16), byte(l >> 8), byte(l)})
- }
- contents.Write(priv)
- }
-
- ptype := packetTypePrivateKey
- if pk.IsSubkey {
- ptype = packetTypePrivateSubkey
- }
- err = serializeHeader(w, ptype, contents.Len())
- if err != nil {
- return
- }
- _, err = io.Copy(w, contents)
- if err != nil {
- return
- }
- return
-}
-
-func serializeRSAPrivateKey(w io.Writer, priv *rsa.PrivateKey) error {
- if _, err := w.Write(new(encoding.MPI).SetBig(priv.D).EncodedBytes()); err != nil {
- return err
- }
- if _, err := w.Write(new(encoding.MPI).SetBig(priv.Primes[1]).EncodedBytes()); err != nil {
- return err
- }
- if _, err := w.Write(new(encoding.MPI).SetBig(priv.Primes[0]).EncodedBytes()); err != nil {
- return err
- }
- _, err := w.Write(new(encoding.MPI).SetBig(priv.Precomputed.Qinv).EncodedBytes())
- return err
-}
-
-func serializeDSAPrivateKey(w io.Writer, priv *dsa.PrivateKey) error {
- _, err := w.Write(new(encoding.MPI).SetBig(priv.X).EncodedBytes())
- return err
-}
-
-func serializeElGamalPrivateKey(w io.Writer, priv *elgamal.PrivateKey) error {
- _, err := w.Write(new(encoding.MPI).SetBig(priv.X).EncodedBytes())
- return err
-}
-
-func serializeECDSAPrivateKey(w io.Writer, priv *ecdsa.PrivateKey) error {
- _, err := w.Write(encoding.NewMPI(priv.MarshalIntegerSecret()).EncodedBytes())
- return err
-}
-
-func serializeEdDSAPrivateKey(w io.Writer, priv *eddsa.PrivateKey) error {
- _, err := w.Write(encoding.NewMPI(priv.MarshalByteSecret()).EncodedBytes())
- return err
-}
-
-func serializeECDHPrivateKey(w io.Writer, priv *ecdh.PrivateKey) error {
- _, err := w.Write(encoding.NewMPI(priv.MarshalByteSecret()).EncodedBytes())
- return err
-}
-
-func serializeX25519PrivateKey(w io.Writer, priv *x25519.PrivateKey) error {
- _, err := w.Write(priv.Secret)
- return err
-}
-
-func serializeX448PrivateKey(w io.Writer, priv *x448.PrivateKey) error {
- _, err := w.Write(priv.Secret)
- return err
-}
-
-func serializeEd25519PrivateKey(w io.Writer, priv *ed25519.PrivateKey) error {
- _, err := w.Write(priv.MarshalByteSecret())
- return err
-}
-
-func serializeEd448PrivateKey(w io.Writer, priv *ed448.PrivateKey) error {
- _, err := w.Write(priv.MarshalByteSecret())
- return err
-}
-
-// decrypt decrypts an encrypted private key using a decryption key.
-func (pk *PrivateKey) decrypt(decryptionKey []byte) error {
- if pk.Dummy() {
- return errors.ErrDummyPrivateKey("dummy key found")
- }
- if !pk.Encrypted {
- return nil
- }
- block := pk.cipher.new(decryptionKey)
- var data []byte
- switch pk.s2kType {
- case S2KAEAD:
- aead := pk.aead.new(block)
- additionalData, err := pk.additionalData()
- if err != nil {
- return err
- }
- // Decrypt the encrypted key material with aead
- data, err = aead.Open(nil, pk.iv, pk.encryptedData, additionalData)
- if err != nil {
- return err
- }
- case S2KSHA1, S2KCHECKSUM:
- cfb := cipher.NewCFBDecrypter(block, pk.iv)
- data = make([]byte, len(pk.encryptedData))
- cfb.XORKeyStream(data, pk.encryptedData)
- if pk.s2kType == S2KSHA1 {
- if len(data) < sha1.Size {
- return errors.StructuralError("truncated private key data")
- }
- h := sha1.New()
- h.Write(data[:len(data)-sha1.Size])
- sum := h.Sum(nil)
- if !bytes.Equal(sum, data[len(data)-sha1.Size:]) {
- return errors.StructuralError("private key checksum failure")
- }
- data = data[:len(data)-sha1.Size]
- } else {
- if len(data) < 2 {
- return errors.StructuralError("truncated private key data")
- }
- var sum uint16
- for i := 0; i < len(data)-2; i++ {
- sum += uint16(data[i])
- }
- if data[len(data)-2] != uint8(sum>>8) ||
- data[len(data)-1] != uint8(sum) {
- return errors.StructuralError("private key checksum failure")
- }
- data = data[:len(data)-2]
- }
- default:
- return errors.InvalidArgumentError("invalid s2k type")
- }
-
- err := pk.parsePrivateKey(data)
- if _, ok := err.(errors.KeyInvalidError); ok {
- return errors.KeyInvalidError("invalid key parameters")
- }
- if err != nil {
- return err
- }
-
- // Mark key as unencrypted
- pk.s2kType = S2KNON
- pk.s2k = nil
- pk.Encrypted = false
- pk.encryptedData = nil
- return nil
-}
-
-func (pk *PrivateKey) decryptWithCache(passphrase []byte, keyCache *s2k.Cache) error {
- if pk.Dummy() {
- return errors.ErrDummyPrivateKey("dummy key found")
- }
- if !pk.Encrypted {
- return nil
- }
-
- key, err := keyCache.GetOrComputeDerivedKey(passphrase, pk.s2kParams, pk.cipher.KeySize())
- if err != nil {
- return err
- }
- if pk.s2kType == S2KAEAD {
- key = pk.applyHKDF(key)
- }
- return pk.decrypt(key)
-}
-
-// Decrypt decrypts an encrypted private key using a passphrase.
-func (pk *PrivateKey) Decrypt(passphrase []byte) error {
- if pk.Dummy() {
- return errors.ErrDummyPrivateKey("dummy key found")
- }
- if !pk.Encrypted {
- return nil
- }
-
- key := make([]byte, pk.cipher.KeySize())
- pk.s2k(key, passphrase)
- if pk.s2kType == S2KAEAD {
- key = pk.applyHKDF(key)
- }
- return pk.decrypt(key)
-}
-
-// DecryptPrivateKeys decrypts all encrypted keys with the given config and passphrase.
-// Avoids recomputation of similar s2k key derivations.
-func DecryptPrivateKeys(keys []*PrivateKey, passphrase []byte) error {
- // Create a cache to avoid recomputation of key derviations for the same passphrase.
- s2kCache := &s2k.Cache{}
- for _, key := range keys {
- if key != nil && !key.Dummy() && key.Encrypted {
- err := key.decryptWithCache(passphrase, s2kCache)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-// encrypt encrypts an unencrypted private key.
-func (pk *PrivateKey) encrypt(key []byte, params *s2k.Params, s2kType S2KType, cipherFunction CipherFunction, rand io.Reader) error {
- if pk.Dummy() {
- return errors.ErrDummyPrivateKey("dummy key found")
- }
- if pk.Encrypted {
- return nil
- }
- // check if encryptionKey has the correct size
- if len(key) != cipherFunction.KeySize() {
- return errors.InvalidArgumentError("supplied encryption key has the wrong size")
- }
-
- priv := bytes.NewBuffer(nil)
- err := pk.serializePrivateKey(priv)
- if err != nil {
- return err
- }
-
- pk.cipher = cipherFunction
- pk.s2kParams = params
- pk.s2k, err = pk.s2kParams.Function()
- if err != nil {
- return err
- }
-
- privateKeyBytes := priv.Bytes()
- pk.s2kType = s2kType
- block := pk.cipher.new(key)
- switch s2kType {
- case S2KAEAD:
- if pk.aead == 0 {
- return errors.StructuralError("aead mode is not set on key")
- }
- aead := pk.aead.new(block)
- additionalData, err := pk.additionalData()
- if err != nil {
- return err
- }
- pk.iv = make([]byte, aead.NonceSize())
- _, err = io.ReadFull(rand, pk.iv)
- if err != nil {
- return err
- }
- // Decrypt the encrypted key material with aead
- pk.encryptedData = aead.Seal(nil, pk.iv, privateKeyBytes, additionalData)
- case S2KSHA1, S2KCHECKSUM:
- pk.iv = make([]byte, pk.cipher.blockSize())
- _, err = io.ReadFull(rand, pk.iv)
- if err != nil {
- return err
- }
- cfb := cipher.NewCFBEncrypter(block, pk.iv)
- if s2kType == S2KSHA1 {
- h := sha1.New()
- h.Write(privateKeyBytes)
- sum := h.Sum(nil)
- privateKeyBytes = append(privateKeyBytes, sum...)
- } else {
- var sum uint16
- for _, b := range privateKeyBytes {
- sum += uint16(b)
- }
- privateKeyBytes = append(privateKeyBytes, []byte{uint8(sum >> 8), uint8(sum)}...)
- }
- pk.encryptedData = make([]byte, len(privateKeyBytes))
- cfb.XORKeyStream(pk.encryptedData, privateKeyBytes)
- default:
- return errors.InvalidArgumentError("invalid s2k type for encryption")
- }
-
- pk.Encrypted = true
- pk.PrivateKey = nil
- return err
-}
-
-// EncryptWithConfig encrypts an unencrypted private key using the passphrase and the config.
-func (pk *PrivateKey) EncryptWithConfig(passphrase []byte, config *Config) error {
- params, err := s2k.Generate(config.Random(), config.S2K())
- if err != nil {
- return err
- }
- // Derive an encryption key with the configured s2k function.
- key := make([]byte, config.Cipher().KeySize())
- s2k, err := params.Function()
- if err != nil {
- return err
- }
- s2k(key, passphrase)
- s2kType := S2KSHA1
- if config.AEAD() != nil {
- s2kType = S2KAEAD
- pk.aead = config.AEAD().Mode()
- pk.cipher = config.Cipher()
- key = pk.applyHKDF(key)
- }
- // Encrypt the private key with the derived encryption key.
- return pk.encrypt(key, params, s2kType, config.Cipher(), config.Random())
-}
-
-// EncryptPrivateKeys encrypts all unencrypted keys with the given config and passphrase.
-// Only derives one key from the passphrase, which is then used to encrypt each key.
-func EncryptPrivateKeys(keys []*PrivateKey, passphrase []byte, config *Config) error {
- params, err := s2k.Generate(config.Random(), config.S2K())
- if err != nil {
- return err
- }
- // Derive an encryption key with the configured s2k function.
- encryptionKey := make([]byte, config.Cipher().KeySize())
- s2k, err := params.Function()
- if err != nil {
- return err
- }
- s2k(encryptionKey, passphrase)
- for _, key := range keys {
- if key != nil && !key.Dummy() && !key.Encrypted {
- s2kType := S2KSHA1
- if config.AEAD() != nil {
- s2kType = S2KAEAD
- key.aead = config.AEAD().Mode()
- key.cipher = config.Cipher()
- derivedKey := key.applyHKDF(encryptionKey)
- err = key.encrypt(derivedKey, params, s2kType, config.Cipher(), config.Random())
- } else {
- err = key.encrypt(encryptionKey, params, s2kType, config.Cipher(), config.Random())
- }
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-// Encrypt encrypts an unencrypted private key using a passphrase.
-func (pk *PrivateKey) Encrypt(passphrase []byte) error {
- // Default config of private key encryption
- config := &Config{
- S2KConfig: &s2k.Config{
- S2KMode: s2k.IteratedSaltedS2K,
- S2KCount: 65536,
- Hash: crypto.SHA256,
- },
- DefaultCipher: CipherAES256,
- }
- return pk.EncryptWithConfig(passphrase, config)
-}
-
-func (pk *PrivateKey) serializePrivateKey(w io.Writer) (err error) {
- switch priv := pk.PrivateKey.(type) {
- case *rsa.PrivateKey:
- err = serializeRSAPrivateKey(w, priv)
- case *dsa.PrivateKey:
- err = serializeDSAPrivateKey(w, priv)
- case *elgamal.PrivateKey:
- err = serializeElGamalPrivateKey(w, priv)
- case *ecdsa.PrivateKey:
- err = serializeECDSAPrivateKey(w, priv)
- case *eddsa.PrivateKey:
- err = serializeEdDSAPrivateKey(w, priv)
- case *ecdh.PrivateKey:
- err = serializeECDHPrivateKey(w, priv)
- case *x25519.PrivateKey:
- err = serializeX25519PrivateKey(w, priv)
- case *x448.PrivateKey:
- err = serializeX448PrivateKey(w, priv)
- case *ed25519.PrivateKey:
- err = serializeEd25519PrivateKey(w, priv)
- case *ed448.PrivateKey:
- err = serializeEd448PrivateKey(w, priv)
- default:
- err = errors.InvalidArgumentError("unknown private key type")
- }
- return
-}
-
-func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) {
- switch pk.PublicKey.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoRSAEncryptOnly:
- return pk.parseRSAPrivateKey(data)
- case PubKeyAlgoDSA:
- return pk.parseDSAPrivateKey(data)
- case PubKeyAlgoElGamal:
- return pk.parseElGamalPrivateKey(data)
- case PubKeyAlgoECDSA:
- return pk.parseECDSAPrivateKey(data)
- case PubKeyAlgoECDH:
- return pk.parseECDHPrivateKey(data)
- case PubKeyAlgoEdDSA:
- return pk.parseEdDSAPrivateKey(data)
- case PubKeyAlgoX25519:
- return pk.parseX25519PrivateKey(data)
- case PubKeyAlgoX448:
- return pk.parseX448PrivateKey(data)
- case PubKeyAlgoEd25519:
- return pk.parseEd25519PrivateKey(data)
- case PubKeyAlgoEd448:
- return pk.parseEd448PrivateKey(data)
- default:
- err = errors.StructuralError("unknown private key type")
- return
- }
-}
-
-func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err error) {
- rsaPub := pk.PublicKey.PublicKey.(*rsa.PublicKey)
- rsaPriv := new(rsa.PrivateKey)
- rsaPriv.PublicKey = *rsaPub
-
- buf := bytes.NewBuffer(data)
- d := new(encoding.MPI)
- if _, err := d.ReadFrom(buf); err != nil {
- return err
- }
-
- p := new(encoding.MPI)
- if _, err := p.ReadFrom(buf); err != nil {
- return err
- }
-
- q := new(encoding.MPI)
- if _, err := q.ReadFrom(buf); err != nil {
- return err
- }
-
- rsaPriv.D = new(big.Int).SetBytes(d.Bytes())
- rsaPriv.Primes = make([]*big.Int, 2)
- rsaPriv.Primes[0] = new(big.Int).SetBytes(p.Bytes())
- rsaPriv.Primes[1] = new(big.Int).SetBytes(q.Bytes())
- if err := rsaPriv.Validate(); err != nil {
- return errors.KeyInvalidError(err.Error())
- }
- rsaPriv.Precompute()
- pk.PrivateKey = rsaPriv
-
- return nil
-}
-
-func (pk *PrivateKey) parseDSAPrivateKey(data []byte) (err error) {
- dsaPub := pk.PublicKey.PublicKey.(*dsa.PublicKey)
- dsaPriv := new(dsa.PrivateKey)
- dsaPriv.PublicKey = *dsaPub
-
- buf := bytes.NewBuffer(data)
- x := new(encoding.MPI)
- if _, err := x.ReadFrom(buf); err != nil {
- return err
- }
-
- dsaPriv.X = new(big.Int).SetBytes(x.Bytes())
- if err := validateDSAParameters(dsaPriv); err != nil {
- return err
- }
- pk.PrivateKey = dsaPriv
-
- return nil
-}
-
-func (pk *PrivateKey) parseElGamalPrivateKey(data []byte) (err error) {
- pub := pk.PublicKey.PublicKey.(*elgamal.PublicKey)
- priv := new(elgamal.PrivateKey)
- priv.PublicKey = *pub
-
- buf := bytes.NewBuffer(data)
- x := new(encoding.MPI)
- if _, err := x.ReadFrom(buf); err != nil {
- return err
- }
-
- priv.X = new(big.Int).SetBytes(x.Bytes())
- if err := validateElGamalParameters(priv); err != nil {
- return err
- }
- pk.PrivateKey = priv
-
- return nil
-}
-
-func (pk *PrivateKey) parseECDSAPrivateKey(data []byte) (err error) {
- ecdsaPub := pk.PublicKey.PublicKey.(*ecdsa.PublicKey)
- ecdsaPriv := ecdsa.NewPrivateKey(*ecdsaPub)
-
- buf := bytes.NewBuffer(data)
- d := new(encoding.MPI)
- if _, err := d.ReadFrom(buf); err != nil {
- return err
- }
-
- if err := ecdsaPriv.UnmarshalIntegerSecret(d.Bytes()); err != nil {
- return err
- }
- if err := ecdsa.Validate(ecdsaPriv); err != nil {
- return err
- }
- pk.PrivateKey = ecdsaPriv
-
- return nil
-}
-
-func (pk *PrivateKey) parseECDHPrivateKey(data []byte) (err error) {
- ecdhPub := pk.PublicKey.PublicKey.(*ecdh.PublicKey)
- ecdhPriv := ecdh.NewPrivateKey(*ecdhPub)
-
- buf := bytes.NewBuffer(data)
- d := new(encoding.MPI)
- if _, err := d.ReadFrom(buf); err != nil {
- return err
- }
-
- if err := ecdhPriv.UnmarshalByteSecret(d.Bytes()); err != nil {
- return err
- }
-
- if err := ecdh.Validate(ecdhPriv); err != nil {
- return err
- }
-
- pk.PrivateKey = ecdhPriv
-
- return nil
-}
-
-func (pk *PrivateKey) parseX25519PrivateKey(data []byte) (err error) {
- publicKey := pk.PublicKey.PublicKey.(*x25519.PublicKey)
- privateKey := x25519.NewPrivateKey(*publicKey)
- privateKey.PublicKey = *publicKey
-
- privateKey.Secret = make([]byte, x25519.KeySize)
-
- if len(data) != x25519.KeySize {
- err = errors.StructuralError("wrong x25519 key size")
- return err
- }
- subtle.ConstantTimeCopy(1, privateKey.Secret, data)
- if err = x25519.Validate(privateKey); err != nil {
- return err
- }
- pk.PrivateKey = privateKey
- return nil
-}
-
-func (pk *PrivateKey) parseX448PrivateKey(data []byte) (err error) {
- publicKey := pk.PublicKey.PublicKey.(*x448.PublicKey)
- privateKey := x448.NewPrivateKey(*publicKey)
- privateKey.PublicKey = *publicKey
-
- privateKey.Secret = make([]byte, x448.KeySize)
-
- if len(data) != x448.KeySize {
- err = errors.StructuralError("wrong x448 key size")
- return err
- }
- subtle.ConstantTimeCopy(1, privateKey.Secret, data)
- if err = x448.Validate(privateKey); err != nil {
- return err
- }
- pk.PrivateKey = privateKey
- return nil
-}
-
-func (pk *PrivateKey) parseEd25519PrivateKey(data []byte) (err error) {
- publicKey := pk.PublicKey.PublicKey.(*ed25519.PublicKey)
- privateKey := ed25519.NewPrivateKey(*publicKey)
- privateKey.PublicKey = *publicKey
-
- if len(data) != ed25519.SeedSize {
- err = errors.StructuralError("wrong ed25519 key size")
- return err
- }
- err = privateKey.UnmarshalByteSecret(data)
- if err != nil {
- return err
- }
- err = ed25519.Validate(privateKey)
- if err != nil {
- return err
- }
- pk.PrivateKey = privateKey
- return nil
-}
-
-func (pk *PrivateKey) parseEd448PrivateKey(data []byte) (err error) {
- publicKey := pk.PublicKey.PublicKey.(*ed448.PublicKey)
- privateKey := ed448.NewPrivateKey(*publicKey)
- privateKey.PublicKey = *publicKey
-
- if len(data) != ed448.SeedSize {
- err = errors.StructuralError("wrong ed448 key size")
- return err
- }
- err = privateKey.UnmarshalByteSecret(data)
- if err != nil {
- return err
- }
- err = ed448.Validate(privateKey)
- if err != nil {
- return err
- }
- pk.PrivateKey = privateKey
- return nil
-}
-
-func (pk *PrivateKey) parseEdDSAPrivateKey(data []byte) (err error) {
- eddsaPub := pk.PublicKey.PublicKey.(*eddsa.PublicKey)
- eddsaPriv := eddsa.NewPrivateKey(*eddsaPub)
- eddsaPriv.PublicKey = *eddsaPub
-
- buf := bytes.NewBuffer(data)
- d := new(encoding.MPI)
- if _, err := d.ReadFrom(buf); err != nil {
- return err
- }
-
- if err = eddsaPriv.UnmarshalByteSecret(d.Bytes()); err != nil {
- return err
- }
-
- if err := eddsa.Validate(eddsaPriv); err != nil {
- return err
- }
-
- pk.PrivateKey = eddsaPriv
-
- return nil
-}
-
-func (pk *PrivateKey) additionalData() ([]byte, error) {
- additionalData := bytes.NewBuffer(nil)
- // Write additional data prefix based on packet type
- var packetByte byte
- if pk.PublicKey.IsSubkey {
- packetByte = 0xc7
- } else {
- packetByte = 0xc5
- }
- // Write public key to additional data
- _, err := additionalData.Write([]byte{packetByte})
- if err != nil {
- return nil, err
- }
- err = pk.PublicKey.serializeWithoutHeaders(additionalData)
- if err != nil {
- return nil, err
- }
- return additionalData.Bytes(), nil
-}
-
-func (pk *PrivateKey) applyHKDF(inputKey []byte) []byte {
- var packetByte byte
- if pk.PublicKey.IsSubkey {
- packetByte = 0xc7
- } else {
- packetByte = 0xc5
- }
- associatedData := []byte{packetByte, byte(pk.Version), byte(pk.cipher), byte(pk.aead)}
- hkdfReader := hkdf.New(sha256.New, inputKey, []byte{}, associatedData)
- encryptionKey := make([]byte, pk.cipher.KeySize())
- _, _ = readFull(hkdfReader, encryptionKey)
- return encryptionKey
-}
-
-func validateDSAParameters(priv *dsa.PrivateKey) error {
- p := priv.P // group prime
- q := priv.Q // subgroup order
- g := priv.G // g has order q mod p
- x := priv.X // secret
- y := priv.Y // y == g**x mod p
- one := big.NewInt(1)
- // expect g, y >= 2 and g < p
- if g.Cmp(one) <= 0 || y.Cmp(one) <= 0 || g.Cmp(p) > 0 {
- return errors.KeyInvalidError("dsa: invalid group")
- }
- // expect p > q
- if p.Cmp(q) <= 0 {
- return errors.KeyInvalidError("dsa: invalid group prime")
- }
- // q should be large enough and divide p-1
- pSub1 := new(big.Int).Sub(p, one)
- if q.BitLen() < 150 || new(big.Int).Mod(pSub1, q).Cmp(big.NewInt(0)) != 0 {
- return errors.KeyInvalidError("dsa: invalid order")
- }
- // confirm that g has order q mod p
- if !q.ProbablyPrime(32) || new(big.Int).Exp(g, q, p).Cmp(one) != 0 {
- return errors.KeyInvalidError("dsa: invalid order")
- }
- // check y
- if new(big.Int).Exp(g, x, p).Cmp(y) != 0 {
- return errors.KeyInvalidError("dsa: mismatching values")
- }
-
- return nil
-}
-
-func validateElGamalParameters(priv *elgamal.PrivateKey) error {
- p := priv.P // group prime
- g := priv.G // g has order p-1 mod p
- x := priv.X // secret
- y := priv.Y // y == g**x mod p
- one := big.NewInt(1)
- // Expect g, y >= 2 and g < p
- if g.Cmp(one) <= 0 || y.Cmp(one) <= 0 || g.Cmp(p) > 0 {
- return errors.KeyInvalidError("elgamal: invalid group")
- }
- if p.BitLen() < 1024 {
- return errors.KeyInvalidError("elgamal: group order too small")
- }
- pSub1 := new(big.Int).Sub(p, one)
- if new(big.Int).Exp(g, pSub1, p).Cmp(one) != 0 {
- return errors.KeyInvalidError("elgamal: invalid group")
- }
- // Since p-1 is not prime, g might have a smaller order that divides p-1.
- // We cannot confirm the exact order of g, but we make sure it is not too small.
- gExpI := new(big.Int).Set(g)
- i := 1
- threshold := 2 << 17 // we want order > threshold
- for i < threshold {
- i++ // we check every order to make sure key validation is not easily bypassed by guessing y'
- gExpI.Mod(new(big.Int).Mul(gExpI, g), p)
- if gExpI.Cmp(one) == 0 {
- return errors.KeyInvalidError("elgamal: order too small")
- }
- }
- // Check y
- if new(big.Int).Exp(g, x, p).Cmp(y) != 0 {
- return errors.KeyInvalidError("elgamal: mismatching values")
- }
-
- return nil
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key_test_data.go
deleted file mode 100644
index 029b8f1aab2..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key_test_data.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package packet
-
-// Generated with `gpg --export-secret-keys "Test Key 2"`
-const privKeyRSAHex = "9501fe044cc349a8010400b70ca0010e98c090008d45d1ee8f9113bd5861fd57b88bacb7c68658747663f1e1a3b5a98f32fda6472373c024b97359cd2efc88ff60f77751adfbf6af5e615e6a1408cfad8bf0cea30b0d5f53aa27ad59089ba9b15b7ebc2777a25d7b436144027e3bcd203909f147d0e332b240cf63d3395f5dfe0df0a6c04e8655af7eacdf0011010001fe0303024a252e7d475fd445607de39a265472aa74a9320ba2dac395faa687e9e0336aeb7e9a7397e511b5afd9dc84557c80ac0f3d4d7bfec5ae16f20d41c8c84a04552a33870b930420e230e179564f6d19bb153145e76c33ae993886c388832b0fa042ddda7f133924f3854481533e0ede31d51278c0519b29abc3bf53da673e13e3e1214b52413d179d7f66deee35cac8eacb060f78379d70ef4af8607e68131ff529439668fc39c9ce6dfef8a5ac234d234802cbfb749a26107db26406213ae5c06d4673253a3cbee1fcbae58d6ab77e38d6e2c0e7c6317c48e054edadb5a40d0d48acb44643d998139a8a66bb820be1f3f80185bc777d14b5954b60effe2448a036d565c6bc0b915fcea518acdd20ab07bc1529f561c58cd044f723109b93f6fd99f876ff891d64306b5d08f48bab59f38695e9109c4dec34013ba3153488ce070268381ba923ee1eb77125b36afcb4347ec3478c8f2735b06ef17351d872e577fa95d0c397c88c71b59629a36aec"
-
-// Generated by `gpg --export-secret-keys` followed by a manual extraction of
-// the ElGamal subkey from the packets.
-const privKeyElGamalHex = "9d0157044df9ee1a100400eb8e136a58ec39b582629cdadf830bc64e0a94ed8103ca8bb247b27b11b46d1d25297ef4bcc3071785ba0c0bedfe89eabc5287fcc0edf81ab5896c1c8e4b20d27d79813c7aede75320b33eaeeaa586edc00fd1036c10133e6ba0ff277245d0d59d04b2b3421b7244aca5f4a8d870c6f1c1fbff9e1c26699a860b9504f35ca1d700030503fd1ededd3b840795be6d9ccbe3c51ee42e2f39233c432b831ddd9c4e72b7025a819317e47bf94f9ee316d7273b05d5fcf2999c3a681f519b1234bbfa6d359b4752bd9c3f77d6b6456cde152464763414ca130f4e91d91041432f90620fec0e6d6b5116076c2985d5aeaae13be492b9b329efcaf7ee25120159a0a30cd976b42d7afe030302dae7eb80db744d4960c4df930d57e87fe81412eaace9f900e6c839817a614ddb75ba6603b9417c33ea7b6c93967dfa2bcff3fa3c74a5ce2c962db65b03aece14c96cbd0038fc"
-
-// pkcs1PrivKeyHex is a PKCS#1, RSA private key.
-// Generated by `openssl genrsa 1024 | openssl rsa -outform DER | xxd -p`
-const pkcs1PrivKeyHex = "3082025d02010002818100e98edfa1c3b35884a54d0b36a6a603b0290fa85e49e30fa23fc94fef9c6790bc4849928607aa48d809da326fb42a969d06ad756b98b9c1a90f5d4a2b6d0ac05953c97f4da3120164a21a679793ce181c906dc01d235cc085ddcdf6ea06c389b6ab8885dfd685959e693138856a68a7e5db263337ff82a088d583a897cf2d59e9020301000102818100b6d5c9eb70b02d5369b3ee5b520a14490b5bde8a317d36f7e4c74b7460141311d1e5067735f8f01d6f5908b2b96fbd881f7a1ab9a84d82753e39e19e2d36856be960d05ac9ef8e8782ea1b6d65aee28fdfe1d61451e8cff0adfe84322f12cf455028b581cf60eb9e0e140ba5d21aeba6c2634d7c65318b9a665fc01c3191ca21024100fa5e818da3705b0fa33278bb28d4b6f6050388af2d4b75ec9375dd91ccf2e7d7068086a8b82a8f6282e4fbbdb8a7f2622eb97295249d87acea7f5f816f54d347024100eecf9406d7dc49cdfb95ab1eff4064de84c7a30f64b2798936a0d2018ba9eb52e4b636f82e96c49cc63b80b675e91e40d1b2e4017d4b9adaf33ab3d9cf1c214f024100c173704ace742c082323066226a4655226819a85304c542b9dacbeacbf5d1881ee863485fcf6f59f3a604f9b42289282067447f2b13dfeed3eab7851fc81e0550240741fc41f3fc002b382eed8730e33c5d8de40256e4accee846667f536832f711ab1d4590e7db91a8a116ac5bff3be13d3f9243ff2e976662aa9b395d907f8e9c9024046a5696c9ef882363e06c9fa4e2f5b580906452befba03f4a99d0f873697ef1f851d2226ca7934b30b7c3e80cb634a67172bbbf4781735fe3e09263e2dd723e7"
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go
deleted file mode 100644
index dd93c98702c..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go
+++ /dev/null
@@ -1,1035 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "crypto/dsa"
- "crypto/rsa"
- "crypto/sha1"
- "crypto/sha256"
- _ "crypto/sha512"
- "encoding/binary"
- "fmt"
- "hash"
- "io"
- "math/big"
- "strconv"
- "time"
-
- "github.com/ProtonMail/go-crypto/openpgp/ecdh"
- "github.com/ProtonMail/go-crypto/openpgp/ecdsa"
- "github.com/ProtonMail/go-crypto/openpgp/ed25519"
- "github.com/ProtonMail/go-crypto/openpgp/ed448"
- "github.com/ProtonMail/go-crypto/openpgp/eddsa"
- "github.com/ProtonMail/go-crypto/openpgp/elgamal"
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
- "github.com/ProtonMail/go-crypto/openpgp/internal/ecc"
- "github.com/ProtonMail/go-crypto/openpgp/internal/encoding"
- "github.com/ProtonMail/go-crypto/openpgp/x25519"
- "github.com/ProtonMail/go-crypto/openpgp/x448"
-)
-
-// PublicKey represents an OpenPGP public key. See RFC 4880, section 5.5.2.
-type PublicKey struct {
- Version int
- CreationTime time.Time
- PubKeyAlgo PublicKeyAlgorithm
- PublicKey interface{} // *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey or *eddsa.PublicKey, *x25519.PublicKey, *x448.PublicKey, *ed25519.PublicKey, *ed448.PublicKey
- Fingerprint []byte
- KeyId uint64
- IsSubkey bool
-
- // RFC 4880 fields
- n, e, p, q, g, y encoding.Field
-
- // RFC 6637 fields
- // oid contains the OID byte sequence identifying the elliptic curve used
- oid encoding.Field
-
- // kdf stores key derivation function parameters
- // used for ECDH encryption. See RFC 6637, Section 9.
- kdf encoding.Field
-}
-
-// UpgradeToV5 updates the version of the key to v5, and updates all necessary
-// fields.
-func (pk *PublicKey) UpgradeToV5() {
- pk.Version = 5
- pk.setFingerprintAndKeyId()
-}
-
-// UpgradeToV6 updates the version of the key to v6, and updates all necessary
-// fields.
-func (pk *PublicKey) UpgradeToV6() {
- pk.Version = 6
- pk.setFingerprintAndKeyId()
-}
-
-// signingKey provides a convenient abstraction over signature verification
-// for v3 and v4 public keys.
-type signingKey interface {
- SerializeForHash(io.Writer) error
- SerializeSignaturePrefix(io.Writer) error
- serializeWithoutHeaders(io.Writer) error
-}
-
-// NewRSAPublicKey returns a PublicKey that wraps the given rsa.PublicKey.
-func NewRSAPublicKey(creationTime time.Time, pub *rsa.PublicKey) *PublicKey {
- pk := &PublicKey{
- Version: 4,
- CreationTime: creationTime,
- PubKeyAlgo: PubKeyAlgoRSA,
- PublicKey: pub,
- n: new(encoding.MPI).SetBig(pub.N),
- e: new(encoding.MPI).SetBig(big.NewInt(int64(pub.E))),
- }
-
- pk.setFingerprintAndKeyId()
- return pk
-}
-
-// NewDSAPublicKey returns a PublicKey that wraps the given dsa.PublicKey.
-func NewDSAPublicKey(creationTime time.Time, pub *dsa.PublicKey) *PublicKey {
- pk := &PublicKey{
- Version: 4,
- CreationTime: creationTime,
- PubKeyAlgo: PubKeyAlgoDSA,
- PublicKey: pub,
- p: new(encoding.MPI).SetBig(pub.P),
- q: new(encoding.MPI).SetBig(pub.Q),
- g: new(encoding.MPI).SetBig(pub.G),
- y: new(encoding.MPI).SetBig(pub.Y),
- }
-
- pk.setFingerprintAndKeyId()
- return pk
-}
-
-// NewElGamalPublicKey returns a PublicKey that wraps the given elgamal.PublicKey.
-func NewElGamalPublicKey(creationTime time.Time, pub *elgamal.PublicKey) *PublicKey {
- pk := &PublicKey{
- Version: 4,
- CreationTime: creationTime,
- PubKeyAlgo: PubKeyAlgoElGamal,
- PublicKey: pub,
- p: new(encoding.MPI).SetBig(pub.P),
- g: new(encoding.MPI).SetBig(pub.G),
- y: new(encoding.MPI).SetBig(pub.Y),
- }
-
- pk.setFingerprintAndKeyId()
- return pk
-}
-
-func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey {
- pk := &PublicKey{
- Version: 4,
- CreationTime: creationTime,
- PubKeyAlgo: PubKeyAlgoECDSA,
- PublicKey: pub,
- p: encoding.NewMPI(pub.MarshalPoint()),
- }
-
- curveInfo := ecc.FindByCurve(pub.GetCurve())
- if curveInfo == nil {
- panic("unknown elliptic curve")
- }
- pk.oid = curveInfo.Oid
- pk.setFingerprintAndKeyId()
- return pk
-}
-
-func NewECDHPublicKey(creationTime time.Time, pub *ecdh.PublicKey) *PublicKey {
- var pk *PublicKey
- var kdf = encoding.NewOID([]byte{0x1, pub.Hash.Id(), pub.Cipher.Id()})
- pk = &PublicKey{
- Version: 4,
- CreationTime: creationTime,
- PubKeyAlgo: PubKeyAlgoECDH,
- PublicKey: pub,
- p: encoding.NewMPI(pub.MarshalPoint()),
- kdf: kdf,
- }
-
- curveInfo := ecc.FindByCurve(pub.GetCurve())
-
- if curveInfo == nil {
- panic("unknown elliptic curve")
- }
-
- pk.oid = curveInfo.Oid
- pk.setFingerprintAndKeyId()
- return pk
-}
-
-func NewEdDSAPublicKey(creationTime time.Time, pub *eddsa.PublicKey) *PublicKey {
- curveInfo := ecc.FindByCurve(pub.GetCurve())
- pk := &PublicKey{
- Version: 4,
- CreationTime: creationTime,
- PubKeyAlgo: PubKeyAlgoEdDSA,
- PublicKey: pub,
- oid: curveInfo.Oid,
- // Native point format, see draft-koch-eddsa-for-openpgp-04, Appendix B
- p: encoding.NewMPI(pub.MarshalPoint()),
- }
-
- pk.setFingerprintAndKeyId()
- return pk
-}
-
-func NewX25519PublicKey(creationTime time.Time, pub *x25519.PublicKey) *PublicKey {
- pk := &PublicKey{
- Version: 4,
- CreationTime: creationTime,
- PubKeyAlgo: PubKeyAlgoX25519,
- PublicKey: pub,
- }
-
- pk.setFingerprintAndKeyId()
- return pk
-}
-
-func NewX448PublicKey(creationTime time.Time, pub *x448.PublicKey) *PublicKey {
- pk := &PublicKey{
- Version: 4,
- CreationTime: creationTime,
- PubKeyAlgo: PubKeyAlgoX448,
- PublicKey: pub,
- }
-
- pk.setFingerprintAndKeyId()
- return pk
-}
-
-func NewEd25519PublicKey(creationTime time.Time, pub *ed25519.PublicKey) *PublicKey {
- pk := &PublicKey{
- Version: 4,
- CreationTime: creationTime,
- PubKeyAlgo: PubKeyAlgoEd25519,
- PublicKey: pub,
- }
-
- pk.setFingerprintAndKeyId()
- return pk
-}
-
-func NewEd448PublicKey(creationTime time.Time, pub *ed448.PublicKey) *PublicKey {
- pk := &PublicKey{
- Version: 4,
- CreationTime: creationTime,
- PubKeyAlgo: PubKeyAlgoEd448,
- PublicKey: pub,
- }
-
- pk.setFingerprintAndKeyId()
- return pk
-}
-
-func (pk *PublicKey) parse(r io.Reader) (err error) {
- // RFC 4880, section 5.5.2
- var buf [6]byte
- _, err = readFull(r, buf[:])
- if err != nil {
- return
- }
- if buf[0] != 4 && buf[0] != 5 && buf[0] != 6 {
- return errors.UnsupportedError("public key version " + strconv.Itoa(int(buf[0])))
- }
-
- pk.Version = int(buf[0])
- if pk.Version >= 5 {
- // Read the four-octet scalar octet count
- // The count is not used in this implementation
- var n [4]byte
- _, err = readFull(r, n[:])
- if err != nil {
- return
- }
- }
- pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0)
- pk.PubKeyAlgo = PublicKeyAlgorithm(buf[5])
- // Ignore four-ocet length
- switch pk.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
- err = pk.parseRSA(r)
- case PubKeyAlgoDSA:
- err = pk.parseDSA(r)
- case PubKeyAlgoElGamal:
- err = pk.parseElGamal(r)
- case PubKeyAlgoECDSA:
- err = pk.parseECDSA(r)
- case PubKeyAlgoECDH:
- err = pk.parseECDH(r)
- case PubKeyAlgoEdDSA:
- err = pk.parseEdDSA(r)
- case PubKeyAlgoX25519:
- err = pk.parseX25519(r)
- case PubKeyAlgoX448:
- err = pk.parseX448(r)
- case PubKeyAlgoEd25519:
- err = pk.parseEd25519(r)
- case PubKeyAlgoEd448:
- err = pk.parseEd448(r)
- default:
- err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo)))
- }
- if err != nil {
- return
- }
-
- pk.setFingerprintAndKeyId()
- return
-}
-
-func (pk *PublicKey) setFingerprintAndKeyId() {
- // RFC 4880, section 12.2
- if pk.Version >= 5 {
- fingerprint := sha256.New()
- if err := pk.SerializeForHash(fingerprint); err != nil {
- // Should not happen for a hash.
- panic(err)
- }
- pk.Fingerprint = make([]byte, 32)
- copy(pk.Fingerprint, fingerprint.Sum(nil))
- pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[:8])
- } else {
- fingerprint := sha1.New()
- if err := pk.SerializeForHash(fingerprint); err != nil {
- // Should not happen for a hash.
- panic(err)
- }
- pk.Fingerprint = make([]byte, 20)
- copy(pk.Fingerprint, fingerprint.Sum(nil))
- pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[12:20])
- }
-}
-
-// parseRSA parses RSA public key material from the given Reader. See RFC 4880,
-// section 5.5.2.
-func (pk *PublicKey) parseRSA(r io.Reader) (err error) {
- pk.n = new(encoding.MPI)
- if _, err = pk.n.ReadFrom(r); err != nil {
- return
- }
- pk.e = new(encoding.MPI)
- if _, err = pk.e.ReadFrom(r); err != nil {
- return
- }
-
- if len(pk.e.Bytes()) > 3 {
- err = errors.UnsupportedError("large public exponent")
- return
- }
- rsa := &rsa.PublicKey{
- N: new(big.Int).SetBytes(pk.n.Bytes()),
- E: 0,
- }
- for i := 0; i < len(pk.e.Bytes()); i++ {
- rsa.E <<= 8
- rsa.E |= int(pk.e.Bytes()[i])
- }
- pk.PublicKey = rsa
- return
-}
-
-// parseDSA parses DSA public key material from the given Reader. See RFC 4880,
-// section 5.5.2.
-func (pk *PublicKey) parseDSA(r io.Reader) (err error) {
- pk.p = new(encoding.MPI)
- if _, err = pk.p.ReadFrom(r); err != nil {
- return
- }
- pk.q = new(encoding.MPI)
- if _, err = pk.q.ReadFrom(r); err != nil {
- return
- }
- pk.g = new(encoding.MPI)
- if _, err = pk.g.ReadFrom(r); err != nil {
- return
- }
- pk.y = new(encoding.MPI)
- if _, err = pk.y.ReadFrom(r); err != nil {
- return
- }
-
- dsa := new(dsa.PublicKey)
- dsa.P = new(big.Int).SetBytes(pk.p.Bytes())
- dsa.Q = new(big.Int).SetBytes(pk.q.Bytes())
- dsa.G = new(big.Int).SetBytes(pk.g.Bytes())
- dsa.Y = new(big.Int).SetBytes(pk.y.Bytes())
- pk.PublicKey = dsa
- return
-}
-
-// parseElGamal parses ElGamal public key material from the given Reader. See
-// RFC 4880, section 5.5.2.
-func (pk *PublicKey) parseElGamal(r io.Reader) (err error) {
- pk.p = new(encoding.MPI)
- if _, err = pk.p.ReadFrom(r); err != nil {
- return
- }
- pk.g = new(encoding.MPI)
- if _, err = pk.g.ReadFrom(r); err != nil {
- return
- }
- pk.y = new(encoding.MPI)
- if _, err = pk.y.ReadFrom(r); err != nil {
- return
- }
-
- elgamal := new(elgamal.PublicKey)
- elgamal.P = new(big.Int).SetBytes(pk.p.Bytes())
- elgamal.G = new(big.Int).SetBytes(pk.g.Bytes())
- elgamal.Y = new(big.Int).SetBytes(pk.y.Bytes())
- pk.PublicKey = elgamal
- return
-}
-
-// parseECDSA parses ECDSA public key material from the given Reader. See
-// RFC 6637, Section 9.
-func (pk *PublicKey) parseECDSA(r io.Reader) (err error) {
- pk.oid = new(encoding.OID)
- if _, err = pk.oid.ReadFrom(r); err != nil {
- return
- }
-
- curveInfo := ecc.FindByOid(pk.oid)
- if curveInfo == nil {
- return errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid))
- }
-
- pk.p = new(encoding.MPI)
- if _, err = pk.p.ReadFrom(r); err != nil {
- return
- }
-
- c, ok := curveInfo.Curve.(ecc.ECDSACurve)
- if !ok {
- return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid))
- }
-
- ecdsaKey := ecdsa.NewPublicKey(c)
- err = ecdsaKey.UnmarshalPoint(pk.p.Bytes())
- pk.PublicKey = ecdsaKey
-
- return
-}
-
-// parseECDH parses ECDH public key material from the given Reader. See
-// RFC 6637, Section 9.
-func (pk *PublicKey) parseECDH(r io.Reader) (err error) {
- pk.oid = new(encoding.OID)
- if _, err = pk.oid.ReadFrom(r); err != nil {
- return
- }
-
- curveInfo := ecc.FindByOid(pk.oid)
- if curveInfo == nil {
- return errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid))
- }
-
- pk.p = new(encoding.MPI)
- if _, err = pk.p.ReadFrom(r); err != nil {
- return
- }
- pk.kdf = new(encoding.OID)
- if _, err = pk.kdf.ReadFrom(r); err != nil {
- return
- }
-
- c, ok := curveInfo.Curve.(ecc.ECDHCurve)
- if !ok {
- return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid))
- }
-
- if kdfLen := len(pk.kdf.Bytes()); kdfLen < 3 {
- return errors.UnsupportedError("unsupported ECDH KDF length: " + strconv.Itoa(kdfLen))
- }
- if reserved := pk.kdf.Bytes()[0]; reserved != 0x01 {
- return errors.UnsupportedError("unsupported KDF reserved field: " + strconv.Itoa(int(reserved)))
- }
- kdfHash, ok := algorithm.HashById[pk.kdf.Bytes()[1]]
- if !ok {
- return errors.UnsupportedError("unsupported ECDH KDF hash: " + strconv.Itoa(int(pk.kdf.Bytes()[1])))
- }
- kdfCipher, ok := algorithm.CipherById[pk.kdf.Bytes()[2]]
- if !ok {
- return errors.UnsupportedError("unsupported ECDH KDF cipher: " + strconv.Itoa(int(pk.kdf.Bytes()[2])))
- }
-
- ecdhKey := ecdh.NewPublicKey(c, kdfHash, kdfCipher)
- err = ecdhKey.UnmarshalPoint(pk.p.Bytes())
- pk.PublicKey = ecdhKey
-
- return
-}
-
-func (pk *PublicKey) parseEdDSA(r io.Reader) (err error) {
- pk.oid = new(encoding.OID)
- if _, err = pk.oid.ReadFrom(r); err != nil {
- return
- }
-
- curveInfo := ecc.FindByOid(pk.oid)
- if curveInfo == nil {
- return errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid))
- }
-
- c, ok := curveInfo.Curve.(ecc.EdDSACurve)
- if !ok {
- return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid))
- }
-
- pk.p = new(encoding.MPI)
- if _, err = pk.p.ReadFrom(r); err != nil {
- return
- }
-
- if len(pk.p.Bytes()) == 0 {
- return errors.StructuralError("empty EdDSA public key")
- }
-
- pub := eddsa.NewPublicKey(c)
-
- switch flag := pk.p.Bytes()[0]; flag {
- case 0x04:
- // TODO: see _grcy_ecc_eddsa_ensure_compact in grcypt
- return errors.UnsupportedError("unsupported EdDSA compression: " + strconv.Itoa(int(flag)))
- case 0x40:
- err = pub.UnmarshalPoint(pk.p.Bytes())
- default:
- return errors.UnsupportedError("unsupported EdDSA compression: " + strconv.Itoa(int(flag)))
- }
-
- pk.PublicKey = pub
- return
-}
-
-func (pk *PublicKey) parseX25519(r io.Reader) (err error) {
- point := make([]byte, x25519.KeySize)
- _, err = io.ReadFull(r, point)
- if err != nil {
- return
- }
- pub := &x25519.PublicKey{
- Point: point,
- }
- pk.PublicKey = pub
- return
-}
-
-func (pk *PublicKey) parseX448(r io.Reader) (err error) {
- point := make([]byte, x448.KeySize)
- _, err = io.ReadFull(r, point)
- if err != nil {
- return
- }
- pub := &x448.PublicKey{
- Point: point,
- }
- pk.PublicKey = pub
- return
-}
-
-func (pk *PublicKey) parseEd25519(r io.Reader) (err error) {
- point := make([]byte, ed25519.PublicKeySize)
- _, err = io.ReadFull(r, point)
- if err != nil {
- return
- }
- pub := &ed25519.PublicKey{
- Point: point,
- }
- pk.PublicKey = pub
- return
-}
-
-func (pk *PublicKey) parseEd448(r io.Reader) (err error) {
- point := make([]byte, ed448.PublicKeySize)
- _, err = io.ReadFull(r, point)
- if err != nil {
- return
- }
- pub := &ed448.PublicKey{
- Point: point,
- }
- pk.PublicKey = pub
- return
-}
-
-// SerializeForHash serializes the PublicKey to w with the special packet
-// header format needed for hashing.
-func (pk *PublicKey) SerializeForHash(w io.Writer) error {
- if err := pk.SerializeSignaturePrefix(w); err != nil {
- return err
- }
- return pk.serializeWithoutHeaders(w)
-}
-
-// SerializeSignaturePrefix writes the prefix for this public key to the given Writer.
-// The prefix is used when calculating a signature over this public key. See
-// RFC 4880, section 5.2.4.
-func (pk *PublicKey) SerializeSignaturePrefix(w io.Writer) error {
- var pLength = pk.algorithmSpecificByteCount()
- // version, timestamp, algorithm
- pLength += versionSize + timestampSize + algorithmSize
- if pk.Version >= 5 {
- // key octet count (4).
- pLength += 4
- _, err := w.Write([]byte{
- // When a v4 signature is made over a key, the hash data starts with the octet 0x99, followed by a two-octet length
- // of the key, and then the body of the key packet. When a v6 signature is made over a key, the hash data starts
- // with the salt, then octet 0x9B, followed by a four-octet length of the key, and then the body of the key packet.
- 0x95 + byte(pk.Version),
- byte(pLength >> 24),
- byte(pLength >> 16),
- byte(pLength >> 8),
- byte(pLength),
- })
- if err != nil {
- return err
- }
- return nil
- }
- if _, err := w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}); err != nil {
- return err
- }
- return nil
-}
-
-func (pk *PublicKey) Serialize(w io.Writer) (err error) {
- length := uint32(versionSize + timestampSize + algorithmSize) // 6 byte header
- length += pk.algorithmSpecificByteCount()
- if pk.Version >= 5 {
- length += 4 // octet key count
- }
- packetType := packetTypePublicKey
- if pk.IsSubkey {
- packetType = packetTypePublicSubkey
- }
- err = serializeHeader(w, packetType, int(length))
- if err != nil {
- return
- }
- return pk.serializeWithoutHeaders(w)
-}
-
-func (pk *PublicKey) algorithmSpecificByteCount() uint32 {
- length := uint32(0)
- switch pk.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
- length += uint32(pk.n.EncodedLength())
- length += uint32(pk.e.EncodedLength())
- case PubKeyAlgoDSA:
- length += uint32(pk.p.EncodedLength())
- length += uint32(pk.q.EncodedLength())
- length += uint32(pk.g.EncodedLength())
- length += uint32(pk.y.EncodedLength())
- case PubKeyAlgoElGamal:
- length += uint32(pk.p.EncodedLength())
- length += uint32(pk.g.EncodedLength())
- length += uint32(pk.y.EncodedLength())
- case PubKeyAlgoECDSA:
- length += uint32(pk.oid.EncodedLength())
- length += uint32(pk.p.EncodedLength())
- case PubKeyAlgoECDH:
- length += uint32(pk.oid.EncodedLength())
- length += uint32(pk.p.EncodedLength())
- length += uint32(pk.kdf.EncodedLength())
- case PubKeyAlgoEdDSA:
- length += uint32(pk.oid.EncodedLength())
- length += uint32(pk.p.EncodedLength())
- case PubKeyAlgoX25519:
- length += x25519.KeySize
- case PubKeyAlgoX448:
- length += x448.KeySize
- case PubKeyAlgoEd25519:
- length += ed25519.PublicKeySize
- case PubKeyAlgoEd448:
- length += ed448.PublicKeySize
- default:
- panic("unknown public key algorithm")
- }
- return length
-}
-
-// serializeWithoutHeaders marshals the PublicKey to w in the form of an
-// OpenPGP public key packet, not including the packet header.
-func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) {
- t := uint32(pk.CreationTime.Unix())
- if _, err = w.Write([]byte{
- byte(pk.Version),
- byte(t >> 24), byte(t >> 16), byte(t >> 8), byte(t),
- byte(pk.PubKeyAlgo),
- }); err != nil {
- return
- }
-
- if pk.Version >= 5 {
- n := pk.algorithmSpecificByteCount()
- if _, err = w.Write([]byte{
- byte(n >> 24), byte(n >> 16), byte(n >> 8), byte(n),
- }); err != nil {
- return
- }
- }
-
- switch pk.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
- if _, err = w.Write(pk.n.EncodedBytes()); err != nil {
- return
- }
- _, err = w.Write(pk.e.EncodedBytes())
- return
- case PubKeyAlgoDSA:
- if _, err = w.Write(pk.p.EncodedBytes()); err != nil {
- return
- }
- if _, err = w.Write(pk.q.EncodedBytes()); err != nil {
- return
- }
- if _, err = w.Write(pk.g.EncodedBytes()); err != nil {
- return
- }
- _, err = w.Write(pk.y.EncodedBytes())
- return
- case PubKeyAlgoElGamal:
- if _, err = w.Write(pk.p.EncodedBytes()); err != nil {
- return
- }
- if _, err = w.Write(pk.g.EncodedBytes()); err != nil {
- return
- }
- _, err = w.Write(pk.y.EncodedBytes())
- return
- case PubKeyAlgoECDSA:
- if _, err = w.Write(pk.oid.EncodedBytes()); err != nil {
- return
- }
- _, err = w.Write(pk.p.EncodedBytes())
- return
- case PubKeyAlgoECDH:
- if _, err = w.Write(pk.oid.EncodedBytes()); err != nil {
- return
- }
- if _, err = w.Write(pk.p.EncodedBytes()); err != nil {
- return
- }
- _, err = w.Write(pk.kdf.EncodedBytes())
- return
- case PubKeyAlgoEdDSA:
- if _, err = w.Write(pk.oid.EncodedBytes()); err != nil {
- return
- }
- _, err = w.Write(pk.p.EncodedBytes())
- return
- case PubKeyAlgoX25519:
- publicKey := pk.PublicKey.(*x25519.PublicKey)
- _, err = w.Write(publicKey.Point)
- return
- case PubKeyAlgoX448:
- publicKey := pk.PublicKey.(*x448.PublicKey)
- _, err = w.Write(publicKey.Point)
- return
- case PubKeyAlgoEd25519:
- publicKey := pk.PublicKey.(*ed25519.PublicKey)
- _, err = w.Write(publicKey.Point)
- return
- case PubKeyAlgoEd448:
- publicKey := pk.PublicKey.(*ed448.PublicKey)
- _, err = w.Write(publicKey.Point)
- return
- }
- return errors.InvalidArgumentError("bad public-key algorithm")
-}
-
-// CanSign returns true iff this public key can generate signatures
-func (pk *PublicKey) CanSign() bool {
- return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly && pk.PubKeyAlgo != PubKeyAlgoElGamal && pk.PubKeyAlgo != PubKeyAlgoECDH
-}
-
-// VerifySignature returns nil iff sig is a valid signature, made by this
-// public key, of the data hashed into signed. signed is mutated by this call.
-func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err error) {
- if !pk.CanSign() {
- return errors.InvalidArgumentError("public key cannot generate signatures")
- }
- if sig.Version == 5 && (sig.SigType == 0x00 || sig.SigType == 0x01) {
- sig.AddMetadataToHashSuffix()
- }
- signed.Write(sig.HashSuffix)
- hashBytes := signed.Sum(nil)
- // see discussion https://github.com/ProtonMail/go-crypto/issues/107
- if sig.Version >= 5 && (hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1]) {
- return errors.SignatureError("hash tag doesn't match")
- }
-
- if pk.PubKeyAlgo != sig.PubKeyAlgo {
- return errors.InvalidArgumentError("public key and signature use different algorithms")
- }
-
- switch pk.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
- rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey)
- err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.Bytes()))
- if err != nil {
- return errors.SignatureError("RSA verification failure")
- }
- return nil
- case PubKeyAlgoDSA:
- dsaPublicKey, _ := pk.PublicKey.(*dsa.PublicKey)
- // Need to truncate hashBytes to match FIPS 186-3 section 4.6.
- subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8
- if len(hashBytes) > subgroupSize {
- hashBytes = hashBytes[:subgroupSize]
- }
- if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.Bytes()), new(big.Int).SetBytes(sig.DSASigS.Bytes())) {
- return errors.SignatureError("DSA verification failure")
- }
- return nil
- case PubKeyAlgoECDSA:
- ecdsaPublicKey := pk.PublicKey.(*ecdsa.PublicKey)
- if !ecdsa.Verify(ecdsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.ECDSASigR.Bytes()), new(big.Int).SetBytes(sig.ECDSASigS.Bytes())) {
- return errors.SignatureError("ECDSA verification failure")
- }
- return nil
- case PubKeyAlgoEdDSA:
- eddsaPublicKey := pk.PublicKey.(*eddsa.PublicKey)
- if !eddsa.Verify(eddsaPublicKey, hashBytes, sig.EdDSASigR.Bytes(), sig.EdDSASigS.Bytes()) {
- return errors.SignatureError("EdDSA verification failure")
- }
- return nil
- case PubKeyAlgoEd25519:
- ed25519PublicKey := pk.PublicKey.(*ed25519.PublicKey)
- if !ed25519.Verify(ed25519PublicKey, hashBytes, sig.EdSig) {
- return errors.SignatureError("Ed25519 verification failure")
- }
- return nil
- case PubKeyAlgoEd448:
- ed448PublicKey := pk.PublicKey.(*ed448.PublicKey)
- if !ed448.Verify(ed448PublicKey, hashBytes, sig.EdSig) {
- return errors.SignatureError("ed448 verification failure")
- }
- return nil
- default:
- return errors.SignatureError("Unsupported public key algorithm used in signature")
- }
-}
-
-// keySignatureHash returns a Hash of the message that needs to be signed for
-// pk to assert a subkey relationship to signed.
-func keySignatureHash(pk, signed signingKey, hashFunc hash.Hash) (h hash.Hash, err error) {
- h = hashFunc
-
- // RFC 4880, section 5.2.4
- err = pk.SerializeForHash(h)
- if err != nil {
- return nil, err
- }
-
- err = signed.SerializeForHash(h)
- return
-}
-
-// VerifyKeySignature returns nil iff sig is a valid signature, made by this
-// public key, of signed.
-func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error {
- preparedHash, err := sig.PrepareVerify()
- if err != nil {
- return err
- }
- h, err := keySignatureHash(pk, signed, preparedHash)
- if err != nil {
- return err
- }
- if err = pk.VerifySignature(h, sig); err != nil {
- return err
- }
-
- if sig.FlagSign {
- // Signing subkeys must be cross-signed. See
- // https://www.gnupg.org/faq/subkey-cross-certify.html.
- if sig.EmbeddedSignature == nil {
- return errors.StructuralError("signing subkey is missing cross-signature")
- }
- preparedHashEmbedded, err := sig.EmbeddedSignature.PrepareVerify()
- if err != nil {
- return err
- }
- // Verify the cross-signature. This is calculated over the same
- // data as the main signature, so we cannot just recursively
- // call signed.VerifyKeySignature(...)
- if h, err = keySignatureHash(pk, signed, preparedHashEmbedded); err != nil {
- return errors.StructuralError("error while hashing for cross-signature: " + err.Error())
- }
- if err := signed.VerifySignature(h, sig.EmbeddedSignature); err != nil {
- return errors.StructuralError("error while verifying cross-signature: " + err.Error())
- }
- }
-
- return nil
-}
-
-func keyRevocationHash(pk signingKey, hashFunc hash.Hash) (err error) {
- return pk.SerializeForHash(hashFunc)
-}
-
-// VerifyRevocationSignature returns nil iff sig is a valid signature, made by this
-// public key.
-func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) {
- preparedHash, err := sig.PrepareVerify()
- if err != nil {
- return err
- }
- if keyRevocationHash(pk, preparedHash); err != nil {
- return err
- }
- return pk.VerifySignature(preparedHash, sig)
-}
-
-// VerifySubkeyRevocationSignature returns nil iff sig is a valid subkey revocation signature,
-// made by this public key, of signed.
-func (pk *PublicKey) VerifySubkeyRevocationSignature(sig *Signature, signed *PublicKey) (err error) {
- preparedHash, err := sig.PrepareVerify()
- if err != nil {
- return err
- }
- h, err := keySignatureHash(pk, signed, preparedHash)
- if err != nil {
- return err
- }
- return pk.VerifySignature(h, sig)
-}
-
-// userIdSignatureHash returns a Hash of the message that needs to be signed
-// to assert that pk is a valid key for id.
-func userIdSignatureHash(id string, pk *PublicKey, h hash.Hash) (err error) {
-
- // RFC 4880, section 5.2.4
- if err := pk.SerializeSignaturePrefix(h); err != nil {
- return err
- }
- if err := pk.serializeWithoutHeaders(h); err != nil {
- return err
- }
-
- var buf [5]byte
- buf[0] = 0xb4
- buf[1] = byte(len(id) >> 24)
- buf[2] = byte(len(id) >> 16)
- buf[3] = byte(len(id) >> 8)
- buf[4] = byte(len(id))
- h.Write(buf[:])
- h.Write([]byte(id))
-
- return nil
-}
-
-// directKeySignatureHash returns a Hash of the message that needs to be signed.
-func directKeySignatureHash(pk *PublicKey, h hash.Hash) (err error) {
- return pk.SerializeForHash(h)
-}
-
-// VerifyUserIdSignature returns nil iff sig is a valid signature, made by this
-// public key, that id is the identity of pub.
-func (pk *PublicKey) VerifyUserIdSignature(id string, pub *PublicKey, sig *Signature) (err error) {
- h, err := sig.PrepareVerify()
- if err != nil {
- return err
- }
- if err := userIdSignatureHash(id, pub, h); err != nil {
- return err
- }
- return pk.VerifySignature(h, sig)
-}
-
-// VerifyDirectKeySignature returns nil iff sig is a valid signature, made by this
-// public key.
-func (pk *PublicKey) VerifyDirectKeySignature(sig *Signature) (err error) {
- h, err := sig.PrepareVerify()
- if err != nil {
- return err
- }
- if err := directKeySignatureHash(pk, h); err != nil {
- return err
- }
- return pk.VerifySignature(h, sig)
-}
-
-// KeyIdString returns the public key's fingerprint in capital hex
-// (e.g. "6C7EE1B8621CC013").
-func (pk *PublicKey) KeyIdString() string {
- return fmt.Sprintf("%X", pk.Fingerprint[12:20])
-}
-
-// KeyIdShortString returns the short form of public key's fingerprint
-// in capital hex, as shown by gpg --list-keys (e.g. "621CC013").
-func (pk *PublicKey) KeyIdShortString() string {
- return fmt.Sprintf("%X", pk.Fingerprint[16:20])
-}
-
-// BitLength returns the bit length for the given public key.
-func (pk *PublicKey) BitLength() (bitLength uint16, err error) {
- switch pk.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
- bitLength = pk.n.BitLength()
- case PubKeyAlgoDSA:
- bitLength = pk.p.BitLength()
- case PubKeyAlgoElGamal:
- bitLength = pk.p.BitLength()
- case PubKeyAlgoECDSA:
- bitLength = pk.p.BitLength()
- case PubKeyAlgoECDH:
- bitLength = pk.p.BitLength()
- case PubKeyAlgoEdDSA:
- bitLength = pk.p.BitLength()
- case PubKeyAlgoX25519:
- bitLength = x25519.KeySize * 8
- case PubKeyAlgoX448:
- bitLength = x448.KeySize * 8
- case PubKeyAlgoEd25519:
- bitLength = ed25519.PublicKeySize * 8
- case PubKeyAlgoEd448:
- bitLength = ed448.PublicKeySize * 8
- default:
- err = errors.InvalidArgumentError("bad public-key algorithm")
- }
- return
-}
-
-// Curve returns the used elliptic curve of this public key.
-// Returns an error if no elliptic curve is used.
-func (pk *PublicKey) Curve() (curve Curve, err error) {
- switch pk.PubKeyAlgo {
- case PubKeyAlgoECDSA, PubKeyAlgoECDH, PubKeyAlgoEdDSA:
- curveInfo := ecc.FindByOid(pk.oid)
- if curveInfo == nil {
- return "", errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid))
- }
- curve = Curve(curveInfo.GenName)
- case PubKeyAlgoEd25519, PubKeyAlgoX25519:
- curve = Curve25519
- case PubKeyAlgoEd448, PubKeyAlgoX448:
- curve = Curve448
- default:
- err = errors.InvalidArgumentError("public key does not operate with an elliptic curve")
- }
- return
-}
-
-// KeyExpired returns whether sig is a self-signature of a key that has
-// expired or is created in the future.
-func (pk *PublicKey) KeyExpired(sig *Signature, currentTime time.Time) bool {
- if pk.CreationTime.Unix() > currentTime.Unix() {
- return true
- }
- if sig.KeyLifetimeSecs == nil || *sig.KeyLifetimeSecs == 0 {
- return false
- }
- expiry := pk.CreationTime.Add(time.Duration(*sig.KeyLifetimeSecs) * time.Second)
- return currentTime.Unix() > expiry.Unix()
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key_test_data.go
deleted file mode 100644
index b255f1f6f8f..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key_test_data.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package packet
-
-const rsaFingerprintHex = "5fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb"
-
-const rsaPkDataHex = "988d044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd0011010001"
-
-const dsaFingerprintHex = "eece4c094db002103714c63c8e8fbe54062f19ed"
-
-const dsaPkDataHex = "9901a2044d432f89110400cd581334f0d7a1e1bdc8b9d6d8c0baf68793632735d2bb0903224cbaa1dfbf35a60ee7a13b92643421e1eb41aa8d79bea19a115a677f6b8ba3c7818ce53a6c2a24a1608bd8b8d6e55c5090cbde09dd26e356267465ae25e69ec8bdd57c7bbb2623e4d73336f73a0a9098f7f16da2e25252130fd694c0e8070c55a812a423ae7f00a0ebf50e70c2f19c3520a551bd4b08d30f23530d3d03ff7d0bf4a53a64a09dc5e6e6e35854b7d70c882b0c60293401958b1bd9e40abec3ea05ba87cf64899299d4bd6aa7f459c201d3fbbd6c82004bdc5e8a9eb8082d12054cc90fa9d4ec251a843236a588bf49552441817436c4f43326966fe85447d4e6d0acf8fa1ef0f014730770603ad7634c3088dc52501c237328417c31c89ed70400b2f1a98b0bf42f11fefc430704bebbaa41d9f355600c3facee1e490f64208e0e094ea55e3a598a219a58500bf78ac677b670a14f4e47e9cf8eab4f368cc1ddcaa18cc59309d4cc62dd4f680e73e6cc3e1ce87a84d0925efbcb26c575c093fc42eecf45135fabf6403a25c2016e1774c0484e440a18319072c617cc97ac0a3bb0"
-
-const ecdsaFingerprintHex = "9892270b38b8980b05c8d56d43fe956c542ca00b"
-
-const ecdsaPkDataHex = "9893045071c29413052b8104002304230401f4867769cedfa52c325018896245443968e52e51d0c2df8d939949cb5b330f2921711fbee1c9b9dddb95d15cb0255e99badeddda7cc23d9ddcaacbc290969b9f24019375d61c2e4e3b36953a28d8b2bc95f78c3f1d592fb24499be348656a7b17e3963187b4361afe497bc5f9f81213f04069f8e1fb9e6a6290ae295ca1a92b894396cb4"
-
-const ecdhFingerprintHex = "722354df2475a42164d1d49faa8b938f9a201946"
-
-const ecdhPkDataHex = "b90073044d53059212052b810400220303042faa84024a20b6735c4897efa5bfb41bf85b7eefeab5ca0cb9ffc8ea04a46acb25534a577694f9e25340a4ab5223a9dd1eda530c8aa2e6718db10d7e672558c7736fe09369ea5739a2a3554bf16d41faa50562f11c6d39bbd5dffb6b9a9ec91803010909"
-
-const eddsaFingerprintHex = "b2d5e5ec0e6deca6bc8eeeb00907e75e1dd99ad8"
-
-const eddsaPkDataHex = "98330456e2132b16092b06010401da470f01010740bbda39266affa511a8c2d02edf690fb784b0499c4406185811a163539ef11dc1b41d74657374696e67203c74657374696e674074657374696e672e636f6d3e8879041316080021050256e2132b021b03050b09080702061508090a0b020416020301021e01021780000a09100907e75e1dd99ad86d0c00fe39d2008359352782bc9b61ac382584cd8eff3f57a18c2287e3afeeb05d1f04ba00fe2d0bc1ddf3ff8adb9afa3e7d9287244b4ec567f3db4d60b74a9b5465ed528203"
-
-// Source: https://sites.google.com/site/brainhub/pgpecckeys#TOC-ECC-NIST-P-384-key
-const ecc384PubHex = `99006f044d53059213052b81040022030304f6b8c5aced5b84ef9f4a209db2e4a9dfb70d28cb8c10ecd57674a9fa5a67389942b62d5e51367df4c7bfd3f8e500feecf07ed265a621a8ebbbe53e947ec78c677eba143bd1533c2b350e1c29f82313e1e1108eba063be1e64b10e6950e799c2db42465635f6473615f64685f333834203c6f70656e70677040627261696e6875622e6f72673e8900cb04101309005305024d530592301480000000002000077072656665727265642d656d61696c2d656e636f64696e67407067702e636f6d7067706d696d65040b090807021901051b03000000021602051e010000000415090a08000a0910098033880f54719fca2b0180aa37350968bd5f115afd8ce7bc7b103822152dbff06d0afcda835329510905b98cb469ba208faab87c7412b799e7b633017f58364ea480e8a1a3f253a0c5f22c446e8be9a9fce6210136ee30811abbd49139de28b5bdf8dc36d06ae748579e9ff503b90073044d53059212052b810400220303042faa84024a20b6735c4897efa5bfb41bf85b7eefeab5ca0cb9ffc8ea04a46acb25534a577694f9e25340a4ab5223a9dd1eda530c8aa2e6718db10d7e672558c7736fe09369ea5739a2a3554bf16d41faa50562f11c6d39bbd5dffb6b9a9ec9180301090989008404181309000c05024d530592051b0c000000000a0910098033880f54719f80970180eee7a6d8fcee41ee4f9289df17f9bcf9d955dca25c583b94336f3a2b2d4986dc5cf417b8d2dc86f741a9e1a6d236c0e3017d1c76575458a0cfb93ae8a2b274fcc65ceecd7a91eec83656ba13219969f06945b48c56bd04152c3a0553c5f2f4bd1267`
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go
deleted file mode 100644
index dd84092392a..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go
+++ /dev/null
@@ -1,209 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
-)
-
-type PacketReader interface {
- Next() (p Packet, err error)
- Push(reader io.Reader) (err error)
- Unread(p Packet)
-}
-
-// Reader reads packets from an io.Reader and allows packets to be 'unread' so
-// that they result from the next call to Next.
-type Reader struct {
- q []Packet
- readers []io.Reader
-}
-
-// New io.Readers are pushed when a compressed or encrypted packet is processed
-// and recursively treated as a new source of packets. However, a carefully
-// crafted packet can trigger an infinite recursive sequence of packets. See
-// http://mumble.net/~campbell/misc/pgp-quine
-// https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2013-4402
-// This constant limits the number of recursive packets that may be pushed.
-const maxReaders = 32
-
-// Next returns the most recently unread Packet, or reads another packet from
-// the top-most io.Reader. Unknown/unsupported/Marker packet types are skipped.
-func (r *Reader) Next() (p Packet, err error) {
- for {
- p, err := r.read()
- if err == io.EOF {
- break
- } else if err != nil {
- if _, ok := err.(errors.UnknownPacketTypeError); ok {
- continue
- }
- if _, ok := err.(errors.UnsupportedError); ok {
- switch p.(type) {
- case *SymmetricallyEncrypted, *AEADEncrypted, *Compressed, *LiteralData:
- return nil, err
- }
- continue
- }
- return nil, err
- } else {
- //A marker packet MUST be ignored when received
- switch p.(type) {
- case *Marker:
- continue
- }
- return p, nil
- }
- }
- return nil, io.EOF
-}
-
-// Next returns the most recently unread Packet, or reads another packet from
-// the top-most io.Reader. Unknown/Marker packet types are skipped while unsupported
-// packets are returned as UnsupportedPacket type.
-func (r *Reader) NextWithUnsupported() (p Packet, err error) {
- for {
- p, err = r.read()
- if err == io.EOF {
- break
- } else if err != nil {
- if _, ok := err.(errors.UnknownPacketTypeError); ok {
- continue
- }
- if casteErr, ok := err.(errors.UnsupportedError); ok {
- return &UnsupportedPacket{
- IncompletePacket: p,
- Error: casteErr,
- }, nil
- }
- return
- } else {
- //A marker packet MUST be ignored when received
- switch p.(type) {
- case *Marker:
- continue
- }
- return
- }
- }
- return nil, io.EOF
-}
-
-func (r *Reader) read() (p Packet, err error) {
- if len(r.q) > 0 {
- p = r.q[len(r.q)-1]
- r.q = r.q[:len(r.q)-1]
- return
- }
- for len(r.readers) > 0 {
- p, err = Read(r.readers[len(r.readers)-1])
- if err == io.EOF {
- r.readers = r.readers[:len(r.readers)-1]
- continue
- }
- return p, err
- }
- return nil, io.EOF
-}
-
-// Push causes the Reader to start reading from a new io.Reader. When an EOF
-// error is seen from the new io.Reader, it is popped and the Reader continues
-// to read from the next most recent io.Reader. Push returns a StructuralError
-// if pushing the reader would exceed the maximum recursion level, otherwise it
-// returns nil.
-func (r *Reader) Push(reader io.Reader) (err error) {
- if len(r.readers) >= maxReaders {
- return errors.StructuralError("too many layers of packets")
- }
- r.readers = append(r.readers, reader)
- return nil
-}
-
-// Unread causes the given Packet to be returned from the next call to Next.
-func (r *Reader) Unread(p Packet) {
- r.q = append(r.q, p)
-}
-
-func NewReader(r io.Reader) *Reader {
- return &Reader{
- q: nil,
- readers: []io.Reader{r},
- }
-}
-
-// CheckReader is similar to Reader but additionally
-// uses the pushdown automata to verify the read packet sequence.
-type CheckReader struct {
- Reader
- verifier *SequenceVerifier
- fullyRead bool
-}
-
-// Next returns the most recently unread Packet, or reads another packet from
-// the top-most io.Reader. Unknown packet types are skipped.
-// If the read packet sequence does not conform to the packet composition
-// rules in rfc4880, it returns an error.
-func (r *CheckReader) Next() (p Packet, err error) {
- if r.fullyRead {
- return nil, io.EOF
- }
- if len(r.q) > 0 {
- p = r.q[len(r.q)-1]
- r.q = r.q[:len(r.q)-1]
- return
- }
- var errMsg error
- for len(r.readers) > 0 {
- p, errMsg, err = ReadWithCheck(r.readers[len(r.readers)-1], r.verifier)
- if errMsg != nil {
- err = errMsg
- return
- }
- if err == nil {
- return
- }
- if err == io.EOF {
- r.readers = r.readers[:len(r.readers)-1]
- continue
- }
- //A marker packet MUST be ignored when received
- switch p.(type) {
- case *Marker:
- continue
- }
- if _, ok := err.(errors.UnknownPacketTypeError); ok {
- continue
- }
- if _, ok := err.(errors.UnsupportedError); ok {
- switch p.(type) {
- case *SymmetricallyEncrypted, *AEADEncrypted, *Compressed, *LiteralData:
- return nil, err
- }
- continue
- }
- return nil, err
- }
- if errMsg = r.verifier.Next(EOSSymbol); errMsg != nil {
- return nil, errMsg
- }
- if errMsg = r.verifier.AssertValid(); errMsg != nil {
- return nil, errMsg
- }
- r.fullyRead = true
- return nil, io.EOF
-}
-
-func NewCheckReader(r io.Reader) *CheckReader {
- return &CheckReader{
- Reader: Reader{
- q: nil,
- readers: []io.Reader{r},
- },
- verifier: NewSequenceVerifier(),
- fullyRead: false,
- }
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/recipient.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/recipient.go
deleted file mode 100644
index fb2e362e4a8..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/recipient.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package packet
-
-// Recipient type represents a Intended Recipient Fingerprint subpacket
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh#name-intended-recipient-fingerpr
-type Recipient struct {
- KeyVersion int
- Fingerprint []byte
-}
-
-func (r *Recipient) Serialize() []byte {
- packet := make([]byte, len(r.Fingerprint)+1)
- packet[0] = byte(r.KeyVersion)
- copy(packet[1:], r.Fingerprint)
- return packet
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go
deleted file mode 100644
index 420625386b5..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go
+++ /dev/null
@@ -1,1402 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "bytes"
- "crypto"
- "crypto/dsa"
- "encoding/binary"
- "hash"
- "io"
- "strconv"
- "time"
-
- "github.com/ProtonMail/go-crypto/openpgp/ecdsa"
- "github.com/ProtonMail/go-crypto/openpgp/ed25519"
- "github.com/ProtonMail/go-crypto/openpgp/ed448"
- "github.com/ProtonMail/go-crypto/openpgp/eddsa"
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
- "github.com/ProtonMail/go-crypto/openpgp/internal/encoding"
-)
-
-const (
- // See RFC 4880, section 5.2.3.21 for details.
- KeyFlagCertify = 1 << iota
- KeyFlagSign
- KeyFlagEncryptCommunications
- KeyFlagEncryptStorage
- KeyFlagSplitKey
- KeyFlagAuthenticate
- _
- KeyFlagGroupKey
-)
-
-// Signature represents a signature. See RFC 4880, section 5.2.
-type Signature struct {
- Version int
- SigType SignatureType
- PubKeyAlgo PublicKeyAlgorithm
- Hash crypto.Hash
- // salt contains a random salt value for v6 signatures
- // See RFC the crypto refresh Section 5.2.3.
- salt []byte
-
- // HashSuffix is extra data that is hashed in after the signed data.
- HashSuffix []byte
- // HashTag contains the first two bytes of the hash for fast rejection
- // of bad signed data.
- HashTag [2]byte
-
- // Metadata includes format, filename and time, and is protected by v5
- // signatures of type 0x00 or 0x01. This metadata is included into the hash
- // computation; if nil, six 0x00 bytes are used instead. See section 5.2.4.
- Metadata *LiteralData
-
- CreationTime time.Time
-
- RSASignature encoding.Field
- DSASigR, DSASigS encoding.Field
- ECDSASigR, ECDSASigS encoding.Field
- EdDSASigR, EdDSASigS encoding.Field
- EdSig []byte
-
- // rawSubpackets contains the unparsed subpackets, in order.
- rawSubpackets []outputSubpacket
-
- // The following are optional so are nil when not included in the
- // signature.
-
- SigLifetimeSecs, KeyLifetimeSecs *uint32
- PreferredSymmetric, PreferredHash, PreferredCompression []uint8
- PreferredCipherSuites [][2]uint8
- IssuerKeyId *uint64
- IssuerFingerprint []byte
- SignerUserId *string
- IsPrimaryId *bool
- Notations []*Notation
- IntendedRecipients []*Recipient
-
- // TrustLevel and TrustAmount can be set by the signer to assert that
- // the key is not only valid but also trustworthy at the specified
- // level.
- // See RFC 4880, section 5.2.3.13 for details.
- TrustLevel TrustLevel
- TrustAmount TrustAmount
-
- // TrustRegularExpression can be used in conjunction with trust Signature
- // packets to limit the scope of the trust that is extended.
- // See RFC 4880, section 5.2.3.14 for details.
- TrustRegularExpression *string
-
- // PolicyURI can be set to the URI of a document that describes the
- // policy under which the signature was issued. See RFC 4880, section
- // 5.2.3.20 for details.
- PolicyURI string
-
- // FlagsValid is set if any flags were given. See RFC 4880, section
- // 5.2.3.21 for details.
- FlagsValid bool
- FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage, FlagSplitKey, FlagAuthenticate, FlagGroupKey bool
-
- // RevocationReason is set if this signature has been revoked.
- // See RFC 4880, section 5.2.3.23 for details.
- RevocationReason *ReasonForRevocation
- RevocationReasonText string
-
- // In a self-signature, these flags are set there is a features subpacket
- // indicating that the issuer implementation supports these features
- // see https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh#features-subpacket
- SEIPDv1, SEIPDv2 bool
-
- // EmbeddedSignature, if non-nil, is a signature of the parent key, by
- // this key. This prevents an attacker from claiming another's signing
- // subkey as their own.
- EmbeddedSignature *Signature
-
- outSubpackets []outputSubpacket
-}
-
-// VerifiableSignature internally keeps state if the
-// the signature has been verified before.
-type VerifiableSignature struct {
- Valid *bool // nil if it has not been verified yet
- Packet *Signature
-}
-
-// NewVerifiableSig returns a struct of type VerifiableSignature referencing the input signature.
-func NewVerifiableSig(signature *Signature) *VerifiableSignature {
- return &VerifiableSignature{
- Packet: signature,
- }
-}
-
-// Salt returns the signature salt for v6 signatures.
-func (sig *Signature) Salt() []byte {
- if sig == nil {
- return nil
- }
- return sig.salt
-}
-
-func (sig *Signature) parse(r io.Reader) (err error) {
- // RFC 4880, section 5.2.3
- var buf [7]byte
- _, err = readFull(r, buf[:1])
- if err != nil {
- return
- }
- if buf[0] != 4 && buf[0] != 5 && buf[0] != 6 {
- err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0])))
- return
- }
- sig.Version = int(buf[0])
- if sig.Version == 6 {
- _, err = readFull(r, buf[:7])
- } else {
- _, err = readFull(r, buf[:5])
- }
- if err != nil {
- return
- }
- sig.SigType = SignatureType(buf[0])
- sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1])
- switch sig.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA, PubKeyAlgoEd25519, PubKeyAlgoEd448:
- default:
- err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo)))
- return
- }
-
- var ok bool
-
- if sig.Version < 5 {
- sig.Hash, ok = algorithm.HashIdToHashWithSha1(buf[2])
- } else {
- sig.Hash, ok = algorithm.HashIdToHash(buf[2])
- }
-
- if !ok {
- return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2])))
- }
-
- var hashedSubpacketsLength int
- if sig.Version == 6 {
- // For a v6 signature, a four-octet length is used.
- hashedSubpacketsLength =
- int(buf[3])<<24 |
- int(buf[4])<<16 |
- int(buf[5])<<8 |
- int(buf[6])
- } else {
- hashedSubpacketsLength = int(buf[3])<<8 | int(buf[4])
- }
- hashedSubpackets := make([]byte, hashedSubpacketsLength)
- _, err = readFull(r, hashedSubpackets)
- if err != nil {
- return
- }
- err = sig.buildHashSuffix(hashedSubpackets)
- if err != nil {
- return
- }
-
- err = parseSignatureSubpackets(sig, hashedSubpackets, true)
- if err != nil {
- return
- }
-
- if sig.Version == 6 {
- _, err = readFull(r, buf[:4])
- } else {
- _, err = readFull(r, buf[:2])
- }
-
- if err != nil {
- return
- }
- var unhashedSubpacketsLength uint32
- if sig.Version == 6 {
- unhashedSubpacketsLength = uint32(buf[0])<<24 | uint32(buf[1])<<16 | uint32(buf[2])<<8 | uint32(buf[3])
- } else {
- unhashedSubpacketsLength = uint32(buf[0])<<8 | uint32(buf[1])
- }
- unhashedSubpackets := make([]byte, unhashedSubpacketsLength)
- _, err = readFull(r, unhashedSubpackets)
- if err != nil {
- return
- }
- err = parseSignatureSubpackets(sig, unhashedSubpackets, false)
- if err != nil {
- return
- }
-
- _, err = readFull(r, sig.HashTag[:2])
- if err != nil {
- return
- }
-
- if sig.Version == 6 {
- // Only for v6 signatures, a variable-length field containing the salt
- _, err = readFull(r, buf[:1])
- if err != nil {
- return
- }
- saltLength := int(buf[0])
- var expectedSaltLength int
- expectedSaltLength, err = SaltLengthForHash(sig.Hash)
- if err != nil {
- return
- }
- if saltLength != expectedSaltLength {
- err = errors.StructuralError("unexpected salt size for the given hash algorithm")
- return
- }
- salt := make([]byte, expectedSaltLength)
- _, err = readFull(r, salt)
- if err != nil {
- return
- }
- sig.salt = salt
- }
-
- switch sig.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
- sig.RSASignature = new(encoding.MPI)
- _, err = sig.RSASignature.ReadFrom(r)
- case PubKeyAlgoDSA:
- sig.DSASigR = new(encoding.MPI)
- if _, err = sig.DSASigR.ReadFrom(r); err != nil {
- return
- }
-
- sig.DSASigS = new(encoding.MPI)
- _, err = sig.DSASigS.ReadFrom(r)
- case PubKeyAlgoECDSA:
- sig.ECDSASigR = new(encoding.MPI)
- if _, err = sig.ECDSASigR.ReadFrom(r); err != nil {
- return
- }
-
- sig.ECDSASigS = new(encoding.MPI)
- _, err = sig.ECDSASigS.ReadFrom(r)
- case PubKeyAlgoEdDSA:
- sig.EdDSASigR = new(encoding.MPI)
- if _, err = sig.EdDSASigR.ReadFrom(r); err != nil {
- return
- }
-
- sig.EdDSASigS = new(encoding.MPI)
- if _, err = sig.EdDSASigS.ReadFrom(r); err != nil {
- return
- }
- case PubKeyAlgoEd25519:
- sig.EdSig, err = ed25519.ReadSignature(r)
- if err != nil {
- return
- }
- case PubKeyAlgoEd448:
- sig.EdSig, err = ed448.ReadSignature(r)
- if err != nil {
- return
- }
- default:
- panic("unreachable")
- }
- return
-}
-
-// parseSignatureSubpackets parses subpackets of the main signature packet. See
-// RFC 4880, section 5.2.3.1.
-func parseSignatureSubpackets(sig *Signature, subpackets []byte, isHashed bool) (err error) {
- for len(subpackets) > 0 {
- subpackets, err = parseSignatureSubpacket(sig, subpackets, isHashed)
- if err != nil {
- return
- }
- }
-
- if sig.CreationTime.IsZero() {
- err = errors.StructuralError("no creation time in signature")
- }
-
- return
-}
-
-type signatureSubpacketType uint8
-
-const (
- creationTimeSubpacket signatureSubpacketType = 2
- signatureExpirationSubpacket signatureSubpacketType = 3
- trustSubpacket signatureSubpacketType = 5
- regularExpressionSubpacket signatureSubpacketType = 6
- keyExpirationSubpacket signatureSubpacketType = 9
- prefSymmetricAlgosSubpacket signatureSubpacketType = 11
- issuerSubpacket signatureSubpacketType = 16
- notationDataSubpacket signatureSubpacketType = 20
- prefHashAlgosSubpacket signatureSubpacketType = 21
- prefCompressionSubpacket signatureSubpacketType = 22
- primaryUserIdSubpacket signatureSubpacketType = 25
- policyUriSubpacket signatureSubpacketType = 26
- keyFlagsSubpacket signatureSubpacketType = 27
- signerUserIdSubpacket signatureSubpacketType = 28
- reasonForRevocationSubpacket signatureSubpacketType = 29
- featuresSubpacket signatureSubpacketType = 30
- embeddedSignatureSubpacket signatureSubpacketType = 32
- issuerFingerprintSubpacket signatureSubpacketType = 33
- intendedRecipientSubpacket signatureSubpacketType = 35
- prefCipherSuitesSubpacket signatureSubpacketType = 39
-)
-
-// parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1.
-func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (rest []byte, err error) {
- // RFC 4880, section 5.2.3.1
- var (
- length uint32
- packetType signatureSubpacketType
- isCritical bool
- )
- if len(subpacket) == 0 {
- err = errors.StructuralError("zero length signature subpacket")
- return
- }
- switch {
- case subpacket[0] < 192:
- length = uint32(subpacket[0])
- subpacket = subpacket[1:]
- case subpacket[0] < 255:
- if len(subpacket) < 2 {
- goto Truncated
- }
- length = uint32(subpacket[0]-192)<<8 + uint32(subpacket[1]) + 192
- subpacket = subpacket[2:]
- default:
- if len(subpacket) < 5 {
- goto Truncated
- }
- length = uint32(subpacket[1])<<24 |
- uint32(subpacket[2])<<16 |
- uint32(subpacket[3])<<8 |
- uint32(subpacket[4])
- subpacket = subpacket[5:]
- }
- if length > uint32(len(subpacket)) {
- goto Truncated
- }
- rest = subpacket[length:]
- subpacket = subpacket[:length]
- if len(subpacket) == 0 {
- err = errors.StructuralError("zero length signature subpacket")
- return
- }
- packetType = signatureSubpacketType(subpacket[0] & 0x7f)
- isCritical = subpacket[0]&0x80 == 0x80
- subpacket = subpacket[1:]
- sig.rawSubpackets = append(sig.rawSubpackets, outputSubpacket{isHashed, packetType, isCritical, subpacket})
- if !isHashed &&
- packetType != issuerSubpacket &&
- packetType != issuerFingerprintSubpacket &&
- packetType != embeddedSignatureSubpacket {
- return
- }
- switch packetType {
- case creationTimeSubpacket:
- if len(subpacket) != 4 {
- err = errors.StructuralError("signature creation time not four bytes")
- return
- }
- t := binary.BigEndian.Uint32(subpacket)
- sig.CreationTime = time.Unix(int64(t), 0)
- case signatureExpirationSubpacket:
- // Signature expiration time, section 5.2.3.10
- if len(subpacket) != 4 {
- err = errors.StructuralError("expiration subpacket with bad length")
- return
- }
- sig.SigLifetimeSecs = new(uint32)
- *sig.SigLifetimeSecs = binary.BigEndian.Uint32(subpacket)
- case trustSubpacket:
- if len(subpacket) != 2 {
- err = errors.StructuralError("trust subpacket with bad length")
- return
- }
- // Trust level and amount, section 5.2.3.13
- sig.TrustLevel = TrustLevel(subpacket[0])
- sig.TrustAmount = TrustAmount(subpacket[1])
- case regularExpressionSubpacket:
- if len(subpacket) == 0 {
- err = errors.StructuralError("regexp subpacket with bad length")
- return
- }
- // Trust regular expression, section 5.2.3.14
- // RFC specifies the string should be null-terminated; remove a null byte from the end
- if subpacket[len(subpacket)-1] != 0x00 {
- err = errors.StructuralError("expected regular expression to be null-terminated")
- return
- }
- trustRegularExpression := string(subpacket[:len(subpacket)-1])
- sig.TrustRegularExpression = &trustRegularExpression
- case keyExpirationSubpacket:
- // Key expiration time, section 5.2.3.6
- if len(subpacket) != 4 {
- err = errors.StructuralError("key expiration subpacket with bad length")
- return
- }
- sig.KeyLifetimeSecs = new(uint32)
- *sig.KeyLifetimeSecs = binary.BigEndian.Uint32(subpacket)
- case prefSymmetricAlgosSubpacket:
- // Preferred symmetric algorithms, section 5.2.3.7
- sig.PreferredSymmetric = make([]byte, len(subpacket))
- copy(sig.PreferredSymmetric, subpacket)
- case issuerSubpacket:
- // Issuer, section 5.2.3.5
- if sig.Version > 4 && isHashed {
- err = errors.StructuralError("issuer subpacket found in v6 key")
- return
- }
- if len(subpacket) != 8 {
- err = errors.StructuralError("issuer subpacket with bad length")
- return
- }
- if sig.Version <= 4 {
- sig.IssuerKeyId = new(uint64)
- *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket)
- }
- case notationDataSubpacket:
- // Notation data, section 5.2.3.16
- if len(subpacket) < 8 {
- err = errors.StructuralError("notation data subpacket with bad length")
- return
- }
-
- nameLength := uint32(subpacket[4])<<8 | uint32(subpacket[5])
- valueLength := uint32(subpacket[6])<<8 | uint32(subpacket[7])
- if len(subpacket) != int(nameLength)+int(valueLength)+8 {
- err = errors.StructuralError("notation data subpacket with bad length")
- return
- }
-
- notation := Notation{
- IsHumanReadable: (subpacket[0] & 0x80) == 0x80,
- Name: string(subpacket[8:(nameLength + 8)]),
- Value: subpacket[(nameLength + 8):(valueLength + nameLength + 8)],
- IsCritical: isCritical,
- }
-
- sig.Notations = append(sig.Notations, ¬ation)
- case prefHashAlgosSubpacket:
- // Preferred hash algorithms, section 5.2.3.8
- sig.PreferredHash = make([]byte, len(subpacket))
- copy(sig.PreferredHash, subpacket)
- case prefCompressionSubpacket:
- // Preferred compression algorithms, section 5.2.3.9
- sig.PreferredCompression = make([]byte, len(subpacket))
- copy(sig.PreferredCompression, subpacket)
- case primaryUserIdSubpacket:
- // Primary User ID, section 5.2.3.19
- if len(subpacket) != 1 {
- err = errors.StructuralError("primary user id subpacket with bad length")
- return
- }
- sig.IsPrimaryId = new(bool)
- if subpacket[0] > 0 {
- *sig.IsPrimaryId = true
- }
- case keyFlagsSubpacket:
- // Key flags, section 5.2.3.21
- if len(subpacket) == 0 {
- err = errors.StructuralError("empty key flags subpacket")
- return
- }
- sig.FlagsValid = true
- if subpacket[0]&KeyFlagCertify != 0 {
- sig.FlagCertify = true
- }
- if subpacket[0]&KeyFlagSign != 0 {
- sig.FlagSign = true
- }
- if subpacket[0]&KeyFlagEncryptCommunications != 0 {
- sig.FlagEncryptCommunications = true
- }
- if subpacket[0]&KeyFlagEncryptStorage != 0 {
- sig.FlagEncryptStorage = true
- }
- if subpacket[0]&KeyFlagSplitKey != 0 {
- sig.FlagSplitKey = true
- }
- if subpacket[0]&KeyFlagAuthenticate != 0 {
- sig.FlagAuthenticate = true
- }
- if subpacket[0]&KeyFlagGroupKey != 0 {
- sig.FlagGroupKey = true
- }
- case signerUserIdSubpacket:
- userId := string(subpacket)
- sig.SignerUserId = &userId
- case reasonForRevocationSubpacket:
- // Reason For Revocation, section 5.2.3.23
- if len(subpacket) == 0 {
- err = errors.StructuralError("empty revocation reason subpacket")
- return
- }
- sig.RevocationReason = new(ReasonForRevocation)
- *sig.RevocationReason = NewReasonForRevocation(subpacket[0])
- sig.RevocationReasonText = string(subpacket[1:])
- case featuresSubpacket:
- // Features subpacket, section 5.2.3.24 specifies a very general
- // mechanism for OpenPGP implementations to signal support for new
- // features.
- if len(subpacket) > 0 {
- if subpacket[0]&0x01 != 0 {
- sig.SEIPDv1 = true
- }
- // 0x02 and 0x04 are reserved
- if subpacket[0]&0x08 != 0 {
- sig.SEIPDv2 = true
- }
- }
- case embeddedSignatureSubpacket:
- // Only usage is in signatures that cross-certify
- // signing subkeys. section 5.2.3.26 describes the
- // format, with its usage described in section 11.1
- if sig.EmbeddedSignature != nil {
- err = errors.StructuralError("Cannot have multiple embedded signatures")
- return
- }
- sig.EmbeddedSignature = new(Signature)
- // Embedded signatures are required to be v4 signatures see
- // section 12.1. However, we only parse v4 signatures in this
- // file anyway.
- if err := sig.EmbeddedSignature.parse(bytes.NewBuffer(subpacket)); err != nil {
- return nil, err
- }
- if sigType := sig.EmbeddedSignature.SigType; sigType != SigTypePrimaryKeyBinding {
- return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType)))
- }
- case policyUriSubpacket:
- // Policy URI, section 5.2.3.20
- sig.PolicyURI = string(subpacket)
- case issuerFingerprintSubpacket:
- if len(subpacket) == 0 {
- err = errors.StructuralError("empty issuer fingerprint subpacket")
- return
- }
- v, l := subpacket[0], len(subpacket[1:])
- if v >= 5 && l != 32 || v < 5 && l != 20 {
- return nil, errors.StructuralError("bad fingerprint length")
- }
- sig.IssuerFingerprint = make([]byte, l)
- copy(sig.IssuerFingerprint, subpacket[1:])
- sig.IssuerKeyId = new(uint64)
- if v >= 5 {
- *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket[1:9])
- } else {
- *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket[13:21])
- }
- case intendedRecipientSubpacket:
- // Intended Recipient Fingerprint
- // https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh#name-intended-recipient-fingerpr
- if len(subpacket) < 1 {
- return nil, errors.StructuralError("invalid intended recipient fingerpring length")
- }
- version, length := subpacket[0], len(subpacket[1:])
- if version >= 5 && length != 32 || version < 5 && length != 20 {
- return nil, errors.StructuralError("invalid fingerprint length")
- }
- fingerprint := make([]byte, length)
- copy(fingerprint, subpacket[1:])
- sig.IntendedRecipients = append(sig.IntendedRecipients, &Recipient{int(version), fingerprint})
- case prefCipherSuitesSubpacket:
- // Preferred AEAD cipher suites
- // See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#name-preferred-aead-ciphersuites
- if len(subpacket)%2 != 0 {
- err = errors.StructuralError("invalid aead cipher suite length")
- return
- }
-
- sig.PreferredCipherSuites = make([][2]byte, len(subpacket)/2)
-
- for i := 0; i < len(subpacket)/2; i++ {
- sig.PreferredCipherSuites[i] = [2]uint8{subpacket[2*i], subpacket[2*i+1]}
- }
- default:
- if isCritical {
- err = errors.UnsupportedError("unknown critical signature subpacket type " + strconv.Itoa(int(packetType)))
- return
- }
- }
- return
-
-Truncated:
- err = errors.StructuralError("signature subpacket truncated")
- return
-}
-
-// subpacketLengthLength returns the length, in bytes, of an encoded length value.
-func subpacketLengthLength(length int) int {
- if length < 192 {
- return 1
- }
- if length < 16320 {
- return 2
- }
- return 5
-}
-
-func (sig *Signature) CheckKeyIdOrFingerprint(pk *PublicKey) bool {
- if sig.IssuerFingerprint != nil && len(sig.IssuerFingerprint) >= 20 {
- return bytes.Equal(sig.IssuerFingerprint, pk.Fingerprint)
- }
- return sig.IssuerKeyId != nil && *sig.IssuerKeyId == pk.KeyId
-}
-
-func (sig *Signature) CheckKeyIdOrFingerprintExplicit(fingerprint []byte, keyId uint64) bool {
- if sig.IssuerFingerprint != nil && len(sig.IssuerFingerprint) >= 20 && fingerprint != nil {
- return bytes.Equal(sig.IssuerFingerprint, fingerprint)
- }
- return sig.IssuerKeyId != nil && *sig.IssuerKeyId == keyId
-}
-
-// serializeSubpacketLength marshals the given length into to.
-func serializeSubpacketLength(to []byte, length int) int {
- // RFC 4880, Section 4.2.2.
- if length < 192 {
- to[0] = byte(length)
- return 1
- }
- if length < 16320 {
- length -= 192
- to[0] = byte((length >> 8) + 192)
- to[1] = byte(length)
- return 2
- }
- to[0] = 255
- to[1] = byte(length >> 24)
- to[2] = byte(length >> 16)
- to[3] = byte(length >> 8)
- to[4] = byte(length)
- return 5
-}
-
-// subpacketsLength returns the serialized length, in bytes, of the given
-// subpackets.
-func subpacketsLength(subpackets []outputSubpacket, hashed bool) (length int) {
- for _, subpacket := range subpackets {
- if subpacket.hashed == hashed {
- length += subpacketLengthLength(len(subpacket.contents) + 1)
- length += 1 // type byte
- length += len(subpacket.contents)
- }
- }
- return
-}
-
-// serializeSubpackets marshals the given subpackets into to.
-func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) {
- for _, subpacket := range subpackets {
- if subpacket.hashed == hashed {
- n := serializeSubpacketLength(to, len(subpacket.contents)+1)
- to[n] = byte(subpacket.subpacketType)
- if subpacket.isCritical {
- to[n] |= 0x80
- }
- to = to[1+n:]
- n = copy(to, subpacket.contents)
- to = to[n:]
- }
- }
-}
-
-// SigExpired returns whether sig is a signature that has expired or is created
-// in the future.
-func (sig *Signature) SigExpired(currentTime time.Time) bool {
- if sig.CreationTime.Unix() > currentTime.Unix() {
- return true
- }
- if sig.SigLifetimeSecs == nil || *sig.SigLifetimeSecs == 0 {
- return false
- }
- expiry := sig.CreationTime.Add(time.Duration(*sig.SigLifetimeSecs) * time.Second)
- return currentTime.Unix() > expiry.Unix()
-}
-
-// buildHashSuffix constructs the HashSuffix member of sig in preparation for signing.
-func (sig *Signature) buildHashSuffix(hashedSubpackets []byte) (err error) {
- var hashId byte
- var ok bool
-
- if sig.Version < 5 {
- hashId, ok = algorithm.HashToHashIdWithSha1(sig.Hash)
- } else {
- hashId, ok = algorithm.HashToHashId(sig.Hash)
- }
-
- if !ok {
- sig.HashSuffix = nil
- return errors.InvalidArgumentError("hash cannot be represented in OpenPGP: " + strconv.Itoa(int(sig.Hash)))
- }
-
- hashedFields := bytes.NewBuffer([]byte{
- uint8(sig.Version),
- uint8(sig.SigType),
- uint8(sig.PubKeyAlgo),
- uint8(hashId),
- })
- hashedSubpacketsLength := len(hashedSubpackets)
- if sig.Version == 6 {
- // v6 signatures store the length in 4 octets
- hashedFields.Write([]byte{
- uint8(hashedSubpacketsLength >> 24),
- uint8(hashedSubpacketsLength >> 16),
- uint8(hashedSubpacketsLength >> 8),
- uint8(hashedSubpacketsLength),
- })
- } else {
- hashedFields.Write([]byte{
- uint8(hashedSubpacketsLength >> 8),
- uint8(hashedSubpacketsLength),
- })
- }
- lenPrefix := hashedFields.Len()
- hashedFields.Write(hashedSubpackets)
-
- var l uint64 = uint64(lenPrefix + len(hashedSubpackets))
- if sig.Version == 5 {
- // v5 case
- hashedFields.Write([]byte{0x05, 0xff})
- hashedFields.Write([]byte{
- uint8(l >> 56), uint8(l >> 48), uint8(l >> 40), uint8(l >> 32),
- uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l),
- })
- } else {
- // v4 and v6 case
- hashedFields.Write([]byte{byte(sig.Version), 0xff})
- hashedFields.Write([]byte{
- uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l),
- })
- }
- sig.HashSuffix = make([]byte, hashedFields.Len())
- copy(sig.HashSuffix, hashedFields.Bytes())
- return
-}
-
-func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err error) {
- hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true)
- hashedSubpackets := make([]byte, hashedSubpacketsLen)
- serializeSubpackets(hashedSubpackets, sig.outSubpackets, true)
- err = sig.buildHashSuffix(hashedSubpackets)
- if err != nil {
- return
- }
- if sig.Version == 5 && (sig.SigType == 0x00 || sig.SigType == 0x01) {
- sig.AddMetadataToHashSuffix()
- }
-
- h.Write(sig.HashSuffix)
- digest = h.Sum(nil)
- copy(sig.HashTag[:], digest)
- return
-}
-
-// PrepareSign must be called to create a hash object before Sign for v6 signatures.
-// The created hash object initially hashes a randomly generated salt
-// as required by v6 signatures. The generated salt is stored in sig. If the signature is not v6,
-// the method returns an empty hash object.
-// See RFC the crypto refresh Section 3.2.4.
-func (sig *Signature) PrepareSign(config *Config) (hash.Hash, error) {
- if !sig.Hash.Available() {
- return nil, errors.UnsupportedError("hash function")
- }
- hasher := sig.Hash.New()
- if sig.Version == 6 {
- if sig.salt == nil {
- var err error
- sig.salt, err = SignatureSaltForHash(sig.Hash, config.Random())
- if err != nil {
- return nil, err
- }
- }
- hasher.Write(sig.salt)
- }
- return hasher, nil
-}
-
-// SetSalt sets the signature salt for v6 signatures.
-// Assumes salt is generated correctly and checks if length matches.
-// If the signature is not v6, the method ignores the salt.
-// Use PrepareSign whenever possible instead of generating and
-// hashing the salt externally.
-// See RFC the crypto refresh Section 3.2.4.
-func (sig *Signature) SetSalt(salt []byte) error {
- if sig.Version == 6 {
- expectedSaltLength, err := SaltLengthForHash(sig.Hash)
- if err != nil {
- return err
- }
- if salt == nil || len(salt) != expectedSaltLength {
- return errors.InvalidArgumentError("unexpected salt size for the given hash algorithm")
- }
- sig.salt = salt
- }
- return nil
-}
-
-// PrepareVerify must be called to create a hash object before verifying v6 signatures.
-// The created hash object initially hashes the internally stored salt.
-// If the signature is not v6, the method returns an empty hash object.
-// See crypto refresh Section 3.2.4.
-func (sig *Signature) PrepareVerify() (hash.Hash, error) {
- if !sig.Hash.Available() {
- return nil, errors.UnsupportedError("hash function")
- }
- hasher := sig.Hash.New()
- if sig.Version == 6 {
- if sig.salt == nil {
- return nil, errors.StructuralError("v6 requires a salt for the hash to be signed")
- }
- hasher.Write(sig.salt)
- }
- return hasher, nil
-}
-
-// Sign signs a message with a private key. The hash, h, must contain
-// the hash of the message to be signed and will be mutated by this function.
-// On success, the signature is stored in sig. Call Serialize to write it out.
-// If config is nil, sensible defaults will be used.
-func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err error) {
- if priv.Dummy() {
- return errors.ErrDummyPrivateKey("dummy key found")
- }
- sig.Version = priv.PublicKey.Version
- sig.IssuerFingerprint = priv.PublicKey.Fingerprint
- sig.outSubpackets, err = sig.buildSubpackets(priv.PublicKey)
- if err != nil {
- return err
- }
- digest, err := sig.signPrepareHash(h)
- if err != nil {
- return
- }
- switch priv.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
- // supports both *rsa.PrivateKey and crypto.Signer
- sigdata, err := priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash)
- if err == nil {
- sig.RSASignature = encoding.NewMPI(sigdata)
- }
- case PubKeyAlgoDSA:
- dsaPriv := priv.PrivateKey.(*dsa.PrivateKey)
-
- // Need to truncate hashBytes to match FIPS 186-3 section 4.6.
- subgroupSize := (dsaPriv.Q.BitLen() + 7) / 8
- if len(digest) > subgroupSize {
- digest = digest[:subgroupSize]
- }
- r, s, err := dsa.Sign(config.Random(), dsaPriv, digest)
- if err == nil {
- sig.DSASigR = new(encoding.MPI).SetBig(r)
- sig.DSASigS = new(encoding.MPI).SetBig(s)
- }
- case PubKeyAlgoECDSA:
- sk := priv.PrivateKey.(*ecdsa.PrivateKey)
- r, s, err := ecdsa.Sign(config.Random(), sk, digest)
-
- if err == nil {
- sig.ECDSASigR = new(encoding.MPI).SetBig(r)
- sig.ECDSASigS = new(encoding.MPI).SetBig(s)
- }
- case PubKeyAlgoEdDSA:
- sk := priv.PrivateKey.(*eddsa.PrivateKey)
- r, s, err := eddsa.Sign(sk, digest)
- if err == nil {
- sig.EdDSASigR = encoding.NewMPI(r)
- sig.EdDSASigS = encoding.NewMPI(s)
- }
- case PubKeyAlgoEd25519:
- sk := priv.PrivateKey.(*ed25519.PrivateKey)
- signature, err := ed25519.Sign(sk, digest)
- if err == nil {
- sig.EdSig = signature
- }
- case PubKeyAlgoEd448:
- sk := priv.PrivateKey.(*ed448.PrivateKey)
- signature, err := ed448.Sign(sk, digest)
- if err == nil {
- sig.EdSig = signature
- }
- default:
- err = errors.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo)))
- }
-
- return
-}
-
-// SignUserId computes a signature from priv, asserting that pub is a valid
-// key for the identity id. On success, the signature is stored in sig. Call
-// Serialize to write it out.
-// If config is nil, sensible defaults will be used.
-func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, config *Config) error {
- if priv.Dummy() {
- return errors.ErrDummyPrivateKey("dummy key found")
- }
- prepareHash, err := sig.PrepareSign(config)
- if err != nil {
- return err
- }
- if err := userIdSignatureHash(id, pub, prepareHash); err != nil {
- return err
- }
- return sig.Sign(prepareHash, priv, config)
-}
-
-// SignDirectKeyBinding computes a signature from priv
-// On success, the signature is stored in sig.
-// Call Serialize to write it out.
-// If config is nil, sensible defaults will be used.
-func (sig *Signature) SignDirectKeyBinding(pub *PublicKey, priv *PrivateKey, config *Config) error {
- if priv.Dummy() {
- return errors.ErrDummyPrivateKey("dummy key found")
- }
- prepareHash, err := sig.PrepareSign(config)
- if err != nil {
- return err
- }
- if err := directKeySignatureHash(pub, prepareHash); err != nil {
- return err
- }
- return sig.Sign(prepareHash, priv, config)
-}
-
-// CrossSignKey computes a signature from signingKey on pub hashed using hashKey. On success,
-// the signature is stored in sig. Call Serialize to write it out.
-// If config is nil, sensible defaults will be used.
-func (sig *Signature) CrossSignKey(pub *PublicKey, hashKey *PublicKey, signingKey *PrivateKey,
- config *Config) error {
- prepareHash, err := sig.PrepareSign(config)
- if err != nil {
- return err
- }
- h, err := keySignatureHash(hashKey, pub, prepareHash)
- if err != nil {
- return err
- }
- return sig.Sign(h, signingKey, config)
-}
-
-// SignKey computes a signature from priv, asserting that pub is a subkey. On
-// success, the signature is stored in sig. Call Serialize to write it out.
-// If config is nil, sensible defaults will be used.
-func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config) error {
- if priv.Dummy() {
- return errors.ErrDummyPrivateKey("dummy key found")
- }
- prepareHash, err := sig.PrepareSign(config)
- if err != nil {
- return err
- }
- h, err := keySignatureHash(&priv.PublicKey, pub, prepareHash)
- if err != nil {
- return err
- }
- return sig.Sign(h, priv, config)
-}
-
-// RevokeKey computes a revocation signature of pub using priv. On success, the signature is
-// stored in sig. Call Serialize to write it out.
-// If config is nil, sensible defaults will be used.
-func (sig *Signature) RevokeKey(pub *PublicKey, priv *PrivateKey, config *Config) error {
- prepareHash, err := sig.PrepareSign(config)
- if err != nil {
- return err
- }
- if err := keyRevocationHash(pub, prepareHash); err != nil {
- return err
- }
- return sig.Sign(prepareHash, priv, config)
-}
-
-// RevokeSubkey computes a subkey revocation signature of pub using priv.
-// On success, the signature is stored in sig. Call Serialize to write it out.
-// If config is nil, sensible defaults will be used.
-func (sig *Signature) RevokeSubkey(pub *PublicKey, priv *PrivateKey, config *Config) error {
- // Identical to a subkey binding signature
- return sig.SignKey(pub, priv, config)
-}
-
-// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been
-// called first.
-func (sig *Signature) Serialize(w io.Writer) (err error) {
- if len(sig.outSubpackets) == 0 {
- sig.outSubpackets = sig.rawSubpackets
- }
- if sig.RSASignature == nil && sig.DSASigR == nil && sig.ECDSASigR == nil && sig.EdDSASigR == nil && sig.EdSig == nil {
- return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize")
- }
-
- sigLength := 0
- switch sig.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
- sigLength = int(sig.RSASignature.EncodedLength())
- case PubKeyAlgoDSA:
- sigLength = int(sig.DSASigR.EncodedLength())
- sigLength += int(sig.DSASigS.EncodedLength())
- case PubKeyAlgoECDSA:
- sigLength = int(sig.ECDSASigR.EncodedLength())
- sigLength += int(sig.ECDSASigS.EncodedLength())
- case PubKeyAlgoEdDSA:
- sigLength = int(sig.EdDSASigR.EncodedLength())
- sigLength += int(sig.EdDSASigS.EncodedLength())
- case PubKeyAlgoEd25519:
- sigLength = ed25519.SignatureSize
- case PubKeyAlgoEd448:
- sigLength = ed448.SignatureSize
- default:
- panic("impossible")
- }
-
- hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true)
- unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false)
- length := 4 + /* length of version|signature type|public-key algorithm|hash algorithm */
- 2 /* length of hashed subpackets */ + hashedSubpacketsLen +
- 2 /* length of unhashed subpackets */ + unhashedSubpacketsLen +
- 2 /* hash tag */ + sigLength
- if sig.Version == 6 {
- length += 4 + /* the two length fields are four-octet instead of two */
- 1 + /* salt length */
- len(sig.salt) /* length salt */
- }
- err = serializeHeader(w, packetTypeSignature, length)
- if err != nil {
- return
- }
- err = sig.serializeBody(w)
- if err != nil {
- return err
- }
- return
-}
-
-func (sig *Signature) serializeBody(w io.Writer) (err error) {
- var fields []byte
- if sig.Version == 6 {
- // v6 signatures use 4 octets for length
- hashedSubpacketsLen :=
- uint32(uint32(sig.HashSuffix[4])<<24) |
- uint32(uint32(sig.HashSuffix[5])<<16) |
- uint32(uint32(sig.HashSuffix[6])<<8) |
- uint32(sig.HashSuffix[7])
- fields = sig.HashSuffix[:8+hashedSubpacketsLen]
- } else {
- hashedSubpacketsLen := uint16(uint16(sig.HashSuffix[4])<<8) |
- uint16(sig.HashSuffix[5])
- fields = sig.HashSuffix[:6+hashedSubpacketsLen]
-
- }
- _, err = w.Write(fields)
- if err != nil {
- return
- }
-
- unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false)
- var unhashedSubpackets []byte
- if sig.Version == 6 {
- unhashedSubpackets = make([]byte, 4+unhashedSubpacketsLen)
- unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 24)
- unhashedSubpackets[1] = byte(unhashedSubpacketsLen >> 16)
- unhashedSubpackets[2] = byte(unhashedSubpacketsLen >> 8)
- unhashedSubpackets[3] = byte(unhashedSubpacketsLen)
- serializeSubpackets(unhashedSubpackets[4:], sig.outSubpackets, false)
- } else {
- unhashedSubpackets = make([]byte, 2+unhashedSubpacketsLen)
- unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8)
- unhashedSubpackets[1] = byte(unhashedSubpacketsLen)
- serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false)
- }
-
- _, err = w.Write(unhashedSubpackets)
- if err != nil {
- return
- }
- _, err = w.Write(sig.HashTag[:])
- if err != nil {
- return
- }
-
- if sig.Version == 6 {
- // write salt for v6 signatures
- _, err = w.Write([]byte{uint8(len(sig.salt))})
- if err != nil {
- return
- }
- _, err = w.Write(sig.salt)
- if err != nil {
- return
- }
- }
-
- switch sig.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
- _, err = w.Write(sig.RSASignature.EncodedBytes())
- case PubKeyAlgoDSA:
- if _, err = w.Write(sig.DSASigR.EncodedBytes()); err != nil {
- return
- }
- _, err = w.Write(sig.DSASigS.EncodedBytes())
- case PubKeyAlgoECDSA:
- if _, err = w.Write(sig.ECDSASigR.EncodedBytes()); err != nil {
- return
- }
- _, err = w.Write(sig.ECDSASigS.EncodedBytes())
- case PubKeyAlgoEdDSA:
- if _, err = w.Write(sig.EdDSASigR.EncodedBytes()); err != nil {
- return
- }
- _, err = w.Write(sig.EdDSASigS.EncodedBytes())
- case PubKeyAlgoEd25519:
- err = ed25519.WriteSignature(w, sig.EdSig)
- case PubKeyAlgoEd448:
- err = ed448.WriteSignature(w, sig.EdSig)
- default:
- panic("impossible")
- }
- return
-}
-
-// outputSubpacket represents a subpacket to be marshaled.
-type outputSubpacket struct {
- hashed bool // true if this subpacket is in the hashed area.
- subpacketType signatureSubpacketType
- isCritical bool
- contents []byte
-}
-
-func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubpacket, err error) {
- creationTime := make([]byte, 4)
- binary.BigEndian.PutUint32(creationTime, uint32(sig.CreationTime.Unix()))
- subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, false, creationTime})
-
- if sig.IssuerKeyId != nil && sig.Version == 4 {
- keyId := make([]byte, 8)
- binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId)
- subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, false, keyId})
- }
- if sig.IssuerFingerprint != nil {
- contents := append([]uint8{uint8(issuer.Version)}, sig.IssuerFingerprint...)
- subpackets = append(subpackets, outputSubpacket{true, issuerFingerprintSubpacket, sig.Version >= 5, contents})
- }
- if sig.SignerUserId != nil {
- subpackets = append(subpackets, outputSubpacket{true, signerUserIdSubpacket, false, []byte(*sig.SignerUserId)})
- }
- if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 {
- sigLifetime := make([]byte, 4)
- binary.BigEndian.PutUint32(sigLifetime, *sig.SigLifetimeSecs)
- subpackets = append(subpackets, outputSubpacket{true, signatureExpirationSubpacket, true, sigLifetime})
- }
-
- // Key flags may only appear in self-signatures or certification signatures.
-
- if sig.FlagsValid {
- var flags byte
- if sig.FlagCertify {
- flags |= KeyFlagCertify
- }
- if sig.FlagSign {
- flags |= KeyFlagSign
- }
- if sig.FlagEncryptCommunications {
- flags |= KeyFlagEncryptCommunications
- }
- if sig.FlagEncryptStorage {
- flags |= KeyFlagEncryptStorage
- }
- if sig.FlagSplitKey {
- flags |= KeyFlagSplitKey
- }
- if sig.FlagAuthenticate {
- flags |= KeyFlagAuthenticate
- }
- if sig.FlagGroupKey {
- flags |= KeyFlagGroupKey
- }
- subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{flags}})
- }
-
- for _, notation := range sig.Notations {
- subpackets = append(
- subpackets,
- outputSubpacket{
- true,
- notationDataSubpacket,
- notation.IsCritical,
- notation.getData(),
- })
- }
-
- for _, recipient := range sig.IntendedRecipients {
- subpackets = append(
- subpackets,
- outputSubpacket{
- true,
- intendedRecipientSubpacket,
- false,
- recipient.Serialize(),
- })
- }
-
- // The following subpackets may only appear in self-signatures.
-
- var features = byte(0x00)
- if sig.SEIPDv1 {
- features |= 0x01
- }
- if sig.SEIPDv2 {
- features |= 0x08
- }
-
- if features != 0x00 {
- subpackets = append(subpackets, outputSubpacket{true, featuresSubpacket, false, []byte{features}})
- }
-
- if sig.TrustLevel != 0 {
- subpackets = append(subpackets, outputSubpacket{true, trustSubpacket, true, []byte{byte(sig.TrustLevel), byte(sig.TrustAmount)}})
- }
-
- if sig.TrustRegularExpression != nil {
- // RFC specifies the string should be null-terminated; add a null byte to the end
- subpackets = append(subpackets, outputSubpacket{true, regularExpressionSubpacket, true, []byte(*sig.TrustRegularExpression + "\000")})
- }
-
- if sig.KeyLifetimeSecs != nil && *sig.KeyLifetimeSecs != 0 {
- keyLifetime := make([]byte, 4)
- binary.BigEndian.PutUint32(keyLifetime, *sig.KeyLifetimeSecs)
- subpackets = append(subpackets, outputSubpacket{true, keyExpirationSubpacket, true, keyLifetime})
- }
-
- if sig.IsPrimaryId != nil && *sig.IsPrimaryId {
- subpackets = append(subpackets, outputSubpacket{true, primaryUserIdSubpacket, false, []byte{1}})
- }
-
- if len(sig.PreferredSymmetric) > 0 {
- subpackets = append(subpackets, outputSubpacket{true, prefSymmetricAlgosSubpacket, false, sig.PreferredSymmetric})
- }
-
- if len(sig.PreferredHash) > 0 {
- subpackets = append(subpackets, outputSubpacket{true, prefHashAlgosSubpacket, false, sig.PreferredHash})
- }
-
- if len(sig.PreferredCompression) > 0 {
- subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression})
- }
-
- if len(sig.PolicyURI) > 0 {
- subpackets = append(subpackets, outputSubpacket{true, policyUriSubpacket, false, []uint8(sig.PolicyURI)})
- }
-
- if len(sig.PreferredCipherSuites) > 0 {
- serialized := make([]byte, len(sig.PreferredCipherSuites)*2)
- for i, cipherSuite := range sig.PreferredCipherSuites {
- serialized[2*i] = cipherSuite[0]
- serialized[2*i+1] = cipherSuite[1]
- }
- subpackets = append(subpackets, outputSubpacket{true, prefCipherSuitesSubpacket, false, serialized})
- }
-
- // Revocation reason appears only in revocation signatures and is serialized as per section 5.2.3.23.
- if sig.RevocationReason != nil {
- subpackets = append(subpackets, outputSubpacket{true, reasonForRevocationSubpacket, true,
- append([]uint8{uint8(*sig.RevocationReason)}, []uint8(sig.RevocationReasonText)...)})
- }
-
- // EmbeddedSignature appears only in subkeys capable of signing and is serialized as per section 5.2.3.26.
- if sig.EmbeddedSignature != nil {
- var buf bytes.Buffer
- err = sig.EmbeddedSignature.serializeBody(&buf)
- if err != nil {
- return
- }
- subpackets = append(subpackets, outputSubpacket{true, embeddedSignatureSubpacket, true, buf.Bytes()})
- }
-
- return
-}
-
-// AddMetadataToHashSuffix modifies the current hash suffix to include metadata
-// (format, filename, and time). Version 5 keys protect this data including it
-// in the hash computation. See section 5.2.4.
-func (sig *Signature) AddMetadataToHashSuffix() {
- if sig == nil || sig.Version != 5 {
- return
- }
- if sig.SigType != 0x00 && sig.SigType != 0x01 {
- return
- }
- lit := sig.Metadata
- if lit == nil {
- // This will translate into six 0x00 bytes.
- lit = &LiteralData{}
- }
-
- // Extract the current byte count
- n := sig.HashSuffix[len(sig.HashSuffix)-8:]
- l := uint64(
- uint64(n[0])<<56 | uint64(n[1])<<48 | uint64(n[2])<<40 | uint64(n[3])<<32 |
- uint64(n[4])<<24 | uint64(n[5])<<16 | uint64(n[6])<<8 | uint64(n[7]))
-
- suffix := bytes.NewBuffer(nil)
- suffix.Write(sig.HashSuffix[:l])
-
- // Add the metadata
- var buf [4]byte
- buf[0] = lit.Format
- fileName := lit.FileName
- if len(lit.FileName) > 255 {
- fileName = fileName[:255]
- }
- buf[1] = byte(len(fileName))
- suffix.Write(buf[:2])
- suffix.Write([]byte(lit.FileName))
- binary.BigEndian.PutUint32(buf[:], lit.Time)
- suffix.Write(buf[:])
-
- suffix.Write([]byte{0x05, 0xff})
- suffix.Write([]byte{
- uint8(l >> 56), uint8(l >> 48), uint8(l >> 40), uint8(l >> 32),
- uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l),
- })
- sig.HashSuffix = suffix.Bytes()
-}
-
-// SaltLengthForHash selects the required salt length for the given hash algorithm,
-// as per Table 23 (Hash algorithm registry) of the crypto refresh.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh#section-9.5|Crypto Refresh Section 9.5.
-func SaltLengthForHash(hash crypto.Hash) (int, error) {
- switch hash {
- case crypto.SHA256, crypto.SHA224, crypto.SHA3_256:
- return 16, nil
- case crypto.SHA384:
- return 24, nil
- case crypto.SHA512, crypto.SHA3_512:
- return 32, nil
- default:
- return 0, errors.UnsupportedError("hash function not supported for V6 signatures")
- }
-}
-
-// SignatureSaltForHash generates a random signature salt
-// with the length for the given hash algorithm.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh#section-9.5|Crypto Refresh Section 9.5.
-func SignatureSaltForHash(hash crypto.Hash, randReader io.Reader) ([]byte, error) {
- saltLength, err := SaltLengthForHash(hash)
- if err != nil {
- return nil, err
- }
- salt := make([]byte, saltLength)
- _, err = io.ReadFull(randReader, salt)
- if err != nil {
- return nil, err
- }
- return salt, nil
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go
deleted file mode 100644
index c97b98b9303..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go
+++ /dev/null
@@ -1,315 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "bytes"
- "crypto/cipher"
- "crypto/sha256"
- "io"
- "strconv"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/s2k"
- "golang.org/x/crypto/hkdf"
-)
-
-// This is the largest session key that we'll support. Since at most 256-bit cipher
-// is supported in OpenPGP, this is large enough to contain also the auth tag.
-const maxSessionKeySizeInBytes = 64
-
-// SymmetricKeyEncrypted represents a passphrase protected session key. See RFC
-// 4880, section 5.3.
-type SymmetricKeyEncrypted struct {
- Version int
- CipherFunc CipherFunction
- Mode AEADMode
- s2k func(out, in []byte)
- iv []byte
- encryptedKey []byte // Contains also the authentication tag for AEAD
-}
-
-// parse parses an SymmetricKeyEncrypted packet as specified in
-// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#name-symmetric-key-encrypted-ses
-func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error {
- var buf [1]byte
-
- // Version
- if _, err := readFull(r, buf[:]); err != nil {
- return err
- }
- ske.Version = int(buf[0])
- if ske.Version != 4 && ske.Version != 5 && ske.Version != 6 {
- return errors.UnsupportedError("unknown SymmetricKeyEncrypted version")
- }
-
- if ske.Version > 5 {
- // Scalar octet count
- if _, err := readFull(r, buf[:]); err != nil {
- return err
- }
- }
-
- // Cipher function
- if _, err := readFull(r, buf[:]); err != nil {
- return err
- }
- ske.CipherFunc = CipherFunction(buf[0])
- if !ske.CipherFunc.IsSupported() {
- return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[0])))
- }
-
- if ske.Version >= 5 {
- // AEAD mode
- if _, err := readFull(r, buf[:]); err != nil {
- return errors.StructuralError("cannot read AEAD octet from packet")
- }
- ske.Mode = AEADMode(buf[0])
- }
-
- if ske.Version > 5 {
- // Scalar octet count
- if _, err := readFull(r, buf[:]); err != nil {
- return err
- }
- }
-
- var err error
- if ske.s2k, err = s2k.Parse(r); err != nil {
- if _, ok := err.(errors.ErrDummyPrivateKey); ok {
- return errors.UnsupportedError("missing key GNU extension in session key")
- }
- return err
- }
-
- if ske.Version >= 5 {
- // AEAD IV
- iv := make([]byte, ske.Mode.IvLength())
- _, err := readFull(r, iv)
- if err != nil {
- return errors.StructuralError("cannot read AEAD IV")
- }
-
- ske.iv = iv
- }
-
- encryptedKey := make([]byte, maxSessionKeySizeInBytes)
- // The session key may follow. We just have to try and read to find
- // out. If it exists then we limit it to maxSessionKeySizeInBytes.
- n, err := readFull(r, encryptedKey)
- if err != nil && err != io.ErrUnexpectedEOF {
- return err
- }
-
- if n != 0 {
- if n == maxSessionKeySizeInBytes {
- return errors.UnsupportedError("oversized encrypted session key")
- }
- ske.encryptedKey = encryptedKey[:n]
- }
- return nil
-}
-
-// Decrypt attempts to decrypt an encrypted session key and returns the key and
-// the cipher to use when decrypting a subsequent Symmetrically Encrypted Data
-// packet.
-func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) ([]byte, CipherFunction, error) {
- key := make([]byte, ske.CipherFunc.KeySize())
- ske.s2k(key, passphrase)
- if len(ske.encryptedKey) == 0 {
- return key, ske.CipherFunc, nil
- }
- switch ske.Version {
- case 4:
- plaintextKey, cipherFunc, err := ske.decryptV4(key)
- return plaintextKey, cipherFunc, err
- case 5, 6:
- plaintextKey, err := ske.aeadDecrypt(ske.Version, key)
- return plaintextKey, CipherFunction(0), err
- }
- err := errors.UnsupportedError("unknown SymmetricKeyEncrypted version")
- return nil, CipherFunction(0), err
-}
-
-func (ske *SymmetricKeyEncrypted) decryptV4(key []byte) ([]byte, CipherFunction, error) {
- // the IV is all zeros
- iv := make([]byte, ske.CipherFunc.blockSize())
- c := cipher.NewCFBDecrypter(ske.CipherFunc.new(key), iv)
- plaintextKey := make([]byte, len(ske.encryptedKey))
- c.XORKeyStream(plaintextKey, ske.encryptedKey)
- cipherFunc := CipherFunction(plaintextKey[0])
- if cipherFunc.blockSize() == 0 {
- return nil, ske.CipherFunc, errors.UnsupportedError(
- "unknown cipher: " + strconv.Itoa(int(cipherFunc)))
- }
- plaintextKey = plaintextKey[1:]
- if len(plaintextKey) != cipherFunc.KeySize() {
- return nil, cipherFunc, errors.StructuralError(
- "length of decrypted key not equal to cipher keysize")
- }
- return plaintextKey, cipherFunc, nil
-}
-
-func (ske *SymmetricKeyEncrypted) aeadDecrypt(version int, key []byte) ([]byte, error) {
- adata := []byte{0xc3, byte(version), byte(ske.CipherFunc), byte(ske.Mode)}
- aead := getEncryptedKeyAeadInstance(ske.CipherFunc, ske.Mode, key, adata, version)
-
- plaintextKey, err := aead.Open(nil, ske.iv, ske.encryptedKey, adata)
- if err != nil {
- return nil, err
- }
- return plaintextKey, nil
-}
-
-// SerializeSymmetricKeyEncrypted serializes a symmetric key packet to w.
-// The packet contains a random session key, encrypted by a key derived from
-// the given passphrase. The session key is returned and must be passed to
-// SerializeSymmetricallyEncrypted.
-// If config is nil, sensible defaults will be used.
-func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Config) (key []byte, err error) {
- cipherFunc := config.Cipher()
-
- sessionKey := make([]byte, cipherFunc.KeySize())
- _, err = io.ReadFull(config.Random(), sessionKey)
- if err != nil {
- return
- }
-
- err = SerializeSymmetricKeyEncryptedReuseKey(w, sessionKey, passphrase, config)
- if err != nil {
- return
- }
-
- key = sessionKey
- return
-}
-
-// SerializeSymmetricKeyEncryptedReuseKey serializes a symmetric key packet to w.
-// The packet contains the given session key, encrypted by a key derived from
-// the given passphrase. The returned session key must be passed to
-// SerializeSymmetricallyEncrypted.
-// If config is nil, sensible defaults will be used.
-func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, passphrase []byte, config *Config) (err error) {
- var version int
- if config.AEAD() != nil {
- version = 6
- } else {
- version = 4
- }
- cipherFunc := config.Cipher()
- // cipherFunc must be AES
- if !cipherFunc.IsSupported() || cipherFunc < CipherAES128 || cipherFunc > CipherAES256 {
- return errors.UnsupportedError("unsupported cipher: " + strconv.Itoa(int(cipherFunc)))
- }
-
- keySize := cipherFunc.KeySize()
- s2kBuf := new(bytes.Buffer)
- keyEncryptingKey := make([]byte, keySize)
- // s2k.Serialize salts and stretches the passphrase, and writes the
- // resulting key to keyEncryptingKey and the s2k descriptor to s2kBuf.
- err = s2k.Serialize(s2kBuf, keyEncryptingKey, config.Random(), passphrase, config.S2K())
- if err != nil {
- return
- }
- s2kBytes := s2kBuf.Bytes()
-
- var packetLength int
- switch version {
- case 4:
- packetLength = 2 /* header */ + len(s2kBytes) + 1 /* cipher type */ + keySize
- case 5, 6:
- ivLen := config.AEAD().Mode().IvLength()
- tagLen := config.AEAD().Mode().TagLength()
- packetLength = 3 + len(s2kBytes) + ivLen + keySize + tagLen
- }
- if version > 5 {
- packetLength += 2 // additional octet count fields
- }
-
- err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength)
- if err != nil {
- return
- }
-
- // Symmetric Key Encrypted Version
- buf := []byte{byte(version)}
-
- if version > 5 {
- // Scalar octet count
- buf = append(buf, byte(3+len(s2kBytes)+config.AEAD().Mode().IvLength()))
- }
-
- // Cipher function
- buf = append(buf, byte(cipherFunc))
-
- if version >= 5 {
- // AEAD mode
- buf = append(buf, byte(config.AEAD().Mode()))
- }
- if version > 5 {
- // Scalar octet count
- buf = append(buf, byte(len(s2kBytes)))
- }
- _, err = w.Write(buf)
- if err != nil {
- return
- }
- _, err = w.Write(s2kBytes)
- if err != nil {
- return
- }
-
- switch version {
- case 4:
- iv := make([]byte, cipherFunc.blockSize())
- c := cipher.NewCFBEncrypter(cipherFunc.new(keyEncryptingKey), iv)
- encryptedCipherAndKey := make([]byte, keySize+1)
- c.XORKeyStream(encryptedCipherAndKey, buf[1:])
- c.XORKeyStream(encryptedCipherAndKey[1:], sessionKey)
- _, err = w.Write(encryptedCipherAndKey)
- if err != nil {
- return
- }
- case 5, 6:
- mode := config.AEAD().Mode()
- adata := []byte{0xc3, byte(version), byte(cipherFunc), byte(mode)}
- aead := getEncryptedKeyAeadInstance(cipherFunc, mode, keyEncryptingKey, adata, version)
-
- // Sample iv using random reader
- iv := make([]byte, config.AEAD().Mode().IvLength())
- _, err = io.ReadFull(config.Random(), iv)
- if err != nil {
- return
- }
- // Seal and write (encryptedData includes auth. tag)
-
- encryptedData := aead.Seal(nil, iv, sessionKey, adata)
- _, err = w.Write(iv)
- if err != nil {
- return
- }
- _, err = w.Write(encryptedData)
- if err != nil {
- return
- }
- }
-
- return
-}
-
-func getEncryptedKeyAeadInstance(c CipherFunction, mode AEADMode, inputKey, associatedData []byte, version int) (aead cipher.AEAD) {
- var blockCipher cipher.Block
- if version > 5 {
- hkdfReader := hkdf.New(sha256.New, inputKey, []byte{}, associatedData)
-
- encryptionKey := make([]byte, c.KeySize())
- _, _ = readFull(hkdfReader, encryptionKey)
-
- blockCipher = c.new(encryptionKey)
- } else {
- blockCipher = c.new(inputKey)
- }
- return mode.new(blockCipher)
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go
deleted file mode 100644
index e9bbf0327e1..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
-)
-
-const aeadSaltSize = 32
-
-// SymmetricallyEncrypted represents a symmetrically encrypted byte string. The
-// encrypted Contents will consist of more OpenPGP packets. See RFC 4880,
-// sections 5.7 and 5.13.
-type SymmetricallyEncrypted struct {
- Version int
- Contents io.Reader // contains tag for version 2
- IntegrityProtected bool // If true it is type 18 (with MDC or AEAD). False is packet type 9
-
- // Specific to version 1
- prefix []byte
-
- // Specific to version 2
- Cipher CipherFunction
- Mode AEADMode
- ChunkSizeByte byte
- Salt [aeadSaltSize]byte
-}
-
-const (
- symmetricallyEncryptedVersionMdc = 1
- symmetricallyEncryptedVersionAead = 2
-)
-
-func (se *SymmetricallyEncrypted) parse(r io.Reader) error {
- if se.IntegrityProtected {
- // See RFC 4880, section 5.13.
- var buf [1]byte
- _, err := readFull(r, buf[:])
- if err != nil {
- return err
- }
-
- switch buf[0] {
- case symmetricallyEncryptedVersionMdc:
- se.Version = symmetricallyEncryptedVersionMdc
- case symmetricallyEncryptedVersionAead:
- se.Version = symmetricallyEncryptedVersionAead
- if err := se.parseAead(r); err != nil {
- return err
- }
- default:
- return errors.UnsupportedError("unknown SymmetricallyEncrypted version")
- }
- }
- se.Contents = r
- return nil
-}
-
-// Decrypt returns a ReadCloser, from which the decrypted Contents of the
-// packet can be read. An incorrect key will only be detected after trying
-// to decrypt the entire data.
-func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.ReadCloser, error) {
- if se.Version == symmetricallyEncryptedVersionAead {
- return se.decryptAead(key)
- }
-
- return se.decryptMdc(c, key)
-}
-
-// SerializeSymmetricallyEncrypted serializes a symmetrically encrypted packet
-// to w and returns a WriteCloser to which the to-be-encrypted packets can be
-// written.
-// If config is nil, sensible defaults will be used.
-func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, aeadSupported bool, cipherSuite CipherSuite, key []byte, config *Config) (Contents io.WriteCloser, err error) {
- writeCloser := noOpCloser{w}
- ciphertext, err := serializeStreamHeader(writeCloser, packetTypeSymmetricallyEncryptedIntegrityProtected)
- if err != nil {
- return
- }
-
- if aeadSupported {
- return serializeSymmetricallyEncryptedAead(ciphertext, cipherSuite, config.AEADConfig.ChunkSizeByte(), config.Random(), key)
- }
-
- return serializeSymmetricallyEncryptedMdc(ciphertext, c, key, config)
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go
deleted file mode 100644
index a8ef0bbbec2..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright 2023 Proton AG. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "crypto/cipher"
- "crypto/sha256"
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "golang.org/x/crypto/hkdf"
-)
-
-// parseAead parses a V2 SEIPD packet (AEAD) as specified in
-// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2
-func (se *SymmetricallyEncrypted) parseAead(r io.Reader) error {
- headerData := make([]byte, 3)
- if n, err := io.ReadFull(r, headerData); n < 3 {
- return errors.StructuralError("could not read aead header: " + err.Error())
- }
-
- // Cipher
- se.Cipher = CipherFunction(headerData[0])
- // cipherFunc must have block size 16 to use AEAD
- if se.Cipher.blockSize() != 16 {
- return errors.UnsupportedError("invalid aead cipher: " + string(se.Cipher))
- }
-
- // Mode
- se.Mode = AEADMode(headerData[1])
- if se.Mode.TagLength() == 0 {
- return errors.UnsupportedError("unknown aead mode: " + string(se.Mode))
- }
-
- // Chunk size
- se.ChunkSizeByte = headerData[2]
- if se.ChunkSizeByte > 16 {
- return errors.UnsupportedError("invalid aead chunk size byte: " + string(se.ChunkSizeByte))
- }
-
- // Salt
- if n, err := io.ReadFull(r, se.Salt[:]); n < aeadSaltSize {
- return errors.StructuralError("could not read aead salt: " + err.Error())
- }
-
- return nil
-}
-
-// associatedData for chunks: tag, version, cipher, mode, chunk size byte
-func (se *SymmetricallyEncrypted) associatedData() []byte {
- return []byte{
- 0xD2,
- symmetricallyEncryptedVersionAead,
- byte(se.Cipher),
- byte(se.Mode),
- se.ChunkSizeByte,
- }
-}
-
-// decryptAead decrypts a V2 SEIPD packet (AEAD) as specified in
-// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2
-func (se *SymmetricallyEncrypted) decryptAead(inputKey []byte) (io.ReadCloser, error) {
- aead, nonce := getSymmetricallyEncryptedAeadInstance(se.Cipher, se.Mode, inputKey, se.Salt[:], se.associatedData())
-
- // Carry the first tagLen bytes
- tagLen := se.Mode.TagLength()
- peekedBytes := make([]byte, tagLen)
- n, err := io.ReadFull(se.Contents, peekedBytes)
- if n < tagLen || (err != nil && err != io.EOF) {
- return nil, errors.StructuralError("not enough data to decrypt:" + err.Error())
- }
-
- return &aeadDecrypter{
- aeadCrypter: aeadCrypter{
- aead: aead,
- chunkSize: decodeAEADChunkSize(se.ChunkSizeByte),
- initialNonce: nonce,
- associatedData: se.associatedData(),
- chunkIndex: make([]byte, 8),
- packetTag: packetTypeSymmetricallyEncryptedIntegrityProtected,
- },
- reader: se.Contents,
- peekedBytes: peekedBytes,
- }, nil
-}
-
-// serializeSymmetricallyEncryptedAead encrypts to a writer a V2 SEIPD packet (AEAD) as specified in
-// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2
-func serializeSymmetricallyEncryptedAead(ciphertext io.WriteCloser, cipherSuite CipherSuite, chunkSizeByte byte, rand io.Reader, inputKey []byte) (Contents io.WriteCloser, err error) {
- // cipherFunc must have block size 16 to use AEAD
- if cipherSuite.Cipher.blockSize() != 16 {
- return nil, errors.InvalidArgumentError("invalid aead cipher function")
- }
-
- if cipherSuite.Cipher.KeySize() != len(inputKey) {
- return nil, errors.InvalidArgumentError("error in aead serialization: bad key length")
- }
-
- // Data for en/decryption: tag, version, cipher, aead mode, chunk size
- prefix := []byte{
- 0xD2,
- symmetricallyEncryptedVersionAead,
- byte(cipherSuite.Cipher),
- byte(cipherSuite.Mode),
- chunkSizeByte,
- }
-
- // Write header (that correspond to prefix except first byte)
- n, err := ciphertext.Write(prefix[1:])
- if err != nil || n < 4 {
- return nil, err
- }
-
- // Random salt
- salt := make([]byte, aeadSaltSize)
- if _, err := io.ReadFull(rand, salt); err != nil {
- return nil, err
- }
-
- if _, err := ciphertext.Write(salt); err != nil {
- return nil, err
- }
-
- aead, nonce := getSymmetricallyEncryptedAeadInstance(cipherSuite.Cipher, cipherSuite.Mode, inputKey, salt, prefix)
-
- return &aeadEncrypter{
- aeadCrypter: aeadCrypter{
- aead: aead,
- chunkSize: decodeAEADChunkSize(chunkSizeByte),
- associatedData: prefix,
- chunkIndex: make([]byte, 8),
- initialNonce: nonce,
- packetTag: packetTypeSymmetricallyEncryptedIntegrityProtected,
- },
- writer: ciphertext,
- }, nil
-}
-
-func getSymmetricallyEncryptedAeadInstance(c CipherFunction, mode AEADMode, inputKey, salt, associatedData []byte) (aead cipher.AEAD, nonce []byte) {
- hkdfReader := hkdf.New(sha256.New, inputKey, salt, associatedData)
-
- encryptionKey := make([]byte, c.KeySize())
- _, _ = readFull(hkdfReader, encryptionKey)
-
- // Last 64 bits of nonce are the counter
- nonce = make([]byte, mode.IvLength()-8)
-
- _, _ = readFull(hkdfReader, nonce)
-
- blockCipher := c.new(encryptionKey)
- aead = mode.new(blockCipher)
-
- return
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go
deleted file mode 100644
index 645963fa785..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go
+++ /dev/null
@@ -1,259 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "crypto/cipher"
- "crypto/sha1"
- "crypto/subtle"
- "hash"
- "io"
- "strconv"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
-)
-
-// seMdcReader wraps an io.Reader with a no-op Close method.
-type seMdcReader struct {
- in io.Reader
-}
-
-func (ser seMdcReader) Read(buf []byte) (int, error) {
- return ser.in.Read(buf)
-}
-
-func (ser seMdcReader) Close() error {
- return nil
-}
-
-func (se *SymmetricallyEncrypted) decryptMdc(c CipherFunction, key []byte) (io.ReadCloser, error) {
- if !c.IsSupported() {
- return nil, errors.UnsupportedError("unsupported cipher: " + strconv.Itoa(int(c)))
- }
-
- if len(key) != c.KeySize() {
- return nil, errors.InvalidArgumentError("SymmetricallyEncrypted: incorrect key length")
- }
-
- if se.prefix == nil {
- se.prefix = make([]byte, c.blockSize()+2)
- _, err := readFull(se.Contents, se.prefix)
- if err != nil {
- return nil, err
- }
- } else if len(se.prefix) != c.blockSize()+2 {
- return nil, errors.InvalidArgumentError("can't try ciphers with different block lengths")
- }
-
- ocfbResync := OCFBResync
- if se.IntegrityProtected {
- // MDC packets use a different form of OCFB mode.
- ocfbResync = OCFBNoResync
- }
-
- s := NewOCFBDecrypter(c.new(key), se.prefix, ocfbResync)
-
- plaintext := cipher.StreamReader{S: s, R: se.Contents}
-
- if se.IntegrityProtected {
- // IntegrityProtected packets have an embedded hash that we need to check.
- h := sha1.New()
- h.Write(se.prefix)
- return &seMDCReader{in: plaintext, h: h}, nil
- }
-
- // Otherwise, we just need to wrap plaintext so that it's a valid ReadCloser.
- return seMdcReader{plaintext}, nil
-}
-
-const mdcTrailerSize = 1 /* tag byte */ + 1 /* length byte */ + sha1.Size
-
-// An seMDCReader wraps an io.Reader, maintains a running hash and keeps hold
-// of the most recent 22 bytes (mdcTrailerSize). Upon EOF, those bytes form an
-// MDC packet containing a hash of the previous Contents which is checked
-// against the running hash. See RFC 4880, section 5.13.
-type seMDCReader struct {
- in io.Reader
- h hash.Hash
- trailer [mdcTrailerSize]byte
- scratch [mdcTrailerSize]byte
- trailerUsed int
- error bool
- eof bool
-}
-
-func (ser *seMDCReader) Read(buf []byte) (n int, err error) {
- if ser.error {
- err = io.ErrUnexpectedEOF
- return
- }
- if ser.eof {
- err = io.EOF
- return
- }
-
- // If we haven't yet filled the trailer buffer then we must do that
- // first.
- for ser.trailerUsed < mdcTrailerSize {
- n, err = ser.in.Read(ser.trailer[ser.trailerUsed:])
- ser.trailerUsed += n
- if err == io.EOF {
- if ser.trailerUsed != mdcTrailerSize {
- n = 0
- err = io.ErrUnexpectedEOF
- ser.error = true
- return
- }
- ser.eof = true
- n = 0
- return
- }
-
- if err != nil {
- n = 0
- return
- }
- }
-
- // If it's a short read then we read into a temporary buffer and shift
- // the data into the caller's buffer.
- if len(buf) <= mdcTrailerSize {
- n, err = readFull(ser.in, ser.scratch[:len(buf)])
- copy(buf, ser.trailer[:n])
- ser.h.Write(buf[:n])
- copy(ser.trailer[:], ser.trailer[n:])
- copy(ser.trailer[mdcTrailerSize-n:], ser.scratch[:])
- if n < len(buf) {
- ser.eof = true
- err = io.EOF
- }
- return
- }
-
- n, err = ser.in.Read(buf[mdcTrailerSize:])
- copy(buf, ser.trailer[:])
- ser.h.Write(buf[:n])
- copy(ser.trailer[:], buf[n:])
-
- if err == io.EOF {
- ser.eof = true
- }
- return
-}
-
-// This is a new-format packet tag byte for a type 19 (Integrity Protected) packet.
-const mdcPacketTagByte = byte(0x80) | 0x40 | 19
-
-func (ser *seMDCReader) Close() error {
- if ser.error {
- return errors.ErrMDCMissing
- }
-
- for !ser.eof {
- // We haven't seen EOF so we need to read to the end
- var buf [1024]byte
- _, err := ser.Read(buf[:])
- if err == io.EOF {
- break
- }
- if err != nil {
- return errors.ErrMDCMissing
- }
- }
-
- ser.h.Write(ser.trailer[:2])
-
- final := ser.h.Sum(nil)
- if subtle.ConstantTimeCompare(final, ser.trailer[2:]) != 1 {
- return errors.ErrMDCHashMismatch
- }
- // The hash already includes the MDC header, but we still check its value
- // to confirm encryption correctness
- if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size {
- return errors.ErrMDCMissing
- }
- return nil
-}
-
-// An seMDCWriter writes through to an io.WriteCloser while maintains a running
-// hash of the data written. On close, it emits an MDC packet containing the
-// running hash.
-type seMDCWriter struct {
- w io.WriteCloser
- h hash.Hash
-}
-
-func (w *seMDCWriter) Write(buf []byte) (n int, err error) {
- w.h.Write(buf)
- return w.w.Write(buf)
-}
-
-func (w *seMDCWriter) Close() (err error) {
- var buf [mdcTrailerSize]byte
-
- buf[0] = mdcPacketTagByte
- buf[1] = sha1.Size
- w.h.Write(buf[:2])
- digest := w.h.Sum(nil)
- copy(buf[2:], digest)
-
- _, err = w.w.Write(buf[:])
- if err != nil {
- return
- }
- return w.w.Close()
-}
-
-// noOpCloser is like an ioutil.NopCloser, but for an io.Writer.
-type noOpCloser struct {
- w io.Writer
-}
-
-func (c noOpCloser) Write(data []byte) (n int, err error) {
- return c.w.Write(data)
-}
-
-func (c noOpCloser) Close() error {
- return nil
-}
-
-func serializeSymmetricallyEncryptedMdc(ciphertext io.WriteCloser, c CipherFunction, key []byte, config *Config) (Contents io.WriteCloser, err error) {
- // Disallow old cipher suites
- if !c.IsSupported() || c < CipherAES128 {
- return nil, errors.InvalidArgumentError("invalid mdc cipher function")
- }
-
- if c.KeySize() != len(key) {
- return nil, errors.InvalidArgumentError("error in mdc serialization: bad key length")
- }
-
- _, err = ciphertext.Write([]byte{symmetricallyEncryptedVersionMdc})
- if err != nil {
- return
- }
-
- block := c.new(key)
- blockSize := block.BlockSize()
- iv := make([]byte, blockSize)
- _, err = io.ReadFull(config.Random(), iv)
- if err != nil {
- return nil, err
- }
- if err != nil {
- return
- }
- s, prefix := NewOCFBEncrypter(block, iv, OCFBNoResync)
- _, err = ciphertext.Write(prefix)
- if err != nil {
- return
- }
- plaintext := cipher.StreamWriter{S: s, W: ciphertext}
-
- h := sha1.New()
- h.Write(iv)
- h.Write(iv[blockSize-2:])
- Contents = &seMDCWriter{w: plaintext, h: h}
- return
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go
deleted file mode 100644
index 63814ed1324..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "bytes"
- "image"
- "image/jpeg"
- "io"
-)
-
-const UserAttrImageSubpacket = 1
-
-// UserAttribute is capable of storing other types of data about a user
-// beyond name, email and a text comment. In practice, user attributes are typically used
-// to store a signed thumbnail photo JPEG image of the user.
-// See RFC 4880, section 5.12.
-type UserAttribute struct {
- Contents []*OpaqueSubpacket
-}
-
-// NewUserAttributePhoto creates a user attribute packet
-// containing the given images.
-func NewUserAttributePhoto(photos ...image.Image) (uat *UserAttribute, err error) {
- uat = new(UserAttribute)
- for _, photo := range photos {
- var buf bytes.Buffer
- // RFC 4880, Section 5.12.1.
- data := []byte{
- 0x10, 0x00, // Little-endian image header length (16 bytes)
- 0x01, // Image header version 1
- 0x01, // JPEG
- 0, 0, 0, 0, // 12 reserved octets, must be all zero.
- 0, 0, 0, 0,
- 0, 0, 0, 0}
- if _, err = buf.Write(data); err != nil {
- return
- }
- if err = jpeg.Encode(&buf, photo, nil); err != nil {
- return
- }
-
- lengthBuf := make([]byte, 5)
- n := serializeSubpacketLength(lengthBuf, len(buf.Bytes())+1)
- lengthBuf = lengthBuf[:n]
-
- uat.Contents = append(uat.Contents, &OpaqueSubpacket{
- SubType: UserAttrImageSubpacket,
- EncodedLength: lengthBuf,
- Contents: buf.Bytes(),
- })
- }
- return
-}
-
-// NewUserAttribute creates a new user attribute packet containing the given subpackets.
-func NewUserAttribute(contents ...*OpaqueSubpacket) *UserAttribute {
- return &UserAttribute{Contents: contents}
-}
-
-func (uat *UserAttribute) parse(r io.Reader) (err error) {
- // RFC 4880, section 5.13
- b, err := io.ReadAll(r)
- if err != nil {
- return
- }
- uat.Contents, err = OpaqueSubpackets(b)
- return
-}
-
-// Serialize marshals the user attribute to w in the form of an OpenPGP packet, including
-// header.
-func (uat *UserAttribute) Serialize(w io.Writer) (err error) {
- var buf bytes.Buffer
- for _, sp := range uat.Contents {
- err = sp.Serialize(&buf)
- if err != nil {
- return err
- }
- }
- if err = serializeHeader(w, packetTypeUserAttribute, buf.Len()); err != nil {
- return err
- }
- _, err = w.Write(buf.Bytes())
- return
-}
-
-// ImageData returns zero or more byte slices, each containing
-// JPEG File Interchange Format (JFIF), for each photo in the
-// user attribute packet.
-func (uat *UserAttribute) ImageData() (imageData [][]byte) {
- for _, sp := range uat.Contents {
- if sp.SubType == UserAttrImageSubpacket && len(sp.Contents) > 16 {
- imageData = append(imageData, sp.Contents[16:])
- }
- }
- return
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go
deleted file mode 100644
index 3c7451a3c36..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go
+++ /dev/null
@@ -1,166 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "io"
- "strings"
-)
-
-// UserId contains text that is intended to represent the name and email
-// address of the key holder. See RFC 4880, section 5.11. By convention, this
-// takes the form "Full Name (Comment) "
-type UserId struct {
- Id string // By convention, this takes the form "Full Name (Comment) " which is split out in the fields below.
-
- Name, Comment, Email string
-}
-
-func hasInvalidCharacters(s string) bool {
- for _, c := range s {
- switch c {
- case '(', ')', '<', '>', 0:
- return true
- }
- }
- return false
-}
-
-// NewUserId returns a UserId or nil if any of the arguments contain invalid
-// characters. The invalid characters are '\x00', '(', ')', '<' and '>'
-func NewUserId(name, comment, email string) *UserId {
- // RFC 4880 doesn't deal with the structure of userid strings; the
- // name, comment and email form is just a convention. However, there's
- // no convention about escaping the metacharacters and GPG just refuses
- // to create user ids where, say, the name contains a '('. We mirror
- // this behaviour.
-
- if hasInvalidCharacters(name) || hasInvalidCharacters(comment) || hasInvalidCharacters(email) {
- return nil
- }
-
- uid := new(UserId)
- uid.Name, uid.Comment, uid.Email = name, comment, email
- uid.Id = name
- if len(comment) > 0 {
- if len(uid.Id) > 0 {
- uid.Id += " "
- }
- uid.Id += "("
- uid.Id += comment
- uid.Id += ")"
- }
- if len(email) > 0 {
- if len(uid.Id) > 0 {
- uid.Id += " "
- }
- uid.Id += "<"
- uid.Id += email
- uid.Id += ">"
- }
- return uid
-}
-
-func (uid *UserId) parse(r io.Reader) (err error) {
- // RFC 4880, section 5.11
- b, err := io.ReadAll(r)
- if err != nil {
- return
- }
- uid.Id = string(b)
- uid.Name, uid.Comment, uid.Email = parseUserId(uid.Id)
- return
-}
-
-// Serialize marshals uid to w in the form of an OpenPGP packet, including
-// header.
-func (uid *UserId) Serialize(w io.Writer) error {
- err := serializeHeader(w, packetTypeUserId, len(uid.Id))
- if err != nil {
- return err
- }
- _, err = w.Write([]byte(uid.Id))
- return err
-}
-
-// parseUserId extracts the name, comment and email from a user id string that
-// is formatted as "Full Name (Comment) ".
-func parseUserId(id string) (name, comment, email string) {
- var n, c, e struct {
- start, end int
- }
- var state int
-
- for offset, rune := range id {
- switch state {
- case 0:
- // Entering name
- n.start = offset
- state = 1
- fallthrough
- case 1:
- // In name
- if rune == '(' {
- state = 2
- n.end = offset
- } else if rune == '<' {
- state = 5
- n.end = offset
- }
- case 2:
- // Entering comment
- c.start = offset
- state = 3
- fallthrough
- case 3:
- // In comment
- if rune == ')' {
- state = 4
- c.end = offset
- }
- case 4:
- // Between comment and email
- if rune == '<' {
- state = 5
- }
- case 5:
- // Entering email
- e.start = offset
- state = 6
- fallthrough
- case 6:
- // In email
- if rune == '>' {
- state = 7
- e.end = offset
- }
- default:
- // After email
- }
- }
- switch state {
- case 1:
- // ended in the name
- n.end = len(id)
- case 3:
- // ended in comment
- c.end = len(id)
- case 6:
- // ended in email
- e.end = len(id)
- }
-
- name = strings.TrimSpace(id[n.start:n.end])
- comment = strings.TrimSpace(id[c.start:c.end])
- email = strings.TrimSpace(id[e.start:e.end])
-
- // RFC 2822 3.4: alternate simple form of a mailbox
- if email == "" && strings.ContainsRune(name, '@') {
- email = name
- name = ""
- }
-
- return
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go
deleted file mode 100644
index 408506592fc..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go
+++ /dev/null
@@ -1,619 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package openpgp implements high level operations on OpenPGP messages.
-package openpgp // import "github.com/ProtonMail/go-crypto/openpgp"
-
-import (
- "crypto"
- _ "crypto/sha256"
- _ "crypto/sha512"
- "hash"
- "io"
- "strconv"
-
- "github.com/ProtonMail/go-crypto/openpgp/armor"
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
- "github.com/ProtonMail/go-crypto/openpgp/packet"
- _ "golang.org/x/crypto/sha3"
-)
-
-// SignatureType is the armor type for a PGP signature.
-var SignatureType = "PGP SIGNATURE"
-
-// readArmored reads an armored block with the given type.
-func readArmored(r io.Reader, expectedType string) (body io.Reader, err error) {
- block, err := armor.Decode(r)
- if err != nil {
- return
- }
-
- if block.Type != expectedType {
- return nil, errors.InvalidArgumentError("expected '" + expectedType + "', got: " + block.Type)
- }
-
- return block.Body, nil
-}
-
-// MessageDetails contains the result of parsing an OpenPGP encrypted and/or
-// signed message.
-type MessageDetails struct {
- IsEncrypted bool // true if the message was encrypted.
- EncryptedToKeyIds []uint64 // the list of recipient key ids.
- IsSymmetricallyEncrypted bool // true if a passphrase could have decrypted the message.
- DecryptedWith Key // the private key used to decrypt the message, if any.
- IsSigned bool // true if the message is signed.
- SignedByKeyId uint64 // the key id of the signer, if any.
- SignedByFingerprint []byte // the key fingerprint of the signer, if any.
- SignedBy *Key // the key of the signer, if available.
- LiteralData *packet.LiteralData // the metadata of the contents
- UnverifiedBody io.Reader // the contents of the message.
-
- // If IsSigned is true and SignedBy is non-zero then the signature will
- // be verified as UnverifiedBody is read. The signature cannot be
- // checked until the whole of UnverifiedBody is read so UnverifiedBody
- // must be consumed until EOF before the data can be trusted. Even if a
- // message isn't signed (or the signer is unknown) the data may contain
- // an authentication code that is only checked once UnverifiedBody has
- // been consumed. Once EOF has been seen, the following fields are
- // valid. (An authentication code failure is reported as a
- // SignatureError error when reading from UnverifiedBody.)
- Signature *packet.Signature // the signature packet itself.
- SignatureError error // nil if the signature is good.
- UnverifiedSignatures []*packet.Signature // all other unverified signature packets.
-
- decrypted io.ReadCloser
-}
-
-// A PromptFunction is used as a callback by functions that may need to decrypt
-// a private key, or prompt for a passphrase. It is called with a list of
-// acceptable, encrypted private keys and a boolean that indicates whether a
-// passphrase is usable. It should either decrypt a private key or return a
-// passphrase to try. If the decrypted private key or given passphrase isn't
-// correct, the function will be called again, forever. Any error returned will
-// be passed up.
-type PromptFunction func(keys []Key, symmetric bool) ([]byte, error)
-
-// A keyEnvelopePair is used to store a private key with the envelope that
-// contains a symmetric key, encrypted with that key.
-type keyEnvelopePair struct {
- key Key
- encryptedKey *packet.EncryptedKey
-}
-
-// ReadMessage parses an OpenPGP message that may be signed and/or encrypted.
-// The given KeyRing should contain both public keys (for signature
-// verification) and, possibly encrypted, private keys for decrypting.
-// If config is nil, sensible defaults will be used.
-func ReadMessage(r io.Reader, keyring KeyRing, prompt PromptFunction, config *packet.Config) (md *MessageDetails, err error) {
- var p packet.Packet
-
- var symKeys []*packet.SymmetricKeyEncrypted
- var pubKeys []keyEnvelopePair
- // Integrity protected encrypted packet: SymmetricallyEncrypted or AEADEncrypted
- var edp packet.EncryptedDataPacket
-
- packets := packet.NewReader(r)
- md = new(MessageDetails)
- md.IsEncrypted = true
-
- // The message, if encrypted, starts with a number of packets
- // containing an encrypted decryption key. The decryption key is either
- // encrypted to a public key, or with a passphrase. This loop
- // collects these packets.
-ParsePackets:
- for {
- p, err = packets.Next()
- if err != nil {
- return nil, err
- }
- switch p := p.(type) {
- case *packet.SymmetricKeyEncrypted:
- // This packet contains the decryption key encrypted with a passphrase.
- md.IsSymmetricallyEncrypted = true
- symKeys = append(symKeys, p)
- case *packet.EncryptedKey:
- // This packet contains the decryption key encrypted to a public key.
- md.EncryptedToKeyIds = append(md.EncryptedToKeyIds, p.KeyId)
- switch p.Algo {
- case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal, packet.PubKeyAlgoECDH, packet.PubKeyAlgoX25519, packet.PubKeyAlgoX448:
- break
- default:
- continue
- }
- if keyring != nil {
- var keys []Key
- if p.KeyId == 0 {
- keys = keyring.DecryptionKeys()
- } else {
- keys = keyring.KeysById(p.KeyId)
- }
- for _, k := range keys {
- pubKeys = append(pubKeys, keyEnvelopePair{k, p})
- }
- }
- case *packet.SymmetricallyEncrypted:
- if !p.IntegrityProtected && !config.AllowUnauthenticatedMessages() {
- return nil, errors.UnsupportedError("message is not integrity protected")
- }
- edp = p
- break ParsePackets
- case *packet.AEADEncrypted:
- edp = p
- break ParsePackets
- case *packet.Compressed, *packet.LiteralData, *packet.OnePassSignature:
- // This message isn't encrypted.
- if len(symKeys) != 0 || len(pubKeys) != 0 {
- return nil, errors.StructuralError("key material not followed by encrypted message")
- }
- packets.Unread(p)
- return readSignedMessage(packets, nil, keyring, config)
- }
- }
-
- var candidates []Key
- var decrypted io.ReadCloser
-
- // Now that we have the list of encrypted keys we need to decrypt at
- // least one of them or, if we cannot, we need to call the prompt
- // function so that it can decrypt a key or give us a passphrase.
-FindKey:
- for {
- // See if any of the keys already have a private key available
- candidates = candidates[:0]
- candidateFingerprints := make(map[string]bool)
-
- for _, pk := range pubKeys {
- if pk.key.PrivateKey == nil {
- continue
- }
- if !pk.key.PrivateKey.Encrypted {
- if len(pk.encryptedKey.Key) == 0 {
- errDec := pk.encryptedKey.Decrypt(pk.key.PrivateKey, config)
- if errDec != nil {
- continue
- }
- }
- // Try to decrypt symmetrically encrypted
- decrypted, err = edp.Decrypt(pk.encryptedKey.CipherFunc, pk.encryptedKey.Key)
- if err != nil && err != errors.ErrKeyIncorrect {
- return nil, err
- }
- if decrypted != nil {
- md.DecryptedWith = pk.key
- break FindKey
- }
- } else {
- fpr := string(pk.key.PublicKey.Fingerprint[:])
- if v := candidateFingerprints[fpr]; v {
- continue
- }
- candidates = append(candidates, pk.key)
- candidateFingerprints[fpr] = true
- }
- }
-
- if len(candidates) == 0 && len(symKeys) == 0 {
- return nil, errors.ErrKeyIncorrect
- }
-
- if prompt == nil {
- return nil, errors.ErrKeyIncorrect
- }
-
- passphrase, err := prompt(candidates, len(symKeys) != 0)
- if err != nil {
- return nil, err
- }
-
- // Try the symmetric passphrase first
- if len(symKeys) != 0 && passphrase != nil {
- for _, s := range symKeys {
- key, cipherFunc, err := s.Decrypt(passphrase)
- // In v4, on wrong passphrase, session key decryption is very likely to result in an invalid cipherFunc:
- // only for < 5% of cases we will proceed to decrypt the data
- if err == nil {
- decrypted, err = edp.Decrypt(cipherFunc, key)
- if err != nil {
- return nil, err
- }
- if decrypted != nil {
- break FindKey
- }
- }
- }
- }
- }
-
- md.decrypted = decrypted
- if err := packets.Push(decrypted); err != nil {
- return nil, err
- }
- mdFinal, sensitiveParsingErr := readSignedMessage(packets, md, keyring, config)
- if sensitiveParsingErr != nil {
- return nil, errors.StructuralError("parsing error")
- }
- return mdFinal, nil
-}
-
-// readSignedMessage reads a possibly signed message if mdin is non-zero then
-// that structure is updated and returned. Otherwise a fresh MessageDetails is
-// used.
-func readSignedMessage(packets *packet.Reader, mdin *MessageDetails, keyring KeyRing, config *packet.Config) (md *MessageDetails, err error) {
- if mdin == nil {
- mdin = new(MessageDetails)
- }
- md = mdin
-
- var p packet.Packet
- var h hash.Hash
- var wrappedHash hash.Hash
- var prevLast bool
-FindLiteralData:
- for {
- p, err = packets.Next()
- if err != nil {
- return nil, err
- }
- switch p := p.(type) {
- case *packet.Compressed:
- if err := packets.Push(p.Body); err != nil {
- return nil, err
- }
- case *packet.OnePassSignature:
- if prevLast {
- return nil, errors.UnsupportedError("nested signature packets")
- }
-
- if p.IsLast {
- prevLast = true
- }
-
- h, wrappedHash, err = hashForSignature(p.Hash, p.SigType, p.Salt)
- if err != nil {
- md.SignatureError = err
- }
-
- md.IsSigned = true
- if p.Version == 6 {
- md.SignedByFingerprint = p.KeyFingerprint
- }
- md.SignedByKeyId = p.KeyId
-
- if keyring != nil {
- keys := keyring.KeysByIdUsage(p.KeyId, packet.KeyFlagSign)
- if len(keys) > 0 {
- md.SignedBy = &keys[0]
- }
- }
- case *packet.LiteralData:
- md.LiteralData = p
- break FindLiteralData
- }
- }
-
- if md.IsSigned && md.SignatureError == nil {
- md.UnverifiedBody = &signatureCheckReader{packets, h, wrappedHash, md, config}
- } else if md.decrypted != nil {
- md.UnverifiedBody = &checkReader{md, false}
- } else {
- md.UnverifiedBody = md.LiteralData.Body
- }
-
- return md, nil
-}
-
-func wrapHashForSignature(hashFunc hash.Hash, sigType packet.SignatureType) (hash.Hash, error) {
- switch sigType {
- case packet.SigTypeBinary:
- return hashFunc, nil
- case packet.SigTypeText:
- return NewCanonicalTextHash(hashFunc), nil
- }
- return nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType)))
-}
-
-// hashForSignature returns a pair of hashes that can be used to verify a
-// signature. The signature may specify that the contents of the signed message
-// should be preprocessed (i.e. to normalize line endings). Thus this function
-// returns two hashes. The second should be used to hash the message itself and
-// performs any needed preprocessing.
-func hashForSignature(hashFunc crypto.Hash, sigType packet.SignatureType, sigSalt []byte) (hash.Hash, hash.Hash, error) {
- if _, ok := algorithm.HashToHashIdWithSha1(hashFunc); !ok {
- return nil, nil, errors.UnsupportedError("unsupported hash function")
- }
- if !hashFunc.Available() {
- return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashFunc)))
- }
- h := hashFunc.New()
- if sigSalt != nil {
- h.Write(sigSalt)
- }
- wrappedHash, err := wrapHashForSignature(h, sigType)
- if err != nil {
- return nil, nil, err
- }
- switch sigType {
- case packet.SigTypeBinary:
- return h, wrappedHash, nil
- case packet.SigTypeText:
- return h, wrappedHash, nil
- }
- return nil, nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType)))
-}
-
-// checkReader wraps an io.Reader from a LiteralData packet. When it sees EOF
-// it closes the ReadCloser from any SymmetricallyEncrypted packet to trigger
-// MDC checks.
-type checkReader struct {
- md *MessageDetails
- checked bool
-}
-
-func (cr *checkReader) Read(buf []byte) (int, error) {
- n, sensitiveParsingError := cr.md.LiteralData.Body.Read(buf)
- if sensitiveParsingError == io.EOF {
- if cr.checked {
- // Only check once
- return n, io.EOF
- }
- mdcErr := cr.md.decrypted.Close()
- if mdcErr != nil {
- return n, mdcErr
- }
- cr.checked = true
- return n, io.EOF
- }
-
- if sensitiveParsingError != nil {
- return n, errors.StructuralError("parsing error")
- }
-
- return n, nil
-}
-
-// signatureCheckReader wraps an io.Reader from a LiteralData packet and hashes
-// the data as it is read. When it sees an EOF from the underlying io.Reader
-// it parses and checks a trailing Signature packet and triggers any MDC checks.
-type signatureCheckReader struct {
- packets *packet.Reader
- h, wrappedHash hash.Hash
- md *MessageDetails
- config *packet.Config
-}
-
-func (scr *signatureCheckReader) Read(buf []byte) (int, error) {
- n, sensitiveParsingError := scr.md.LiteralData.Body.Read(buf)
-
- // Hash only if required
- if scr.md.SignedBy != nil {
- scr.wrappedHash.Write(buf[:n])
- }
-
- if sensitiveParsingError == io.EOF {
- var p packet.Packet
- var readError error
- var sig *packet.Signature
-
- p, readError = scr.packets.Next()
- for readError == nil {
- var ok bool
- if sig, ok = p.(*packet.Signature); ok {
- if sig.Version == 5 && (sig.SigType == 0x00 || sig.SigType == 0x01) {
- sig.Metadata = scr.md.LiteralData
- }
-
- // If signature KeyID matches
- if scr.md.SignedBy != nil && *sig.IssuerKeyId == scr.md.SignedByKeyId {
- key := scr.md.SignedBy
- signatureError := key.PublicKey.VerifySignature(scr.h, sig)
- if signatureError == nil {
- signatureError = checkSignatureDetails(key, sig, scr.config)
- }
- scr.md.Signature = sig
- scr.md.SignatureError = signatureError
- } else {
- scr.md.UnverifiedSignatures = append(scr.md.UnverifiedSignatures, sig)
- }
- }
-
- p, readError = scr.packets.Next()
- }
-
- if scr.md.SignedBy != nil && scr.md.Signature == nil {
- if scr.md.UnverifiedSignatures == nil {
- scr.md.SignatureError = errors.StructuralError("LiteralData not followed by signature")
- } else {
- scr.md.SignatureError = errors.StructuralError("No matching signature found")
- }
- }
-
- // The SymmetricallyEncrypted packet, if any, might have an
- // unsigned hash of its own. In order to check this we need to
- // close that Reader.
- if scr.md.decrypted != nil {
- mdcErr := scr.md.decrypted.Close()
- if mdcErr != nil {
- return n, mdcErr
- }
- }
- return n, io.EOF
- }
-
- if sensitiveParsingError != nil {
- return n, errors.StructuralError("parsing error")
- }
-
- return n, nil
-}
-
-// VerifyDetachedSignature takes a signed file and a detached signature and
-// returns the signature packet and the entity the signature was signed by,
-// if any, and a possible signature verification error.
-// If the signer isn't known, ErrUnknownIssuer is returned.
-func VerifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) {
- return verifyDetachedSignature(keyring, signed, signature, nil, false, config)
-}
-
-// VerifyDetachedSignatureAndHash performs the same actions as
-// VerifyDetachedSignature and checks that the expected hash functions were used.
-func VerifyDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) {
- return verifyDetachedSignature(keyring, signed, signature, expectedHashes, true, config)
-}
-
-// CheckDetachedSignature takes a signed file and a detached signature and
-// returns the entity the signature was signed by, if any, and a possible
-// signature verification error. If the signer isn't known,
-// ErrUnknownIssuer is returned.
-func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (signer *Entity, err error) {
- _, signer, err = verifyDetachedSignature(keyring, signed, signature, nil, false, config)
- return
-}
-
-// CheckDetachedSignatureAndHash performs the same actions as
-// CheckDetachedSignature and checks that the expected hash functions were used.
-func CheckDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (signer *Entity, err error) {
- _, signer, err = verifyDetachedSignature(keyring, signed, signature, expectedHashes, true, config)
- return
-}
-
-func verifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, checkHashes bool, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) {
- var issuerKeyId uint64
- var hashFunc crypto.Hash
- var sigType packet.SignatureType
- var keys []Key
- var p packet.Packet
-
- packets := packet.NewReader(signature)
- for {
- p, err = packets.Next()
- if err == io.EOF {
- return nil, nil, errors.ErrUnknownIssuer
- }
- if err != nil {
- return nil, nil, err
- }
-
- var ok bool
- sig, ok = p.(*packet.Signature)
- if !ok {
- return nil, nil, errors.StructuralError("non signature packet found")
- }
- if sig.IssuerKeyId == nil {
- return nil, nil, errors.StructuralError("signature doesn't have an issuer")
- }
- issuerKeyId = *sig.IssuerKeyId
- hashFunc = sig.Hash
- sigType = sig.SigType
- if checkHashes {
- matchFound := false
- // check for hashes
- for _, expectedHash := range expectedHashes {
- if hashFunc == expectedHash {
- matchFound = true
- break
- }
- }
- if !matchFound {
- return nil, nil, errors.StructuralError("hash algorithm or salt mismatch with cleartext message headers")
- }
- }
- keys = keyring.KeysByIdUsage(issuerKeyId, packet.KeyFlagSign)
- if len(keys) > 0 {
- break
- }
- }
-
- if len(keys) == 0 {
- panic("unreachable")
- }
-
- h, err := sig.PrepareVerify()
- if err != nil {
- return nil, nil, err
- }
- wrappedHash, err := wrapHashForSignature(h, sigType)
- if err != nil {
- return nil, nil, err
- }
-
- if _, err := io.Copy(wrappedHash, signed); err != nil && err != io.EOF {
- return nil, nil, err
- }
-
- for _, key := range keys {
- err = key.PublicKey.VerifySignature(h, sig)
- if err == nil {
- return sig, key.Entity, checkSignatureDetails(&key, sig, config)
- }
- }
-
- return nil, nil, err
-}
-
-// CheckArmoredDetachedSignature performs the same actions as
-// CheckDetachedSignature but expects the signature to be armored.
-func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (signer *Entity, err error) {
- body, err := readArmored(signature, SignatureType)
- if err != nil {
- return
- }
-
- return CheckDetachedSignature(keyring, signed, body, config)
-}
-
-// checkSignatureDetails returns an error if:
-// - The signature (or one of the binding signatures mentioned below)
-// has a unknown critical notation data subpacket
-// - The primary key of the signing entity is revoked
-// - The primary identity is revoked
-// - The signature is expired
-// - The primary key of the signing entity is expired according to the
-// primary identity binding signature
-//
-// ... or, if the signature was signed by a subkey and:
-// - The signing subkey is revoked
-// - The signing subkey is expired according to the subkey binding signature
-// - The signing subkey binding signature is expired
-// - The signing subkey cross-signature is expired
-//
-// NOTE: The order of these checks is important, as the caller may choose to
-// ignore ErrSignatureExpired or ErrKeyExpired errors, but should never
-// ignore any other errors.
-func checkSignatureDetails(key *Key, signature *packet.Signature, config *packet.Config) error {
- now := config.Now()
- primarySelfSignature, primaryIdentity := key.Entity.PrimarySelfSignature()
- signedBySubKey := key.PublicKey != key.Entity.PrimaryKey
- sigsToCheck := []*packet.Signature{signature, primarySelfSignature}
- if signedBySubKey {
- sigsToCheck = append(sigsToCheck, key.SelfSignature, key.SelfSignature.EmbeddedSignature)
- }
- for _, sig := range sigsToCheck {
- for _, notation := range sig.Notations {
- if notation.IsCritical && !config.KnownNotation(notation.Name) {
- return errors.SignatureError("unknown critical notation: " + notation.Name)
- }
- }
- }
- if key.Entity.Revoked(now) || // primary key is revoked
- (signedBySubKey && key.Revoked(now)) || // subkey is revoked
- (primaryIdentity != nil && primaryIdentity.Revoked(now)) { // primary identity is revoked for v4
- return errors.ErrKeyRevoked
- }
- if key.Entity.PrimaryKey.KeyExpired(primarySelfSignature, now) { // primary key is expired
- return errors.ErrKeyExpired
- }
- if signedBySubKey {
- if key.PublicKey.KeyExpired(key.SelfSignature, now) { // subkey is expired
- return errors.ErrKeyExpired
- }
- }
- for _, sig := range sigsToCheck {
- if sig.SigExpired(now) { // any of the relevant signatures are expired
- return errors.ErrSignatureExpired
- }
- }
- return nil
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go
deleted file mode 100644
index 670d60226a4..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go
+++ /dev/null
@@ -1,457 +0,0 @@
-package openpgp
-
-const testKey1KeyId uint64 = 0xA34D7E18C20C31BB
-const testKey3KeyId uint64 = 0x338934250CCC0360
-const testKeyP256KeyId uint64 = 0xd44a2c495918513e
-
-const signedInput = "Signed message\nline 2\nline 3\n"
-const signedTextInput = "Signed message\r\nline 2\r\nline 3\r\n"
-
-const recipientUnspecifiedHex = "848c0300000000000000000103ff62d4d578d03cf40c3da998dfe216c074fa6ddec5e31c197c9666ba292830d91d18716a80f699f9d897389a90e6d62d0238f5f07a5248073c0f24920e4bc4a30c2d17ee4e0cae7c3d4aaa4e8dced50e3010a80ee692175fa0385f62ecca4b56ee6e9980aa3ec51b61b077096ac9e800edaf161268593eedb6cc7027ff5cb32745d250010d407a6221ae22ef18469b444f2822478c4d190b24d36371a95cb40087cdd42d9399c3d06a53c0673349bfb607927f20d1e122bde1e2bf3aa6cae6edf489629bcaa0689539ae3b718914d88ededc3b"
-
-const detachedSignatureHex = "889c04000102000605024d449cd1000a0910a34d7e18c20c31bb167603ff57718d09f28a519fdc7b5a68b6a3336da04df85e38c5cd5d5bd2092fa4629848a33d85b1729402a2aab39c3ac19f9d573f773cc62c264dc924c067a79dfd8a863ae06c7c8686120760749f5fd9b1e03a64d20a7df3446ddc8f0aeadeaeba7cbaee5c1e366d65b6a0c6cc749bcb912d2f15013f812795c2e29eb7f7b77f39ce77"
-
-const detachedSignatureTextHex = "889c04010102000605024d449d21000a0910a34d7e18c20c31bbc8c60400a24fbef7342603a41cb1165767bd18985d015fb72fe05db42db36cfb2f1d455967f1e491194fbf6cf88146222b23bf6ffbd50d17598d976a0417d3192ff9cc0034fd00f287b02e90418bbefe609484b09231e4e7a5f3562e199bf39909ab5276c4d37382fe088f6b5c3426fc1052865da8b3ab158672d58b6264b10823dc4b39"
-
-const detachedSignatureDSAHex = "884604001102000605024d6c4eac000a0910338934250ccc0360f18d00a087d743d6405ed7b87755476629600b8b694a39e900a0abff8126f46faf1547c1743c37b21b4ea15b8f83"
-
-const detachedSignatureP256Hex = "885e0400130a0006050256e5bb00000a0910d44a2c495918513edef001009841a4f792beb0befccb35c8838a6a87d9b936beaa86db6745ddc7b045eee0cf00fd1ac1f78306b17e965935dd3f8bae4587a76587e4af231efe19cc4011a8434817"
-
-// The plaintext is https://www.gutenberg.org/cache/epub/1080/pg1080.txt
-const modestProposalSha512 = "lbbrB1+WP3T9AaC9OQqBdOcCjgeEQadlulXsNPgVx0tyqPzDHwUugZ2gE7V0ESKAw6kAVfgkcuvfgxAAGaeHtw=="
-
-const testKeys1And2Hex = "988d044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd0011010001b41054657374204b6579203120285253412988b804130102002205024d3c5c10021b03060b090807030206150802090a0b0416020301021e01021780000a0910a34d7e18c20c31bbb5b304009cc45fe610b641a2c146331be94dade0a396e73ca725e1b25c21708d9cab46ecca5ccebc23055879df8f99eea39b377962a400f2ebdc36a7c99c333d74aeba346315137c3ff9d0a09b0273299090343048afb8107cf94cbd1400e3026f0ccac7ecebbc4d78588eb3e478fe2754d3ca664bcf3eac96ca4a6b0c8d7df5102f60f6b0020003b88d044d3c5c10010400b201df61d67487301f11879d514f4248ade90c8f68c7af1284c161098de4c28c2850f1ec7b8e30f959793e571542ffc6532189409cb51c3d30dad78c4ad5165eda18b20d9826d8707d0f742e2ab492103a85bbd9ddf4f5720f6de7064feb0d39ee002219765bb07bcfb8b877f47abe270ddeda4f676108cecb6b9bb2ad484a4f0011010001889f04180102000905024d3c5c10021b0c000a0910a34d7e18c20c31bb1a03040085c8d62e16d05dc4e9dad64953c8a2eed8b6c12f92b1575eeaa6dcf7be9473dd5b24b37b6dffbb4e7c99ed1bd3cb11634be19b3e6e207bed7505c7ca111ccf47cb323bf1f8851eb6360e8034cbff8dd149993c959de89f8f77f38e7e98b8e3076323aa719328e2b408db5ec0d03936efd57422ba04f925cdc7b4c1af7590e40ab0020003988d044d3c5c33010400b488c3e5f83f4d561f317817538d9d0397981e9aef1321ca68ebfae1cf8b7d388e19f4b5a24a82e2fbbf1c6c26557a6c5845307a03d815756f564ac7325b02bc83e87d5480a8fae848f07cb891f2d51ce7df83dcafdc12324517c86d472cc0ee10d47a68fd1d9ae49a6c19bbd36d82af597a0d88cc9c49de9df4e696fc1f0b5d0011010001b42754657374204b6579203220285253412c20656e637279707465642070726976617465206b65792988b804130102002205024d3c5c33021b03060b090807030206150802090a0b0416020301021e01021780000a0910d4984f961e35246b98940400908a73b6a6169f700434f076c6c79015a49bee37130eaf23aaa3cfa9ce60bfe4acaa7bc95f1146ada5867e0079babb38804891f4f0b8ebca57a86b249dee786161a755b7a342e68ccf3f78ed6440a93a6626beb9a37aa66afcd4f888790cb4bb46d94a4ae3eb3d7d3e6b00f6bfec940303e89ec5b32a1eaaacce66497d539328b0020003b88d044d3c5c33010400a4e913f9442abcc7f1804ccab27d2f787ffa592077ca935a8bb23165bd8d57576acac647cc596b2c3f814518cc8c82953c7a4478f32e0cf645630a5ba38d9618ef2bc3add69d459ae3dece5cab778938d988239f8c5ae437807075e06c828019959c644ff05ef6a5a1dab72227c98e3a040b0cf219026640698d7a13d8538a570011010001889f04180102000905024d3c5c33021b0c000a0910d4984f961e35246b26c703ff7ee29ef53bc1ae1ead533c408fa136db508434e233d6e62be621e031e5940bbd4c08142aed0f82217e7c3e1ec8de574bc06ccf3c36633be41ad78a9eacd209f861cae7b064100758545cc9dd83db71806dc1cfd5fb9ae5c7474bba0c19c44034ae61bae5eca379383339dece94ff56ff7aa44a582f3e5c38f45763af577c0934b0020003"
-
-const testKeys1And2PrivateHex = "9501d8044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd00110100010003ff4d91393b9a8e3430b14d6209df42f98dc927425b881f1209f319220841273a802a97c7bdb8b3a7740b3ab5866c4d1d308ad0d3a79bd1e883aacf1ac92dfe720285d10d08752a7efe3c609b1d00f17f2805b217be53999a7da7e493bfc3e9618fd17018991b8128aea70a05dbce30e4fbe626aa45775fa255dd9177aabf4df7cf0200c1ded12566e4bc2bb590455e5becfb2e2c9796482270a943343a7835de41080582c2be3caf5981aa838140e97afa40ad652a0b544f83eb1833b0957dce26e47b0200eacd6046741e9ce2ec5beb6fb5e6335457844fb09477f83b050a96be7da043e17f3a9523567ed40e7a521f818813a8b8a72209f1442844843ccc7eb9805442570200bdafe0438d97ac36e773c7162028d65844c4d463e2420aa2228c6e50dc2743c3d6c72d0d782a5173fe7be2169c8a9f4ef8a7cf3e37165e8c61b89c346cdc6c1799d2b41054657374204b6579203120285253412988b804130102002205024d3c5c10021b03060b090807030206150802090a0b0416020301021e01021780000a0910a34d7e18c20c31bbb5b304009cc45fe610b641a2c146331be94dade0a396e73ca725e1b25c21708d9cab46ecca5ccebc23055879df8f99eea39b377962a400f2ebdc36a7c99c333d74aeba346315137c3ff9d0a09b0273299090343048afb8107cf94cbd1400e3026f0ccac7ecebbc4d78588eb3e478fe2754d3ca664bcf3eac96ca4a6b0c8d7df5102f60f6b00200009d01d8044d3c5c10010400b201df61d67487301f11879d514f4248ade90c8f68c7af1284c161098de4c28c2850f1ec7b8e30f959793e571542ffc6532189409cb51c3d30dad78c4ad5165eda18b20d9826d8707d0f742e2ab492103a85bbd9ddf4f5720f6de7064feb0d39ee002219765bb07bcfb8b877f47abe270ddeda4f676108cecb6b9bb2ad484a4f00110100010003fd17a7490c22a79c59281fb7b20f5e6553ec0c1637ae382e8adaea295f50241037f8997cf42c1ce26417e015091451b15424b2c59eb8d4161b0975630408e394d3b00f88d4b4e18e2cc85e8251d4753a27c639c83f5ad4a571c4f19d7cd460b9b73c25ade730c99df09637bd173d8e3e981ac64432078263bb6dc30d3e974150dd0200d0ee05be3d4604d2146fb0457f31ba17c057560785aa804e8ca5530a7cd81d3440d0f4ba6851efcfd3954b7e68908fc0ba47f7ac37bf559c6c168b70d3a7c8cd0200da1c677c4bce06a068070f2b3733b0a714e88d62aa3f9a26c6f5216d48d5c2b5624144f3807c0df30be66b3268eeeca4df1fbded58faf49fc95dc3c35f134f8b01fd1396b6c0fc1b6c4f0eb8f5e44b8eace1e6073e20d0b8bc5385f86f1cf3f050f66af789f3ef1fc107b7f4421e19e0349c730c68f0a226981f4e889054fdb4dc149e8e889f04180102000905024d3c5c10021b0c000a0910a34d7e18c20c31bb1a03040085c8d62e16d05dc4e9dad64953c8a2eed8b6c12f92b1575eeaa6dcf7be9473dd5b24b37b6dffbb4e7c99ed1bd3cb11634be19b3e6e207bed7505c7ca111ccf47cb323bf1f8851eb6360e8034cbff8dd149993c959de89f8f77f38e7e98b8e3076323aa719328e2b408db5ec0d03936efd57422ba04f925cdc7b4c1af7590e40ab00200009501fe044d3c5c33010400b488c3e5f83f4d561f317817538d9d0397981e9aef1321ca68ebfae1cf8b7d388e19f4b5a24a82e2fbbf1c6c26557a6c5845307a03d815756f564ac7325b02bc83e87d5480a8fae848f07cb891f2d51ce7df83dcafdc12324517c86d472cc0ee10d47a68fd1d9ae49a6c19bbd36d82af597a0d88cc9c49de9df4e696fc1f0b5d0011010001fe030302e9030f3c783e14856063f16938530e148bc57a7aa3f3e4f90df9dceccdc779bc0835e1ad3d006e4a8d7b36d08b8e0de5a0d947254ecfbd22037e6572b426bcfdc517796b224b0036ff90bc574b5509bede85512f2eefb520fb4b02aa523ba739bff424a6fe81c5041f253f8d757e69a503d3563a104d0d49e9e890b9d0c26f96b55b743883b472caa7050c4acfd4a21f875bdf1258d88bd61224d303dc9df77f743137d51e6d5246b88c406780528fd9a3e15bab5452e5b93970d9dcc79f48b38651b9f15bfbcf6da452837e9cc70683d1bdca94507870f743e4ad902005812488dd342f836e72869afd00ce1850eea4cfa53ce10e3608e13d3c149394ee3cbd0e23d018fcbcb6e2ec5a1a22972d1d462ca05355d0d290dd2751e550d5efb38c6c89686344df64852bf4ff86638708f644e8ec6bd4af9b50d8541cb91891a431326ab2e332faa7ae86cfb6e0540aa63160c1e5cdd5a4add518b303fff0a20117c6bc77f7cfbaf36b04c865c6c2b42754657374204b6579203220285253412c20656e637279707465642070726976617465206b65792988b804130102002205024d3c5c33021b03060b090807030206150802090a0b0416020301021e01021780000a0910d4984f961e35246b98940400908a73b6a6169f700434f076c6c79015a49bee37130eaf23aaa3cfa9ce60bfe4acaa7bc95f1146ada5867e0079babb38804891f4f0b8ebca57a86b249dee786161a755b7a342e68ccf3f78ed6440a93a6626beb9a37aa66afcd4f888790cb4bb46d94a4ae3eb3d7d3e6b00f6bfec940303e89ec5b32a1eaaacce66497d539328b00200009d01fe044d3c5c33010400a4e913f9442abcc7f1804ccab27d2f787ffa592077ca935a8bb23165bd8d57576acac647cc596b2c3f814518cc8c82953c7a4478f32e0cf645630a5ba38d9618ef2bc3add69d459ae3dece5cab778938d988239f8c5ae437807075e06c828019959c644ff05ef6a5a1dab72227c98e3a040b0cf219026640698d7a13d8538a570011010001fe030302e9030f3c783e148560f936097339ae381d63116efcf802ff8b1c9360767db5219cc987375702a4123fd8657d3e22700f23f95020d1b261eda5257e9a72f9a918e8ef22dd5b3323ae03bbc1923dd224db988cadc16acc04b120a9f8b7e84da9716c53e0334d7b66586ddb9014df604b41be1e960dcfcbc96f4ed150a1a0dd070b9eb14276b9b6be413a769a75b519a53d3ecc0c220e85cd91ca354d57e7344517e64b43b6e29823cbd87eae26e2b2e78e6dedfbb76e3e9f77bcb844f9a8932eb3db2c3f9e44316e6f5d60e9e2a56e46b72abe6b06dc9a31cc63f10023d1f5e12d2a3ee93b675c96f504af0001220991c88db759e231b3320dcedf814dcf723fd9857e3d72d66a0f2af26950b915abdf56c1596f46a325bf17ad4810d3535fb02a259b247ac3dbd4cc3ecf9c51b6c07cebb009c1506fba0a89321ec8683e3fd009a6e551d50243e2d5092fefb3321083a4bad91320dc624bd6b5dddf93553e3d53924c05bfebec1fb4bd47e89a1a889f04180102000905024d3c5c33021b0c000a0910d4984f961e35246b26c703ff7ee29ef53bc1ae1ead533c408fa136db508434e233d6e62be621e031e5940bbd4c08142aed0f82217e7c3e1ec8de574bc06ccf3c36633be41ad78a9eacd209f861cae7b064100758545cc9dd83db71806dc1cfd5fb9ae5c7474bba0c19c44034ae61bae5eca379383339dece94ff56ff7aa44a582f3e5c38f45763af577c0934b0020000"
-
-const dsaElGamalTestKeysHex = "9501e1044dfcb16a110400aa3e5c1a1f43dd28c2ffae8abf5cfce555ee874134d8ba0a0f7b868ce2214beddc74e5e1e21ded354a95d18acdaf69e5e342371a71fbb9093162e0c5f3427de413a7f2c157d83f5cd2f9d791256dc4f6f0e13f13c3302af27f2384075ab3021dff7a050e14854bbde0a1094174855fc02f0bae8e00a340d94a1f22b32e48485700a0cec672ac21258fb95f61de2ce1af74b2c4fa3e6703ff698edc9be22c02ae4d916e4fa223f819d46582c0516235848a77b577ea49018dcd5e9e15cff9dbb4663a1ae6dd7580fa40946d40c05f72814b0f88481207e6c0832c3bded4853ebba0a7e3bd8e8c66df33d5a537cd4acf946d1080e7a3dcea679cb2b11a72a33a2b6a9dc85f466ad2ddf4c3db6283fa645343286971e3dd700703fc0c4e290d45767f370831a90187e74e9972aae5bff488eeff7d620af0362bfb95c1a6c3413ab5d15a2e4139e5d07a54d72583914661ed6a87cce810be28a0aa8879a2dd39e52fb6fe800f4f181ac7e328f740cde3d09a05cecf9483e4cca4253e60d4429ffd679d9996a520012aad119878c941e3cf151459873bdfc2a9563472fe0303027a728f9feb3b864260a1babe83925ce794710cfd642ee4ae0e5b9d74cee49e9c67b6cd0ea5dfbb582132195a121356a1513e1bca73e5b80c58c7ccb4164453412f456c47616d616c2054657374204b65792031886204131102002205024dfcb16a021b03060b090807030206150802090a0b0416020301021e01021780000a091033af447ccd759b09fadd00a0b8fd6f5a790bad7e9f2dbb7632046dc4493588db009c087c6a9ba9f7f49fab221587a74788c00db4889ab00200009d0157044dfcb16a1004008dec3f9291205255ccff8c532318133a6840739dd68b03ba942676f9038612071447bf07d00d559c5c0875724ea16a4c774f80d8338b55fca691a0522e530e604215b467bbc9ccfd483a1da99d7bc2648b4318fdbd27766fc8bfad3fddb37c62b8ae7ccfe9577e9b8d1e77c1d417ed2c2ef02d52f4da11600d85d3229607943700030503ff506c94c87c8cab778e963b76cf63770f0a79bf48fb49d3b4e52234620fc9f7657f9f8d56c96a2b7c7826ae6b57ebb2221a3fe154b03b6637cea7e6d98e3e45d87cf8dc432f723d3d71f89c5192ac8d7290684d2c25ce55846a80c9a7823f6acd9bb29fa6cd71f20bc90eccfca20451d0c976e460e672b000df49466408d527affe0303027a728f9feb3b864260abd761730327bca2aaa4ea0525c175e92bf240682a0e83b226f97ecb2e935b62c9a133858ce31b271fa8eb41f6a1b3cd72a63025ce1a75ee4180dcc284884904181102000905024dfcb16a021b0c000a091033af447ccd759b09dd0b009e3c3e7296092c81bee5a19929462caaf2fff3ae26009e218c437a2340e7ea628149af1ec98ec091a43992b00200009501e1044dfcb1be1104009f61faa61aa43df75d128cbe53de528c4aec49ce9360c992e70c77072ad5623de0a3a6212771b66b39a30dad6781799e92608316900518ec01184a85d872365b7d2ba4bacfb5882ea3c2473d3750dc6178cc1cf82147fb58caa28b28e9f12f6d1efcb0534abed644156c91cca4ab78834268495160b2400bc422beb37d237c2300a0cac94911b6d493bda1e1fbc6feeca7cb7421d34b03fe22cec6ccb39675bb7b94a335c2b7be888fd3906a1125f33301d8aa6ec6ee6878f46f73961c8d57a3e9544d8ef2a2cbfd4d52da665b1266928cfe4cb347a58c412815f3b2d2369dec04b41ac9a71cc9547426d5ab941cccf3b18575637ccfb42df1a802df3cfe0a999f9e7109331170e3a221991bf868543960f8c816c28097e503fe319db10fb98049f3a57d7c80c420da66d56f3644371631fad3f0ff4040a19a4fedc2d07727a1b27576f75a4d28c47d8246f27071e12d7a8de62aad216ddbae6aa02efd6b8a3e2818cda48526549791ab277e447b3a36c57cefe9b592f5eab73959743fcc8e83cbefec03a329b55018b53eec196765ae40ef9e20521a603c551efe0303020950d53a146bf9c66034d00c23130cce95576a2ff78016ca471276e8227fb30b1ffbd92e61804fb0c3eff9e30b1a826ee8f3e4730b4d86273ca977b4164453412f456c47616d616c2054657374204b65792032886204131102002205024dfcb1be021b03060b090807030206150802090a0b0416020301021e01021780000a0910a86bf526325b21b22bd9009e34511620415c974750a20df5cb56b182f3b48e6600a0a9466cb1a1305a84953445f77d461593f1d42bc1b00200009d0157044dfcb1be1004009565a951da1ee87119d600c077198f1c1bceb0f7aa54552489298e41ff788fa8f0d43a69871f0f6f77ebdfb14a4260cf9fbeb65d5844b4272a1904dd95136d06c3da745dc46327dd44a0f16f60135914368c8039a34033862261806bb2c5ce1152e2840254697872c85441ccb7321431d75a747a4bfb1d2c66362b51ce76311700030503fc0ea76601c196768070b7365a200e6ddb09307f262d5f39eec467b5f5784e22abdf1aa49226f59ab37cb49969d8f5230ea65caf56015abda62604544ed526c5c522bf92bed178a078789f6c807b6d34885688024a5bed9e9f8c58d11d4b82487b44c5f470c5606806a0443b79cadb45e0f897a561a53f724e5349b9267c75ca17fe0303020950d53a146bf9c660bc5f4ce8f072465e2d2466434320c1e712272fafc20e342fe7608101580fa1a1a367e60486a7cd1246b7ef5586cf5e10b32762b710a30144f12dd17dd4884904181102000905024dfcb1be021b0c000a0910a86bf526325b21b2904c00a0b2b66b4b39ccffda1d10f3ea8d58f827e30a8b8e009f4255b2d8112a184e40cde43a34e8655ca7809370b0020000"
-
-const ed25519wX25519Key = "c54b0663877fe31b00000020f94da7bb48d60a61e567706a6587d0331999bb9d891a08242ead84543df895a3001972817b12be707e8d5f586ce61361201d344eb266a2c82fde6835762b65b0b7c2b1061f1b0a00000042058263877fe3030b090705150a0e080c021600029b03021e09222106cb186c4f0609a697e4d52dfa6c722b0c1f1e27c18a56708f6525ec27bad9acc905270902070200000000ad2820103e2d7d227ec0e6d7ce4471db36bfc97083253690271498a7ef0576c07faae14585b3b903b0127ec4fda2f023045a2ec76bcb4f9571a9651e14aee1137a1d668442c88f951e33c4ffd33fb9a17d511eed758fc6d9cc50cb5fd793b2039d5804c74b0663877fe319000000208693248367f9e5015db922f8f48095dda784987f2d5985b12fbad16caf5e4435004d600a4f794d44775c57a26e0feefed558e9afffd6ad0d582d57fb2ba2dcedb8c29b06181b0a0000002c050263877fe322a106cb186c4f0609a697e4d52dfa6c722b0c1f1e27c18a56708f6525ec27bad9acc9021b0c00000000defa20a6e9186d9d5935fc8fe56314cdb527486a5a5120f9b762a235a729f039010a56b89c658568341fbef3b894e9834ad9bc72afae2f4c9c47a43855e65f1cb0a3f77bbc5f61085c1f8249fe4e7ca59af5f0bcee9398e0fa8d76e522e1d8ab42bb0d"
-
-const signedMessageHex = "a3019bc0cbccc0c4b8d8b74ee2108fe16ec6d3ca490cbe362d3f8333d3f352531472538b8b13d353b97232f352158c20943157c71c16064626063656269052062e4e01987e9b6fccff4b7df3a34c534b23e679cbec3bc0f8f6e64dfb4b55fe3f8efa9ce110ddb5cd79faf1d753c51aecfa669f7e7aa043436596cccc3359cb7dd6bbe9ecaa69e5989d9e57209571edc0b2fa7f57b9b79a64ee6e99ce1371395fee92fec2796f7b15a77c386ff668ee27f6d38f0baa6c438b561657377bf6acff3c5947befd7bf4c196252f1d6e5c524d0300"
-
-const signedTextMessageHex = "a3019bc0cbccc8c4b8d8b74ee2108fe16ec6d36a250cbece0c178233d3f352531472538b8b13d35379b97232f352158ca0b4312f57c71c1646462606365626906a062e4e019811591798ff99bf8afee860b0d8a8c2a85c3387e3bcf0bb3b17987f2bbcfab2aa526d930cbfd3d98757184df3995c9f3e7790e36e3e9779f06089d4c64e9e47dd6202cb6e9bc73c5d11bb59fbaf89d22d8dc7cf199ddf17af96e77c5f65f9bbed56f427bd8db7af37f6c9984bf9385efaf5f184f986fb3e6adb0ecfe35bbf92d16a7aa2a344fb0bc52fb7624f0200"
-
-const signedEncryptedMessageHex = "c18c032a67d68660df41c70103ff5a84c9a72f80e74ef0384c2d6a9ebfe2b09e06a8f298394f6d2abf174e40934ab0ec01fb2d0ddf21211c6fe13eb238563663b017a6b44edca552eb4736c4b7dc6ed907dd9e12a21b51b64b46f902f76fb7aaf805c1db8070574d8d0431a23e324a750f77fb72340a17a42300ee4ca8207301e95a731da229a63ab9c6b44541fbd2c11d016d810b3b3b2b38f15b5b40f0a4910332829c2062f1f7cc61f5b03677d73c54cafa1004ced41f315d46444946faae571d6f426e6dbd45d9780eb466df042005298adabf7ce0ef766dfeb94cd449c7ed0046c880339599c4711af073ce649b1e237c40b50a5536283e03bdbb7afad78bd08707715c67fb43295f905b4c479178809d429a8e167a9a8c6dfd8ab20b4edebdc38d6dec879a3202e1b752690d9bb5b0c07c5a227c79cc200e713a99251a4219d62ad5556900cf69bd384b6c8e726c7be267471d0d23af956da165af4af757246c2ebcc302b39e8ef2fccb4971b234fcda22d759ddb20e27269ee7f7fe67898a9de721bfa02ab0becaa046d00ea16cb1afc4e2eab40d0ac17121c565686e5cbd0cbdfbd9d6db5c70278b9c9db5a83176d04f61fbfbc4471d721340ede2746e5c312ded4f26787985af92b64fae3f253dbdde97f6a5e1996fd4d865599e32ff76325d3e9abe93184c02988ee89a4504356a4ef3b9b7a57cbb9637ca90af34a7676b9ef559325c3cca4e29d69fec1887f5440bb101361d744ad292a8547f22b4f22b419a42aa836169b89190f46d9560824cb2ac6e8771de8223216a5e647e132ab9eebcba89569ab339cb1c3d70fe806b31f4f4c600b4103b8d7583ebff16e43dcda551e6530f975122eb8b29"
-
-const verifiedSignatureEncryptedMessageHex = "c2b304000108000605026048f6d600210910a34d7e18c20c31bb1621045fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb9a3b0400a32ddac1af259c1b0abab0041327ea04970944401978fb647dd1cf9aba4f164e43f0d8a9389501886474bdd4a6e77f6aea945c07dfbf87743835b44cc2c39a1f9aeecfa83135abc92e18e50396f2e6a06c44e0188b0081effbfb4160d28f118d4ff73dd199a102e47cffd8c7ff2bacd83ae72b5820c021a486766dd587b5da61"
-
-const unverifiedSignatureEncryptedMessageHex = "c2b304000108000605026048f6d600210910a34d7e18c20c31bb1621045fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb9a3b0400a32ddac1af259c1b0abab0041327ea04970944401978fb647dd1cf9aba4f164e43f0d8a9389501886474bdd4a6e77f6aea945c07dfbf87743835b44cc2c39a1f9aeecfa83135abc92e18e50396f2e6a06c44e0188b0081effbfb4160d28f118d4ff73dd199a102e47cffd8c7ff2bacd83ae72b5820c021a486766dd587b5da61"
-
-const signedEncryptedMessage2Hex = "85010e03cf6a7abcd43e36731003fb057f5495b79db367e277cdbe4ab90d924ddee0c0381494112ff8c1238fb0184af35d1731573b01bc4c55ecacd2aafbe2003d36310487d1ecc9ac994f3fada7f9f7f5c3a64248ab7782906c82c6ff1303b69a84d9a9529c31ecafbcdb9ba87e05439897d87e8a2a3dec55e14df19bba7f7bd316291c002ae2efd24f83f9e3441203fc081c0c23dc3092a454ca8a082b27f631abf73aca341686982e8fbda7e0e7d863941d68f3de4a755c2964407f4b5e0477b3196b8c93d551dd23c8beef7d0f03fbb1b6066f78907faf4bf1677d8fcec72651124080e0b7feae6b476e72ab207d38d90b958759fdedfc3c6c35717c9dbfc979b3cfbbff0a76d24a5e57056bb88acbd2a901ef64bc6e4db02adc05b6250ff378de81dca18c1910ab257dff1b9771b85bb9bbe0a69f5989e6d1710a35e6dfcceb7d8fb5ccea8db3932b3d9ff3fe0d327597c68b3622aec8e3716c83a6c93f497543b459b58ba504ed6bcaa747d37d2ca746fe49ae0a6ce4a8b694234e941b5159ff8bd34b9023da2814076163b86f40eed7c9472f81b551452d5ab87004a373c0172ec87ea6ce42ccfa7dbdad66b745496c4873d8019e8c28d6b3"
-
-const signatureEncryptedMessage2Hex = "c24604001102000605024dfd0166000a091033af447ccd759b09bae600a096ec5e63ecf0a403085e10f75cc3bab327663282009f51fad9df457ed8d2b70d8a73c76e0443eac0f377"
-
-const symmetricallyEncryptedCompressedHex = "c32e040903085a357c1a7b5614ed00cc0d1d92f428162058b3f558a0fb0980d221ebac6c97d5eda4e0fe32f6e706e94dd263012d6ca1ef8c4bbd324098225e603a10c85ebf09cbf7b5aeeb5ce46381a52edc51038b76a8454483be74e6dcd1e50d5689a8ae7eceaeefed98a0023d49b22eb1f65c2aa1ef1783bb5e1995713b0457102ec3c3075fe871267ffa4b686ad5d52000d857"
-
-const dsaTestKeyHex = "9901a2044d6c49de110400cb5ce438cf9250907ac2ba5bf6547931270b89f7c4b53d9d09f4d0213a5ef2ec1f26806d3d259960f872a4a102ef1581ea3f6d6882d15134f21ef6a84de933cc34c47cc9106efe3bd84c6aec12e78523661e29bc1a61f0aab17fa58a627fd5fd33f5149153fbe8cd70edf3d963bc287ef875270ff14b5bfdd1bca4483793923b00a0fe46d76cb6e4cbdc568435cd5480af3266d610d303fe33ae8273f30a96d4d34f42fa28ce1112d425b2e3bf7ea553d526e2db6b9255e9dc7419045ce817214d1a0056dbc8d5289956a4b1b69f20f1105124096e6a438f41f2e2495923b0f34b70642607d45559595c7fe94d7fa85fc41bf7d68c1fd509ebeaa5f315f6059a446b9369c277597e4f474a9591535354c7e7f4fd98a08aa60400b130c24ff20bdfbf683313f5daebf1c9b34b3bdadfc77f2ddd72ee1fb17e56c473664bc21d66467655dd74b9005e3a2bacce446f1920cd7017231ae447b67036c9b431b8179deacd5120262d894c26bc015bffe3d827ba7087ad9b700d2ca1f6d16cc1786581e5dd065f293c31209300f9b0afcc3f7c08dd26d0a22d87580b4db41054657374204b65792033202844534129886204131102002205024d6c49de021b03060b090807030206150802090a0b0416020301021e01021780000a0910338934250ccc03607e0400a0bdb9193e8a6b96fc2dfc108ae848914b504481f100a09c4dc148cb693293a67af24dd40d2b13a9e36794"
-
-const dsaTestKeyPrivateHex = "9501bb044d6c49de110400cb5ce438cf9250907ac2ba5bf6547931270b89f7c4b53d9d09f4d0213a5ef2ec1f26806d3d259960f872a4a102ef1581ea3f6d6882d15134f21ef6a84de933cc34c47cc9106efe3bd84c6aec12e78523661e29bc1a61f0aab17fa58a627fd5fd33f5149153fbe8cd70edf3d963bc287ef875270ff14b5bfdd1bca4483793923b00a0fe46d76cb6e4cbdc568435cd5480af3266d610d303fe33ae8273f30a96d4d34f42fa28ce1112d425b2e3bf7ea553d526e2db6b9255e9dc7419045ce817214d1a0056dbc8d5289956a4b1b69f20f1105124096e6a438f41f2e2495923b0f34b70642607d45559595c7fe94d7fa85fc41bf7d68c1fd509ebeaa5f315f6059a446b9369c277597e4f474a9591535354c7e7f4fd98a08aa60400b130c24ff20bdfbf683313f5daebf1c9b34b3bdadfc77f2ddd72ee1fb17e56c473664bc21d66467655dd74b9005e3a2bacce446f1920cd7017231ae447b67036c9b431b8179deacd5120262d894c26bc015bffe3d827ba7087ad9b700d2ca1f6d16cc1786581e5dd065f293c31209300f9b0afcc3f7c08dd26d0a22d87580b4d00009f592e0619d823953577d4503061706843317e4fee083db41054657374204b65792033202844534129886204131102002205024d6c49de021b03060b090807030206150802090a0b0416020301021e01021780000a0910338934250ccc03607e0400a0bdb9193e8a6b96fc2dfc108ae848914b504481f100a09c4dc148cb693293a67af24dd40d2b13a9e36794"
-
-const p256TestKeyHex = "98520456e5b83813082a8648ce3d030107020304a2072cd6d21321266c758cc5b83fab0510f751cb8d91897cddb7047d8d6f185546e2107111b0a95cb8ef063c33245502af7a65f004d5919d93ee74eb71a66253b424502d3235362054657374204b6579203c696e76616c6964406578616d706c652e636f6d3e8879041313080021050256e5b838021b03050b09080702061508090a0b020416020301021e01021780000a0910d44a2c495918513e54e50100dfa64f97d9b47766fc1943c6314ba3f2b2a103d71ad286dc5b1efb96a345b0c80100dbc8150b54241f559da6ef4baacea6d31902b4f4b1bdc09b34bf0502334b7754b8560456e5b83812082a8648ce3d030107020304bfe3cea9cee13486f8d518aa487fecab451f25467d2bf08e58f63e5fa525d5482133e6a79299c274b068ef0be448152ad65cf11cf764348588ca4f6a0bcf22b6030108078861041813080009050256e5b838021b0c000a0910d44a2c495918513e4a4800ff49d589fa64024ad30be363a032e3a0e0e6f5db56ba4c73db850518bf0121b8f20100fd78e065f4c70ea5be9df319ea67e493b936fc78da834a71828043d3154af56e"
-
-const p256TestKeyPrivateHex = "94a50456e5b83813082a8648ce3d030107020304a2072cd6d21321266c758cc5b83fab0510f751cb8d91897cddb7047d8d6f185546e2107111b0a95cb8ef063c33245502af7a65f004d5919d93ee74eb71a66253fe070302f0c2bfb0b6c30f87ee1599472b8636477eab23ced13b271886a4b50ed34c9d8436af5af5b8f88921f0efba6ef8c37c459bbb88bc1c6a13bbd25c4ce9b1e97679569ee77645d469bf4b43de637f5561b424502d3235362054657374204b6579203c696e76616c6964406578616d706c652e636f6d3e8879041313080021050256e5b838021b03050b09080702061508090a0b020416020301021e01021780000a0910d44a2c495918513e54e50100dfa64f97d9b47766fc1943c6314ba3f2b2a103d71ad286dc5b1efb96a345b0c80100dbc8150b54241f559da6ef4baacea6d31902b4f4b1bdc09b34bf0502334b77549ca90456e5b83812082a8648ce3d030107020304bfe3cea9cee13486f8d518aa487fecab451f25467d2bf08e58f63e5fa525d5482133e6a79299c274b068ef0be448152ad65cf11cf764348588ca4f6a0bcf22b603010807fe0703027510012471a603cfee2968dce19f732721ddf03e966fd133b4e3c7a685b788705cbc46fb026dc94724b830c9edbaecd2fb2c662f23169516cacd1fe423f0475c364ecc10abcabcfd4bbbda1a36a1bd8861041813080009050256e5b838021b0c000a0910d44a2c495918513e4a4800ff49d589fa64024ad30be363a032e3a0e0e6f5db56ba4c73db850518bf0121b8f20100fd78e065f4c70ea5be9df319ea67e493b936fc78da834a71828043d3154af56e"
-
-const armoredPrivateKeyBlock = `-----BEGIN PGP PRIVATE KEY BLOCK-----
-Version: GnuPG v1.4.10 (GNU/Linux)
-
-lQHYBE2rFNoBBADFwqWQIW/DSqcB4yCQqnAFTJ27qS5AnB46ccAdw3u4Greeu3Bp
-idpoHdjULy7zSKlwR1EA873dO/k/e11Ml3dlAFUinWeejWaK2ugFP6JjiieSsrKn
-vWNicdCS4HTWn0X4sjl0ZiAygw6GNhqEQ3cpLeL0g8E9hnYzJKQ0LWJa0QARAQAB
-AAP/TB81EIo2VYNmTq0pK1ZXwUpxCrvAAIG3hwKjEzHcbQznsjNvPUihZ+NZQ6+X
-0HCfPAdPkGDCLCb6NavcSW+iNnLTrdDnSI6+3BbIONqWWdRDYJhqZCkqmG6zqSfL
-IdkJgCw94taUg5BWP/AAeQrhzjChvpMQTVKQL5mnuZbUCeMCAN5qrYMP2S9iKdnk
-VANIFj7656ARKt/nf4CBzxcpHTyB8+d2CtPDKCmlJP6vL8t58Jmih+kHJMvC0dzn
-gr5f5+sCAOOe5gt9e0am7AvQWhdbHVfJU0TQJx+m2OiCJAqGTB1nvtBLHdJnfdC9
-TnXXQ6ZXibqLyBies/xeY2sCKL5qtTMCAKnX9+9d/5yQxRyrQUHt1NYhaXZnJbHx
-q4ytu0eWz+5i68IYUSK69jJ1NWPM0T6SkqpB3KCAIv68VFm9PxqG1KmhSrQIVGVz
-dCBLZXmIuAQTAQIAIgUCTasU2gIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AA
-CgkQO9o98PRieSoLhgQAkLEZex02Qt7vGhZzMwuN0R22w3VwyYyjBx+fM3JFETy1
-ut4xcLJoJfIaF5ZS38UplgakHG0FQ+b49i8dMij0aZmDqGxrew1m4kBfjXw9B/v+
-eIqpODryb6cOSwyQFH0lQkXC040pjq9YqDsO5w0WYNXYKDnzRV0p4H1pweo2VDid
-AdgETasU2gEEAN46UPeWRqKHvA99arOxee38fBt2CI08iiWyI8T3J6ivtFGixSqV
-bRcPxYO/qLpVe5l84Nb3X71GfVXlc9hyv7CD6tcowL59hg1E/DC5ydI8K8iEpUmK
-/UnHdIY5h8/kqgGxkY/T/hgp5fRQgW1ZoZxLajVlMRZ8W4tFtT0DeA+JABEBAAEA
-A/0bE1jaaZKj6ndqcw86jd+QtD1SF+Cf21CWRNeLKnUds4FRRvclzTyUMuWPkUeX
-TaNNsUOFqBsf6QQ2oHUBBK4VCHffHCW4ZEX2cd6umz7mpHW6XzN4DECEzOVksXtc
-lUC1j4UB91DC/RNQqwX1IV2QLSwssVotPMPqhOi0ZLNY7wIA3n7DWKInxYZZ4K+6
-rQ+POsz6brEoRHwr8x6XlHenq1Oki855pSa1yXIARoTrSJkBtn5oI+f8AzrnN0BN
-oyeQAwIA/7E++3HDi5aweWrViiul9cd3rcsS0dEnksPhvS0ozCJiHsq/6GFmy7J8
-QSHZPteedBnZyNp5jR+H7cIfVN3KgwH/Skq4PsuPhDq5TKK6i8Pc1WW8MA6DXTdU
-nLkX7RGmMwjC0DBf7KWAlPjFaONAX3a8ndnz//fy1q7u2l9AZwrj1qa1iJ8EGAEC
-AAkFAk2rFNoCGwwACgkQO9o98PRieSo2/QP/WTzr4ioINVsvN1akKuekmEMI3LAp
-BfHwatufxxP1U+3Si/6YIk7kuPB9Hs+pRqCXzbvPRrI8NHZBmc8qIGthishdCYad
-AHcVnXjtxrULkQFGbGvhKURLvS9WnzD/m1K2zzwxzkPTzT9/Yf06O6Mal5AdugPL
-VrM0m72/jnpKo04=
-=zNCn
------END PGP PRIVATE KEY BLOCK-----`
-
-const e2ePublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-Charset: UTF-8
-
-xv8AAABSBAAAAAATCCqGSM49AwEHAgME1LRoXSpOxtHXDUdmuvzchyg6005qIBJ4
-sfaSxX7QgH9RV2ONUhC+WiayCNADq+UMzuR/vunSr4aQffXvuGnR383/AAAAFDxk
-Z2lsQHlhaG9vLWluYy5jb20+wv8AAACGBBATCAA4/wAAAAWCVGvAG/8AAAACiwn/
-AAAACZC2VkQCOjdvYf8AAAAFlQgJCgv/AAAAA5YBAv8AAAACngEAAE1BAP0X8veD
-24IjmI5/C6ZAfVNXxgZZFhTAACFX75jUA3oD6AEAzoSwKf1aqH6oq62qhCN/pekX
-+WAsVMBhNwzLpqtCRjLO/wAAAFYEAAAAABIIKoZIzj0DAQcCAwT50ain7vXiIRv8
-B1DO3x3cE/aattZ5sHNixJzRCXi2vQIA5QmOxZ6b5jjUekNbdHG3SZi1a2Ak5mfX
-fRxC/5VGAwEIB8L/AAAAZQQYEwgAGP8AAAAFglRrwBz/AAAACZC2VkQCOjdvYQAA
-FJAA9isX3xtGyMLYwp2F3nXm7QEdY5bq5VUcD/RJlj792VwA/1wH0pCzVLl4Q9F9
-ex7En5r7rHR5xwX82Msc+Rq9dSyO
-=7MrZ
------END PGP PUBLIC KEY BLOCK-----`
-
-const dsaKeyWithSHA512 = `9901a2044f04b07f110400db244efecc7316553ee08d179972aab87bb1214de7692593fcf5b6feb1c80fba268722dd464748539b85b81d574cd2d7ad0ca2444de4d849b8756bad7768c486c83a824f9bba4af773d11742bdfb4ac3b89ef8cc9452d4aad31a37e4b630d33927bff68e879284a1672659b8b298222fc68f370f3e24dccacc4a862442b9438b00a0ea444a24088dc23e26df7daf8f43cba3bffc4fe703fe3d6cd7fdca199d54ed8ae501c30e3ec7871ea9cdd4cf63cfe6fc82281d70a5b8bb493f922cd99fba5f088935596af087c8d818d5ec4d0b9afa7f070b3d7c1dd32a84fca08d8280b4890c8da1dde334de8e3cad8450eed2a4a4fcc2db7b8e5528b869a74a7f0189e11ef097ef1253582348de072bb07a9fa8ab838e993cef0ee203ff49298723e2d1f549b00559f886cd417a41692ce58d0ac1307dc71d85a8af21b0cf6eaa14baf2922d3a70389bedf17cc514ba0febbd107675a372fe84b90162a9e88b14d4b1c6be855b96b33fb198c46f058568817780435b6936167ebb3724b680f32bf27382ada2e37a879b3d9de2abe0c3f399350afd1ad438883f4791e2e3b4184453412068617368207472756e636174696f6e207465737488620413110a002205024f04b07f021b03060b090807030206150802090a0b0416020301021e01021780000a0910ef20e0cefca131581318009e2bf3bf047a44d75a9bacd00161ee04d435522397009a03a60d51bd8a568c6c021c8d7cf1be8d990d6417b0020003`
-
-const unknownHashFunctionHex = `8a00000040040001990006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101010101`
-
-const rsaSignatureBadMPIlength = `8a00000040040001030006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101010101`
-
-const missingHashFunctionHex = `8a00000040040001030006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101010101`
-
-const campbellQuine = `a0b001000300fcffa0b001000d00f2ff000300fcffa0b001000d00f2ff8270a01c00000500faff8270a01c00000500faff000500faff001400ebff8270a01c00000500faff000500faff001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400000000ffff000000ffff000b00f4ff428821c400000000ffff000000ffff000b00f4ff0233214c40000100feff000233214c40000100feff0000`
-
-const keyV4forVerifyingSignedMessageV3 = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-Comment: GPGTools - https://gpgtools.org
-
-mI0EVfxoFQEEAMBIqmbDfYygcvP6Phr1wr1XI41IF7Qixqybs/foBF8qqblD9gIY
-BKpXjnBOtbkcVOJ0nljd3/sQIfH4E0vQwK5/4YRQSI59eKOqd6Fx+fWQOLG+uu6z
-tewpeCj9LLHvibx/Sc7VWRnrznia6ftrXxJ/wHMezSab3tnGC0YPVdGNABEBAAG0
-JEdvY3J5cHRvIFRlc3QgS2V5IDx0aGVtYXhAZ21haWwuY29tPoi5BBMBCgAjBQJV
-/GgVAhsDBwsJCAcDAgEGFQgCCQoLBBYCAwECHgECF4AACgkQeXnQmhdGW9PFVAP+
-K7TU0qX5ArvIONIxh/WAweyOk884c5cE8f+3NOPOOCRGyVy0FId5A7MmD5GOQh4H
-JseOZVEVCqlmngEvtHZb3U1VYtVGE5WZ+6rQhGsMcWP5qaT4soYwMBlSYxgYwQcx
-YhN9qOr292f9j2Y//TTIJmZT4Oa+lMxhWdqTfX+qMgG4jQRV/GgVAQQArhFSiij1
-b+hT3dnapbEU+23Z1yTu1DfF6zsxQ4XQWEV3eR8v+8mEDDNcz8oyyF56k6UQ3rXi
-UMTIwRDg4V6SbZmaFbZYCOwp/EmXJ3rfhm7z7yzXj2OFN22luuqbyVhuL7LRdB0M
-pxgmjXb4tTvfgKd26x34S+QqUJ7W6uprY4sAEQEAAYifBBgBCgAJBQJV/GgVAhsM
-AAoJEHl50JoXRlvT7y8D/02ckx4OMkKBZo7viyrBw0MLG92i+DC2bs35PooHR6zz
-786mitjOp5z2QWNLBvxC70S0qVfCIz8jKupO1J6rq6Z8CcbLF3qjm6h1omUBf8Nd
-EfXKD2/2HV6zMKVknnKzIEzauh+eCKS2CeJUSSSryap/QLVAjRnckaES/OsEWhNB
-=RZia
------END PGP PUBLIC KEY BLOCK-----
-`
-
-const signedMessageV3 = `-----BEGIN PGP MESSAGE-----
-Comment: GPGTools - https://gpgtools.org
-
-owGbwMvMwMVYWXlhlrhb9GXG03JJDKF/MtxDMjKLFYAoUaEktbhEITe1uDgxPVWP
-q5NhKjMrWAVcC9evD8z/bF/uWNjqtk/X3y5/38XGRQHm/57rrDRYuGnTw597Xqka
-uM3137/hH3Os+Jf2dc0fXOITKwJvXJvecPVs0ta+Vg7ZO1MLn8w58Xx+6L58mbka
-DGHyU9yTueZE8D+QF/Tz28Y78dqtF56R1VPn9Xw4uJqrWYdd7b3vIZ1V6R4Nh05d
-iT57d/OhWwA=
-=hG7R
------END PGP MESSAGE-----
-`
-
-// https://mailarchive.ietf.org/arch/msg/openpgp/9SheW_LENE0Kxf7haNllovPyAdY/
-const v5PrivKey = `-----BEGIN PGP PRIVATE KEY BLOCK-----
-
-lGEFXJH05BYAAAAtCSsGAQQB2kcPAQEHQFhZlVcVVtwf+21xNQPX+ecMJJBL0MPd
-fj75iux+my8QAAAAAAAiAQCHZ1SnSUmWqxEsoI6facIVZQu6mph3cBFzzTvcm5lA
-Ng5ctBhlbW1hLmdvbGRtYW5AZXhhbXBsZS5uZXSIlgUTFggASCIhBRk0e8mHJGQC
-X5nfPsLgAA7ZiEiS4fez6kyUAJFZVptUBQJckfTkAhsDBQsJCAcCAyICAQYVCgkI
-CwIEFgIDAQIeBwIXgAAA9cAA/jiR3yMsZMeEQ40u6uzEoXa6UXeV/S3wwJAXRJy9
-M8s0AP9vuL/7AyTfFXwwzSjDnYmzS0qAhbLDQ643N+MXGBJ2BZxmBVyR9OQSAAAA
-MgorBgEEAZdVAQUBAQdA+nysrzml2UCweAqtpDuncSPlvrcBWKU0yfU0YvYWWAoD
-AQgHAAAAAAAiAP9OdAPppjU1WwpqjIItkxr+VPQRT8Zm/Riw7U3F6v3OiBFHiHoF
-GBYIACwiIQUZNHvJhyRkAl+Z3z7C4AAO2YhIkuH3s+pMlACRWVabVAUCXJH05AIb
-DAAAOSQBAP4BOOIR/sGLNMOfeb5fPs/02QMieoiSjIBnijhob2U5AQC+RtOHCHx7
-TcIYl5/Uyoi+FOvPLcNw4hOv2nwUzSSVAw==
-=IiS2
------END PGP PRIVATE KEY BLOCK-----`
-
-// See OpenPGP crypto refresh Section A.3.
-const v6PrivKey = `-----BEGIN PGP PRIVATE KEY BLOCK-----
-
-xUsGY4d/4xsAAAAg+U2nu0jWCmHlZ3BqZYfQMxmZu52JGggkLq2EVD34laMAGXKB
-exK+cH6NX1hs5hNhIB00TrJmosgv3mg1ditlsLfCsQYfGwoAAABCBYJjh3/jAwsJ
-BwUVCg4IDAIWAAKbAwIeCSIhBssYbE8GCaaX5NUt+mxyKwwfHifBilZwj2Ul7Ce6
-2azJBScJAgcCAAAAAK0oIBA+LX0ifsDm185Ecds2v8lwgyU2kCcUmKfvBXbAf6rh
-RYWzuQOwEn7E/aLwIwRaLsdry0+VcallHhSu4RN6HWaEQsiPlR4zxP/TP7mhfVEe
-7XWPxtnMUMtf15OyA51YBMdLBmOHf+MZAAAAIIaTJINn+eUBXbki+PSAld2nhJh/
-LVmFsS+60WyvXkQ1AE1gCk95TUR3XFeibg/u/tVY6a//1q0NWC1X+yui3O24wpsG
-GBsKAAAALAWCY4d/4wKbDCIhBssYbE8GCaaX5NUt+mxyKwwfHifBilZwj2Ul7Ce6
-2azJAAAAAAQBIKbpGG2dWTX8j+VjFM21J0hqWlEg+bdiojWnKfA5AQpWUWtnNwDE
-M0g12vYxoWM8Y81W+bHBw805I8kWVkXU6vFOi+HWvv/ira7ofJu16NnoUkhclkUr
-k0mXubZvyl4GBg==
------END PGP PRIVATE KEY BLOCK-----`
-
-// See OpenPGP crypto refresh merge request:
-// https://gitlab.com/openpgp-wg/rfc4880bis/-/merge_requests/304
-const v6PrivKeyMsg = `-----BEGIN PGP MESSAGE-----
-
-wV0GIQYSyD8ecG9jCP4VGkF3Q6HwM3kOk+mXhIjR2zeNqZMIhRmHzxjV8bU/gXzO
-WgBM85PMiVi93AZfJfhK9QmxfdNnZBjeo1VDeVZheQHgaVf7yopqR6W1FT6NOrfS
-aQIHAgZhZBZTW+CwcW1g4FKlbExAf56zaw76/prQoN+bAzxpohup69LA7JW/Vp0l
-yZnuSj3hcFj0DfqLTGgr4/u717J+sPWbtQBfgMfG9AOIwwrUBqsFE9zW+f1zdlYo
-bhF30A+IitsxxA==
------END PGP MESSAGE-----`
-
-// See OpenPGP crypto refresh merge request:
-// https://gitlab.com/openpgp-wg/rfc4880bis/-/merge_requests/305
-const v6PrivKeyInlineSignMsg = `-----BEGIN PGP MESSAGE-----
-
-wV0GIQYSyD8ecG9jCP4VGkF3Q6HwM3kOk+mXhIjR2zeNqZMIhRmHzxjV8bU/gXzO
-WgBM85PMiVi93AZfJfhK9QmxfdNnZBjeo1VDeVZheQHgaVf7yopqR6W1FT6NOrfS
-aQIHAgZhZBZTW+CwcW1g4FKlbExAf56zaw76/prQoN+bAzxpohup69LA7JW/Vp0l
-yZnuSj3hcFj0DfqLTGgr4/u717J+sPWbtQBfgMfG9AOIwwrUBqsFE9zW+f1zdlYo
-bhF30A+IitsxxA==
------END PGP MESSAGE-----`
-
-// See https://gitlab.com/openpgp-wg/rfc4880bis/-/merge_requests/274
-// decryption password: "correct horse battery staple"
-const v6ArgonSealedPrivKey = `-----BEGIN PGP PRIVATE KEY BLOCK-----
-
-xYIGY4d/4xsAAAAg+U2nu0jWCmHlZ3BqZYfQMxmZu52JGggkLq2EVD34laP9JgkC
-FARdb9ccngltHraRe25uHuyuAQQVtKipJ0+r5jL4dacGWSAheCWPpITYiyfyIOPS
-3gIDyg8f7strd1OB4+LZsUhcIjOMpVHgmiY/IutJkulneoBYwrEGHxsKAAAAQgWC
-Y4d/4wMLCQcFFQoOCAwCFgACmwMCHgkiIQbLGGxPBgmml+TVLfpscisMHx4nwYpW
-cI9lJewnutmsyQUnCQIHAgAAAACtKCAQPi19In7A5tfORHHbNr/JcIMlNpAnFJin
-7wV2wH+q4UWFs7kDsBJ+xP2i8CMEWi7Ha8tPlXGpZR4UruETeh1mhELIj5UeM8T/
-0z+5oX1RHu11j8bZzFDLX9eTsgOdWATHggZjh3/jGQAAACCGkySDZ/nlAV25Ivj0
-gJXdp4SYfy1ZhbEvutFsr15ENf0mCQIUBA5hhGgp2oaavg6mFUXcFMwBBBUuE8qf
-9Ock+xwusd+GAglBr5LVyr/lup3xxQvHXFSjjA2haXfoN6xUGRdDEHI6+uevKjVR
-v5oAxgu7eJpaXNjCmwYYGwoAAAAsBYJjh3/jApsMIiEGyxhsTwYJppfk1S36bHIr
-DB8eJ8GKVnCPZSXsJ7rZrMkAAAAABAEgpukYbZ1ZNfyP5WMUzbUnSGpaUSD5t2Ki
-Nacp8DkBClZRa2c3AMQzSDXa9jGhYzxjzVb5scHDzTkjyRZWRdTq8U6L4da+/+Kt
-ruh8m7Xo2ehSSFyWRSuTSZe5tm/KXgYG
------END PGP PRIVATE KEY BLOCK-----`
-
-const v4Key25519 = `-----BEGIN PGP PRIVATE KEY BLOCK-----
-
-xUkEZB3qzRto01j2k2pwN5ux9w70stPinAdXULLr20CRW7U7h2GSeACch0M+
-qzQg8yjFQ8VBvu3uwgKH9senoHmj72lLSCLTmhFKzQR0ZXN0wogEEBsIAD4F
-gmQd6s0ECwkHCAmQIf45+TuC+xMDFQgKBBYAAgECGQECmwMCHgEWIQSWEzMi
-jJUHvyIbVKIh/jn5O4L7EwAAUhaHNlgudvxARdPPETUzVgjuWi+YIz8w1xIb
-lHQMvIrbe2sGCQIethpWofd0x7DHuv/ciHg+EoxJ/Td6h4pWtIoKx0kEZB3q
-zRm4CyA7quliq7yx08AoOqHTuuCgvpkSdEhpp3pEyejQOgBo0p6ywIiLPllY
-0t+jpNspHpAGfXID6oqjpYuJw3AfVRBlwnQEGBsIACoFgmQd6s0JkCH+Ofk7
-gvsTApsMFiEElhMzIoyVB78iG1SiIf45+TuC+xMAAGgQuN9G73446ykvJ/mL
-sCZ7zGFId2gBd1EnG0FTC4npfOKpck0X8dngByrCxU8LDSfvjsEp/xDAiKsQ
-aU71tdtNBQ==
-=e7jT
------END PGP PRIVATE KEY BLOCK-----`
-
-const keyWithExpiredCrossSig = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-
-xsDNBF2lnPIBDAC5cL9PQoQLTMuhjbYvb4Ncuuo0bfmgPRFywX53jPhoFf4Zg6mv
-/seOXpgecTdOcVttfzC8ycIKrt3aQTiwOG/ctaR4Bk/t6ayNFfdUNxHWk4WCKzdz
-/56fW2O0F23qIRd8UUJp5IIlN4RDdRCtdhVQIAuzvp2oVy/LaS2kxQoKvph/5pQ/
-5whqsyroEWDJoSV0yOb25B/iwk/pLUFoyhDG9bj0kIzDxrEqW+7Ba8nocQlecMF3
-X5KMN5kp2zraLv9dlBBpWW43XktjcCZgMy20SouraVma8Je/ECwUWYUiAZxLIlMv
-9CurEOtxUw6N3RdOtLmYZS9uEnn5y1UkF88o8Nku890uk6BrewFzJyLAx5wRZ4F0
-qV/yq36UWQ0JB/AUGhHVPdFf6pl6eaxBwT5GXvbBUibtf8YI2og5RsgTWtXfU7eb
-SGXrl5ZMpbA6mbfhd0R8aPxWfmDWiIOhBufhMCvUHh1sApMKVZnvIff9/0Dca3wb
-vLIwa3T4CyshfT0AEQEAAc0hQm9iIEJhYmJhZ2UgPGJvYkBvcGVucGdwLmV4YW1w
-bGU+wsEABBMBCgATBYJeO2eVAgsJAxUICgKbAQIeAQAhCRD7/MgqAV5zMBYhBNGm
-bhojsYLJmA94jPv8yCoBXnMwKWUMAJ3FKZfJ2mXvh+GFqgymvK4NoKkDRPB0CbUN
-aDdG7ZOizQrWXo7Da2MYIZ6eZUDqBKLdhZ5gZfVnisDfu/yeCgpENaKib1MPHpA8
-nZQjnPejbBDomNqY8HRzr5jvXNlwywBpjWGtegCKUY9xbSynjbfzIlMrWL4S+Rfl
-+bOOQKRyYJWXmECmVyqY8cz2VUYmETjNcwC8VCDUxQnhtcCJ7Aej22hfYwVEPb/J
-BsJBPq8WECCiGfJ9Y2y6TF+62KzG9Kfs5hqUeHhQy8V4TSi479ewwL7DH86XmIIK
-chSANBS+7iyMtctjNZfmF9zYdGJFvjI/mbBR/lK66E515Inuf75XnL8hqlXuwqvG
-ni+i03Aet1DzULZEIio4uIU6ioc1lGO9h7K2Xn4S7QQH1QoISNMWqXibUR0RCGjw
-FsEDTt2QwJl8XXxoJCooM7BCcCQo+rMNVUHDjIwrdoQjPld3YZsUQQRcqH6bLuln
-cfn5ufl8zTGWKydoj/iTz8KcjZ7w187AzQRdpZzyAQwA1jC/XGxjK6ddgrRfW9j+
-s/U00++EvIsgTs2kr3Rg0GP7FLWV0YNtR1mpl55/bEl7yAxCDTkOgPUMXcaKlnQh
-6zrlt6H53mF6Bvs3inOHQvOsGtU0dqvb1vkTF0juLiJgPlM7pWv+pNQ6IA39vKoQ
-sTMBv4v5vYNXP9GgKbg8inUNT17BxzZYHfw5+q63ectgDm2on1e8CIRCZ76oBVwz
-dkVxoy3gjh1eENlk2D4P0uJNZzF1Q8GV67yLANGMCDICE/OkWn6daipYDzW4iJQt
-YPUWP4hWhjdm+CK+hg6IQUEn2Vtvi16D2blRP8BpUNNa4fNuylWVuJV76rIHvsLZ
-1pbM3LHpRgE8s6jivS3Rz3WRs0TmWCNnvHPqWizQ3VTy+r3UQVJ5AmhJDrZdZq9i
-aUIuZ01PoE1+CHiJwuxPtWvVAxf2POcm1M/F1fK1J0e+lKlQuyonTXqXR22Y41wr
-fP2aPk3nPSTW2DUAf3vRMZg57ZpRxLEhEMxcM4/LMR+PABEBAAHCwrIEGAEKAAkF
-gl8sAVYCmwIB3QkQ+/zIKgFeczDA+qAEGQEKAAwFgl47Z5UFgwB4TOAAIQkQfC+q
-Tfk8N7IWIQQd3OFfCSF87i87N2B8L6pN+Tw3st58C/0exp0X2U4LqicSHEOSqHZj
-jiysdqIELHGyo5DSPv92UFPp36aqjF9OFgtNNwSa56fmAVCD4+hor/fKARRIeIjF
-qdIC5Y/9a4B10NQFJa5lsvB38x/d39LI2kEoglZnqWgdJskROo3vNQF4KlIcm6FH
-dn4WI8UkC5oUUcrpZVMSKoacIaxLwqnXT42nIVgYYuqrd/ZagZZjG5WlrTOd5+NI
-zi/l0fWProcPHGLjmAh4Thu8i7omtVw1nQaMnq9I77ffg3cPDgXknYrLL+q8xXh/
-0mEJyIhnmPwllWCSZuLv9DrD5pOexFfdlwXhf6cLzNpW6QhXD/Tf5KrqIPr9aOv8
-9xaEEXWh0vEby2kIsI2++ft+vfdIyxYw/wKqx0awTSnuBV1rG3z1dswX4BfoY66x
-Bz3KOVqlz9+mG/FTRQwrgPvR+qgLCHbuotxoGN7fzW+PI75hQG5JQAqhsC9sHjQH
-UrI21/VUNwzfw3v5pYsWuFb5bdQ3ASJetICQiMy7IW8WIQTRpm4aI7GCyZgPeIz7
-/MgqAV5zMG6/C/wLpPl/9e6Hf5wmXIUwpZNQbNZvpiCcyx9sXsHXaycOQVxn3McZ
-nYOUP9/mobl1tIeDQyTNbkxWjU0zzJl8XQsDZerb5098pg+x7oGIL7M1vn5s5JMl
-owROourqF88JEtOBxLMxlAM7X4hB48xKQ3Hu9hS1GdnqLKki4MqRGl4l5FUwyGOM
-GjyS3TzkfiDJNwQxybQiC9n57ij20ieNyLfuWCMLcNNnZUgZtnF6wCctoq/0ZIWu
-a7nvuA/XC2WW9YjEJJiWdy5109pqac+qWiY11HWy/nms4gpMdxVpT0RhrKGWq4o0
-M5q3ZElOoeN70UO3OSbU5EVrG7gB1GuwF9mTHUVlV0veSTw0axkta3FGT//XfSpD
-lRrCkyLzwq0M+UUHQAuYpAfobDlDdnxxOD2jm5GyTzak3GSVFfjW09QFVO6HlGp5
-01/jtzkUiS6nwoHHkfnyn0beZuR8X6KlcrzLB0VFgQFLmkSM9cSOgYhD0PTu9aHb
-hW1Hj9AO8lzggBQ=
-=Nt+N
------END PGP PUBLIC KEY BLOCK-----
-`
-
-const sigFromKeyWithExpiredCrossSig = `-----BEGIN PGP SIGNATURE-----
-
-wsDzBAABCgAGBYJfLAFsACEJEHwvqk35PDeyFiEEHdzhXwkhfO4vOzdgfC+qTfk8
-N7KiqwwAts4QGB7v9bABCC2qkTxJhmStC0wQMcHRcjL/qAiVnmasQWmvE9KVsdm3
-AaXd8mIx4a37/RRvr9dYrY2eE4uw72cMqPxNja2tvVXkHQvk1oEUqfkvbXs4ypKI
-NyeTWjXNOTZEbg0hbm3nMy+Wv7zgB1CEvAsEboLDJlhGqPcD+X8a6CJGrBGUBUrv
-KVmZr3U6vEzClz3DBLpoddCQseJRhT4YM1nKmBlZ5quh2LFgTSpajv5OsZheqt9y
-EZAPbqmLhDmWRQwGzkWHKceKS7nZ/ox2WK6OS7Ob8ZGZkM64iPo6/EGj5Yc19vQN
-AGiIaPEGszBBWlOpHTPhNm0LB0nMWqqaT87oNYwP8CQuuxDb6rKJ2lffCmZH27Lb
-UbQZcH8J+0UhpeaiadPZxH5ATJAcenmVtVVMLVOFnm+eIlxzov9ntpgGYt8hLdXB
-ITEG9mMgp3TGS9ZzSifMZ8UGtHdp9QdBg8NEVPFzDOMGxpc/Bftav7RRRuPiAER+
-7A5CBid5
-=aQkm
------END PGP SIGNATURE-----
-`
-
-const signedMessageWithCriticalNotation = `-----BEGIN PGP MESSAGE-----
-
-owGbwMvMwMH4oOW7S46CznTG09xJDDE3Wl1KUotLuDousDAwcjBYiSmyXL+48d6x
-U1PSGUxcj8IUszKBVMpMaWAAAgEGZpAeh9SKxNyCnFS95PzcytRiBi5OAZjyXXzM
-f8WYLqv7TXP61Sa4rqT12CI3xaN73YS2pt089f96odCKaEPnWJ3iSGmzJaW/ug10
-2Zo8Wj2k4s7t8wt4H3HtTu+y5UZfV3VOO+l//sdE/o+Lsub8FZH7/eOq7OnbNp4n
-vwjE8mqJXetNMfj8r2SCyvkEnlVRYR+/mnge+ib56FdJ8uKtqSxyvgA=
-=fRXs
------END PGP MESSAGE-----`
-
-const criticalNotationSigner = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-
-mI0EUmEvTgEEANyWtQQMOybQ9JltDqmaX0WnNPJeLILIM36sw6zL0nfTQ5zXSS3+
-fIF6P29lJFxpblWk02PSID5zX/DYU9/zjM2xPO8Oa4xo0cVTOTLj++Ri5mtr//f5
-GLsIXxFrBJhD/ghFsL3Op0GXOeLJ9A5bsOn8th7x6JucNKuaRB6bQbSPABEBAAG0
-JFRlc3QgTWNUZXN0aW5ndG9uIDx0ZXN0QGV4YW1wbGUuY29tPoi5BBMBAgAjBQJS
-YS9OAhsvBwsJCAcDAgEGFQgCCQoLBBYCAwECHgECF4AACgkQSmNhOk1uQJQwDAP6
-AgrTyqkRlJVqz2pb46TfbDM2TDF7o9CBnBzIGoxBhlRwpqALz7z2kxBDmwpQa+ki
-Bq3jZN/UosY9y8bhwMAlnrDY9jP1gdCo+H0sD48CdXybblNwaYpwqC8VSpDdTndf
-9j2wE/weihGp/DAdy/2kyBCaiOY1sjhUfJ1GogF49rC4jQRSYS9OAQQA6R/PtBFa
-JaT4jq10yqASk4sqwVMsc6HcifM5lSdxzExFP74naUMMyEsKHP53QxTF0Grqusag
-Qg/ZtgT0CN1HUM152y7ACOdp1giKjpMzOTQClqCoclyvWOFB+L/SwGEIJf7LSCEr
-woBuJifJc8xAVr0XX0JthoW+uP91eTQ3XpsAEQEAAYkBPQQYAQIACQUCUmEvTgIb
-LgCoCRBKY2E6TW5AlJ0gBBkBAgAGBQJSYS9OAAoJEOCE90RsICyXuqIEANmmiRCA
-SF7YK7PvFkieJNwzeK0V3F2lGX+uu6Y3Q/Zxdtwc4xR+me/CSBmsURyXTO29OWhP
-GLszPH9zSJU9BdDi6v0yNprmFPX/1Ng0Abn/sCkwetvjxC1YIvTLFwtUL/7v6NS2
-bZpsUxRTg9+cSrMWWSNjiY9qUKajm1tuzPDZXAUEAMNmAN3xXN/Kjyvj2OK2ck0X
-W748sl/tc3qiKPMJ+0AkMF7Pjhmh9nxqE9+QCEl7qinFqqBLjuzgUhBU4QlwX1GD
-AtNTq6ihLMD5v1d82ZC7tNatdlDMGWnIdvEMCv2GZcuIqDQ9rXWs49e7tq1NncLY
-hz3tYjKhoFTKEIq3y3Pp
-=h/aX
------END PGP PUBLIC KEY BLOCK-----`
-
-const keyv5Test = `-----BEGIN PGP PRIVATE KEY BLOCK-----
-Comment: Bob's OpenPGP Transferable Secret Key
-
-lQVYBF2lnPIBDAC5cL9PQoQLTMuhjbYvb4Ncuuo0bfmgPRFywX53jPhoFf4Zg6mv
-/seOXpgecTdOcVttfzC8ycIKrt3aQTiwOG/ctaR4Bk/t6ayNFfdUNxHWk4WCKzdz
-/56fW2O0F23qIRd8UUJp5IIlN4RDdRCtdhVQIAuzvp2oVy/LaS2kxQoKvph/5pQ/
-5whqsyroEWDJoSV0yOb25B/iwk/pLUFoyhDG9bj0kIzDxrEqW+7Ba8nocQlecMF3
-X5KMN5kp2zraLv9dlBBpWW43XktjcCZgMy20SouraVma8Je/ECwUWYUiAZxLIlMv
-9CurEOtxUw6N3RdOtLmYZS9uEnn5y1UkF88o8Nku890uk6BrewFzJyLAx5wRZ4F0
-qV/yq36UWQ0JB/AUGhHVPdFf6pl6eaxBwT5GXvbBUibtf8YI2og5RsgTWtXfU7eb
-SGXrl5ZMpbA6mbfhd0R8aPxWfmDWiIOhBufhMCvUHh1sApMKVZnvIff9/0Dca3wb
-vLIwa3T4CyshfT0AEQEAAQAL/RZqbJW2IqQDCnJi4Ozm++gPqBPiX1RhTWSjwxfM
-cJKUZfzLj414rMKm6Jh1cwwGY9jekROhB9WmwaaKT8HtcIgrZNAlYzANGRCM4TLK
-3VskxfSwKKna8l+s+mZglqbAjUg3wmFuf9Tj2xcUZYmyRm1DEmcN2ZzpvRtHgX7z
-Wn1mAKUlSDJZSQks0zjuMNbupcpyJokdlkUg2+wBznBOTKzgMxVNC9b2g5/tMPUs
-hGGWmF1UH+7AHMTaS6dlmr2ZBIyogdnfUqdNg5sZwsxSNrbglKP4sqe7X61uEAIQ
-bD7rT3LonLbhkrj3I8wilUD8usIwt5IecoHhd9HziqZjRCc1BUBkboUEoyedbDV4
-i4qfsFZ6CEWoLuD5pW7dEp0M+WeuHXO164Rc+LnH6i1VQrpb1Okl4qO6ejIpIjBI
-1t3GshtUu/mwGBBxs60KBX5g77mFQ9lLCRj8lSYqOsHRKBhUp4qM869VA+fD0BRP
-fqPT0I9IH4Oa/A3jYJcg622GwQYA1LhnP208Waf6PkQSJ6kyr8ymY1yVh9VBE/g6
-fRDYA+pkqKnw9wfH2Qho3ysAA+OmVOX8Hldg+Pc0Zs0e5pCavb0En8iFLvTA0Q2E
-LR5rLue9uD7aFuKFU/VdcddY9Ww/vo4k5p/tVGp7F8RYCFn9rSjIWbfvvZi1q5Tx
-+akoZbga+4qQ4WYzB/obdX6SCmi6BndcQ1QdjCCQU6gpYx0MddVERbIp9+2SXDyL
-hpxjSyz+RGsZi/9UAshT4txP4+MZBgDfK3ZqtW+h2/eMRxkANqOJpxSjMyLO/FXN
-WxzTDYeWtHNYiAlOwlQZEPOydZFty9IVzzNFQCIUCGjQ/nNyhw7adSgUk3+BXEx/
-MyJPYY0BYuhLxLYcrfQ9nrhaVKxRJj25SVHj2ASsiwGJRZW4CC3uw40OYxfKEvNC
-mer/VxM3kg8qqGf9KUzJ1dVdAvjyx2Hz6jY2qWCyRQ6IMjWHyd43C4r3jxooYKUC
-YnstRQyb/gCSKahveSEjo07CiXMr88UGALwzEr3npFAsPW3osGaFLj49y1oRe11E
-he9gCHFm+fuzbXrWmdPjYU5/ZdqdojzDqfu4ThfnipknpVUM1o6MQqkjM896FHm8
-zbKVFSMhEP6DPHSCexMFrrSgN03PdwHTO6iBaIBBFqmGY01tmJ03SxvSpiBPON9P
-NVvy/6UZFedTq8A07OUAxO62YUSNtT5pmK2vzs3SAZJmbFbMh+NN204TRI72GlqT
-t5hcfkuv8hrmwPS/ZR6q312mKQ6w/1pqO9qitCFCb2IgQmFiYmFnZSA8Ym9iQG9w
-ZW5wZ3AuZXhhbXBsZT6JAc4EEwEKADgCGwMFCwkIBwIGFQoJCAsCBBYCAwECHgEC
-F4AWIQTRpm4aI7GCyZgPeIz7/MgqAV5zMAUCXaWe+gAKCRD7/MgqAV5zMG9sC/9U
-2T3RrqEbw533FPNfEflhEVRIZ8gDXKM8hU6cqqEzCmzZT6xYTe6sv4y+PJBGXJFX
-yhj0g6FDkSyboM5litOcTupURObVqMgA/Y4UKERznm4fzzH9qek85c4ljtLyNufe
-doL2pp3vkGtn7eD0QFRaLLmnxPKQ/TlZKdLE1G3u8Uot8QHicaR6GnAdc5UXQJE3
-BiV7jZuDyWmZ1cUNwJkKL6oRtp+ZNDOQCrLNLecKHcgCqrpjSQG5oouba1I1Q6Vl
-sP44dhA1nkmLHtxlTOzpeHj4jnk1FaXmyasurrrI5CgU/L2Oi39DGKTH/A/cywDN
-4ZplIQ9zR8enkbXquUZvFDe+Xz+6xRXtb5MwQyWODB3nHw85HocLwRoIN9WdQEI+
-L8a/56AuOwhs8llkSuiITjR7r9SgKJC2WlAHl7E8lhJ3VDW3ELC56KH308d6mwOG
-ZRAqIAKzM1T5FGjMBhq7ZV0eqdEntBh3EcOIfj2M8rg1MzJv+0mHZOIjByawikad
-BVgEXaWc8gEMANYwv1xsYyunXYK0X1vY/rP1NNPvhLyLIE7NpK90YNBj+xS1ldGD
-bUdZqZeef2xJe8gMQg05DoD1DF3GipZ0Ies65beh+d5hegb7N4pzh0LzrBrVNHar
-29b5ExdI7i4iYD5TO6Vr/qTUOiAN/byqELEzAb+L+b2DVz/RoCm4PIp1DU9ewcc2
-WB38Ofqut3nLYA5tqJ9XvAiEQme+qAVcM3ZFcaMt4I4dXhDZZNg+D9LiTWcxdUPB
-leu8iwDRjAgyAhPzpFp+nWoqWA81uIiULWD1Fj+IVoY3ZvgivoYOiEFBJ9lbb4te
-g9m5UT/AaVDTWuHzbspVlbiVe+qyB77C2daWzNyx6UYBPLOo4r0t0c91kbNE5lgj
-Z7xz6los0N1U8vq91EFSeQJoSQ62XWavYmlCLmdNT6BNfgh4icLsT7Vr1QMX9jzn
-JtTPxdXytSdHvpSpULsqJ016l0dtmONcK3z9mj5N5z0k1tg1AH970TGYOe2aUcSx
-IRDMXDOPyzEfjwARAQABAAv9F2CwsjS+Sjh1M1vegJbZjei4gF1HHpEM0K0PSXsp
-SfVvpR4AoSJ4He6CXSMWg0ot8XKtDuZoV9jnJaES5UL9pMAD7JwIOqZm/DYVJM5h
-OASCh1c356/wSbFbzRHPtUdZO9Q30WFNJM5pHbCJPjtNoRmRGkf71RxtvHBzy7np
-Ga+W6U/NVKHw0i0CYwMI0YlKDakYW3Pm+QL+gHZFvngGweTod0f9l2VLLAmeQR/c
-+EZs7lNumhuZ8mXcwhUc9JQIhOkpO+wreDysEFkAcsKbkQP3UDUsA1gFx9pbMzT0
-tr1oZq2a4QBtxShHzP/ph7KLpN+6qtjks3xB/yjTgaGmtrwM8tSe0wD1RwXS+/1o
-BHpXTnQ7TfeOGUAu4KCoOQLv6ELpKWbRBLWuiPwMdbGpvVFALO8+kvKAg9/r+/ny
-zM2GQHY+J3Jh5JxPiJnHfXNZjIKLbFbIPdSKNyJBuazXW8xIa//mEHMI5OcvsZBK
-clAIp7LXzjEjKXIwHwDcTn9pBgDpdOKTHOtJ3JUKx0rWVsDH6wq6iKV/FTVSY5jl
-zN+puOEsskF1Lfxn9JsJihAVO3yNsp6RvkKtyNlFazaCVKtDAmkjoh60XNxcNRqr
-gCnwdpbgdHP6v/hvZY54ZaJjz6L2e8unNEkYLxDt8cmAyGPgH2XgL7giHIp9jrsQ
-aS381gnYwNX6wE1aEikgtY91nqJjwPlibF9avSyYQoMtEqM/1UjTjB2KdD/MitK5
-fP0VpvuXpNYZedmyq4UOMwdkiNMGAOrfmOeT0olgLrTMT5H97Cn3Yxbk13uXHNu/
-ZUZZNe8s+QtuLfUlKAJtLEUutN33TlWQY522FV0m17S+b80xJib3yZVJteVurrh5
-HSWHAM+zghQAvCesg5CLXa2dNMkTCmZKgCBvfDLZuZbjFwnwCI6u/NhOY9egKuUf
-SA/je/RXaT8m5VxLYMxwqQXKApzD87fv0tLPlVIEvjEsaf992tFEFSNPcG1l/jpd
-5AVXw6kKuf85UkJtYR1x2MkQDrqY1QX/XMw00kt8y9kMZUre19aCArcmor+hDhRJ
-E3Gt4QJrD9z/bICESw4b4z2DbgD/Xz9IXsA/r9cKiM1h5QMtXvuhyfVeM01enhxM
-GbOH3gjqqGNKysx0UODGEwr6AV9hAd8RWXMchJLaExK9J5SRawSg671ObAU24SdY
-vMQ9Z4kAQ2+1ReUZzf3ogSMRZtMT+d18gT6L90/y+APZIaoArLPhebIAGq39HLmJ
-26x3z0WAgrpA1kNsjXEXkoiZGPLKIGoe3hqJAbYEGAEKACAWIQTRpm4aI7GCyZgP
-eIz7/MgqAV5zMAUCXaWc8gIbDAAKCRD7/MgqAV5zMOn/C/9ugt+HZIwX308zI+QX
-c5vDLReuzmJ3ieE0DMO/uNSC+K1XEioSIZP91HeZJ2kbT9nn9fuReuoff0T0Dief
-rbwcIQQHFFkrqSp1K3VWmUGp2JrUsXFVdjy/fkBIjTd7c5boWljv/6wAsSfiv2V0
-JSM8EFU6TYXxswGjFVfc6X97tJNeIrXL+mpSmPPqy2bztcCCHkWS5lNLWQw+R7Vg
-71Fe6yBSNVrqC2/imYG2J9zlowjx1XU63Wdgqp2Wxt0l8OmsB/W80S1fRF5G4SDH
-s9HXglXXqPsBRZJYfP+VStm9L5P/sKjCcX6WtZR7yS6G8zj/X767MLK/djANvpPd
-NVniEke6hM3CNBXYPAMhQBMWhCulcoz+0lxi8L34rMN+Dsbma96psdUrn7uLaB91
-6we0CTfF8qqm7BsVAgalon/UUiuMY80U3ueoj3okiSTiHIjD/YtpXSPioC8nMng7
-xqAY9Bwizt4FWgXuLm1a4+So4V9j1TRCXd12Uc2l2RNmgDE=
-=miES
------END PGP PRIVATE KEY BLOCK-----
-`
-
-const certv5Test = `-----BEGIN PGP PRIVATE KEY BLOCK-----
-
-lGEFXJH05BYAAAAtCSsGAQQB2kcPAQEHQFhZlVcVVtwf+21xNQPX+ecMJJBL0MPd
-fj75iux+my8QAAAAAAAiAQCHZ1SnSUmWqxEsoI6facIVZQu6mph3cBFzzTvcm5lA
-Ng5ctBhlbW1hLmdvbGRtYW5AZXhhbXBsZS5uZXSIlgUTFggASCIhBRk0e8mHJGQC
-X5nfPsLgAA7ZiEiS4fez6kyUAJFZVptUBQJckfTkAhsDBQsJCAcCAyICAQYVCgkI
-CwIEFgIDAQIeBwIXgAAA9cAA/jiR3yMsZMeEQ40u6uzEoXa6UXeV/S3wwJAXRJy9
-M8s0AP9vuL/7AyTfFXwwzSjDnYmzS0qAhbLDQ643N+MXGBJ2BZxmBVyR9OQSAAAA
-MgorBgEEAZdVAQUBAQdA+nysrzml2UCweAqtpDuncSPlvrcBWKU0yfU0YvYWWAoD
-AQgHAAAAAAAiAP9OdAPppjU1WwpqjIItkxr+VPQRT8Zm/Riw7U3F6v3OiBFHiHoF
-GBYIACwiIQUZNHvJhyRkAl+Z3z7C4AAO2YhIkuH3s+pMlACRWVabVAUCXJH05AIb
-DAAAOSQBAP4BOOIR/sGLNMOfeb5fPs/02QMieoiSjIBnijhob2U5AQC+RtOHCHx7
-TcIYl5/Uyoi+FOvPLcNw4hOv2nwUzSSVAw==
-=IiS2
------END PGP PRIVATE KEY BLOCK-----
-`
-
-const msgv5Test = `-----BEGIN PGP MESSAGE-----
-
-wcDMA3wvqk35PDeyAQv+PcQiLsoYTH30nJYQh3j3cJaO2+jErtVCrIQRIU0+
-rmgMddERYST4A9mA0DQIiTI4FQ0Lp440D3BWCgpq3LlNWewGzduaWwym5rN6
-cwHz5ccDqOcqbd9X0GXXGy/ZH/ljSgzuVMIytMAXKdF/vrRrVgH/+I7cxvm9
-HwnhjMN5dF0j4aEt996H2T7cbtzSr2GN9SWGW8Gyu7I8Zx73hgrGUI7gDiJB
-Afaff+P6hfkkHSGOItr94dde8J/7AUF4VEwwxdVVPvsNEFyvv6gRIbYtOCa2
-6RE6h1V/QTxW2O7zZgzWALrE2ui0oaYr9QuqQSssd9CdgExLfdPbI+3/ZAnE
-v31Idzpk3/6ILiakYHtXkElPXvf46mCNpobty8ysT34irF+fy3C1p3oGwAsx
-5VDV9OSFU6z5U+UPbSPYAy9rkc5ZssuIKxCER2oTvZ2L8Q5cfUvEUiJtRGGn
-CJlHrVDdp3FssKv2tlKgLkvxJLyoOjuEkj44H1qRk+D02FzmmUT/0sAHAYYx
-lTir6mjHeLpcGjn4waUuWIAJyph8SxUexP60bic0L0NBa6Qp5SxxijKsPIDb
-FPHxWwfJSDZRrgUyYT7089YFB/ZM4FHyH9TZcnxn0f0xIB7NS6YNDsxzN2zT
-EVEYf+De4qT/dQTsdww78Chtcv9JY9r2kDm77dk2MUGHL2j7n8jasbLtgA7h
-pn2DMIWLrGamMLWRmlwslolKr1sMV5x8w+5Ias6C33iBMl9phkg42an0gYmc
-byVJHvLO/XErtC+GNIJeMg==
-=liRq
------END PGP MESSAGE-----
-`
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go
deleted file mode 100644
index f4f5c7832d4..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go
+++ /dev/null
@@ -1,410 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package s2k implements the various OpenPGP string-to-key transforms as
-// specified in RFC 4800 section 3.7.1, and Argon2 specified in
-// draft-ietf-openpgp-crypto-refresh-08 section 3.7.1.4.
-package s2k // import "github.com/ProtonMail/go-crypto/openpgp/s2k"
-
-import (
- "crypto"
- "hash"
- "io"
- "strconv"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
- "golang.org/x/crypto/argon2"
-)
-
-type Mode uint8
-
-// Defines the default S2KMode constants
-//
-// 0 (simple), 1(salted), 3(iterated), 4(argon2)
-const (
- SimpleS2K Mode = 0
- SaltedS2K Mode = 1
- IteratedSaltedS2K Mode = 3
- Argon2S2K Mode = 4
- GnuS2K Mode = 101
-)
-
-const Argon2SaltSize int = 16
-
-// Params contains all the parameters of the s2k packet
-type Params struct {
- // mode is the mode of s2k function.
- // It can be 0 (simple), 1(salted), 3(iterated)
- // 2(reserved) 100-110(private/experimental).
- mode Mode
- // hashId is the ID of the hash function used in any of the modes
- hashId byte
- // salt is a byte array to use as a salt in hashing process or argon2
- saltBytes [Argon2SaltSize]byte
- // countByte is used to determine how many rounds of hashing are to
- // be performed in s2k mode 3. See RFC 4880 Section 3.7.1.3.
- countByte byte
- // passes is a parameter in Argon2 to determine the number of iterations
- // See RFC the crypto refresh Section 3.7.1.4.
- passes byte
- // parallelism is a parameter in Argon2 to determine the degree of paralellism
- // See RFC the crypto refresh Section 3.7.1.4.
- parallelism byte
- // memoryExp is a parameter in Argon2 to determine the memory usage
- // i.e., 2 ** memoryExp kibibytes
- // See RFC the crypto refresh Section 3.7.1.4.
- memoryExp byte
-}
-
-// encodeCount converts an iterative "count" in the range 1024 to
-// 65011712, inclusive, to an encoded count. The return value is the
-// octet that is actually stored in the GPG file. encodeCount panics
-// if i is not in the above range (encodedCount above takes care to
-// pass i in the correct range). See RFC 4880 Section 3.7.7.1.
-func encodeCount(i int) uint8 {
- if i < 65536 || i > 65011712 {
- panic("count arg i outside the required range")
- }
-
- for encoded := 96; encoded < 256; encoded++ {
- count := decodeCount(uint8(encoded))
- if count >= i {
- return uint8(encoded)
- }
- }
-
- return 255
-}
-
-// decodeCount returns the s2k mode 3 iterative "count" corresponding to
-// the encoded octet c.
-func decodeCount(c uint8) int {
- return (16 + int(c&15)) << (uint32(c>>4) + 6)
-}
-
-// encodeMemory converts the Argon2 "memory" in the range parallelism*8 to
-// 2**31, inclusive, to an encoded memory. The return value is the
-// octet that is actually stored in the GPG file. encodeMemory panics
-// if is not in the above range
-// See OpenPGP crypto refresh Section 3.7.1.4.
-func encodeMemory(memory uint32, parallelism uint8) uint8 {
- if memory < (8*uint32(parallelism)) || memory > uint32(2147483648) {
- panic("Memory argument memory is outside the required range")
- }
-
- for exp := 3; exp < 31; exp++ {
- compare := decodeMemory(uint8(exp))
- if compare >= memory {
- return uint8(exp)
- }
- }
-
- return 31
-}
-
-// decodeMemory computes the decoded memory in kibibytes as 2**memoryExponent
-func decodeMemory(memoryExponent uint8) uint32 {
- return uint32(1) << memoryExponent
-}
-
-// Simple writes to out the result of computing the Simple S2K function (RFC
-// 4880, section 3.7.1.1) using the given hash and input passphrase.
-func Simple(out []byte, h hash.Hash, in []byte) {
- Salted(out, h, in, nil)
-}
-
-var zero [1]byte
-
-// Salted writes to out the result of computing the Salted S2K function (RFC
-// 4880, section 3.7.1.2) using the given hash, input passphrase and salt.
-func Salted(out []byte, h hash.Hash, in []byte, salt []byte) {
- done := 0
- var digest []byte
-
- for i := 0; done < len(out); i++ {
- h.Reset()
- for j := 0; j < i; j++ {
- h.Write(zero[:])
- }
- h.Write(salt)
- h.Write(in)
- digest = h.Sum(digest[:0])
- n := copy(out[done:], digest)
- done += n
- }
-}
-
-// Iterated writes to out the result of computing the Iterated and Salted S2K
-// function (RFC 4880, section 3.7.1.3) using the given hash, input passphrase,
-// salt and iteration count.
-func Iterated(out []byte, h hash.Hash, in []byte, salt []byte, count int) {
- combined := make([]byte, len(in)+len(salt))
- copy(combined, salt)
- copy(combined[len(salt):], in)
-
- if count < len(combined) {
- count = len(combined)
- }
-
- done := 0
- var digest []byte
- for i := 0; done < len(out); i++ {
- h.Reset()
- for j := 0; j < i; j++ {
- h.Write(zero[:])
- }
- written := 0
- for written < count {
- if written+len(combined) > count {
- todo := count - written
- h.Write(combined[:todo])
- written = count
- } else {
- h.Write(combined)
- written += len(combined)
- }
- }
- digest = h.Sum(digest[:0])
- n := copy(out[done:], digest)
- done += n
- }
-}
-
-// Argon2 writes to out the key derived from the password (in) with the Argon2
-// function (the crypto refresh, section 3.7.1.4)
-func Argon2(out []byte, in []byte, salt []byte, passes uint8, paralellism uint8, memoryExp uint8) {
- key := argon2.IDKey(in, salt, uint32(passes), decodeMemory(memoryExp), paralellism, uint32(len(out)))
- copy(out[:], key)
-}
-
-// Generate generates valid parameters from given configuration.
-// It will enforce the Iterated and Salted or Argon2 S2K method.
-func Generate(rand io.Reader, c *Config) (*Params, error) {
- var params *Params
- if c != nil && c.Mode() == Argon2S2K {
- // handle Argon2 case
- argonConfig := c.Argon2()
- params = &Params{
- mode: Argon2S2K,
- passes: argonConfig.Passes(),
- parallelism: argonConfig.Parallelism(),
- memoryExp: argonConfig.EncodedMemory(),
- }
- } else if c != nil && c.PassphraseIsHighEntropy && c.Mode() == SaltedS2K { // Allow SaltedS2K if PassphraseIsHighEntropy
- hashId, ok := algorithm.HashToHashId(c.hash())
- if !ok {
- return nil, errors.UnsupportedError("no such hash")
- }
-
- params = &Params{
- mode: SaltedS2K,
- hashId: hashId,
- }
- } else { // Enforce IteratedSaltedS2K method otherwise
- hashId, ok := algorithm.HashToHashId(c.hash())
- if !ok {
- return nil, errors.UnsupportedError("no such hash")
- }
- if c != nil {
- c.S2KMode = IteratedSaltedS2K
- }
- params = &Params{
- mode: IteratedSaltedS2K,
- hashId: hashId,
- countByte: c.EncodedCount(),
- }
- }
- if _, err := io.ReadFull(rand, params.salt()); err != nil {
- return nil, err
- }
- return params, nil
-}
-
-// Parse reads a binary specification for a string-to-key transformation from r
-// and returns a function which performs that transform. If the S2K is a special
-// GNU extension that indicates that the private key is missing, then the error
-// returned is errors.ErrDummyPrivateKey.
-func Parse(r io.Reader) (f func(out, in []byte), err error) {
- params, err := ParseIntoParams(r)
- if err != nil {
- return nil, err
- }
-
- return params.Function()
-}
-
-// ParseIntoParams reads a binary specification for a string-to-key
-// transformation from r and returns a struct describing the s2k parameters.
-func ParseIntoParams(r io.Reader) (params *Params, err error) {
- var buf [Argon2SaltSize + 3]byte
-
- _, err = io.ReadFull(r, buf[:1])
- if err != nil {
- return
- }
-
- params = &Params{
- mode: Mode(buf[0]),
- }
-
- switch params.mode {
- case SimpleS2K:
- _, err = io.ReadFull(r, buf[:1])
- if err != nil {
- return nil, err
- }
- params.hashId = buf[0]
- return params, nil
- case SaltedS2K:
- _, err = io.ReadFull(r, buf[:9])
- if err != nil {
- return nil, err
- }
- params.hashId = buf[0]
- copy(params.salt(), buf[1:9])
- return params, nil
- case IteratedSaltedS2K:
- _, err = io.ReadFull(r, buf[:10])
- if err != nil {
- return nil, err
- }
- params.hashId = buf[0]
- copy(params.salt(), buf[1:9])
- params.countByte = buf[9]
- return params, nil
- case Argon2S2K:
- _, err = io.ReadFull(r, buf[:Argon2SaltSize+3])
- if err != nil {
- return nil, err
- }
- copy(params.salt(), buf[:Argon2SaltSize])
- params.passes = buf[Argon2SaltSize]
- params.parallelism = buf[Argon2SaltSize+1]
- params.memoryExp = buf[Argon2SaltSize+2]
- return params, nil
- case GnuS2K:
- // This is a GNU extension. See
- // https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob;f=doc/DETAILS;h=fe55ae16ab4e26d8356dc574c9e8bc935e71aef1;hb=23191d7851eae2217ecdac6484349849a24fd94a#l1109
- if _, err = io.ReadFull(r, buf[:5]); err != nil {
- return nil, err
- }
- params.hashId = buf[0]
- if buf[1] == 'G' && buf[2] == 'N' && buf[3] == 'U' && buf[4] == 1 {
- return params, nil
- }
- return nil, errors.UnsupportedError("GNU S2K extension")
- }
-
- return nil, errors.UnsupportedError("S2K function")
-}
-
-func (params *Params) Dummy() bool {
- return params != nil && params.mode == GnuS2K
-}
-
-func (params *Params) salt() []byte {
- switch params.mode {
- case SaltedS2K, IteratedSaltedS2K:
- return params.saltBytes[:8]
- case Argon2S2K:
- return params.saltBytes[:Argon2SaltSize]
- default:
- return nil
- }
-}
-
-func (params *Params) Function() (f func(out, in []byte), err error) {
- if params.Dummy() {
- return nil, errors.ErrDummyPrivateKey("dummy key found")
- }
- var hashObj crypto.Hash
- if params.mode != Argon2S2K {
- var ok bool
- hashObj, ok = algorithm.HashIdToHashWithSha1(params.hashId)
- if !ok {
- return nil, errors.UnsupportedError("hash for S2K function: " + strconv.Itoa(int(params.hashId)))
- }
- if !hashObj.Available() {
- return nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashObj)))
- }
- }
-
- switch params.mode {
- case SimpleS2K:
- f := func(out, in []byte) {
- Simple(out, hashObj.New(), in)
- }
-
- return f, nil
- case SaltedS2K:
- f := func(out, in []byte) {
- Salted(out, hashObj.New(), in, params.salt())
- }
-
- return f, nil
- case IteratedSaltedS2K:
- f := func(out, in []byte) {
- Iterated(out, hashObj.New(), in, params.salt(), decodeCount(params.countByte))
- }
-
- return f, nil
- case Argon2S2K:
- f := func(out, in []byte) {
- Argon2(out, in, params.salt(), params.passes, params.parallelism, params.memoryExp)
- }
- return f, nil
- }
-
- return nil, errors.UnsupportedError("S2K function")
-}
-
-func (params *Params) Serialize(w io.Writer) (err error) {
- if _, err = w.Write([]byte{uint8(params.mode)}); err != nil {
- return
- }
- if params.mode != Argon2S2K {
- if _, err = w.Write([]byte{params.hashId}); err != nil {
- return
- }
- }
- if params.Dummy() {
- _, err = w.Write(append([]byte("GNU"), 1))
- return
- }
- if params.mode > 0 {
- if _, err = w.Write(params.salt()); err != nil {
- return
- }
- if params.mode == IteratedSaltedS2K {
- _, err = w.Write([]byte{params.countByte})
- }
- if params.mode == Argon2S2K {
- _, err = w.Write([]byte{params.passes, params.parallelism, params.memoryExp})
- }
- }
- return
-}
-
-// Serialize salts and stretches the given passphrase and writes the
-// resulting key into key. It also serializes an S2K descriptor to
-// w. The key stretching can be configured with c, which may be
-// nil. In that case, sensible defaults will be used.
-func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte, c *Config) error {
- params, err := Generate(rand, c)
- if err != nil {
- return err
- }
- err = params.Serialize(w)
- if err != nil {
- return err
- }
-
- f, err := params.Function()
- if err != nil {
- return err
- }
- f(key, passphrase)
- return nil
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_cache.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_cache.go
deleted file mode 100644
index 616e0d12c6c..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_cache.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package s2k
-
-// Cache stores keys derived with s2k functions from one passphrase
-// to avoid recomputation if multiple items are encrypted with
-// the same parameters.
-type Cache map[Params][]byte
-
-// GetOrComputeDerivedKey tries to retrieve the key
-// for the given s2k parameters from the cache.
-// If there is no hit, it derives the key with the s2k function from the passphrase,
-// updates the cache, and returns the key.
-func (c *Cache) GetOrComputeDerivedKey(passphrase []byte, params *Params, expectedKeySize int) ([]byte, error) {
- key, found := (*c)[*params]
- if !found || len(key) != expectedKeySize {
- var err error
- derivedKey := make([]byte, expectedKeySize)
- s2k, err := params.Function()
- if err != nil {
- return nil, err
- }
- s2k(derivedKey, passphrase)
- (*c)[*params] = key
- return derivedKey, nil
- }
- return key, nil
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_config.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_config.go
deleted file mode 100644
index b93db1ab853..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_config.go
+++ /dev/null
@@ -1,129 +0,0 @@
-package s2k
-
-import "crypto"
-
-// Config collects configuration parameters for s2k key-stretching
-// transformations. A nil *Config is valid and results in all default
-// values.
-type Config struct {
- // S2K (String to Key) mode, used for key derivation in the context of secret key encryption
- // and passphrase-encrypted data. Either s2k.Argon2S2K or s2k.IteratedSaltedS2K may be used.
- // If the passphrase is a high-entropy key, indicated by setting PassphraseIsHighEntropy to true,
- // s2k.SaltedS2K can also be used.
- // Note: Argon2 is the strongest option but not all OpenPGP implementations are compatible with it
- //(pending standardisation).
- // 0 (simple), 1(salted), 3(iterated), 4(argon2)
- // 2(reserved) 100-110(private/experimental).
- S2KMode Mode
- // Only relevant if S2KMode is not set to s2k.Argon2S2K.
- // Hash is the default hash function to be used. If
- // nil, SHA256 is used.
- Hash crypto.Hash
- // Argon2 parameters for S2K (String to Key).
- // Only relevant if S2KMode is set to s2k.Argon2S2K.
- // If nil, default parameters are used.
- // For more details on the choice of parameters, see https://tools.ietf.org/html/rfc9106#section-4.
- Argon2Config *Argon2Config
- // Only relevant if S2KMode is set to s2k.IteratedSaltedS2K.
- // Iteration count for Iterated S2K (String to Key). It
- // determines the strength of the passphrase stretching when
- // the said passphrase is hashed to produce a key. S2KCount
- // should be between 65536 and 65011712, inclusive. If Config
- // is nil or S2KCount is 0, the value 16777216 used. Not all
- // values in the above range can be represented. S2KCount will
- // be rounded up to the next representable value if it cannot
- // be encoded exactly. When set, it is strongly encrouraged to
- // use a value that is at least 65536. See RFC 4880 Section
- // 3.7.1.3.
- S2KCount int
- // Indicates whether the passphrase passed by the application is a
- // high-entropy key (e.g. it's randomly generated or derived from
- // another passphrase using a strong key derivation function).
- // When true, allows the S2KMode to be s2k.SaltedS2K.
- // When the passphrase is not a high-entropy key, using SaltedS2K is
- // insecure, and not allowed by draft-ietf-openpgp-crypto-refresh-08.
- PassphraseIsHighEntropy bool
-}
-
-// Argon2Config stores the Argon2 parameters
-// A nil *Argon2Config is valid and results in all default
-type Argon2Config struct {
- NumberOfPasses uint8
- DegreeOfParallelism uint8
- // Memory specifies the desired Argon2 memory usage in kibibytes.
- // For example memory=64*1024 sets the memory cost to ~64 MB.
- Memory uint32
-}
-
-func (c *Config) Mode() Mode {
- if c == nil {
- return IteratedSaltedS2K
- }
- return c.S2KMode
-}
-
-func (c *Config) hash() crypto.Hash {
- if c == nil || uint(c.Hash) == 0 {
- return crypto.SHA256
- }
-
- return c.Hash
-}
-
-func (c *Config) Argon2() *Argon2Config {
- if c == nil || c.Argon2Config == nil {
- return nil
- }
- return c.Argon2Config
-}
-
-// EncodedCount get encoded count
-func (c *Config) EncodedCount() uint8 {
- if c == nil || c.S2KCount == 0 {
- return 224 // The common case. Corresponding to 16777216
- }
-
- i := c.S2KCount
-
- switch {
- case i < 65536:
- i = 65536
- case i > 65011712:
- i = 65011712
- }
-
- return encodeCount(i)
-}
-
-func (c *Argon2Config) Passes() uint8 {
- if c == nil || c.NumberOfPasses == 0 {
- return 3
- }
- return c.NumberOfPasses
-}
-
-func (c *Argon2Config) Parallelism() uint8 {
- if c == nil || c.DegreeOfParallelism == 0 {
- return 4
- }
- return c.DegreeOfParallelism
-}
-
-func (c *Argon2Config) EncodedMemory() uint8 {
- if c == nil || c.Memory == 0 {
- return 16 // 64 MiB of RAM
- }
-
- memory := c.Memory
- lowerBound := uint32(c.Parallelism()) * 8
- upperBound := uint32(2147483648)
-
- switch {
- case memory < lowerBound:
- memory = lowerBound
- case memory > upperBound:
- memory = upperBound
- }
-
- return encodeMemory(memory, c.Parallelism())
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go
deleted file mode 100644
index 0db5526ce0e..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go
+++ /dev/null
@@ -1,614 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package openpgp
-
-import (
- "crypto"
- "hash"
- "io"
- "strconv"
- "time"
-
- "github.com/ProtonMail/go-crypto/openpgp/armor"
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
- "github.com/ProtonMail/go-crypto/openpgp/packet"
-)
-
-// DetachSign signs message with the private key from signer (which must
-// already have been decrypted) and writes the signature to w.
-// If config is nil, sensible defaults will be used.
-func DetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error {
- return detachSign(w, signer, message, packet.SigTypeBinary, config)
-}
-
-// ArmoredDetachSign signs message with the private key from signer (which
-// must already have been decrypted) and writes an armored signature to w.
-// If config is nil, sensible defaults will be used.
-func ArmoredDetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) (err error) {
- return armoredDetachSign(w, signer, message, packet.SigTypeBinary, config)
-}
-
-// DetachSignText signs message (after canonicalising the line endings) with
-// the private key from signer (which must already have been decrypted) and
-// writes the signature to w.
-// If config is nil, sensible defaults will be used.
-func DetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error {
- return detachSign(w, signer, message, packet.SigTypeText, config)
-}
-
-// ArmoredDetachSignText signs message (after canonicalising the line endings)
-// with the private key from signer (which must already have been decrypted)
-// and writes an armored signature to w.
-// If config is nil, sensible defaults will be used.
-func ArmoredDetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error {
- return armoredDetachSign(w, signer, message, packet.SigTypeText, config)
-}
-
-func armoredDetachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) {
- out, err := armor.Encode(w, SignatureType, nil)
- if err != nil {
- return
- }
- err = detachSign(out, signer, message, sigType, config)
- if err != nil {
- return
- }
- return out.Close()
-}
-
-func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) {
- signingKey, ok := signer.SigningKeyById(config.Now(), config.SigningKey())
- if !ok {
- return errors.InvalidArgumentError("no valid signing keys")
- }
- if signingKey.PrivateKey == nil {
- return errors.InvalidArgumentError("signing key doesn't have a private key")
- }
- if signingKey.PrivateKey.Encrypted {
- return errors.InvalidArgumentError("signing key is encrypted")
- }
- if _, ok := algorithm.HashToHashId(config.Hash()); !ok {
- return errors.InvalidArgumentError("invalid hash function")
- }
-
- sig := createSignaturePacket(signingKey.PublicKey, sigType, config)
-
- h, err := sig.PrepareSign(config)
- if err != nil {
- return
- }
- wrappedHash, err := wrapHashForSignature(h, sig.SigType)
- if err != nil {
- return
- }
- if _, err = io.Copy(wrappedHash, message); err != nil {
- return err
- }
-
- err = sig.Sign(h, signingKey.PrivateKey, config)
- if err != nil {
- return
- }
-
- return sig.Serialize(w)
-}
-
-// FileHints contains metadata about encrypted files. This metadata is, itself,
-// encrypted.
-type FileHints struct {
- // IsBinary can be set to hint that the contents are binary data.
- IsBinary bool
- // FileName hints at the name of the file that should be written. It's
- // truncated to 255 bytes if longer. It may be empty to suggest that the
- // file should not be written to disk. It may be equal to "_CONSOLE" to
- // suggest the data should not be written to disk.
- FileName string
- // ModTime contains the modification time of the file, or the zero time if not applicable.
- ModTime time.Time
-}
-
-// SymmetricallyEncrypt acts like gpg -c: it encrypts a file with a passphrase.
-// The resulting WriteCloser must be closed after the contents of the file have
-// been written.
-// If config is nil, sensible defaults will be used.
-func SymmetricallyEncrypt(ciphertext io.Writer, passphrase []byte, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
- if hints == nil {
- hints = &FileHints{}
- }
-
- key, err := packet.SerializeSymmetricKeyEncrypted(ciphertext, passphrase, config)
- if err != nil {
- return
- }
-
- var w io.WriteCloser
- cipherSuite := packet.CipherSuite{
- Cipher: config.Cipher(),
- Mode: config.AEAD().Mode(),
- }
- w, err = packet.SerializeSymmetricallyEncrypted(ciphertext, config.Cipher(), config.AEAD() != nil, cipherSuite, key, config)
- if err != nil {
- return
- }
-
- literalData := w
- if algo := config.Compression(); algo != packet.CompressionNone {
- var compConfig *packet.CompressionConfig
- if config != nil {
- compConfig = config.CompressionConfig
- }
- literalData, err = packet.SerializeCompressed(w, algo, compConfig)
- if err != nil {
- return
- }
- }
-
- var epochSeconds uint32
- if !hints.ModTime.IsZero() {
- epochSeconds = uint32(hints.ModTime.Unix())
- }
- return packet.SerializeLiteral(literalData, hints.IsBinary, hints.FileName, epochSeconds)
-}
-
-// intersectPreferences mutates and returns a prefix of a that contains only
-// the values in the intersection of a and b. The order of a is preserved.
-func intersectPreferences(a []uint8, b []uint8) (intersection []uint8) {
- var j int
- for _, v := range a {
- for _, v2 := range b {
- if v == v2 {
- a[j] = v
- j++
- break
- }
- }
- }
-
- return a[:j]
-}
-
-// intersectPreferences mutates and returns a prefix of a that contains only
-// the values in the intersection of a and b. The order of a is preserved.
-func intersectCipherSuites(a [][2]uint8, b [][2]uint8) (intersection [][2]uint8) {
- var j int
- for _, v := range a {
- for _, v2 := range b {
- if v[0] == v2[0] && v[1] == v2[1] {
- a[j] = v
- j++
- break
- }
- }
- }
-
- return a[:j]
-}
-
-func hashToHashId(h crypto.Hash) uint8 {
- v, ok := algorithm.HashToHashId(h)
- if !ok {
- panic("tried to convert unknown hash")
- }
- return v
-}
-
-// EncryptText encrypts a message to a number of recipients and, optionally,
-// signs it. Optional information is contained in 'hints', also encrypted, that
-// aids the recipients in processing the message. The resulting WriteCloser
-// must be closed after the contents of the file have been written. If config
-// is nil, sensible defaults will be used. The signing is done in text mode.
-func EncryptText(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
- return encrypt(ciphertext, ciphertext, to, signed, hints, packet.SigTypeText, config)
-}
-
-// Encrypt encrypts a message to a number of recipients and, optionally, signs
-// it. hints contains optional information, that is also encrypted, that aids
-// the recipients in processing the message. The resulting WriteCloser must
-// be closed after the contents of the file have been written.
-// If config is nil, sensible defaults will be used.
-func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
- return encrypt(ciphertext, ciphertext, to, signed, hints, packet.SigTypeBinary, config)
-}
-
-// EncryptSplit encrypts a message to a number of recipients and, optionally, signs
-// it. hints contains optional information, that is also encrypted, that aids
-// the recipients in processing the message. The resulting WriteCloser must
-// be closed after the contents of the file have been written.
-// If config is nil, sensible defaults will be used.
-func EncryptSplit(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
- return encrypt(keyWriter, dataWriter, to, signed, hints, packet.SigTypeBinary, config)
-}
-
-// EncryptTextSplit encrypts a message to a number of recipients and, optionally, signs
-// it. hints contains optional information, that is also encrypted, that aids
-// the recipients in processing the message. The resulting WriteCloser must
-// be closed after the contents of the file have been written.
-// If config is nil, sensible defaults will be used.
-func EncryptTextSplit(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
- return encrypt(keyWriter, dataWriter, to, signed, hints, packet.SigTypeText, config)
-}
-
-// writeAndSign writes the data as a payload package and, optionally, signs
-// it. hints contains optional information, that is also encrypted,
-// that aids the recipients in processing the message. The resulting
-// WriteCloser must be closed after the contents of the file have been
-// written. If config is nil, sensible defaults will be used.
-func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entity, hints *FileHints, sigType packet.SignatureType, config *packet.Config) (plaintext io.WriteCloser, err error) {
- var signer *packet.PrivateKey
- if signed != nil {
- signKey, ok := signed.SigningKeyById(config.Now(), config.SigningKey())
- if !ok {
- return nil, errors.InvalidArgumentError("no valid signing keys")
- }
- signer = signKey.PrivateKey
- if signer == nil {
- return nil, errors.InvalidArgumentError("no private key in signing key")
- }
- if signer.Encrypted {
- return nil, errors.InvalidArgumentError("signing key must be decrypted")
- }
- }
-
- var hash crypto.Hash
- for _, hashId := range candidateHashes {
- if h, ok := algorithm.HashIdToHash(hashId); ok && h.Available() {
- hash = h
- break
- }
- }
-
- // If the hash specified by config is a candidate, we'll use that.
- if configuredHash := config.Hash(); configuredHash.Available() {
- for _, hashId := range candidateHashes {
- if h, ok := algorithm.HashIdToHash(hashId); ok && h == configuredHash {
- hash = h
- break
- }
- }
- }
-
- if hash == 0 {
- hashId := candidateHashes[0]
- name, ok := algorithm.HashIdToString(hashId)
- if !ok {
- name = "#" + strconv.Itoa(int(hashId))
- }
- return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)")
- }
-
- var salt []byte
- if signer != nil {
- var opsVersion = 3
- if signer.Version == 6 {
- opsVersion = signer.Version
- }
- ops := &packet.OnePassSignature{
- Version: opsVersion,
- SigType: sigType,
- Hash: hash,
- PubKeyAlgo: signer.PubKeyAlgo,
- KeyId: signer.KeyId,
- IsLast: true,
- }
- if opsVersion == 6 {
- ops.KeyFingerprint = signer.Fingerprint
- salt, err = packet.SignatureSaltForHash(hash, config.Random())
- if err != nil {
- return nil, err
- }
- ops.Salt = salt
- }
- if err := ops.Serialize(payload); err != nil {
- return nil, err
- }
- }
-
- if hints == nil {
- hints = &FileHints{}
- }
-
- w := payload
- if signer != nil {
- // If we need to write a signature packet after the literal
- // data then we need to stop literalData from closing
- // encryptedData.
- w = noOpCloser{w}
-
- }
- var epochSeconds uint32
- if !hints.ModTime.IsZero() {
- epochSeconds = uint32(hints.ModTime.Unix())
- }
- literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds)
- if err != nil {
- return nil, err
- }
-
- if signer != nil {
- h, wrappedHash, err := hashForSignature(hash, sigType, salt)
- if err != nil {
- return nil, err
- }
- metadata := &packet.LiteralData{
- Format: 'u',
- FileName: hints.FileName,
- Time: epochSeconds,
- }
- if hints.IsBinary {
- metadata.Format = 'b'
- }
- return signatureWriter{payload, literalData, hash, wrappedHash, h, salt, signer, sigType, config, metadata}, nil
- }
- return literalData, nil
-}
-
-// encrypt encrypts a message to a number of recipients and, optionally, signs
-// it. hints contains optional information, that is also encrypted, that aids
-// the recipients in processing the message. The resulting WriteCloser must
-// be closed after the contents of the file have been written.
-// If config is nil, sensible defaults will be used.
-func encrypt(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *Entity, hints *FileHints, sigType packet.SignatureType, config *packet.Config) (plaintext io.WriteCloser, err error) {
- if len(to) == 0 {
- return nil, errors.InvalidArgumentError("no encryption recipient provided")
- }
-
- // These are the possible ciphers that we'll use for the message.
- candidateCiphers := []uint8{
- uint8(packet.CipherAES256),
- uint8(packet.CipherAES128),
- }
-
- // These are the possible hash functions that we'll use for the signature.
- candidateHashes := []uint8{
- hashToHashId(crypto.SHA256),
- hashToHashId(crypto.SHA384),
- hashToHashId(crypto.SHA512),
- hashToHashId(crypto.SHA3_256),
- hashToHashId(crypto.SHA3_512),
- }
-
- // Prefer GCM if everyone supports it
- candidateCipherSuites := [][2]uint8{
- {uint8(packet.CipherAES256), uint8(packet.AEADModeGCM)},
- {uint8(packet.CipherAES256), uint8(packet.AEADModeEAX)},
- {uint8(packet.CipherAES256), uint8(packet.AEADModeOCB)},
- {uint8(packet.CipherAES128), uint8(packet.AEADModeGCM)},
- {uint8(packet.CipherAES128), uint8(packet.AEADModeEAX)},
- {uint8(packet.CipherAES128), uint8(packet.AEADModeOCB)},
- }
-
- candidateCompression := []uint8{
- uint8(packet.CompressionNone),
- uint8(packet.CompressionZIP),
- uint8(packet.CompressionZLIB),
- }
-
- encryptKeys := make([]Key, len(to))
-
- // AEAD is used only if config enables it and every key supports it
- aeadSupported := config.AEAD() != nil
-
- for i := range to {
- var ok bool
- encryptKeys[i], ok = to[i].EncryptionKey(config.Now())
- if !ok {
- return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no valid encryption keys")
- }
-
- primarySelfSignature, _ := to[i].PrimarySelfSignature()
- if primarySelfSignature == nil {
- return nil, errors.InvalidArgumentError("entity without a self-signature")
- }
-
- if !primarySelfSignature.SEIPDv2 {
- aeadSupported = false
- }
-
- candidateCiphers = intersectPreferences(candidateCiphers, primarySelfSignature.PreferredSymmetric)
- candidateHashes = intersectPreferences(candidateHashes, primarySelfSignature.PreferredHash)
- candidateCipherSuites = intersectCipherSuites(candidateCipherSuites, primarySelfSignature.PreferredCipherSuites)
- candidateCompression = intersectPreferences(candidateCompression, primarySelfSignature.PreferredCompression)
- }
-
- // In the event that the intersection of supported algorithms is empty we use the ones
- // labelled as MUST that every implementation supports.
- if len(candidateCiphers) == 0 {
- // https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-9.3
- candidateCiphers = []uint8{uint8(packet.CipherAES128)}
- }
- if len(candidateHashes) == 0 {
- // https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#hash-algos
- candidateHashes = []uint8{hashToHashId(crypto.SHA256)}
- }
- if len(candidateCipherSuites) == 0 {
- // https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-9.6
- candidateCipherSuites = [][2]uint8{{uint8(packet.CipherAES128), uint8(packet.AEADModeOCB)}}
- }
-
- cipher := packet.CipherFunction(candidateCiphers[0])
- aeadCipherSuite := packet.CipherSuite{
- Cipher: packet.CipherFunction(candidateCipherSuites[0][0]),
- Mode: packet.AEADMode(candidateCipherSuites[0][1]),
- }
-
- // If the cipher specified by config is a candidate, we'll use that.
- configuredCipher := config.Cipher()
- for _, c := range candidateCiphers {
- cipherFunc := packet.CipherFunction(c)
- if cipherFunc == configuredCipher {
- cipher = cipherFunc
- break
- }
- }
-
- symKey := make([]byte, cipher.KeySize())
- if _, err := io.ReadFull(config.Random(), symKey); err != nil {
- return nil, err
- }
-
- for _, key := range encryptKeys {
- if err := packet.SerializeEncryptedKeyAEAD(keyWriter, key.PublicKey, cipher, aeadSupported, symKey, config); err != nil {
- return nil, err
- }
- }
-
- var payload io.WriteCloser
- payload, err = packet.SerializeSymmetricallyEncrypted(dataWriter, cipher, aeadSupported, aeadCipherSuite, symKey, config)
- if err != nil {
- return
- }
-
- payload, err = handleCompression(payload, candidateCompression, config)
- if err != nil {
- return nil, err
- }
-
- return writeAndSign(payload, candidateHashes, signed, hints, sigType, config)
-}
-
-// Sign signs a message. The resulting WriteCloser must be closed after the
-// contents of the file have been written. hints contains optional information
-// that aids the recipients in processing the message.
-// If config is nil, sensible defaults will be used.
-func Sign(output io.Writer, signed *Entity, hints *FileHints, config *packet.Config) (input io.WriteCloser, err error) {
- if signed == nil {
- return nil, errors.InvalidArgumentError("no signer provided")
- }
-
- // These are the possible hash functions that we'll use for the signature.
- candidateHashes := []uint8{
- hashToHashId(crypto.SHA256),
- hashToHashId(crypto.SHA384),
- hashToHashId(crypto.SHA512),
- hashToHashId(crypto.SHA3_256),
- hashToHashId(crypto.SHA3_512),
- }
- defaultHashes := candidateHashes[0:1]
- primarySelfSignature, _ := signed.PrimarySelfSignature()
- if primarySelfSignature == nil {
- return nil, errors.StructuralError("signed entity has no self-signature")
- }
- preferredHashes := primarySelfSignature.PreferredHash
- if len(preferredHashes) == 0 {
- preferredHashes = defaultHashes
- }
- candidateHashes = intersectPreferences(candidateHashes, preferredHashes)
- if len(candidateHashes) == 0 {
- return nil, errors.StructuralError("cannot sign because signing key shares no common algorithms with candidate hashes")
- }
-
- return writeAndSign(noOpCloser{output}, candidateHashes, signed, hints, packet.SigTypeBinary, config)
-}
-
-// signatureWriter hashes the contents of a message while passing it along to
-// literalData. When closed, it closes literalData, writes a signature packet
-// to encryptedData and then also closes encryptedData.
-type signatureWriter struct {
- encryptedData io.WriteCloser
- literalData io.WriteCloser
- hashType crypto.Hash
- wrappedHash hash.Hash
- h hash.Hash
- salt []byte // v6 only
- signer *packet.PrivateKey
- sigType packet.SignatureType
- config *packet.Config
- metadata *packet.LiteralData // V5 signatures protect document metadata
-}
-
-func (s signatureWriter) Write(data []byte) (int, error) {
- s.wrappedHash.Write(data)
- switch s.sigType {
- case packet.SigTypeBinary:
- return s.literalData.Write(data)
- case packet.SigTypeText:
- flag := 0
- return writeCanonical(s.literalData, data, &flag)
- }
- return 0, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(s.sigType)))
-}
-
-func (s signatureWriter) Close() error {
- sig := createSignaturePacket(&s.signer.PublicKey, s.sigType, s.config)
- sig.Hash = s.hashType
- sig.Metadata = s.metadata
-
- if err := sig.SetSalt(s.salt); err != nil {
- return err
- }
-
- if err := sig.Sign(s.h, s.signer, s.config); err != nil {
- return err
- }
- if err := s.literalData.Close(); err != nil {
- return err
- }
- if err := sig.Serialize(s.encryptedData); err != nil {
- return err
- }
- return s.encryptedData.Close()
-}
-
-func createSignaturePacket(signer *packet.PublicKey, sigType packet.SignatureType, config *packet.Config) *packet.Signature {
- sigLifetimeSecs := config.SigLifetime()
- return &packet.Signature{
- Version: signer.Version,
- SigType: sigType,
- PubKeyAlgo: signer.PubKeyAlgo,
- Hash: config.Hash(),
- CreationTime: config.Now(),
- IssuerKeyId: &signer.KeyId,
- IssuerFingerprint: signer.Fingerprint,
- Notations: config.Notations(),
- SigLifetimeSecs: &sigLifetimeSecs,
- }
-}
-
-// noOpCloser is like an ioutil.NopCloser, but for an io.Writer.
-// TODO: we have two of these in OpenPGP packages alone. This probably needs
-// to be promoted somewhere more common.
-type noOpCloser struct {
- w io.Writer
-}
-
-func (c noOpCloser) Write(data []byte) (n int, err error) {
- return c.w.Write(data)
-}
-
-func (c noOpCloser) Close() error {
- return nil
-}
-
-func handleCompression(compressed io.WriteCloser, candidateCompression []uint8, config *packet.Config) (data io.WriteCloser, err error) {
- data = compressed
- confAlgo := config.Compression()
- if confAlgo == packet.CompressionNone {
- return
- }
-
- // Set algorithm labelled as MUST as fallback
- // https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-9.4
- finalAlgo := packet.CompressionNone
- // if compression specified by config available we will use it
- for _, c := range candidateCompression {
- if uint8(confAlgo) == c {
- finalAlgo = confAlgo
- break
- }
- }
-
- if finalAlgo != packet.CompressionNone {
- var compConfig *packet.CompressionConfig
- if config != nil {
- compConfig = config.CompressionConfig
- }
- data, err = packet.SerializeCompressed(compressed, finalAlgo, compConfig)
- if err != nil {
- return
- }
- }
- return data, nil
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/x25519/x25519.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/x25519/x25519.go
deleted file mode 100644
index 38afcc74fa3..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/x25519/x25519.go
+++ /dev/null
@@ -1,221 +0,0 @@
-package x25519
-
-import (
- "crypto/sha256"
- "crypto/subtle"
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/aes/keywrap"
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- x25519lib "github.com/cloudflare/circl/dh/x25519"
- "golang.org/x/crypto/hkdf"
-)
-
-const (
- hkdfInfo = "OpenPGP X25519"
- aes128KeySize = 16
- // The size of a public or private key in bytes.
- KeySize = x25519lib.Size
-)
-
-type PublicKey struct {
- // Point represents the encoded elliptic curve point of the public key.
- Point []byte
-}
-
-type PrivateKey struct {
- PublicKey
- // Secret represents the secret of the private key.
- Secret []byte
-}
-
-// NewPrivateKey creates a new empty private key including the public key.
-func NewPrivateKey(key PublicKey) *PrivateKey {
- return &PrivateKey{
- PublicKey: key,
- }
-}
-
-// Validate validates that the provided public key matches the private key.
-func Validate(pk *PrivateKey) (err error) {
- var expectedPublicKey, privateKey x25519lib.Key
- subtle.ConstantTimeCopy(1, privateKey[:], pk.Secret)
- x25519lib.KeyGen(&expectedPublicKey, &privateKey)
- if subtle.ConstantTimeCompare(expectedPublicKey[:], pk.PublicKey.Point) == 0 {
- return errors.KeyInvalidError("x25519: invalid key")
- }
- return nil
-}
-
-// GenerateKey generates a new x25519 key pair.
-func GenerateKey(rand io.Reader) (*PrivateKey, error) {
- var privateKey, publicKey x25519lib.Key
- privateKeyOut := new(PrivateKey)
- err := generateKey(rand, &privateKey, &publicKey)
- if err != nil {
- return nil, err
- }
- privateKeyOut.PublicKey.Point = publicKey[:]
- privateKeyOut.Secret = privateKey[:]
- return privateKeyOut, nil
-}
-
-func generateKey(rand io.Reader, privateKey *x25519lib.Key, publicKey *x25519lib.Key) error {
- maxRounds := 10
- isZero := true
- for round := 0; isZero; round++ {
- if round == maxRounds {
- return errors.InvalidArgumentError("x25519: zero keys only, randomness source might be corrupt")
- }
- _, err := io.ReadFull(rand, privateKey[:])
- if err != nil {
- return err
- }
- isZero = constantTimeIsZero(privateKey[:])
- }
- x25519lib.KeyGen(publicKey, privateKey)
- return nil
-}
-
-// Encrypt encrypts a sessionKey with x25519 according to
-// the OpenPGP crypto refresh specification section 5.1.6. The function assumes that the
-// sessionKey has the correct format and padding according to the specification.
-func Encrypt(rand io.Reader, publicKey *PublicKey, sessionKey []byte) (ephemeralPublicKey *PublicKey, encryptedSessionKey []byte, err error) {
- var ephemeralPrivate, ephemeralPublic, staticPublic, shared x25519lib.Key
- // Check that the input static public key has 32 bytes
- if len(publicKey.Point) != KeySize {
- err = errors.KeyInvalidError("x25519: the public key has the wrong size")
- return
- }
- copy(staticPublic[:], publicKey.Point)
- // Generate ephemeral keyPair
- err = generateKey(rand, &ephemeralPrivate, &ephemeralPublic)
- if err != nil {
- return
- }
- // Compute shared key
- ok := x25519lib.Shared(&shared, &ephemeralPrivate, &staticPublic)
- if !ok {
- err = errors.KeyInvalidError("x25519: the public key is a low order point")
- return
- }
- // Derive the encryption key from the shared secret
- encryptionKey := applyHKDF(ephemeralPublic[:], publicKey.Point[:], shared[:])
- ephemeralPublicKey = &PublicKey{
- Point: ephemeralPublic[:],
- }
- // Encrypt the sessionKey with aes key wrapping
- encryptedSessionKey, err = keywrap.Wrap(encryptionKey, sessionKey)
- return
-}
-
-// Decrypt decrypts a session key stored in ciphertext with the provided x25519
-// private key and ephemeral public key.
-func Decrypt(privateKey *PrivateKey, ephemeralPublicKey *PublicKey, ciphertext []byte) (encodedSessionKey []byte, err error) {
- var ephemeralPublic, staticPrivate, shared x25519lib.Key
- // Check that the input ephemeral public key has 32 bytes
- if len(ephemeralPublicKey.Point) != KeySize {
- err = errors.KeyInvalidError("x25519: the public key has the wrong size")
- return
- }
- copy(ephemeralPublic[:], ephemeralPublicKey.Point)
- subtle.ConstantTimeCopy(1, staticPrivate[:], privateKey.Secret)
- // Compute shared key
- ok := x25519lib.Shared(&shared, &staticPrivate, &ephemeralPublic)
- if !ok {
- err = errors.KeyInvalidError("x25519: the ephemeral public key is a low order point")
- return
- }
- // Derive the encryption key from the shared secret
- encryptionKey := applyHKDF(ephemeralPublicKey.Point[:], privateKey.PublicKey.Point[:], shared[:])
- // Decrypt the session key with aes key wrapping
- encodedSessionKey, err = keywrap.Unwrap(encryptionKey, ciphertext)
- return
-}
-
-func applyHKDF(ephemeralPublicKey []byte, publicKey []byte, sharedSecret []byte) []byte {
- inputKey := make([]byte, 3*KeySize)
- // ephemeral public key | recipient public key | shared secret
- subtle.ConstantTimeCopy(1, inputKey[:KeySize], ephemeralPublicKey)
- subtle.ConstantTimeCopy(1, inputKey[KeySize:2*KeySize], publicKey)
- subtle.ConstantTimeCopy(1, inputKey[2*KeySize:], sharedSecret)
- hkdfReader := hkdf.New(sha256.New, inputKey, []byte{}, []byte(hkdfInfo))
- encryptionKey := make([]byte, aes128KeySize)
- _, _ = io.ReadFull(hkdfReader, encryptionKey)
- return encryptionKey
-}
-
-func constantTimeIsZero(bytes []byte) bool {
- isZero := byte(0)
- for _, b := range bytes {
- isZero |= b
- }
- return isZero == 0
-}
-
-// ENCODING/DECODING ciphertexts:
-
-// EncodeFieldsLength returns the length of the ciphertext encoding
-// given the encrypted session key.
-func EncodedFieldsLength(encryptedSessionKey []byte, v6 bool) int {
- lenCipherFunction := 0
- if !v6 {
- lenCipherFunction = 1
- }
- return KeySize + 1 + len(encryptedSessionKey) + lenCipherFunction
-}
-
-// EncodeField encodes x25519 session key encryption fields as
-// ephemeral x25519 public key | follow byte length | cipherFunction (v3 only) | encryptedSessionKey
-// and writes it to writer.
-func EncodeFields(writer io.Writer, ephemeralPublicKey *PublicKey, encryptedSessionKey []byte, cipherFunction byte, v6 bool) (err error) {
- lenAlgorithm := 0
- if !v6 {
- lenAlgorithm = 1
- }
- if _, err = writer.Write(ephemeralPublicKey.Point); err != nil {
- return err
- }
- if _, err = writer.Write([]byte{byte(len(encryptedSessionKey) + lenAlgorithm)}); err != nil {
- return err
- }
- if !v6 {
- if _, err = writer.Write([]byte{cipherFunction}); err != nil {
- return err
- }
- }
- _, err = writer.Write(encryptedSessionKey)
- return err
-}
-
-// DecodeField decodes a x25519 session key encryption as
-// ephemeral x25519 public key | follow byte length | cipherFunction (v3 only) | encryptedSessionKey.
-func DecodeFields(reader io.Reader, v6 bool) (ephemeralPublicKey *PublicKey, encryptedSessionKey []byte, cipherFunction byte, err error) {
- var buf [1]byte
- ephemeralPublicKey = &PublicKey{
- Point: make([]byte, KeySize),
- }
- // 32 octets representing an ephemeral x25519 public key.
- if _, err = io.ReadFull(reader, ephemeralPublicKey.Point); err != nil {
- return nil, nil, 0, err
- }
- // A one-octet size of the following fields.
- if _, err = io.ReadFull(reader, buf[:]); err != nil {
- return nil, nil, 0, err
- }
- followingLen := buf[0]
- // The one-octet algorithm identifier, if it was passed (in the case of a v3 PKESK packet).
- if !v6 {
- if _, err = io.ReadFull(reader, buf[:]); err != nil {
- return nil, nil, 0, err
- }
- cipherFunction = buf[0]
- followingLen -= 1
- }
- // The encrypted session key.
- encryptedSessionKey = make([]byte, followingLen)
- if _, err = io.ReadFull(reader, encryptedSessionKey); err != nil {
- return nil, nil, 0, err
- }
- return ephemeralPublicKey, encryptedSessionKey, cipherFunction, nil
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/x448/x448.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/x448/x448.go
deleted file mode 100644
index 65a082dabd7..00000000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/x448/x448.go
+++ /dev/null
@@ -1,229 +0,0 @@
-package x448
-
-import (
- "crypto/sha512"
- "crypto/subtle"
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/aes/keywrap"
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- x448lib "github.com/cloudflare/circl/dh/x448"
- "golang.org/x/crypto/hkdf"
-)
-
-const (
- hkdfInfo = "OpenPGP X448"
- aes256KeySize = 32
- // The size of a public or private key in bytes.
- KeySize = x448lib.Size
-)
-
-type PublicKey struct {
- // Point represents the encoded elliptic curve point of the public key.
- Point []byte
-}
-
-type PrivateKey struct {
- PublicKey
- // Secret represents the secret of the private key.
- Secret []byte
-}
-
-// NewPrivateKey creates a new empty private key including the public key.
-func NewPrivateKey(key PublicKey) *PrivateKey {
- return &PrivateKey{
- PublicKey: key,
- }
-}
-
-// Validate validates that the provided public key matches
-// the private key.
-func Validate(pk *PrivateKey) (err error) {
- var expectedPublicKey, privateKey x448lib.Key
- subtle.ConstantTimeCopy(1, privateKey[:], pk.Secret)
- x448lib.KeyGen(&expectedPublicKey, &privateKey)
- if subtle.ConstantTimeCompare(expectedPublicKey[:], pk.PublicKey.Point) == 0 {
- return errors.KeyInvalidError("x448: invalid key")
- }
- return nil
-}
-
-// GenerateKey generates a new x448 key pair.
-func GenerateKey(rand io.Reader) (*PrivateKey, error) {
- var privateKey, publicKey x448lib.Key
- privateKeyOut := new(PrivateKey)
- err := generateKey(rand, &privateKey, &publicKey)
- if err != nil {
- return nil, err
- }
- privateKeyOut.PublicKey.Point = publicKey[:]
- privateKeyOut.Secret = privateKey[:]
- return privateKeyOut, nil
-}
-
-func generateKey(rand io.Reader, privateKey *x448lib.Key, publicKey *x448lib.Key) error {
- maxRounds := 10
- isZero := true
- for round := 0; isZero; round++ {
- if round == maxRounds {
- return errors.InvalidArgumentError("x448: zero keys only, randomness source might be corrupt")
- }
- _, err := io.ReadFull(rand, privateKey[:])
- if err != nil {
- return err
- }
- isZero = constantTimeIsZero(privateKey[:])
- }
- x448lib.KeyGen(publicKey, privateKey)
- return nil
-}
-
-// Encrypt encrypts a sessionKey with x448 according to
-// the OpenPGP crypto refresh specification section 5.1.7. The function assumes that the
-// sessionKey has the correct format and padding according to the specification.
-func Encrypt(rand io.Reader, publicKey *PublicKey, sessionKey []byte) (ephemeralPublicKey *PublicKey, encryptedSessionKey []byte, err error) {
- var ephemeralPrivate, ephemeralPublic, staticPublic, shared x448lib.Key
- // Check that the input static public key has 56 bytes.
- if len(publicKey.Point) != KeySize {
- err = errors.KeyInvalidError("x448: the public key has the wrong size")
- return nil, nil, err
- }
- copy(staticPublic[:], publicKey.Point)
- // Generate ephemeral keyPair.
- if err = generateKey(rand, &ephemeralPrivate, &ephemeralPublic); err != nil {
- return nil, nil, err
- }
- // Compute shared key.
- ok := x448lib.Shared(&shared, &ephemeralPrivate, &staticPublic)
- if !ok {
- err = errors.KeyInvalidError("x448: the public key is a low order point")
- return nil, nil, err
- }
- // Derive the encryption key from the shared secret.
- encryptionKey := applyHKDF(ephemeralPublic[:], publicKey.Point[:], shared[:])
- ephemeralPublicKey = &PublicKey{
- Point: ephemeralPublic[:],
- }
- // Encrypt the sessionKey with aes key wrapping.
- encryptedSessionKey, err = keywrap.Wrap(encryptionKey, sessionKey)
- if err != nil {
- return nil, nil, err
- }
- return ephemeralPublicKey, encryptedSessionKey, nil
-}
-
-// Decrypt decrypts a session key stored in ciphertext with the provided x448
-// private key and ephemeral public key.
-func Decrypt(privateKey *PrivateKey, ephemeralPublicKey *PublicKey, ciphertext []byte) (encodedSessionKey []byte, err error) {
- var ephemeralPublic, staticPrivate, shared x448lib.Key
- // Check that the input ephemeral public key has 56 bytes.
- if len(ephemeralPublicKey.Point) != KeySize {
- err = errors.KeyInvalidError("x448: the public key has the wrong size")
- return nil, err
- }
- copy(ephemeralPublic[:], ephemeralPublicKey.Point)
- subtle.ConstantTimeCopy(1, staticPrivate[:], privateKey.Secret)
- // Compute shared key.
- ok := x448lib.Shared(&shared, &staticPrivate, &ephemeralPublic)
- if !ok {
- err = errors.KeyInvalidError("x448: the ephemeral public key is a low order point")
- return nil, err
- }
- // Derive the encryption key from the shared secret.
- encryptionKey := applyHKDF(ephemeralPublicKey.Point[:], privateKey.PublicKey.Point[:], shared[:])
- // Decrypt the session key with aes key wrapping.
- encodedSessionKey, err = keywrap.Unwrap(encryptionKey, ciphertext)
- if err != nil {
- return nil, err
- }
- return encodedSessionKey, nil
-}
-
-func applyHKDF(ephemeralPublicKey []byte, publicKey []byte, sharedSecret []byte) []byte {
- inputKey := make([]byte, 3*KeySize)
- // ephemeral public key | recipient public key | shared secret.
- subtle.ConstantTimeCopy(1, inputKey[:KeySize], ephemeralPublicKey)
- subtle.ConstantTimeCopy(1, inputKey[KeySize:2*KeySize], publicKey)
- subtle.ConstantTimeCopy(1, inputKey[2*KeySize:], sharedSecret)
- hkdfReader := hkdf.New(sha512.New, inputKey, []byte{}, []byte(hkdfInfo))
- encryptionKey := make([]byte, aes256KeySize)
- _, _ = io.ReadFull(hkdfReader, encryptionKey)
- return encryptionKey
-}
-
-func constantTimeIsZero(bytes []byte) bool {
- isZero := byte(0)
- for _, b := range bytes {
- isZero |= b
- }
- return isZero == 0
-}
-
-// ENCODING/DECODING ciphertexts:
-
-// EncodeFieldsLength returns the length of the ciphertext encoding
-// given the encrypted session key.
-func EncodedFieldsLength(encryptedSessionKey []byte, v6 bool) int {
- lenCipherFunction := 0
- if !v6 {
- lenCipherFunction = 1
- }
- return KeySize + 1 + len(encryptedSessionKey) + lenCipherFunction
-}
-
-// EncodeField encodes x448 session key encryption fields as
-// ephemeral x448 public key | follow byte length | cipherFunction (v3 only) | encryptedSessionKey
-// and writes it to writer.
-func EncodeFields(writer io.Writer, ephemeralPublicKey *PublicKey, encryptedSessionKey []byte, cipherFunction byte, v6 bool) (err error) {
- lenAlgorithm := 0
- if !v6 {
- lenAlgorithm = 1
- }
- if _, err = writer.Write(ephemeralPublicKey.Point); err != nil {
- return err
- }
- if _, err = writer.Write([]byte{byte(len(encryptedSessionKey) + lenAlgorithm)}); err != nil {
- return err
- }
- if !v6 {
- if _, err = writer.Write([]byte{cipherFunction}); err != nil {
- return err
- }
- }
- if _, err = writer.Write(encryptedSessionKey); err != nil {
- return err
- }
- return nil
-}
-
-// DecodeField decodes a x448 session key encryption as
-// ephemeral x448 public key | follow byte length | cipherFunction (v3 only) | encryptedSessionKey.
-func DecodeFields(reader io.Reader, v6 bool) (ephemeralPublicKey *PublicKey, encryptedSessionKey []byte, cipherFunction byte, err error) {
- var buf [1]byte
- ephemeralPublicKey = &PublicKey{
- Point: make([]byte, KeySize),
- }
- // 56 octets representing an ephemeral x448 public key.
- if _, err = io.ReadFull(reader, ephemeralPublicKey.Point); err != nil {
- return nil, nil, 0, err
- }
- // A one-octet size of the following fields.
- if _, err = io.ReadFull(reader, buf[:]); err != nil {
- return nil, nil, 0, err
- }
- followingLen := buf[0]
- // The one-octet algorithm identifier, if it was passed (in the case of a v3 PKESK packet).
- if !v6 {
- if _, err = io.ReadFull(reader, buf[:]); err != nil {
- return nil, nil, 0, err
- }
- cipherFunction = buf[0]
- followingLen -= 1
- }
- // The encrypted session key.
- encryptedSessionKey = make([]byte, followingLen)
- if _, err = io.ReadFull(reader, encryptedSessionKey); err != nil {
- return nil, nil, 0, err
- }
- return ephemeralPublicKey, encryptedSessionKey, cipherFunction, nil
-}
diff --git a/vendor/github.com/cloudflare/circl/LICENSE b/vendor/github.com/cloudflare/circl/LICENSE
deleted file mode 100644
index 67edaa90a04..00000000000
--- a/vendor/github.com/cloudflare/circl/LICENSE
+++ /dev/null
@@ -1,57 +0,0 @@
-Copyright (c) 2019 Cloudflare. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Cloudflare nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-========================================================================
-
-Copyright (c) 2009 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve.go b/vendor/github.com/cloudflare/circl/dh/x25519/curve.go
deleted file mode 100644
index f9057c2b866..00000000000
--- a/vendor/github.com/cloudflare/circl/dh/x25519/curve.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package x25519
-
-import (
- fp "github.com/cloudflare/circl/math/fp25519"
-)
-
-// ladderJoye calculates a fixed-point multiplication with the generator point.
-// The algorithm is the right-to-left Joye's ladder as described
-// in "How to precompute a ladder" in SAC'2017.
-func ladderJoye(k *Key) {
- w := [5]fp.Elt{} // [mu,x1,z1,x2,z2] order must be preserved.
- fp.SetOne(&w[1]) // x1 = 1
- fp.SetOne(&w[2]) // z1 = 1
- w[3] = fp.Elt{ // x2 = G-S
- 0xbd, 0xaa, 0x2f, 0xc8, 0xfe, 0xe1, 0x94, 0x7e,
- 0xf8, 0xed, 0xb2, 0x14, 0xae, 0x95, 0xf0, 0xbb,
- 0xe2, 0x48, 0x5d, 0x23, 0xb9, 0xa0, 0xc7, 0xad,
- 0x34, 0xab, 0x7c, 0xe2, 0xee, 0xcd, 0xae, 0x1e,
- }
- fp.SetOne(&w[4]) // z2 = 1
-
- const n = 255
- const h = 3
- swap := uint(1)
- for s := 0; s < n-h; s++ {
- i := (s + h) / 8
- j := (s + h) % 8
- bit := uint((k[i] >> uint(j)) & 1)
- copy(w[0][:], tableGenerator[s*Size:(s+1)*Size])
- diffAdd(&w, swap^bit)
- swap = bit
- }
- for s := 0; s < h; s++ {
- double(&w[1], &w[2])
- }
- toAffine((*[fp.Size]byte)(k), &w[1], &w[2])
-}
-
-// ladderMontgomery calculates a generic scalar point multiplication
-// The algorithm implemented is the left-to-right Montgomery's ladder.
-func ladderMontgomery(k, xP *Key) {
- w := [5]fp.Elt{} // [x1, x2, z2, x3, z3] order must be preserved.
- w[0] = *(*fp.Elt)(xP) // x1 = xP
- fp.SetOne(&w[1]) // x2 = 1
- w[3] = *(*fp.Elt)(xP) // x3 = xP
- fp.SetOne(&w[4]) // z3 = 1
-
- move := uint(0)
- for s := 255 - 1; s >= 0; s-- {
- i := s / 8
- j := s % 8
- bit := uint((k[i] >> uint(j)) & 1)
- ladderStep(&w, move^bit)
- move = bit
- }
- toAffine((*[fp.Size]byte)(k), &w[1], &w[2])
-}
-
-func toAffine(k *[fp.Size]byte, x, z *fp.Elt) {
- fp.Inv(z, z)
- fp.Mul(x, x, z)
- _ = fp.ToBytes(k[:], x)
-}
-
-var lowOrderPoints = [5]fp.Elt{
- { /* (0,_,1) point of order 2 on Curve25519 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- },
- { /* (1,_,1) point of order 4 on Curve25519 */
- 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- },
- { /* (x,_,1) first point of order 8 on Curve25519 */
- 0xe0, 0xeb, 0x7a, 0x7c, 0x3b, 0x41, 0xb8, 0xae,
- 0x16, 0x56, 0xe3, 0xfa, 0xf1, 0x9f, 0xc4, 0x6a,
- 0xda, 0x09, 0x8d, 0xeb, 0x9c, 0x32, 0xb1, 0xfd,
- 0x86, 0x62, 0x05, 0x16, 0x5f, 0x49, 0xb8, 0x00,
- },
- { /* (x,_,1) second point of order 8 on Curve25519 */
- 0x5f, 0x9c, 0x95, 0xbc, 0xa3, 0x50, 0x8c, 0x24,
- 0xb1, 0xd0, 0xb1, 0x55, 0x9c, 0x83, 0xef, 0x5b,
- 0x04, 0x44, 0x5c, 0xc4, 0x58, 0x1c, 0x8e, 0x86,
- 0xd8, 0x22, 0x4e, 0xdd, 0xd0, 0x9f, 0x11, 0x57,
- },
- { /* (-1,_,1) a point of order 4 on the twist of Curve25519 */
- 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f,
- },
-}
diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.go b/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.go
deleted file mode 100644
index 8a3d54c570f..00000000000
--- a/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.go
+++ /dev/null
@@ -1,30 +0,0 @@
-//go:build amd64 && !purego
-// +build amd64,!purego
-
-package x25519
-
-import (
- fp "github.com/cloudflare/circl/math/fp25519"
- "golang.org/x/sys/cpu"
-)
-
-var hasBmi2Adx = cpu.X86.HasBMI2 && cpu.X86.HasADX
-
-var _ = hasBmi2Adx
-
-func double(x, z *fp.Elt) { doubleAmd64(x, z) }
-func diffAdd(w *[5]fp.Elt, b uint) { diffAddAmd64(w, b) }
-func ladderStep(w *[5]fp.Elt, b uint) { ladderStepAmd64(w, b) }
-func mulA24(z, x *fp.Elt) { mulA24Amd64(z, x) }
-
-//go:noescape
-func ladderStepAmd64(w *[5]fp.Elt, b uint)
-
-//go:noescape
-func diffAddAmd64(w *[5]fp.Elt, b uint)
-
-//go:noescape
-func doubleAmd64(x, z *fp.Elt)
-
-//go:noescape
-func mulA24Amd64(z, x *fp.Elt)
diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.h b/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.h
deleted file mode 100644
index 8c1ae4d0fbb..00000000000
--- a/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.h
+++ /dev/null
@@ -1,111 +0,0 @@
-#define ladderStepLeg \
- addSub(x2,z2) \
- addSub(x3,z3) \
- integerMulLeg(b0,x2,z3) \
- integerMulLeg(b1,x3,z2) \
- reduceFromDoubleLeg(t0,b0) \
- reduceFromDoubleLeg(t1,b1) \
- addSub(t0,t1) \
- cselect(x2,x3,regMove) \
- cselect(z2,z3,regMove) \
- integerSqrLeg(b0,t0) \
- integerSqrLeg(b1,t1) \
- reduceFromDoubleLeg(x3,b0) \
- reduceFromDoubleLeg(z3,b1) \
- integerMulLeg(b0,x1,z3) \
- reduceFromDoubleLeg(z3,b0) \
- integerSqrLeg(b0,x2) \
- integerSqrLeg(b1,z2) \
- reduceFromDoubleLeg(x2,b0) \
- reduceFromDoubleLeg(z2,b1) \
- subtraction(t0,x2,z2) \
- multiplyA24Leg(t1,t0) \
- additionLeg(t1,t1,z2) \
- integerMulLeg(b0,x2,z2) \
- integerMulLeg(b1,t0,t1) \
- reduceFromDoubleLeg(x2,b0) \
- reduceFromDoubleLeg(z2,b1)
-
-#define ladderStepBmi2Adx \
- addSub(x2,z2) \
- addSub(x3,z3) \
- integerMulAdx(b0,x2,z3) \
- integerMulAdx(b1,x3,z2) \
- reduceFromDoubleAdx(t0,b0) \
- reduceFromDoubleAdx(t1,b1) \
- addSub(t0,t1) \
- cselect(x2,x3,regMove) \
- cselect(z2,z3,regMove) \
- integerSqrAdx(b0,t0) \
- integerSqrAdx(b1,t1) \
- reduceFromDoubleAdx(x3,b0) \
- reduceFromDoubleAdx(z3,b1) \
- integerMulAdx(b0,x1,z3) \
- reduceFromDoubleAdx(z3,b0) \
- integerSqrAdx(b0,x2) \
- integerSqrAdx(b1,z2) \
- reduceFromDoubleAdx(x2,b0) \
- reduceFromDoubleAdx(z2,b1) \
- subtraction(t0,x2,z2) \
- multiplyA24Adx(t1,t0) \
- additionAdx(t1,t1,z2) \
- integerMulAdx(b0,x2,z2) \
- integerMulAdx(b1,t0,t1) \
- reduceFromDoubleAdx(x2,b0) \
- reduceFromDoubleAdx(z2,b1)
-
-#define difAddLeg \
- addSub(x1,z1) \
- integerMulLeg(b0,z1,ui) \
- reduceFromDoubleLeg(z1,b0) \
- addSub(x1,z1) \
- integerSqrLeg(b0,x1) \
- integerSqrLeg(b1,z1) \
- reduceFromDoubleLeg(x1,b0) \
- reduceFromDoubleLeg(z1,b1) \
- integerMulLeg(b0,x1,z2) \
- integerMulLeg(b1,z1,x2) \
- reduceFromDoubleLeg(x1,b0) \
- reduceFromDoubleLeg(z1,b1)
-
-#define difAddBmi2Adx \
- addSub(x1,z1) \
- integerMulAdx(b0,z1,ui) \
- reduceFromDoubleAdx(z1,b0) \
- addSub(x1,z1) \
- integerSqrAdx(b0,x1) \
- integerSqrAdx(b1,z1) \
- reduceFromDoubleAdx(x1,b0) \
- reduceFromDoubleAdx(z1,b1) \
- integerMulAdx(b0,x1,z2) \
- integerMulAdx(b1,z1,x2) \
- reduceFromDoubleAdx(x1,b0) \
- reduceFromDoubleAdx(z1,b1)
-
-#define doubleLeg \
- addSub(x1,z1) \
- integerSqrLeg(b0,x1) \
- integerSqrLeg(b1,z1) \
- reduceFromDoubleLeg(x1,b0) \
- reduceFromDoubleLeg(z1,b1) \
- subtraction(t0,x1,z1) \
- multiplyA24Leg(t1,t0) \
- additionLeg(t1,t1,z1) \
- integerMulLeg(b0,x1,z1) \
- integerMulLeg(b1,t0,t1) \
- reduceFromDoubleLeg(x1,b0) \
- reduceFromDoubleLeg(z1,b1)
-
-#define doubleBmi2Adx \
- addSub(x1,z1) \
- integerSqrAdx(b0,x1) \
- integerSqrAdx(b1,z1) \
- reduceFromDoubleAdx(x1,b0) \
- reduceFromDoubleAdx(z1,b1) \
- subtraction(t0,x1,z1) \
- multiplyA24Adx(t1,t0) \
- additionAdx(t1,t1,z1) \
- integerMulAdx(b0,x1,z1) \
- integerMulAdx(b1,t0,t1) \
- reduceFromDoubleAdx(x1,b0) \
- reduceFromDoubleAdx(z1,b1)
diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.s b/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.s
deleted file mode 100644
index ce9f062894a..00000000000
--- a/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.s
+++ /dev/null
@@ -1,157 +0,0 @@
-//go:build amd64 && !purego
-// +build amd64,!purego
-
-#include "textflag.h"
-
-// Depends on circl/math/fp25519 package
-#include "../../math/fp25519/fp_amd64.h"
-#include "curve_amd64.h"
-
-// CTE_A24 is (A+2)/4 from Curve25519
-#define CTE_A24 121666
-
-#define Size 32
-
-// multiplyA24Leg multiplies x times CTE_A24 and stores in z
-// Uses: AX, DX, R8-R13, FLAGS
-// Instr: x86_64, cmov
-#define multiplyA24Leg(z,x) \
- MOVL $CTE_A24, AX; MULQ 0+x; MOVQ AX, R8; MOVQ DX, R9; \
- MOVL $CTE_A24, AX; MULQ 8+x; MOVQ AX, R12; MOVQ DX, R10; \
- MOVL $CTE_A24, AX; MULQ 16+x; MOVQ AX, R13; MOVQ DX, R11; \
- MOVL $CTE_A24, AX; MULQ 24+x; \
- ADDQ R12, R9; \
- ADCQ R13, R10; \
- ADCQ AX, R11; \
- ADCQ $0, DX; \
- MOVL $38, AX; /* 2*C = 38 = 2^256 MOD 2^255-19*/ \
- IMULQ AX, DX; \
- ADDQ DX, R8; \
- ADCQ $0, R9; MOVQ R9, 8+z; \
- ADCQ $0, R10; MOVQ R10, 16+z; \
- ADCQ $0, R11; MOVQ R11, 24+z; \
- MOVQ $0, DX; \
- CMOVQCS AX, DX; \
- ADDQ DX, R8; MOVQ R8, 0+z;
-
-// multiplyA24Adx multiplies x times CTE_A24 and stores in z
-// Uses: AX, DX, R8-R12, FLAGS
-// Instr: x86_64, cmov, bmi2
-#define multiplyA24Adx(z,x) \
- MOVQ $CTE_A24, DX; \
- MULXQ 0+x, R8, R10; \
- MULXQ 8+x, R9, R11; ADDQ R10, R9; \
- MULXQ 16+x, R10, AX; ADCQ R11, R10; \
- MULXQ 24+x, R11, R12; ADCQ AX, R11; \
- ;;;;;;;;;;;;;;;;;;;;; ADCQ $0, R12; \
- MOVL $38, DX; /* 2*C = 38 = 2^256 MOD 2^255-19*/ \
- IMULQ DX, R12; \
- ADDQ R12, R8; \
- ADCQ $0, R9; MOVQ R9, 8+z; \
- ADCQ $0, R10; MOVQ R10, 16+z; \
- ADCQ $0, R11; MOVQ R11, 24+z; \
- MOVQ $0, R12; \
- CMOVQCS DX, R12; \
- ADDQ R12, R8; MOVQ R8, 0+z;
-
-#define mulA24Legacy \
- multiplyA24Leg(0(DI),0(SI))
-#define mulA24Bmi2Adx \
- multiplyA24Adx(0(DI),0(SI))
-
-// func mulA24Amd64(z, x *fp255.Elt)
-TEXT ·mulA24Amd64(SB),NOSPLIT,$0-16
- MOVQ z+0(FP), DI
- MOVQ x+8(FP), SI
- CHECK_BMI2ADX(LMA24, mulA24Legacy, mulA24Bmi2Adx)
-
-
-// func ladderStepAmd64(w *[5]fp255.Elt, b uint)
-// ladderStepAmd64 calculates a point addition and doubling as follows:
-// (x2,z2) = 2*(x2,z2) and (x3,z3) = (x2,z2)+(x3,z3) using as a difference (x1,-).
-// work = (x1,x2,z2,x3,z3) are five fp255.Elt of 32 bytes.
-// stack = (t0,t1) are two fp.Elt of fp.Size bytes, and
-// (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes.
-TEXT ·ladderStepAmd64(SB),NOSPLIT,$192-16
- // Parameters
- #define regWork DI
- #define regMove SI
- #define x1 0*Size(regWork)
- #define x2 1*Size(regWork)
- #define z2 2*Size(regWork)
- #define x3 3*Size(regWork)
- #define z3 4*Size(regWork)
- // Local variables
- #define t0 0*Size(SP)
- #define t1 1*Size(SP)
- #define b0 2*Size(SP)
- #define b1 4*Size(SP)
- MOVQ w+0(FP), regWork
- MOVQ b+8(FP), regMove
- CHECK_BMI2ADX(LLADSTEP, ladderStepLeg, ladderStepBmi2Adx)
- #undef regWork
- #undef regMove
- #undef x1
- #undef x2
- #undef z2
- #undef x3
- #undef z3
- #undef t0
- #undef t1
- #undef b0
- #undef b1
-
-// func diffAddAmd64(w *[5]fp255.Elt, b uint)
-// diffAddAmd64 calculates a differential point addition using a precomputed point.
-// (x1,z1) = (x1,z1)+(mu) using a difference point (x2,z2)
-// w = (mu,x1,z1,x2,z2) are five fp.Elt, and
-// stack = (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes.
-TEXT ·diffAddAmd64(SB),NOSPLIT,$128-16
- // Parameters
- #define regWork DI
- #define regSwap SI
- #define ui 0*Size(regWork)
- #define x1 1*Size(regWork)
- #define z1 2*Size(regWork)
- #define x2 3*Size(regWork)
- #define z2 4*Size(regWork)
- // Local variables
- #define b0 0*Size(SP)
- #define b1 2*Size(SP)
- MOVQ w+0(FP), regWork
- MOVQ b+8(FP), regSwap
- cswap(x1,x2,regSwap)
- cswap(z1,z2,regSwap)
- CHECK_BMI2ADX(LDIFADD, difAddLeg, difAddBmi2Adx)
- #undef regWork
- #undef regSwap
- #undef ui
- #undef x1
- #undef z1
- #undef x2
- #undef z2
- #undef b0
- #undef b1
-
-// func doubleAmd64(x, z *fp255.Elt)
-// doubleAmd64 calculates a point doubling (x1,z1) = 2*(x1,z1).
-// stack = (t0,t1) are two fp.Elt of fp.Size bytes, and
-// (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes.
-TEXT ·doubleAmd64(SB),NOSPLIT,$192-16
- // Parameters
- #define x1 0(DI)
- #define z1 0(SI)
- // Local variables
- #define t0 0*Size(SP)
- #define t1 1*Size(SP)
- #define b0 2*Size(SP)
- #define b1 4*Size(SP)
- MOVQ x+0(FP), DI
- MOVQ z+8(FP), SI
- CHECK_BMI2ADX(LDOUB,doubleLeg,doubleBmi2Adx)
- #undef x1
- #undef z1
- #undef t0
- #undef t1
- #undef b0
- #undef b1
diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve_generic.go b/vendor/github.com/cloudflare/circl/dh/x25519/curve_generic.go
deleted file mode 100644
index dae67ea37df..00000000000
--- a/vendor/github.com/cloudflare/circl/dh/x25519/curve_generic.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package x25519
-
-import (
- "encoding/binary"
- "math/bits"
-
- fp "github.com/cloudflare/circl/math/fp25519"
-)
-
-func doubleGeneric(x, z *fp.Elt) {
- t0, t1 := &fp.Elt{}, &fp.Elt{}
- fp.AddSub(x, z)
- fp.Sqr(x, x)
- fp.Sqr(z, z)
- fp.Sub(t0, x, z)
- mulA24Generic(t1, t0)
- fp.Add(t1, t1, z)
- fp.Mul(x, x, z)
- fp.Mul(z, t0, t1)
-}
-
-func diffAddGeneric(w *[5]fp.Elt, b uint) {
- mu, x1, z1, x2, z2 := &w[0], &w[1], &w[2], &w[3], &w[4]
- fp.Cswap(x1, x2, b)
- fp.Cswap(z1, z2, b)
- fp.AddSub(x1, z1)
- fp.Mul(z1, z1, mu)
- fp.AddSub(x1, z1)
- fp.Sqr(x1, x1)
- fp.Sqr(z1, z1)
- fp.Mul(x1, x1, z2)
- fp.Mul(z1, z1, x2)
-}
-
-func ladderStepGeneric(w *[5]fp.Elt, b uint) {
- x1, x2, z2, x3, z3 := &w[0], &w[1], &w[2], &w[3], &w[4]
- t0 := &fp.Elt{}
- t1 := &fp.Elt{}
- fp.AddSub(x2, z2)
- fp.AddSub(x3, z3)
- fp.Mul(t0, x2, z3)
- fp.Mul(t1, x3, z2)
- fp.AddSub(t0, t1)
- fp.Cmov(x2, x3, b)
- fp.Cmov(z2, z3, b)
- fp.Sqr(x3, t0)
- fp.Sqr(z3, t1)
- fp.Mul(z3, x1, z3)
- fp.Sqr(x2, x2)
- fp.Sqr(z2, z2)
- fp.Sub(t0, x2, z2)
- mulA24Generic(t1, t0)
- fp.Add(t1, t1, z2)
- fp.Mul(x2, x2, z2)
- fp.Mul(z2, t0, t1)
-}
-
-func mulA24Generic(z, x *fp.Elt) {
- const A24 = 121666
- const n = 8
- var xx [4]uint64
- for i := range xx {
- xx[i] = binary.LittleEndian.Uint64(x[i*n : (i+1)*n])
- }
-
- h0, l0 := bits.Mul64(xx[0], A24)
- h1, l1 := bits.Mul64(xx[1], A24)
- h2, l2 := bits.Mul64(xx[2], A24)
- h3, l3 := bits.Mul64(xx[3], A24)
-
- var c3 uint64
- l1, c0 := bits.Add64(h0, l1, 0)
- l2, c1 := bits.Add64(h1, l2, c0)
- l3, c2 := bits.Add64(h2, l3, c1)
- l4, _ := bits.Add64(h3, 0, c2)
- _, l4 = bits.Mul64(l4, 38)
- l0, c0 = bits.Add64(l0, l4, 0)
- xx[1], c1 = bits.Add64(l1, 0, c0)
- xx[2], c2 = bits.Add64(l2, 0, c1)
- xx[3], c3 = bits.Add64(l3, 0, c2)
- xx[0], _ = bits.Add64(l0, (-c3)&38, 0)
- for i := range xx {
- binary.LittleEndian.PutUint64(z[i*n:(i+1)*n], xx[i])
- }
-}
diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve_noasm.go b/vendor/github.com/cloudflare/circl/dh/x25519/curve_noasm.go
deleted file mode 100644
index 07fab97d2af..00000000000
--- a/vendor/github.com/cloudflare/circl/dh/x25519/curve_noasm.go
+++ /dev/null
@@ -1,11 +0,0 @@
-//go:build !amd64 || purego
-// +build !amd64 purego
-
-package x25519
-
-import fp "github.com/cloudflare/circl/math/fp25519"
-
-func double(x, z *fp.Elt) { doubleGeneric(x, z) }
-func diffAdd(w *[5]fp.Elt, b uint) { diffAddGeneric(w, b) }
-func ladderStep(w *[5]fp.Elt, b uint) { ladderStepGeneric(w, b) }
-func mulA24(z, x *fp.Elt) { mulA24Generic(z, x) }
diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/doc.go b/vendor/github.com/cloudflare/circl/dh/x25519/doc.go
deleted file mode 100644
index 3ce102d1457..00000000000
--- a/vendor/github.com/cloudflare/circl/dh/x25519/doc.go
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
-Package x25519 provides Diffie-Hellman functions as specified in RFC-7748.
-
-Validation of public keys.
-
-The Diffie-Hellman function, as described in RFC-7748 [1], works for any
-public key. However, if a different protocol requires contributory
-behaviour [2,3], then the public keys must be validated against low-order
-points [3,4]. To do that, the Shared function performs this validation
-internally and returns false when the public key is invalid (i.e., it
-is a low-order point).
-
-References:
- - [1] RFC7748 by Langley, Hamburg, Turner (https://rfc-editor.org/rfc/rfc7748.txt)
- - [2] Curve25519 by Bernstein (https://cr.yp.to/ecdh.html)
- - [3] Bernstein (https://cr.yp.to/ecdh.html#validate)
- - [4] Cremers&Jackson (https://eprint.iacr.org/2019/526)
-*/
-package x25519
diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/key.go b/vendor/github.com/cloudflare/circl/dh/x25519/key.go
deleted file mode 100644
index c76f72ac7fa..00000000000
--- a/vendor/github.com/cloudflare/circl/dh/x25519/key.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package x25519
-
-import (
- "crypto/subtle"
-
- fp "github.com/cloudflare/circl/math/fp25519"
-)
-
-// Size is the length in bytes of a X25519 key.
-const Size = 32
-
-// Key represents a X25519 key.
-type Key [Size]byte
-
-func (k *Key) clamp(in *Key) *Key {
- *k = *in
- k[0] &= 248
- k[31] = (k[31] & 127) | 64
- return k
-}
-
-// isValidPubKey verifies if the public key is not a low-order point.
-func (k *Key) isValidPubKey() bool {
- fp.Modp((*fp.Elt)(k))
- var isLowOrder int
- for _, P := range lowOrderPoints {
- isLowOrder |= subtle.ConstantTimeCompare(P[:], k[:])
- }
- return isLowOrder == 0
-}
-
-// KeyGen obtains a public key given a secret key.
-func KeyGen(public, secret *Key) {
- ladderJoye(public.clamp(secret))
-}
-
-// Shared calculates Alice's shared key from Alice's secret key and Bob's
-// public key returning true on success. A failure case happens when the public
-// key is a low-order point, thus the shared key is all-zeros and the function
-// returns false.
-func Shared(shared, secret, public *Key) bool {
- validPk := *public
- validPk[31] &= (1 << (255 % 8)) - 1
- ok := validPk.isValidPubKey()
- ladderMontgomery(shared.clamp(secret), &validPk)
- return ok
-}
diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/table.go b/vendor/github.com/cloudflare/circl/dh/x25519/table.go
deleted file mode 100644
index 28c8c4ac032..00000000000
--- a/vendor/github.com/cloudflare/circl/dh/x25519/table.go
+++ /dev/null
@@ -1,268 +0,0 @@
-package x25519
-
-import "github.com/cloudflare/circl/math/fp25519"
-
-// tableGenerator contains the set of points:
-//
-// t[i] = (xi+1)/(xi-1),
-//
-// where (xi,yi) = 2^iG and G is the generator point
-// Size = (256)*(256/8) = 8192 bytes.
-var tableGenerator = [256 * fp25519.Size]byte{
- /* (2^ 0)P */ 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x5f,
- /* (2^ 1)P */ 0x96, 0xfe, 0xaa, 0x16, 0xf4, 0x20, 0x82, 0x6b, 0x34, 0x6a, 0x56, 0x4f, 0x2b, 0xeb, 0xeb, 0x82, 0x0f, 0x95, 0xa5, 0x75, 0xb0, 0xa5, 0xa9, 0xd5, 0xf4, 0x88, 0x24, 0x4b, 0xcf, 0xb2, 0x42, 0x51,
- /* (2^ 2)P */ 0x0c, 0x68, 0x69, 0x00, 0x75, 0xbc, 0xae, 0x6a, 0x41, 0x9c, 0xf9, 0xa0, 0x20, 0x78, 0xcf, 0x89, 0xf4, 0xd0, 0x56, 0x3b, 0x18, 0xd9, 0x58, 0x2a, 0xa4, 0x11, 0x60, 0xe3, 0x80, 0xca, 0x5a, 0x4b,
- /* (2^ 3)P */ 0x5d, 0x74, 0x29, 0x8c, 0x34, 0x32, 0x91, 0x32, 0xd7, 0x2f, 0x64, 0xe1, 0x16, 0xe6, 0xa2, 0xf4, 0x34, 0xbc, 0x67, 0xff, 0x03, 0xbb, 0x45, 0x1e, 0x4a, 0x9b, 0x2a, 0xf4, 0xd0, 0x12, 0x69, 0x30,
- /* (2^ 4)P */ 0x54, 0x71, 0xaf, 0xe6, 0x07, 0x65, 0x88, 0xff, 0x2f, 0xc8, 0xee, 0xdf, 0x13, 0x0e, 0xf5, 0x04, 0xce, 0xb5, 0xba, 0x2a, 0xe8, 0x2f, 0x51, 0xaa, 0x22, 0xf2, 0xd5, 0x68, 0x1a, 0x25, 0x4e, 0x17,
- /* (2^ 5)P */ 0x98, 0x88, 0x02, 0x82, 0x0d, 0x70, 0x96, 0xcf, 0xc5, 0x02, 0x2c, 0x0a, 0x37, 0xe3, 0x43, 0x17, 0xaa, 0x6e, 0xe8, 0xb4, 0x98, 0xec, 0x9e, 0x37, 0x2e, 0x48, 0xe0, 0x51, 0x8a, 0x88, 0x59, 0x0c,
- /* (2^ 6)P */ 0x89, 0xd1, 0xb5, 0x99, 0xd6, 0xf1, 0xcb, 0xfb, 0x84, 0xdc, 0x9f, 0x8e, 0xd5, 0xf0, 0xae, 0xac, 0x14, 0x76, 0x1f, 0x23, 0x06, 0x0d, 0xc2, 0xc1, 0x72, 0xf9, 0x74, 0xa2, 0x8d, 0x21, 0x38, 0x29,
- /* (2^ 7)P */ 0x18, 0x7f, 0x1d, 0xff, 0xbe, 0x49, 0xaf, 0xf6, 0xc2, 0xc9, 0x7a, 0x38, 0x22, 0x1c, 0x54, 0xcc, 0x6b, 0xc5, 0x15, 0x40, 0xef, 0xc9, 0xfc, 0x96, 0xa9, 0x13, 0x09, 0x69, 0x7c, 0x62, 0xc1, 0x69,
- /* (2^ 8)P */ 0x0e, 0xdb, 0x33, 0x47, 0x2f, 0xfd, 0x86, 0x7a, 0xe9, 0x7d, 0x08, 0x9e, 0xf2, 0xc4, 0xb8, 0xfd, 0x29, 0xa2, 0xa2, 0x8e, 0x1a, 0x4b, 0x5e, 0x09, 0x79, 0x7a, 0xb3, 0x29, 0xc8, 0xa7, 0xd7, 0x1a,
- /* (2^ 9)P */ 0xc0, 0xa0, 0x7e, 0xd1, 0xca, 0x89, 0x2d, 0x34, 0x51, 0x20, 0xed, 0xcc, 0xa6, 0xdd, 0xbe, 0x67, 0x74, 0x2f, 0xb4, 0x2b, 0xbf, 0x31, 0xca, 0x19, 0xbb, 0xac, 0x80, 0x49, 0xc8, 0xb4, 0xf7, 0x3d,
- /* (2^ 10)P */ 0x83, 0xd8, 0x0a, 0xc8, 0x4d, 0x44, 0xc6, 0xa8, 0x85, 0xab, 0xe3, 0x66, 0x03, 0x44, 0x1e, 0xb9, 0xd8, 0xf6, 0x64, 0x01, 0xa0, 0xcd, 0x15, 0xc2, 0x68, 0xe6, 0x47, 0xf2, 0x6e, 0x7c, 0x86, 0x3d,
- /* (2^ 11)P */ 0x8c, 0x65, 0x3e, 0xcc, 0x2b, 0x58, 0xdd, 0xc7, 0x28, 0x55, 0x0e, 0xee, 0x48, 0x47, 0x2c, 0xfd, 0x71, 0x4f, 0x9f, 0xcc, 0x95, 0x9b, 0xfd, 0xa0, 0xdf, 0x5d, 0x67, 0xb0, 0x71, 0xd8, 0x29, 0x75,
- /* (2^ 12)P */ 0x78, 0xbd, 0x3c, 0x2d, 0xb4, 0x68, 0xf5, 0xb8, 0x82, 0xda, 0xf3, 0x91, 0x1b, 0x01, 0x33, 0x12, 0x62, 0x3b, 0x7c, 0x4a, 0xcd, 0x6c, 0xce, 0x2d, 0x03, 0x86, 0x49, 0x9e, 0x8e, 0xfc, 0xe7, 0x75,
- /* (2^ 13)P */ 0xec, 0xb6, 0xd0, 0xfc, 0xf1, 0x13, 0x4f, 0x2f, 0x45, 0x7a, 0xff, 0x29, 0x1f, 0xca, 0xa8, 0xf1, 0x9b, 0xe2, 0x81, 0x29, 0xa7, 0xc1, 0x49, 0xc2, 0x6a, 0xb5, 0x83, 0x8c, 0xbb, 0x0d, 0xbe, 0x6e,
- /* (2^ 14)P */ 0x22, 0xb2, 0x0b, 0x17, 0x8d, 0xfa, 0x14, 0x71, 0x5f, 0x93, 0x93, 0xbf, 0xd5, 0xdc, 0xa2, 0x65, 0x9a, 0x97, 0x9c, 0xb5, 0x68, 0x1f, 0xc4, 0xbd, 0x89, 0x92, 0xce, 0xa2, 0x79, 0xef, 0x0e, 0x2f,
- /* (2^ 15)P */ 0xce, 0x37, 0x3c, 0x08, 0x0c, 0xbf, 0xec, 0x42, 0x22, 0x63, 0x49, 0xec, 0x09, 0xbc, 0x30, 0x29, 0x0d, 0xac, 0xfe, 0x9c, 0xc1, 0xb0, 0x94, 0xf2, 0x80, 0xbb, 0xfa, 0xed, 0x4b, 0xaa, 0x80, 0x37,
- /* (2^ 16)P */ 0x29, 0xd9, 0xea, 0x7c, 0x3e, 0x7d, 0xc1, 0x56, 0xc5, 0x22, 0x57, 0x2e, 0xeb, 0x4b, 0xcb, 0xe7, 0x5a, 0xe1, 0xbf, 0x2d, 0x73, 0x31, 0xe9, 0x0c, 0xf8, 0x52, 0x10, 0x62, 0xc7, 0x83, 0xb8, 0x41,
- /* (2^ 17)P */ 0x50, 0x53, 0xd2, 0xc3, 0xa0, 0x5c, 0xf7, 0xdb, 0x51, 0xe3, 0xb1, 0x6e, 0x08, 0xbe, 0x36, 0x29, 0x12, 0xb2, 0xa9, 0xb4, 0x3c, 0xe0, 0x36, 0xc9, 0xaa, 0x25, 0x22, 0x32, 0x82, 0xbf, 0x45, 0x1d,
- /* (2^ 18)P */ 0xc5, 0x4c, 0x02, 0x6a, 0x03, 0xb1, 0x1a, 0xe8, 0x72, 0x9a, 0x4c, 0x30, 0x1c, 0x20, 0x12, 0xe2, 0xfc, 0xb1, 0x32, 0x68, 0xba, 0x3f, 0xd7, 0xc5, 0x81, 0x95, 0x83, 0x4d, 0x5a, 0xdb, 0xff, 0x20,
- /* (2^ 19)P */ 0xad, 0x0f, 0x5d, 0xbe, 0x67, 0xd3, 0x83, 0xa2, 0x75, 0x44, 0x16, 0x8b, 0xca, 0x25, 0x2b, 0x6c, 0x2e, 0xf2, 0xaa, 0x7c, 0x46, 0x35, 0x49, 0x9d, 0x49, 0xff, 0x85, 0xee, 0x8e, 0x40, 0x66, 0x51,
- /* (2^ 20)P */ 0x61, 0xe3, 0xb4, 0xfa, 0xa2, 0xba, 0x67, 0x3c, 0xef, 0x5c, 0xf3, 0x7e, 0xc6, 0x33, 0xe4, 0xb3, 0x1c, 0x9b, 0x15, 0x41, 0x92, 0x72, 0x59, 0x52, 0x33, 0xab, 0xb0, 0xd5, 0x92, 0x18, 0x62, 0x6a,
- /* (2^ 21)P */ 0xcb, 0xcd, 0x55, 0x75, 0x38, 0x4a, 0xb7, 0x20, 0x3f, 0x92, 0x08, 0x12, 0x0e, 0xa1, 0x2a, 0x53, 0xd1, 0x1d, 0x28, 0x62, 0x77, 0x7b, 0xa1, 0xea, 0xbf, 0x44, 0x5c, 0xf0, 0x43, 0x34, 0xab, 0x61,
- /* (2^ 22)P */ 0xf8, 0xde, 0x24, 0x23, 0x42, 0x6c, 0x7a, 0x25, 0x7f, 0xcf, 0xe3, 0x17, 0x10, 0x6c, 0x1c, 0x13, 0x57, 0xa2, 0x30, 0xf6, 0x39, 0x87, 0x75, 0x23, 0x80, 0x85, 0xa7, 0x01, 0x7a, 0x40, 0x5a, 0x29,
- /* (2^ 23)P */ 0xd9, 0xa8, 0x5d, 0x6d, 0x24, 0x43, 0xc4, 0xf8, 0x5d, 0xfa, 0x52, 0x0c, 0x45, 0x75, 0xd7, 0x19, 0x3d, 0xf8, 0x1b, 0x73, 0x92, 0xfc, 0xfc, 0x2a, 0x00, 0x47, 0x2b, 0x1b, 0xe8, 0xc8, 0x10, 0x7d,
- /* (2^ 24)P */ 0x0b, 0xa2, 0xba, 0x70, 0x1f, 0x27, 0xe0, 0xc8, 0x57, 0x39, 0xa6, 0x7c, 0x86, 0x48, 0x37, 0x99, 0xbb, 0xd4, 0x7e, 0xcb, 0xb3, 0xef, 0x12, 0x54, 0x75, 0x29, 0xe6, 0x73, 0x61, 0xd3, 0x96, 0x31,
- /* (2^ 25)P */ 0xfc, 0xdf, 0xc7, 0x41, 0xd1, 0xca, 0x5b, 0xde, 0x48, 0xc8, 0x95, 0xb3, 0xd2, 0x8c, 0xcc, 0x47, 0xcb, 0xf3, 0x1a, 0xe1, 0x42, 0xd9, 0x4c, 0xa3, 0xc2, 0xce, 0x4e, 0xd0, 0xf2, 0xdb, 0x56, 0x02,
- /* (2^ 26)P */ 0x7f, 0x66, 0x0e, 0x4b, 0xe9, 0xb7, 0x5a, 0x87, 0x10, 0x0d, 0x85, 0xc0, 0x83, 0xdd, 0xd4, 0xca, 0x9f, 0xc7, 0x72, 0x4e, 0x8f, 0x2e, 0xf1, 0x47, 0x9b, 0xb1, 0x85, 0x8c, 0xbb, 0x87, 0x1a, 0x5f,
- /* (2^ 27)P */ 0xb8, 0x51, 0x7f, 0x43, 0xb6, 0xd0, 0xe9, 0x7a, 0x65, 0x90, 0x87, 0x18, 0x55, 0xce, 0xc7, 0x12, 0xee, 0x7a, 0xf7, 0x5c, 0xfe, 0x09, 0xde, 0x2a, 0x27, 0x56, 0x2c, 0x7d, 0x2f, 0x5a, 0xa0, 0x23,
- /* (2^ 28)P */ 0x9a, 0x16, 0x7c, 0xf1, 0x28, 0xe1, 0x08, 0x59, 0x2d, 0x85, 0xd0, 0x8a, 0xdd, 0x98, 0x74, 0xf7, 0x64, 0x2f, 0x10, 0xab, 0xce, 0xc4, 0xb4, 0x74, 0x45, 0x98, 0x13, 0x10, 0xdd, 0xba, 0x3a, 0x18,
- /* (2^ 29)P */ 0xac, 0xaa, 0x92, 0xaa, 0x8d, 0xba, 0x65, 0xb1, 0x05, 0x67, 0x38, 0x99, 0x95, 0xef, 0xc5, 0xd5, 0xd1, 0x40, 0xfc, 0xf8, 0x0c, 0x8f, 0x2f, 0xbe, 0x14, 0x45, 0x20, 0xee, 0x35, 0xe6, 0x01, 0x27,
- /* (2^ 30)P */ 0x14, 0x65, 0x15, 0x20, 0x00, 0xa8, 0x9f, 0x62, 0xce, 0xc1, 0xa8, 0x64, 0x87, 0x86, 0x23, 0xf2, 0x0e, 0x06, 0x3f, 0x0b, 0xff, 0x4f, 0x89, 0x5b, 0xfa, 0xa3, 0x08, 0xf7, 0x4c, 0x94, 0xd9, 0x60,
- /* (2^ 31)P */ 0x1f, 0x20, 0x7a, 0x1c, 0x1a, 0x00, 0xea, 0xae, 0x63, 0xce, 0xe2, 0x3e, 0x63, 0x6a, 0xf1, 0xeb, 0xe1, 0x07, 0x7a, 0x4c, 0x59, 0x09, 0x77, 0x6f, 0xcb, 0x08, 0x02, 0x0d, 0x15, 0x58, 0xb9, 0x79,
- /* (2^ 32)P */ 0xe7, 0x10, 0xd4, 0x01, 0x53, 0x5e, 0xb5, 0x24, 0x4d, 0xc8, 0xfd, 0xf3, 0xdf, 0x4e, 0xa3, 0xe3, 0xd8, 0x32, 0x40, 0x90, 0xe4, 0x68, 0x87, 0xd8, 0xec, 0xae, 0x3a, 0x7b, 0x42, 0x84, 0x13, 0x13,
- /* (2^ 33)P */ 0x14, 0x4f, 0x23, 0x86, 0x12, 0xe5, 0x05, 0x84, 0x29, 0xc5, 0xb4, 0xad, 0x39, 0x47, 0xdc, 0x14, 0xfd, 0x4f, 0x63, 0x50, 0xb2, 0xb5, 0xa2, 0xb8, 0x93, 0xff, 0xa7, 0xd8, 0x4a, 0xa9, 0xe2, 0x2f,
- /* (2^ 34)P */ 0xdd, 0xfa, 0x43, 0xe8, 0xef, 0x57, 0x5c, 0xec, 0x18, 0x99, 0xbb, 0xf0, 0x40, 0xce, 0x43, 0x28, 0x05, 0x63, 0x3d, 0xcf, 0xd6, 0x61, 0xb5, 0xa4, 0x7e, 0x77, 0xfb, 0xe8, 0xbd, 0x29, 0x36, 0x74,
- /* (2^ 35)P */ 0x8f, 0x73, 0xaf, 0xbb, 0x46, 0xdd, 0x3e, 0x34, 0x51, 0xa6, 0x01, 0xb1, 0x28, 0x18, 0x98, 0xed, 0x7a, 0x79, 0x2c, 0x88, 0x0b, 0x76, 0x01, 0xa4, 0x30, 0x87, 0xc8, 0x8d, 0xe2, 0x23, 0xc2, 0x1f,
- /* (2^ 36)P */ 0x0e, 0xba, 0x0f, 0xfc, 0x91, 0x4e, 0x60, 0x48, 0xa4, 0x6f, 0x2c, 0x05, 0x8f, 0xf7, 0x37, 0xb6, 0x9c, 0x23, 0xe9, 0x09, 0x3d, 0xac, 0xcc, 0x91, 0x7c, 0x68, 0x7a, 0x43, 0xd4, 0xee, 0xf7, 0x23,
- /* (2^ 37)P */ 0x00, 0xd8, 0x9b, 0x8d, 0x11, 0xb1, 0x73, 0x51, 0xa7, 0xd4, 0x89, 0x31, 0xb6, 0x41, 0xd6, 0x29, 0x86, 0xc5, 0xbb, 0x88, 0x79, 0x17, 0xbf, 0xfd, 0xf5, 0x1d, 0xd8, 0xca, 0x4f, 0x89, 0x59, 0x29,
- /* (2^ 38)P */ 0x99, 0xc8, 0xbb, 0xb4, 0xf3, 0x8e, 0xbc, 0xae, 0xb9, 0x92, 0x69, 0xb2, 0x5a, 0x99, 0x48, 0x41, 0xfb, 0x2c, 0xf9, 0x34, 0x01, 0x0b, 0xe2, 0x24, 0xe8, 0xde, 0x05, 0x4a, 0x89, 0x58, 0xd1, 0x40,
- /* (2^ 39)P */ 0xf6, 0x76, 0xaf, 0x85, 0x11, 0x0b, 0xb0, 0x46, 0x79, 0x7a, 0x18, 0x73, 0x78, 0xc7, 0xba, 0x26, 0x5f, 0xff, 0x8f, 0xab, 0x95, 0xbf, 0xc0, 0x3d, 0xd7, 0x24, 0x55, 0x94, 0xd8, 0x8b, 0x60, 0x2a,
- /* (2^ 40)P */ 0x02, 0x63, 0x44, 0xbd, 0x88, 0x95, 0x44, 0x26, 0x9c, 0x43, 0x88, 0x03, 0x1c, 0xc2, 0x4b, 0x7c, 0xb2, 0x11, 0xbd, 0x83, 0xf3, 0xa4, 0x98, 0x8e, 0xb9, 0x76, 0xd8, 0xc9, 0x7b, 0x8d, 0x21, 0x26,
- /* (2^ 41)P */ 0x8a, 0x17, 0x7c, 0x99, 0x42, 0x15, 0x08, 0xe3, 0x6f, 0x60, 0xb6, 0x6f, 0xa8, 0x29, 0x2d, 0x3c, 0x74, 0x93, 0x27, 0xfa, 0x36, 0x77, 0x21, 0x5c, 0xfa, 0xb1, 0xfe, 0x4a, 0x73, 0x05, 0xde, 0x7d,
- /* (2^ 42)P */ 0xab, 0x2b, 0xd4, 0x06, 0x39, 0x0e, 0xf1, 0x3b, 0x9c, 0x64, 0x80, 0x19, 0x3e, 0x80, 0xf7, 0xe4, 0x7a, 0xbf, 0x95, 0x95, 0xf8, 0x3b, 0x05, 0xe6, 0x30, 0x55, 0x24, 0xda, 0x38, 0xaf, 0x4f, 0x39,
- /* (2^ 43)P */ 0xf4, 0x28, 0x69, 0x89, 0x58, 0xfb, 0x8e, 0x7a, 0x3c, 0x11, 0x6a, 0xcc, 0xe9, 0x78, 0xc7, 0xfb, 0x6f, 0x59, 0xaf, 0x30, 0xe3, 0x0c, 0x67, 0x72, 0xf7, 0x6c, 0x3d, 0x1d, 0xa8, 0x22, 0xf2, 0x48,
- /* (2^ 44)P */ 0xa7, 0xca, 0x72, 0x0d, 0x41, 0xce, 0x1f, 0xf0, 0x95, 0x55, 0x3b, 0x21, 0xc7, 0xec, 0x20, 0x5a, 0x83, 0x14, 0xfa, 0xc1, 0x65, 0x11, 0xc2, 0x7b, 0x41, 0xa7, 0xa8, 0x1d, 0xe3, 0x9a, 0xf8, 0x07,
- /* (2^ 45)P */ 0xf9, 0x0f, 0x83, 0xc6, 0xb4, 0xc2, 0xd2, 0x05, 0x93, 0x62, 0x31, 0xc6, 0x0f, 0x33, 0x3e, 0xd4, 0x04, 0xa9, 0xd3, 0x96, 0x0a, 0x59, 0xa5, 0xa5, 0xb6, 0x33, 0x53, 0xa6, 0x91, 0xdb, 0x5e, 0x70,
- /* (2^ 46)P */ 0xf7, 0xa5, 0xb9, 0x0b, 0x5e, 0xe1, 0x8e, 0x04, 0x5d, 0xaf, 0x0a, 0x9e, 0xca, 0xcf, 0x40, 0x32, 0x0b, 0xa4, 0xc4, 0xed, 0xce, 0x71, 0x4b, 0x8f, 0x6d, 0x4a, 0x54, 0xde, 0xa3, 0x0d, 0x1c, 0x62,
- /* (2^ 47)P */ 0x91, 0x40, 0x8c, 0xa0, 0x36, 0x28, 0x87, 0x92, 0x45, 0x14, 0xc9, 0x10, 0xb0, 0x75, 0x83, 0xce, 0x94, 0x63, 0x27, 0x4f, 0x52, 0xeb, 0x72, 0x8a, 0x35, 0x36, 0xc8, 0x7e, 0xfa, 0xfc, 0x67, 0x26,
- /* (2^ 48)P */ 0x2a, 0x75, 0xe8, 0x45, 0x33, 0x17, 0x4c, 0x7f, 0xa5, 0x79, 0x70, 0xee, 0xfe, 0x47, 0x1b, 0x06, 0x34, 0xff, 0x86, 0x9f, 0xfa, 0x9a, 0xdd, 0x25, 0x9c, 0xc8, 0x5d, 0x42, 0xf5, 0xce, 0x80, 0x37,
- /* (2^ 49)P */ 0xe9, 0xb4, 0x3b, 0x51, 0x5a, 0x03, 0x46, 0x1a, 0xda, 0x5a, 0x57, 0xac, 0x79, 0xf3, 0x1e, 0x3e, 0x50, 0x4b, 0xa2, 0x5f, 0x1c, 0x5f, 0x8c, 0xc7, 0x22, 0x9f, 0xfd, 0x34, 0x76, 0x96, 0x1a, 0x32,
- /* (2^ 50)P */ 0xfa, 0x27, 0x6e, 0x82, 0xb8, 0x07, 0x67, 0x94, 0xd0, 0x6f, 0x50, 0x4c, 0xd6, 0x84, 0xca, 0x3d, 0x36, 0x14, 0xe9, 0x75, 0x80, 0x21, 0x89, 0xc1, 0x84, 0x84, 0x3b, 0x9b, 0x16, 0x84, 0x92, 0x6d,
- /* (2^ 51)P */ 0xdf, 0x2d, 0x3f, 0x38, 0x40, 0xe8, 0x67, 0x3a, 0x75, 0x9b, 0x4f, 0x0c, 0xa3, 0xc9, 0xee, 0x33, 0x47, 0xef, 0x83, 0xa7, 0x6f, 0xc8, 0xc7, 0x3e, 0xc4, 0xfb, 0xc9, 0xba, 0x9f, 0x44, 0xec, 0x26,
- /* (2^ 52)P */ 0x7d, 0x9e, 0x9b, 0xa0, 0xcb, 0x38, 0x0f, 0x5c, 0x8c, 0x47, 0xa3, 0x62, 0xc7, 0x8c, 0x16, 0x81, 0x1c, 0x12, 0xfc, 0x06, 0xd3, 0xb0, 0x23, 0x3e, 0xdd, 0xdc, 0xef, 0xa5, 0xa0, 0x8a, 0x23, 0x5a,
- /* (2^ 53)P */ 0xff, 0x43, 0xea, 0xc4, 0x21, 0x61, 0xa2, 0x1b, 0xb5, 0x32, 0x88, 0x7c, 0x7f, 0xc7, 0xf8, 0x36, 0x9a, 0xf9, 0xdc, 0x0a, 0x0b, 0xea, 0xfb, 0x88, 0xf9, 0xeb, 0x5b, 0xc2, 0x8e, 0x93, 0xa9, 0x5c,
- /* (2^ 54)P */ 0xa0, 0xcd, 0xfc, 0x51, 0x5e, 0x6a, 0x43, 0xd5, 0x3b, 0x89, 0xcd, 0xc2, 0x97, 0x47, 0xbc, 0x1d, 0x08, 0x4a, 0x22, 0xd3, 0x65, 0x6a, 0x34, 0x19, 0x66, 0xf4, 0x9a, 0x9b, 0xe4, 0x34, 0x50, 0x0f,
- /* (2^ 55)P */ 0x6e, 0xb9, 0xe0, 0xa1, 0x67, 0x39, 0x3c, 0xf2, 0x88, 0x4d, 0x7a, 0x86, 0xfa, 0x08, 0x8b, 0xe5, 0x79, 0x16, 0x34, 0xa7, 0xc6, 0xab, 0x2f, 0xfb, 0x46, 0x69, 0x02, 0xb6, 0x1e, 0x38, 0x75, 0x2a,
- /* (2^ 56)P */ 0xac, 0x20, 0x94, 0xc1, 0xe4, 0x3b, 0x0a, 0xc8, 0xdc, 0xb6, 0xf2, 0x81, 0xc6, 0xf6, 0xb1, 0x66, 0x88, 0x33, 0xe9, 0x61, 0x67, 0x03, 0xf7, 0x7c, 0xc4, 0xa4, 0x60, 0xa6, 0xd8, 0xbb, 0xab, 0x25,
- /* (2^ 57)P */ 0x98, 0x51, 0xfd, 0x14, 0xba, 0x12, 0xea, 0x91, 0xa9, 0xff, 0x3c, 0x4a, 0xfc, 0x50, 0x49, 0x68, 0x28, 0xad, 0xf5, 0x30, 0x21, 0x84, 0x26, 0xf8, 0x41, 0xa4, 0x01, 0x53, 0xf7, 0x88, 0xa9, 0x3e,
- /* (2^ 58)P */ 0x6f, 0x8c, 0x5f, 0x69, 0x9a, 0x10, 0x78, 0xc9, 0xf3, 0xc3, 0x30, 0x05, 0x4a, 0xeb, 0x46, 0x17, 0x95, 0x99, 0x45, 0xb4, 0x77, 0x6d, 0x4d, 0x44, 0xc7, 0x5c, 0x4e, 0x05, 0x8c, 0x2b, 0x95, 0x75,
- /* (2^ 59)P */ 0xaa, 0xd6, 0xf4, 0x15, 0x79, 0x3f, 0x70, 0xa3, 0xd8, 0x47, 0x26, 0x2f, 0x20, 0x46, 0xc3, 0x66, 0x4b, 0x64, 0x1d, 0x81, 0xdf, 0x69, 0x14, 0xd0, 0x1f, 0xd7, 0xa5, 0x81, 0x7d, 0xa4, 0xfe, 0x77,
- /* (2^ 60)P */ 0x81, 0xa3, 0x7c, 0xf5, 0x9e, 0x52, 0xe9, 0xc5, 0x1a, 0x88, 0x2f, 0xce, 0xb9, 0xb4, 0xee, 0x6e, 0xd6, 0x9b, 0x00, 0xe8, 0x28, 0x1a, 0xe9, 0xb6, 0xec, 0x3f, 0xfc, 0x9a, 0x3e, 0xbe, 0x80, 0x4b,
- /* (2^ 61)P */ 0xc5, 0xd2, 0xae, 0x26, 0xc5, 0x73, 0x37, 0x7e, 0x9d, 0xa4, 0xc9, 0x53, 0xb4, 0xfc, 0x4a, 0x1b, 0x4d, 0xb2, 0xff, 0xba, 0xd7, 0xbd, 0x20, 0xa9, 0x0e, 0x40, 0x2d, 0x12, 0x9f, 0x69, 0x54, 0x7c,
- /* (2^ 62)P */ 0xc8, 0x4b, 0xa9, 0x4f, 0xe1, 0xc8, 0x46, 0xef, 0x5e, 0xed, 0x52, 0x29, 0xce, 0x74, 0xb0, 0xe0, 0xd5, 0x85, 0xd8, 0xdb, 0xe1, 0x50, 0xa4, 0xbe, 0x2c, 0x71, 0x0f, 0x32, 0x49, 0x86, 0xb6, 0x61,
- /* (2^ 63)P */ 0xd1, 0xbd, 0xcc, 0x09, 0x73, 0x5f, 0x48, 0x8a, 0x2d, 0x1a, 0x4d, 0x7d, 0x0d, 0x32, 0x06, 0xbd, 0xf4, 0xbe, 0x2d, 0x32, 0x73, 0x29, 0x23, 0x25, 0x70, 0xf7, 0x17, 0x8c, 0x75, 0xc4, 0x5d, 0x44,
- /* (2^ 64)P */ 0x3c, 0x93, 0xc8, 0x7c, 0x17, 0x34, 0x04, 0xdb, 0x9f, 0x05, 0xea, 0x75, 0x21, 0xe8, 0x6f, 0xed, 0x34, 0xdb, 0x53, 0xc0, 0xfd, 0xbe, 0xfe, 0x1e, 0x99, 0xaf, 0x5d, 0xc6, 0x67, 0xe8, 0xdb, 0x4a,
- /* (2^ 65)P */ 0xdf, 0x09, 0x06, 0xa9, 0xa2, 0x71, 0xcd, 0x3a, 0x50, 0x40, 0xd0, 0x6d, 0x85, 0x91, 0xe9, 0xe5, 0x3c, 0xc2, 0x57, 0x81, 0x68, 0x9b, 0xc6, 0x1e, 0x4d, 0xfe, 0x5c, 0x88, 0xf6, 0x27, 0x74, 0x69,
- /* (2^ 66)P */ 0x51, 0xa8, 0xe1, 0x65, 0x9b, 0x7b, 0xbe, 0xd7, 0xdd, 0x36, 0xc5, 0x22, 0xd5, 0x28, 0x3d, 0xa0, 0x45, 0xb6, 0xd2, 0x8f, 0x65, 0x9d, 0x39, 0x28, 0xe1, 0x41, 0x26, 0x7c, 0xe1, 0xb7, 0xe5, 0x49,
- /* (2^ 67)P */ 0xa4, 0x57, 0x04, 0x70, 0x98, 0x3a, 0x8c, 0x6f, 0x78, 0x67, 0xbb, 0x5e, 0xa2, 0xf0, 0x78, 0x50, 0x0f, 0x96, 0x82, 0xc3, 0xcb, 0x3c, 0x3c, 0xd1, 0xb1, 0x84, 0xdf, 0xa7, 0x58, 0x32, 0x00, 0x2e,
- /* (2^ 68)P */ 0x1c, 0x6a, 0x29, 0xe6, 0x9b, 0xf3, 0xd1, 0x8a, 0xb2, 0xbf, 0x5f, 0x2a, 0x65, 0xaa, 0xee, 0xc1, 0xcb, 0xf3, 0x26, 0xfd, 0x73, 0x06, 0xee, 0x33, 0xcc, 0x2c, 0x9d, 0xa6, 0x73, 0x61, 0x25, 0x59,
- /* (2^ 69)P */ 0x41, 0xfc, 0x18, 0x4e, 0xaa, 0x07, 0xea, 0x41, 0x1e, 0xa5, 0x87, 0x7c, 0x52, 0x19, 0xfc, 0xd9, 0x6f, 0xca, 0x31, 0x58, 0x80, 0xcb, 0xaa, 0xbd, 0x4f, 0x69, 0x16, 0xc9, 0x2d, 0x65, 0x5b, 0x44,
- /* (2^ 70)P */ 0x15, 0x23, 0x17, 0xf2, 0xa7, 0xa3, 0x92, 0xce, 0x64, 0x99, 0x1b, 0xe1, 0x2d, 0x28, 0xdc, 0x1e, 0x4a, 0x31, 0x4c, 0xe0, 0xaf, 0x3a, 0x82, 0xa1, 0x86, 0xf5, 0x7c, 0x43, 0x94, 0x2d, 0x0a, 0x79,
- /* (2^ 71)P */ 0x09, 0xe0, 0xf6, 0x93, 0xfb, 0x47, 0xc4, 0x71, 0x76, 0x52, 0x84, 0x22, 0x67, 0xa5, 0x22, 0x89, 0x69, 0x51, 0x4f, 0x20, 0x3b, 0x90, 0x70, 0xbf, 0xfe, 0x19, 0xa3, 0x1b, 0x89, 0x89, 0x7a, 0x2f,
- /* (2^ 72)P */ 0x0c, 0x14, 0xe2, 0x77, 0xb5, 0x8e, 0xa0, 0x02, 0xf4, 0xdc, 0x7b, 0x42, 0xd4, 0x4e, 0x9a, 0xed, 0xd1, 0x3c, 0x32, 0xe4, 0x44, 0xec, 0x53, 0x52, 0x5b, 0x35, 0xe9, 0x14, 0x3c, 0x36, 0x88, 0x3e,
- /* (2^ 73)P */ 0x8c, 0x0b, 0x11, 0x77, 0x42, 0xc1, 0x66, 0xaa, 0x90, 0x33, 0xa2, 0x10, 0x16, 0x39, 0xe0, 0x1a, 0xa2, 0xc2, 0x3f, 0xc9, 0x12, 0xbd, 0x30, 0x20, 0xab, 0xc7, 0x55, 0x95, 0x57, 0x41, 0xe1, 0x3e,
- /* (2^ 74)P */ 0x41, 0x7d, 0x6e, 0x6d, 0x3a, 0xde, 0x14, 0x92, 0xfe, 0x7e, 0xf1, 0x07, 0x86, 0xd8, 0xcd, 0x3c, 0x17, 0x12, 0xe1, 0xf8, 0x88, 0x12, 0x4f, 0x67, 0xd0, 0x93, 0x9f, 0x32, 0x0f, 0x25, 0x82, 0x56,
- /* (2^ 75)P */ 0x6e, 0x39, 0x2e, 0x6d, 0x13, 0x0b, 0xf0, 0x6c, 0xbf, 0xde, 0x14, 0x10, 0x6f, 0xf8, 0x4c, 0x6e, 0x83, 0x4e, 0xcc, 0xbf, 0xb5, 0xb1, 0x30, 0x59, 0xb6, 0x16, 0xba, 0x8a, 0xb4, 0x69, 0x70, 0x04,
- /* (2^ 76)P */ 0x93, 0x07, 0xb2, 0x69, 0xab, 0xe4, 0x4c, 0x0d, 0x9e, 0xfb, 0xd0, 0x97, 0x1a, 0xb9, 0x4d, 0xb2, 0x1d, 0xd0, 0x00, 0x4e, 0xf5, 0x50, 0xfa, 0xcd, 0xb5, 0xdd, 0x8b, 0x36, 0x85, 0x10, 0x1b, 0x22,
- /* (2^ 77)P */ 0xd2, 0xd8, 0xe3, 0xb1, 0x68, 0x94, 0xe5, 0xe7, 0x93, 0x2f, 0x12, 0xbd, 0x63, 0x65, 0xc5, 0x53, 0x09, 0x3f, 0x66, 0xe0, 0x03, 0xa9, 0xe8, 0xee, 0x42, 0x3d, 0xbe, 0xcb, 0x62, 0xa6, 0xef, 0x61,
- /* (2^ 78)P */ 0x2a, 0xab, 0x6e, 0xde, 0xdd, 0xdd, 0xf8, 0x2c, 0x31, 0xf2, 0x35, 0x14, 0xd5, 0x0a, 0xf8, 0x9b, 0x73, 0x49, 0xf0, 0xc9, 0xce, 0xda, 0xea, 0x5d, 0x27, 0x9b, 0xd2, 0x41, 0x5d, 0x5b, 0x27, 0x29,
- /* (2^ 79)P */ 0x4f, 0xf1, 0xeb, 0x95, 0x08, 0x0f, 0xde, 0xcf, 0xa7, 0x05, 0x49, 0x05, 0x6b, 0xb9, 0xaa, 0xb9, 0xfd, 0x20, 0xc4, 0xa1, 0xd9, 0x0d, 0xe8, 0xca, 0xc7, 0xbb, 0x73, 0x16, 0x2f, 0xbf, 0x63, 0x0a,
- /* (2^ 80)P */ 0x8c, 0xbc, 0x8f, 0x95, 0x11, 0x6e, 0x2f, 0x09, 0xad, 0x2f, 0x82, 0x04, 0xe8, 0x81, 0x2a, 0x67, 0x17, 0x25, 0xd5, 0x60, 0x15, 0x35, 0xc8, 0xca, 0xf8, 0x92, 0xf1, 0xc8, 0x22, 0x77, 0x3f, 0x6f,
- /* (2^ 81)P */ 0xb7, 0x94, 0xe8, 0xc2, 0xcc, 0x90, 0xba, 0xf8, 0x0d, 0x9f, 0xff, 0x38, 0xa4, 0x57, 0x75, 0x2c, 0x59, 0x23, 0xe5, 0x5a, 0x85, 0x1d, 0x4d, 0x89, 0x69, 0x3d, 0x74, 0x7b, 0x15, 0x22, 0xe1, 0x68,
- /* (2^ 82)P */ 0xf3, 0x19, 0xb9, 0xcf, 0x70, 0x55, 0x7e, 0xd8, 0xb9, 0x8d, 0x79, 0x95, 0xcd, 0xde, 0x2c, 0x3f, 0xce, 0xa2, 0xc0, 0x10, 0x47, 0x15, 0x21, 0x21, 0xb2, 0xc5, 0x6d, 0x24, 0x15, 0xa1, 0x66, 0x3c,
- /* (2^ 83)P */ 0x72, 0xcb, 0x4e, 0x29, 0x62, 0xc5, 0xed, 0xcb, 0x16, 0x0b, 0x28, 0x6a, 0xc3, 0x43, 0x71, 0xba, 0x67, 0x8b, 0x07, 0xd4, 0xef, 0xc2, 0x10, 0x96, 0x1e, 0x4b, 0x6a, 0x94, 0x5d, 0x73, 0x44, 0x61,
- /* (2^ 84)P */ 0x50, 0x33, 0x5b, 0xd7, 0x1e, 0x11, 0x6f, 0x53, 0x1b, 0xd8, 0x41, 0x20, 0x8c, 0xdb, 0x11, 0x02, 0x3c, 0x41, 0x10, 0x0e, 0x00, 0xb1, 0x3c, 0xf9, 0x76, 0x88, 0x9e, 0x03, 0x3c, 0xfd, 0x9d, 0x14,
- /* (2^ 85)P */ 0x5b, 0x15, 0x63, 0x6b, 0xe4, 0xdd, 0x79, 0xd4, 0x76, 0x79, 0x83, 0x3c, 0xe9, 0x15, 0x6e, 0xb6, 0x38, 0xe0, 0x13, 0x1f, 0x3b, 0xe4, 0xfd, 0xda, 0x35, 0x0b, 0x4b, 0x2e, 0x1a, 0xda, 0xaf, 0x5f,
- /* (2^ 86)P */ 0x81, 0x75, 0x19, 0x17, 0xdf, 0xbb, 0x00, 0x36, 0xc2, 0xd2, 0x3c, 0xbe, 0x0b, 0x05, 0x72, 0x39, 0x86, 0xbe, 0xd5, 0xbd, 0x6d, 0x90, 0x38, 0x59, 0x0f, 0x86, 0x9b, 0x3f, 0xe4, 0xe5, 0xfc, 0x34,
- /* (2^ 87)P */ 0x02, 0x4d, 0xd1, 0x42, 0xcd, 0xa4, 0xa8, 0x75, 0x65, 0xdf, 0x41, 0x34, 0xc5, 0xab, 0x8d, 0x82, 0xd3, 0x31, 0xe1, 0xd2, 0xed, 0xab, 0xdc, 0x33, 0x5f, 0xd2, 0x14, 0xb8, 0x6f, 0xd7, 0xba, 0x3e,
- /* (2^ 88)P */ 0x0f, 0xe1, 0x70, 0x6f, 0x56, 0x6f, 0x90, 0xd4, 0x5a, 0x0f, 0x69, 0x51, 0xaa, 0xf7, 0x12, 0x5d, 0xf2, 0xfc, 0xce, 0x76, 0x6e, 0xb1, 0xad, 0x45, 0x99, 0x29, 0x23, 0xad, 0xae, 0x68, 0xf7, 0x01,
- /* (2^ 89)P */ 0xbd, 0xfe, 0x48, 0x62, 0x7b, 0xc7, 0x6c, 0x2b, 0xfd, 0xaf, 0x3a, 0xec, 0x28, 0x06, 0xd3, 0x3c, 0x6a, 0x48, 0xef, 0xd4, 0x80, 0x0b, 0x1c, 0xce, 0x23, 0x6c, 0xf6, 0xa6, 0x2e, 0xff, 0x3b, 0x4c,
- /* (2^ 90)P */ 0x5f, 0xeb, 0xea, 0x4a, 0x09, 0xc4, 0x2e, 0x3f, 0xa7, 0x2c, 0x37, 0x6e, 0x28, 0x9b, 0xb1, 0x61, 0x1d, 0x70, 0x2a, 0xde, 0x66, 0xa9, 0xef, 0x5e, 0xef, 0xe3, 0x55, 0xde, 0x65, 0x05, 0xb2, 0x23,
- /* (2^ 91)P */ 0x57, 0x85, 0xd5, 0x79, 0x52, 0xca, 0x01, 0xe3, 0x4f, 0x87, 0xc2, 0x27, 0xce, 0xd4, 0xb2, 0x07, 0x67, 0x1d, 0xcf, 0x9d, 0x8a, 0xcd, 0x32, 0xa5, 0x56, 0xff, 0x2b, 0x3f, 0xe2, 0xfe, 0x52, 0x2a,
- /* (2^ 92)P */ 0x3d, 0x66, 0xd8, 0x7c, 0xb3, 0xef, 0x24, 0x86, 0x94, 0x75, 0xbd, 0xff, 0x20, 0xac, 0xc7, 0xbb, 0x45, 0x74, 0xd3, 0x82, 0x9c, 0x5e, 0xb8, 0x57, 0x66, 0xec, 0xa6, 0x86, 0xcb, 0x52, 0x30, 0x7b,
- /* (2^ 93)P */ 0x1e, 0xe9, 0x25, 0x25, 0xad, 0xf0, 0x82, 0x34, 0xa0, 0xdc, 0x8e, 0xd2, 0x43, 0x80, 0xb6, 0x2c, 0x3a, 0x00, 0x1b, 0x2e, 0x05, 0x6d, 0x4f, 0xaf, 0x0a, 0x1b, 0x78, 0x29, 0x25, 0x8c, 0x5f, 0x18,
- /* (2^ 94)P */ 0xd6, 0xe0, 0x0c, 0xd8, 0x5b, 0xde, 0x41, 0xaa, 0xd6, 0xe9, 0x53, 0x68, 0x41, 0xb2, 0x07, 0x94, 0x3a, 0x4c, 0x7f, 0x35, 0x6e, 0xc3, 0x3e, 0x56, 0xce, 0x7b, 0x29, 0x0e, 0xdd, 0xb8, 0xc4, 0x4c,
- /* (2^ 95)P */ 0x0e, 0x73, 0xb8, 0xff, 0x52, 0x1a, 0xfc, 0xa2, 0x37, 0x8e, 0x05, 0x67, 0x6e, 0xf1, 0x11, 0x18, 0xe1, 0x4e, 0xdf, 0xcd, 0x66, 0xa3, 0xf9, 0x10, 0x99, 0xf0, 0xb9, 0xa0, 0xc4, 0xa0, 0xf4, 0x72,
- /* (2^ 96)P */ 0xa7, 0x4e, 0x3f, 0x66, 0x6f, 0xc0, 0x16, 0x8c, 0xba, 0x0f, 0x97, 0x4e, 0xf7, 0x3a, 0x3b, 0x69, 0x45, 0xc3, 0x9e, 0xd6, 0xf1, 0xe7, 0x02, 0x21, 0x89, 0x80, 0x8a, 0x96, 0xbc, 0x3c, 0xa5, 0x0b,
- /* (2^ 97)P */ 0x37, 0x55, 0xa1, 0xfe, 0xc7, 0x9d, 0x3d, 0xca, 0x93, 0x64, 0x53, 0x51, 0xbb, 0x24, 0x68, 0x4c, 0xb1, 0x06, 0x40, 0x84, 0x14, 0x63, 0x88, 0xb9, 0x60, 0xcc, 0x54, 0xb4, 0x2a, 0xa7, 0xd2, 0x40,
- /* (2^ 98)P */ 0x75, 0x09, 0x57, 0x12, 0xb7, 0xa1, 0x36, 0x59, 0x57, 0xa6, 0xbd, 0xde, 0x48, 0xd6, 0xb9, 0x91, 0xea, 0x30, 0x43, 0xb6, 0x4b, 0x09, 0x44, 0x33, 0xd0, 0x51, 0xee, 0x12, 0x0d, 0xa1, 0x6b, 0x00,
- /* (2^ 99)P */ 0x58, 0x5d, 0xde, 0xf5, 0x68, 0x84, 0x22, 0x19, 0xb0, 0x05, 0xcc, 0x38, 0x4c, 0x2f, 0xb1, 0x0e, 0x90, 0x19, 0x60, 0xd5, 0x9d, 0x9f, 0x03, 0xa1, 0x0b, 0x0e, 0xff, 0x4f, 0xce, 0xd4, 0x02, 0x45,
- /* (2^100)P */ 0x89, 0xc1, 0x37, 0x68, 0x10, 0x54, 0x20, 0xeb, 0x3c, 0xb9, 0xd3, 0x6d, 0x4c, 0x54, 0xf6, 0xd0, 0x4f, 0xd7, 0x16, 0xc4, 0x64, 0x70, 0x72, 0x40, 0xf0, 0x2e, 0x50, 0x4b, 0x11, 0xc6, 0x15, 0x6e,
- /* (2^101)P */ 0x6b, 0xa7, 0xb1, 0xcf, 0x98, 0xa3, 0xf2, 0x4d, 0xb1, 0xf6, 0xf2, 0x19, 0x74, 0x6c, 0x25, 0x11, 0x43, 0x60, 0x6e, 0x06, 0x62, 0x79, 0x49, 0x4a, 0x44, 0x5b, 0x35, 0x41, 0xab, 0x3a, 0x5b, 0x70,
- /* (2^102)P */ 0xd8, 0xb1, 0x97, 0xd7, 0x36, 0xf5, 0x5e, 0x36, 0xdb, 0xf0, 0xdd, 0x22, 0xd6, 0x6b, 0x07, 0x00, 0x88, 0x5a, 0x57, 0xe0, 0xb0, 0x33, 0xbf, 0x3b, 0x4d, 0xca, 0xe4, 0xc8, 0x05, 0xaa, 0x77, 0x37,
- /* (2^103)P */ 0x5f, 0xdb, 0x78, 0x55, 0xc8, 0x45, 0x27, 0x39, 0xe2, 0x5a, 0xae, 0xdb, 0x49, 0x41, 0xda, 0x6f, 0x67, 0x98, 0xdc, 0x8a, 0x0b, 0xb0, 0xf0, 0xb1, 0xa3, 0x1d, 0x6f, 0xd3, 0x37, 0x34, 0x96, 0x09,
- /* (2^104)P */ 0x53, 0x38, 0xdc, 0xa5, 0x90, 0x4e, 0x82, 0x7e, 0xbd, 0x5c, 0x13, 0x1f, 0x64, 0xf6, 0xb5, 0xcc, 0xcc, 0x8f, 0xce, 0x87, 0x6c, 0xd8, 0x36, 0x67, 0x9f, 0x24, 0x04, 0x66, 0xe2, 0x3c, 0x5f, 0x62,
- /* (2^105)P */ 0x3f, 0xf6, 0x02, 0x95, 0x05, 0xc8, 0x8a, 0xaf, 0x69, 0x14, 0x35, 0x2e, 0x0a, 0xe7, 0x05, 0x0c, 0x05, 0x63, 0x4b, 0x76, 0x9c, 0x2e, 0x29, 0x35, 0xc3, 0x3a, 0xe2, 0xc7, 0x60, 0x43, 0x39, 0x1a,
- /* (2^106)P */ 0x64, 0x32, 0x18, 0x51, 0x32, 0xd5, 0xc6, 0xd5, 0x4f, 0xb7, 0xc2, 0x43, 0xbd, 0x5a, 0x06, 0x62, 0x9b, 0x3f, 0x97, 0x3b, 0xd0, 0xf5, 0xfb, 0xb5, 0x5e, 0x6e, 0x20, 0x61, 0x36, 0xda, 0xa3, 0x13,
- /* (2^107)P */ 0xe5, 0x94, 0x5d, 0x72, 0x37, 0x58, 0xbd, 0xc6, 0xc5, 0x16, 0x50, 0x20, 0x12, 0x09, 0xe3, 0x18, 0x68, 0x3c, 0x03, 0x70, 0x15, 0xce, 0x88, 0x20, 0x87, 0x79, 0x83, 0x5c, 0x49, 0x1f, 0xba, 0x7f,
- /* (2^108)P */ 0x9d, 0x07, 0xf9, 0xf2, 0x23, 0x74, 0x8c, 0x5a, 0xc5, 0x3f, 0x02, 0x34, 0x7b, 0x15, 0x35, 0x17, 0x51, 0xb3, 0xfa, 0xd2, 0x9a, 0xb4, 0xf9, 0xe4, 0x3c, 0xe3, 0x78, 0xc8, 0x72, 0xff, 0x91, 0x66,
- /* (2^109)P */ 0x3e, 0xff, 0x5e, 0xdc, 0xde, 0x2a, 0x2c, 0x12, 0xf4, 0x6c, 0x95, 0xd8, 0xf1, 0x4b, 0xdd, 0xf8, 0xda, 0x5b, 0x9e, 0x9e, 0x5d, 0x20, 0x86, 0xeb, 0x43, 0xc7, 0x75, 0xd9, 0xb9, 0x92, 0x9b, 0x04,
- /* (2^110)P */ 0x5a, 0xc0, 0xf6, 0xb0, 0x30, 0x97, 0x37, 0xa5, 0x53, 0xa5, 0xf3, 0xc6, 0xac, 0xff, 0xa0, 0x72, 0x6d, 0xcd, 0x0d, 0xb2, 0x34, 0x2c, 0x03, 0xb0, 0x4a, 0x16, 0xd5, 0x88, 0xbc, 0x9d, 0x0e, 0x47,
- /* (2^111)P */ 0x47, 0xc0, 0x37, 0xa2, 0x0c, 0xf1, 0x9c, 0xb1, 0xa2, 0x81, 0x6c, 0x1f, 0x71, 0x66, 0x54, 0xb6, 0x43, 0x0b, 0xd8, 0x6d, 0xd1, 0x1b, 0x32, 0xb3, 0x8e, 0xbe, 0x5f, 0x0c, 0x60, 0x4f, 0xc1, 0x48,
- /* (2^112)P */ 0x03, 0xc8, 0xa6, 0x4a, 0x26, 0x1c, 0x45, 0x66, 0xa6, 0x7d, 0xfa, 0xa4, 0x04, 0x39, 0x6e, 0xb6, 0x95, 0x83, 0x12, 0xb3, 0xb0, 0x19, 0x5f, 0xd4, 0x10, 0xbc, 0xc9, 0xc3, 0x27, 0x26, 0x60, 0x31,
- /* (2^113)P */ 0x0d, 0xe1, 0xe4, 0x32, 0x48, 0xdc, 0x20, 0x31, 0xf7, 0x17, 0xc7, 0x56, 0x67, 0xc4, 0x20, 0xeb, 0x94, 0x02, 0x28, 0x67, 0x3f, 0x2e, 0xf5, 0x00, 0x09, 0xc5, 0x30, 0x47, 0xc1, 0x4f, 0x6d, 0x56,
- /* (2^114)P */ 0x06, 0x72, 0x83, 0xfd, 0x40, 0x5d, 0x3a, 0x7e, 0x7a, 0x54, 0x59, 0x71, 0xdc, 0x26, 0xe9, 0xc1, 0x95, 0x60, 0x8d, 0xa6, 0xfb, 0x30, 0x67, 0x21, 0xa7, 0xce, 0x69, 0x3f, 0x84, 0xc3, 0xe8, 0x22,
- /* (2^115)P */ 0x2b, 0x4b, 0x0e, 0x93, 0xe8, 0x74, 0xd0, 0x33, 0x16, 0x58, 0xd1, 0x84, 0x0e, 0x35, 0xe4, 0xb6, 0x65, 0x23, 0xba, 0xd6, 0x6a, 0xc2, 0x34, 0x55, 0xf3, 0xf3, 0xf1, 0x89, 0x2f, 0xc1, 0x73, 0x77,
- /* (2^116)P */ 0xaa, 0x62, 0x79, 0xa5, 0x4d, 0x40, 0xba, 0x8c, 0x56, 0xce, 0x99, 0x19, 0xa8, 0x97, 0x98, 0x5b, 0xfc, 0x92, 0x16, 0x12, 0x2f, 0x86, 0x8e, 0x50, 0x91, 0xc2, 0x93, 0xa0, 0x7f, 0x90, 0x81, 0x3a,
- /* (2^117)P */ 0x10, 0xa5, 0x25, 0x47, 0xff, 0xd0, 0xde, 0x0d, 0x03, 0xc5, 0x3f, 0x67, 0x10, 0xcc, 0xd8, 0x10, 0x89, 0x4e, 0x1f, 0x9f, 0x1c, 0x15, 0x9d, 0x5b, 0x4c, 0xa4, 0x09, 0xcb, 0xd5, 0xc1, 0xa5, 0x32,
- /* (2^118)P */ 0xfb, 0x41, 0x05, 0xb9, 0x42, 0xa4, 0x0a, 0x1e, 0xdb, 0x85, 0xb4, 0xc1, 0x7c, 0xeb, 0x85, 0x5f, 0xe5, 0xf2, 0x9d, 0x8a, 0xce, 0x95, 0xe5, 0xbe, 0x36, 0x22, 0x42, 0x22, 0xc7, 0x96, 0xe4, 0x25,
- /* (2^119)P */ 0xb9, 0xe5, 0x0f, 0xcd, 0x46, 0x3c, 0xdf, 0x5e, 0x88, 0x33, 0xa4, 0xd2, 0x7e, 0x5a, 0xe7, 0x34, 0x52, 0xe3, 0x61, 0xd7, 0x11, 0xde, 0x88, 0xe4, 0x5c, 0x54, 0x85, 0xa0, 0x01, 0x8a, 0x87, 0x0e,
- /* (2^120)P */ 0x04, 0xbb, 0x21, 0xe0, 0x77, 0x3c, 0x49, 0xba, 0x9a, 0x89, 0xdf, 0xc7, 0x43, 0x18, 0x4d, 0x2b, 0x67, 0x0d, 0xe8, 0x7a, 0x48, 0x7a, 0xa3, 0x9e, 0x94, 0x17, 0xe4, 0x11, 0x80, 0x95, 0xa9, 0x67,
- /* (2^121)P */ 0x65, 0xb0, 0x97, 0x66, 0x1a, 0x05, 0x58, 0x4b, 0xd4, 0xa6, 0x6b, 0x8d, 0x7d, 0x3f, 0xe3, 0x47, 0xc1, 0x46, 0xca, 0x83, 0xd4, 0xa8, 0x4d, 0xbb, 0x0d, 0xdb, 0xc2, 0x81, 0xa1, 0xca, 0xbe, 0x68,
- /* (2^122)P */ 0xa5, 0x9a, 0x98, 0x0b, 0xe9, 0x80, 0x89, 0x8d, 0x9b, 0xc9, 0x93, 0x2c, 0x4a, 0xb1, 0x5e, 0xf9, 0xa2, 0x73, 0x6e, 0x79, 0xc4, 0xc7, 0xc6, 0x51, 0x69, 0xb5, 0xef, 0xb5, 0x63, 0x83, 0x22, 0x6e,
- /* (2^123)P */ 0xc8, 0x24, 0xd6, 0x2d, 0xb0, 0xc0, 0xbb, 0xc6, 0xee, 0x70, 0x81, 0xec, 0x7d, 0xb4, 0x7e, 0x77, 0xa9, 0xaf, 0xcf, 0x04, 0xa0, 0x15, 0xde, 0x3c, 0x9b, 0xbf, 0x60, 0x71, 0x08, 0xbc, 0xc6, 0x1d,
- /* (2^124)P */ 0x02, 0x40, 0xc3, 0xee, 0x43, 0xe0, 0x07, 0x2e, 0x7f, 0xdc, 0x68, 0x7a, 0x67, 0xfc, 0xe9, 0x18, 0x9a, 0x5b, 0xd1, 0x8b, 0x18, 0x03, 0xda, 0xd8, 0x53, 0x82, 0x56, 0x00, 0xbb, 0xc3, 0xfb, 0x48,
- /* (2^125)P */ 0xe1, 0x4c, 0x65, 0xfb, 0x4c, 0x7d, 0x54, 0x57, 0xad, 0xe2, 0x58, 0xa0, 0x82, 0x5b, 0x56, 0xd3, 0x78, 0x44, 0x15, 0xbf, 0x0b, 0xaf, 0x3e, 0xf6, 0x18, 0xbb, 0xdf, 0x14, 0xf1, 0x1e, 0x53, 0x47,
- /* (2^126)P */ 0x87, 0xc5, 0x78, 0x42, 0x0a, 0x63, 0xec, 0xe1, 0xf3, 0x83, 0x8e, 0xca, 0x46, 0xd5, 0x07, 0x55, 0x2b, 0x0c, 0xdc, 0x3a, 0xc6, 0x35, 0xe1, 0x85, 0x4e, 0x84, 0x82, 0x56, 0xa8, 0xef, 0xa7, 0x0a,
- /* (2^127)P */ 0x15, 0xf6, 0xe1, 0xb3, 0xa8, 0x1b, 0x69, 0x72, 0xfa, 0x3f, 0xbe, 0x1f, 0x70, 0xe9, 0xb4, 0x32, 0x68, 0x78, 0xbb, 0x39, 0x2e, 0xd9, 0xb6, 0x97, 0xe8, 0x39, 0x2e, 0xa0, 0xde, 0x53, 0xfe, 0x2c,
- /* (2^128)P */ 0xb0, 0x52, 0xcd, 0x85, 0xcd, 0x92, 0x73, 0x68, 0x31, 0x98, 0xe2, 0x10, 0xc9, 0x66, 0xff, 0x27, 0x06, 0x2d, 0x83, 0xa9, 0x56, 0x45, 0x13, 0x97, 0xa0, 0xf8, 0x84, 0x0a, 0x36, 0xb0, 0x9b, 0x26,
- /* (2^129)P */ 0x5c, 0xf8, 0x43, 0x76, 0x45, 0x55, 0x6e, 0x70, 0x1b, 0x7d, 0x59, 0x9b, 0x8c, 0xa4, 0x34, 0x37, 0x72, 0xa4, 0xef, 0xc6, 0xe8, 0x91, 0xee, 0x7a, 0xe0, 0xd9, 0xa9, 0x98, 0xc1, 0xab, 0xd6, 0x5c,
- /* (2^130)P */ 0x1a, 0xe4, 0x3c, 0xcb, 0x06, 0xde, 0x04, 0x0e, 0x38, 0xe1, 0x02, 0x34, 0x89, 0xeb, 0xc6, 0xd8, 0x72, 0x37, 0x6e, 0x68, 0xbb, 0x59, 0x46, 0x90, 0xc8, 0xa8, 0x6b, 0x74, 0x71, 0xc3, 0x15, 0x72,
- /* (2^131)P */ 0xd9, 0xa2, 0xe4, 0xea, 0x7e, 0xa9, 0x12, 0xfd, 0xc5, 0xf2, 0x94, 0x63, 0x51, 0xb7, 0x14, 0x95, 0x94, 0xf2, 0x08, 0x92, 0x80, 0xd5, 0x6f, 0x26, 0xb9, 0x26, 0x9a, 0x61, 0x85, 0x70, 0x84, 0x5c,
- /* (2^132)P */ 0xea, 0x94, 0xd6, 0xfe, 0x10, 0x54, 0x98, 0x52, 0x54, 0xd2, 0x2e, 0x4a, 0x93, 0x5b, 0x90, 0x3c, 0x67, 0xe4, 0x3b, 0x2d, 0x69, 0x47, 0xbb, 0x10, 0xe1, 0xe9, 0xe5, 0x69, 0x2d, 0x3d, 0x3b, 0x06,
- /* (2^133)P */ 0xeb, 0x7d, 0xa5, 0xdd, 0xee, 0x26, 0x27, 0x47, 0x91, 0x18, 0xf4, 0x10, 0xae, 0xc4, 0xb6, 0xef, 0x14, 0x76, 0x30, 0x7b, 0x91, 0x41, 0x16, 0x2b, 0x7c, 0x5b, 0xf4, 0xc4, 0x4f, 0x55, 0x7c, 0x11,
- /* (2^134)P */ 0x12, 0x88, 0x9d, 0x8f, 0x11, 0xf3, 0x7c, 0xc0, 0x39, 0x79, 0x01, 0x50, 0x20, 0xd8, 0xdb, 0x01, 0x27, 0x28, 0x1b, 0x17, 0xf4, 0x03, 0xe8, 0xd7, 0xea, 0x25, 0xd2, 0x87, 0x74, 0xe8, 0x15, 0x10,
- /* (2^135)P */ 0x4d, 0xcc, 0x3a, 0xd2, 0xfe, 0xe3, 0x8d, 0xc5, 0x2d, 0xbe, 0xa7, 0x94, 0xc2, 0x91, 0xdb, 0x50, 0x57, 0xf4, 0x9c, 0x1c, 0x3d, 0xd4, 0x94, 0x0b, 0x4a, 0x52, 0x37, 0x6e, 0xfa, 0x40, 0x16, 0x6b,
- /* (2^136)P */ 0x09, 0x0d, 0xda, 0x5f, 0x6c, 0x34, 0x2f, 0x69, 0x51, 0x31, 0x4d, 0xfa, 0x59, 0x1c, 0x0b, 0x20, 0x96, 0xa2, 0x77, 0x07, 0x76, 0x6f, 0xc4, 0xb8, 0xcf, 0xfb, 0xfd, 0x3f, 0x5f, 0x39, 0x38, 0x4b,
- /* (2^137)P */ 0x71, 0xd6, 0x54, 0xbe, 0x00, 0x5e, 0xd2, 0x18, 0xa6, 0xab, 0xc8, 0xbe, 0x82, 0x05, 0xd5, 0x60, 0x82, 0xb9, 0x78, 0x3b, 0x26, 0x8f, 0xad, 0x87, 0x32, 0x04, 0xda, 0x9c, 0x4e, 0xf6, 0xfd, 0x50,
- /* (2^138)P */ 0xf0, 0xdc, 0x78, 0xc5, 0xaa, 0x67, 0xf5, 0x90, 0x3b, 0x13, 0xa3, 0xf2, 0x0e, 0x9b, 0x1e, 0xef, 0x71, 0xde, 0xd9, 0x42, 0x92, 0xba, 0xeb, 0x0e, 0xc7, 0x01, 0x31, 0xf0, 0x9b, 0x3c, 0x47, 0x15,
- /* (2^139)P */ 0x95, 0x80, 0xb7, 0x56, 0xae, 0xe8, 0x77, 0x7c, 0x8e, 0x07, 0x6f, 0x6e, 0x66, 0xe7, 0x78, 0xb6, 0x1f, 0xba, 0x48, 0x53, 0x61, 0xb9, 0xa0, 0x2d, 0x0b, 0x3f, 0x73, 0xff, 0xc1, 0x31, 0xf9, 0x7c,
- /* (2^140)P */ 0x6c, 0x36, 0x0a, 0x0a, 0xf5, 0x57, 0xb3, 0x26, 0x32, 0xd7, 0x87, 0x2b, 0xf4, 0x8c, 0x70, 0xe9, 0xc0, 0xb2, 0x1c, 0xf9, 0xa5, 0xee, 0x3a, 0xc1, 0x4c, 0xbb, 0x43, 0x11, 0x99, 0x0c, 0xd9, 0x35,
- /* (2^141)P */ 0xdc, 0xd9, 0xa0, 0xa9, 0x04, 0xc4, 0xc1, 0x47, 0x51, 0xd2, 0x72, 0x19, 0x45, 0x58, 0x9e, 0x65, 0x31, 0x8c, 0xb3, 0x73, 0xc4, 0xa8, 0x75, 0x38, 0x24, 0x1f, 0x56, 0x79, 0xd3, 0x9e, 0xbd, 0x1f,
- /* (2^142)P */ 0x8d, 0xc2, 0x1e, 0xd4, 0x6f, 0xbc, 0xfa, 0x11, 0xca, 0x2d, 0x2a, 0xcd, 0xe3, 0xdf, 0xf8, 0x7e, 0x95, 0x45, 0x40, 0x8c, 0x5d, 0x3b, 0xe7, 0x72, 0x27, 0x2f, 0xb7, 0x54, 0x49, 0xfa, 0x35, 0x61,
- /* (2^143)P */ 0x9c, 0xb6, 0x24, 0xde, 0xa2, 0x32, 0xfc, 0xcc, 0x88, 0x5d, 0x09, 0x1f, 0x8c, 0x69, 0x55, 0x3f, 0x29, 0xf9, 0xc3, 0x5a, 0xed, 0x50, 0x33, 0xbe, 0xeb, 0x7e, 0x47, 0xca, 0x06, 0xf8, 0x9b, 0x5e,
- /* (2^144)P */ 0x68, 0x9f, 0x30, 0x3c, 0xb6, 0x8f, 0xce, 0xe9, 0xf4, 0xf9, 0xe1, 0x65, 0x35, 0xf6, 0x76, 0x53, 0xf1, 0x93, 0x63, 0x5a, 0xb3, 0xcf, 0xaf, 0xd1, 0x06, 0x35, 0x62, 0xe5, 0xed, 0xa1, 0x32, 0x66,
- /* (2^145)P */ 0x4c, 0xed, 0x2d, 0x0c, 0x39, 0x6c, 0x7d, 0x0b, 0x1f, 0xcb, 0x04, 0xdf, 0x81, 0x32, 0xcb, 0x56, 0xc7, 0xc3, 0xec, 0x49, 0x12, 0x5a, 0x30, 0x66, 0x2a, 0xa7, 0x8c, 0xa3, 0x60, 0x8b, 0x58, 0x5d,
- /* (2^146)P */ 0x2d, 0xf4, 0xe5, 0xe8, 0x78, 0xbf, 0xec, 0xa6, 0xec, 0x3e, 0x8a, 0x3c, 0x4b, 0xb4, 0xee, 0x86, 0x04, 0x16, 0xd2, 0xfb, 0x48, 0x9c, 0x21, 0xec, 0x31, 0x67, 0xc3, 0x17, 0xf5, 0x1a, 0xaf, 0x1a,
- /* (2^147)P */ 0xe7, 0xbd, 0x69, 0x67, 0x83, 0xa2, 0x06, 0xc3, 0xdb, 0x2a, 0x1e, 0x2b, 0x62, 0x80, 0x82, 0x20, 0xa6, 0x94, 0xff, 0xfb, 0x1f, 0xf5, 0x27, 0x80, 0x6b, 0xf2, 0x24, 0x11, 0xce, 0xa1, 0xcf, 0x76,
- /* (2^148)P */ 0xb6, 0xab, 0x22, 0x24, 0x56, 0x00, 0xeb, 0x18, 0xc3, 0x29, 0x8c, 0x8f, 0xd5, 0xc4, 0x77, 0xf3, 0x1a, 0x56, 0x31, 0xf5, 0x07, 0xc2, 0xbb, 0x4d, 0x27, 0x8a, 0x12, 0x82, 0xf0, 0xb7, 0x53, 0x02,
- /* (2^149)P */ 0xe0, 0x17, 0x2c, 0xb6, 0x1c, 0x09, 0x1f, 0x3d, 0xa9, 0x28, 0x46, 0xd6, 0xab, 0xe1, 0x60, 0x48, 0x53, 0x42, 0x9d, 0x30, 0x36, 0x74, 0xd1, 0x52, 0x76, 0xe5, 0xfa, 0x3e, 0xe1, 0x97, 0x6f, 0x35,
- /* (2^150)P */ 0x5b, 0x53, 0x50, 0xa1, 0x1a, 0xe1, 0x51, 0xd3, 0xcc, 0x78, 0xd8, 0x1d, 0xbb, 0x45, 0x6b, 0x3e, 0x98, 0x2c, 0xd9, 0xbe, 0x28, 0x61, 0x77, 0x0c, 0xb8, 0x85, 0x28, 0x03, 0x93, 0xae, 0x34, 0x1d,
- /* (2^151)P */ 0xc3, 0xa4, 0x5b, 0xa8, 0x8c, 0x48, 0xa0, 0x4b, 0xce, 0xe6, 0x9c, 0x3c, 0xc3, 0x48, 0x53, 0x98, 0x70, 0xa7, 0xbd, 0x97, 0x6f, 0x4c, 0x12, 0x66, 0x4a, 0x12, 0x54, 0x06, 0x29, 0xa0, 0x81, 0x0f,
- /* (2^152)P */ 0xfd, 0x86, 0x9b, 0x56, 0xa6, 0x9c, 0xd0, 0x9e, 0x2d, 0x9a, 0xaf, 0x18, 0xfd, 0x09, 0x10, 0x81, 0x0a, 0xc2, 0xd8, 0x93, 0x3f, 0xd0, 0x08, 0xff, 0x6b, 0xf2, 0xae, 0x9f, 0x19, 0x48, 0xa1, 0x52,
- /* (2^153)P */ 0x73, 0x1b, 0x8d, 0x2d, 0xdc, 0xf9, 0x03, 0x3e, 0x70, 0x1a, 0x96, 0x73, 0x18, 0x80, 0x05, 0x42, 0x70, 0x59, 0xa3, 0x41, 0xf0, 0x87, 0xd9, 0xc0, 0x49, 0xd5, 0xc0, 0xa1, 0x15, 0x1f, 0xaa, 0x07,
- /* (2^154)P */ 0x24, 0x72, 0xd2, 0x8c, 0xe0, 0x6c, 0xd4, 0xdf, 0x39, 0x42, 0x4e, 0x93, 0x4f, 0x02, 0x0a, 0x6d, 0x59, 0x7b, 0x89, 0x99, 0x63, 0x7a, 0x8a, 0x80, 0xa2, 0x95, 0x3d, 0xe1, 0xe9, 0x56, 0x45, 0x0a,
- /* (2^155)P */ 0x45, 0x30, 0xc1, 0xe9, 0x1f, 0x99, 0x1a, 0xd2, 0xb8, 0x51, 0x77, 0xfe, 0x48, 0x85, 0x0e, 0x9b, 0x35, 0x00, 0xf3, 0x4b, 0xcb, 0x43, 0xa6, 0x5d, 0x21, 0xf7, 0x40, 0x39, 0xd6, 0x28, 0xdb, 0x77,
- /* (2^156)P */ 0x11, 0x90, 0xdc, 0x4a, 0x61, 0xeb, 0x5e, 0xfc, 0xeb, 0x11, 0xc4, 0xe8, 0x9a, 0x41, 0x29, 0x52, 0x74, 0xcf, 0x1d, 0x7d, 0x78, 0xe7, 0xc3, 0x9e, 0xb5, 0x4c, 0x6e, 0x21, 0x3e, 0x05, 0x0d, 0x34,
- /* (2^157)P */ 0xb4, 0xf2, 0x8d, 0xb4, 0x39, 0xaf, 0xc7, 0xca, 0x94, 0x0a, 0xa1, 0x71, 0x28, 0xec, 0xfa, 0xc0, 0xed, 0x75, 0xa5, 0x5c, 0x24, 0x69, 0x0a, 0x14, 0x4c, 0x3a, 0x27, 0x34, 0x71, 0xc3, 0xf1, 0x0c,
- /* (2^158)P */ 0xa5, 0xb8, 0x24, 0xc2, 0x6a, 0x30, 0xee, 0xc8, 0xb0, 0x30, 0x49, 0xcb, 0x7c, 0xee, 0xea, 0x57, 0x4f, 0xe7, 0xcb, 0xaa, 0xbd, 0x06, 0xe8, 0xa1, 0x7d, 0x65, 0xeb, 0x2e, 0x74, 0x62, 0x9a, 0x7d,
- /* (2^159)P */ 0x30, 0x48, 0x6c, 0x54, 0xef, 0xb6, 0xb6, 0x9e, 0x2e, 0x6e, 0xb3, 0xdd, 0x1f, 0xca, 0x5c, 0x88, 0x05, 0x71, 0x0d, 0xef, 0x83, 0xf3, 0xb9, 0xe6, 0x12, 0x04, 0x2e, 0x9d, 0xef, 0x4f, 0x65, 0x58,
- /* (2^160)P */ 0x26, 0x8e, 0x0e, 0xbe, 0xff, 0xc4, 0x05, 0xa9, 0x6e, 0x81, 0x31, 0x9b, 0xdf, 0xe5, 0x2d, 0x94, 0xe1, 0x88, 0x2e, 0x80, 0x3f, 0x72, 0x7d, 0x49, 0x8d, 0x40, 0x2f, 0x60, 0xea, 0x4d, 0x68, 0x30,
- /* (2^161)P */ 0x34, 0xcb, 0xe6, 0xa3, 0x78, 0xa2, 0xe5, 0x21, 0xc4, 0x1d, 0x15, 0x5b, 0x6f, 0x6e, 0xfb, 0xae, 0x15, 0xca, 0x77, 0x9d, 0x04, 0x8e, 0x0b, 0xb3, 0x81, 0x89, 0xb9, 0x53, 0xcf, 0xc9, 0xc3, 0x28,
- /* (2^162)P */ 0x2a, 0xdd, 0x6c, 0x55, 0x21, 0xb7, 0x7f, 0x28, 0x74, 0x22, 0x02, 0x97, 0xa8, 0x7c, 0x31, 0x0d, 0x58, 0x32, 0x54, 0x3a, 0x42, 0xc7, 0x68, 0x74, 0x2f, 0x64, 0xb5, 0x4e, 0x46, 0x11, 0x7f, 0x4a,
- /* (2^163)P */ 0xa6, 0x3a, 0x19, 0x4d, 0x77, 0xa4, 0x37, 0xa2, 0xa1, 0x29, 0x21, 0xa9, 0x6e, 0x98, 0x65, 0xd8, 0x88, 0x1a, 0x7c, 0xf8, 0xec, 0x15, 0xc5, 0x24, 0xeb, 0xf5, 0x39, 0x5f, 0x57, 0x03, 0x40, 0x60,
- /* (2^164)P */ 0x27, 0x9b, 0x0a, 0x57, 0x89, 0xf1, 0xb9, 0x47, 0x78, 0x4b, 0x5e, 0x46, 0xde, 0xce, 0x98, 0x2b, 0x20, 0x5c, 0xb8, 0xdb, 0x51, 0xf5, 0x6d, 0x02, 0x01, 0x19, 0xe2, 0x47, 0x10, 0xd9, 0xfc, 0x74,
- /* (2^165)P */ 0xa3, 0xbf, 0xc1, 0x23, 0x0a, 0xa9, 0xe2, 0x13, 0xf6, 0x19, 0x85, 0x47, 0x4e, 0x07, 0xb0, 0x0c, 0x44, 0xcf, 0xf6, 0x3a, 0xbe, 0xcb, 0xf1, 0x5f, 0xbe, 0x2d, 0x81, 0xbe, 0x38, 0x54, 0xfe, 0x67,
- /* (2^166)P */ 0xb0, 0x05, 0x0f, 0xa4, 0x4f, 0xf6, 0x3c, 0xd1, 0x87, 0x37, 0x28, 0x32, 0x2f, 0xfb, 0x4d, 0x05, 0xea, 0x2a, 0x0d, 0x7f, 0x5b, 0x91, 0x73, 0x41, 0x4e, 0x0d, 0x61, 0x1f, 0x4f, 0x14, 0x2f, 0x48,
- /* (2^167)P */ 0x34, 0x82, 0x7f, 0xb4, 0x01, 0x02, 0x21, 0xf6, 0x90, 0xb9, 0x70, 0x9e, 0x92, 0xe1, 0x0a, 0x5d, 0x7c, 0x56, 0x49, 0xb0, 0x55, 0xf4, 0xd7, 0xdc, 0x01, 0x6f, 0x91, 0xf0, 0xf1, 0xd0, 0x93, 0x7e,
- /* (2^168)P */ 0xfa, 0xb4, 0x7d, 0x8a, 0xf1, 0xcb, 0x79, 0xdd, 0x2f, 0xc6, 0x74, 0x6f, 0xbf, 0x91, 0x83, 0xbe, 0xbd, 0x91, 0x82, 0x4b, 0xd1, 0x45, 0x71, 0x02, 0x05, 0x17, 0xbf, 0x2c, 0xea, 0x73, 0x5a, 0x58,
- /* (2^169)P */ 0xb2, 0x0d, 0x8a, 0x92, 0x3e, 0xa0, 0x5c, 0x48, 0xe7, 0x57, 0x28, 0x74, 0xa5, 0x01, 0xfc, 0x10, 0xa7, 0x51, 0xd5, 0xd6, 0xdb, 0x2e, 0x48, 0x2f, 0x8a, 0xdb, 0x8f, 0x04, 0xb5, 0x33, 0x04, 0x0f,
- /* (2^170)P */ 0x47, 0x62, 0xdc, 0xd7, 0x8d, 0x2e, 0xda, 0x60, 0x9a, 0x81, 0xd4, 0x8c, 0xd3, 0xc9, 0xb4, 0x88, 0x97, 0x66, 0xf6, 0x01, 0xc0, 0x3a, 0x03, 0x13, 0x75, 0x7d, 0x36, 0x3b, 0xfe, 0x24, 0x3b, 0x27,
- /* (2^171)P */ 0xd4, 0xb9, 0xb3, 0x31, 0x6a, 0xf6, 0xe8, 0xc6, 0xd5, 0x49, 0xdf, 0x94, 0xa4, 0x14, 0x15, 0x28, 0xa7, 0x3d, 0xb2, 0xc8, 0xdf, 0x6f, 0x72, 0xd1, 0x48, 0xe5, 0xde, 0x03, 0xd1, 0xe7, 0x3a, 0x4b,
- /* (2^172)P */ 0x7e, 0x9d, 0x4b, 0xce, 0x19, 0x6e, 0x25, 0xc6, 0x1c, 0xc6, 0xe3, 0x86, 0xf1, 0x5c, 0x5c, 0xff, 0x45, 0xc1, 0x8e, 0x4b, 0xa3, 0x3c, 0xc6, 0xac, 0x74, 0x65, 0xe6, 0xfe, 0x88, 0x18, 0x62, 0x74,
- /* (2^173)P */ 0x1e, 0x0a, 0x29, 0x45, 0x96, 0x40, 0x6f, 0x95, 0x2e, 0x96, 0x3a, 0x26, 0xe3, 0xf8, 0x0b, 0xef, 0x7b, 0x64, 0xc2, 0x5e, 0xeb, 0x50, 0x6a, 0xed, 0x02, 0x75, 0xca, 0x9d, 0x3a, 0x28, 0x94, 0x06,
- /* (2^174)P */ 0xd1, 0xdc, 0xa2, 0x43, 0x36, 0x96, 0x9b, 0x76, 0x53, 0x53, 0xfc, 0x09, 0xea, 0xc8, 0xb7, 0x42, 0xab, 0x7e, 0x39, 0x13, 0xee, 0x2a, 0x00, 0x4f, 0x3a, 0xd6, 0xb7, 0x19, 0x2c, 0x5e, 0x00, 0x63,
- /* (2^175)P */ 0xea, 0x3b, 0x02, 0x63, 0xda, 0x36, 0x67, 0xca, 0xb7, 0x99, 0x2a, 0xb1, 0x6d, 0x7f, 0x6c, 0x96, 0xe1, 0xc5, 0x37, 0xc5, 0x90, 0x93, 0xe0, 0xac, 0xee, 0x89, 0xaa, 0xa1, 0x63, 0x60, 0x69, 0x0b,
- /* (2^176)P */ 0xe5, 0x56, 0x8c, 0x28, 0x97, 0x3e, 0xb0, 0xeb, 0xe8, 0x8b, 0x8c, 0x93, 0x9f, 0x9f, 0x2a, 0x43, 0x71, 0x7f, 0x71, 0x5b, 0x3d, 0xa9, 0xa5, 0xa6, 0x97, 0x9d, 0x8f, 0xe1, 0xc3, 0xb4, 0x5f, 0x1a,
- /* (2^177)P */ 0xce, 0xcd, 0x60, 0x1c, 0xad, 0xe7, 0x94, 0x1c, 0xa0, 0xc4, 0x02, 0xfc, 0x43, 0x2a, 0x20, 0xee, 0x20, 0x6a, 0xc4, 0x67, 0xd8, 0xe4, 0xaf, 0x8d, 0x58, 0x7b, 0xc2, 0x8a, 0x3c, 0x26, 0x10, 0x0a,
- /* (2^178)P */ 0x4a, 0x2a, 0x43, 0xe4, 0xdf, 0xa9, 0xde, 0xd0, 0xc5, 0x77, 0x92, 0xbe, 0x7b, 0xf8, 0x6a, 0x85, 0x1a, 0xc7, 0x12, 0xc2, 0xac, 0x72, 0x84, 0xce, 0x91, 0x1e, 0xbb, 0x9b, 0x6d, 0x1b, 0x15, 0x6f,
- /* (2^179)P */ 0x6a, 0xd5, 0xee, 0x7c, 0x52, 0x6c, 0x77, 0x26, 0xec, 0xfa, 0xf8, 0xfb, 0xb7, 0x1c, 0x21, 0x7d, 0xcc, 0x09, 0x46, 0xfd, 0xa6, 0x66, 0xae, 0x37, 0x42, 0x0c, 0x77, 0xd2, 0x02, 0xb7, 0x81, 0x1f,
- /* (2^180)P */ 0x92, 0x83, 0xc5, 0xea, 0x57, 0xb0, 0xb0, 0x2f, 0x9d, 0x4e, 0x74, 0x29, 0xfe, 0x89, 0xdd, 0xe1, 0xf8, 0xb4, 0xbe, 0x17, 0xeb, 0xf8, 0x64, 0xc9, 0x1e, 0xd4, 0xa2, 0xc9, 0x73, 0x10, 0x57, 0x29,
- /* (2^181)P */ 0x54, 0xe2, 0xc0, 0x81, 0x89, 0xa1, 0x48, 0xa9, 0x30, 0x28, 0xb2, 0x65, 0x9b, 0x36, 0xf6, 0x2d, 0xc6, 0xd3, 0xcf, 0x5f, 0xd7, 0xb2, 0x3e, 0xa3, 0x1f, 0xa0, 0x99, 0x41, 0xec, 0xd6, 0x8c, 0x07,
- /* (2^182)P */ 0x2f, 0x0d, 0x90, 0xad, 0x41, 0x4a, 0x58, 0x4a, 0x52, 0x4c, 0xc7, 0xe2, 0x78, 0x2b, 0x14, 0x32, 0x78, 0xc9, 0x31, 0x84, 0x33, 0xe8, 0xc4, 0x68, 0xc2, 0x9f, 0x68, 0x08, 0x90, 0xea, 0x69, 0x7f,
- /* (2^183)P */ 0x65, 0x82, 0xa3, 0x46, 0x1e, 0xc8, 0xf2, 0x52, 0xfd, 0x32, 0xa8, 0x04, 0x2d, 0x07, 0x78, 0xfd, 0x94, 0x9e, 0x35, 0x25, 0xfa, 0xd5, 0xd7, 0x8c, 0xd2, 0x29, 0xcc, 0x54, 0x74, 0x1b, 0xe7, 0x4d,
- /* (2^184)P */ 0xc9, 0x6a, 0xda, 0x1e, 0xad, 0x60, 0xeb, 0x42, 0x3a, 0x9c, 0xc0, 0xdb, 0xdf, 0x37, 0xad, 0x0a, 0x91, 0xc1, 0x3c, 0xe3, 0x71, 0x4b, 0x00, 0x81, 0x3c, 0x80, 0x22, 0x51, 0x34, 0xbe, 0xe6, 0x44,
- /* (2^185)P */ 0xdb, 0x20, 0x19, 0xba, 0x88, 0x83, 0xfe, 0x03, 0x08, 0xb0, 0x0d, 0x15, 0x32, 0x7c, 0xd5, 0xf5, 0x29, 0x0c, 0xf6, 0x1a, 0x28, 0xc4, 0xc8, 0x49, 0xee, 0x1a, 0x70, 0xde, 0x18, 0xb5, 0xed, 0x21,
- /* (2^186)P */ 0x99, 0xdc, 0x06, 0x8f, 0x41, 0x3e, 0xb6, 0x7f, 0xb8, 0xd7, 0x66, 0xc1, 0x99, 0x0d, 0x46, 0xa4, 0x83, 0x0a, 0x52, 0xce, 0x48, 0x52, 0xdd, 0x24, 0x58, 0x83, 0x92, 0x2b, 0x71, 0xad, 0xc3, 0x5e,
- /* (2^187)P */ 0x0f, 0x93, 0x17, 0xbd, 0x5f, 0x2a, 0x02, 0x15, 0xe3, 0x70, 0x25, 0xd8, 0x77, 0x4a, 0xf6, 0xa4, 0x12, 0x37, 0x78, 0x15, 0x69, 0x8d, 0xbc, 0x12, 0xbb, 0x0a, 0x62, 0xfc, 0xc0, 0x94, 0x81, 0x49,
- /* (2^188)P */ 0x82, 0x6c, 0x68, 0x55, 0xd2, 0xd9, 0xa2, 0x38, 0xf0, 0x21, 0x3e, 0x19, 0xd9, 0x6b, 0x5c, 0x78, 0x84, 0x54, 0x4a, 0xb2, 0x1a, 0xc8, 0xd5, 0xe4, 0x89, 0x09, 0xe2, 0xb2, 0x60, 0x78, 0x30, 0x56,
- /* (2^189)P */ 0xc4, 0x74, 0x4d, 0x8b, 0xf7, 0x55, 0x9d, 0x42, 0x31, 0x01, 0x35, 0x43, 0x46, 0x83, 0xf1, 0x22, 0xff, 0x1f, 0xc7, 0x98, 0x45, 0xc2, 0x60, 0x1e, 0xef, 0x83, 0x99, 0x97, 0x14, 0xf0, 0xf2, 0x59,
- /* (2^190)P */ 0x44, 0x4a, 0x49, 0xeb, 0x56, 0x7d, 0xa4, 0x46, 0x8e, 0xa1, 0x36, 0xd6, 0x54, 0xa8, 0x22, 0x3e, 0x3b, 0x1c, 0x49, 0x74, 0x52, 0xe1, 0x46, 0xb3, 0xe7, 0xcd, 0x90, 0x53, 0x4e, 0xfd, 0xea, 0x2c,
- /* (2^191)P */ 0x75, 0x66, 0x0d, 0xbe, 0x38, 0x85, 0x8a, 0xba, 0x23, 0x8e, 0x81, 0x50, 0xbb, 0x74, 0x90, 0x4b, 0xc3, 0x04, 0xd3, 0x85, 0x90, 0xb8, 0xda, 0xcb, 0xc4, 0x92, 0x61, 0xe5, 0xe0, 0x4f, 0xa2, 0x61,
- /* (2^192)P */ 0xcb, 0x5b, 0x52, 0xdb, 0xe6, 0x15, 0x76, 0xcb, 0xca, 0xe4, 0x67, 0xa5, 0x35, 0x8c, 0x7d, 0xdd, 0x69, 0xdd, 0xfc, 0xca, 0x3a, 0x15, 0xb4, 0xe6, 0x66, 0x97, 0x3c, 0x7f, 0x09, 0x8e, 0x66, 0x2d,
- /* (2^193)P */ 0xf0, 0x5e, 0xe5, 0x5c, 0x26, 0x7e, 0x7e, 0xa5, 0x67, 0xb9, 0xd4, 0x7c, 0x52, 0x4e, 0x9f, 0x5d, 0xe5, 0xd1, 0x2f, 0x49, 0x06, 0x36, 0xc8, 0xfb, 0xae, 0xf7, 0xc3, 0xb7, 0xbe, 0x52, 0x0d, 0x09,
- /* (2^194)P */ 0x7c, 0x4d, 0x7b, 0x1e, 0x5a, 0x51, 0xb9, 0x09, 0xc0, 0x44, 0xda, 0x99, 0x25, 0x6a, 0x26, 0x1f, 0x04, 0x55, 0xc5, 0xe2, 0x48, 0x95, 0xc4, 0xa1, 0xcc, 0x15, 0x6f, 0x12, 0x87, 0x42, 0xf0, 0x7e,
- /* (2^195)P */ 0x15, 0xef, 0x30, 0xbd, 0x9d, 0x65, 0xd1, 0xfe, 0x7b, 0x27, 0xe0, 0xc4, 0xee, 0xb9, 0x4a, 0x8b, 0x91, 0x32, 0xdf, 0xa5, 0x36, 0x62, 0x4d, 0x88, 0x88, 0xf7, 0x5c, 0xbf, 0xa6, 0x6e, 0xd9, 0x1f,
- /* (2^196)P */ 0x9a, 0x0d, 0x19, 0x1f, 0x98, 0x61, 0xa1, 0x42, 0xc1, 0x52, 0x60, 0x7e, 0x50, 0x49, 0xd8, 0x61, 0xd5, 0x2c, 0x5a, 0x28, 0xbf, 0x13, 0xe1, 0x9f, 0xd8, 0x85, 0xad, 0xdb, 0x76, 0xd6, 0x22, 0x7c,
- /* (2^197)P */ 0x7d, 0xd2, 0xfb, 0x2b, 0xed, 0x70, 0xe7, 0x82, 0xa5, 0xf5, 0x96, 0xe9, 0xec, 0xb2, 0x05, 0x4c, 0x50, 0x01, 0x90, 0xb0, 0xc2, 0xa9, 0x40, 0xcd, 0x64, 0xbf, 0xd9, 0x13, 0x92, 0x31, 0x95, 0x58,
- /* (2^198)P */ 0x08, 0x2e, 0xea, 0x3f, 0x70, 0x5d, 0xcc, 0xe7, 0x8c, 0x18, 0xe2, 0x58, 0x12, 0x49, 0x0c, 0xb5, 0xf0, 0x5b, 0x20, 0x48, 0xaa, 0x0b, 0xe3, 0xcc, 0x62, 0x2d, 0xa3, 0xcf, 0x9c, 0x65, 0x7c, 0x53,
- /* (2^199)P */ 0x88, 0xc0, 0xcf, 0x98, 0x3a, 0x62, 0xb6, 0x37, 0xa4, 0xac, 0xd6, 0xa4, 0x1f, 0xed, 0x9b, 0xfe, 0xb0, 0xd1, 0xa8, 0x56, 0x8e, 0x9b, 0xd2, 0x04, 0x75, 0x95, 0x51, 0x0b, 0xc4, 0x71, 0x5f, 0x72,
- /* (2^200)P */ 0xe6, 0x9c, 0x33, 0xd0, 0x9c, 0xf8, 0xc7, 0x28, 0x8b, 0xc1, 0xdd, 0x69, 0x44, 0xb1, 0x67, 0x83, 0x2c, 0x65, 0xa1, 0xa6, 0x83, 0xda, 0x3a, 0x88, 0x17, 0x6c, 0x4d, 0x03, 0x74, 0x19, 0x5f, 0x58,
- /* (2^201)P */ 0x88, 0x91, 0xb1, 0xf1, 0x66, 0xb2, 0xcf, 0x89, 0x17, 0x52, 0xc3, 0xe7, 0x63, 0x48, 0x3b, 0xe6, 0x6a, 0x52, 0xc0, 0xb4, 0xa6, 0x9d, 0x8c, 0xd8, 0x35, 0x46, 0x95, 0xf0, 0x9d, 0x5c, 0x03, 0x3e,
- /* (2^202)P */ 0x9d, 0xde, 0x45, 0xfb, 0x12, 0x54, 0x9d, 0xdd, 0x0d, 0xf4, 0xcf, 0xe4, 0x32, 0x45, 0x68, 0xdd, 0x1c, 0x67, 0x1d, 0x15, 0x9b, 0x99, 0x5c, 0x4b, 0x90, 0xf6, 0xe7, 0x11, 0xc8, 0x2c, 0x8c, 0x2d,
- /* (2^203)P */ 0x40, 0x5d, 0x05, 0x90, 0x1d, 0xbe, 0x54, 0x7f, 0x40, 0xaf, 0x4a, 0x46, 0xdf, 0xc5, 0x64, 0xa4, 0xbe, 0x17, 0xe9, 0xf0, 0x24, 0x96, 0x97, 0x33, 0x30, 0x6b, 0x35, 0x27, 0xc5, 0x8d, 0x01, 0x2c,
- /* (2^204)P */ 0xd4, 0xb3, 0x30, 0xe3, 0x24, 0x50, 0x41, 0xa5, 0xd3, 0x52, 0x16, 0x69, 0x96, 0x3d, 0xff, 0x73, 0xf1, 0x59, 0x9b, 0xef, 0xc4, 0x42, 0xec, 0x94, 0x5a, 0x8e, 0xd0, 0x18, 0x16, 0x20, 0x47, 0x07,
- /* (2^205)P */ 0x53, 0x1c, 0x41, 0xca, 0x8a, 0xa4, 0x6c, 0x4d, 0x19, 0x61, 0xa6, 0xcf, 0x2f, 0x5f, 0x41, 0x66, 0xff, 0x27, 0xe2, 0x51, 0x00, 0xd4, 0x4d, 0x9c, 0xeb, 0xf7, 0x02, 0x9a, 0xc0, 0x0b, 0x81, 0x59,
- /* (2^206)P */ 0x1d, 0x10, 0xdc, 0xb3, 0x71, 0xb1, 0x7e, 0x2a, 0x8e, 0xf6, 0xfe, 0x9f, 0xb9, 0x5a, 0x1c, 0x44, 0xea, 0x59, 0xb3, 0x93, 0x9b, 0x5c, 0x02, 0x32, 0x2f, 0x11, 0x9d, 0x1e, 0xa7, 0xe0, 0x8c, 0x5e,
- /* (2^207)P */ 0xfd, 0x03, 0x95, 0x42, 0x92, 0xcb, 0xcc, 0xbf, 0x55, 0x5d, 0x09, 0x2f, 0x75, 0xba, 0x71, 0xd2, 0x1e, 0x09, 0x2d, 0x97, 0x5e, 0xad, 0x5e, 0x34, 0xba, 0x03, 0x31, 0xa8, 0x11, 0xdf, 0xc8, 0x18,
- /* (2^208)P */ 0x4c, 0x0f, 0xed, 0x9a, 0x9a, 0x94, 0xcd, 0x90, 0x7e, 0xe3, 0x60, 0x66, 0xcb, 0xf4, 0xd1, 0xc5, 0x0b, 0x2e, 0xc5, 0x56, 0x2d, 0xc5, 0xca, 0xb8, 0x0d, 0x8e, 0x80, 0xc5, 0x00, 0xe4, 0x42, 0x6e,
- /* (2^209)P */ 0x23, 0xfd, 0xae, 0xee, 0x66, 0x69, 0xb4, 0xa3, 0xca, 0xcd, 0x9e, 0xe3, 0x0b, 0x1f, 0x4f, 0x0c, 0x1d, 0xa5, 0x83, 0xd6, 0xc9, 0xc8, 0x9d, 0x18, 0x1b, 0x35, 0x09, 0x4c, 0x05, 0x7f, 0xf2, 0x51,
- /* (2^210)P */ 0x82, 0x06, 0x32, 0x2a, 0xcd, 0x7c, 0x48, 0x4c, 0x96, 0x1c, 0xdf, 0xb3, 0x5b, 0xa9, 0x7e, 0x58, 0xe8, 0xb8, 0x5c, 0x55, 0x9e, 0xf7, 0xcc, 0xc8, 0x3d, 0xd7, 0x06, 0xa2, 0x29, 0xc8, 0x7d, 0x54,
- /* (2^211)P */ 0x06, 0x9b, 0xc3, 0x80, 0xcd, 0xa6, 0x22, 0xb8, 0xc6, 0xd4, 0x00, 0x20, 0x73, 0x54, 0x6d, 0xe9, 0x4d, 0x3b, 0x46, 0x91, 0x6f, 0x5b, 0x53, 0x28, 0x1d, 0x6e, 0x48, 0xe2, 0x60, 0x46, 0x8f, 0x22,
- /* (2^212)P */ 0xbf, 0x3a, 0x8d, 0xde, 0x38, 0x95, 0x79, 0x98, 0x6e, 0xca, 0xeb, 0x45, 0x00, 0x33, 0xd8, 0x8c, 0x38, 0xe7, 0x21, 0x82, 0x00, 0x2a, 0x95, 0x79, 0xbb, 0xd2, 0x5c, 0x53, 0xa7, 0xe1, 0x22, 0x43,
- /* (2^213)P */ 0x1c, 0x80, 0xd1, 0x19, 0x18, 0xc1, 0x14, 0xb1, 0xc7, 0x5e, 0x3f, 0x4f, 0xd8, 0xe4, 0x16, 0x20, 0x4c, 0x0f, 0x26, 0x09, 0xf4, 0x2d, 0x0e, 0xdd, 0x66, 0x72, 0x5f, 0xae, 0xc0, 0x62, 0xc3, 0x5e,
- /* (2^214)P */ 0xee, 0xb4, 0xb2, 0xb8, 0x18, 0x2b, 0x46, 0xc0, 0xfb, 0x1a, 0x4d, 0x27, 0x50, 0xd9, 0xc8, 0x7c, 0xd2, 0x02, 0x6b, 0x43, 0x05, 0x71, 0x5f, 0xf2, 0xd3, 0xcc, 0xf9, 0xbf, 0xdc, 0xf8, 0xbb, 0x43,
- /* (2^215)P */ 0xdf, 0xe9, 0x39, 0xa0, 0x67, 0x17, 0xad, 0xb6, 0x83, 0x35, 0x9d, 0xf6, 0xa8, 0x4d, 0x71, 0xb0, 0xf5, 0x31, 0x29, 0xb4, 0x18, 0xfa, 0x55, 0x5e, 0x61, 0x09, 0xc6, 0x33, 0x8f, 0x55, 0xd5, 0x4e,
- /* (2^216)P */ 0xdd, 0xa5, 0x47, 0xc6, 0x01, 0x79, 0xe3, 0x1f, 0x57, 0xd3, 0x81, 0x80, 0x1f, 0xdf, 0x3d, 0x59, 0xa6, 0xd7, 0x3f, 0x81, 0xfd, 0xa4, 0x49, 0x02, 0x61, 0xaf, 0x9c, 0x4e, 0x27, 0xca, 0xac, 0x69,
- /* (2^217)P */ 0xc9, 0x21, 0x07, 0x33, 0xea, 0xa3, 0x7b, 0x04, 0xa0, 0x1e, 0x7e, 0x0e, 0xc2, 0x3f, 0x42, 0x83, 0x60, 0x4a, 0x31, 0x01, 0xaf, 0xc0, 0xf4, 0x1d, 0x27, 0x95, 0x28, 0x89, 0xab, 0x2d, 0xa6, 0x09,
- /* (2^218)P */ 0x00, 0xcb, 0xc6, 0x9c, 0xa4, 0x25, 0xb3, 0xa5, 0xb6, 0x6c, 0xb5, 0x54, 0xc6, 0x5d, 0x4b, 0xe9, 0xa0, 0x94, 0xc9, 0xad, 0x79, 0x87, 0xe2, 0x3b, 0xad, 0x4a, 0x3a, 0xba, 0xf8, 0xe8, 0x96, 0x42,
- /* (2^219)P */ 0xab, 0x1e, 0x45, 0x1e, 0x76, 0x89, 0x86, 0x32, 0x4a, 0x59, 0x59, 0xff, 0x8b, 0x59, 0x4d, 0x2e, 0x4a, 0x08, 0xa7, 0xd7, 0x53, 0x68, 0xb9, 0x49, 0xa8, 0x20, 0x14, 0x60, 0x19, 0xa3, 0x80, 0x49,
- /* (2^220)P */ 0x42, 0x2c, 0x55, 0x2f, 0xe1, 0xb9, 0x65, 0x95, 0x96, 0xfe, 0x00, 0x71, 0xdb, 0x18, 0x53, 0x8a, 0xd7, 0xd0, 0xad, 0x43, 0x4d, 0x0b, 0xc9, 0x05, 0xda, 0x4e, 0x5d, 0x6a, 0xd6, 0x4c, 0x8b, 0x53,
- /* (2^221)P */ 0x9f, 0x03, 0x9f, 0xe8, 0xc3, 0x4f, 0xe9, 0xf4, 0x45, 0x80, 0x61, 0x6f, 0xf2, 0x9a, 0x2c, 0x59, 0x50, 0x95, 0x4b, 0xfd, 0xb5, 0x6e, 0xa3, 0x08, 0x19, 0x14, 0xed, 0xc2, 0xf6, 0xfa, 0xff, 0x25,
- /* (2^222)P */ 0x54, 0xd3, 0x79, 0xcc, 0x59, 0x44, 0x43, 0x34, 0x6b, 0x47, 0xd5, 0xb1, 0xb4, 0xbf, 0xec, 0xee, 0x99, 0x5d, 0x61, 0x61, 0xa0, 0x34, 0xeb, 0xdd, 0x73, 0xb7, 0x64, 0xeb, 0xcc, 0xce, 0x29, 0x51,
- /* (2^223)P */ 0x20, 0x35, 0x99, 0x94, 0x58, 0x21, 0x43, 0xee, 0x3b, 0x0b, 0x4c, 0xf1, 0x7c, 0x9c, 0x2f, 0x77, 0xd5, 0xda, 0xbe, 0x06, 0xe3, 0xfc, 0xe2, 0xd2, 0x97, 0x6a, 0xf0, 0x46, 0xb5, 0x42, 0x5f, 0x71,
- /* (2^224)P */ 0x1a, 0x5f, 0x5b, 0xda, 0xce, 0xcd, 0x4e, 0x43, 0xa9, 0x41, 0x97, 0xa4, 0x15, 0x71, 0xa1, 0x0d, 0x2e, 0xad, 0xed, 0x73, 0x7c, 0xd7, 0x0b, 0x68, 0x41, 0x90, 0xdd, 0x4e, 0x35, 0x02, 0x7c, 0x48,
- /* (2^225)P */ 0xc4, 0xd9, 0x0e, 0xa7, 0xf3, 0xef, 0xef, 0xb8, 0x02, 0xe3, 0x57, 0xe8, 0xa3, 0x2a, 0xa3, 0x56, 0xa0, 0xa5, 0xa2, 0x48, 0xbd, 0x68, 0x3a, 0xdf, 0x44, 0xc4, 0x76, 0x31, 0xb7, 0x50, 0xf6, 0x07,
- /* (2^226)P */ 0xb1, 0xcc, 0xe0, 0x26, 0x16, 0x9b, 0x8b, 0xe3, 0x36, 0xfb, 0x09, 0x8b, 0xc1, 0x53, 0xe0, 0x79, 0x64, 0x49, 0xf9, 0xc9, 0x19, 0x03, 0xd9, 0x56, 0xc4, 0xf5, 0x9f, 0xac, 0xe7, 0x41, 0xa9, 0x1c,
- /* (2^227)P */ 0xbb, 0xa0, 0x2f, 0x16, 0x29, 0xdf, 0xc4, 0x49, 0x05, 0x33, 0xb3, 0x82, 0x32, 0xcf, 0x88, 0x84, 0x7d, 0x43, 0xbb, 0xca, 0x14, 0xda, 0xdf, 0x95, 0x86, 0xad, 0xd5, 0x64, 0x82, 0xf7, 0x91, 0x33,
- /* (2^228)P */ 0x5d, 0x09, 0xb5, 0xe2, 0x6a, 0xe0, 0x9a, 0x72, 0x46, 0xa9, 0x59, 0x32, 0xd7, 0x58, 0x8a, 0xd5, 0xed, 0x21, 0x39, 0xd1, 0x62, 0x42, 0x83, 0xe9, 0x92, 0xb5, 0x4b, 0xa5, 0xfa, 0xda, 0xfe, 0x27,
- /* (2^229)P */ 0xbb, 0x48, 0xad, 0x29, 0xb8, 0xc5, 0x9d, 0xa9, 0x60, 0xe2, 0x9e, 0x49, 0x42, 0x57, 0x02, 0x5f, 0xfd, 0x13, 0x75, 0x5d, 0xcd, 0x8e, 0x2c, 0x80, 0x38, 0xd9, 0x6d, 0x3f, 0xef, 0xb3, 0xce, 0x78,
- /* (2^230)P */ 0x94, 0x5d, 0x13, 0x8a, 0x4f, 0xf4, 0x42, 0xc3, 0xa3, 0xdd, 0x8c, 0x82, 0x44, 0xdb, 0x9e, 0x7b, 0xe7, 0xcf, 0x37, 0x05, 0x1a, 0xd1, 0x36, 0x94, 0xc8, 0xb4, 0x1a, 0xec, 0x64, 0xb1, 0x64, 0x50,
- /* (2^231)P */ 0xfc, 0xb2, 0x7e, 0xd3, 0xcf, 0xec, 0x20, 0x70, 0xfc, 0x25, 0x0d, 0xd9, 0x3e, 0xea, 0x31, 0x1f, 0x34, 0xbb, 0xa1, 0xdf, 0x7b, 0x0d, 0x93, 0x1b, 0x44, 0x30, 0x11, 0x48, 0x7a, 0x46, 0x44, 0x53,
- /* (2^232)P */ 0xfb, 0x6d, 0x5e, 0xf2, 0x70, 0x31, 0x07, 0x70, 0xc8, 0x4c, 0x11, 0x50, 0x1a, 0xdc, 0x85, 0xe3, 0x00, 0x4f, 0xfc, 0xc8, 0x8a, 0x69, 0x48, 0x23, 0xd8, 0x40, 0xdd, 0x84, 0x52, 0xa5, 0x77, 0x2a,
- /* (2^233)P */ 0xe4, 0x6c, 0x8c, 0xc9, 0xe0, 0xaf, 0x06, 0xfe, 0xe4, 0xd6, 0xdf, 0xdd, 0x96, 0xdf, 0x35, 0xc2, 0xd3, 0x1e, 0xbf, 0x33, 0x1e, 0xd0, 0x28, 0x14, 0xaf, 0xbd, 0x00, 0x93, 0xec, 0x68, 0x57, 0x78,
- /* (2^234)P */ 0x3b, 0xb6, 0xde, 0x91, 0x7a, 0xe5, 0x02, 0x97, 0x80, 0x8b, 0xce, 0xe5, 0xbf, 0xb8, 0xbd, 0x61, 0xac, 0x58, 0x1d, 0x3d, 0x6f, 0x42, 0x5b, 0x64, 0xbc, 0x57, 0xa5, 0x27, 0x22, 0xa8, 0x04, 0x48,
- /* (2^235)P */ 0x01, 0x26, 0x4d, 0xb4, 0x8a, 0x04, 0x57, 0x8e, 0x35, 0x69, 0x3a, 0x4b, 0x1a, 0x50, 0xd6, 0x68, 0x93, 0xc2, 0xe1, 0xf9, 0xc3, 0x9e, 0x9c, 0xc3, 0xe2, 0x63, 0xde, 0xd4, 0x57, 0xf2, 0x72, 0x41,
- /* (2^236)P */ 0x01, 0x64, 0x0c, 0x33, 0x50, 0xb4, 0x68, 0xd3, 0x91, 0x23, 0x8f, 0x41, 0x17, 0x30, 0x0d, 0x04, 0x0d, 0xd9, 0xb7, 0x90, 0x60, 0xbb, 0x34, 0x2c, 0x1f, 0xd5, 0xdf, 0x8f, 0x22, 0x49, 0xf6, 0x16,
- /* (2^237)P */ 0xf5, 0x8e, 0x92, 0x2b, 0x8e, 0x81, 0xa6, 0xbe, 0x72, 0x1e, 0xc1, 0xcd, 0x91, 0xcf, 0x8c, 0xe2, 0xcd, 0x36, 0x7a, 0xe7, 0x68, 0xaa, 0x4a, 0x59, 0x0f, 0xfd, 0x7f, 0x6c, 0x80, 0x34, 0x30, 0x31,
- /* (2^238)P */ 0x65, 0xbd, 0x49, 0x22, 0xac, 0x27, 0x9d, 0x8a, 0x12, 0x95, 0x8e, 0x01, 0x64, 0xb4, 0xa3, 0x19, 0xc7, 0x7e, 0xb3, 0x52, 0xf3, 0xcf, 0x6c, 0xc2, 0x21, 0x7b, 0x79, 0x1d, 0x34, 0x68, 0x6f, 0x05,
- /* (2^239)P */ 0x27, 0x23, 0xfd, 0x7e, 0x75, 0xd6, 0x79, 0x5e, 0x15, 0xfe, 0x3a, 0x55, 0xb6, 0xbc, 0xbd, 0xfa, 0x60, 0x5a, 0xaf, 0x6e, 0x2c, 0x22, 0xe7, 0xd3, 0x3b, 0x74, 0xae, 0x4d, 0x6d, 0xc7, 0x46, 0x70,
- /* (2^240)P */ 0x55, 0x4a, 0x8d, 0xb1, 0x72, 0xe8, 0x0b, 0x66, 0x96, 0x14, 0x4e, 0x57, 0x18, 0x25, 0x99, 0x19, 0xbb, 0xdc, 0x2b, 0x30, 0x3a, 0x05, 0x03, 0xc1, 0x8e, 0x8e, 0x21, 0x0b, 0x80, 0xe9, 0xd8, 0x3e,
- /* (2^241)P */ 0x3e, 0xe0, 0x75, 0xfa, 0x39, 0x92, 0x0b, 0x7b, 0x83, 0xc0, 0x33, 0x46, 0x68, 0xfb, 0xe9, 0xef, 0x93, 0x77, 0x1a, 0x39, 0xbe, 0x5f, 0xa3, 0x98, 0x34, 0xfe, 0xd0, 0xe2, 0x0f, 0x51, 0x65, 0x60,
- /* (2^242)P */ 0x0c, 0xad, 0xab, 0x48, 0x85, 0x66, 0xcb, 0x55, 0x27, 0xe5, 0x87, 0xda, 0x48, 0x45, 0x58, 0xb4, 0xdd, 0xc1, 0x07, 0x01, 0xea, 0xec, 0x43, 0x2c, 0x35, 0xde, 0x72, 0x93, 0x80, 0x28, 0x60, 0x52,
- /* (2^243)P */ 0x1f, 0x3b, 0x21, 0xf9, 0x6a, 0xc5, 0x15, 0x34, 0xdb, 0x98, 0x7e, 0x01, 0x4d, 0x1a, 0xee, 0x5b, 0x9b, 0x70, 0xcf, 0xb5, 0x05, 0xb1, 0xf6, 0x13, 0xb6, 0x9a, 0xb2, 0x82, 0x34, 0x0e, 0xf2, 0x5f,
- /* (2^244)P */ 0x90, 0x6c, 0x2e, 0xcc, 0x75, 0x9c, 0xa2, 0x0a, 0x06, 0xe2, 0x70, 0x3a, 0xca, 0x73, 0x7d, 0xfc, 0x15, 0xc5, 0xb5, 0xc4, 0x8f, 0xc3, 0x9f, 0x89, 0x07, 0xc2, 0xff, 0x24, 0xb1, 0x86, 0x03, 0x25,
- /* (2^245)P */ 0x56, 0x2b, 0x3d, 0xae, 0xd5, 0x28, 0xea, 0x54, 0xce, 0x60, 0xde, 0xd6, 0x9d, 0x14, 0x13, 0x99, 0xc1, 0xd6, 0x06, 0x8f, 0xc5, 0x4f, 0x69, 0x16, 0xc7, 0x8f, 0x01, 0xeb, 0x75, 0x39, 0xb2, 0x46,
- /* (2^246)P */ 0xe2, 0xb4, 0xb7, 0xb4, 0x0f, 0x6a, 0x0a, 0x47, 0xde, 0x53, 0x72, 0x8f, 0x5a, 0x47, 0x92, 0x5d, 0xdb, 0x3a, 0xbd, 0x2f, 0xb5, 0xe5, 0xee, 0xab, 0x68, 0x69, 0x80, 0xa0, 0x01, 0x08, 0xa2, 0x7f,
- /* (2^247)P */ 0xd2, 0x14, 0x77, 0x9f, 0xf1, 0xfa, 0xf3, 0x76, 0xc3, 0x60, 0x46, 0x2f, 0xc1, 0x40, 0xe8, 0xb3, 0x4e, 0x74, 0x12, 0xf2, 0x8d, 0xcd, 0xb4, 0x0f, 0xd2, 0x2d, 0x3a, 0x1d, 0x25, 0x5a, 0x06, 0x4b,
- /* (2^248)P */ 0x4a, 0xcd, 0x77, 0x3d, 0x38, 0xde, 0xeb, 0x5c, 0xb1, 0x9c, 0x2c, 0x88, 0xdf, 0x39, 0xdf, 0x6a, 0x59, 0xf7, 0x9a, 0xb0, 0x2e, 0x24, 0xdd, 0xa2, 0x22, 0x64, 0x5f, 0x0e, 0xe5, 0xc0, 0x47, 0x31,
- /* (2^249)P */ 0xdb, 0x50, 0x13, 0x1d, 0x10, 0xa5, 0x4c, 0x16, 0x62, 0xc9, 0x3f, 0xc3, 0x79, 0x34, 0xd1, 0xf8, 0x08, 0xda, 0xe5, 0x13, 0x4d, 0xce, 0x40, 0xe6, 0xba, 0xf8, 0x61, 0x50, 0xc4, 0xe0, 0xde, 0x4b,
- /* (2^250)P */ 0xc9, 0xb1, 0xed, 0xa4, 0xc1, 0x6d, 0xc4, 0xd7, 0x8a, 0xd9, 0x7f, 0x43, 0xb6, 0xd7, 0x14, 0x55, 0x0b, 0xc0, 0xa1, 0xb2, 0x6b, 0x2f, 0x94, 0x58, 0x0e, 0x71, 0x70, 0x1d, 0xab, 0xb2, 0xff, 0x2d,
- /* (2^251)P */ 0x68, 0x6d, 0x8b, 0xc1, 0x2f, 0xcf, 0xdf, 0xcc, 0x67, 0x61, 0x80, 0xb7, 0xa8, 0xcb, 0xeb, 0xa8, 0xe3, 0x37, 0x29, 0x5e, 0xf9, 0x97, 0x06, 0x98, 0x8c, 0x6e, 0x12, 0xd0, 0x1c, 0xba, 0xfb, 0x02,
- /* (2^252)P */ 0x65, 0x45, 0xff, 0xad, 0x60, 0xc3, 0x98, 0xcb, 0x19, 0x15, 0xdb, 0x4b, 0xd2, 0x01, 0x71, 0x44, 0xd5, 0x15, 0xfb, 0x75, 0x74, 0xc8, 0xc4, 0x98, 0x7d, 0xa2, 0x22, 0x6e, 0x6d, 0xc7, 0xf8, 0x05,
- /* (2^253)P */ 0x94, 0xf4, 0xb9, 0xfe, 0xdf, 0xe5, 0x69, 0xab, 0x75, 0x6b, 0x40, 0x18, 0x9d, 0xc7, 0x09, 0xae, 0x1d, 0x2d, 0xa4, 0x94, 0xfb, 0x45, 0x9b, 0x19, 0x84, 0xfa, 0x2a, 0xae, 0xeb, 0x0a, 0x71, 0x79,
- /* (2^254)P */ 0xdf, 0xd2, 0x34, 0xf3, 0xa7, 0xed, 0xad, 0xa6, 0xb4, 0x57, 0x2a, 0xaf, 0x51, 0x9c, 0xde, 0x7b, 0xa8, 0xea, 0xdc, 0x86, 0x4f, 0xc6, 0x8f, 0xa9, 0x7b, 0xd0, 0x0e, 0xc2, 0x35, 0x03, 0xbe, 0x6b,
- /* (2^255)P */ 0x44, 0x43, 0x98, 0x53, 0xbe, 0xdc, 0x7f, 0x66, 0xa8, 0x49, 0x59, 0x00, 0x1c, 0xbc, 0x72, 0x07, 0x8e, 0xd6, 0xbe, 0x4e, 0x9f, 0xa4, 0x07, 0xba, 0xbf, 0x30, 0xdf, 0xba, 0x85, 0xb0, 0xa7, 0x1f,
-}
diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve.go b/vendor/github.com/cloudflare/circl/dh/x448/curve.go
deleted file mode 100644
index d59564e4b42..00000000000
--- a/vendor/github.com/cloudflare/circl/dh/x448/curve.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package x448
-
-import (
- fp "github.com/cloudflare/circl/math/fp448"
-)
-
-// ladderJoye calculates a fixed-point multiplication with the generator point.
-// The algorithm is the right-to-left Joye's ladder as described
-// in "How to precompute a ladder" in SAC'2017.
-func ladderJoye(k *Key) {
- w := [5]fp.Elt{} // [mu,x1,z1,x2,z2] order must be preserved.
- w[1] = fp.Elt{ // x1 = S
- 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- }
- fp.SetOne(&w[2]) // z1 = 1
- w[3] = fp.Elt{ // x2 = G-S
- 0x20, 0x27, 0x9d, 0xc9, 0x7d, 0x19, 0xb1, 0xac,
- 0xf8, 0xba, 0x69, 0x1c, 0xff, 0x33, 0xac, 0x23,
- 0x51, 0x1b, 0xce, 0x3a, 0x64, 0x65, 0xbd, 0xf1,
- 0x23, 0xf8, 0xc1, 0x84, 0x9d, 0x45, 0x54, 0x29,
- 0x67, 0xb9, 0x81, 0x1c, 0x03, 0xd1, 0xcd, 0xda,
- 0x7b, 0xeb, 0xff, 0x1a, 0x88, 0x03, 0xcf, 0x3a,
- 0x42, 0x44, 0x32, 0x01, 0x25, 0xb7, 0xfa, 0xf0,
- }
- fp.SetOne(&w[4]) // z2 = 1
-
- const n = 448
- const h = 2
- swap := uint(1)
- for s := 0; s < n-h; s++ {
- i := (s + h) / 8
- j := (s + h) % 8
- bit := uint((k[i] >> uint(j)) & 1)
- copy(w[0][:], tableGenerator[s*Size:(s+1)*Size])
- diffAdd(&w, swap^bit)
- swap = bit
- }
- for s := 0; s < h; s++ {
- double(&w[1], &w[2])
- }
- toAffine((*[fp.Size]byte)(k), &w[1], &w[2])
-}
-
-// ladderMontgomery calculates a generic scalar point multiplication
-// The algorithm implemented is the left-to-right Montgomery's ladder.
-func ladderMontgomery(k, xP *Key) {
- w := [5]fp.Elt{} // [x1, x2, z2, x3, z3] order must be preserved.
- w[0] = *(*fp.Elt)(xP) // x1 = xP
- fp.SetOne(&w[1]) // x2 = 1
- w[3] = *(*fp.Elt)(xP) // x3 = xP
- fp.SetOne(&w[4]) // z3 = 1
-
- move := uint(0)
- for s := 448 - 1; s >= 0; s-- {
- i := s / 8
- j := s % 8
- bit := uint((k[i] >> uint(j)) & 1)
- ladderStep(&w, move^bit)
- move = bit
- }
- toAffine((*[fp.Size]byte)(k), &w[1], &w[2])
-}
-
-func toAffine(k *[fp.Size]byte, x, z *fp.Elt) {
- fp.Inv(z, z)
- fp.Mul(x, x, z)
- _ = fp.ToBytes(k[:], x)
-}
-
-var lowOrderPoints = [3]fp.Elt{
- { /* (0,_,1) point of order 2 on Curve448 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- },
- { /* (1,_,1) a point of order 4 on the twist of Curve448 */
- 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- },
- { /* (-1,_,1) point of order 4 on Curve448 */
- 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- },
-}
diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.go b/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.go
deleted file mode 100644
index a0622666136..00000000000
--- a/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.go
+++ /dev/null
@@ -1,30 +0,0 @@
-//go:build amd64 && !purego
-// +build amd64,!purego
-
-package x448
-
-import (
- fp "github.com/cloudflare/circl/math/fp448"
- "golang.org/x/sys/cpu"
-)
-
-var hasBmi2Adx = cpu.X86.HasBMI2 && cpu.X86.HasADX
-
-var _ = hasBmi2Adx
-
-func double(x, z *fp.Elt) { doubleAmd64(x, z) }
-func diffAdd(w *[5]fp.Elt, b uint) { diffAddAmd64(w, b) }
-func ladderStep(w *[5]fp.Elt, b uint) { ladderStepAmd64(w, b) }
-func mulA24(z, x *fp.Elt) { mulA24Amd64(z, x) }
-
-//go:noescape
-func doubleAmd64(x, z *fp.Elt)
-
-//go:noescape
-func diffAddAmd64(w *[5]fp.Elt, b uint)
-
-//go:noescape
-func ladderStepAmd64(w *[5]fp.Elt, b uint)
-
-//go:noescape
-func mulA24Amd64(z, x *fp.Elt)
diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.h b/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.h
deleted file mode 100644
index 8c1ae4d0fbb..00000000000
--- a/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.h
+++ /dev/null
@@ -1,111 +0,0 @@
-#define ladderStepLeg \
- addSub(x2,z2) \
- addSub(x3,z3) \
- integerMulLeg(b0,x2,z3) \
- integerMulLeg(b1,x3,z2) \
- reduceFromDoubleLeg(t0,b0) \
- reduceFromDoubleLeg(t1,b1) \
- addSub(t0,t1) \
- cselect(x2,x3,regMove) \
- cselect(z2,z3,regMove) \
- integerSqrLeg(b0,t0) \
- integerSqrLeg(b1,t1) \
- reduceFromDoubleLeg(x3,b0) \
- reduceFromDoubleLeg(z3,b1) \
- integerMulLeg(b0,x1,z3) \
- reduceFromDoubleLeg(z3,b0) \
- integerSqrLeg(b0,x2) \
- integerSqrLeg(b1,z2) \
- reduceFromDoubleLeg(x2,b0) \
- reduceFromDoubleLeg(z2,b1) \
- subtraction(t0,x2,z2) \
- multiplyA24Leg(t1,t0) \
- additionLeg(t1,t1,z2) \
- integerMulLeg(b0,x2,z2) \
- integerMulLeg(b1,t0,t1) \
- reduceFromDoubleLeg(x2,b0) \
- reduceFromDoubleLeg(z2,b1)
-
-#define ladderStepBmi2Adx \
- addSub(x2,z2) \
- addSub(x3,z3) \
- integerMulAdx(b0,x2,z3) \
- integerMulAdx(b1,x3,z2) \
- reduceFromDoubleAdx(t0,b0) \
- reduceFromDoubleAdx(t1,b1) \
- addSub(t0,t1) \
- cselect(x2,x3,regMove) \
- cselect(z2,z3,regMove) \
- integerSqrAdx(b0,t0) \
- integerSqrAdx(b1,t1) \
- reduceFromDoubleAdx(x3,b0) \
- reduceFromDoubleAdx(z3,b1) \
- integerMulAdx(b0,x1,z3) \
- reduceFromDoubleAdx(z3,b0) \
- integerSqrAdx(b0,x2) \
- integerSqrAdx(b1,z2) \
- reduceFromDoubleAdx(x2,b0) \
- reduceFromDoubleAdx(z2,b1) \
- subtraction(t0,x2,z2) \
- multiplyA24Adx(t1,t0) \
- additionAdx(t1,t1,z2) \
- integerMulAdx(b0,x2,z2) \
- integerMulAdx(b1,t0,t1) \
- reduceFromDoubleAdx(x2,b0) \
- reduceFromDoubleAdx(z2,b1)
-
-#define difAddLeg \
- addSub(x1,z1) \
- integerMulLeg(b0,z1,ui) \
- reduceFromDoubleLeg(z1,b0) \
- addSub(x1,z1) \
- integerSqrLeg(b0,x1) \
- integerSqrLeg(b1,z1) \
- reduceFromDoubleLeg(x1,b0) \
- reduceFromDoubleLeg(z1,b1) \
- integerMulLeg(b0,x1,z2) \
- integerMulLeg(b1,z1,x2) \
- reduceFromDoubleLeg(x1,b0) \
- reduceFromDoubleLeg(z1,b1)
-
-#define difAddBmi2Adx \
- addSub(x1,z1) \
- integerMulAdx(b0,z1,ui) \
- reduceFromDoubleAdx(z1,b0) \
- addSub(x1,z1) \
- integerSqrAdx(b0,x1) \
- integerSqrAdx(b1,z1) \
- reduceFromDoubleAdx(x1,b0) \
- reduceFromDoubleAdx(z1,b1) \
- integerMulAdx(b0,x1,z2) \
- integerMulAdx(b1,z1,x2) \
- reduceFromDoubleAdx(x1,b0) \
- reduceFromDoubleAdx(z1,b1)
-
-#define doubleLeg \
- addSub(x1,z1) \
- integerSqrLeg(b0,x1) \
- integerSqrLeg(b1,z1) \
- reduceFromDoubleLeg(x1,b0) \
- reduceFromDoubleLeg(z1,b1) \
- subtraction(t0,x1,z1) \
- multiplyA24Leg(t1,t0) \
- additionLeg(t1,t1,z1) \
- integerMulLeg(b0,x1,z1) \
- integerMulLeg(b1,t0,t1) \
- reduceFromDoubleLeg(x1,b0) \
- reduceFromDoubleLeg(z1,b1)
-
-#define doubleBmi2Adx \
- addSub(x1,z1) \
- integerSqrAdx(b0,x1) \
- integerSqrAdx(b1,z1) \
- reduceFromDoubleAdx(x1,b0) \
- reduceFromDoubleAdx(z1,b1) \
- subtraction(t0,x1,z1) \
- multiplyA24Adx(t1,t0) \
- additionAdx(t1,t1,z1) \
- integerMulAdx(b0,x1,z1) \
- integerMulAdx(b1,t0,t1) \
- reduceFromDoubleAdx(x1,b0) \
- reduceFromDoubleAdx(z1,b1)
diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.s b/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.s
deleted file mode 100644
index ed33ba3d032..00000000000
--- a/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.s
+++ /dev/null
@@ -1,194 +0,0 @@
-//go:build amd64 && !purego
-// +build amd64,!purego
-
-#include "textflag.h"
-
-// Depends on circl/math/fp448 package
-#include "../../math/fp448/fp_amd64.h"
-#include "curve_amd64.h"
-
-// CTE_A24 is (A+2)/4 from Curve448
-#define CTE_A24 39082
-
-#define Size 56
-
-// multiplyA24Leg multiplies x times CTE_A24 and stores in z
-// Uses: AX, DX, R8-R15, FLAGS
-// Instr: x86_64, cmov, adx
-#define multiplyA24Leg(z,x) \
- MOVQ $CTE_A24, R15; \
- MOVQ 0+x, AX; MULQ R15; MOVQ AX, R8; ;;;;;;;;;;;; MOVQ DX, R9; \
- MOVQ 8+x, AX; MULQ R15; ADDQ AX, R9; ADCQ $0, DX; MOVQ DX, R10; \
- MOVQ 16+x, AX; MULQ R15; ADDQ AX, R10; ADCQ $0, DX; MOVQ DX, R11; \
- MOVQ 24+x, AX; MULQ R15; ADDQ AX, R11; ADCQ $0, DX; MOVQ DX, R12; \
- MOVQ 32+x, AX; MULQ R15; ADDQ AX, R12; ADCQ $0, DX; MOVQ DX, R13; \
- MOVQ 40+x, AX; MULQ R15; ADDQ AX, R13; ADCQ $0, DX; MOVQ DX, R14; \
- MOVQ 48+x, AX; MULQ R15; ADDQ AX, R14; ADCQ $0, DX; \
- MOVQ DX, AX; \
- SHLQ $32, AX; \
- ADDQ DX, R8; MOVQ $0, DX; \
- ADCQ $0, R9; \
- ADCQ $0, R10; \
- ADCQ AX, R11; \
- ADCQ $0, R12; \
- ADCQ $0, R13; \
- ADCQ $0, R14; \
- ADCQ $0, DX; \
- MOVQ DX, AX; \
- SHLQ $32, AX; \
- ADDQ DX, R8; \
- ADCQ $0, R9; \
- ADCQ $0, R10; \
- ADCQ AX, R11; \
- ADCQ $0, R12; \
- ADCQ $0, R13; \
- ADCQ $0, R14; \
- MOVQ R8, 0+z; \
- MOVQ R9, 8+z; \
- MOVQ R10, 16+z; \
- MOVQ R11, 24+z; \
- MOVQ R12, 32+z; \
- MOVQ R13, 40+z; \
- MOVQ R14, 48+z;
-
-// multiplyA24Adx multiplies x times CTE_A24 and stores in z
-// Uses: AX, DX, R8-R14, FLAGS
-// Instr: x86_64, bmi2
-#define multiplyA24Adx(z,x) \
- MOVQ $CTE_A24, DX; \
- MULXQ 0+x, R8, R9; \
- MULXQ 8+x, AX, R10; ADDQ AX, R9; \
- MULXQ 16+x, AX, R11; ADCQ AX, R10; \
- MULXQ 24+x, AX, R12; ADCQ AX, R11; \
- MULXQ 32+x, AX, R13; ADCQ AX, R12; \
- MULXQ 40+x, AX, R14; ADCQ AX, R13; \
- MULXQ 48+x, AX, DX; ADCQ AX, R14; \
- ;;;;;;;;;;;;;;;;;;;; ADCQ $0, DX; \
- MOVQ DX, AX; \
- SHLQ $32, AX; \
- ADDQ DX, R8; MOVQ $0, DX; \
- ADCQ $0, R9; \
- ADCQ $0, R10; \
- ADCQ AX, R11; \
- ADCQ $0, R12; \
- ADCQ $0, R13; \
- ADCQ $0, R14; \
- ADCQ $0, DX; \
- MOVQ DX, AX; \
- SHLQ $32, AX; \
- ADDQ DX, R8; \
- ADCQ $0, R9; \
- ADCQ $0, R10; \
- ADCQ AX, R11; \
- ADCQ $0, R12; \
- ADCQ $0, R13; \
- ADCQ $0, R14; \
- MOVQ R8, 0+z; \
- MOVQ R9, 8+z; \
- MOVQ R10, 16+z; \
- MOVQ R11, 24+z; \
- MOVQ R12, 32+z; \
- MOVQ R13, 40+z; \
- MOVQ R14, 48+z;
-
-#define mulA24Legacy \
- multiplyA24Leg(0(DI),0(SI))
-#define mulA24Bmi2Adx \
- multiplyA24Adx(0(DI),0(SI))
-
-// func mulA24Amd64(z, x *fp448.Elt)
-TEXT ·mulA24Amd64(SB),NOSPLIT,$0-16
- MOVQ z+0(FP), DI
- MOVQ x+8(FP), SI
- CHECK_BMI2ADX(LMA24, mulA24Legacy, mulA24Bmi2Adx)
-
-// func ladderStepAmd64(w *[5]fp448.Elt, b uint)
-// ladderStepAmd64 calculates a point addition and doubling as follows:
-// (x2,z2) = 2*(x2,z2) and (x3,z3) = (x2,z2)+(x3,z3) using as a difference (x1,-).
-// w = {x1,x2,z2,x3,z4} are five fp255.Elt of 56 bytes.
-// stack = (t0,t1) are two fp.Elt of fp.Size bytes, and
-// (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes.
-TEXT ·ladderStepAmd64(SB),NOSPLIT,$336-16
- // Parameters
- #define regWork DI
- #define regMove SI
- #define x1 0*Size(regWork)
- #define x2 1*Size(regWork)
- #define z2 2*Size(regWork)
- #define x3 3*Size(regWork)
- #define z3 4*Size(regWork)
- // Local variables
- #define t0 0*Size(SP)
- #define t1 1*Size(SP)
- #define b0 2*Size(SP)
- #define b1 4*Size(SP)
- MOVQ w+0(FP), regWork
- MOVQ b+8(FP), regMove
- CHECK_BMI2ADX(LLADSTEP, ladderStepLeg, ladderStepBmi2Adx)
- #undef regWork
- #undef regMove
- #undef x1
- #undef x2
- #undef z2
- #undef x3
- #undef z3
- #undef t0
- #undef t1
- #undef b0
- #undef b1
-
-// func diffAddAmd64(work *[5]fp.Elt, swap uint)
-// diffAddAmd64 calculates a differential point addition using a precomputed point.
-// (x1,z1) = (x1,z1)+(mu) using a difference point (x2,z2)
-// work = {mu,x1,z1,x2,z2} are five fp448.Elt of 56 bytes, and
-// stack = (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes.
-// This is Equation 7 at https://eprint.iacr.org/2017/264.
-TEXT ·diffAddAmd64(SB),NOSPLIT,$224-16
- // Parameters
- #define regWork DI
- #define regSwap SI
- #define ui 0*Size(regWork)
- #define x1 1*Size(regWork)
- #define z1 2*Size(regWork)
- #define x2 3*Size(regWork)
- #define z2 4*Size(regWork)
- // Local variables
- #define b0 0*Size(SP)
- #define b1 2*Size(SP)
- MOVQ w+0(FP), regWork
- MOVQ b+8(FP), regSwap
- cswap(x1,x2,regSwap)
- cswap(z1,z2,regSwap)
- CHECK_BMI2ADX(LDIFADD, difAddLeg, difAddBmi2Adx)
- #undef regWork
- #undef regSwap
- #undef ui
- #undef x1
- #undef z1
- #undef x2
- #undef z2
- #undef b0
- #undef b1
-
-// func doubleAmd64(x, z *fp448.Elt)
-// doubleAmd64 calculates a point doubling (x1,z1) = 2*(x1,z1).
-// stack = (t0,t1) are two fp.Elt of fp.Size bytes, and
-// (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes.
-TEXT ·doubleAmd64(SB),NOSPLIT,$336-16
- // Parameters
- #define x1 0(DI)
- #define z1 0(SI)
- // Local variables
- #define t0 0*Size(SP)
- #define t1 1*Size(SP)
- #define b0 2*Size(SP)
- #define b1 4*Size(SP)
- MOVQ x+0(FP), DI
- MOVQ z+8(FP), SI
- CHECK_BMI2ADX(LDOUB,doubleLeg,doubleBmi2Adx)
- #undef x1
- #undef z1
- #undef t0
- #undef t1
- #undef b0
- #undef b1
diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve_generic.go b/vendor/github.com/cloudflare/circl/dh/x448/curve_generic.go
deleted file mode 100644
index b0b65ccf7eb..00000000000
--- a/vendor/github.com/cloudflare/circl/dh/x448/curve_generic.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package x448
-
-import (
- "encoding/binary"
- "math/bits"
-
- "github.com/cloudflare/circl/math/fp448"
-)
-
-func doubleGeneric(x, z *fp448.Elt) {
- t0, t1 := &fp448.Elt{}, &fp448.Elt{}
- fp448.AddSub(x, z)
- fp448.Sqr(x, x)
- fp448.Sqr(z, z)
- fp448.Sub(t0, x, z)
- mulA24Generic(t1, t0)
- fp448.Add(t1, t1, z)
- fp448.Mul(x, x, z)
- fp448.Mul(z, t0, t1)
-}
-
-func diffAddGeneric(w *[5]fp448.Elt, b uint) {
- mu, x1, z1, x2, z2 := &w[0], &w[1], &w[2], &w[3], &w[4]
- fp448.Cswap(x1, x2, b)
- fp448.Cswap(z1, z2, b)
- fp448.AddSub(x1, z1)
- fp448.Mul(z1, z1, mu)
- fp448.AddSub(x1, z1)
- fp448.Sqr(x1, x1)
- fp448.Sqr(z1, z1)
- fp448.Mul(x1, x1, z2)
- fp448.Mul(z1, z1, x2)
-}
-
-func ladderStepGeneric(w *[5]fp448.Elt, b uint) {
- x1, x2, z2, x3, z3 := &w[0], &w[1], &w[2], &w[3], &w[4]
- t0 := &fp448.Elt{}
- t1 := &fp448.Elt{}
- fp448.AddSub(x2, z2)
- fp448.AddSub(x3, z3)
- fp448.Mul(t0, x2, z3)
- fp448.Mul(t1, x3, z2)
- fp448.AddSub(t0, t1)
- fp448.Cmov(x2, x3, b)
- fp448.Cmov(z2, z3, b)
- fp448.Sqr(x3, t0)
- fp448.Sqr(z3, t1)
- fp448.Mul(z3, x1, z3)
- fp448.Sqr(x2, x2)
- fp448.Sqr(z2, z2)
- fp448.Sub(t0, x2, z2)
- mulA24Generic(t1, t0)
- fp448.Add(t1, t1, z2)
- fp448.Mul(x2, x2, z2)
- fp448.Mul(z2, t0, t1)
-}
-
-func mulA24Generic(z, x *fp448.Elt) {
- const A24 = 39082
- const n = 8
- var xx [7]uint64
- for i := range xx {
- xx[i] = binary.LittleEndian.Uint64(x[i*n : (i+1)*n])
- }
- h0, l0 := bits.Mul64(xx[0], A24)
- h1, l1 := bits.Mul64(xx[1], A24)
- h2, l2 := bits.Mul64(xx[2], A24)
- h3, l3 := bits.Mul64(xx[3], A24)
- h4, l4 := bits.Mul64(xx[4], A24)
- h5, l5 := bits.Mul64(xx[5], A24)
- h6, l6 := bits.Mul64(xx[6], A24)
-
- l1, c0 := bits.Add64(h0, l1, 0)
- l2, c1 := bits.Add64(h1, l2, c0)
- l3, c2 := bits.Add64(h2, l3, c1)
- l4, c3 := bits.Add64(h3, l4, c2)
- l5, c4 := bits.Add64(h4, l5, c3)
- l6, c5 := bits.Add64(h5, l6, c4)
- l7, _ := bits.Add64(h6, 0, c5)
-
- l0, c0 = bits.Add64(l0, l7, 0)
- l1, c1 = bits.Add64(l1, 0, c0)
- l2, c2 = bits.Add64(l2, 0, c1)
- l3, c3 = bits.Add64(l3, l7<<32, c2)
- l4, c4 = bits.Add64(l4, 0, c3)
- l5, c5 = bits.Add64(l5, 0, c4)
- l6, l7 = bits.Add64(l6, 0, c5)
-
- xx[0], c0 = bits.Add64(l0, l7, 0)
- xx[1], c1 = bits.Add64(l1, 0, c0)
- xx[2], c2 = bits.Add64(l2, 0, c1)
- xx[3], c3 = bits.Add64(l3, l7<<32, c2)
- xx[4], c4 = bits.Add64(l4, 0, c3)
- xx[5], c5 = bits.Add64(l5, 0, c4)
- xx[6], _ = bits.Add64(l6, 0, c5)
-
- for i := range xx {
- binary.LittleEndian.PutUint64(z[i*n:(i+1)*n], xx[i])
- }
-}
diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve_noasm.go b/vendor/github.com/cloudflare/circl/dh/x448/curve_noasm.go
deleted file mode 100644
index 3755b7c83b3..00000000000
--- a/vendor/github.com/cloudflare/circl/dh/x448/curve_noasm.go
+++ /dev/null
@@ -1,11 +0,0 @@
-//go:build !amd64 || purego
-// +build !amd64 purego
-
-package x448
-
-import fp "github.com/cloudflare/circl/math/fp448"
-
-func double(x, z *fp.Elt) { doubleGeneric(x, z) }
-func diffAdd(w *[5]fp.Elt, b uint) { diffAddGeneric(w, b) }
-func ladderStep(w *[5]fp.Elt, b uint) { ladderStepGeneric(w, b) }
-func mulA24(z, x *fp.Elt) { mulA24Generic(z, x) }
diff --git a/vendor/github.com/cloudflare/circl/dh/x448/doc.go b/vendor/github.com/cloudflare/circl/dh/x448/doc.go
deleted file mode 100644
index c02904fedae..00000000000
--- a/vendor/github.com/cloudflare/circl/dh/x448/doc.go
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
-Package x448 provides Diffie-Hellman functions as specified in RFC-7748.
-
-Validation of public keys.
-
-The Diffie-Hellman function, as described in RFC-7748 [1], works for any
-public key. However, if a different protocol requires contributory
-behaviour [2,3], then the public keys must be validated against low-order
-points [3,4]. To do that, the Shared function performs this validation
-internally and returns false when the public key is invalid (i.e., it
-is a low-order point).
-
-References:
- - [1] RFC7748 by Langley, Hamburg, Turner (https://rfc-editor.org/rfc/rfc7748.txt)
- - [2] Curve25519 by Bernstein (https://cr.yp.to/ecdh.html)
- - [3] Bernstein (https://cr.yp.to/ecdh.html#validate)
- - [4] Cremers&Jackson (https://eprint.iacr.org/2019/526)
-*/
-package x448
diff --git a/vendor/github.com/cloudflare/circl/dh/x448/key.go b/vendor/github.com/cloudflare/circl/dh/x448/key.go
deleted file mode 100644
index 2fdde51168a..00000000000
--- a/vendor/github.com/cloudflare/circl/dh/x448/key.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package x448
-
-import (
- "crypto/subtle"
-
- fp "github.com/cloudflare/circl/math/fp448"
-)
-
-// Size is the length in bytes of a X448 key.
-const Size = 56
-
-// Key represents a X448 key.
-type Key [Size]byte
-
-func (k *Key) clamp(in *Key) *Key {
- *k = *in
- k[0] &= 252
- k[55] |= 128
- return k
-}
-
-// isValidPubKey verifies if the public key is not a low-order point.
-func (k *Key) isValidPubKey() bool {
- fp.Modp((*fp.Elt)(k))
- var isLowOrder int
- for _, P := range lowOrderPoints {
- isLowOrder |= subtle.ConstantTimeCompare(P[:], k[:])
- }
- return isLowOrder == 0
-}
-
-// KeyGen obtains a public key given a secret key.
-func KeyGen(public, secret *Key) {
- ladderJoye(public.clamp(secret))
-}
-
-// Shared calculates Alice's shared key from Alice's secret key and Bob's
-// public key returning true on success. A failure case happens when the public
-// key is a low-order point, thus the shared key is all-zeros and the function
-// returns false.
-func Shared(shared, secret, public *Key) bool {
- validPk := *public
- ok := validPk.isValidPubKey()
- ladderMontgomery(shared.clamp(secret), &validPk)
- return ok
-}
diff --git a/vendor/github.com/cloudflare/circl/dh/x448/table.go b/vendor/github.com/cloudflare/circl/dh/x448/table.go
deleted file mode 100644
index eef53c30f80..00000000000
--- a/vendor/github.com/cloudflare/circl/dh/x448/table.go
+++ /dev/null
@@ -1,460 +0,0 @@
-package x448
-
-import fp "github.com/cloudflare/circl/math/fp448"
-
-// tableGenerator contains the set of points:
-//
-// t[i] = (xi+1)/(xi-1),
-//
-// where (xi,yi) = 2^iG and G is the generator point
-// Size = (448)*(448/8) = 25088 bytes.
-var tableGenerator = [448 * fp.Size]byte{
- /* (2^ 0)P */ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f,
- /* (2^ 1)P */ 0x37, 0xfa, 0xaa, 0x0d, 0x86, 0xa6, 0x24, 0xe9, 0x6c, 0x95, 0x08, 0x34, 0xba, 0x1a, 0x81, 0x3a, 0xae, 0x01, 0xa5, 0xa7, 0x05, 0x85, 0x96, 0x00, 0x06, 0x5a, 0xd7, 0xff, 0xee, 0x8e, 0x8f, 0x94, 0xd2, 0xdc, 0xd7, 0xfc, 0xe7, 0xe5, 0x99, 0x1d, 0x05, 0x46, 0x43, 0xe8, 0xbc, 0x12, 0xb7, 0xeb, 0x30, 0x5e, 0x7a, 0x85, 0x68, 0xed, 0x9d, 0x28,
- /* (2^ 2)P */ 0xf1, 0x7d, 0x08, 0x2b, 0x32, 0x4a, 0x62, 0x80, 0x36, 0xe7, 0xa4, 0x76, 0x5a, 0x2a, 0x1e, 0xf7, 0x9e, 0x3c, 0x40, 0x46, 0x9a, 0x1b, 0x61, 0xc1, 0xbf, 0x1a, 0x1b, 0xae, 0x91, 0x80, 0xa3, 0x76, 0x6c, 0xd4, 0x8f, 0xa4, 0xee, 0x26, 0x39, 0x23, 0xa4, 0x80, 0xf4, 0x66, 0x92, 0xe4, 0xe1, 0x18, 0x76, 0xc5, 0xe2, 0x19, 0x87, 0xd5, 0xc3, 0xe8,
- /* (2^ 3)P */ 0xfb, 0xc9, 0xf0, 0x07, 0xf2, 0x93, 0xd8, 0x50, 0x36, 0xed, 0xfb, 0xbd, 0xb2, 0xd3, 0xfc, 0xdf, 0xd5, 0x2a, 0x6e, 0x26, 0x09, 0xce, 0xd4, 0x07, 0x64, 0x9f, 0x40, 0x74, 0xad, 0x98, 0x2f, 0x1c, 0xb6, 0xdc, 0x2d, 0x42, 0xff, 0xbf, 0x97, 0xd8, 0xdb, 0xef, 0x99, 0xca, 0x73, 0x99, 0x1a, 0x04, 0x3b, 0x56, 0x2c, 0x1f, 0x87, 0x9d, 0x9f, 0x03,
- /* (2^ 4)P */ 0x4c, 0x35, 0x97, 0xf7, 0x81, 0x2c, 0x84, 0xa6, 0xe0, 0xcb, 0xce, 0x37, 0x4c, 0x21, 0x1c, 0x67, 0xfa, 0xab, 0x18, 0x4d, 0xef, 0xd0, 0xf0, 0x44, 0xa9, 0xfb, 0xc0, 0x8e, 0xda, 0x57, 0xa1, 0xd8, 0xeb, 0x87, 0xf4, 0x17, 0xea, 0x66, 0x0f, 0x16, 0xea, 0xcd, 0x5f, 0x3e, 0x88, 0xea, 0x09, 0x68, 0x40, 0xdf, 0x43, 0xcc, 0x54, 0x61, 0x58, 0xaa,
- /* (2^ 5)P */ 0x8d, 0xe7, 0x59, 0xd7, 0x5e, 0x63, 0x37, 0xa7, 0x3f, 0xd1, 0x49, 0x85, 0x01, 0xdd, 0x5e, 0xb3, 0xe6, 0x29, 0xcb, 0x25, 0x93, 0xdd, 0x08, 0x96, 0x83, 0x52, 0x76, 0x85, 0xf5, 0x5d, 0x02, 0xbf, 0xe9, 0x6d, 0x15, 0x27, 0xc1, 0x09, 0xd1, 0x14, 0x4d, 0x6e, 0xe8, 0xaf, 0x59, 0x58, 0x34, 0x9d, 0x2a, 0x99, 0x85, 0x26, 0xbe, 0x4b, 0x1e, 0xb9,
- /* (2^ 6)P */ 0x8d, 0xce, 0x94, 0xe2, 0x18, 0x56, 0x0d, 0x82, 0x8e, 0xdf, 0x85, 0x01, 0x8f, 0x93, 0x3c, 0xc6, 0xbd, 0x61, 0xfb, 0xf4, 0x22, 0xc5, 0x16, 0x87, 0xd1, 0xb1, 0x9e, 0x09, 0xc5, 0x83, 0x2e, 0x4a, 0x07, 0x88, 0xee, 0xe0, 0x29, 0x8d, 0x2e, 0x1f, 0x88, 0xad, 0xfd, 0x18, 0x93, 0xb7, 0xed, 0x42, 0x86, 0x78, 0xf0, 0xb8, 0x70, 0xbe, 0x01, 0x67,
- /* (2^ 7)P */ 0xdf, 0x62, 0x2d, 0x94, 0xc7, 0x35, 0x23, 0xda, 0x27, 0xbb, 0x2b, 0xdb, 0x30, 0x80, 0x68, 0x16, 0xa3, 0xae, 0xd7, 0xd2, 0xa7, 0x7c, 0xbf, 0x6a, 0x1d, 0x83, 0xde, 0x96, 0x0a, 0x43, 0xb6, 0x30, 0x37, 0xd6, 0xee, 0x63, 0x59, 0x9a, 0xbf, 0xa3, 0x30, 0x6c, 0xaf, 0x0c, 0xee, 0x3d, 0xcb, 0x35, 0x4b, 0x55, 0x5f, 0x84, 0x85, 0xcb, 0x4f, 0x1e,
- /* (2^ 8)P */ 0x9d, 0x04, 0x68, 0x89, 0xa4, 0xa9, 0x0d, 0x87, 0xc1, 0x70, 0xf1, 0xeb, 0xfb, 0x47, 0x0a, 0xf0, 0xde, 0x67, 0xb7, 0x94, 0xcd, 0x36, 0x43, 0xa5, 0x49, 0x43, 0x67, 0xc3, 0xee, 0x3c, 0x6b, 0xec, 0xd0, 0x1a, 0xf4, 0xad, 0xef, 0x06, 0x4a, 0xe8, 0x46, 0x24, 0xd7, 0x93, 0xbf, 0xf0, 0xe3, 0x81, 0x61, 0xec, 0xea, 0x64, 0xfe, 0x67, 0xeb, 0xc7,
- /* (2^ 9)P */ 0x95, 0x45, 0x79, 0xcf, 0x2c, 0xfd, 0x9b, 0xfe, 0x84, 0x46, 0x4b, 0x8f, 0xa1, 0xcf, 0xc3, 0x04, 0x94, 0x78, 0xdb, 0xc9, 0xa6, 0x01, 0x75, 0xa4, 0xb4, 0x93, 0x72, 0x43, 0xa7, 0x7d, 0xda, 0x31, 0x38, 0x54, 0xab, 0x4e, 0x3f, 0x89, 0xa6, 0xab, 0x57, 0xc0, 0x16, 0x65, 0xdb, 0x92, 0x96, 0xe4, 0xc8, 0xae, 0xe7, 0x4c, 0x7a, 0xeb, 0xbb, 0x5a,
- /* (2^ 10)P */ 0xbe, 0xfe, 0x86, 0xc3, 0x97, 0xe0, 0x6a, 0x18, 0x20, 0x21, 0xca, 0x22, 0x55, 0xa1, 0xeb, 0xf5, 0x74, 0xe5, 0xc9, 0x59, 0xa7, 0x92, 0x65, 0x15, 0x08, 0x71, 0xd1, 0x09, 0x7e, 0x83, 0xfc, 0xbc, 0x5a, 0x93, 0x38, 0x0d, 0x43, 0x42, 0xfd, 0x76, 0x30, 0xe8, 0x63, 0x60, 0x09, 0x8d, 0x6c, 0xd3, 0xf8, 0x56, 0x3d, 0x68, 0x47, 0xab, 0xa0, 0x1d,
- /* (2^ 11)P */ 0x38, 0x50, 0x1c, 0xb1, 0xac, 0x88, 0x8f, 0x38, 0xe3, 0x69, 0xe6, 0xfc, 0x4f, 0x8f, 0xe1, 0x9b, 0xb1, 0x1a, 0x09, 0x39, 0x19, 0xdf, 0xcd, 0x98, 0x7b, 0x64, 0x42, 0xf6, 0x11, 0xea, 0xc7, 0xe8, 0x92, 0x65, 0x00, 0x2c, 0x75, 0xb5, 0x94, 0x1e, 0x5b, 0xa6, 0x66, 0x81, 0x77, 0xf3, 0x39, 0x94, 0xac, 0xbd, 0xe4, 0x2a, 0x66, 0x84, 0x9c, 0x60,
- /* (2^ 12)P */ 0xb5, 0xb6, 0xd9, 0x03, 0x67, 0xa4, 0xa8, 0x0a, 0x4a, 0x2b, 0x9d, 0xfa, 0x13, 0xe1, 0x99, 0x25, 0x4a, 0x5c, 0x67, 0xb9, 0xb2, 0xb7, 0xdd, 0x1e, 0xaf, 0xeb, 0x63, 0x41, 0xb6, 0xb9, 0xa0, 0x87, 0x0a, 0xe0, 0x06, 0x07, 0xaa, 0x97, 0xf8, 0xf9, 0x38, 0x4f, 0xdf, 0x0c, 0x40, 0x7c, 0xc3, 0x98, 0xa9, 0x74, 0xf1, 0x5d, 0xda, 0xd1, 0xc0, 0x0a,
- /* (2^ 13)P */ 0xf2, 0x0a, 0xab, 0xab, 0x94, 0x50, 0xf0, 0xa3, 0x6f, 0xc6, 0x66, 0xba, 0xa6, 0xdc, 0x44, 0xdd, 0xd6, 0x08, 0xf4, 0xd3, 0xed, 0xb1, 0x40, 0x93, 0xee, 0xf6, 0xb8, 0x8e, 0xb4, 0x7c, 0xb9, 0x82, 0xc9, 0x9d, 0x45, 0x3b, 0x8e, 0x10, 0xcb, 0x70, 0x1e, 0xba, 0x3c, 0x62, 0x50, 0xda, 0xa9, 0x93, 0xb5, 0xd7, 0xd0, 0x6f, 0x29, 0x52, 0x95, 0xae,
- /* (2^ 14)P */ 0x14, 0x68, 0x69, 0x23, 0xa8, 0x44, 0x87, 0x9e, 0x22, 0x91, 0xe8, 0x92, 0xdf, 0xf7, 0xae, 0xba, 0x1c, 0x96, 0xe1, 0xc3, 0x94, 0xed, 0x6c, 0x95, 0xae, 0x96, 0xa7, 0x15, 0x9f, 0xf1, 0x17, 0x11, 0x92, 0x42, 0xd5, 0xcd, 0x18, 0xe7, 0xa9, 0xb5, 0x2f, 0xcd, 0xde, 0x6c, 0xc9, 0x7d, 0xfc, 0x7e, 0xbd, 0x7f, 0x10, 0x3d, 0x01, 0x00, 0x8d, 0x95,
- /* (2^ 15)P */ 0x3b, 0x76, 0x72, 0xae, 0xaf, 0x84, 0xf2, 0xf7, 0xd1, 0x6d, 0x13, 0x9c, 0x47, 0xe1, 0xb7, 0xa3, 0x19, 0x16, 0xee, 0x75, 0x45, 0xf6, 0x1a, 0x7b, 0x78, 0x49, 0x79, 0x05, 0x86, 0xf0, 0x7f, 0x9f, 0xfc, 0xc4, 0xbd, 0x86, 0xf3, 0x41, 0xa7, 0xfe, 0x01, 0xd5, 0x67, 0x16, 0x10, 0x5b, 0xa5, 0x16, 0xf3, 0x7f, 0x60, 0xce, 0xd2, 0x0c, 0x8e, 0x4b,
- /* (2^ 16)P */ 0x4a, 0x07, 0x99, 0x4a, 0x0f, 0x74, 0x91, 0x14, 0x68, 0xb9, 0x48, 0xb7, 0x44, 0x77, 0x9b, 0x4a, 0xe0, 0x68, 0x0e, 0x43, 0x4d, 0x98, 0x98, 0xbf, 0xa8, 0x3a, 0xb7, 0x6d, 0x2a, 0x9a, 0x77, 0x5f, 0x62, 0xf5, 0x6b, 0x4a, 0xb7, 0x7d, 0xe5, 0x09, 0x6b, 0xc0, 0x8b, 0x9c, 0x88, 0x37, 0x33, 0xf2, 0x41, 0xac, 0x22, 0x1f, 0xcf, 0x3b, 0x82, 0x34,
- /* (2^ 17)P */ 0x00, 0xc3, 0x78, 0x42, 0x32, 0x2e, 0xdc, 0xda, 0xb1, 0x96, 0x21, 0xa4, 0xe4, 0xbb, 0xe9, 0x9d, 0xbb, 0x0f, 0x93, 0xed, 0x26, 0x3d, 0xb5, 0xdb, 0x94, 0x31, 0x37, 0x07, 0xa2, 0xb2, 0xd5, 0x99, 0x0d, 0x93, 0xe1, 0xce, 0x3f, 0x0b, 0x96, 0x82, 0x47, 0xfe, 0x60, 0x6f, 0x8f, 0x61, 0x88, 0xd7, 0x05, 0x95, 0x0b, 0x46, 0x06, 0xb7, 0x32, 0x06,
- /* (2^ 18)P */ 0x44, 0xf5, 0x34, 0xdf, 0x2f, 0x9c, 0x5d, 0x9f, 0x53, 0x5c, 0x42, 0x8f, 0xc9, 0xdc, 0xd8, 0x40, 0xa2, 0xe7, 0x6a, 0x4a, 0x05, 0xf7, 0x86, 0x77, 0x2b, 0xae, 0x37, 0xed, 0x48, 0xfb, 0xf7, 0x62, 0x7c, 0x17, 0x59, 0x92, 0x41, 0x61, 0x93, 0x38, 0x30, 0xd1, 0xef, 0x54, 0x54, 0x03, 0x17, 0x57, 0x91, 0x15, 0x11, 0x33, 0xb5, 0xfa, 0xfb, 0x17,
- /* (2^ 19)P */ 0x29, 0xbb, 0xd4, 0xb4, 0x9c, 0xf1, 0x72, 0x94, 0xce, 0x6a, 0x29, 0xa8, 0x89, 0x18, 0x19, 0xf7, 0xb7, 0xcc, 0xee, 0x9a, 0x02, 0xe3, 0xc0, 0xb1, 0xe0, 0xee, 0x83, 0x78, 0xb4, 0x9e, 0x07, 0x87, 0xdf, 0xb0, 0x82, 0x26, 0x4e, 0xa4, 0x0c, 0x33, 0xaf, 0x40, 0x59, 0xb6, 0xdd, 0x52, 0x45, 0xf0, 0xb4, 0xf6, 0xe8, 0x4e, 0x4e, 0x79, 0x1a, 0x5d,
- /* (2^ 20)P */ 0x27, 0x33, 0x4d, 0x4c, 0x6b, 0x4f, 0x75, 0xb1, 0xbc, 0x1f, 0xab, 0x5b, 0x2b, 0xf0, 0x1c, 0x57, 0x86, 0xdd, 0xfd, 0x60, 0xb0, 0x8c, 0xe7, 0x9a, 0xe5, 0x5c, 0xeb, 0x11, 0x3a, 0xda, 0x22, 0x25, 0x99, 0x06, 0x8d, 0xf4, 0xaf, 0x29, 0x7a, 0xc9, 0xe5, 0xd2, 0x16, 0x9e, 0xd4, 0x63, 0x1d, 0x64, 0xa6, 0x47, 0x96, 0x37, 0x6f, 0x93, 0x2c, 0xcc,
- /* (2^ 21)P */ 0xc1, 0x94, 0x74, 0x86, 0x75, 0xf2, 0x91, 0x58, 0x23, 0x85, 0x63, 0x76, 0x54, 0xc7, 0xb4, 0x8c, 0xbc, 0x4e, 0xc4, 0xa7, 0xba, 0xa0, 0x55, 0x26, 0x71, 0xd5, 0x33, 0x72, 0xc9, 0xad, 0x1e, 0xf9, 0x5d, 0x78, 0x70, 0x93, 0x4e, 0x85, 0xfc, 0x39, 0x06, 0x73, 0x76, 0xff, 0xe8, 0x64, 0x69, 0x42, 0x45, 0xb2, 0x69, 0xb5, 0x32, 0xe7, 0x2c, 0xde,
- /* (2^ 22)P */ 0xde, 0x16, 0xd8, 0x33, 0x49, 0x32, 0xe9, 0x0e, 0x3a, 0x60, 0xee, 0x2e, 0x24, 0x75, 0xe3, 0x9c, 0x92, 0x07, 0xdb, 0xad, 0x92, 0xf5, 0x11, 0xdf, 0xdb, 0xb0, 0x17, 0x5c, 0xd6, 0x1a, 0x70, 0x00, 0xb7, 0xe2, 0x18, 0xec, 0xdc, 0xc2, 0x02, 0x93, 0xb3, 0xc8, 0x3f, 0x4f, 0x1b, 0x96, 0xe6, 0x33, 0x8c, 0xfb, 0xcc, 0xa5, 0x4e, 0xe8, 0xe7, 0x11,
- /* (2^ 23)P */ 0x05, 0x7a, 0x74, 0x52, 0xf8, 0xdf, 0x0d, 0x7c, 0x6a, 0x1a, 0x4e, 0x9a, 0x02, 0x1d, 0xae, 0x77, 0xf8, 0x8e, 0xf9, 0xa2, 0x38, 0x54, 0x50, 0xb2, 0x2c, 0x08, 0x9d, 0x9b, 0x9f, 0xfb, 0x2b, 0x06, 0xde, 0x9d, 0xc2, 0x03, 0x0b, 0x22, 0x2b, 0x10, 0x5b, 0x3a, 0x73, 0x29, 0x8e, 0x3e, 0x37, 0x08, 0x2c, 0x3b, 0xf8, 0x80, 0xc1, 0x66, 0x1e, 0x98,
- /* (2^ 24)P */ 0xd8, 0xd6, 0x3e, 0xcd, 0x63, 0x8c, 0x2b, 0x41, 0x81, 0xc0, 0x0c, 0x06, 0x87, 0xd6, 0xe7, 0x92, 0xfe, 0xf1, 0x0c, 0x4a, 0x84, 0x5b, 0xaf, 0x40, 0x53, 0x6f, 0x60, 0xd6, 0x6b, 0x76, 0x4b, 0xc2, 0xad, 0xc9, 0xb6, 0xb6, 0x6a, 0xa2, 0xb3, 0xf5, 0xf5, 0xc2, 0x55, 0x83, 0xb2, 0xd3, 0xe9, 0x41, 0x6c, 0x63, 0x51, 0xb8, 0x81, 0x74, 0xc8, 0x2c,
- /* (2^ 25)P */ 0xb2, 0xaf, 0x1c, 0xee, 0x07, 0xb0, 0x58, 0xa8, 0x2c, 0x6a, 0xc9, 0x2d, 0x62, 0x28, 0x75, 0x0c, 0x40, 0xb6, 0x11, 0x33, 0x96, 0x80, 0x28, 0x6d, 0xd5, 0x9e, 0x87, 0x90, 0x01, 0x66, 0x1d, 0x1c, 0xf8, 0xb4, 0x92, 0xac, 0x38, 0x18, 0x05, 0xc2, 0x4c, 0x4b, 0x54, 0x7d, 0x80, 0x46, 0x87, 0x2d, 0x99, 0x8e, 0x70, 0x80, 0x69, 0x71, 0x8b, 0xed,
- /* (2^ 26)P */ 0x37, 0xa7, 0x6b, 0x71, 0x36, 0x75, 0x8e, 0xff, 0x0f, 0x42, 0xda, 0x5a, 0x46, 0xa6, 0x97, 0x79, 0x7e, 0x30, 0xb3, 0x8f, 0xc7, 0x3a, 0xa0, 0xcb, 0x1d, 0x9c, 0x78, 0x77, 0x36, 0xc2, 0xe7, 0xf4, 0x2f, 0x29, 0x07, 0xb1, 0x07, 0xfd, 0xed, 0x1b, 0x39, 0x77, 0x06, 0x38, 0x77, 0x0f, 0x50, 0x31, 0x12, 0xbf, 0x92, 0xbf, 0x72, 0x79, 0x54, 0xa9,
- /* (2^ 27)P */ 0xbd, 0x4d, 0x46, 0x6b, 0x1a, 0x80, 0x46, 0x2d, 0xed, 0xfd, 0x64, 0x6d, 0x94, 0xbc, 0x4a, 0x6e, 0x0c, 0x12, 0xf6, 0x12, 0xab, 0x54, 0x88, 0xd3, 0x85, 0xac, 0x51, 0xae, 0x6f, 0xca, 0xc4, 0xb7, 0xec, 0x22, 0x54, 0x6d, 0x80, 0xb2, 0x1c, 0x63, 0x33, 0x76, 0x6b, 0x8e, 0x6d, 0x59, 0xcd, 0x73, 0x92, 0x5f, 0xff, 0xad, 0x10, 0x35, 0x70, 0x5f,
- /* (2^ 28)P */ 0xb3, 0x84, 0xde, 0xc8, 0x04, 0x43, 0x63, 0xfa, 0x29, 0xd9, 0xf0, 0x69, 0x65, 0x5a, 0x0c, 0xe8, 0x2e, 0x0b, 0xfe, 0xb0, 0x7a, 0x42, 0xb3, 0xc3, 0xfc, 0xe6, 0xb8, 0x92, 0x29, 0xae, 0xed, 0xec, 0xd5, 0xe8, 0x4a, 0xa1, 0xbd, 0x3b, 0xd3, 0xc0, 0x07, 0xab, 0x65, 0x65, 0x35, 0x9a, 0xa6, 0x5e, 0x78, 0x18, 0x76, 0x1c, 0x15, 0x49, 0xe6, 0x75,
- /* (2^ 29)P */ 0x45, 0xb3, 0x92, 0xa9, 0xc3, 0xb8, 0x11, 0x68, 0x64, 0x3a, 0x83, 0x5d, 0xa8, 0x94, 0x6a, 0x9d, 0xaa, 0x27, 0x9f, 0x98, 0x5d, 0xc0, 0x29, 0xf0, 0xc0, 0x4b, 0x14, 0x3c, 0x05, 0xe7, 0xf8, 0xbd, 0x38, 0x22, 0x96, 0x75, 0x65, 0x5e, 0x0d, 0x3f, 0xbb, 0x6f, 0xe8, 0x3f, 0x96, 0x76, 0x9f, 0xba, 0xd9, 0x44, 0x92, 0x96, 0x22, 0xe7, 0x52, 0xe7,
- /* (2^ 30)P */ 0xf4, 0xa3, 0x95, 0x90, 0x47, 0xdf, 0x7d, 0xdc, 0xf4, 0x13, 0x87, 0x67, 0x7d, 0x4f, 0x9d, 0xa0, 0x00, 0x46, 0x72, 0x08, 0xc3, 0xa2, 0x7a, 0x3e, 0xe7, 0x6d, 0x52, 0x7c, 0x11, 0x36, 0x50, 0x83, 0x89, 0x64, 0xcb, 0x1f, 0x08, 0x83, 0x46, 0xcb, 0xac, 0xa6, 0xd8, 0x9c, 0x1b, 0xe8, 0x05, 0x47, 0xc7, 0x26, 0x06, 0x83, 0x39, 0xe9, 0xb1, 0x1c,
- /* (2^ 31)P */ 0x11, 0xe8, 0xc8, 0x42, 0xbf, 0x30, 0x9c, 0xa3, 0xf1, 0x85, 0x96, 0x95, 0x4f, 0x4f, 0x52, 0xa2, 0xf5, 0x8b, 0x68, 0x24, 0x16, 0xac, 0x9b, 0xa9, 0x27, 0x28, 0x0e, 0x84, 0x03, 0x46, 0x22, 0x5f, 0xf7, 0x0d, 0xa6, 0x85, 0x88, 0xc1, 0x45, 0x4b, 0x85, 0x1a, 0x10, 0x7f, 0xc9, 0x94, 0x20, 0xb0, 0x04, 0x28, 0x12, 0x30, 0xb9, 0xe6, 0x40, 0x6b,
- /* (2^ 32)P */ 0xac, 0x1b, 0x57, 0xb6, 0x42, 0xdb, 0x81, 0x8d, 0x76, 0xfd, 0x9b, 0x1c, 0x29, 0x30, 0xd5, 0x3a, 0xcc, 0x53, 0xd9, 0x26, 0x7a, 0x0f, 0x9c, 0x2e, 0x79, 0xf5, 0x62, 0xeb, 0x61, 0x9d, 0x9b, 0x80, 0x39, 0xcd, 0x60, 0x2e, 0x1f, 0x08, 0x22, 0xbc, 0x19, 0xb3, 0x2a, 0x43, 0x44, 0xf2, 0x4e, 0x66, 0xf4, 0x36, 0xa6, 0xa7, 0xbc, 0xa4, 0x15, 0x7e,
- /* (2^ 33)P */ 0xc1, 0x90, 0x8a, 0xde, 0xff, 0x78, 0xc3, 0x73, 0x16, 0xee, 0x76, 0xa0, 0x84, 0x60, 0x8d, 0xe6, 0x82, 0x0f, 0xde, 0x4e, 0xc5, 0x99, 0x34, 0x06, 0x90, 0x44, 0x55, 0xf8, 0x91, 0xd8, 0xe1, 0xe4, 0x2c, 0x8a, 0xde, 0x94, 0x1e, 0x78, 0x25, 0x3d, 0xfd, 0xd8, 0x59, 0x7d, 0xaf, 0x6e, 0xbe, 0x96, 0xbe, 0x3c, 0x16, 0x23, 0x0f, 0x4c, 0xa4, 0x28,
- /* (2^ 34)P */ 0xba, 0x11, 0x35, 0x57, 0x03, 0xb6, 0xf4, 0x24, 0x89, 0xb8, 0x5a, 0x0d, 0x50, 0x9c, 0xaa, 0x51, 0x7f, 0xa4, 0x0e, 0xfc, 0x71, 0xb3, 0x3b, 0xf1, 0x96, 0x50, 0x23, 0x15, 0xf5, 0xf5, 0xd4, 0x23, 0xdc, 0x8b, 0x26, 0x9e, 0xae, 0xb7, 0x50, 0xcd, 0xc4, 0x25, 0xf6, 0x75, 0x40, 0x9c, 0x37, 0x79, 0x33, 0x60, 0xd4, 0x4b, 0x13, 0x32, 0xee, 0xe2,
- /* (2^ 35)P */ 0x43, 0xb8, 0x56, 0x59, 0xf0, 0x68, 0x23, 0xb3, 0xea, 0x70, 0x58, 0x4c, 0x1e, 0x5a, 0x16, 0x54, 0x03, 0xb2, 0xf4, 0x73, 0xb6, 0xd9, 0x5c, 0x9c, 0x6f, 0xcf, 0x82, 0x2e, 0x54, 0x15, 0x46, 0x2c, 0xa3, 0xda, 0x4e, 0x87, 0xf5, 0x2b, 0xba, 0x91, 0xa3, 0xa0, 0x89, 0xba, 0x48, 0x2b, 0xfa, 0x64, 0x02, 0x7f, 0x78, 0x03, 0xd1, 0xe8, 0x3b, 0xe9,
- /* (2^ 36)P */ 0x15, 0xa4, 0x71, 0xd4, 0x0c, 0x24, 0xe9, 0x07, 0xa1, 0x43, 0xf4, 0x7f, 0xbb, 0xa2, 0xa6, 0x6b, 0xfa, 0xb7, 0xea, 0x58, 0xd1, 0x96, 0xb0, 0x24, 0x5c, 0xc7, 0x37, 0x4e, 0x60, 0x0f, 0x40, 0xf2, 0x2f, 0x44, 0x70, 0xea, 0x80, 0x63, 0xfe, 0xfc, 0x46, 0x59, 0x12, 0x27, 0xb5, 0x27, 0xfd, 0xb7, 0x73, 0x0b, 0xca, 0x8b, 0xc2, 0xd3, 0x71, 0x08,
- /* (2^ 37)P */ 0x26, 0x0e, 0xd7, 0x52, 0x6f, 0xf1, 0xf2, 0x9d, 0xb8, 0x3d, 0xbd, 0xd4, 0x75, 0x97, 0xd8, 0xbf, 0xa8, 0x86, 0x96, 0xa5, 0x80, 0xa0, 0x45, 0x75, 0xf6, 0x77, 0x71, 0xdb, 0x77, 0x96, 0x55, 0x99, 0x31, 0xd0, 0x4f, 0x34, 0xf4, 0x35, 0x39, 0x41, 0xd3, 0x7d, 0xf7, 0xe2, 0x74, 0xde, 0xbe, 0x5b, 0x1f, 0x39, 0x10, 0x21, 0xa3, 0x4d, 0x3b, 0xc8,
- /* (2^ 38)P */ 0x04, 0x00, 0x2a, 0x45, 0xb2, 0xaf, 0x9b, 0x18, 0x6a, 0xeb, 0x96, 0x28, 0xa4, 0x77, 0xd0, 0x13, 0xcf, 0x17, 0x65, 0xe8, 0xc5, 0x81, 0x28, 0xad, 0x39, 0x7a, 0x0b, 0xaa, 0x55, 0x2b, 0xf3, 0xfc, 0x86, 0x40, 0xad, 0x0d, 0x1e, 0x28, 0xa2, 0x2d, 0xc5, 0xd6, 0x04, 0x15, 0xa2, 0x30, 0x3d, 0x12, 0x8e, 0xd6, 0xb5, 0xf7, 0x69, 0xbb, 0x84, 0x20,
- /* (2^ 39)P */ 0xd7, 0x7a, 0x77, 0x2c, 0xfb, 0x81, 0x80, 0xe9, 0x1e, 0xc6, 0x36, 0x31, 0x79, 0xc3, 0x7c, 0xa9, 0x57, 0x6b, 0xb5, 0x70, 0xfb, 0xe4, 0xa1, 0xff, 0xfd, 0x21, 0xa5, 0x7c, 0xfa, 0x44, 0xba, 0x0d, 0x96, 0x3d, 0xc4, 0x5c, 0x39, 0x52, 0x87, 0xd7, 0x22, 0x0f, 0x52, 0x88, 0x91, 0x87, 0x96, 0xac, 0xfa, 0x3b, 0xdf, 0xdc, 0x83, 0x8c, 0x99, 0x29,
- /* (2^ 40)P */ 0x98, 0x6b, 0x3a, 0x8d, 0x83, 0x17, 0xe1, 0x62, 0xd8, 0x80, 0x4c, 0x97, 0xce, 0x6b, 0xaa, 0x10, 0xa7, 0xc4, 0xe9, 0xeb, 0xa5, 0xfb, 0xc9, 0xdd, 0x2d, 0xeb, 0xfc, 0x9a, 0x71, 0xcd, 0x68, 0x6e, 0xc0, 0x35, 0x64, 0x62, 0x1b, 0x95, 0x12, 0xe8, 0x53, 0xec, 0xf0, 0xf4, 0x86, 0x86, 0x78, 0x18, 0xc4, 0xc6, 0xbc, 0x5a, 0x59, 0x8f, 0x7c, 0x7e,
- /* (2^ 41)P */ 0x7f, 0xd7, 0x1e, 0xc5, 0x83, 0xdc, 0x1f, 0xbe, 0x0b, 0xcf, 0x2e, 0x01, 0x01, 0xed, 0xac, 0x17, 0x3b, 0xed, 0xa4, 0x30, 0x96, 0x0e, 0x14, 0x7e, 0x19, 0x2b, 0xa5, 0x67, 0x1e, 0xb3, 0x34, 0x03, 0xa8, 0xbb, 0x0a, 0x7d, 0x08, 0x2d, 0xd5, 0x53, 0x19, 0x6f, 0x13, 0xd5, 0xc0, 0x90, 0x8a, 0xcc, 0xc9, 0x5c, 0xab, 0x24, 0xd7, 0x03, 0xf6, 0x57,
- /* (2^ 42)P */ 0x49, 0xcb, 0xb4, 0x96, 0x5f, 0xa6, 0xf8, 0x71, 0x6f, 0x59, 0xad, 0x05, 0x24, 0x2d, 0xaf, 0x67, 0xa8, 0xbe, 0x95, 0xdf, 0x0d, 0x28, 0x5a, 0x7f, 0x6e, 0x87, 0x8c, 0x6e, 0x67, 0x0c, 0xf4, 0xe0, 0x1c, 0x30, 0xc2, 0x66, 0xae, 0x20, 0xa1, 0x34, 0xec, 0x9c, 0xbc, 0xae, 0x3d, 0xa1, 0x28, 0x28, 0x95, 0x1d, 0xc9, 0x3a, 0xa8, 0xfd, 0xfc, 0xa1,
- /* (2^ 43)P */ 0xe2, 0x2b, 0x9d, 0xed, 0x02, 0x99, 0x67, 0xbb, 0x2e, 0x16, 0x62, 0x05, 0x70, 0xc7, 0x27, 0xb9, 0x1c, 0x3f, 0xf2, 0x11, 0x01, 0xd8, 0x51, 0xa4, 0x18, 0x92, 0xa9, 0x5d, 0xfb, 0xa9, 0xe4, 0x42, 0xba, 0x38, 0x34, 0x1a, 0x4a, 0xc5, 0x6a, 0x37, 0xde, 0xa7, 0x0c, 0xb4, 0x7e, 0x7f, 0xde, 0xa6, 0xee, 0xcd, 0x55, 0x57, 0x05, 0x06, 0xfd, 0x5d,
- /* (2^ 44)P */ 0x2f, 0x32, 0xcf, 0x2e, 0x2c, 0x7b, 0xbe, 0x9a, 0x0c, 0x57, 0x35, 0xf8, 0x87, 0xda, 0x9c, 0xec, 0x48, 0xf2, 0xbb, 0xe2, 0xda, 0x10, 0x58, 0x20, 0xc6, 0xd3, 0x87, 0xe9, 0xc7, 0x26, 0xd1, 0x9a, 0x46, 0x87, 0x90, 0xda, 0xdc, 0xde, 0xc3, 0xb3, 0xf2, 0xe8, 0x6f, 0x4a, 0xe6, 0xe8, 0x9d, 0x98, 0x36, 0x20, 0x03, 0x47, 0x15, 0x3f, 0x64, 0x59,
- /* (2^ 45)P */ 0xd4, 0x71, 0x49, 0x0a, 0x67, 0x97, 0xaa, 0x3f, 0xf4, 0x1b, 0x3a, 0x6e, 0x5e, 0x17, 0xcc, 0x0a, 0x8f, 0x81, 0x6a, 0x41, 0x38, 0x77, 0x40, 0x8a, 0x11, 0x42, 0x62, 0xd2, 0x50, 0x32, 0x79, 0x78, 0x28, 0xc2, 0x2e, 0x10, 0x01, 0x94, 0x30, 0x4f, 0x7f, 0x18, 0x17, 0x56, 0x85, 0x4e, 0xad, 0xf7, 0xcb, 0x87, 0x3c, 0x3f, 0x50, 0x2c, 0xc0, 0xba,
- /* (2^ 46)P */ 0xbc, 0x30, 0x8e, 0x65, 0x8e, 0x57, 0x5b, 0x38, 0x7a, 0xd4, 0x95, 0x52, 0x7a, 0x32, 0x59, 0x69, 0xcd, 0x9d, 0x47, 0x34, 0x5b, 0x55, 0xa5, 0x24, 0x60, 0xdd, 0xc0, 0xc1, 0x62, 0x73, 0x44, 0xae, 0x4c, 0x9c, 0x65, 0x55, 0x1b, 0x9d, 0x8a, 0x29, 0xb0, 0x1a, 0x52, 0xa8, 0xf1, 0xe6, 0x9a, 0xb3, 0xf6, 0xa3, 0xc9, 0x0a, 0x70, 0x7d, 0x0f, 0xee,
- /* (2^ 47)P */ 0x77, 0xd3, 0xe5, 0x8e, 0xfa, 0x00, 0xeb, 0x1b, 0x7f, 0xdc, 0x68, 0x3f, 0x92, 0xbd, 0xb7, 0x0b, 0xb7, 0xb5, 0x24, 0xdf, 0xc5, 0x67, 0x53, 0xd4, 0x36, 0x79, 0xc4, 0x7b, 0x57, 0xbc, 0x99, 0x97, 0x60, 0xef, 0xe4, 0x01, 0xa1, 0xa7, 0xaa, 0x12, 0x36, 0x29, 0xb1, 0x03, 0xc2, 0x83, 0x1c, 0x2b, 0x83, 0xef, 0x2e, 0x2c, 0x23, 0x92, 0xfd, 0xd1,
- /* (2^ 48)P */ 0x94, 0xef, 0x03, 0x59, 0xfa, 0x8a, 0x18, 0x76, 0xee, 0x58, 0x08, 0x4d, 0x44, 0xce, 0xf1, 0x52, 0x33, 0x49, 0xf6, 0x69, 0x71, 0xe3, 0xa9, 0xbc, 0x86, 0xe3, 0x43, 0xde, 0x33, 0x7b, 0x90, 0x8b, 0x3e, 0x7d, 0xd5, 0x4a, 0xf0, 0x23, 0x99, 0xa6, 0xea, 0x5f, 0x08, 0xe5, 0xb9, 0x49, 0x8b, 0x0d, 0x6a, 0x21, 0xab, 0x07, 0x62, 0xcd, 0xc4, 0xbe,
- /* (2^ 49)P */ 0x61, 0xbf, 0x70, 0x14, 0xfa, 0x4e, 0x9e, 0x7c, 0x0c, 0xf8, 0xb2, 0x48, 0x71, 0x62, 0x83, 0xd6, 0xd1, 0xdc, 0x9c, 0x29, 0x66, 0xb1, 0x34, 0x9c, 0x8d, 0xe6, 0x88, 0xaf, 0xbe, 0xdc, 0x4d, 0xeb, 0xb0, 0xe7, 0x28, 0xae, 0xb2, 0x05, 0x56, 0xc6, 0x0e, 0x10, 0x26, 0xab, 0x2c, 0x59, 0x72, 0x03, 0x66, 0xfe, 0x8f, 0x2c, 0x51, 0x2d, 0xdc, 0xae,
- /* (2^ 50)P */ 0xdc, 0x63, 0xf1, 0x8b, 0x5c, 0x65, 0x0b, 0xf1, 0xa6, 0x22, 0xe2, 0xd9, 0xdb, 0x49, 0xb1, 0x3c, 0x47, 0xc2, 0xfe, 0xac, 0x86, 0x07, 0x52, 0xec, 0xb0, 0x08, 0x69, 0xfb, 0xd1, 0x06, 0xdc, 0x48, 0x5c, 0x3d, 0xb2, 0x4d, 0xb8, 0x1a, 0x4e, 0xda, 0xb9, 0xc1, 0x2b, 0xab, 0x4b, 0x62, 0x81, 0x21, 0x9a, 0xfc, 0x3d, 0x39, 0x83, 0x11, 0x36, 0xeb,
- /* (2^ 51)P */ 0x94, 0xf3, 0x17, 0xef, 0xf9, 0x60, 0x54, 0xc3, 0xd7, 0x27, 0x35, 0xc5, 0x98, 0x5e, 0xf6, 0x63, 0x6c, 0xa0, 0x4a, 0xd3, 0xa3, 0x98, 0xd9, 0x42, 0xe3, 0xf1, 0xf8, 0x81, 0x96, 0xa9, 0xea, 0x6d, 0x4b, 0x8e, 0x33, 0xca, 0x94, 0x0d, 0xa0, 0xf7, 0xbb, 0x64, 0xa3, 0x36, 0x6f, 0xdc, 0x5a, 0x94, 0x42, 0xca, 0x06, 0xb2, 0x2b, 0x9a, 0x9f, 0x71,
- /* (2^ 52)P */ 0xec, 0xdb, 0xa6, 0x1f, 0xdf, 0x15, 0x36, 0xa3, 0xda, 0x8a, 0x7a, 0xb6, 0xa7, 0xe3, 0xaf, 0x52, 0xe0, 0x8d, 0xe8, 0xf2, 0x44, 0x20, 0xeb, 0xa1, 0x20, 0xc4, 0x65, 0x3c, 0x7c, 0x6c, 0x49, 0xed, 0x2f, 0x66, 0x23, 0x68, 0x61, 0x91, 0x40, 0x9f, 0x50, 0x19, 0xd1, 0x84, 0xa7, 0xe2, 0xed, 0x34, 0x37, 0xe3, 0xe4, 0x11, 0x7f, 0x87, 0x55, 0x0f,
- /* (2^ 53)P */ 0xb3, 0xa1, 0x0f, 0xb0, 0x48, 0xc0, 0x4d, 0x96, 0xa7, 0xcf, 0x5a, 0x81, 0xb8, 0x4a, 0x46, 0xef, 0x0a, 0xd3, 0x40, 0x7e, 0x02, 0xe3, 0x63, 0xaa, 0x50, 0xd1, 0x2a, 0x37, 0x22, 0x4a, 0x7f, 0x4f, 0xb6, 0xf9, 0x01, 0x82, 0x78, 0x3d, 0x93, 0x14, 0x11, 0x8a, 0x90, 0x60, 0xcd, 0x45, 0x4e, 0x7b, 0x42, 0xb9, 0x3e, 0x6e, 0x68, 0x1f, 0x36, 0x41,
- /* (2^ 54)P */ 0x13, 0x73, 0x0e, 0x4f, 0x79, 0x93, 0x9e, 0x29, 0x70, 0x7b, 0x4a, 0x59, 0x1a, 0x9a, 0xf4, 0x55, 0x08, 0xf0, 0xdb, 0x17, 0x58, 0xec, 0x64, 0xad, 0x7f, 0x29, 0xeb, 0x3f, 0x85, 0x4e, 0x60, 0x28, 0x98, 0x1f, 0x73, 0x4e, 0xe6, 0xa8, 0xab, 0xd5, 0xd6, 0xfc, 0xa1, 0x36, 0x6d, 0x15, 0xc6, 0x13, 0x83, 0xa0, 0xc2, 0x6e, 0xd9, 0xdb, 0xc9, 0xcc,
- /* (2^ 55)P */ 0xff, 0xd8, 0x52, 0xa3, 0xdc, 0x99, 0xcf, 0x3e, 0x19, 0xb3, 0x68, 0xd0, 0xb5, 0x0d, 0xb8, 0xee, 0x3f, 0xef, 0x6e, 0xc0, 0x38, 0x28, 0x44, 0x92, 0x78, 0x91, 0x1a, 0x08, 0x78, 0x6c, 0x65, 0x24, 0xf3, 0xa2, 0x3d, 0xf2, 0xe5, 0x79, 0x62, 0x69, 0x29, 0xf4, 0x22, 0xc5, 0xdb, 0x6a, 0xae, 0xf4, 0x44, 0xa3, 0x6f, 0xc7, 0x86, 0xab, 0xef, 0xef,
- /* (2^ 56)P */ 0xbf, 0x54, 0x9a, 0x09, 0x5d, 0x17, 0xd0, 0xde, 0xfb, 0xf5, 0xca, 0xff, 0x13, 0x20, 0x88, 0x82, 0x3a, 0xe2, 0xd0, 0x3b, 0xfb, 0x05, 0x76, 0xd1, 0xc0, 0x02, 0x71, 0x3b, 0x94, 0xe8, 0xc9, 0x84, 0xcf, 0xa4, 0xe9, 0x28, 0x7b, 0xf5, 0x09, 0xc3, 0x2b, 0x22, 0x40, 0xf1, 0x68, 0x24, 0x24, 0x7d, 0x9f, 0x6e, 0xcd, 0xfe, 0xb0, 0x19, 0x61, 0xf5,
- /* (2^ 57)P */ 0xe8, 0x63, 0x51, 0xb3, 0x95, 0x6b, 0x7b, 0x74, 0x92, 0x52, 0x45, 0xa4, 0xed, 0xea, 0x0e, 0x0d, 0x2b, 0x01, 0x1e, 0x2c, 0xbc, 0x91, 0x06, 0x69, 0xdb, 0x1f, 0xb5, 0x77, 0x1d, 0x56, 0xf5, 0xb4, 0x02, 0x80, 0x49, 0x56, 0x12, 0xce, 0x86, 0x05, 0xc9, 0xd9, 0xae, 0xf3, 0x6d, 0xe6, 0x3f, 0x40, 0x52, 0xe9, 0x49, 0x2b, 0x31, 0x06, 0x86, 0x14,
- /* (2^ 58)P */ 0xf5, 0x09, 0x3b, 0xd2, 0xff, 0xdf, 0x11, 0xa5, 0x1c, 0x99, 0xe8, 0x1b, 0xa4, 0x2c, 0x7d, 0x8e, 0xc8, 0xf7, 0x03, 0x46, 0xfa, 0xb6, 0xde, 0x73, 0x91, 0x7e, 0x5a, 0x7a, 0xd7, 0x9a, 0x5b, 0x80, 0x24, 0x62, 0x5e, 0x92, 0xf1, 0xa3, 0x45, 0xa3, 0x43, 0x92, 0x8a, 0x2a, 0x5b, 0x0c, 0xb4, 0xc8, 0xad, 0x1c, 0xb6, 0x6c, 0x5e, 0x81, 0x18, 0x91,
- /* (2^ 59)P */ 0x96, 0xb3, 0xca, 0x2b, 0xe3, 0x7a, 0x59, 0x72, 0x17, 0x74, 0x29, 0x21, 0xe7, 0x78, 0x07, 0xad, 0xda, 0xb6, 0xcd, 0xf9, 0x27, 0x4d, 0xc8, 0xf2, 0x98, 0x22, 0xca, 0xf2, 0x33, 0x74, 0x7a, 0xdd, 0x1e, 0x71, 0xec, 0xe3, 0x3f, 0xe2, 0xa2, 0xd2, 0x38, 0x75, 0xb0, 0xd0, 0x0a, 0xcf, 0x7d, 0x36, 0xdc, 0x49, 0x38, 0x25, 0x34, 0x4f, 0x20, 0x9a,
- /* (2^ 60)P */ 0x2b, 0x6e, 0x04, 0x0d, 0x4f, 0x3d, 0x3b, 0x24, 0xf6, 0x4e, 0x5e, 0x0a, 0xbd, 0x48, 0x96, 0xba, 0x81, 0x8f, 0x39, 0x82, 0x13, 0xe6, 0x72, 0xf3, 0x0f, 0xb6, 0x94, 0xf4, 0xc5, 0x90, 0x74, 0x91, 0xa8, 0xf2, 0xc9, 0xca, 0x9a, 0x4d, 0x98, 0xf2, 0xdf, 0x52, 0x4e, 0x97, 0x2f, 0xeb, 0x84, 0xd3, 0xaf, 0xc2, 0xcc, 0xfb, 0x4c, 0x26, 0x4b, 0xe4,
- /* (2^ 61)P */ 0x12, 0x9e, 0xfb, 0x9d, 0x78, 0x79, 0x99, 0xdd, 0xb3, 0x0b, 0x2e, 0x56, 0x41, 0x8e, 0x3f, 0x39, 0xb8, 0x97, 0x89, 0x53, 0x9b, 0x8a, 0x3c, 0x40, 0x9d, 0xa4, 0x6c, 0x2e, 0x31, 0x71, 0xc6, 0x0a, 0x41, 0xd4, 0x95, 0x06, 0x5e, 0xc1, 0xab, 0xc2, 0x14, 0xc4, 0xc7, 0x15, 0x08, 0x3a, 0xad, 0x7a, 0xb4, 0x62, 0xa3, 0x0c, 0x90, 0xf4, 0x47, 0x08,
- /* (2^ 62)P */ 0x7f, 0xec, 0x09, 0x82, 0xf5, 0x94, 0x09, 0x93, 0x32, 0xd3, 0xdc, 0x56, 0x80, 0x7b, 0x5b, 0x22, 0x80, 0x6a, 0x96, 0x72, 0xb1, 0xc2, 0xd9, 0xa1, 0x8b, 0x66, 0x42, 0x16, 0xe2, 0x07, 0xb3, 0x2d, 0xf1, 0x75, 0x35, 0x72, 0xc7, 0x98, 0xbe, 0x63, 0x3b, 0x20, 0x75, 0x05, 0xc1, 0x3e, 0x31, 0x5a, 0xf7, 0xaa, 0xae, 0x4b, 0xdb, 0x1d, 0xd0, 0x74,
- /* (2^ 63)P */ 0x36, 0x5c, 0x74, 0xe6, 0x5d, 0x59, 0x3f, 0x15, 0x4b, 0x4d, 0x4e, 0x67, 0x41, 0xfe, 0x98, 0x1f, 0x49, 0x76, 0x91, 0x0f, 0x9b, 0xf4, 0xaf, 0x86, 0xaf, 0x66, 0x19, 0xed, 0x46, 0xf1, 0x05, 0x9a, 0xcc, 0xd1, 0x14, 0x1f, 0x82, 0x12, 0x8e, 0xe6, 0xf4, 0xc3, 0x42, 0x5c, 0x4e, 0x33, 0x93, 0xbe, 0x30, 0xe7, 0x64, 0xa9, 0x35, 0x00, 0x4d, 0xf9,
- /* (2^ 64)P */ 0x1f, 0xc1, 0x1e, 0xb7, 0xe3, 0x7c, 0xfa, 0xa3, 0x6b, 0x76, 0xaf, 0x9c, 0x05, 0x85, 0x4a, 0xa9, 0xfb, 0xe3, 0x7e, 0xf2, 0x49, 0x56, 0xdc, 0x2f, 0x57, 0x10, 0xba, 0x37, 0xb2, 0x62, 0xf5, 0x6b, 0xe5, 0x8f, 0x0a, 0x87, 0xd1, 0x6a, 0xcb, 0x9d, 0x07, 0xd0, 0xf6, 0x38, 0x99, 0x2c, 0x61, 0x4a, 0x4e, 0xd8, 0xd2, 0x88, 0x29, 0x99, 0x11, 0x95,
- /* (2^ 65)P */ 0x6f, 0xdc, 0xd5, 0xd6, 0xd6, 0xa7, 0x4c, 0x46, 0x93, 0x65, 0x62, 0x23, 0x95, 0x32, 0x9c, 0xde, 0x40, 0x41, 0x68, 0x2c, 0x18, 0x4e, 0x5a, 0x8c, 0xc0, 0xc5, 0xc5, 0xea, 0x5c, 0x45, 0x0f, 0x60, 0x78, 0x39, 0xb6, 0x36, 0x23, 0x12, 0xbc, 0x21, 0x9a, 0xf8, 0x91, 0xac, 0xc4, 0x70, 0xdf, 0x85, 0x8e, 0x3c, 0xec, 0x22, 0x04, 0x98, 0xa8, 0xaa,
- /* (2^ 66)P */ 0xcc, 0x52, 0x10, 0x5b, 0x4b, 0x6c, 0xc5, 0xfa, 0x3e, 0xd4, 0xf8, 0x1c, 0x04, 0x14, 0x48, 0x33, 0xd9, 0xfc, 0x5f, 0xb0, 0xa5, 0x48, 0x8c, 0x45, 0x8a, 0xee, 0x3e, 0xa7, 0xc1, 0x2e, 0x34, 0xca, 0xf6, 0xc9, 0xeb, 0x10, 0xbb, 0xe1, 0x59, 0x84, 0x25, 0xe8, 0x81, 0x70, 0xc0, 0x09, 0x42, 0xa7, 0x3b, 0x0d, 0x33, 0x00, 0xb5, 0x77, 0xbe, 0x25,
- /* (2^ 67)P */ 0xcd, 0x1f, 0xbc, 0x7d, 0xef, 0xe5, 0xca, 0x91, 0xaf, 0xa9, 0x59, 0x6a, 0x09, 0xca, 0xd6, 0x1b, 0x3d, 0x55, 0xde, 0xa2, 0x6a, 0x80, 0xd6, 0x95, 0x47, 0xe4, 0x5f, 0x68, 0x54, 0x08, 0xdf, 0x29, 0xba, 0x2a, 0x02, 0x84, 0xe8, 0xe9, 0x00, 0x77, 0x99, 0x36, 0x03, 0xf6, 0x4a, 0x3e, 0x21, 0x81, 0x7d, 0xb8, 0xa4, 0x8a, 0xa2, 0x05, 0xef, 0xbc,
- /* (2^ 68)P */ 0x7c, 0x59, 0x5f, 0x66, 0xd9, 0xb7, 0x83, 0x43, 0x8a, 0xa1, 0x8d, 0x51, 0x70, 0xba, 0xf2, 0x9b, 0x95, 0xc0, 0x4b, 0x4c, 0xa0, 0x14, 0xd3, 0xa4, 0x5d, 0x4a, 0x37, 0x36, 0x97, 0x31, 0x1e, 0x12, 0xe7, 0xbb, 0x08, 0x67, 0xa5, 0x23, 0xd7, 0xfb, 0x97, 0xd8, 0x6a, 0x03, 0xb1, 0xf8, 0x7f, 0xda, 0x58, 0xd9, 0x3f, 0x73, 0x4a, 0x53, 0xe1, 0x7b,
- /* (2^ 69)P */ 0x55, 0x83, 0x98, 0x78, 0x6c, 0x56, 0x5e, 0xed, 0xf7, 0x23, 0x3e, 0x4c, 0x7d, 0x09, 0x2d, 0x09, 0x9c, 0x58, 0x8b, 0x32, 0xca, 0xfe, 0xbf, 0x47, 0x03, 0xeb, 0x4d, 0xe7, 0xeb, 0x9c, 0x83, 0x05, 0x68, 0xaa, 0x80, 0x89, 0x44, 0xf9, 0xd4, 0xdc, 0xdb, 0xb1, 0xdb, 0x77, 0xac, 0xf9, 0x2a, 0xae, 0x35, 0xac, 0x74, 0xb5, 0x95, 0x62, 0x18, 0x85,
- /* (2^ 70)P */ 0xab, 0x82, 0x7e, 0x10, 0xd7, 0xe6, 0x57, 0xd1, 0x66, 0x12, 0x31, 0x9c, 0x9c, 0xa6, 0x27, 0x59, 0x71, 0x2e, 0xeb, 0xa0, 0x68, 0xc5, 0x87, 0x51, 0xf4, 0xca, 0x3f, 0x98, 0x56, 0xb0, 0x89, 0xb1, 0xc7, 0x7b, 0x46, 0xb3, 0xae, 0x36, 0xf2, 0xee, 0x15, 0x1a, 0x60, 0xf4, 0x50, 0x76, 0x4f, 0xc4, 0x53, 0x0d, 0x36, 0x4d, 0x31, 0xb1, 0x20, 0x51,
- /* (2^ 71)P */ 0xf7, 0x1d, 0x8c, 0x1b, 0x5e, 0xe5, 0x02, 0x6f, 0xc5, 0xa5, 0xe0, 0x5f, 0xc6, 0xb6, 0x63, 0x43, 0xaf, 0x3c, 0x19, 0x6c, 0xf4, 0xaf, 0xa4, 0x33, 0xb1, 0x0a, 0x37, 0x3d, 0xd9, 0x4d, 0xe2, 0x29, 0x24, 0x26, 0x94, 0x7c, 0x02, 0xe4, 0xe2, 0xf2, 0xbe, 0xbd, 0xac, 0x1b, 0x48, 0xb8, 0xdd, 0xe9, 0x0d, 0x9a, 0x50, 0x1a, 0x98, 0x71, 0x6e, 0xdc,
- /* (2^ 72)P */ 0x9f, 0x40, 0xb1, 0xb3, 0x66, 0x28, 0x6c, 0xfe, 0xa6, 0x7d, 0xf8, 0x3e, 0xb8, 0xf3, 0xde, 0x52, 0x76, 0x52, 0xa3, 0x92, 0x98, 0x23, 0xab, 0x4f, 0x88, 0x97, 0xfc, 0x22, 0xe1, 0x6b, 0x67, 0xcd, 0x13, 0x95, 0xda, 0x65, 0xdd, 0x3b, 0x67, 0x3f, 0x5f, 0x4c, 0xf2, 0x8a, 0xad, 0x98, 0xa7, 0x94, 0x24, 0x45, 0x87, 0x11, 0x7c, 0x75, 0x79, 0x85,
- /* (2^ 73)P */ 0x70, 0xbf, 0xf9, 0x3b, 0xa9, 0x44, 0x57, 0x72, 0x96, 0xc9, 0xa4, 0x98, 0x65, 0xbf, 0x87, 0xb3, 0x3a, 0x39, 0x12, 0xde, 0xe5, 0x39, 0x01, 0x4f, 0xf7, 0xc0, 0x71, 0x52, 0x36, 0x85, 0xb3, 0x18, 0xf8, 0x14, 0xc0, 0x6d, 0xae, 0x9e, 0x4f, 0xb0, 0x72, 0x87, 0xac, 0x5c, 0xd1, 0x6c, 0x41, 0x6c, 0x90, 0x9d, 0x22, 0x81, 0xe4, 0x2b, 0xea, 0xe5,
- /* (2^ 74)P */ 0xfc, 0xea, 0x1a, 0x65, 0xd9, 0x49, 0x6a, 0x39, 0xb5, 0x96, 0x72, 0x7b, 0x32, 0xf1, 0xd0, 0xe9, 0x45, 0xd9, 0x31, 0x55, 0xc7, 0x34, 0xe9, 0x5a, 0xec, 0x73, 0x0b, 0x03, 0xc4, 0xb3, 0xe6, 0xc9, 0x5e, 0x0a, 0x17, 0xfe, 0x53, 0x66, 0x7f, 0x21, 0x18, 0x74, 0x54, 0x1b, 0xc9, 0x49, 0x16, 0xd2, 0x48, 0xaf, 0x5b, 0x47, 0x7b, 0xeb, 0xaa, 0xc9,
- /* (2^ 75)P */ 0x47, 0x04, 0xf5, 0x5a, 0x87, 0x77, 0x9e, 0x21, 0x34, 0x4e, 0x83, 0x88, 0xaf, 0x02, 0x1d, 0xb0, 0x5a, 0x1d, 0x1d, 0x7d, 0x8d, 0x2c, 0xd3, 0x8d, 0x63, 0xa9, 0x45, 0xfb, 0x15, 0x6d, 0x86, 0x45, 0xcd, 0x38, 0x0e, 0xf7, 0x37, 0x79, 0xed, 0x6d, 0x5a, 0xbc, 0x32, 0xcc, 0x66, 0xf1, 0x3a, 0xb2, 0x87, 0x6f, 0x70, 0x71, 0xd9, 0xf2, 0xfa, 0x7b,
- /* (2^ 76)P */ 0x68, 0x07, 0xdc, 0x61, 0x40, 0xe4, 0xec, 0x32, 0xc8, 0xbe, 0x66, 0x30, 0x54, 0x80, 0xfd, 0x13, 0x7a, 0xef, 0xae, 0xed, 0x2e, 0x00, 0x6d, 0x3f, 0xbd, 0xfc, 0x91, 0x24, 0x53, 0x7f, 0x63, 0x9d, 0x2e, 0xe3, 0x76, 0xe0, 0xf3, 0xe1, 0x8f, 0x7a, 0xc4, 0x77, 0x0c, 0x91, 0xc0, 0xc2, 0x18, 0x6b, 0x04, 0xad, 0xb6, 0x70, 0x9a, 0x64, 0xc5, 0x82,
- /* (2^ 77)P */ 0x7f, 0xea, 0x13, 0xd8, 0x9e, 0xfc, 0x5b, 0x06, 0xb5, 0x4f, 0xda, 0x38, 0xe0, 0x9c, 0xd2, 0x3a, 0xc1, 0x1c, 0x62, 0x70, 0x7f, 0xc6, 0x24, 0x0a, 0x47, 0x04, 0x01, 0xc4, 0x55, 0x09, 0xd1, 0x7a, 0x07, 0xba, 0xa3, 0x80, 0x4f, 0xc1, 0x65, 0x36, 0x6d, 0xc0, 0x10, 0xcf, 0x94, 0xa9, 0xa2, 0x01, 0x44, 0xd1, 0xf9, 0x1c, 0x4c, 0xfb, 0xf8, 0x99,
- /* (2^ 78)P */ 0x6c, 0xb9, 0x6b, 0xee, 0x43, 0x5b, 0xb9, 0xbb, 0xee, 0x2e, 0x52, 0xc1, 0xc6, 0xb9, 0x61, 0xd2, 0x93, 0xa5, 0xaf, 0x52, 0xf4, 0xa4, 0x1a, 0x51, 0x61, 0xa7, 0xcb, 0x9e, 0xbb, 0x56, 0x65, 0xe2, 0xbf, 0x75, 0xb9, 0x9c, 0x50, 0x96, 0x60, 0x81, 0x74, 0x47, 0xc0, 0x04, 0x88, 0x71, 0x76, 0x39, 0x9a, 0xa7, 0xb1, 0x4e, 0x43, 0x15, 0xe0, 0xbb,
- /* (2^ 79)P */ 0xbb, 0xce, 0xe2, 0xbb, 0xf9, 0x17, 0x0f, 0x82, 0x40, 0xad, 0x73, 0xe3, 0xeb, 0x3b, 0x06, 0x1a, 0xcf, 0x8e, 0x6e, 0x28, 0xb8, 0x26, 0xd9, 0x5b, 0xb7, 0xb3, 0xcf, 0xb4, 0x6a, 0x1c, 0xbf, 0x7f, 0xb8, 0xb5, 0x79, 0xcf, 0x45, 0x68, 0x7d, 0xc5, 0xeb, 0xf3, 0xbe, 0x39, 0x40, 0xfc, 0x07, 0x90, 0x7a, 0x62, 0xad, 0x86, 0x08, 0x71, 0x25, 0xe1,
- /* (2^ 80)P */ 0x9b, 0x46, 0xac, 0xef, 0xc1, 0x4e, 0xa1, 0x97, 0x95, 0x76, 0xf9, 0x1b, 0xc2, 0xb2, 0x6a, 0x41, 0xea, 0x80, 0x3d, 0xe9, 0x08, 0x52, 0x5a, 0xe3, 0xf2, 0x08, 0xc5, 0xea, 0x39, 0x3f, 0x44, 0x71, 0x4d, 0xea, 0x0d, 0x05, 0x23, 0xe4, 0x2e, 0x3c, 0x89, 0xfe, 0x12, 0x8a, 0x95, 0x42, 0x0a, 0x68, 0xea, 0x5a, 0x28, 0x06, 0x9e, 0xe3, 0x5f, 0xe0,
- /* (2^ 81)P */ 0x00, 0x61, 0x6c, 0x98, 0x9b, 0xe7, 0xb9, 0x06, 0x1c, 0xc5, 0x1b, 0xed, 0xbe, 0xc8, 0xb3, 0xea, 0x87, 0xf0, 0xc4, 0x24, 0x7d, 0xbb, 0x5d, 0xa4, 0x1d, 0x7a, 0x16, 0x00, 0x55, 0x94, 0x67, 0x78, 0xbd, 0x58, 0x02, 0x82, 0x90, 0x53, 0x76, 0xd4, 0x72, 0x99, 0x51, 0x6f, 0x7b, 0xcf, 0x80, 0x30, 0x31, 0x3b, 0x01, 0xc7, 0xc1, 0xef, 0xe6, 0x42,
- /* (2^ 82)P */ 0xe2, 0x35, 0xaf, 0x4b, 0x79, 0xc6, 0x12, 0x24, 0x99, 0xc0, 0x68, 0xb0, 0x43, 0x3e, 0xe5, 0xef, 0xe2, 0x29, 0xea, 0xb8, 0xb3, 0xbc, 0x6a, 0x53, 0x2c, 0x69, 0x18, 0x5a, 0xf9, 0x15, 0xae, 0x66, 0x58, 0x18, 0xd3, 0x2d, 0x4b, 0x00, 0xfd, 0x84, 0xab, 0x4f, 0xae, 0x70, 0x6b, 0x9e, 0x9a, 0xdf, 0x83, 0xfd, 0x2e, 0x3c, 0xcf, 0xf8, 0x88, 0x5b,
- /* (2^ 83)P */ 0xa4, 0x90, 0x31, 0x85, 0x13, 0xcd, 0xdf, 0x64, 0xc9, 0xa1, 0x0b, 0xe7, 0xb6, 0x73, 0x8a, 0x1b, 0x22, 0x78, 0x4c, 0xd4, 0xae, 0x48, 0x18, 0x00, 0x00, 0xa8, 0x9f, 0x06, 0xf9, 0xfb, 0x2d, 0xc3, 0xb1, 0x2a, 0xbc, 0x13, 0x99, 0x57, 0xaf, 0xf0, 0x8d, 0x61, 0x54, 0x29, 0xd5, 0xf2, 0x72, 0x00, 0x96, 0xd1, 0x85, 0x12, 0x8a, 0xf0, 0x23, 0xfb,
- /* (2^ 84)P */ 0x69, 0xc7, 0xdb, 0xd9, 0x92, 0x75, 0x08, 0x9b, 0xeb, 0xa5, 0x93, 0xd1, 0x1a, 0xf4, 0xf5, 0xaf, 0xe6, 0xc4, 0x4a, 0x0d, 0x35, 0x26, 0x39, 0x9d, 0xd3, 0x17, 0x3e, 0xae, 0x2d, 0xbf, 0x73, 0x9f, 0xb7, 0x74, 0x91, 0xd1, 0xd8, 0x5c, 0x14, 0xf9, 0x75, 0xdf, 0xeb, 0xc2, 0x22, 0xd8, 0x14, 0x8d, 0x86, 0x23, 0x4d, 0xd1, 0x2d, 0xdb, 0x6b, 0x42,
- /* (2^ 85)P */ 0x8c, 0xda, 0xc6, 0xf8, 0x71, 0xba, 0x2b, 0x06, 0x78, 0xae, 0xcc, 0x3a, 0xe3, 0xe3, 0xa1, 0x8b, 0xe2, 0x34, 0x6d, 0x28, 0x9e, 0x46, 0x13, 0x4d, 0x9e, 0xa6, 0x73, 0x49, 0x65, 0x79, 0x88, 0xb9, 0x3a, 0xd1, 0x6d, 0x2f, 0x48, 0x2b, 0x0a, 0x7f, 0x58, 0x20, 0x37, 0xf4, 0x0e, 0xbb, 0x4a, 0x95, 0x58, 0x0c, 0x88, 0x30, 0xc4, 0x74, 0xdd, 0xfd,
- /* (2^ 86)P */ 0x6d, 0x13, 0x4e, 0x89, 0x2d, 0xa9, 0xa3, 0xed, 0x09, 0xe3, 0x0e, 0x71, 0x3e, 0x4a, 0xab, 0x90, 0xde, 0x03, 0xeb, 0x56, 0x46, 0x60, 0x06, 0xf5, 0x71, 0xe5, 0xee, 0x9b, 0xef, 0xff, 0xc4, 0x2c, 0x9f, 0x37, 0x48, 0x45, 0x94, 0x12, 0x41, 0x81, 0x15, 0x70, 0x91, 0x99, 0x5e, 0x56, 0x6b, 0xf4, 0xa6, 0xc9, 0xf5, 0x69, 0x9d, 0x78, 0x37, 0x57,
- /* (2^ 87)P */ 0xf3, 0x51, 0x57, 0x7e, 0x43, 0x6f, 0xc6, 0x67, 0x59, 0x0c, 0xcf, 0x94, 0xe6, 0x3d, 0xb5, 0x07, 0xc9, 0x77, 0x48, 0xc9, 0x68, 0x0d, 0x98, 0x36, 0x62, 0x35, 0x38, 0x1c, 0xf5, 0xc5, 0xec, 0x66, 0x78, 0xfe, 0x47, 0xab, 0x26, 0xd6, 0x44, 0xb6, 0x06, 0x0f, 0x89, 0xe3, 0x19, 0x40, 0x1a, 0xe7, 0xd8, 0x65, 0x55, 0xf7, 0x1a, 0xfc, 0xa3, 0x0e,
- /* (2^ 88)P */ 0x0e, 0x30, 0xa6, 0xb7, 0x58, 0x60, 0x62, 0x2a, 0x6c, 0x13, 0xa8, 0x14, 0x9b, 0xb8, 0xf2, 0x70, 0xd8, 0xb1, 0x71, 0x88, 0x8c, 0x18, 0x31, 0x25, 0x93, 0x90, 0xb4, 0xc7, 0x49, 0xd8, 0xd4, 0xdb, 0x1e, 0x1e, 0x7f, 0xaa, 0xba, 0xc9, 0xf2, 0x5d, 0xa9, 0x3a, 0x43, 0xb4, 0x5c, 0xee, 0x7b, 0xc7, 0x97, 0xb7, 0x66, 0xd7, 0x23, 0xd9, 0x22, 0x59,
- /* (2^ 89)P */ 0x28, 0x19, 0xa6, 0xf9, 0x89, 0x20, 0x78, 0xd4, 0x6d, 0xcb, 0x79, 0x8f, 0x61, 0x6f, 0xb2, 0x5c, 0x4f, 0xa6, 0x54, 0x84, 0x95, 0x24, 0x36, 0x64, 0xcb, 0x39, 0xe7, 0x8f, 0x97, 0x9c, 0x5c, 0x3c, 0xfb, 0x51, 0x11, 0x01, 0x17, 0xdb, 0xc9, 0x9b, 0x51, 0x03, 0x9a, 0xe9, 0xe5, 0x24, 0x1e, 0xf5, 0xda, 0xe0, 0x48, 0x02, 0x23, 0xd0, 0x2c, 0x81,
- /* (2^ 90)P */ 0x42, 0x1b, 0xe4, 0x91, 0x85, 0x2a, 0x0c, 0xd2, 0x28, 0x66, 0x57, 0x9e, 0x33, 0x8d, 0x25, 0x71, 0x10, 0x65, 0x76, 0xa2, 0x8c, 0x21, 0x86, 0x81, 0x15, 0xc2, 0x27, 0xeb, 0x54, 0x2d, 0x4f, 0x6c, 0xe6, 0xd6, 0x24, 0x9c, 0x1a, 0x12, 0xb8, 0x81, 0xe2, 0x0a, 0xf3, 0xd3, 0xf0, 0xd3, 0xe1, 0x74, 0x1f, 0x9b, 0x11, 0x47, 0xd0, 0xcf, 0xb6, 0x54,
- /* (2^ 91)P */ 0x26, 0x45, 0xa2, 0x10, 0xd4, 0x2d, 0xae, 0xc0, 0xb0, 0xe8, 0x86, 0xb3, 0xc7, 0xea, 0x70, 0x87, 0x61, 0xb5, 0xa5, 0x55, 0xbe, 0x88, 0x1d, 0x7a, 0xd9, 0x6f, 0xeb, 0x83, 0xe2, 0x44, 0x7f, 0x98, 0x04, 0xd6, 0x50, 0x9d, 0xa7, 0x86, 0x66, 0x09, 0x63, 0xe1, 0xed, 0x72, 0xb1, 0xe4, 0x1d, 0x3a, 0xfd, 0x47, 0xce, 0x1c, 0xaa, 0x3b, 0x8f, 0x1b,
- /* (2^ 92)P */ 0xf4, 0x3c, 0x4a, 0xb6, 0xc2, 0x9c, 0xe0, 0x2e, 0xb7, 0x38, 0xea, 0x61, 0x35, 0x97, 0x10, 0x90, 0xae, 0x22, 0x48, 0xb3, 0xa9, 0xc6, 0x7a, 0xbb, 0x23, 0xf2, 0xf8, 0x1b, 0xa7, 0xa1, 0x79, 0xcc, 0xc4, 0xf8, 0x08, 0x76, 0x8a, 0x5a, 0x1c, 0x1b, 0xc5, 0x33, 0x91, 0xa9, 0xb8, 0xb9, 0xd3, 0xf8, 0x49, 0xcd, 0xe5, 0x82, 0x43, 0xf7, 0xca, 0x68,
- /* (2^ 93)P */ 0x38, 0xba, 0xae, 0x44, 0xfe, 0x57, 0x64, 0x56, 0x7c, 0x0e, 0x9c, 0xca, 0xff, 0xa9, 0x82, 0xbb, 0x38, 0x4a, 0xa7, 0xf7, 0x47, 0xab, 0xbe, 0x6d, 0x23, 0x0b, 0x8a, 0xed, 0xc2, 0xb9, 0x8f, 0xf1, 0xec, 0x91, 0x44, 0x73, 0x64, 0xba, 0xd5, 0x8f, 0x37, 0x38, 0x0d, 0xd5, 0xf8, 0x73, 0x57, 0xb6, 0xc2, 0x45, 0xdc, 0x25, 0xb2, 0xb6, 0xea, 0xd9,
- /* (2^ 94)P */ 0xbf, 0xe9, 0x1a, 0x40, 0x4d, 0xcc, 0xe6, 0x1d, 0x70, 0x1a, 0x65, 0xcc, 0x34, 0x2c, 0x37, 0x2c, 0x2d, 0x6b, 0x6d, 0xe5, 0x2f, 0x19, 0x9e, 0xe4, 0xe1, 0xaa, 0xd4, 0xab, 0x54, 0xf4, 0xa8, 0xe4, 0x69, 0x2d, 0x8e, 0x4d, 0xd7, 0xac, 0xb0, 0x5b, 0xfe, 0xe3, 0x26, 0x07, 0xc3, 0xf8, 0x1b, 0x43, 0xa8, 0x1d, 0x64, 0xa5, 0x25, 0x88, 0xbb, 0x77,
- /* (2^ 95)P */ 0x92, 0xcd, 0x6e, 0xa0, 0x79, 0x04, 0x18, 0xf4, 0x11, 0x58, 0x48, 0xb5, 0x3c, 0x7b, 0xd1, 0xcc, 0xd3, 0x14, 0x2c, 0xa0, 0xdd, 0x04, 0x44, 0x11, 0xb3, 0x6d, 0x2f, 0x0d, 0xf5, 0x2a, 0x75, 0x5d, 0x1d, 0xda, 0x86, 0x8d, 0x7d, 0x6b, 0x32, 0x68, 0xb6, 0x6c, 0x64, 0x9e, 0xde, 0x80, 0x88, 0xce, 0x08, 0xbf, 0x0b, 0xe5, 0x8e, 0x4f, 0x1d, 0xfb,
- /* (2^ 96)P */ 0xaf, 0xe8, 0x85, 0xbf, 0x7f, 0x37, 0x8d, 0x66, 0x7c, 0xd5, 0xd3, 0x96, 0xa5, 0x81, 0x67, 0x95, 0xff, 0x48, 0xde, 0xde, 0xd7, 0x7a, 0x46, 0x34, 0xb1, 0x13, 0x70, 0x29, 0xed, 0x87, 0x90, 0xb0, 0x40, 0x2c, 0xa6, 0x43, 0x6e, 0xb6, 0xbc, 0x48, 0x8a, 0xc1, 0xae, 0xb8, 0xd4, 0xe2, 0xc0, 0x32, 0xb2, 0xa6, 0x2a, 0x8f, 0xb5, 0x16, 0x9e, 0xc3,
- /* (2^ 97)P */ 0xff, 0x4d, 0xd2, 0xd6, 0x74, 0xef, 0x2c, 0x96, 0xc1, 0x11, 0xa8, 0xb8, 0xfe, 0x94, 0x87, 0x3e, 0xa0, 0xfb, 0x57, 0xa3, 0xfc, 0x7a, 0x7e, 0x6a, 0x59, 0x6c, 0x54, 0xbb, 0xbb, 0xa2, 0x25, 0x38, 0x1b, 0xdf, 0x5d, 0x7b, 0x94, 0x14, 0xde, 0x07, 0x6e, 0xd3, 0xab, 0x02, 0x26, 0x74, 0x16, 0x12, 0xdf, 0x2e, 0x2a, 0xa7, 0xb0, 0xe8, 0x29, 0xc0,
- /* (2^ 98)P */ 0x6a, 0x38, 0x0b, 0xd3, 0xba, 0x45, 0x23, 0xe0, 0x04, 0x3b, 0x83, 0x39, 0xc5, 0x11, 0xe6, 0xcf, 0x39, 0x0a, 0xb3, 0xb0, 0x3b, 0x27, 0x29, 0x63, 0x1c, 0xf3, 0x00, 0xe6, 0xd2, 0x55, 0x21, 0x1f, 0x84, 0x97, 0x9f, 0x01, 0x49, 0x43, 0x30, 0x5f, 0xe0, 0x1d, 0x24, 0xc4, 0x4e, 0xa0, 0x2b, 0x0b, 0x12, 0x55, 0xc3, 0x27, 0xae, 0x08, 0x83, 0x7c,
- /* (2^ 99)P */ 0x5d, 0x1a, 0xb7, 0xa9, 0xf5, 0xfd, 0xec, 0xad, 0xb7, 0x87, 0x02, 0x5f, 0x0d, 0x30, 0x4d, 0xe2, 0x65, 0x87, 0xa4, 0x41, 0x45, 0x1d, 0x67, 0xe0, 0x30, 0x5c, 0x13, 0x87, 0xf6, 0x2e, 0x08, 0xc1, 0xc7, 0x12, 0x45, 0xc8, 0x9b, 0xad, 0xb8, 0xd5, 0x57, 0xbb, 0x5c, 0x48, 0x3a, 0xe1, 0x91, 0x5e, 0xf6, 0x4d, 0x8a, 0x63, 0x75, 0x69, 0x0c, 0x01,
- /* (2^100)P */ 0x8f, 0x53, 0x2d, 0xa0, 0x71, 0x3d, 0xfc, 0x45, 0x10, 0x96, 0xcf, 0x56, 0xf9, 0xbb, 0x40, 0x3c, 0x86, 0x52, 0x76, 0xbe, 0x84, 0xf9, 0xa6, 0x9d, 0x3d, 0x27, 0xbe, 0xb4, 0x00, 0x49, 0x94, 0xf5, 0x5d, 0xe1, 0x62, 0x85, 0x66, 0xe5, 0xb8, 0x20, 0x2c, 0x09, 0x7d, 0x9d, 0x3d, 0x6e, 0x74, 0x39, 0xab, 0xad, 0xa0, 0x90, 0x97, 0x5f, 0xbb, 0xa7,
- /* (2^101)P */ 0xdb, 0x2d, 0x99, 0x08, 0x16, 0x46, 0x83, 0x7a, 0xa8, 0xea, 0x3d, 0x28, 0x5b, 0x49, 0xfc, 0xb9, 0x6d, 0x00, 0x9e, 0x54, 0x4f, 0x47, 0x64, 0x9b, 0x58, 0x4d, 0x07, 0x0c, 0x6f, 0x29, 0x56, 0x0b, 0x00, 0x14, 0x85, 0x96, 0x41, 0x04, 0xb9, 0x5c, 0xa4, 0xf6, 0x16, 0x73, 0x6a, 0xc7, 0x62, 0x0c, 0x65, 0x2f, 0x93, 0xbf, 0xf7, 0xb9, 0xb7, 0xf1,
- /* (2^102)P */ 0xeb, 0x6d, 0xb3, 0x46, 0x32, 0xd2, 0xcb, 0x08, 0x94, 0x14, 0xbf, 0x3f, 0xc5, 0xcb, 0x5f, 0x9f, 0x8a, 0x89, 0x0c, 0x1b, 0x45, 0xad, 0x4c, 0x50, 0xb4, 0xe1, 0xa0, 0x6b, 0x11, 0x92, 0xaf, 0x1f, 0x00, 0xcc, 0xe5, 0x13, 0x7e, 0xe4, 0x2e, 0xa0, 0x57, 0xf3, 0xa7, 0x84, 0x79, 0x7a, 0xc2, 0xb7, 0xb7, 0xfc, 0x5d, 0xa5, 0xa9, 0x64, 0xcc, 0xd8,
- /* (2^103)P */ 0xa9, 0xc4, 0x12, 0x8b, 0x34, 0x78, 0x3e, 0x38, 0xfd, 0x3f, 0x87, 0xfa, 0x88, 0x94, 0xd5, 0xd9, 0x7f, 0xeb, 0x58, 0xff, 0xb9, 0x45, 0xdb, 0xa1, 0xed, 0x22, 0x28, 0x1d, 0x00, 0x6d, 0x79, 0x85, 0x7a, 0x75, 0x5d, 0xf0, 0xb1, 0x9e, 0x47, 0x28, 0x8c, 0x62, 0xdf, 0xfb, 0x4c, 0x7b, 0xc5, 0x1a, 0x42, 0x95, 0xef, 0x9a, 0xb7, 0x27, 0x7e, 0xda,
- /* (2^104)P */ 0xca, 0xd5, 0xc0, 0x17, 0xa1, 0x66, 0x79, 0x9c, 0x2a, 0xb7, 0x0a, 0xfe, 0x62, 0xe4, 0x26, 0x78, 0x90, 0xa7, 0xcb, 0xb0, 0x4f, 0x6d, 0xf9, 0x8f, 0xf7, 0x7d, 0xac, 0xb8, 0x78, 0x1f, 0x41, 0xea, 0x97, 0x1e, 0x62, 0x97, 0x43, 0x80, 0x58, 0x80, 0xb6, 0x69, 0x7d, 0xee, 0x16, 0xd2, 0xa1, 0x81, 0xd7, 0xb1, 0x27, 0x03, 0x48, 0xda, 0xab, 0xec,
- /* (2^105)P */ 0x5b, 0xed, 0x40, 0x8e, 0x8c, 0xc1, 0x66, 0x90, 0x7f, 0x0c, 0xb2, 0xfc, 0xbd, 0x16, 0xac, 0x7d, 0x4c, 0x6a, 0xf9, 0xae, 0xe7, 0x4e, 0x11, 0x12, 0xe9, 0xbe, 0x17, 0x09, 0xc6, 0xc1, 0x5e, 0xb5, 0x7b, 0x50, 0x5c, 0x27, 0xfb, 0x80, 0xab, 0x01, 0xfa, 0x5b, 0x9b, 0x75, 0x16, 0x6e, 0xb2, 0x5c, 0x8c, 0x2f, 0xa5, 0x6a, 0x1a, 0x68, 0xa6, 0x90,
- /* (2^106)P */ 0x75, 0xfe, 0xb6, 0x96, 0x96, 0x87, 0x4c, 0x12, 0xa9, 0xd1, 0xd8, 0x03, 0xa3, 0xc1, 0x15, 0x96, 0xe8, 0xa0, 0x75, 0x82, 0xa0, 0x6d, 0xea, 0x54, 0xdc, 0x5f, 0x0d, 0x7e, 0xf6, 0x70, 0xb5, 0xdc, 0x7a, 0xf6, 0xc4, 0xd4, 0x21, 0x49, 0xf5, 0xd4, 0x14, 0x6d, 0x48, 0x1d, 0x7c, 0x99, 0x42, 0xdf, 0x78, 0x6b, 0x9d, 0xb9, 0x30, 0x3c, 0xd0, 0x29,
- /* (2^107)P */ 0x85, 0xd6, 0xd8, 0xf3, 0x91, 0x74, 0xdd, 0xbd, 0x72, 0x96, 0x10, 0xe4, 0x76, 0x02, 0x5a, 0x72, 0x67, 0xd3, 0x17, 0x72, 0x14, 0x9a, 0x20, 0x5b, 0x0f, 0x8d, 0xed, 0x6d, 0x4e, 0xe3, 0xd9, 0x82, 0xc2, 0x99, 0xee, 0x39, 0x61, 0x69, 0x8a, 0x24, 0x01, 0x92, 0x15, 0xe7, 0xfc, 0xf9, 0x4d, 0xac, 0xf1, 0x30, 0x49, 0x01, 0x0b, 0x6e, 0x0f, 0x20,
- /* (2^108)P */ 0xd8, 0x25, 0x94, 0x5e, 0x43, 0x29, 0xf5, 0xcc, 0xe8, 0xe3, 0x55, 0x41, 0x3c, 0x9f, 0x58, 0x5b, 0x00, 0xeb, 0xc5, 0xdf, 0xcf, 0xfb, 0xfd, 0x6e, 0x92, 0xec, 0x99, 0x30, 0xd6, 0x05, 0xdd, 0x80, 0x7a, 0x5d, 0x6d, 0x16, 0x85, 0xd8, 0x9d, 0x43, 0x65, 0xd8, 0x2c, 0x33, 0x2f, 0x5c, 0x41, 0xea, 0xb7, 0x95, 0x77, 0xf2, 0x9e, 0x59, 0x09, 0xe8,
- /* (2^109)P */ 0x00, 0xa0, 0x03, 0x80, 0xcd, 0x60, 0xe5, 0x17, 0xd4, 0x15, 0x99, 0xdd, 0x4f, 0xbf, 0x66, 0xb8, 0xc0, 0xf5, 0xf9, 0xfc, 0x6d, 0x42, 0x18, 0x34, 0x1c, 0x7d, 0x5b, 0xb5, 0x09, 0xd0, 0x99, 0x57, 0x81, 0x0b, 0x62, 0xb3, 0xa2, 0xf9, 0x0b, 0xae, 0x95, 0xb8, 0xc2, 0x3b, 0x0d, 0x5b, 0x00, 0xf1, 0xed, 0xbc, 0x05, 0x9d, 0x61, 0xbc, 0x73, 0x9d,
- /* (2^110)P */ 0xd4, 0xdb, 0x29, 0xe5, 0x85, 0xe9, 0xc6, 0x89, 0x2a, 0xa8, 0x54, 0xab, 0xb3, 0x7f, 0x88, 0xc0, 0x4d, 0xe0, 0xd1, 0x74, 0x6e, 0xa3, 0xa7, 0x39, 0xd5, 0xcc, 0xa1, 0x8a, 0xcb, 0x5b, 0x34, 0xad, 0x92, 0xb4, 0xd8, 0xd5, 0x17, 0xf6, 0x77, 0x18, 0x9e, 0xaf, 0x45, 0x3b, 0x03, 0xe2, 0xf8, 0x52, 0x60, 0xdc, 0x15, 0x20, 0x9e, 0xdf, 0xd8, 0x5d,
- /* (2^111)P */ 0x02, 0xc1, 0xac, 0x1a, 0x15, 0x8e, 0x6c, 0xf5, 0x1e, 0x1e, 0xba, 0x7e, 0xc2, 0xda, 0x7d, 0x02, 0xda, 0x43, 0xae, 0x04, 0x70, 0x28, 0x54, 0x78, 0x94, 0xf5, 0x4f, 0x07, 0x84, 0x8f, 0xed, 0xaa, 0xc0, 0xb8, 0xcd, 0x7f, 0x7e, 0x33, 0xa3, 0xbe, 0x21, 0x29, 0xc8, 0x56, 0x34, 0xc0, 0x76, 0x87, 0x8f, 0xc7, 0x73, 0x58, 0x90, 0x16, 0xfc, 0xd6,
- /* (2^112)P */ 0xb8, 0x3f, 0xe1, 0xdf, 0x3a, 0x91, 0x25, 0x0c, 0xf6, 0x47, 0xa8, 0x89, 0xc4, 0xc6, 0x61, 0xec, 0x86, 0x2c, 0xfd, 0xbe, 0xa4, 0x6f, 0xc2, 0xd4, 0x46, 0x19, 0x70, 0x5d, 0x09, 0x02, 0x86, 0xd3, 0x4b, 0xe9, 0x16, 0x7b, 0xf0, 0x0d, 0x6c, 0xff, 0x91, 0x05, 0xbf, 0x55, 0xb4, 0x00, 0x8d, 0xe5, 0x6d, 0x68, 0x20, 0x90, 0x12, 0xb5, 0x5c, 0x32,
- /* (2^113)P */ 0x80, 0x45, 0xc8, 0x51, 0x87, 0xba, 0x1c, 0x5c, 0xcf, 0x5f, 0x4b, 0x3c, 0x9e, 0x3b, 0x36, 0xd2, 0x26, 0xa2, 0x7f, 0xab, 0xb7, 0xbf, 0xda, 0x68, 0x23, 0x8f, 0xc3, 0xa0, 0xfd, 0xad, 0xf1, 0x56, 0x3b, 0xd0, 0x75, 0x2b, 0x44, 0x61, 0xd8, 0xf4, 0xf1, 0x05, 0x49, 0x53, 0x07, 0xee, 0x47, 0xef, 0xc0, 0x7c, 0x9d, 0xe4, 0x15, 0x88, 0xc5, 0x47,
- /* (2^114)P */ 0x2d, 0xb5, 0x09, 0x80, 0xb9, 0xd3, 0xd8, 0xfe, 0x4c, 0xd2, 0xa6, 0x6e, 0xd3, 0x75, 0xcf, 0xb0, 0x99, 0xcb, 0x50, 0x8d, 0xe9, 0x67, 0x9b, 0x20, 0xe8, 0x57, 0xd8, 0x14, 0x85, 0x73, 0x6a, 0x74, 0xe0, 0x99, 0xf0, 0x6b, 0x6e, 0x59, 0x30, 0x31, 0x33, 0x96, 0x5f, 0xa1, 0x0c, 0x1b, 0xf4, 0xca, 0x09, 0xe1, 0x9b, 0xb5, 0xcf, 0x6d, 0x0b, 0xeb,
- /* (2^115)P */ 0x1a, 0xde, 0x50, 0xa9, 0xac, 0x3e, 0x10, 0x43, 0x4f, 0x82, 0x4f, 0xc0, 0xfe, 0x3f, 0x33, 0xd2, 0x64, 0x86, 0x50, 0xa9, 0x51, 0x76, 0x5e, 0x50, 0x97, 0x6c, 0x73, 0x8d, 0x77, 0xa3, 0x75, 0x03, 0xbc, 0xc9, 0xfb, 0x50, 0xd9, 0x6d, 0x16, 0xad, 0x5d, 0x32, 0x3d, 0xac, 0x44, 0xdf, 0x51, 0xf7, 0x19, 0xd4, 0x0b, 0x57, 0x78, 0x0b, 0x81, 0x4e,
- /* (2^116)P */ 0x32, 0x24, 0xf1, 0x6c, 0x55, 0x62, 0x1d, 0xb3, 0x1f, 0xda, 0xfa, 0x6a, 0x8f, 0x98, 0x01, 0x16, 0xde, 0x44, 0x50, 0x0d, 0x2e, 0x6c, 0x0b, 0xa2, 0xd3, 0x74, 0x0e, 0xa9, 0xbf, 0x8d, 0xa9, 0xc8, 0xc8, 0x2f, 0x62, 0xc1, 0x35, 0x5e, 0xfd, 0x3a, 0xb3, 0x83, 0x2d, 0xee, 0x4e, 0xfd, 0x5c, 0x5e, 0xad, 0x85, 0xa5, 0x10, 0xb5, 0x4f, 0x34, 0xa7,
- /* (2^117)P */ 0xd1, 0x58, 0x6f, 0xe6, 0x54, 0x2c, 0xc2, 0xcd, 0xcf, 0x83, 0xdc, 0x88, 0x0c, 0xb9, 0xb4, 0x62, 0x18, 0x89, 0x65, 0x28, 0xe9, 0x72, 0x4b, 0x65, 0xcf, 0xd6, 0x90, 0x88, 0xd7, 0x76, 0x17, 0x4f, 0x74, 0x64, 0x1e, 0xcb, 0xd3, 0xf5, 0x4b, 0xaa, 0x2e, 0x4d, 0x2d, 0x7c, 0x13, 0x1f, 0xfd, 0xd9, 0x60, 0x83, 0x7e, 0xda, 0x64, 0x1c, 0xdc, 0x9f,
- /* (2^118)P */ 0xad, 0xef, 0xac, 0x1b, 0xc1, 0x30, 0x5a, 0x15, 0xc9, 0x1f, 0xac, 0xf1, 0xca, 0x44, 0x95, 0x95, 0xea, 0xf2, 0x22, 0xe7, 0x8d, 0x25, 0xf0, 0xff, 0xd8, 0x71, 0xf7, 0xf8, 0x8f, 0x8f, 0xcd, 0xf4, 0x1e, 0xfe, 0x6c, 0x68, 0x04, 0xb8, 0x78, 0xa1, 0x5f, 0xa6, 0x5d, 0x5e, 0xf9, 0x8d, 0xea, 0x80, 0xcb, 0xf3, 0x17, 0xa6, 0x03, 0xc9, 0x38, 0xd5,
- /* (2^119)P */ 0x79, 0x14, 0x31, 0xc3, 0x38, 0xe5, 0xaa, 0xbf, 0x17, 0xa3, 0x04, 0x4e, 0x80, 0x59, 0x9c, 0x9f, 0x19, 0x39, 0xe4, 0x2d, 0x23, 0x54, 0x4a, 0x7f, 0x3e, 0xf3, 0xd9, 0xc7, 0xba, 0x6c, 0x8f, 0x6b, 0xfa, 0x34, 0xb5, 0x23, 0x17, 0x1d, 0xff, 0x1d, 0xea, 0x1f, 0xd7, 0xba, 0x61, 0xb2, 0xe0, 0x38, 0x6a, 0xe9, 0xcf, 0x48, 0x5d, 0x6a, 0x10, 0x9c,
- /* (2^120)P */ 0xc8, 0xbb, 0x13, 0x1c, 0x3f, 0x3c, 0x34, 0xfd, 0xac, 0x37, 0x52, 0x44, 0x25, 0xa8, 0xde, 0x1d, 0x63, 0xf4, 0x81, 0x9a, 0xbe, 0x0b, 0x74, 0x2e, 0xc8, 0x51, 0x16, 0xd3, 0xac, 0x4a, 0xaf, 0xe2, 0x5f, 0x3a, 0x89, 0x32, 0xd1, 0x9b, 0x7c, 0x90, 0x0d, 0xac, 0xdc, 0x8b, 0x73, 0x45, 0x45, 0x97, 0xb1, 0x90, 0x2c, 0x1b, 0x31, 0xca, 0xb1, 0x94,
- /* (2^121)P */ 0x07, 0x28, 0xdd, 0x10, 0x14, 0xa5, 0x95, 0x7e, 0xf3, 0xe4, 0xd4, 0x14, 0xb4, 0x7e, 0x76, 0xdb, 0x42, 0xd6, 0x94, 0x3e, 0xeb, 0x44, 0x64, 0x88, 0x0d, 0xec, 0xc1, 0x21, 0xf0, 0x79, 0xe0, 0x83, 0x67, 0x55, 0x53, 0xc2, 0xf6, 0xc5, 0xc5, 0x89, 0x39, 0xe8, 0x42, 0xd0, 0x17, 0xbd, 0xff, 0x35, 0x59, 0x0e, 0xc3, 0x06, 0x86, 0xd4, 0x64, 0xcf,
- /* (2^122)P */ 0x91, 0xa8, 0xdb, 0x57, 0x9b, 0xe2, 0x96, 0x31, 0x10, 0x6e, 0xd7, 0x9a, 0x97, 0xb3, 0xab, 0xb5, 0x15, 0x66, 0xbe, 0xcc, 0x6d, 0x9a, 0xac, 0x06, 0xb3, 0x0d, 0xaa, 0x4b, 0x9c, 0x96, 0x79, 0x6c, 0x34, 0xee, 0x9e, 0x53, 0x4d, 0x6e, 0xbd, 0x88, 0x02, 0xbf, 0x50, 0x54, 0x12, 0x5d, 0x01, 0x02, 0x46, 0xc6, 0x74, 0x02, 0x8c, 0x24, 0xae, 0xb1,
- /* (2^123)P */ 0xf5, 0x22, 0xea, 0xac, 0x7d, 0x9c, 0x33, 0x8a, 0xa5, 0x36, 0x79, 0x6a, 0x4f, 0xa4, 0xdc, 0xa5, 0x73, 0x64, 0xc4, 0x6f, 0x43, 0x02, 0x3b, 0x94, 0x66, 0xd2, 0x4b, 0x4f, 0xf6, 0x45, 0x33, 0x5d, 0x10, 0x33, 0x18, 0x1e, 0xa3, 0xfc, 0xf7, 0xd2, 0xb8, 0xc8, 0xa7, 0xe0, 0x76, 0x8a, 0xcd, 0xff, 0x4f, 0x99, 0x34, 0x47, 0x84, 0x91, 0x96, 0x9f,
- /* (2^124)P */ 0x8a, 0x48, 0x3b, 0x48, 0x4a, 0xbc, 0xac, 0xe2, 0x80, 0xd6, 0xd2, 0x35, 0xde, 0xd0, 0x56, 0x42, 0x33, 0xb3, 0x56, 0x5a, 0xcd, 0xb8, 0x3d, 0xb5, 0x25, 0xc1, 0xed, 0xff, 0x87, 0x0b, 0x79, 0xff, 0xf2, 0x62, 0xe1, 0x76, 0xc6, 0xa2, 0x0f, 0xa8, 0x9b, 0x0d, 0xcc, 0x3f, 0x3d, 0x35, 0x27, 0x8d, 0x0b, 0x74, 0xb0, 0xc3, 0x78, 0x8c, 0xcc, 0xc8,
- /* (2^125)P */ 0xfc, 0x9a, 0x0c, 0xa8, 0x49, 0x42, 0xb8, 0xdf, 0xcf, 0xb3, 0x19, 0xa6, 0x64, 0x57, 0xfe, 0xe8, 0xf8, 0xa6, 0x4b, 0x86, 0xa1, 0xd5, 0x83, 0x7f, 0x14, 0x99, 0x18, 0x0c, 0x7d, 0x5b, 0xf7, 0x3d, 0xf9, 0x4b, 0x79, 0xb1, 0x86, 0x30, 0xb4, 0x5e, 0x6a, 0xe8, 0x9d, 0xfa, 0x8a, 0x41, 0xc4, 0x30, 0xfc, 0x56, 0x74, 0x14, 0x42, 0xc8, 0x96, 0x0e,
- /* (2^126)P */ 0xdf, 0x66, 0xec, 0xbc, 0x44, 0xdb, 0x19, 0xce, 0xd4, 0xb5, 0x49, 0x40, 0x07, 0x49, 0xe0, 0x3a, 0x61, 0x10, 0xfb, 0x7d, 0xba, 0xb1, 0xe0, 0x28, 0x5b, 0x99, 0x59, 0x96, 0xa2, 0xee, 0xe0, 0x23, 0x37, 0x39, 0x1f, 0xe6, 0x57, 0x9f, 0xf8, 0xf8, 0xdc, 0x74, 0xf6, 0x8f, 0x4f, 0x5e, 0x51, 0xa4, 0x12, 0xac, 0xbe, 0xe4, 0xf3, 0xd1, 0xf0, 0x24,
- /* (2^127)P */ 0x1e, 0x3e, 0x9a, 0x5f, 0xdf, 0x9f, 0xd6, 0x4e, 0x8a, 0x28, 0xc3, 0xcd, 0x96, 0x9d, 0x57, 0xc7, 0x61, 0x81, 0x90, 0xff, 0xae, 0xb1, 0x4f, 0xc2, 0x96, 0x8b, 0x1a, 0x18, 0xf4, 0x50, 0xcb, 0x31, 0xe1, 0x57, 0xf4, 0x90, 0xa8, 0xea, 0xac, 0xe7, 0x61, 0x98, 0xb6, 0x15, 0xc1, 0x7b, 0x29, 0xa4, 0xc3, 0x18, 0xef, 0xb9, 0xd8, 0xdf, 0xf6, 0xac,
- /* (2^128)P */ 0xca, 0xa8, 0x6c, 0xf1, 0xb4, 0xca, 0xfe, 0x31, 0xee, 0x48, 0x38, 0x8b, 0x0e, 0xbb, 0x7a, 0x30, 0xaa, 0xf9, 0xee, 0x27, 0x53, 0x24, 0xdc, 0x2e, 0x15, 0xa6, 0x48, 0x8f, 0xa0, 0x7e, 0xf1, 0xdc, 0x93, 0x87, 0x39, 0xeb, 0x7f, 0x38, 0x92, 0x92, 0x4c, 0x29, 0xe9, 0x57, 0xd8, 0x59, 0xfc, 0xe9, 0x9c, 0x44, 0xc0, 0x65, 0xcf, 0xac, 0x4b, 0xdc,
- /* (2^129)P */ 0xa3, 0xd0, 0x37, 0x8f, 0x86, 0x2f, 0xc6, 0x47, 0x55, 0x46, 0x65, 0x26, 0x4b, 0x91, 0xe2, 0x18, 0x5c, 0x4f, 0x23, 0xc1, 0x37, 0x29, 0xb9, 0xc1, 0x27, 0xc5, 0x3c, 0xbf, 0x7e, 0x23, 0xdb, 0x73, 0x99, 0xbd, 0x1b, 0xb2, 0x31, 0x68, 0x3a, 0xad, 0xb7, 0xb0, 0x10, 0xc5, 0xe5, 0x11, 0x51, 0xba, 0xa7, 0x60, 0x66, 0x54, 0xf0, 0x08, 0xd7, 0x69,
- /* (2^130)P */ 0x89, 0x41, 0x79, 0xcc, 0xeb, 0x0a, 0xf5, 0x4b, 0xa3, 0x4c, 0xce, 0x52, 0xb0, 0xa7, 0xe4, 0x41, 0x75, 0x7d, 0x04, 0xbb, 0x09, 0x4c, 0x50, 0x9f, 0xdf, 0xea, 0x74, 0x61, 0x02, 0xad, 0xb4, 0x9d, 0xb7, 0x05, 0xb9, 0xea, 0xeb, 0x91, 0x35, 0xe7, 0x49, 0xea, 0xd3, 0x4f, 0x3c, 0x60, 0x21, 0x7a, 0xde, 0xc7, 0xe2, 0x5a, 0xee, 0x8e, 0x93, 0xc7,
- /* (2^131)P */ 0x00, 0xe8, 0xed, 0xd0, 0xb3, 0x0d, 0xaf, 0xb2, 0xde, 0x2c, 0xf6, 0x00, 0xe2, 0xea, 0x6d, 0xf8, 0x0e, 0xd9, 0x67, 0x59, 0xa9, 0x50, 0xbb, 0x17, 0x8f, 0xff, 0xb1, 0x9f, 0x17, 0xb6, 0xf2, 0xb5, 0xba, 0x80, 0xf7, 0x0f, 0xba, 0xd5, 0x09, 0x43, 0xaa, 0x4e, 0x3a, 0x67, 0x6a, 0x89, 0x9b, 0x18, 0x65, 0x35, 0xf8, 0x3a, 0x49, 0x91, 0x30, 0x51,
- /* (2^132)P */ 0x8d, 0x25, 0xe9, 0x0e, 0x7d, 0x50, 0x76, 0xe4, 0x58, 0x7e, 0xb9, 0x33, 0xe6, 0x65, 0x90, 0xc2, 0x50, 0x9d, 0x50, 0x2e, 0x11, 0xad, 0xd5, 0x43, 0x52, 0x32, 0x41, 0x4f, 0x7b, 0xb6, 0xa0, 0xec, 0x81, 0x75, 0x36, 0x7c, 0x77, 0x85, 0x59, 0x70, 0xe4, 0xf9, 0xef, 0x66, 0x8d, 0x35, 0xc8, 0x2a, 0x6e, 0x5b, 0xc6, 0x0d, 0x0b, 0x29, 0x60, 0x68,
- /* (2^133)P */ 0xf8, 0xce, 0xb0, 0x3a, 0x56, 0x7d, 0x51, 0x9a, 0x25, 0x73, 0xea, 0xdd, 0xe4, 0xe0, 0x0e, 0xf0, 0x07, 0xc0, 0x31, 0x00, 0x73, 0x35, 0xd0, 0x39, 0xc4, 0x9b, 0xb7, 0x95, 0xe0, 0x62, 0x70, 0x36, 0x0b, 0xcb, 0xa0, 0x42, 0xde, 0x51, 0xcf, 0x41, 0xe0, 0xb8, 0xb4, 0xc0, 0xe5, 0x46, 0x99, 0x9f, 0x02, 0x7f, 0x14, 0x8c, 0xc1, 0x4e, 0xef, 0xe8,
- /* (2^134)P */ 0x10, 0x01, 0x57, 0x0a, 0xbe, 0x8b, 0x18, 0xc8, 0xca, 0x00, 0x28, 0x77, 0x4a, 0x9a, 0xc7, 0x55, 0x2a, 0xcc, 0x0c, 0x7b, 0xb9, 0xe9, 0xc8, 0x97, 0x7c, 0x02, 0xe3, 0x09, 0x2f, 0x62, 0x30, 0xb8, 0x40, 0x09, 0x65, 0xe9, 0x55, 0x63, 0xb5, 0x07, 0xca, 0x9f, 0x00, 0xdf, 0x9d, 0x5c, 0xc7, 0xee, 0x57, 0xa5, 0x90, 0x15, 0x1e, 0x22, 0xa0, 0x12,
- /* (2^135)P */ 0x71, 0x2d, 0xc9, 0xef, 0x27, 0xb9, 0xd8, 0x12, 0x43, 0x6b, 0xa8, 0xce, 0x3b, 0x6d, 0x6e, 0x91, 0x43, 0x23, 0xbc, 0x32, 0xb3, 0xbf, 0xe1, 0xc7, 0x39, 0xcf, 0x7c, 0x42, 0x4c, 0xb1, 0x30, 0xe2, 0xdd, 0x69, 0x06, 0xe5, 0xea, 0xf0, 0x2a, 0x16, 0x50, 0x71, 0xca, 0x92, 0xdf, 0xc1, 0xcc, 0xec, 0xe6, 0x54, 0x07, 0xf3, 0x18, 0x8d, 0xd8, 0x29,
- /* (2^136)P */ 0x98, 0x51, 0x48, 0x8f, 0xfa, 0x2e, 0x5e, 0x67, 0xb0, 0xc6, 0x17, 0x12, 0xb6, 0x7d, 0xc9, 0xad, 0x81, 0x11, 0xad, 0x0c, 0x1c, 0x2d, 0x45, 0xdf, 0xac, 0x66, 0xbd, 0x08, 0x6f, 0x7c, 0xc7, 0x06, 0x6e, 0x19, 0x08, 0x39, 0x64, 0xd7, 0xe4, 0xd1, 0x11, 0x5f, 0x1c, 0xf4, 0x67, 0xc3, 0x88, 0x6a, 0xe6, 0x07, 0xa3, 0x83, 0xd7, 0xfd, 0x2a, 0xf9,
- /* (2^137)P */ 0x87, 0xed, 0xeb, 0xd9, 0xdf, 0xff, 0x43, 0x8b, 0xaa, 0x20, 0x58, 0xb0, 0xb4, 0x6b, 0x14, 0xb8, 0x02, 0xc5, 0x40, 0x20, 0x22, 0xbb, 0xf7, 0xb4, 0xf3, 0x05, 0x1e, 0x4d, 0x94, 0xff, 0xe3, 0xc5, 0x22, 0x82, 0xfe, 0xaf, 0x90, 0x42, 0x98, 0x6b, 0x76, 0x8b, 0x3e, 0x89, 0x3f, 0x42, 0x2a, 0xa7, 0x26, 0x00, 0xda, 0x5c, 0xa2, 0x2b, 0xec, 0xdd,
- /* (2^138)P */ 0x5c, 0x21, 0x16, 0x0d, 0x46, 0xb8, 0xd0, 0xa7, 0x88, 0xe7, 0x25, 0xcb, 0x3e, 0x50, 0x73, 0x61, 0xe7, 0xaf, 0x5a, 0x3f, 0x47, 0x8b, 0x3d, 0x97, 0x79, 0x2c, 0xe6, 0x6d, 0x95, 0x74, 0x65, 0x70, 0x36, 0xfd, 0xd1, 0x9e, 0x13, 0x18, 0x63, 0xb1, 0x2d, 0x0b, 0xb5, 0x36, 0x3e, 0xe7, 0x35, 0x42, 0x3b, 0xe6, 0x1f, 0x4d, 0x9d, 0x59, 0xa2, 0x43,
- /* (2^139)P */ 0x8c, 0x0c, 0x7c, 0x24, 0x9e, 0xe0, 0xf8, 0x05, 0x1c, 0x9e, 0x1f, 0x31, 0xc0, 0x70, 0xb3, 0xfb, 0x4e, 0xf8, 0x0a, 0x57, 0xb7, 0x49, 0xb5, 0x73, 0xa1, 0x5f, 0x9b, 0x6a, 0x07, 0x6c, 0x87, 0x71, 0x87, 0xd4, 0xbe, 0x98, 0x1e, 0x98, 0xee, 0x52, 0xc1, 0x7b, 0x95, 0x0f, 0x28, 0x32, 0x36, 0x28, 0xd0, 0x3a, 0x0f, 0x7d, 0x2a, 0xa9, 0x62, 0xb9,
- /* (2^140)P */ 0x97, 0xe6, 0x18, 0x77, 0xf9, 0x34, 0xac, 0xbc, 0xe0, 0x62, 0x9f, 0x42, 0xde, 0xbd, 0x2f, 0xf7, 0x1f, 0xb7, 0x14, 0x52, 0x8a, 0x79, 0xb2, 0x3f, 0xd2, 0x95, 0x71, 0x01, 0xe8, 0xaf, 0x8c, 0xa4, 0xa4, 0xa7, 0x27, 0xf3, 0x5c, 0xdf, 0x3e, 0x57, 0x7a, 0xf1, 0x76, 0x49, 0xe6, 0x42, 0x3f, 0x8f, 0x1e, 0x63, 0x4a, 0x65, 0xb5, 0x41, 0xf5, 0x02,
- /* (2^141)P */ 0x72, 0x85, 0xc5, 0x0b, 0xe1, 0x47, 0x64, 0x02, 0xc5, 0x4d, 0x81, 0x69, 0xb2, 0xcf, 0x0f, 0x6c, 0xd4, 0x6d, 0xd0, 0xc7, 0xb4, 0x1c, 0xd0, 0x32, 0x59, 0x89, 0xe2, 0xe0, 0x96, 0x8b, 0x12, 0x98, 0xbf, 0x63, 0x7a, 0x4c, 0x76, 0x7e, 0x58, 0x17, 0x8f, 0x5b, 0x0a, 0x59, 0x65, 0x75, 0xbc, 0x61, 0x1f, 0xbe, 0xc5, 0x6e, 0x0a, 0x57, 0x52, 0x70,
- /* (2^142)P */ 0x92, 0x1c, 0x77, 0xbb, 0x62, 0x02, 0x6c, 0x25, 0x9c, 0x66, 0x07, 0x83, 0xab, 0xcc, 0x80, 0x5d, 0xd2, 0x76, 0x0c, 0xa4, 0xc5, 0xb4, 0x8a, 0x68, 0x23, 0x31, 0x32, 0x29, 0x8a, 0x47, 0x92, 0x12, 0x80, 0xb3, 0xfa, 0x18, 0xe4, 0x8d, 0xc0, 0x4d, 0xfe, 0x97, 0x5f, 0x72, 0x41, 0xb5, 0x5c, 0x7a, 0xbd, 0xf0, 0xcf, 0x5e, 0x97, 0xaa, 0x64, 0x32,
- /* (2^143)P */ 0x35, 0x3f, 0x75, 0xc1, 0x7a, 0x75, 0x7e, 0xa9, 0xc6, 0x0b, 0x4e, 0x32, 0x62, 0xec, 0xe3, 0x5c, 0xfb, 0x01, 0x43, 0xb6, 0xd4, 0x5b, 0x75, 0xd2, 0xee, 0x7f, 0x5d, 0x23, 0x2b, 0xb3, 0x54, 0x34, 0x4c, 0xd3, 0xb4, 0x32, 0x84, 0x81, 0xb5, 0x09, 0x76, 0x19, 0xda, 0x58, 0xda, 0x7c, 0xdb, 0x2e, 0xdd, 0x4c, 0x8e, 0xdd, 0x5d, 0x89, 0x10, 0x10,
- /* (2^144)P */ 0x57, 0x25, 0x6a, 0x08, 0x37, 0x92, 0xa8, 0xdf, 0x24, 0xef, 0x8f, 0x33, 0x34, 0x52, 0xa4, 0x4c, 0xf0, 0x77, 0x9f, 0x69, 0x77, 0xd5, 0x8f, 0xd2, 0x9a, 0xb3, 0xb6, 0x1d, 0x2d, 0xa6, 0xf7, 0x1f, 0xda, 0xd7, 0xcb, 0x75, 0x11, 0xc3, 0x6b, 0xc0, 0x38, 0xb1, 0xd5, 0x2d, 0x96, 0x84, 0x16, 0xfa, 0x26, 0xb9, 0xcc, 0x3f, 0x16, 0x47, 0x23, 0x74,
- /* (2^145)P */ 0x9b, 0x61, 0x2a, 0x1c, 0xdd, 0x39, 0xa5, 0xfa, 0x1c, 0x7d, 0x63, 0x50, 0xca, 0xe6, 0x9d, 0xfa, 0xb7, 0xc4, 0x4c, 0x6a, 0x97, 0x5f, 0x36, 0x4e, 0x47, 0xdd, 0x17, 0xf7, 0xf9, 0x19, 0xce, 0x75, 0x17, 0xad, 0xce, 0x2a, 0xf3, 0xfe, 0x27, 0x8f, 0x3e, 0x48, 0xc0, 0x60, 0x87, 0x24, 0x19, 0xae, 0x59, 0xe4, 0x5a, 0x00, 0x2a, 0xba, 0xa2, 0x1f,
- /* (2^146)P */ 0x26, 0x88, 0x42, 0x60, 0x9f, 0x6e, 0x2c, 0x7c, 0x39, 0x0f, 0x47, 0x6a, 0x0e, 0x02, 0xbb, 0x4b, 0x34, 0x29, 0x55, 0x18, 0x36, 0xcf, 0x3b, 0x47, 0xf1, 0x2e, 0xfc, 0x6e, 0x94, 0xff, 0xe8, 0x6b, 0x06, 0xd2, 0xba, 0x77, 0x5e, 0x60, 0xd7, 0x19, 0xef, 0x02, 0x9d, 0x3a, 0xc2, 0xb7, 0xa9, 0xd8, 0x57, 0xee, 0x7e, 0x2b, 0xf2, 0x6d, 0x28, 0xda,
- /* (2^147)P */ 0xdf, 0xd9, 0x92, 0x11, 0x98, 0x23, 0xe2, 0x45, 0x2f, 0x74, 0x70, 0xee, 0x0e, 0x55, 0x65, 0x79, 0x86, 0x38, 0x17, 0x92, 0x85, 0x87, 0x99, 0x50, 0xd9, 0x7c, 0xdb, 0xa1, 0x10, 0xec, 0x30, 0xb7, 0x40, 0xa3, 0x23, 0x9b, 0x0e, 0x27, 0x49, 0x29, 0x03, 0x94, 0xff, 0x53, 0xdc, 0xd7, 0xed, 0x49, 0xa9, 0x5a, 0x3b, 0xee, 0xd7, 0xc7, 0x65, 0xaf,
- /* (2^148)P */ 0xa0, 0xbd, 0xbe, 0x03, 0xee, 0x0c, 0xbe, 0x32, 0x00, 0x7b, 0x52, 0xcb, 0x92, 0x29, 0xbf, 0xa0, 0xc6, 0xd9, 0xd2, 0xd6, 0x15, 0xe8, 0x3a, 0x75, 0x61, 0x65, 0x56, 0xae, 0xad, 0x3c, 0x2a, 0x64, 0x14, 0x3f, 0x8e, 0xc1, 0x2d, 0x0c, 0x8d, 0x20, 0xdb, 0x58, 0x4b, 0xe5, 0x40, 0x15, 0x4b, 0xdc, 0xa8, 0xbd, 0xef, 0x08, 0xa7, 0xd1, 0xf4, 0xb0,
- /* (2^149)P */ 0xa9, 0x0f, 0x05, 0x94, 0x66, 0xac, 0x1f, 0x65, 0x3f, 0xe1, 0xb8, 0xe1, 0x34, 0x5e, 0x1d, 0x8f, 0xe3, 0x93, 0x03, 0x15, 0xff, 0xb6, 0x65, 0xb6, 0x6e, 0xc0, 0x2f, 0xd4, 0x2e, 0xb9, 0x2c, 0x13, 0x3c, 0x99, 0x1c, 0xb5, 0x87, 0xba, 0x79, 0xcb, 0xf0, 0x18, 0x06, 0x86, 0x04, 0x14, 0x25, 0x09, 0xcd, 0x1c, 0x14, 0xda, 0x35, 0xd0, 0x38, 0x3b,
- /* (2^150)P */ 0x1b, 0x04, 0xa3, 0x27, 0xb4, 0xd3, 0x37, 0x48, 0x1e, 0x8f, 0x69, 0xd3, 0x5a, 0x2f, 0x20, 0x02, 0x36, 0xbe, 0x06, 0x7b, 0x6b, 0x6c, 0x12, 0x5b, 0x80, 0x74, 0x44, 0xe6, 0xf8, 0xf5, 0x95, 0x59, 0x29, 0xab, 0x51, 0x47, 0x83, 0x28, 0xe0, 0xad, 0xde, 0xaa, 0xd3, 0xb1, 0x1a, 0xcb, 0xa3, 0xcd, 0x8b, 0x6a, 0xb1, 0xa7, 0x0a, 0xd1, 0xf9, 0xbe,
- /* (2^151)P */ 0xce, 0x2f, 0x85, 0xca, 0x74, 0x6d, 0x49, 0xb8, 0xce, 0x80, 0x44, 0xe0, 0xda, 0x5b, 0xcf, 0x2f, 0x79, 0x74, 0xfe, 0xb4, 0x2c, 0x99, 0x20, 0x6e, 0x09, 0x04, 0xfb, 0x6d, 0x57, 0x5b, 0x95, 0x0c, 0x45, 0xda, 0x4f, 0x7f, 0x63, 0xcc, 0x85, 0x5a, 0x67, 0x50, 0x68, 0x71, 0xb4, 0x67, 0xb1, 0x2e, 0xc1, 0x1c, 0xdc, 0xff, 0x2a, 0x7c, 0x10, 0x5e,
- /* (2^152)P */ 0xa6, 0xde, 0xf3, 0xd4, 0x22, 0x30, 0x24, 0x9e, 0x0b, 0x30, 0x54, 0x59, 0x7e, 0xa2, 0xeb, 0x89, 0x54, 0x65, 0x3e, 0x40, 0xd1, 0xde, 0xe6, 0xee, 0x4d, 0xbf, 0x5e, 0x40, 0x1d, 0xee, 0x4f, 0x68, 0xd9, 0xa7, 0x2f, 0xb3, 0x64, 0xb3, 0xf5, 0xc8, 0xd3, 0xaa, 0x70, 0x70, 0x3d, 0xef, 0xd3, 0x95, 0x54, 0xdb, 0x3e, 0x94, 0x95, 0x92, 0x1f, 0x45,
- /* (2^153)P */ 0x22, 0x80, 0x1d, 0x9d, 0x96, 0xa5, 0x78, 0x6f, 0xe0, 0x1e, 0x1b, 0x66, 0x42, 0xc8, 0xae, 0x9e, 0x46, 0x45, 0x08, 0x41, 0xdf, 0x80, 0xae, 0x6f, 0xdb, 0x15, 0x5a, 0x21, 0x31, 0x7a, 0xd0, 0xf2, 0x54, 0x15, 0x88, 0xd3, 0x0f, 0x7f, 0x14, 0x5a, 0x14, 0x97, 0xab, 0xf4, 0x58, 0x6a, 0x9f, 0xea, 0x74, 0xe5, 0x6b, 0x90, 0x59, 0x2b, 0x48, 0xd9,
- /* (2^154)P */ 0x12, 0x24, 0x04, 0xf5, 0x50, 0xc2, 0x8c, 0xb0, 0x7c, 0x46, 0x98, 0xd5, 0x24, 0xad, 0xf6, 0x72, 0xdc, 0x82, 0x1a, 0x60, 0xc1, 0xeb, 0x48, 0xef, 0x7f, 0x6e, 0xe6, 0xcc, 0xdb, 0x7b, 0xae, 0xbe, 0x5e, 0x1e, 0x5c, 0xe6, 0x0a, 0x70, 0xdf, 0xa4, 0xa3, 0x85, 0x1b, 0x1b, 0x7f, 0x72, 0xb9, 0x96, 0x6f, 0xdc, 0x03, 0x76, 0x66, 0xfb, 0xa0, 0x33,
- /* (2^155)P */ 0x37, 0x40, 0xbb, 0xbc, 0x68, 0x58, 0x86, 0xca, 0xbb, 0xa5, 0x24, 0x76, 0x3d, 0x48, 0xd1, 0xad, 0xb4, 0xa8, 0xcf, 0xc3, 0xb6, 0xa8, 0xba, 0x1a, 0x3a, 0xbe, 0x33, 0x75, 0x04, 0x5c, 0x13, 0x8c, 0x0d, 0x70, 0x8d, 0xa6, 0x4e, 0x2a, 0xeb, 0x17, 0x3c, 0x22, 0xdd, 0x3e, 0x96, 0x40, 0x11, 0x9e, 0x4e, 0xae, 0x3d, 0xf8, 0x91, 0xd7, 0x50, 0xc8,
- /* (2^156)P */ 0xd8, 0xca, 0xde, 0x19, 0xcf, 0x00, 0xe4, 0x73, 0x18, 0x7f, 0x9b, 0x9f, 0xf4, 0x5b, 0x49, 0x49, 0x99, 0xdc, 0xa4, 0x46, 0x21, 0xb5, 0xd7, 0x3e, 0xb7, 0x47, 0x1b, 0xa9, 0x9f, 0x4c, 0x69, 0x7d, 0xec, 0x33, 0xd6, 0x1c, 0x51, 0x7f, 0x47, 0x74, 0x7a, 0x6c, 0xf3, 0xd2, 0x2e, 0xbf, 0xdf, 0x6c, 0x9e, 0x77, 0x3b, 0x34, 0xf6, 0x73, 0x80, 0xed,
- /* (2^157)P */ 0x16, 0xfb, 0x16, 0xc3, 0xc2, 0x83, 0xe4, 0xf4, 0x03, 0x7f, 0x52, 0xb0, 0x67, 0x51, 0x7b, 0x24, 0x5a, 0x51, 0xd3, 0xb6, 0x4e, 0x59, 0x76, 0xcd, 0x08, 0x7b, 0x1d, 0x7a, 0x9c, 0x65, 0xae, 0xce, 0xaa, 0xd2, 0x1c, 0x85, 0x66, 0x68, 0x06, 0x15, 0xa8, 0x06, 0xe6, 0x16, 0x37, 0xf4, 0x49, 0x9e, 0x0f, 0x50, 0x37, 0xb1, 0xb2, 0x93, 0x70, 0x43,
- /* (2^158)P */ 0x18, 0x3a, 0x16, 0xe5, 0x8d, 0xc8, 0x35, 0xd6, 0x7b, 0x09, 0xec, 0x61, 0x5f, 0x5c, 0x2a, 0x19, 0x96, 0x2e, 0xc3, 0xfd, 0xab, 0xe6, 0x23, 0xae, 0xab, 0xc5, 0xcb, 0xb9, 0x7b, 0x2d, 0x34, 0x51, 0xb9, 0x41, 0x9e, 0x7d, 0xca, 0xda, 0x25, 0x45, 0x14, 0xb0, 0xc7, 0x4d, 0x26, 0x2b, 0xfe, 0x43, 0xb0, 0x21, 0x5e, 0xfa, 0xdc, 0x7c, 0xf9, 0x5a,
- /* (2^159)P */ 0x94, 0xad, 0x42, 0x17, 0xf5, 0xcd, 0x1c, 0x0d, 0xf6, 0x41, 0xd2, 0x55, 0xbb, 0x50, 0xf1, 0xc6, 0xbc, 0xa6, 0xc5, 0x3a, 0xfd, 0x9b, 0x75, 0x3e, 0xf6, 0x1a, 0xa7, 0xb2, 0x6e, 0x64, 0x12, 0xdc, 0x3c, 0xe5, 0xf6, 0xfc, 0x3b, 0xfa, 0x43, 0x81, 0xd4, 0xa5, 0xee, 0xf5, 0x9c, 0x47, 0x2f, 0xd0, 0x9c, 0xde, 0xa1, 0x48, 0x91, 0x9a, 0x34, 0xc1,
- /* (2^160)P */ 0x37, 0x1b, 0xb3, 0x88, 0xc9, 0x98, 0x4e, 0xfb, 0x84, 0x4f, 0x2b, 0x0a, 0xb6, 0x8f, 0x35, 0x15, 0xcd, 0x61, 0x7a, 0x5f, 0x5c, 0xa0, 0xca, 0x23, 0xa0, 0x93, 0x1f, 0xcc, 0x3c, 0x39, 0x3a, 0x24, 0xa7, 0x49, 0xad, 0x8d, 0x59, 0xcc, 0x94, 0x5a, 0x16, 0xf5, 0x70, 0xe8, 0x52, 0x1e, 0xee, 0x20, 0x30, 0x17, 0x7e, 0xf0, 0x4c, 0x93, 0x06, 0x5a,
- /* (2^161)P */ 0x81, 0xba, 0x3b, 0xd7, 0x3e, 0xb4, 0x32, 0x3a, 0x22, 0x39, 0x2a, 0xfc, 0x19, 0xd9, 0xd2, 0xf6, 0xc5, 0x79, 0x6c, 0x0e, 0xde, 0xda, 0x01, 0xff, 0x52, 0xfb, 0xb6, 0x95, 0x4e, 0x7a, 0x10, 0xb8, 0x06, 0x86, 0x3c, 0xcd, 0x56, 0xd6, 0x15, 0xbf, 0x6e, 0x3e, 0x4f, 0x35, 0x5e, 0xca, 0xbc, 0xa5, 0x95, 0xa2, 0xdf, 0x2d, 0x1d, 0xaf, 0x59, 0xf9,
- /* (2^162)P */ 0x69, 0xe5, 0xe2, 0xfa, 0xc9, 0x7f, 0xdd, 0x09, 0xf5, 0x6b, 0x4e, 0x2e, 0xbe, 0xb4, 0xbf, 0x3e, 0xb2, 0xf2, 0x81, 0x30, 0xe1, 0x07, 0xa8, 0x0d, 0x2b, 0xd2, 0x5a, 0x55, 0xbe, 0x4b, 0x86, 0x5d, 0xb0, 0x5e, 0x7c, 0x8f, 0xc1, 0x3c, 0x81, 0x4c, 0xf7, 0x6d, 0x7d, 0xe6, 0x4f, 0x8a, 0x85, 0xc2, 0x2f, 0x28, 0xef, 0x8c, 0x69, 0xc2, 0xc2, 0x1a,
- /* (2^163)P */ 0xd9, 0xe4, 0x0e, 0x1e, 0xc2, 0xf7, 0x2f, 0x9f, 0xa1, 0x40, 0xfe, 0x46, 0x16, 0xaf, 0x2e, 0xd1, 0xec, 0x15, 0x9b, 0x61, 0x92, 0xce, 0xfc, 0x10, 0x43, 0x1d, 0x00, 0xf6, 0xbe, 0x20, 0x80, 0x80, 0x6f, 0x3c, 0x16, 0x94, 0x59, 0xba, 0x03, 0x53, 0x6e, 0xb6, 0xdd, 0x25, 0x7b, 0x86, 0xbf, 0x96, 0xf4, 0x2f, 0xa1, 0x96, 0x8d, 0xf9, 0xb3, 0x29,
- /* (2^164)P */ 0x3b, 0x04, 0x60, 0x6e, 0xce, 0xab, 0xd2, 0x63, 0x18, 0x53, 0x88, 0x16, 0x4a, 0x6a, 0xab, 0x72, 0x03, 0x68, 0xa5, 0xd4, 0x0d, 0xb2, 0x82, 0x81, 0x1f, 0x2b, 0x5c, 0x75, 0xe8, 0xd2, 0x1d, 0x7f, 0xe7, 0x1b, 0x35, 0x02, 0xde, 0xec, 0xbd, 0xcb, 0xc7, 0x01, 0xd3, 0x95, 0x61, 0xfe, 0xb2, 0x7a, 0x66, 0x09, 0x4c, 0x6d, 0xfd, 0x39, 0xf7, 0x52,
- /* (2^165)P */ 0x42, 0xc1, 0x5f, 0xf8, 0x35, 0x52, 0xc1, 0xfe, 0xc5, 0x11, 0x80, 0x1c, 0x11, 0x46, 0x31, 0x11, 0xbe, 0xd0, 0xc4, 0xb6, 0x07, 0x13, 0x38, 0xa0, 0x8d, 0x65, 0xf0, 0x56, 0x9e, 0x16, 0xbf, 0x9d, 0xcd, 0x51, 0x34, 0xf9, 0x08, 0x48, 0x7b, 0x76, 0x0c, 0x7b, 0x30, 0x07, 0xa8, 0x76, 0xaf, 0xa3, 0x29, 0x38, 0xb0, 0x58, 0xde, 0x72, 0x4b, 0x45,
- /* (2^166)P */ 0xd4, 0x16, 0xa7, 0xc0, 0xb4, 0x9f, 0xdf, 0x1a, 0x37, 0xc8, 0x35, 0xed, 0xc5, 0x85, 0x74, 0x64, 0x09, 0x22, 0xef, 0xe9, 0x0c, 0xaf, 0x12, 0x4c, 0x9e, 0xf8, 0x47, 0x56, 0xe0, 0x7f, 0x4e, 0x24, 0x6b, 0x0c, 0xe7, 0xad, 0xc6, 0x47, 0x1d, 0xa4, 0x0d, 0x86, 0x89, 0x65, 0xe8, 0x5f, 0x71, 0xc7, 0xe9, 0xcd, 0xec, 0x6c, 0x62, 0xc7, 0xe3, 0xb3,
- /* (2^167)P */ 0xb5, 0xea, 0x86, 0xe3, 0x15, 0x18, 0x3f, 0x6d, 0x7b, 0x05, 0x95, 0x15, 0x53, 0x26, 0x1c, 0xeb, 0xbe, 0x7e, 0x16, 0x42, 0x4b, 0xa2, 0x3d, 0xdd, 0x0e, 0xff, 0xba, 0x67, 0xb5, 0xae, 0x7a, 0x17, 0xde, 0x23, 0xad, 0x14, 0xcc, 0xd7, 0xaf, 0x57, 0x01, 0xe0, 0xdd, 0x48, 0xdd, 0xd7, 0xe3, 0xdf, 0xe9, 0x2d, 0xda, 0x67, 0xa4, 0x9f, 0x29, 0x04,
- /* (2^168)P */ 0x16, 0x53, 0xe6, 0x9c, 0x4e, 0xe5, 0x1e, 0x70, 0x81, 0x25, 0x02, 0x9b, 0x47, 0x6d, 0xd2, 0x08, 0x73, 0xbe, 0x0a, 0xf1, 0x7b, 0xeb, 0x24, 0xeb, 0x38, 0x23, 0x5c, 0xb6, 0x3e, 0xce, 0x1e, 0xe3, 0xbc, 0x82, 0x35, 0x1f, 0xaf, 0x3a, 0x3a, 0xe5, 0x4e, 0xc1, 0xca, 0xbf, 0x47, 0xb4, 0xbb, 0xbc, 0x5f, 0xea, 0xc6, 0xca, 0xf3, 0xa0, 0xa2, 0x73,
- /* (2^169)P */ 0xef, 0xa4, 0x7a, 0x4e, 0xe4, 0xc7, 0xb6, 0x43, 0x2e, 0xa5, 0xe4, 0xa5, 0xba, 0x1e, 0xa5, 0xfe, 0x9e, 0xce, 0xa9, 0x80, 0x04, 0xcb, 0x4f, 0xd8, 0x74, 0x05, 0x48, 0xfa, 0x99, 0x11, 0x5d, 0x97, 0x3b, 0x07, 0x0d, 0xdd, 0xe6, 0xb1, 0x74, 0x87, 0x1a, 0xd3, 0x26, 0xb7, 0x8f, 0xe1, 0x63, 0x3d, 0xec, 0x53, 0x93, 0xb0, 0x81, 0x78, 0x34, 0xa4,
- /* (2^170)P */ 0xe1, 0xe7, 0xd4, 0x58, 0x9d, 0x0e, 0x8b, 0x65, 0x66, 0x37, 0x16, 0x48, 0x6f, 0xaa, 0x42, 0x37, 0x77, 0xad, 0xb1, 0x56, 0x48, 0xdf, 0x65, 0x36, 0x30, 0xb8, 0x00, 0x12, 0xd8, 0x32, 0x28, 0x7f, 0xc1, 0x71, 0xeb, 0x93, 0x0f, 0x48, 0x04, 0xe1, 0x5a, 0x6a, 0x96, 0xc1, 0xca, 0x89, 0x6d, 0x1b, 0x82, 0x4c, 0x18, 0x6d, 0x55, 0x4b, 0xea, 0xfd,
- /* (2^171)P */ 0x62, 0x1a, 0x53, 0xb4, 0xb1, 0xbe, 0x6f, 0x15, 0x18, 0x88, 0xd4, 0x66, 0x61, 0xc7, 0x12, 0x69, 0x02, 0xbd, 0x03, 0x23, 0x2b, 0xef, 0xf9, 0x54, 0xa4, 0x85, 0xa8, 0xe3, 0xb7, 0xbd, 0xa9, 0xa3, 0xf3, 0x2a, 0xdd, 0xf1, 0xd4, 0x03, 0x0f, 0xa9, 0xa1, 0xd8, 0xa3, 0xcd, 0xb2, 0x71, 0x90, 0x4b, 0x35, 0x62, 0xf2, 0x2f, 0xce, 0x67, 0x1f, 0xaa,
- /* (2^172)P */ 0x9e, 0x1e, 0xcd, 0x43, 0x7e, 0x87, 0x37, 0x94, 0x3a, 0x97, 0x4c, 0x7e, 0xee, 0xc9, 0x37, 0x85, 0xf1, 0xd9, 0x4f, 0xbf, 0xf9, 0x6f, 0x39, 0x9a, 0x39, 0x87, 0x2e, 0x25, 0x84, 0x42, 0xc3, 0x80, 0xcb, 0x07, 0x22, 0xae, 0x30, 0xd5, 0x50, 0xa1, 0x23, 0xcc, 0x31, 0x81, 0x9d, 0xf1, 0x30, 0xd9, 0x2b, 0x73, 0x41, 0x16, 0x50, 0xab, 0x2d, 0xa2,
- /* (2^173)P */ 0xa4, 0x69, 0x4f, 0xa1, 0x4e, 0xb9, 0xbf, 0x14, 0xe8, 0x2b, 0x04, 0x93, 0xb7, 0x6e, 0x9f, 0x7d, 0x73, 0x0a, 0xc5, 0x14, 0xb8, 0xde, 0x8c, 0xc1, 0xfe, 0xc0, 0xa7, 0xa4, 0xcc, 0x42, 0x42, 0x81, 0x15, 0x65, 0x8a, 0x80, 0xb9, 0xde, 0x1f, 0x60, 0x33, 0x0e, 0xcb, 0xfc, 0xe0, 0xdb, 0x83, 0xa1, 0xe5, 0xd0, 0x16, 0x86, 0x2c, 0xe2, 0x87, 0xed,
- /* (2^174)P */ 0x7a, 0xc0, 0xeb, 0x6b, 0xf6, 0x0d, 0x4c, 0x6d, 0x1e, 0xdb, 0xab, 0xe7, 0x19, 0x45, 0xc6, 0xe3, 0xb2, 0x06, 0xbb, 0xbc, 0x70, 0x99, 0x83, 0x33, 0xeb, 0x28, 0xc8, 0x77, 0xf6, 0x4d, 0x01, 0xb7, 0x59, 0xa0, 0xd2, 0xb3, 0x2a, 0x72, 0x30, 0xe7, 0x11, 0x39, 0xb6, 0x41, 0x29, 0x65, 0x5a, 0x14, 0xb9, 0x86, 0x08, 0xe0, 0x7d, 0x32, 0x8c, 0xf0,
- /* (2^175)P */ 0x5c, 0x11, 0x30, 0x9e, 0x05, 0x27, 0xf5, 0x45, 0x0f, 0xb3, 0xc9, 0x75, 0xc3, 0xd7, 0xe1, 0x82, 0x3b, 0x8e, 0x87, 0x23, 0x00, 0x15, 0x19, 0x07, 0xd9, 0x21, 0x53, 0xc7, 0xf1, 0xa3, 0xbf, 0x70, 0x64, 0x15, 0x18, 0xca, 0x23, 0x9e, 0xd3, 0x08, 0xc3, 0x2a, 0x8b, 0xe5, 0x83, 0x04, 0x89, 0x14, 0xfd, 0x28, 0x25, 0x1c, 0xe3, 0x26, 0xa7, 0x22,
- /* (2^176)P */ 0xdc, 0xd4, 0x75, 0x60, 0x99, 0x94, 0xea, 0x09, 0x8e, 0x8a, 0x3c, 0x1b, 0xf9, 0xbd, 0x33, 0x0d, 0x51, 0x3d, 0x12, 0x6f, 0x4e, 0x72, 0xe0, 0x17, 0x20, 0xe9, 0x75, 0xe6, 0x3a, 0xb2, 0x13, 0x83, 0x4e, 0x7a, 0x08, 0x9e, 0xd1, 0x04, 0x5f, 0x6b, 0x42, 0x0b, 0x76, 0x2a, 0x2d, 0x77, 0x53, 0x6c, 0x65, 0x6d, 0x8e, 0x25, 0x3c, 0xb6, 0x8b, 0x69,
- /* (2^177)P */ 0xb9, 0x49, 0x28, 0xd0, 0xdc, 0x6c, 0x8f, 0x4c, 0xc9, 0x14, 0x8a, 0x38, 0xa3, 0xcb, 0xc4, 0x9d, 0x53, 0xcf, 0xe9, 0xe3, 0xcf, 0xe0, 0xb1, 0xf2, 0x1b, 0x4c, 0x7f, 0x83, 0x2a, 0x7a, 0xe9, 0x8b, 0x3b, 0x86, 0x61, 0x30, 0xe9, 0x99, 0xbd, 0xba, 0x19, 0x6e, 0x65, 0x2a, 0x12, 0x3e, 0x9c, 0xa8, 0xaf, 0xc3, 0xcf, 0xf8, 0x1f, 0x77, 0x86, 0xea,
- /* (2^178)P */ 0x30, 0xde, 0xe7, 0xff, 0x54, 0xf7, 0xa2, 0x59, 0xf6, 0x0b, 0xfb, 0x7a, 0xf2, 0x39, 0xf0, 0xdb, 0x39, 0xbc, 0xf0, 0xfa, 0x60, 0xeb, 0x6b, 0x4f, 0x47, 0x17, 0xc8, 0x00, 0x65, 0x6d, 0x25, 0x1c, 0xd0, 0x48, 0x56, 0x53, 0x45, 0x11, 0x30, 0x02, 0x49, 0x20, 0x27, 0xac, 0xf2, 0x4c, 0xac, 0x64, 0x3d, 0x52, 0xb8, 0x89, 0xe0, 0x93, 0x16, 0x0f,
- /* (2^179)P */ 0x84, 0x09, 0xba, 0x40, 0xb2, 0x2f, 0xa3, 0xa8, 0xc2, 0xba, 0x46, 0x33, 0x05, 0x9d, 0x62, 0xad, 0xa1, 0x3c, 0x33, 0xef, 0x0d, 0xeb, 0xf0, 0x77, 0x11, 0x5a, 0xb0, 0x21, 0x9c, 0xdf, 0x55, 0x24, 0x25, 0x35, 0x51, 0x61, 0x92, 0xf0, 0xb1, 0xce, 0xf5, 0xd4, 0x7b, 0x6c, 0x21, 0x9d, 0x56, 0x52, 0xf8, 0xa1, 0x4c, 0xe9, 0x27, 0x55, 0xac, 0x91,
- /* (2^180)P */ 0x03, 0x3e, 0x30, 0xd2, 0x0a, 0xfa, 0x7d, 0x82, 0x3d, 0x1f, 0x8b, 0xcb, 0xb6, 0x04, 0x5c, 0xcc, 0x8b, 0xda, 0xe2, 0x68, 0x74, 0x08, 0x8c, 0x44, 0x83, 0x57, 0x6d, 0x6f, 0x80, 0xb0, 0x7e, 0xa9, 0x82, 0x91, 0x7b, 0x4c, 0x37, 0x97, 0xd1, 0x63, 0xd1, 0xbd, 0x45, 0xe6, 0x8a, 0x86, 0xd6, 0x89, 0x54, 0xfd, 0xd2, 0xb1, 0xd7, 0x54, 0xad, 0xaf,
- /* (2^181)P */ 0x8b, 0x33, 0x62, 0x49, 0x9f, 0x63, 0xf9, 0x87, 0x42, 0x58, 0xbf, 0xb3, 0xe6, 0x68, 0x02, 0x60, 0x5c, 0x76, 0x62, 0xf7, 0x61, 0xd7, 0x36, 0x31, 0xf7, 0x9c, 0xb5, 0xe5, 0x13, 0x6c, 0xea, 0x78, 0xae, 0xcf, 0xde, 0xbf, 0xb6, 0xeb, 0x4f, 0xc8, 0x2a, 0xb4, 0x9a, 0x9f, 0xf3, 0xd1, 0x6a, 0xec, 0x0c, 0xbd, 0x85, 0x98, 0x40, 0x06, 0x1c, 0x2a,
- /* (2^182)P */ 0x74, 0x3b, 0xe7, 0x81, 0xd5, 0xae, 0x54, 0x56, 0x03, 0xe8, 0x97, 0x16, 0x76, 0xcf, 0x24, 0x96, 0x96, 0x5b, 0xcc, 0x09, 0xab, 0x23, 0x6f, 0x54, 0xae, 0x8f, 0xe4, 0x12, 0xcb, 0xfd, 0xbc, 0xac, 0x93, 0x45, 0x3d, 0x68, 0x08, 0x22, 0x59, 0xc6, 0xf0, 0x47, 0x19, 0x8c, 0x79, 0x93, 0x1e, 0x0e, 0x30, 0xb0, 0x94, 0xfb, 0x17, 0x1d, 0x5a, 0x12,
- /* (2^183)P */ 0x85, 0xff, 0x40, 0x18, 0x85, 0xff, 0x44, 0x37, 0x69, 0x23, 0x4d, 0x34, 0xe1, 0xeb, 0xa3, 0x1b, 0x55, 0x40, 0xc1, 0x64, 0xf4, 0xd4, 0x13, 0x0a, 0x9f, 0xb9, 0x19, 0xfc, 0x88, 0x7d, 0xc0, 0x72, 0xcf, 0x69, 0x2f, 0xd2, 0x0c, 0x82, 0x0f, 0xda, 0x08, 0xba, 0x0f, 0xaa, 0x3b, 0xe9, 0xe5, 0x83, 0x7a, 0x06, 0xe8, 0x1b, 0x38, 0x43, 0xc3, 0x54,
- /* (2^184)P */ 0x14, 0xaa, 0xb3, 0x6e, 0xe6, 0x28, 0xee, 0xc5, 0x22, 0x6c, 0x7c, 0xf9, 0xa8, 0x71, 0xcc, 0xfe, 0x68, 0x7e, 0xd3, 0xb8, 0x37, 0x96, 0xca, 0x0b, 0xd9, 0xb6, 0x06, 0xa9, 0xf6, 0x71, 0xe8, 0x31, 0xf7, 0xd8, 0xf1, 0x5d, 0xab, 0xb9, 0xf0, 0x5c, 0x98, 0xcf, 0x22, 0xa2, 0x2a, 0xf6, 0xd0, 0x59, 0xf0, 0x9d, 0xd9, 0x6a, 0x4f, 0x59, 0x57, 0xad,
- /* (2^185)P */ 0xd7, 0x2b, 0x3d, 0x38, 0x4c, 0x2e, 0x23, 0x4d, 0x49, 0xa2, 0x62, 0x62, 0xf9, 0x0f, 0xde, 0x08, 0xf3, 0x86, 0x71, 0xb6, 0xc7, 0xf9, 0x85, 0x9c, 0x33, 0xa1, 0xcf, 0x16, 0xaa, 0x60, 0xb9, 0xb7, 0xea, 0xed, 0x01, 0x1c, 0x59, 0xdb, 0x3f, 0x3f, 0x97, 0x2e, 0xf0, 0x09, 0x9f, 0x10, 0x85, 0x5f, 0x53, 0x39, 0xf3, 0x13, 0x40, 0x56, 0x95, 0xf9,
- /* (2^186)P */ 0xb4, 0xe3, 0xda, 0xc6, 0x1f, 0x78, 0x8e, 0xac, 0xd4, 0x20, 0x1d, 0xa0, 0xbf, 0x4c, 0x09, 0x16, 0xa7, 0x30, 0xb5, 0x8d, 0x9e, 0xa1, 0x5f, 0x6d, 0x52, 0xf4, 0x71, 0xb6, 0x32, 0x2d, 0x21, 0x51, 0xc6, 0xfc, 0x2f, 0x08, 0xf4, 0x13, 0x6c, 0x55, 0xba, 0x72, 0x81, 0x24, 0x49, 0x0e, 0x4f, 0x06, 0x36, 0x39, 0x6a, 0xc5, 0x81, 0xfc, 0xeb, 0xb2,
- /* (2^187)P */ 0x7d, 0x8d, 0xc8, 0x6c, 0xea, 0xb4, 0xb9, 0xe8, 0x40, 0xc9, 0x69, 0xc9, 0x30, 0x05, 0xfd, 0x34, 0x46, 0xfd, 0x94, 0x05, 0x16, 0xf5, 0x4b, 0x13, 0x3d, 0x24, 0x1a, 0xd6, 0x64, 0x2b, 0x9c, 0xe2, 0xa5, 0xd9, 0x98, 0xe0, 0xe8, 0xf4, 0xbc, 0x2c, 0xbd, 0xa2, 0x56, 0xe3, 0x9e, 0x14, 0xdb, 0xbf, 0x05, 0xbf, 0x9a, 0x13, 0x5d, 0xf7, 0x91, 0xa3,
- /* (2^188)P */ 0x8b, 0xcb, 0x27, 0xf3, 0x15, 0x26, 0x05, 0x40, 0x0f, 0xa6, 0x15, 0x13, 0x71, 0x95, 0xa2, 0xc6, 0x38, 0x04, 0x67, 0xf8, 0x9a, 0x83, 0x06, 0xaa, 0x25, 0x36, 0x72, 0x01, 0x6f, 0x74, 0x5f, 0xe5, 0x6e, 0x44, 0x99, 0xce, 0x13, 0xbc, 0x82, 0xc2, 0x0d, 0xa4, 0x98, 0x50, 0x38, 0xf3, 0xa2, 0xc5, 0xe5, 0x24, 0x1f, 0x6f, 0x56, 0x3e, 0x07, 0xb2,
- /* (2^189)P */ 0xbd, 0x0f, 0x32, 0x60, 0x07, 0xb1, 0xd7, 0x0b, 0x11, 0x07, 0x57, 0x02, 0x89, 0xe8, 0x8b, 0xe8, 0x5a, 0x1f, 0xee, 0x54, 0x6b, 0xff, 0xb3, 0x04, 0x07, 0x57, 0x13, 0x0b, 0x94, 0xa8, 0x4d, 0x81, 0xe2, 0x17, 0x16, 0x45, 0xd4, 0x4b, 0xf7, 0x7e, 0x64, 0x66, 0x20, 0xe8, 0x0b, 0x26, 0xfd, 0xa9, 0x8a, 0x47, 0x52, 0x89, 0x14, 0xd0, 0xd1, 0xa1,
- /* (2^190)P */ 0xdc, 0x03, 0xe6, 0x20, 0x44, 0x47, 0x8f, 0x04, 0x16, 0x24, 0x22, 0xc1, 0x55, 0x5c, 0xbe, 0x43, 0xc3, 0x92, 0xc5, 0x54, 0x3d, 0x5d, 0xd1, 0x05, 0x9c, 0xc6, 0x7c, 0xbf, 0x23, 0x84, 0x1a, 0xba, 0x4f, 0x1f, 0xfc, 0xa1, 0xae, 0x1a, 0x64, 0x02, 0x51, 0xf1, 0xcb, 0x7a, 0x20, 0xce, 0xb2, 0x34, 0x3c, 0xca, 0xe0, 0xe4, 0xba, 0x22, 0xd4, 0x7b,
- /* (2^191)P */ 0xca, 0xfd, 0xca, 0xd7, 0xde, 0x61, 0xae, 0xf0, 0x79, 0x0c, 0x20, 0xab, 0xbc, 0x6f, 0x4d, 0x61, 0xf0, 0xc7, 0x9c, 0x8d, 0x4b, 0x52, 0xf3, 0xb9, 0x48, 0x63, 0x0b, 0xb6, 0xd2, 0x25, 0x9a, 0x96, 0x72, 0xc1, 0x6b, 0x0c, 0xb5, 0xfb, 0x71, 0xaa, 0xad, 0x47, 0x5b, 0xe7, 0xc0, 0x0a, 0x55, 0xb2, 0xd4, 0x16, 0x2f, 0xb1, 0x01, 0xfd, 0xce, 0x27,
- /* (2^192)P */ 0x64, 0x11, 0x4b, 0xab, 0x57, 0x09, 0xc6, 0x49, 0x4a, 0x37, 0xc3, 0x36, 0xc4, 0x7b, 0x81, 0x1f, 0x42, 0xed, 0xbb, 0xe0, 0xa0, 0x8d, 0x51, 0xe6, 0xca, 0x8b, 0xb9, 0xcd, 0x99, 0x2d, 0x91, 0x53, 0xa9, 0x47, 0xcb, 0x32, 0xc7, 0xa4, 0x92, 0xec, 0x46, 0x74, 0x44, 0x6d, 0x71, 0x9f, 0x6d, 0x0c, 0x69, 0xa4, 0xf8, 0xbe, 0x9f, 0x7f, 0xa0, 0xd7,
- /* (2^193)P */ 0x5f, 0x33, 0xb6, 0x91, 0xc8, 0xa5, 0x3f, 0x5d, 0x7f, 0x38, 0x6e, 0x74, 0x20, 0x4a, 0xd6, 0x2b, 0x98, 0x2a, 0x41, 0x4b, 0x83, 0x64, 0x0b, 0x92, 0x7a, 0x06, 0x1e, 0xc6, 0x2c, 0xf6, 0xe4, 0x91, 0xe5, 0xb1, 0x2e, 0x6e, 0x4e, 0xa8, 0xc8, 0x14, 0x32, 0x57, 0x44, 0x1c, 0xe4, 0xb9, 0x7f, 0x54, 0x51, 0x08, 0x81, 0xaa, 0x4e, 0xce, 0xa1, 0x5d,
- /* (2^194)P */ 0x5c, 0xd5, 0x9b, 0x5e, 0x7c, 0xb5, 0xb1, 0x52, 0x73, 0x00, 0x41, 0x56, 0x79, 0x08, 0x7e, 0x07, 0x28, 0x06, 0xa6, 0xfb, 0x7f, 0x69, 0xbd, 0x7a, 0x3c, 0xae, 0x9f, 0x39, 0xbb, 0x54, 0xa2, 0x79, 0xb9, 0x0e, 0x7f, 0xbb, 0xe0, 0xe6, 0xb7, 0x27, 0x64, 0x38, 0x45, 0xdb, 0x84, 0xe4, 0x61, 0x72, 0x3f, 0xe2, 0x24, 0xfe, 0x7a, 0x31, 0x9a, 0xc9,
- /* (2^195)P */ 0xa1, 0xd2, 0xa4, 0xee, 0x24, 0x96, 0xe5, 0x5b, 0x79, 0x78, 0x3c, 0x7b, 0x82, 0x3b, 0x8b, 0x58, 0x0b, 0xa3, 0x63, 0x2d, 0xbc, 0x75, 0x46, 0xe8, 0x83, 0x1a, 0xc0, 0x2a, 0x92, 0x61, 0xa8, 0x75, 0x37, 0x3c, 0xbf, 0x0f, 0xef, 0x8f, 0x6c, 0x97, 0x75, 0x10, 0x05, 0x7a, 0xde, 0x23, 0xe8, 0x2a, 0x35, 0xeb, 0x41, 0x64, 0x7d, 0xcf, 0xe0, 0x52,
- /* (2^196)P */ 0x4a, 0xd0, 0x49, 0x93, 0xae, 0xf3, 0x24, 0x8c, 0xe1, 0x09, 0x98, 0x45, 0xd8, 0xb9, 0xfe, 0x8e, 0x8c, 0xa8, 0x2c, 0xc9, 0x9f, 0xce, 0x01, 0xdc, 0x38, 0x11, 0xab, 0x85, 0xb9, 0xe8, 0x00, 0x51, 0xfd, 0x82, 0xe1, 0x9b, 0x4e, 0xfc, 0xb5, 0x2a, 0x0f, 0x8b, 0xda, 0x4e, 0x02, 0xca, 0xcc, 0xe3, 0x91, 0xc4, 0xe0, 0xcf, 0x7b, 0xd6, 0xe6, 0x6a,
- /* (2^197)P */ 0xfe, 0x11, 0xd7, 0xaa, 0xe3, 0x0c, 0x52, 0x2e, 0x04, 0xe0, 0xe0, 0x61, 0xc8, 0x05, 0xd7, 0x31, 0x4c, 0xc3, 0x9b, 0x2d, 0xce, 0x59, 0xbe, 0x12, 0xb7, 0x30, 0x21, 0xfc, 0x81, 0xb8, 0x5e, 0x57, 0x73, 0xd0, 0xad, 0x8e, 0x9e, 0xe4, 0xeb, 0xcd, 0xcf, 0xd2, 0x0f, 0x01, 0x35, 0x16, 0xed, 0x7a, 0x43, 0x8e, 0x42, 0xdc, 0xea, 0x4c, 0xa8, 0x7c,
- /* (2^198)P */ 0x37, 0x26, 0xcc, 0x76, 0x0b, 0xe5, 0x76, 0xdd, 0x3e, 0x19, 0x3c, 0xc4, 0x6c, 0x7f, 0xd0, 0x03, 0xc1, 0xb8, 0x59, 0x82, 0xca, 0x36, 0xc1, 0xe4, 0xc8, 0xb2, 0x83, 0x69, 0x9c, 0xc5, 0x9d, 0x12, 0x82, 0x1c, 0xea, 0xb2, 0x84, 0x9f, 0xf3, 0x52, 0x6b, 0xbb, 0xd8, 0x81, 0x56, 0x83, 0x04, 0x66, 0x05, 0x22, 0x49, 0x37, 0x93, 0xb1, 0xfd, 0xd5,
- /* (2^199)P */ 0xaf, 0x96, 0xbf, 0x03, 0xbe, 0xe6, 0x5d, 0x78, 0x19, 0xba, 0x37, 0x46, 0x0a, 0x2b, 0x52, 0x7c, 0xd8, 0x51, 0x9e, 0x3d, 0x29, 0x42, 0xdb, 0x0e, 0x31, 0x20, 0x94, 0xf8, 0x43, 0x9a, 0x2d, 0x22, 0xd3, 0xe3, 0xa1, 0x79, 0x68, 0xfb, 0x2d, 0x7e, 0xd6, 0x79, 0xda, 0x0b, 0xc6, 0x5b, 0x76, 0x68, 0xf0, 0xfe, 0x72, 0x59, 0xbb, 0xa1, 0x9c, 0x74,
- /* (2^200)P */ 0x0a, 0xd9, 0xec, 0xc5, 0xbd, 0xf0, 0xda, 0xcf, 0x82, 0xab, 0x46, 0xc5, 0x32, 0x13, 0xdc, 0x5b, 0xac, 0xc3, 0x53, 0x9a, 0x7f, 0xef, 0xa5, 0x40, 0x5a, 0x1f, 0xc1, 0x12, 0x91, 0x54, 0x83, 0x6a, 0xb0, 0x9a, 0x85, 0x4d, 0xbf, 0x36, 0x8e, 0xd3, 0xa2, 0x2b, 0xe5, 0xd6, 0xc6, 0xe1, 0x58, 0x5b, 0x82, 0x9b, 0xc8, 0xf2, 0x03, 0xba, 0xf5, 0x92,
- /* (2^201)P */ 0xfb, 0x21, 0x7e, 0xde, 0xe7, 0xb4, 0xc0, 0x56, 0x86, 0x3a, 0x5b, 0x78, 0xf8, 0xf0, 0xf4, 0xe7, 0x5c, 0x00, 0xd2, 0xd7, 0xd6, 0xf8, 0x75, 0x5e, 0x0f, 0x3e, 0xd1, 0x4b, 0x77, 0xd8, 0xad, 0xb0, 0xc9, 0x8b, 0x59, 0x7d, 0x30, 0x76, 0x64, 0x7a, 0x76, 0xd9, 0x51, 0x69, 0xfc, 0xbd, 0x8e, 0xb5, 0x55, 0xe0, 0xd2, 0x07, 0x15, 0xa9, 0xf7, 0xa4,
- /* (2^202)P */ 0xaa, 0x2d, 0x2f, 0x2b, 0x3c, 0x15, 0xdd, 0xcd, 0xe9, 0x28, 0x82, 0x4f, 0xa2, 0xaa, 0x31, 0x48, 0xcc, 0xfa, 0x07, 0x73, 0x8a, 0x34, 0x74, 0x0d, 0xab, 0x1a, 0xca, 0xd2, 0xbf, 0x3a, 0xdb, 0x1a, 0x5f, 0x50, 0x62, 0xf4, 0x6b, 0x83, 0x38, 0x43, 0x96, 0xee, 0x6b, 0x39, 0x1e, 0xf0, 0x17, 0x80, 0x1e, 0x9b, 0xed, 0x2b, 0x2f, 0xcc, 0x65, 0xf7,
- /* (2^203)P */ 0x03, 0xb3, 0x23, 0x9c, 0x0d, 0xd1, 0xeb, 0x7e, 0x34, 0x17, 0x8a, 0x4c, 0xde, 0x54, 0x39, 0xc4, 0x11, 0x82, 0xd3, 0xa4, 0x00, 0x32, 0x95, 0x9c, 0xa6, 0x64, 0x76, 0x6e, 0xd6, 0x53, 0x27, 0xb4, 0x6a, 0x14, 0x8c, 0x54, 0xf6, 0x58, 0x9e, 0x22, 0x4a, 0x55, 0x18, 0x77, 0xd0, 0x08, 0x6b, 0x19, 0x8a, 0xb5, 0xe7, 0x19, 0xb8, 0x60, 0x92, 0xb1,
- /* (2^204)P */ 0x66, 0xec, 0xf3, 0x12, 0xde, 0x67, 0x7f, 0xd4, 0x5b, 0xf6, 0x70, 0x64, 0x0a, 0xb5, 0xc2, 0xf9, 0xb3, 0x64, 0xab, 0x56, 0x46, 0xc7, 0x93, 0xc2, 0x8b, 0x2d, 0xd0, 0xd6, 0x39, 0x3b, 0x1f, 0xcd, 0xb3, 0xac, 0xcc, 0x2c, 0x27, 0x6a, 0xbc, 0xb3, 0x4b, 0xa8, 0x3c, 0x69, 0x20, 0xe2, 0x18, 0x35, 0x17, 0xe1, 0x8a, 0xd3, 0x11, 0x74, 0xaa, 0x4d,
- /* (2^205)P */ 0x96, 0xc4, 0x16, 0x7e, 0xfd, 0xf5, 0xd0, 0x7d, 0x1f, 0x32, 0x1b, 0xdb, 0xa6, 0xfd, 0x51, 0x75, 0x4d, 0xd7, 0x00, 0xe5, 0x7f, 0x58, 0x5b, 0xeb, 0x4b, 0x6a, 0x78, 0xfe, 0xe5, 0xd6, 0x8f, 0x99, 0x17, 0xca, 0x96, 0x45, 0xf7, 0x52, 0xdf, 0x84, 0x06, 0x77, 0xb9, 0x05, 0x63, 0x5d, 0xe9, 0x91, 0xb1, 0x4b, 0x82, 0x5a, 0xdb, 0xd7, 0xca, 0x69,
- /* (2^206)P */ 0x02, 0xd3, 0x38, 0x38, 0x87, 0xea, 0xbd, 0x9f, 0x11, 0xca, 0xf3, 0x21, 0xf1, 0x9b, 0x35, 0x97, 0x98, 0xff, 0x8e, 0x6d, 0x3d, 0xd6, 0xb2, 0xfa, 0x68, 0xcb, 0x7e, 0x62, 0x85, 0xbb, 0xc7, 0x5d, 0xee, 0x32, 0x30, 0x2e, 0x71, 0x96, 0x63, 0x43, 0x98, 0xc4, 0xa7, 0xde, 0x60, 0xb2, 0xd9, 0x43, 0x4a, 0xfa, 0x97, 0x2d, 0x5f, 0x21, 0xd4, 0xfe,
- /* (2^207)P */ 0x3b, 0x20, 0x29, 0x07, 0x07, 0xb5, 0x78, 0xc3, 0xc7, 0xab, 0x56, 0xba, 0x40, 0xde, 0x1d, 0xcf, 0xc3, 0x00, 0x56, 0x21, 0x0c, 0xc8, 0x42, 0xd9, 0x0e, 0xcd, 0x02, 0x7c, 0x07, 0xb9, 0x11, 0xd7, 0x96, 0xaf, 0xff, 0xad, 0xc5, 0xba, 0x30, 0x6d, 0x82, 0x3a, 0xbf, 0xef, 0x7b, 0xf7, 0x0a, 0x74, 0xbd, 0x31, 0x0c, 0xe4, 0xec, 0x1a, 0xe5, 0xc5,
- /* (2^208)P */ 0xcc, 0xf2, 0x28, 0x16, 0x12, 0xbf, 0xef, 0x85, 0xbc, 0xf7, 0xcb, 0x9f, 0xdb, 0xa8, 0xb2, 0x49, 0x53, 0x48, 0xa8, 0x24, 0xa8, 0x68, 0x8d, 0xbb, 0x21, 0x0a, 0x5a, 0xbd, 0xb2, 0x91, 0x61, 0x47, 0xc4, 0x43, 0x08, 0xa6, 0x19, 0xef, 0x8e, 0x88, 0x39, 0xc6, 0x33, 0x30, 0xf3, 0x0e, 0xc5, 0x92, 0x66, 0xd6, 0xfe, 0xc5, 0x12, 0xd9, 0x4c, 0x2d,
- /* (2^209)P */ 0x30, 0x34, 0x07, 0xbf, 0x9c, 0x5a, 0x4e, 0x65, 0xf1, 0x39, 0x35, 0x38, 0xae, 0x7b, 0x55, 0xac, 0x6a, 0x92, 0x24, 0x7e, 0x50, 0xd3, 0xba, 0x78, 0x51, 0xfe, 0x4d, 0x32, 0x05, 0x11, 0xf5, 0x52, 0xf1, 0x31, 0x45, 0x39, 0x98, 0x7b, 0x28, 0x56, 0xc3, 0x5d, 0x4f, 0x07, 0x6f, 0x84, 0xb8, 0x1a, 0x58, 0x0b, 0xc4, 0x7c, 0xc4, 0x8d, 0x32, 0x8e,
- /* (2^210)P */ 0x7e, 0xaf, 0x98, 0xce, 0xc5, 0x2b, 0x9d, 0xf6, 0xfa, 0x2c, 0xb6, 0x2a, 0x5a, 0x1d, 0xc0, 0x24, 0x8d, 0xa4, 0xce, 0xb1, 0x12, 0x01, 0xf9, 0x79, 0xc6, 0x79, 0x38, 0x0c, 0xd4, 0x07, 0xc9, 0xf7, 0x37, 0xa1, 0x0b, 0xfe, 0x72, 0xec, 0x5d, 0xd6, 0xb0, 0x1c, 0x70, 0xbe, 0x70, 0x01, 0x13, 0xe0, 0x86, 0x95, 0xc7, 0x2e, 0x12, 0x3b, 0xe6, 0xa6,
- /* (2^211)P */ 0x24, 0x82, 0x67, 0xe0, 0x14, 0x7b, 0x56, 0x08, 0x38, 0x44, 0xdb, 0xa0, 0x3a, 0x05, 0x47, 0xb2, 0xc0, 0xac, 0xd1, 0xcc, 0x3f, 0x82, 0xb8, 0x8a, 0x88, 0xbc, 0xf5, 0x33, 0xa1, 0x35, 0x0f, 0xf6, 0xe2, 0xef, 0x6c, 0xf7, 0x37, 0x9e, 0xe8, 0x10, 0xca, 0xb0, 0x8e, 0x80, 0x86, 0x00, 0x23, 0xd0, 0x4a, 0x76, 0x9f, 0xf7, 0x2c, 0x52, 0x15, 0x0e,
- /* (2^212)P */ 0x5e, 0x49, 0xe1, 0x2c, 0x9a, 0x01, 0x76, 0xa6, 0xb3, 0x07, 0x5b, 0xa4, 0x07, 0xef, 0x1d, 0xc3, 0x6a, 0xbb, 0x64, 0xbe, 0x71, 0x15, 0x6e, 0x32, 0x31, 0x46, 0x9a, 0x9e, 0x8f, 0x45, 0x73, 0xce, 0x0b, 0x94, 0x1a, 0x52, 0x07, 0xf4, 0x50, 0x30, 0x49, 0x53, 0x50, 0xfb, 0x71, 0x1f, 0x5a, 0x03, 0xa9, 0x76, 0xf2, 0x8f, 0x42, 0xff, 0xed, 0xed,
- /* (2^213)P */ 0xed, 0x08, 0xdb, 0x91, 0x1c, 0xee, 0xa2, 0xb4, 0x47, 0xa2, 0xfa, 0xcb, 0x03, 0xd1, 0xff, 0x8c, 0xad, 0x64, 0x50, 0x61, 0xcd, 0xfc, 0x88, 0xa0, 0x31, 0x95, 0x30, 0xb9, 0x58, 0xdd, 0xd7, 0x43, 0xe4, 0x46, 0xc2, 0x16, 0xd9, 0x72, 0x4a, 0x56, 0x51, 0x70, 0x85, 0xf1, 0xa1, 0x80, 0x40, 0xd5, 0xba, 0x67, 0x81, 0xda, 0xcd, 0x03, 0xea, 0x51,
- /* (2^214)P */ 0x42, 0x50, 0xf0, 0xef, 0x37, 0x61, 0x72, 0x85, 0xe1, 0xf1, 0xff, 0x6f, 0x3d, 0xe8, 0x7b, 0x21, 0x5c, 0xe5, 0x50, 0x03, 0xde, 0x00, 0xc1, 0xf7, 0x3a, 0x55, 0x12, 0x1c, 0x9e, 0x1e, 0xce, 0xd1, 0x2f, 0xaf, 0x05, 0x70, 0x5b, 0x47, 0xf2, 0x04, 0x7a, 0x89, 0xbc, 0x78, 0xa6, 0x65, 0x6c, 0xaa, 0x3c, 0xa2, 0x3c, 0x8b, 0x5c, 0xa9, 0x22, 0x48,
- /* (2^215)P */ 0x7e, 0x8c, 0x8f, 0x2f, 0x60, 0xe3, 0x5a, 0x94, 0xd4, 0xce, 0xdd, 0x9d, 0x83, 0x3b, 0x77, 0x78, 0x43, 0x1d, 0xfd, 0x8f, 0xc8, 0xe8, 0x02, 0x90, 0xab, 0xf6, 0xc9, 0xfc, 0xf1, 0x63, 0xaa, 0x5f, 0x42, 0xf1, 0x78, 0x34, 0x64, 0x16, 0x75, 0x9c, 0x7d, 0xd0, 0xe4, 0x74, 0x5a, 0xa8, 0xfb, 0xcb, 0xac, 0x20, 0xa3, 0xc2, 0xa6, 0x20, 0xf8, 0x1b,
- /* (2^216)P */ 0x00, 0x4f, 0x1e, 0x56, 0xb5, 0x34, 0xb2, 0x87, 0x31, 0xe5, 0xee, 0x8d, 0xf1, 0x41, 0x67, 0xb7, 0x67, 0x3a, 0x54, 0x86, 0x5c, 0xf0, 0x0b, 0x37, 0x2f, 0x1b, 0x92, 0x5d, 0x58, 0x93, 0xdc, 0xd8, 0x58, 0xcc, 0x9e, 0x67, 0xd0, 0x97, 0x3a, 0xaf, 0x49, 0x39, 0x2d, 0x3b, 0xd8, 0x98, 0xfb, 0x76, 0x6b, 0xe7, 0xaf, 0xc3, 0x45, 0x44, 0x53, 0x94,
- /* (2^217)P */ 0x30, 0xbd, 0x90, 0x75, 0xd3, 0xbd, 0x3b, 0x58, 0x27, 0x14, 0x9f, 0x6b, 0xd4, 0x31, 0x99, 0xcd, 0xde, 0x3a, 0x21, 0x1e, 0xb4, 0x02, 0xe4, 0x33, 0x04, 0x02, 0xb0, 0x50, 0x66, 0x68, 0x90, 0xdd, 0x7b, 0x69, 0x31, 0xd9, 0xcf, 0x68, 0x73, 0xf1, 0x60, 0xdd, 0xc8, 0x1d, 0x5d, 0xe3, 0xd6, 0x5b, 0x2a, 0xa4, 0xea, 0xc4, 0x3f, 0x08, 0xcd, 0x9c,
- /* (2^218)P */ 0x6b, 0x1a, 0xbf, 0x55, 0xc1, 0x1b, 0x0c, 0x05, 0x09, 0xdf, 0xf5, 0x5e, 0xa3, 0x77, 0x95, 0xe9, 0xdf, 0x19, 0xdd, 0xc7, 0x94, 0xcb, 0x06, 0x73, 0xd0, 0x88, 0x02, 0x33, 0x94, 0xca, 0x7a, 0x2f, 0x8e, 0x3d, 0x72, 0x61, 0x2d, 0x4d, 0xa6, 0x61, 0x1f, 0x32, 0x5e, 0x87, 0x53, 0x36, 0x11, 0x15, 0x20, 0xb3, 0x5a, 0x57, 0x51, 0x93, 0x20, 0xd8,
- /* (2^219)P */ 0xb7, 0x56, 0xf4, 0xab, 0x7d, 0x0c, 0xfb, 0x99, 0x1a, 0x30, 0x29, 0xb0, 0x75, 0x2a, 0xf8, 0x53, 0x71, 0x23, 0xbd, 0xa7, 0xd8, 0x0a, 0xe2, 0x27, 0x65, 0xe9, 0x74, 0x26, 0x98, 0x4a, 0x69, 0x19, 0xb2, 0x4d, 0x0a, 0x17, 0x98, 0xb2, 0xa9, 0x57, 0x4e, 0xf6, 0x86, 0xc8, 0x01, 0xa4, 0xc6, 0x98, 0xad, 0x5a, 0x90, 0x2c, 0x05, 0x46, 0x64, 0xb7,
- /* (2^220)P */ 0x7b, 0x91, 0xdf, 0xfc, 0xf8, 0x1c, 0x8c, 0x15, 0x9e, 0xf7, 0xd5, 0xa8, 0xe8, 0xe7, 0xe3, 0xa3, 0xb0, 0x04, 0x74, 0xfa, 0x78, 0xfb, 0x26, 0xbf, 0x67, 0x42, 0xf9, 0x8c, 0x9b, 0xb4, 0x69, 0x5b, 0x02, 0x13, 0x6d, 0x09, 0x6c, 0xd6, 0x99, 0x61, 0x7b, 0x89, 0x4a, 0x67, 0x75, 0xa3, 0x98, 0x13, 0x23, 0x1d, 0x18, 0x24, 0x0e, 0xef, 0x41, 0x79,
- /* (2^221)P */ 0x86, 0x33, 0xab, 0x08, 0xcb, 0xbf, 0x1e, 0x76, 0x3c, 0x0b, 0xbd, 0x30, 0xdb, 0xe9, 0xa3, 0x35, 0x87, 0x1b, 0xe9, 0x07, 0x00, 0x66, 0x7f, 0x3b, 0x35, 0x0c, 0x8a, 0x3f, 0x61, 0xbc, 0xe0, 0xae, 0xf6, 0xcc, 0x54, 0xe1, 0x72, 0x36, 0x2d, 0xee, 0x93, 0x24, 0xf8, 0xd7, 0xc5, 0xf9, 0xcb, 0xb0, 0xe5, 0x88, 0x0d, 0x23, 0x4b, 0x76, 0x15, 0xa2,
- /* (2^222)P */ 0x37, 0xdb, 0x83, 0xd5, 0x6d, 0x06, 0x24, 0x37, 0x1b, 0x15, 0x85, 0x15, 0xe2, 0xc0, 0x4e, 0x02, 0xa9, 0x6d, 0x0a, 0x3a, 0x94, 0x4a, 0x6f, 0x49, 0x00, 0x01, 0x72, 0xbb, 0x60, 0x14, 0x35, 0xae, 0xb4, 0xc6, 0x01, 0x0a, 0x00, 0x9e, 0xc3, 0x58, 0xc5, 0xd1, 0x5e, 0x30, 0x73, 0x96, 0x24, 0x85, 0x9d, 0xf0, 0xf9, 0xec, 0x09, 0xd3, 0xe7, 0x70,
- /* (2^223)P */ 0xf3, 0xbd, 0x96, 0x87, 0xe9, 0x71, 0xbd, 0xd6, 0xa2, 0x45, 0xeb, 0x0a, 0xcd, 0x2c, 0xf1, 0x72, 0xa6, 0x31, 0xa9, 0x6f, 0x09, 0xa1, 0x5e, 0xdd, 0xc8, 0x8d, 0x0d, 0xbc, 0x5a, 0x8d, 0xb1, 0x2c, 0x9a, 0xcc, 0x37, 0x74, 0xc2, 0xa9, 0x4e, 0xd6, 0xc0, 0x3c, 0xa0, 0x23, 0xb0, 0xa0, 0x77, 0x14, 0x80, 0x45, 0x71, 0x6a, 0x2d, 0x41, 0xc3, 0x82,
- /* (2^224)P */ 0x37, 0x44, 0xec, 0x8a, 0x3e, 0xc1, 0x0c, 0xa9, 0x12, 0x9c, 0x08, 0x88, 0xcb, 0xd9, 0xf8, 0xba, 0x00, 0xd6, 0xc3, 0xdf, 0xef, 0x7a, 0x44, 0x7e, 0x25, 0x69, 0xc9, 0xc1, 0x46, 0xe5, 0x20, 0x9e, 0xcc, 0x0b, 0x05, 0x3e, 0xf4, 0x78, 0x43, 0x0c, 0xa6, 0x2f, 0xc1, 0xfa, 0x70, 0xb2, 0x3c, 0x31, 0x7a, 0x63, 0x58, 0xab, 0x17, 0xcf, 0x4c, 0x4f,
- /* (2^225)P */ 0x2b, 0x08, 0x31, 0x59, 0x75, 0x8b, 0xec, 0x0a, 0xa9, 0x79, 0x70, 0xdd, 0xf1, 0x11, 0xc3, 0x11, 0x1f, 0xab, 0x37, 0xaa, 0x26, 0xea, 0x53, 0xc4, 0x79, 0xa7, 0x91, 0x00, 0xaa, 0x08, 0x42, 0xeb, 0x8b, 0x8b, 0xe8, 0xc3, 0x2f, 0xb8, 0x78, 0x90, 0x38, 0x0e, 0x8a, 0x42, 0x0c, 0x0f, 0xbf, 0x3e, 0xf8, 0xd8, 0x07, 0xcf, 0x6a, 0x34, 0xc9, 0xfa,
- /* (2^226)P */ 0x11, 0xe0, 0x76, 0x4d, 0x23, 0xc5, 0xa6, 0xcc, 0x9f, 0x9a, 0x2a, 0xde, 0x3a, 0xb5, 0x92, 0x39, 0x19, 0x8a, 0xf1, 0x8d, 0xf9, 0x4d, 0xc9, 0xb4, 0x39, 0x9f, 0x57, 0xd8, 0x72, 0xab, 0x1d, 0x61, 0x6a, 0xb2, 0xff, 0x52, 0xba, 0x54, 0x0e, 0xfb, 0x83, 0x30, 0x8a, 0xf7, 0x3b, 0xf4, 0xd8, 0xae, 0x1a, 0x94, 0x3a, 0xec, 0x63, 0xfe, 0x6e, 0x7c,
- /* (2^227)P */ 0xdc, 0x70, 0x8e, 0x55, 0x44, 0xbf, 0xd2, 0x6a, 0xa0, 0x14, 0x61, 0x89, 0xd5, 0x55, 0x45, 0x3c, 0xf6, 0x40, 0x0d, 0x83, 0x85, 0x44, 0xb4, 0x62, 0x56, 0xfe, 0x60, 0xd7, 0x07, 0x1d, 0x47, 0x30, 0x3b, 0x73, 0xa4, 0xb5, 0xb7, 0xea, 0xac, 0xda, 0xf1, 0x17, 0xaa, 0x60, 0xdf, 0xe9, 0x84, 0xda, 0x31, 0x32, 0x61, 0xbf, 0xd0, 0x7e, 0x8a, 0x02,
- /* (2^228)P */ 0xb9, 0x51, 0xb3, 0x89, 0x21, 0x5d, 0xa2, 0xfe, 0x79, 0x2a, 0xb3, 0x2a, 0x3b, 0xe6, 0x6f, 0x2b, 0x22, 0x03, 0xea, 0x7b, 0x1f, 0xaf, 0x85, 0xc3, 0x38, 0x55, 0x5b, 0x8e, 0xb4, 0xaa, 0x77, 0xfe, 0x03, 0x6e, 0xda, 0x91, 0x24, 0x0c, 0x48, 0x39, 0x27, 0x43, 0x16, 0xd2, 0x0a, 0x0d, 0x43, 0xa3, 0x0e, 0xca, 0x45, 0xd1, 0x7f, 0xf5, 0xd3, 0x16,
- /* (2^229)P */ 0x3d, 0x32, 0x9b, 0x38, 0xf8, 0x06, 0x93, 0x78, 0x5b, 0x50, 0x2b, 0x06, 0xd8, 0x66, 0xfe, 0xab, 0x9b, 0x58, 0xc7, 0xd1, 0x4d, 0xd5, 0xf8, 0x3b, 0x10, 0x7e, 0x85, 0xde, 0x58, 0x4e, 0xdf, 0x53, 0xd9, 0x58, 0xe0, 0x15, 0x81, 0x9f, 0x1a, 0x78, 0xfc, 0x9f, 0x10, 0xc2, 0x23, 0xd6, 0x78, 0xd1, 0x9d, 0xd2, 0xd5, 0x1c, 0x53, 0xe2, 0xc9, 0x76,
- /* (2^230)P */ 0x98, 0x1e, 0x38, 0x7b, 0x71, 0x18, 0x4b, 0x15, 0xaf, 0xa1, 0xa6, 0x98, 0xcb, 0x26, 0xa3, 0xc8, 0x07, 0x46, 0xda, 0x3b, 0x70, 0x65, 0xec, 0x7a, 0x2b, 0x34, 0x94, 0xa8, 0xb6, 0x14, 0xf8, 0x1a, 0xce, 0xf7, 0xc8, 0x60, 0xf3, 0x88, 0xf4, 0x33, 0x60, 0x7b, 0xd1, 0x02, 0xe7, 0xda, 0x00, 0x4a, 0xea, 0xd2, 0xfd, 0x88, 0xd2, 0x99, 0x28, 0xf3,
- /* (2^231)P */ 0x28, 0x24, 0x1d, 0x26, 0xc2, 0xeb, 0x8b, 0x3b, 0xb4, 0x6b, 0xbe, 0x6b, 0x77, 0xff, 0xf3, 0x21, 0x3b, 0x26, 0x6a, 0x8c, 0x8e, 0x2a, 0x44, 0xa8, 0x01, 0x2b, 0x71, 0xea, 0x64, 0x30, 0xfd, 0xfd, 0x95, 0xcb, 0x39, 0x38, 0x48, 0xfa, 0x96, 0x97, 0x8c, 0x2f, 0x33, 0xca, 0x03, 0xe6, 0xd7, 0x94, 0x55, 0x6c, 0xc3, 0xb3, 0xa8, 0xf7, 0xae, 0x8c,
- /* (2^232)P */ 0xea, 0x62, 0x8a, 0xb4, 0xeb, 0x74, 0xf7, 0xb8, 0xae, 0xc5, 0x20, 0x71, 0x06, 0xd6, 0x7c, 0x62, 0x9b, 0x69, 0x74, 0xef, 0xa7, 0x6d, 0xd6, 0x8c, 0x37, 0xb9, 0xbf, 0xcf, 0xeb, 0xe4, 0x2f, 0x04, 0x02, 0x21, 0x7d, 0x75, 0x6b, 0x92, 0x48, 0xf8, 0x70, 0xad, 0x69, 0xe2, 0xea, 0x0e, 0x88, 0x67, 0x72, 0xcc, 0x2d, 0x10, 0xce, 0x2d, 0xcf, 0x65,
- /* (2^233)P */ 0x49, 0xf3, 0x57, 0x64, 0xe5, 0x5c, 0xc5, 0x65, 0x49, 0x97, 0xc4, 0x8a, 0xcc, 0xa9, 0xca, 0x94, 0x7b, 0x86, 0x88, 0xb6, 0x51, 0x27, 0x69, 0xa5, 0x0f, 0x8b, 0x06, 0x59, 0xa0, 0x94, 0xef, 0x63, 0x1a, 0x01, 0x9e, 0x4f, 0xd2, 0x5a, 0x93, 0xc0, 0x7c, 0xe6, 0x61, 0x77, 0xb6, 0xf5, 0x40, 0xd9, 0x98, 0x43, 0x5b, 0x56, 0x68, 0xe9, 0x37, 0x8f,
- /* (2^234)P */ 0xee, 0x87, 0xd2, 0x05, 0x1b, 0x39, 0x89, 0x10, 0x07, 0x6d, 0xe8, 0xfd, 0x8b, 0x4d, 0xb2, 0xa7, 0x7b, 0x1e, 0xa0, 0x6c, 0x0d, 0x3d, 0x3d, 0x49, 0xba, 0x61, 0x36, 0x1f, 0xc2, 0x84, 0x4a, 0xcc, 0x87, 0xa9, 0x1b, 0x23, 0x04, 0xe2, 0x3e, 0x97, 0xe1, 0xdb, 0xd5, 0x5a, 0xe8, 0x41, 0x6b, 0xe5, 0x5a, 0xa1, 0x99, 0xe5, 0x7b, 0xa7, 0xe0, 0x3b,
- /* (2^235)P */ 0xea, 0xa3, 0x6a, 0xdd, 0x77, 0x7f, 0x77, 0x41, 0xc5, 0x6a, 0xe4, 0xaf, 0x11, 0x5f, 0x88, 0xa5, 0x10, 0xee, 0xd0, 0x8c, 0x0c, 0xb4, 0xa5, 0x2a, 0xd0, 0xd8, 0x1d, 0x47, 0x06, 0xc0, 0xd5, 0xce, 0x51, 0x54, 0x9b, 0x2b, 0xe6, 0x2f, 0xe7, 0xe7, 0x31, 0x5f, 0x5c, 0x23, 0x81, 0x3e, 0x03, 0x93, 0xaa, 0x2d, 0x71, 0x84, 0xa0, 0x89, 0x32, 0xa6,
- /* (2^236)P */ 0x55, 0xa3, 0x13, 0x92, 0x4e, 0x93, 0x7d, 0xec, 0xca, 0x57, 0xfb, 0x37, 0xae, 0xd2, 0x18, 0x2e, 0x54, 0x05, 0x6c, 0xd1, 0x28, 0xca, 0x90, 0x40, 0x82, 0x2e, 0x79, 0xc6, 0x5a, 0xc7, 0xdd, 0x84, 0x93, 0xdf, 0x15, 0xb8, 0x1f, 0xb1, 0xf9, 0xaf, 0x2c, 0xe5, 0x32, 0xcd, 0xc2, 0x99, 0x6d, 0xac, 0x85, 0x5c, 0x63, 0xd3, 0xe2, 0xff, 0x24, 0xda,
- /* (2^237)P */ 0x2d, 0x8d, 0xfd, 0x65, 0xcc, 0xe5, 0x02, 0xa0, 0xe5, 0xb9, 0xec, 0x59, 0x09, 0x50, 0x27, 0xb7, 0x3d, 0x2a, 0x79, 0xb2, 0x76, 0x5d, 0x64, 0x95, 0xf8, 0xc5, 0xaf, 0x8a, 0x62, 0x11, 0x5c, 0x56, 0x1c, 0x05, 0x64, 0x9e, 0x5e, 0xbd, 0x54, 0x04, 0xe6, 0x9e, 0xab, 0xe6, 0x22, 0x7e, 0x42, 0x54, 0xb5, 0xa5, 0xd0, 0x8d, 0x28, 0x6b, 0x0f, 0x0b,
- /* (2^238)P */ 0x2d, 0xb2, 0x8c, 0x59, 0x10, 0x37, 0x84, 0x3b, 0x9b, 0x65, 0x1b, 0x0f, 0x10, 0xf9, 0xea, 0x60, 0x1b, 0x02, 0xf5, 0xee, 0x8b, 0xe6, 0x32, 0x7d, 0x10, 0x7f, 0x5f, 0x8c, 0x72, 0x09, 0x4e, 0x1f, 0x29, 0xff, 0x65, 0xcb, 0x3e, 0x3a, 0xd2, 0x96, 0x50, 0x1e, 0xea, 0x64, 0x99, 0xb5, 0x4c, 0x7a, 0x69, 0xb8, 0x95, 0xae, 0x48, 0xc0, 0x7c, 0xb1,
- /* (2^239)P */ 0xcd, 0x7c, 0x4f, 0x3e, 0xea, 0xf3, 0x90, 0xcb, 0x12, 0x76, 0xd1, 0x17, 0xdc, 0x0d, 0x13, 0x0f, 0xfd, 0x4d, 0xb5, 0x1f, 0xe4, 0xdd, 0xf2, 0x4d, 0x58, 0xea, 0xa5, 0x66, 0x92, 0xcf, 0xe5, 0x54, 0xea, 0x9b, 0x35, 0x83, 0x1a, 0x44, 0x8e, 0x62, 0x73, 0x45, 0x98, 0xa3, 0x89, 0x95, 0x52, 0x93, 0x1a, 0x8d, 0x63, 0x0f, 0xc2, 0x57, 0x3c, 0xb1,
- /* (2^240)P */ 0x72, 0xb4, 0xdf, 0x51, 0xb7, 0xf6, 0x52, 0xa2, 0x14, 0x56, 0xe5, 0x0a, 0x2e, 0x75, 0x81, 0x02, 0xee, 0x93, 0x48, 0x0a, 0x92, 0x4e, 0x0c, 0x0f, 0xdf, 0x09, 0x89, 0x99, 0xf6, 0xf9, 0x22, 0xa2, 0x32, 0xf8, 0xb0, 0x76, 0x0c, 0xb2, 0x4d, 0x6e, 0xbe, 0x83, 0x35, 0x61, 0x44, 0xd2, 0x58, 0xc7, 0xdd, 0x14, 0xcf, 0xc3, 0x4b, 0x7c, 0x07, 0xee,
- /* (2^241)P */ 0x8b, 0x03, 0xee, 0xcb, 0xa7, 0x2e, 0x28, 0xbd, 0x97, 0xd1, 0x4c, 0x2b, 0xd1, 0x92, 0x67, 0x5b, 0x5a, 0x12, 0xbf, 0x29, 0x17, 0xfc, 0x50, 0x09, 0x74, 0x76, 0xa2, 0xd4, 0x82, 0xfd, 0x2c, 0x0c, 0x90, 0xf7, 0xe7, 0xe5, 0x9a, 0x2c, 0x16, 0x40, 0xb9, 0x6c, 0xd9, 0xe0, 0x22, 0x9e, 0xf8, 0xdd, 0x73, 0xe4, 0x7b, 0x9e, 0xbe, 0x4f, 0x66, 0x22,
- /* (2^242)P */ 0xa4, 0x10, 0xbe, 0xb8, 0x83, 0x3a, 0x77, 0x8e, 0xea, 0x0a, 0xc4, 0x97, 0x3e, 0xb6, 0x6c, 0x81, 0xd7, 0x65, 0xd9, 0xf7, 0xae, 0xe6, 0xbe, 0xab, 0x59, 0x81, 0x29, 0x4b, 0xff, 0xe1, 0x0f, 0xc3, 0x2b, 0xad, 0x4b, 0xef, 0xc4, 0x50, 0x9f, 0x88, 0x31, 0xf2, 0xde, 0x80, 0xd6, 0xf4, 0x20, 0x9c, 0x77, 0x9b, 0xbe, 0xbe, 0x08, 0xf5, 0xf0, 0x95,
- /* (2^243)P */ 0x0e, 0x7c, 0x7b, 0x7c, 0xb3, 0xd8, 0x83, 0xfc, 0x8c, 0x75, 0x51, 0x74, 0x1b, 0xe1, 0x6d, 0x11, 0x05, 0x46, 0x24, 0x0d, 0xa4, 0x2b, 0x32, 0xfd, 0x2c, 0x4e, 0x21, 0xdf, 0x39, 0x6b, 0x96, 0xfc, 0xff, 0x92, 0xfc, 0x35, 0x0d, 0x9a, 0x4b, 0xc0, 0x70, 0x46, 0x32, 0x7d, 0xc0, 0xc4, 0x04, 0xe0, 0x2d, 0x83, 0xa7, 0x00, 0xc7, 0xcb, 0xb4, 0x8f,
- /* (2^244)P */ 0xa9, 0x5a, 0x7f, 0x0e, 0xdd, 0x2c, 0x85, 0xaa, 0x4d, 0xac, 0xde, 0xb3, 0xb6, 0xaf, 0xe6, 0xd1, 0x06, 0x7b, 0x2c, 0xa4, 0x01, 0x19, 0x22, 0x7d, 0x78, 0xf0, 0x3a, 0xea, 0x89, 0xfe, 0x21, 0x61, 0x6d, 0xb8, 0xfe, 0xa5, 0x2a, 0xab, 0x0d, 0x7b, 0x51, 0x39, 0xb6, 0xde, 0xbc, 0xf0, 0xc5, 0x48, 0xd7, 0x09, 0x82, 0x6e, 0x66, 0x75, 0xc5, 0xcd,
- /* (2^245)P */ 0xee, 0xdf, 0x2b, 0x6c, 0xa8, 0xde, 0x61, 0xe1, 0x27, 0xfa, 0x2a, 0x0f, 0x68, 0xe7, 0x7a, 0x9b, 0x13, 0xe9, 0x56, 0xd2, 0x1c, 0x3d, 0x2f, 0x3c, 0x7a, 0xf6, 0x6f, 0x45, 0xee, 0xe8, 0xf4, 0xa0, 0xa6, 0xe8, 0xa5, 0x27, 0xee, 0xf2, 0x85, 0xa9, 0xd5, 0x0e, 0xa9, 0x26, 0x60, 0xfe, 0xee, 0xc7, 0x59, 0x99, 0x5e, 0xa3, 0xdf, 0x23, 0x36, 0xd5,
- /* (2^246)P */ 0x15, 0x66, 0x6f, 0xd5, 0x78, 0xa4, 0x0a, 0xf7, 0xb1, 0xe8, 0x75, 0x6b, 0x48, 0x7d, 0xa6, 0x4d, 0x3d, 0x36, 0x9b, 0xc7, 0xcc, 0x68, 0x9a, 0xfe, 0x2f, 0x39, 0x2a, 0x51, 0x31, 0x39, 0x7d, 0x73, 0x6f, 0xc8, 0x74, 0x72, 0x6f, 0x6e, 0xda, 0x5f, 0xad, 0x48, 0xc8, 0x40, 0xe1, 0x06, 0x01, 0x36, 0xa1, 0x88, 0xc8, 0x99, 0x9c, 0xd1, 0x11, 0x8f,
- /* (2^247)P */ 0xab, 0xc5, 0xcb, 0xcf, 0xbd, 0x73, 0x21, 0xd0, 0x82, 0xb1, 0x2e, 0x2d, 0xd4, 0x36, 0x1b, 0xed, 0xa9, 0x8a, 0x26, 0x79, 0xc4, 0x17, 0xae, 0xe5, 0x09, 0x0a, 0x0c, 0xa4, 0x21, 0xa0, 0x6e, 0xdd, 0x62, 0x8e, 0x44, 0x62, 0xcc, 0x50, 0xff, 0x93, 0xb3, 0x9a, 0x72, 0x8c, 0x3f, 0xa1, 0xa6, 0x4d, 0x87, 0xd5, 0x1c, 0x5a, 0xc0, 0x0b, 0x1a, 0xd6,
- /* (2^248)P */ 0x67, 0x36, 0x6a, 0x1f, 0x96, 0xe5, 0x80, 0x20, 0xa9, 0xe8, 0x0b, 0x0e, 0x21, 0x29, 0x3f, 0xc8, 0x0a, 0x6d, 0x27, 0x47, 0xca, 0xd9, 0x05, 0x55, 0xbf, 0x11, 0xcf, 0x31, 0x7a, 0x37, 0xc7, 0x90, 0xa9, 0xf4, 0x07, 0x5e, 0xd5, 0xc3, 0x92, 0xaa, 0x95, 0xc8, 0x23, 0x2a, 0x53, 0x45, 0xe3, 0x3a, 0x24, 0xe9, 0x67, 0x97, 0x3a, 0x82, 0xf9, 0xa6,
- /* (2^249)P */ 0x92, 0x9e, 0x6d, 0x82, 0x67, 0xe9, 0xf9, 0x17, 0x96, 0x2c, 0xa7, 0xd3, 0x89, 0xf9, 0xdb, 0xd8, 0x20, 0xc6, 0x2e, 0xec, 0x4a, 0x76, 0x64, 0xbf, 0x27, 0x40, 0xe2, 0xb4, 0xdf, 0x1f, 0xa0, 0xef, 0x07, 0x80, 0xfb, 0x8e, 0x12, 0xf8, 0xb8, 0xe1, 0xc6, 0xdf, 0x7c, 0x69, 0x35, 0x5a, 0xe1, 0x8e, 0x5d, 0x69, 0x84, 0x56, 0xb6, 0x31, 0x1c, 0x0b,
- /* (2^250)P */ 0xd6, 0x94, 0x5c, 0xef, 0xbb, 0x46, 0x45, 0x44, 0x5b, 0xa1, 0xae, 0x03, 0x65, 0xdd, 0xb5, 0x66, 0x88, 0x35, 0x29, 0x95, 0x16, 0x54, 0xa6, 0xf5, 0xc9, 0x78, 0x34, 0xe6, 0x0f, 0xc4, 0x2b, 0x5b, 0x79, 0x51, 0x68, 0x48, 0x3a, 0x26, 0x87, 0x05, 0x70, 0xaf, 0x8b, 0xa6, 0xc7, 0x2e, 0xb3, 0xa9, 0x10, 0x01, 0xb0, 0xb9, 0x31, 0xfd, 0xdc, 0x80,
- /* (2^251)P */ 0x25, 0xf2, 0xad, 0xd6, 0x75, 0xa3, 0x04, 0x05, 0x64, 0x8a, 0x97, 0x60, 0x27, 0x2a, 0xe5, 0x6d, 0xb0, 0x73, 0xf4, 0x07, 0x2a, 0x9d, 0xe9, 0x46, 0xb4, 0x1c, 0x51, 0xf8, 0x63, 0x98, 0x7e, 0xe5, 0x13, 0x51, 0xed, 0x98, 0x65, 0x98, 0x4f, 0x8f, 0xe7, 0x7e, 0x72, 0xd7, 0x64, 0x11, 0x2f, 0xcd, 0x12, 0xf8, 0xc4, 0x63, 0x52, 0x0f, 0x7f, 0xc4,
- /* (2^252)P */ 0x5c, 0xd9, 0x85, 0x63, 0xc7, 0x8a, 0x65, 0x9a, 0x25, 0x83, 0x31, 0x73, 0x49, 0xf0, 0x93, 0x96, 0x70, 0x67, 0x6d, 0xb1, 0xff, 0x95, 0x54, 0xe4, 0xf8, 0x15, 0x6c, 0x5f, 0xbd, 0xf6, 0x0f, 0x38, 0x7b, 0x68, 0x7d, 0xd9, 0x3d, 0xf0, 0xa9, 0xa0, 0xe4, 0xd1, 0xb6, 0x34, 0x6d, 0x14, 0x16, 0xc2, 0x4c, 0x30, 0x0e, 0x67, 0xd3, 0xbe, 0x2e, 0xc0,
- /* (2^253)P */ 0x06, 0x6b, 0x52, 0xc8, 0x14, 0xcd, 0xae, 0x03, 0x93, 0xea, 0xc1, 0xf2, 0xf6, 0x8b, 0xc5, 0xb6, 0xdc, 0x82, 0x42, 0x29, 0x94, 0xe0, 0x25, 0x6c, 0x3f, 0x9f, 0x5d, 0xe4, 0x96, 0xf6, 0x8e, 0x3f, 0xf9, 0x72, 0xc4, 0x77, 0x60, 0x8b, 0xa4, 0xf9, 0xa8, 0xc3, 0x0a, 0x81, 0xb1, 0x97, 0x70, 0x18, 0xab, 0xea, 0x37, 0x8a, 0x08, 0xc7, 0xe2, 0x95,
- /* (2^254)P */ 0x94, 0x49, 0xd9, 0x5f, 0x76, 0x72, 0x82, 0xad, 0x2d, 0x50, 0x1a, 0x7a, 0x5b, 0xe6, 0x95, 0x1e, 0x95, 0x65, 0x87, 0x1c, 0x52, 0xd7, 0x44, 0xe6, 0x9b, 0x56, 0xcd, 0x6f, 0x05, 0xff, 0x67, 0xc5, 0xdb, 0xa2, 0xac, 0xe4, 0xa2, 0x28, 0x63, 0x5f, 0xfb, 0x0c, 0x3b, 0xf1, 0x87, 0xc3, 0x36, 0x78, 0x3f, 0x77, 0xfa, 0x50, 0x85, 0xf9, 0xd7, 0x82,
- /* (2^255)P */ 0x64, 0xc0, 0xe0, 0xd8, 0x2d, 0xed, 0xcb, 0x6a, 0xfd, 0xcd, 0xbc, 0x7e, 0x9f, 0xc8, 0x85, 0xe9, 0xc1, 0x7c, 0x0f, 0xe5, 0x18, 0xea, 0xd4, 0x51, 0xad, 0x59, 0x13, 0x75, 0xd9, 0x3d, 0xd4, 0x8a, 0xb2, 0xbe, 0x78, 0x52, 0x2b, 0x52, 0x94, 0x37, 0x41, 0xd6, 0xb4, 0xb6, 0x45, 0x20, 0x76, 0xe0, 0x1f, 0x31, 0xdb, 0xb1, 0xa1, 0x43, 0xf0, 0x18,
- /* (2^256)P */ 0x74, 0xa9, 0xa4, 0xa9, 0xdd, 0x6e, 0x3e, 0x68, 0xe5, 0xc3, 0x2e, 0x92, 0x17, 0xa4, 0xcb, 0x80, 0xb1, 0xf0, 0x06, 0x93, 0xef, 0xe6, 0x00, 0xe6, 0x3b, 0xb1, 0x32, 0x65, 0x7b, 0x83, 0xb6, 0x8a, 0x49, 0x1b, 0x14, 0x89, 0xee, 0xba, 0xf5, 0x6a, 0x8d, 0x36, 0xef, 0xb0, 0xd8, 0xb2, 0x16, 0x99, 0x17, 0x35, 0x02, 0x16, 0x55, 0x58, 0xdd, 0x82,
- /* (2^257)P */ 0x36, 0x95, 0xe8, 0xf4, 0x36, 0x42, 0xbb, 0xc5, 0x3e, 0xfa, 0x30, 0x84, 0x9e, 0x59, 0xfd, 0xd2, 0x95, 0x42, 0xf8, 0x64, 0xd9, 0xb9, 0x0e, 0x9f, 0xfa, 0xd0, 0x7b, 0x20, 0x31, 0x77, 0x48, 0x29, 0x4d, 0xd0, 0x32, 0x57, 0x56, 0x30, 0xa6, 0x17, 0x53, 0x04, 0xbf, 0x08, 0x28, 0xec, 0xb8, 0x46, 0xc1, 0x03, 0x89, 0xdc, 0xed, 0xa0, 0x35, 0x53,
- /* (2^258)P */ 0xc5, 0x7f, 0x9e, 0xd8, 0xc5, 0xba, 0x5f, 0x68, 0xc8, 0x23, 0x75, 0xea, 0x0d, 0xd9, 0x5a, 0xfd, 0x61, 0x1a, 0xa3, 0x2e, 0x45, 0x63, 0x14, 0x55, 0x86, 0x21, 0x29, 0xbe, 0xef, 0x5e, 0x50, 0xe5, 0x18, 0x59, 0xe7, 0xe3, 0xce, 0x4d, 0x8c, 0x15, 0x8f, 0x89, 0x66, 0x44, 0x52, 0x3d, 0xfa, 0xc7, 0x9a, 0x59, 0x90, 0x8e, 0xc0, 0x06, 0x3f, 0xc9,
- /* (2^259)P */ 0x8e, 0x04, 0xd9, 0x16, 0x50, 0x1d, 0x8c, 0x9f, 0xd5, 0xe3, 0xce, 0xfd, 0x47, 0x04, 0x27, 0x4d, 0xc2, 0xfa, 0x71, 0xd9, 0x0b, 0xb8, 0x65, 0xf4, 0x11, 0xf3, 0x08, 0xee, 0x81, 0xc8, 0x67, 0x99, 0x0b, 0x8d, 0x77, 0xa3, 0x4f, 0xb5, 0x9b, 0xdb, 0x26, 0xf1, 0x97, 0xeb, 0x04, 0x54, 0xeb, 0x80, 0x08, 0x1d, 0x1d, 0xf6, 0x3d, 0x1f, 0x5a, 0xb8,
- /* (2^260)P */ 0xb7, 0x9c, 0x9d, 0xee, 0xb9, 0x5c, 0xad, 0x0d, 0x9e, 0xfd, 0x60, 0x3c, 0x27, 0x4e, 0xa2, 0x95, 0xfb, 0x64, 0x7e, 0x79, 0x64, 0x87, 0x10, 0xb4, 0x73, 0xe0, 0x9d, 0x46, 0x4d, 0x3d, 0xee, 0x83, 0xe4, 0x16, 0x88, 0x97, 0xe6, 0x4d, 0xba, 0x70, 0xb6, 0x96, 0x7b, 0xff, 0x4b, 0xc8, 0xcf, 0x72, 0x83, 0x3e, 0x5b, 0x24, 0x2e, 0x57, 0xf1, 0x82,
- /* (2^261)P */ 0x30, 0x71, 0x40, 0x51, 0x4f, 0x44, 0xbb, 0xc7, 0xf0, 0x54, 0x6e, 0x9d, 0xeb, 0x15, 0xad, 0xf8, 0x61, 0x43, 0x5a, 0xef, 0xc0, 0xb1, 0x57, 0xae, 0x03, 0x40, 0xe8, 0x68, 0x6f, 0x03, 0x20, 0x4f, 0x8a, 0x51, 0x2a, 0x9e, 0xd2, 0x45, 0xaf, 0xb4, 0xf5, 0xd4, 0x95, 0x7f, 0x3d, 0x3d, 0xb7, 0xb6, 0x28, 0xc5, 0x08, 0x8b, 0x44, 0xd6, 0x3f, 0xe7,
- /* (2^262)P */ 0xa9, 0x52, 0x04, 0x67, 0xcb, 0x20, 0x63, 0xf8, 0x18, 0x01, 0x44, 0x21, 0x6a, 0x8a, 0x83, 0x48, 0xd4, 0xaf, 0x23, 0x0f, 0x35, 0x8d, 0xe5, 0x5a, 0xc4, 0x7c, 0x55, 0x46, 0x19, 0x5f, 0x35, 0xe0, 0x5d, 0x97, 0x4c, 0x2d, 0x04, 0xed, 0x59, 0xd4, 0xb0, 0xb2, 0xc6, 0xe3, 0x51, 0xe1, 0x38, 0xc6, 0x30, 0x49, 0x8f, 0xae, 0x61, 0x64, 0xce, 0xa8,
- /* (2^263)P */ 0x9b, 0x64, 0x83, 0x3c, 0xd3, 0xdf, 0xb9, 0x27, 0xe7, 0x5b, 0x7f, 0xeb, 0xf3, 0x26, 0xcf, 0xb1, 0x8f, 0xaf, 0x26, 0xc8, 0x48, 0xce, 0xa1, 0xac, 0x7d, 0x10, 0x34, 0x28, 0xe1, 0x1f, 0x69, 0x03, 0x64, 0x77, 0x61, 0xdd, 0x4a, 0x9b, 0x18, 0x47, 0xf8, 0xca, 0x63, 0xc9, 0x03, 0x2d, 0x20, 0x2a, 0x69, 0x6e, 0x42, 0xd0, 0xe7, 0xaa, 0xb5, 0xf3,
- /* (2^264)P */ 0xea, 0x31, 0x0c, 0x57, 0x0f, 0x3e, 0xe3, 0x35, 0xd8, 0x30, 0xa5, 0x6f, 0xdd, 0x95, 0x43, 0xc6, 0x66, 0x07, 0x4f, 0x34, 0xc3, 0x7e, 0x04, 0x10, 0x2d, 0xc4, 0x1c, 0x94, 0x52, 0x2e, 0x5b, 0x9a, 0x65, 0x2f, 0x91, 0xaa, 0x4f, 0x3c, 0xdc, 0x23, 0x18, 0xe1, 0x4f, 0x85, 0xcd, 0xf4, 0x8c, 0x51, 0xf7, 0xab, 0x4f, 0xdc, 0x15, 0x5c, 0x9e, 0xc5,
- /* (2^265)P */ 0x54, 0x57, 0x23, 0x17, 0xe7, 0x82, 0x2f, 0x04, 0x7d, 0xfe, 0xe7, 0x1f, 0xa2, 0x57, 0x79, 0xe9, 0x58, 0x9b, 0xbe, 0xc6, 0x16, 0x4a, 0x17, 0x50, 0x90, 0x4a, 0x34, 0x70, 0x87, 0x37, 0x01, 0x26, 0xd8, 0xa3, 0x5f, 0x07, 0x7c, 0xd0, 0x7d, 0x05, 0x8a, 0x93, 0x51, 0x2f, 0x99, 0xea, 0xcf, 0x00, 0xd8, 0xc7, 0xe6, 0x9b, 0x8c, 0x62, 0x45, 0x87,
- /* (2^266)P */ 0xc3, 0xfd, 0x29, 0x66, 0xe7, 0x30, 0x29, 0x77, 0xe0, 0x0d, 0x63, 0x5b, 0xe6, 0x90, 0x1a, 0x1e, 0x99, 0xc2, 0xa7, 0xab, 0xff, 0xa7, 0xbd, 0x79, 0x01, 0x97, 0xfd, 0x27, 0x1b, 0x43, 0x2b, 0xe6, 0xfe, 0x5e, 0xf1, 0xb9, 0x35, 0x38, 0x08, 0x25, 0x55, 0x90, 0x68, 0x2e, 0xc3, 0x67, 0x39, 0x9f, 0x2b, 0x2c, 0x70, 0x48, 0x8c, 0x47, 0xee, 0x56,
- /* (2^267)P */ 0xf7, 0x32, 0x70, 0xb5, 0xe6, 0x42, 0xfd, 0x0a, 0x39, 0x9b, 0x07, 0xfe, 0x0e, 0xf4, 0x47, 0xba, 0x6a, 0x3f, 0xf5, 0x2c, 0x15, 0xf3, 0x60, 0x3f, 0xb1, 0x83, 0x7b, 0x2e, 0x34, 0x58, 0x1a, 0x6e, 0x4a, 0x49, 0x05, 0x45, 0xca, 0xdb, 0x00, 0x01, 0x0c, 0x42, 0x5e, 0x60, 0x40, 0x5f, 0xd9, 0xc7, 0x3a, 0x9e, 0x1c, 0x8d, 0xab, 0x11, 0x55, 0x65,
- /* (2^268)P */ 0x87, 0x40, 0xb7, 0x0d, 0xaa, 0x34, 0x89, 0x90, 0x75, 0x6d, 0xa2, 0xfe, 0x3b, 0x6d, 0x5c, 0x39, 0x98, 0x10, 0x9e, 0x15, 0xc5, 0x35, 0xa2, 0x27, 0x23, 0x0a, 0x2d, 0x60, 0xe2, 0xa8, 0x7f, 0x3e, 0x77, 0x8f, 0xcc, 0x44, 0xcc, 0x30, 0x28, 0xe2, 0xf0, 0x04, 0x8c, 0xee, 0xe4, 0x5f, 0x68, 0x8c, 0xdf, 0x70, 0xbf, 0x31, 0xee, 0x2a, 0xfc, 0xce,
- /* (2^269)P */ 0x92, 0xf2, 0xa0, 0xd9, 0x58, 0x3b, 0x7c, 0x1a, 0x99, 0x46, 0x59, 0x54, 0x60, 0x06, 0x8d, 0x5e, 0xf0, 0x22, 0xa1, 0xed, 0x92, 0x8a, 0x4d, 0x76, 0x95, 0x05, 0x0b, 0xff, 0xfc, 0x9a, 0xd1, 0xcc, 0x05, 0xb9, 0x5e, 0x99, 0xe8, 0x2a, 0x76, 0x7b, 0xfd, 0xa6, 0xe2, 0xd1, 0x1a, 0xd6, 0x76, 0x9f, 0x2f, 0x0e, 0xd1, 0xa8, 0x77, 0x5a, 0x40, 0x5a,
- /* (2^270)P */ 0xff, 0xf9, 0x3f, 0xa9, 0xa6, 0x6c, 0x6d, 0x03, 0x8b, 0xa7, 0x10, 0x5d, 0x3f, 0xec, 0x3e, 0x1c, 0x0b, 0x6b, 0xa2, 0x6a, 0x22, 0xa9, 0x28, 0xd0, 0x66, 0xc9, 0xc2, 0x3d, 0x47, 0x20, 0x7d, 0xa6, 0x1d, 0xd8, 0x25, 0xb5, 0xf2, 0xf9, 0x70, 0x19, 0x6b, 0xf8, 0x43, 0x36, 0xc5, 0x1f, 0xe4, 0x5a, 0x4c, 0x13, 0xe4, 0x6d, 0x08, 0x0b, 0x1d, 0xb1,
- /* (2^271)P */ 0x3f, 0x20, 0x9b, 0xfb, 0xec, 0x7d, 0x31, 0xc5, 0xfc, 0x88, 0x0b, 0x30, 0xed, 0x36, 0xc0, 0x63, 0xb1, 0x7d, 0x10, 0xda, 0xb6, 0x2e, 0xad, 0xf3, 0xec, 0x94, 0xe7, 0xec, 0xb5, 0x9c, 0xfe, 0xf5, 0x35, 0xf0, 0xa2, 0x2d, 0x7f, 0xca, 0x6b, 0x67, 0x1a, 0xf6, 0xb3, 0xda, 0x09, 0x2a, 0xaa, 0xdf, 0xb1, 0xca, 0x9b, 0xfb, 0xeb, 0xb3, 0xcd, 0xc0,
- /* (2^272)P */ 0xcd, 0x4d, 0x89, 0x00, 0xa4, 0x3b, 0x48, 0xf0, 0x76, 0x91, 0x35, 0xa5, 0xf8, 0xc9, 0xb6, 0x46, 0xbc, 0xf6, 0x9a, 0x45, 0x47, 0x17, 0x96, 0x80, 0x5b, 0x3a, 0x28, 0x33, 0xf9, 0x5a, 0xef, 0x43, 0x07, 0xfe, 0x3b, 0xf4, 0x8e, 0x19, 0xce, 0xd2, 0x94, 0x4b, 0x6d, 0x8e, 0x67, 0x20, 0xc7, 0x4f, 0x2f, 0x59, 0x8e, 0xe1, 0xa1, 0xa9, 0xf9, 0x0e,
- /* (2^273)P */ 0xdc, 0x7b, 0xb5, 0x50, 0x2e, 0xe9, 0x7e, 0x8b, 0x78, 0xa1, 0x38, 0x96, 0x22, 0xc3, 0x61, 0x67, 0x6d, 0xc8, 0x58, 0xed, 0x41, 0x1d, 0x5d, 0x86, 0x98, 0x7f, 0x2f, 0x1b, 0x8d, 0x3e, 0xaa, 0xc1, 0xd2, 0x0a, 0xf3, 0xbf, 0x95, 0x04, 0xf3, 0x10, 0x3c, 0x2b, 0x7f, 0x90, 0x46, 0x04, 0xaa, 0x6a, 0xa9, 0x35, 0x76, 0xac, 0x49, 0xb5, 0x00, 0x45,
- /* (2^274)P */ 0xb1, 0x93, 0x79, 0x84, 0x4a, 0x2a, 0x30, 0x78, 0x16, 0xaa, 0xc5, 0x74, 0x06, 0xce, 0xa5, 0xa7, 0x32, 0x86, 0xe0, 0xf9, 0x10, 0xd2, 0x58, 0x76, 0xfb, 0x66, 0x49, 0x76, 0x3a, 0x90, 0xba, 0xb5, 0xcc, 0x99, 0xcd, 0x09, 0xc1, 0x9a, 0x74, 0x23, 0xdf, 0x0c, 0xfe, 0x99, 0x52, 0x80, 0xa3, 0x7c, 0x1c, 0x71, 0x5f, 0x2c, 0x49, 0x57, 0xf4, 0xf9,
- /* (2^275)P */ 0x6d, 0xbf, 0x52, 0xe6, 0x25, 0x98, 0xed, 0xcf, 0xe3, 0xbc, 0x08, 0xa2, 0x1a, 0x90, 0xae, 0xa0, 0xbf, 0x07, 0x15, 0xad, 0x0a, 0x9f, 0x3e, 0x47, 0x44, 0xc2, 0x10, 0x46, 0xa6, 0x7a, 0x9e, 0x2f, 0x57, 0xbc, 0xe2, 0xf0, 0x1d, 0xd6, 0x9a, 0x06, 0xed, 0xfc, 0x54, 0x95, 0x92, 0x15, 0xa2, 0xf7, 0x8d, 0x6b, 0xef, 0xb2, 0x05, 0xed, 0x5c, 0x63,
- /* (2^276)P */ 0xbc, 0x0b, 0x27, 0x3a, 0x3a, 0xf8, 0xe1, 0x48, 0x02, 0x7e, 0x27, 0xe6, 0x81, 0x62, 0x07, 0x73, 0x74, 0xe5, 0x52, 0xd7, 0xf8, 0x26, 0xca, 0x93, 0x4d, 0x3e, 0x9b, 0x55, 0x09, 0x8e, 0xe3, 0xd7, 0xa6, 0xe3, 0xb6, 0x2a, 0xa9, 0xb3, 0xb0, 0xa0, 0x8c, 0x01, 0xbb, 0x07, 0x90, 0x78, 0x6d, 0x6d, 0xe9, 0xf0, 0x7a, 0x90, 0xbd, 0xdc, 0x0c, 0x36,
- /* (2^277)P */ 0x7f, 0x20, 0x12, 0x0f, 0x40, 0x00, 0x53, 0xd8, 0x0c, 0x27, 0x47, 0x47, 0x22, 0x80, 0xfb, 0x62, 0xe4, 0xa7, 0xf7, 0xbd, 0x42, 0xa5, 0xc3, 0x2b, 0xb2, 0x7f, 0x50, 0xcc, 0xe2, 0xfb, 0xd5, 0xc0, 0x63, 0xdd, 0x24, 0x5f, 0x7c, 0x08, 0x91, 0xbf, 0x6e, 0x47, 0x44, 0xd4, 0x6a, 0xc0, 0xc3, 0x09, 0x39, 0x27, 0xdd, 0xc7, 0xca, 0x06, 0x29, 0x55,
- /* (2^278)P */ 0x76, 0x28, 0x58, 0xb0, 0xd2, 0xf3, 0x0f, 0x04, 0xe9, 0xc9, 0xab, 0x66, 0x5b, 0x75, 0x51, 0xdc, 0xe5, 0x8f, 0xe8, 0x1f, 0xdb, 0x03, 0x0f, 0xb0, 0x7d, 0xf9, 0x20, 0x64, 0x89, 0xe9, 0xdc, 0xe6, 0x24, 0xc3, 0xd5, 0xd2, 0x41, 0xa6, 0xe4, 0xe3, 0xc4, 0x79, 0x7c, 0x0f, 0xa1, 0x61, 0x2f, 0xda, 0xa4, 0xc9, 0xfd, 0xad, 0x5c, 0x65, 0x6a, 0xf3,
- /* (2^279)P */ 0xd5, 0xab, 0x72, 0x7a, 0x3b, 0x59, 0xea, 0xcf, 0xd5, 0x17, 0xd2, 0xb2, 0x5f, 0x2d, 0xab, 0xad, 0x9e, 0x88, 0x64, 0x55, 0x96, 0x6e, 0xf3, 0x44, 0xa9, 0x11, 0xf5, 0xf8, 0x3a, 0xf1, 0xcd, 0x79, 0x4c, 0x99, 0x6d, 0x23, 0x6a, 0xa0, 0xc2, 0x1a, 0x19, 0x45, 0xb5, 0xd8, 0x95, 0x2f, 0x49, 0xe9, 0x46, 0x39, 0x26, 0x60, 0x04, 0x15, 0x8b, 0xcc,
- /* (2^280)P */ 0x66, 0x0c, 0xf0, 0x54, 0x41, 0x02, 0x91, 0xab, 0xe5, 0x85, 0x8a, 0x44, 0xa6, 0x34, 0x96, 0x32, 0xc0, 0xdf, 0x6c, 0x41, 0x39, 0xd4, 0xc6, 0xe1, 0xe3, 0x81, 0xb0, 0x4c, 0x34, 0x4f, 0xe5, 0xf4, 0x35, 0x46, 0x1f, 0xeb, 0x75, 0xfd, 0x43, 0x37, 0x50, 0x99, 0xab, 0xad, 0xb7, 0x8c, 0xa1, 0x57, 0xcb, 0xe6, 0xce, 0x16, 0x2e, 0x85, 0xcc, 0xf9,
- /* (2^281)P */ 0x63, 0xd1, 0x3f, 0x9e, 0xa2, 0x17, 0x2e, 0x1d, 0x3e, 0xce, 0x48, 0x2d, 0xbb, 0x8f, 0x69, 0xc9, 0xa6, 0x3d, 0x4e, 0xfe, 0x09, 0x56, 0xb3, 0x02, 0x5f, 0x99, 0x97, 0x0c, 0x54, 0xda, 0x32, 0x97, 0x9b, 0xf4, 0x95, 0xf1, 0xad, 0xe3, 0x2b, 0x04, 0xa7, 0x9b, 0x3f, 0xbb, 0xe7, 0x87, 0x2e, 0x1f, 0x8b, 0x4b, 0x7a, 0xa4, 0x43, 0x0c, 0x0f, 0x35,
- /* (2^282)P */ 0x05, 0xdc, 0xe0, 0x2c, 0xa1, 0xc1, 0xd0, 0xf1, 0x1f, 0x4e, 0xc0, 0x6c, 0x35, 0x7b, 0xca, 0x8f, 0x8b, 0x02, 0xb1, 0xf7, 0xd6, 0x2e, 0xe7, 0x93, 0x80, 0x85, 0x18, 0x88, 0x19, 0xb9, 0xb4, 0x4a, 0xbc, 0xeb, 0x5a, 0x78, 0x38, 0xed, 0xc6, 0x27, 0x2a, 0x74, 0x76, 0xf0, 0x1b, 0x79, 0x92, 0x2f, 0xd2, 0x81, 0x98, 0xdf, 0xa9, 0x50, 0x19, 0xeb,
- /* (2^283)P */ 0xb5, 0xe7, 0xb4, 0x11, 0x3a, 0x81, 0xb6, 0xb4, 0xf8, 0xa2, 0xb3, 0x6c, 0xfc, 0x9d, 0xe0, 0xc0, 0xe0, 0x59, 0x7f, 0x05, 0x37, 0xef, 0x2c, 0xa9, 0x3a, 0x24, 0xac, 0x7b, 0x25, 0xa0, 0x55, 0xd2, 0x44, 0x82, 0x82, 0x6e, 0x64, 0xa3, 0x58, 0xc8, 0x67, 0xae, 0x26, 0xa7, 0x0f, 0x42, 0x63, 0xe1, 0x93, 0x01, 0x52, 0x19, 0xaf, 0x49, 0x3e, 0x33,
- /* (2^284)P */ 0x05, 0x85, 0xe6, 0x66, 0xaf, 0x5f, 0xdf, 0xbf, 0x9d, 0x24, 0x62, 0x60, 0x90, 0xe2, 0x4c, 0x7d, 0x4e, 0xc3, 0x74, 0x5d, 0x4f, 0x53, 0xf3, 0x63, 0x13, 0xf4, 0x74, 0x28, 0x6b, 0x7d, 0x57, 0x0c, 0x9d, 0x84, 0xa7, 0x1a, 0xff, 0xa0, 0x79, 0xdf, 0xfc, 0x65, 0x98, 0x8e, 0x22, 0x0d, 0x62, 0x7e, 0xf2, 0x34, 0x60, 0x83, 0x05, 0x14, 0xb1, 0xc1,
- /* (2^285)P */ 0x64, 0x22, 0xcc, 0xdf, 0x5c, 0xbc, 0x88, 0x68, 0x4c, 0xd9, 0xbc, 0x0e, 0xc9, 0x8b, 0xb4, 0x23, 0x52, 0xad, 0xb0, 0xb3, 0xf1, 0x17, 0xd8, 0x15, 0x04, 0x6b, 0x99, 0xf0, 0xc4, 0x7d, 0x48, 0x22, 0x4a, 0xf8, 0x6f, 0xaa, 0x88, 0x0d, 0xc5, 0x5e, 0xa9, 0x1c, 0x61, 0x3d, 0x95, 0xa9, 0x7b, 0x6a, 0x79, 0x33, 0x0a, 0x2b, 0x99, 0xe3, 0x4e, 0x48,
- /* (2^286)P */ 0x6b, 0x9b, 0x6a, 0x2a, 0xf1, 0x60, 0x31, 0xb4, 0x73, 0xd1, 0x87, 0x45, 0x9c, 0x15, 0x58, 0x4b, 0x91, 0x6d, 0x94, 0x1c, 0x41, 0x11, 0x4a, 0x83, 0xec, 0xaf, 0x65, 0xbc, 0x34, 0xaa, 0x26, 0xe2, 0xaf, 0xed, 0x46, 0x05, 0x4e, 0xdb, 0xc6, 0x4e, 0x10, 0x28, 0x4e, 0x72, 0xe5, 0x31, 0xa3, 0x20, 0xd7, 0xb1, 0x96, 0x64, 0xf6, 0xce, 0x08, 0x08,
- /* (2^287)P */ 0x16, 0xa9, 0x5c, 0x9f, 0x9a, 0xb4, 0xb8, 0xc8, 0x32, 0x78, 0xc0, 0x3a, 0xd9, 0x5f, 0x94, 0xac, 0x3a, 0x42, 0x1f, 0x43, 0xd6, 0x80, 0x47, 0x2c, 0xdc, 0x76, 0x27, 0xfa, 0x50, 0xe5, 0xa1, 0xe4, 0xc3, 0xcb, 0x61, 0x31, 0xe1, 0x2e, 0xde, 0x81, 0x3b, 0x77, 0x1c, 0x39, 0x3c, 0xdb, 0xda, 0x87, 0x4b, 0x84, 0x12, 0xeb, 0xdd, 0x54, 0xbf, 0xe7,
- /* (2^288)P */ 0xbf, 0xcb, 0x73, 0x21, 0x3d, 0x7e, 0x13, 0x8c, 0xa6, 0x34, 0x21, 0x2b, 0xa5, 0xe4, 0x9f, 0x8e, 0x9c, 0x01, 0x9c, 0x43, 0xd9, 0xc7, 0xb9, 0xf1, 0xbe, 0x7f, 0x45, 0x51, 0x97, 0xa1, 0x8e, 0x01, 0xf8, 0xbd, 0xd2, 0xbf, 0x81, 0x3a, 0x8b, 0xab, 0xe4, 0x89, 0xb7, 0xbd, 0xf2, 0xcd, 0xa9, 0x8a, 0x8a, 0xde, 0xfb, 0x8a, 0x55, 0x12, 0x7b, 0x17,
- /* (2^289)P */ 0x1b, 0x95, 0x58, 0x4d, 0xe6, 0x51, 0x31, 0x52, 0x1c, 0xd8, 0x15, 0x84, 0xb1, 0x0d, 0x36, 0x25, 0x88, 0x91, 0x46, 0x71, 0x42, 0x56, 0xe2, 0x90, 0x08, 0x9e, 0x77, 0x1b, 0xee, 0x22, 0x3f, 0xec, 0xee, 0x8c, 0x7b, 0x2e, 0x79, 0xc4, 0x6c, 0x07, 0xa1, 0x7e, 0x52, 0xf5, 0x26, 0x5c, 0x84, 0x2a, 0x50, 0x6e, 0x82, 0xb3, 0x76, 0xda, 0x35, 0x16,
- /* (2^290)P */ 0x0a, 0x6f, 0x99, 0x87, 0xc0, 0x7d, 0x8a, 0xb2, 0xca, 0xae, 0xe8, 0x65, 0x98, 0x0f, 0xb3, 0x44, 0xe1, 0xdc, 0x52, 0x79, 0x75, 0xec, 0x8f, 0x95, 0x87, 0x45, 0xd1, 0x32, 0x18, 0x55, 0x15, 0xce, 0x64, 0x9b, 0x08, 0x4f, 0x2c, 0xea, 0xba, 0x1c, 0x57, 0x06, 0x63, 0xc8, 0xb1, 0xfd, 0xc5, 0x67, 0xe7, 0x1f, 0x87, 0x9e, 0xde, 0x72, 0x7d, 0xec,
- /* (2^291)P */ 0x36, 0x8b, 0x4d, 0x2c, 0xc2, 0x46, 0xe8, 0x96, 0xac, 0x0b, 0x8c, 0xc5, 0x09, 0x10, 0xfc, 0xf2, 0xda, 0xea, 0x22, 0xb2, 0xd3, 0x89, 0xeb, 0xb2, 0x85, 0x0f, 0xff, 0x59, 0x50, 0x2c, 0x99, 0x5a, 0x1f, 0xec, 0x2a, 0x6f, 0xec, 0xcf, 0xe9, 0xce, 0x12, 0x6b, 0x19, 0xd8, 0xde, 0x9b, 0xce, 0x0e, 0x6a, 0xaa, 0xe1, 0x32, 0xea, 0x4c, 0xfe, 0x92,
- /* (2^292)P */ 0x5f, 0x17, 0x70, 0x53, 0x26, 0x03, 0x0b, 0xab, 0xd1, 0xc1, 0x42, 0x0b, 0xab, 0x2b, 0x3d, 0x31, 0xa4, 0xd5, 0x2b, 0x5e, 0x00, 0xd5, 0x9a, 0x22, 0x34, 0xe0, 0x53, 0x3f, 0x59, 0x7f, 0x2c, 0x6d, 0x72, 0x9a, 0xa4, 0xbe, 0x3d, 0x42, 0x05, 0x1b, 0xf2, 0x7f, 0x88, 0x56, 0xd1, 0x7c, 0x7d, 0x6b, 0x9f, 0x43, 0xfe, 0x65, 0x19, 0xae, 0x9c, 0x4c,
- /* (2^293)P */ 0xf3, 0x7c, 0x20, 0xa9, 0xfc, 0xf2, 0xf2, 0x3b, 0x3c, 0x57, 0x41, 0x94, 0xe5, 0xcc, 0x6a, 0x37, 0x5d, 0x09, 0xf2, 0xab, 0xc2, 0xca, 0x60, 0x38, 0x6b, 0x7a, 0xe1, 0x78, 0x2b, 0xc1, 0x1d, 0xe8, 0xfd, 0xbc, 0x3d, 0x5c, 0xa2, 0xdb, 0x49, 0x20, 0x79, 0xe6, 0x1b, 0x9b, 0x65, 0xd9, 0x6d, 0xec, 0x57, 0x1d, 0xd2, 0xe9, 0x90, 0xeb, 0x43, 0x7b,
- /* (2^294)P */ 0x2a, 0x8b, 0x2e, 0x19, 0x18, 0x10, 0xb8, 0x83, 0xe7, 0x7d, 0x2d, 0x9a, 0x3a, 0xe5, 0xd1, 0xe4, 0x7c, 0x38, 0xe5, 0x59, 0x2a, 0x6e, 0xd9, 0x01, 0x29, 0x3d, 0x23, 0xf7, 0x52, 0xba, 0x61, 0x04, 0x9a, 0xde, 0xc4, 0x31, 0x50, 0xeb, 0x1b, 0xaa, 0xde, 0x39, 0x58, 0xd8, 0x1b, 0x1e, 0xfc, 0x57, 0x9a, 0x28, 0x43, 0x9e, 0x97, 0x5e, 0xaa, 0xa3,
- /* (2^295)P */ 0x97, 0x0a, 0x74, 0xc4, 0x39, 0x99, 0x6b, 0x40, 0xc7, 0x3e, 0x8c, 0xa7, 0xb1, 0x4e, 0x9a, 0x59, 0x6e, 0x1c, 0xfe, 0xfc, 0x2a, 0x5e, 0x73, 0x2b, 0x8c, 0xa9, 0x71, 0xf5, 0xda, 0x6b, 0x15, 0xab, 0xf7, 0xbe, 0x2a, 0x44, 0x5f, 0xba, 0xae, 0x67, 0x93, 0xc5, 0x86, 0xc1, 0xb8, 0xdf, 0xdc, 0xcb, 0xd7, 0xff, 0xb1, 0x71, 0x7c, 0x6f, 0x88, 0xf8,
- /* (2^296)P */ 0x3f, 0x89, 0xb1, 0xbf, 0x24, 0x16, 0xac, 0x56, 0xfe, 0xdf, 0x94, 0x71, 0xbf, 0xd6, 0x57, 0x0c, 0xb4, 0x77, 0x37, 0xaa, 0x2a, 0x70, 0x76, 0x49, 0xaf, 0x0c, 0x97, 0x8e, 0x78, 0x2a, 0x67, 0xc9, 0x3b, 0x3d, 0x5b, 0x01, 0x2f, 0xda, 0xd5, 0xa8, 0xde, 0x02, 0xa9, 0xac, 0x76, 0x00, 0x0b, 0x46, 0xc6, 0x2d, 0xdc, 0x08, 0xf4, 0x10, 0x2c, 0xbe,
- /* (2^297)P */ 0xcb, 0x07, 0xf9, 0x91, 0xc6, 0xd5, 0x3e, 0x54, 0x63, 0xae, 0xfc, 0x10, 0xbe, 0x3a, 0x20, 0x73, 0x4e, 0x65, 0x0e, 0x2d, 0x86, 0x77, 0x83, 0x9d, 0xe2, 0x0a, 0xe9, 0xac, 0x22, 0x52, 0x76, 0xd4, 0x6e, 0xfa, 0xe0, 0x09, 0xef, 0x78, 0x82, 0x9f, 0x26, 0xf9, 0x06, 0xb5, 0xe7, 0x05, 0x0e, 0xf2, 0x46, 0x72, 0x93, 0xd3, 0x24, 0xbd, 0x87, 0x60,
- /* (2^298)P */ 0x14, 0x55, 0x84, 0x7b, 0x6c, 0x60, 0x80, 0x73, 0x8c, 0xbe, 0x2d, 0xd6, 0x69, 0xd6, 0x17, 0x26, 0x44, 0x9f, 0x88, 0xa2, 0x39, 0x7c, 0x89, 0xbc, 0x6d, 0x9e, 0x46, 0xb6, 0x68, 0x66, 0xea, 0xdc, 0x31, 0xd6, 0x21, 0x51, 0x9f, 0x28, 0x28, 0xaf, 0x9e, 0x47, 0x2c, 0x4c, 0x8f, 0xf3, 0xaf, 0x1f, 0xe4, 0xab, 0xac, 0xe9, 0x0c, 0x91, 0x3a, 0x61,
- /* (2^299)P */ 0xb0, 0x37, 0x55, 0x4b, 0xe9, 0xc3, 0xb1, 0xce, 0x42, 0xe6, 0xc5, 0x11, 0x7f, 0x2c, 0x11, 0xfc, 0x4e, 0x71, 0x17, 0x00, 0x74, 0x7f, 0xbf, 0x07, 0x4d, 0xfd, 0x40, 0xb2, 0x87, 0xb0, 0xef, 0x1f, 0x35, 0x2c, 0x2d, 0xd7, 0xe1, 0xe4, 0xad, 0x0e, 0x7f, 0x63, 0x66, 0x62, 0x23, 0x41, 0xf6, 0xc1, 0x14, 0xa6, 0xd7, 0xa9, 0x11, 0x56, 0x9d, 0x1b,
- /* (2^300)P */ 0x02, 0x82, 0x42, 0x18, 0x4f, 0x1b, 0xc9, 0x5d, 0x78, 0x5f, 0xee, 0xed, 0x01, 0x49, 0x8f, 0xf2, 0xa0, 0xe2, 0x6e, 0xbb, 0x6b, 0x04, 0x8d, 0xb2, 0x41, 0xae, 0xc8, 0x1b, 0x59, 0x34, 0xb8, 0x2a, 0xdb, 0x1f, 0xd2, 0x52, 0xdf, 0x3f, 0x35, 0x00, 0x8b, 0x61, 0xbc, 0x97, 0xa0, 0xc4, 0x77, 0xd1, 0xe4, 0x2c, 0x59, 0x68, 0xff, 0x30, 0xf2, 0xe2,
- /* (2^301)P */ 0x79, 0x08, 0xb1, 0xdb, 0x55, 0xae, 0xd0, 0xed, 0xda, 0xa0, 0xec, 0x6c, 0xae, 0x68, 0xf2, 0x0b, 0x61, 0xb3, 0xf5, 0x21, 0x69, 0x87, 0x0b, 0x03, 0xea, 0x8a, 0x15, 0xd9, 0x7e, 0xca, 0xf7, 0xcd, 0xf3, 0x33, 0xb3, 0x4c, 0x5b, 0x23, 0x4e, 0x6f, 0x90, 0xad, 0x91, 0x4b, 0x4f, 0x46, 0x37, 0xe5, 0xe8, 0xb7, 0xeb, 0xd5, 0xca, 0x34, 0x4e, 0x23,
- /* (2^302)P */ 0x09, 0x02, 0xdd, 0xfd, 0x70, 0xac, 0x56, 0x80, 0x36, 0x5e, 0x49, 0xd0, 0x3f, 0xc2, 0xe0, 0xba, 0x46, 0x7f, 0x5c, 0xf7, 0xc5, 0xbd, 0xd5, 0x55, 0x7d, 0x3f, 0xd5, 0x7d, 0x06, 0xdf, 0x27, 0x20, 0x4f, 0xe9, 0x30, 0xec, 0x1b, 0xa0, 0x0c, 0xd4, 0x2c, 0xe1, 0x2b, 0x65, 0x73, 0xea, 0x75, 0x35, 0xe8, 0xe6, 0x56, 0xd6, 0x07, 0x15, 0x99, 0xdf,
- /* (2^303)P */ 0x4e, 0x10, 0xb7, 0xd0, 0x63, 0x8c, 0xcf, 0x16, 0x00, 0x7c, 0x58, 0xdf, 0x86, 0xdc, 0x4e, 0xca, 0x9c, 0x40, 0x5a, 0x42, 0xfd, 0xec, 0x98, 0xa4, 0x42, 0x53, 0xae, 0x16, 0x9d, 0xfd, 0x75, 0x5a, 0x12, 0x56, 0x1e, 0xc6, 0x57, 0xcc, 0x79, 0x27, 0x96, 0x00, 0xcf, 0x80, 0x4f, 0x8a, 0x36, 0x5c, 0xbb, 0xe9, 0x12, 0xdb, 0xb6, 0x2b, 0xad, 0x96,
- /* (2^304)P */ 0x92, 0x32, 0x1f, 0xfd, 0xc6, 0x02, 0x94, 0x08, 0x1b, 0x60, 0x6a, 0x9f, 0x8b, 0xd6, 0xc8, 0xad, 0xd5, 0x1b, 0x27, 0x4e, 0xa4, 0x4d, 0x4a, 0x00, 0x10, 0x5f, 0x86, 0x11, 0xf5, 0xe3, 0x14, 0x32, 0x43, 0xee, 0xb9, 0xc7, 0xab, 0xf4, 0x6f, 0xe5, 0x66, 0x0c, 0x06, 0x0d, 0x96, 0x79, 0x28, 0xaf, 0x45, 0x2b, 0x56, 0xbe, 0xe4, 0x4a, 0x52, 0xd6,
- /* (2^305)P */ 0x15, 0x16, 0x69, 0xef, 0x60, 0xca, 0x82, 0x25, 0x0f, 0xc6, 0x30, 0xa0, 0x0a, 0xd1, 0x83, 0x29, 0xcd, 0xb6, 0x89, 0x6c, 0xf5, 0xb2, 0x08, 0x38, 0xe6, 0xca, 0x6b, 0x19, 0x93, 0xc6, 0x5f, 0x75, 0x8e, 0x60, 0x34, 0x23, 0xc4, 0x13, 0x17, 0x69, 0x55, 0xcc, 0x72, 0x9c, 0x2b, 0x6c, 0x80, 0xf4, 0x4b, 0x8b, 0xb6, 0x97, 0x65, 0x07, 0xb6, 0xfb,
- /* (2^306)P */ 0x01, 0x99, 0x74, 0x28, 0xa6, 0x67, 0xa3, 0xe5, 0x25, 0xfb, 0xdf, 0x82, 0x93, 0xe7, 0x35, 0x74, 0xce, 0xe3, 0x15, 0x1c, 0x1d, 0x79, 0x52, 0x84, 0x08, 0x04, 0x2f, 0x5c, 0xb8, 0xcd, 0x7f, 0x89, 0xb0, 0x39, 0x93, 0x63, 0xc9, 0x5d, 0x06, 0x01, 0x59, 0xf7, 0x7e, 0xf1, 0x4c, 0x3d, 0x12, 0x8d, 0x69, 0x1d, 0xb7, 0x21, 0x5e, 0x88, 0x82, 0xa2,
- /* (2^307)P */ 0x8e, 0x69, 0xaf, 0x9a, 0x41, 0x0d, 0x9d, 0xcf, 0x8e, 0x8d, 0x5c, 0x51, 0x6e, 0xde, 0x0e, 0x48, 0x23, 0x89, 0xe5, 0x37, 0x80, 0xd6, 0x9d, 0x72, 0x32, 0x26, 0x38, 0x2d, 0x63, 0xa0, 0xfa, 0xd3, 0x40, 0xc0, 0x8c, 0x68, 0x6f, 0x2b, 0x1e, 0x9a, 0x39, 0x51, 0x78, 0x74, 0x9a, 0x7b, 0x4a, 0x8f, 0x0c, 0xa0, 0x88, 0x60, 0xa5, 0x21, 0xcd, 0xc7,
- /* (2^308)P */ 0x3a, 0x7f, 0x73, 0x14, 0xbf, 0x89, 0x6a, 0x4c, 0x09, 0x5d, 0xf2, 0x93, 0x20, 0x2d, 0xc4, 0x29, 0x86, 0x06, 0x95, 0xab, 0x22, 0x76, 0x4c, 0x54, 0xe1, 0x7e, 0x80, 0x6d, 0xab, 0x29, 0x61, 0x87, 0x77, 0xf6, 0xc0, 0x3e, 0xda, 0xab, 0x65, 0x7e, 0x39, 0x12, 0xa1, 0x6b, 0x42, 0xf7, 0xc5, 0x97, 0x77, 0xec, 0x6f, 0x22, 0xbe, 0x44, 0xc7, 0x03,
- /* (2^309)P */ 0xa5, 0x23, 0x90, 0x41, 0xa3, 0xc5, 0x3e, 0xe0, 0xa5, 0x32, 0x49, 0x1f, 0x39, 0x78, 0xb1, 0xd8, 0x24, 0xea, 0xd4, 0x87, 0x53, 0x42, 0x51, 0xf4, 0xd9, 0x46, 0x25, 0x2f, 0x62, 0xa9, 0x90, 0x9a, 0x4a, 0x25, 0x8a, 0xd2, 0x10, 0xe7, 0x3c, 0xbc, 0x58, 0x8d, 0x16, 0x14, 0x96, 0xa4, 0x6f, 0xf8, 0x12, 0x69, 0x91, 0x73, 0xe2, 0xfa, 0xf4, 0x57,
- /* (2^310)P */ 0x51, 0x45, 0x3f, 0x96, 0xdc, 0x97, 0x38, 0xa6, 0x01, 0x63, 0x09, 0xea, 0xc2, 0x13, 0x30, 0xb0, 0x00, 0xb8, 0x0a, 0xce, 0xd1, 0x8f, 0x3e, 0x69, 0x62, 0x46, 0x33, 0x9c, 0xbf, 0x4b, 0xcb, 0x0c, 0x90, 0x1c, 0x45, 0xcf, 0x37, 0x5b, 0xf7, 0x4b, 0x5e, 0x95, 0xc3, 0x28, 0x9f, 0x08, 0x83, 0x53, 0x74, 0xab, 0x0c, 0xb4, 0xc0, 0xa1, 0xbc, 0x89,
- /* (2^311)P */ 0x06, 0xb1, 0x51, 0x15, 0x65, 0x60, 0x21, 0x17, 0x7a, 0x20, 0x65, 0xee, 0x12, 0x35, 0x4d, 0x46, 0xf4, 0xf8, 0xd0, 0xb1, 0xca, 0x09, 0x30, 0x08, 0x89, 0x23, 0x3b, 0xe7, 0xab, 0x8b, 0x77, 0xa6, 0xad, 0x25, 0xdd, 0xea, 0x3c, 0x7d, 0xa5, 0x24, 0xb3, 0xe8, 0xfa, 0xfb, 0xc9, 0xf2, 0x71, 0xe9, 0xfa, 0xf2, 0xdc, 0x54, 0xdd, 0x55, 0x2e, 0x2f,
- /* (2^312)P */ 0x7f, 0x96, 0x96, 0xfb, 0x52, 0x86, 0xcf, 0xea, 0x62, 0x18, 0xf1, 0x53, 0x1f, 0x61, 0x2a, 0x9f, 0x8c, 0x51, 0xca, 0x2c, 0xde, 0x6d, 0xce, 0xab, 0x58, 0x32, 0x0b, 0x33, 0x9b, 0x99, 0xb4, 0x5c, 0x88, 0x2a, 0x76, 0xcc, 0x3e, 0x54, 0x1e, 0x9d, 0xa2, 0x89, 0xe4, 0x19, 0xba, 0x80, 0xc8, 0x39, 0x32, 0x7f, 0x0f, 0xc7, 0x84, 0xbb, 0x43, 0x56,
- /* (2^313)P */ 0x9b, 0x07, 0xb4, 0x42, 0xa9, 0xa0, 0x78, 0x4f, 0x28, 0x70, 0x2b, 0x7e, 0x61, 0xe0, 0xdd, 0x02, 0x98, 0xfc, 0xed, 0x31, 0x80, 0xf1, 0x15, 0x52, 0x89, 0x23, 0xcd, 0x5d, 0x2b, 0xc5, 0x19, 0x32, 0xfb, 0x70, 0x50, 0x7a, 0x97, 0x6b, 0x42, 0xdb, 0xca, 0xdb, 0xc4, 0x59, 0x99, 0xe0, 0x12, 0x1f, 0x17, 0xba, 0x8b, 0xf0, 0xc4, 0x38, 0x5d, 0x27,
- /* (2^314)P */ 0x29, 0x1d, 0xdc, 0x2b, 0xf6, 0x5b, 0x04, 0x61, 0x36, 0x76, 0xa0, 0x56, 0x36, 0x6e, 0xd7, 0x24, 0x4d, 0xe7, 0xef, 0x44, 0xd2, 0xd5, 0x07, 0xcd, 0xc4, 0x9d, 0x80, 0x48, 0xc3, 0x38, 0xcf, 0xd8, 0xa3, 0xdd, 0xb2, 0x5e, 0xb5, 0x70, 0x15, 0xbb, 0x36, 0x85, 0x8a, 0xd7, 0xfb, 0x56, 0x94, 0x73, 0x9c, 0x81, 0xbe, 0xb1, 0x44, 0x28, 0xf1, 0x37,
- /* (2^315)P */ 0xbf, 0xcf, 0x5c, 0xd2, 0xe2, 0xea, 0xc2, 0xcd, 0x70, 0x7a, 0x9d, 0xcb, 0x81, 0xc1, 0xe9, 0xf1, 0x56, 0x71, 0x52, 0xf7, 0x1b, 0x87, 0xc6, 0xd8, 0xcc, 0xb2, 0x69, 0xf3, 0xb0, 0xbd, 0xba, 0x83, 0x12, 0x26, 0xc4, 0xce, 0x72, 0xde, 0x3b, 0x21, 0x28, 0x9e, 0x5a, 0x94, 0xf5, 0x04, 0xa3, 0xc8, 0x0f, 0x5e, 0xbc, 0x71, 0xf9, 0x0d, 0xce, 0xf5,
- /* (2^316)P */ 0x93, 0x97, 0x00, 0x85, 0xf4, 0xb4, 0x40, 0xec, 0xd9, 0x2b, 0x6c, 0xd6, 0x63, 0x9e, 0x93, 0x0a, 0x5a, 0xf4, 0xa7, 0x9a, 0xe3, 0x3c, 0xf0, 0x55, 0xd1, 0x96, 0x6c, 0xf5, 0x2a, 0xce, 0xd7, 0x95, 0x72, 0xbf, 0xc5, 0x0c, 0xce, 0x79, 0xa2, 0x0a, 0x78, 0xe0, 0x72, 0xd0, 0x66, 0x28, 0x05, 0x75, 0xd3, 0x23, 0x09, 0x91, 0xed, 0x7e, 0xc4, 0xbc,
- /* (2^317)P */ 0x77, 0xc2, 0x9a, 0xf7, 0xa6, 0xe6, 0x18, 0xb4, 0xe7, 0xf6, 0xda, 0xec, 0x44, 0x6d, 0xfb, 0x08, 0xee, 0x65, 0xa8, 0x92, 0x85, 0x1f, 0xba, 0x38, 0x93, 0x20, 0x5c, 0x4d, 0xd2, 0x18, 0x0f, 0x24, 0xbe, 0x1a, 0x96, 0x44, 0x7d, 0xeb, 0xb3, 0xda, 0x95, 0xf4, 0xaf, 0x6c, 0x06, 0x0f, 0x47, 0x37, 0xc8, 0x77, 0x63, 0xe1, 0x29, 0xef, 0xff, 0xa5,
- /* (2^318)P */ 0x16, 0x12, 0xd9, 0x47, 0x90, 0x22, 0x9b, 0x05, 0xf2, 0xa5, 0x9a, 0xae, 0x83, 0x98, 0xb5, 0xac, 0xab, 0x29, 0xaa, 0xdc, 0x5f, 0xde, 0xcd, 0xf7, 0x42, 0xad, 0x3b, 0x96, 0xd6, 0x3e, 0x6e, 0x52, 0x47, 0xb1, 0xab, 0x51, 0xde, 0x49, 0x7c, 0x87, 0x8d, 0x86, 0xe2, 0x70, 0x13, 0x21, 0x51, 0x1c, 0x0c, 0x25, 0xc1, 0xb0, 0xe6, 0x19, 0xcf, 0x12,
- /* (2^319)P */ 0xf0, 0xbc, 0x97, 0x8f, 0x4b, 0x2f, 0xd1, 0x1f, 0x8c, 0x57, 0xed, 0x3c, 0xf4, 0x26, 0x19, 0xbb, 0x60, 0xca, 0x24, 0xc5, 0xd9, 0x97, 0xe2, 0x5f, 0x76, 0x49, 0x39, 0x7e, 0x2d, 0x12, 0x21, 0x98, 0xda, 0xe6, 0xdb, 0xd2, 0xd8, 0x9f, 0x18, 0xd8, 0x83, 0x6c, 0xba, 0x89, 0x8d, 0x29, 0xfa, 0x46, 0x33, 0x8c, 0x28, 0xdf, 0x6a, 0xb3, 0x69, 0x28,
- /* (2^320)P */ 0x86, 0x17, 0xbc, 0xd6, 0x7c, 0xba, 0x1e, 0x83, 0xbb, 0x84, 0xb5, 0x8c, 0xad, 0xdf, 0xa1, 0x24, 0x81, 0x70, 0x40, 0x0f, 0xad, 0xad, 0x3b, 0x23, 0xd0, 0x93, 0xa0, 0x49, 0x5c, 0x4b, 0x51, 0xbe, 0x20, 0x49, 0x4e, 0xda, 0x2d, 0xd3, 0xad, 0x1b, 0x74, 0x08, 0x41, 0xf0, 0xef, 0x19, 0xe9, 0x45, 0x5d, 0x02, 0xae, 0x26, 0x25, 0xd9, 0xd1, 0xc2,
- /* (2^321)P */ 0x48, 0x81, 0x3e, 0xb2, 0x83, 0xf8, 0x4d, 0xb3, 0xd0, 0x4c, 0x75, 0xb3, 0xa0, 0x52, 0x26, 0xf2, 0xaf, 0x5d, 0x36, 0x70, 0x72, 0xd6, 0xb7, 0x88, 0x08, 0x69, 0xbd, 0x15, 0x25, 0xb1, 0x45, 0x1b, 0xb7, 0x0b, 0x5f, 0x71, 0x5d, 0x83, 0x49, 0xb9, 0x84, 0x3b, 0x7c, 0xc1, 0x50, 0x93, 0x05, 0x53, 0xe0, 0x61, 0xea, 0xc1, 0xef, 0xdb, 0x82, 0x97,
- /* (2^322)P */ 0x00, 0xd5, 0xc3, 0x3a, 0x4d, 0x8a, 0x23, 0x7a, 0xef, 0xff, 0x37, 0xef, 0xf3, 0xbc, 0xa9, 0xb6, 0xae, 0xd7, 0x3a, 0x7b, 0xfd, 0x3e, 0x8e, 0x9b, 0xab, 0x44, 0x54, 0x60, 0x28, 0x6c, 0xbf, 0x15, 0x24, 0x4a, 0x56, 0x60, 0x7f, 0xa9, 0x7a, 0x28, 0x59, 0x2c, 0x8a, 0xd1, 0x7d, 0x6b, 0x00, 0xfd, 0xa5, 0xad, 0xbc, 0x19, 0x3f, 0xcb, 0x73, 0xe0,
- /* (2^323)P */ 0xcf, 0x9e, 0x66, 0x06, 0x4d, 0x2b, 0xf5, 0x9c, 0xc2, 0x9d, 0x9e, 0xed, 0x5a, 0x5c, 0x2d, 0x00, 0xbf, 0x29, 0x90, 0x88, 0xe4, 0x5d, 0xfd, 0xe2, 0xf0, 0x38, 0xec, 0x4d, 0x26, 0xea, 0x54, 0xf0, 0x3c, 0x84, 0x10, 0x6a, 0xf9, 0x66, 0x9c, 0xe7, 0x21, 0xfd, 0x0f, 0xc7, 0x13, 0x50, 0x81, 0xb6, 0x50, 0xf9, 0x04, 0x7f, 0xa4, 0x37, 0x85, 0x14,
- /* (2^324)P */ 0xdb, 0x87, 0x49, 0xc7, 0xa8, 0x39, 0x0c, 0x32, 0x98, 0x0c, 0xb9, 0x1a, 0x1b, 0x4d, 0xe0, 0x8a, 0x9a, 0x8e, 0x8f, 0xab, 0x5a, 0x17, 0x3d, 0x04, 0x21, 0xce, 0x3e, 0x2c, 0xf9, 0xa3, 0x97, 0xe4, 0x77, 0x95, 0x0e, 0xb6, 0xa5, 0x15, 0xad, 0x3a, 0x1e, 0x46, 0x53, 0x17, 0x09, 0x83, 0x71, 0x4e, 0x86, 0x38, 0xd5, 0x23, 0x44, 0x16, 0x8d, 0xc8,
- /* (2^325)P */ 0x05, 0x5e, 0x99, 0x08, 0xbb, 0xc3, 0xc0, 0xb7, 0x6c, 0x12, 0xf2, 0xf3, 0xf4, 0x7c, 0x6a, 0x4d, 0x9e, 0xeb, 0x3d, 0xb9, 0x63, 0x94, 0xce, 0x81, 0xd8, 0x11, 0xcb, 0x55, 0x69, 0x4a, 0x20, 0x0b, 0x4c, 0x2e, 0x14, 0xb8, 0xd4, 0x6a, 0x7c, 0xf0, 0xed, 0xfc, 0x8f, 0xef, 0xa0, 0xeb, 0x6c, 0x01, 0xe2, 0xdc, 0x10, 0x22, 0xa2, 0x01, 0x85, 0x64,
- /* (2^326)P */ 0x58, 0xe1, 0x9c, 0x27, 0x55, 0xc6, 0x25, 0xa6, 0x7d, 0x67, 0x88, 0x65, 0x99, 0x6c, 0xcb, 0xdb, 0x27, 0x4f, 0x44, 0x29, 0xf5, 0x4a, 0x23, 0x10, 0xbc, 0x03, 0x3f, 0x36, 0x1e, 0xef, 0xb0, 0xba, 0x75, 0xe8, 0x74, 0x5f, 0x69, 0x3e, 0x26, 0x40, 0xb4, 0x2f, 0xdc, 0x43, 0xbf, 0xa1, 0x8b, 0xbd, 0xca, 0x6e, 0xc1, 0x6e, 0x21, 0x79, 0xa0, 0xd0,
- /* (2^327)P */ 0x78, 0x93, 0x4a, 0x2d, 0x22, 0x6e, 0x6e, 0x7d, 0x74, 0xd2, 0x66, 0x58, 0xce, 0x7b, 0x1d, 0x97, 0xb1, 0xf2, 0xda, 0x1c, 0x79, 0xfb, 0xba, 0xd1, 0xc0, 0xc5, 0x6e, 0xc9, 0x11, 0x89, 0xd2, 0x41, 0x8d, 0x70, 0xb9, 0xcc, 0xea, 0x6a, 0xb3, 0x45, 0xb6, 0x05, 0x2e, 0xf2, 0x17, 0xf1, 0x27, 0xb8, 0xed, 0x06, 0x1f, 0xdb, 0x9d, 0x1f, 0x69, 0x28,
- /* (2^328)P */ 0x93, 0x12, 0xa8, 0x11, 0xe1, 0x92, 0x30, 0x8d, 0xac, 0xe1, 0x1c, 0x60, 0x7c, 0xed, 0x2d, 0x2e, 0xd3, 0x03, 0x5c, 0x9c, 0xc5, 0xbd, 0x64, 0x4a, 0x8c, 0xba, 0x76, 0xfe, 0xc6, 0xc1, 0xea, 0xc2, 0x4f, 0xbe, 0x70, 0x3d, 0x64, 0xcf, 0x8e, 0x18, 0xcb, 0xcd, 0x57, 0xa7, 0xf7, 0x36, 0xa9, 0x6b, 0x3e, 0xb8, 0x69, 0xee, 0x47, 0xa2, 0x7e, 0xb2,
- /* (2^329)P */ 0x96, 0xaf, 0x3a, 0xf5, 0xed, 0xcd, 0xaf, 0xf7, 0x82, 0xaf, 0x59, 0x62, 0x0b, 0x36, 0x85, 0xf9, 0xaf, 0xd6, 0x38, 0xff, 0x87, 0x2e, 0x1d, 0x6c, 0x8b, 0xaf, 0x3b, 0xdf, 0x28, 0xa2, 0xd6, 0x4d, 0x80, 0x92, 0xc3, 0x0f, 0x34, 0xa8, 0xae, 0x69, 0x5d, 0x7b, 0x9d, 0xbc, 0xf5, 0xfd, 0x1d, 0xb1, 0x96, 0x55, 0x86, 0xe1, 0x5c, 0xb6, 0xac, 0xb9,
- /* (2^330)P */ 0x50, 0x9e, 0x37, 0x28, 0x7d, 0xa8, 0x33, 0x63, 0xda, 0x3f, 0x20, 0x98, 0x0e, 0x09, 0xa8, 0x77, 0x3b, 0x7a, 0xfc, 0x16, 0x85, 0x44, 0x64, 0x77, 0x65, 0x68, 0x92, 0x41, 0xc6, 0x1f, 0xdf, 0x27, 0xf9, 0xec, 0xa0, 0x61, 0x22, 0xea, 0x19, 0xe7, 0x75, 0x8b, 0x4e, 0xe5, 0x0f, 0xb7, 0xf7, 0xd2, 0x53, 0xf4, 0xdd, 0x4a, 0xaa, 0x78, 0x40, 0xb7,
- /* (2^331)P */ 0xd4, 0x89, 0xe3, 0x79, 0xba, 0xb6, 0xc3, 0xda, 0xe6, 0x78, 0x65, 0x7d, 0x6e, 0x22, 0x62, 0xb1, 0x3d, 0xea, 0x90, 0x84, 0x30, 0x5e, 0xd4, 0x39, 0x84, 0x78, 0xd9, 0x75, 0xd6, 0xce, 0x2a, 0x11, 0x29, 0x69, 0xa4, 0x5e, 0xaa, 0x2a, 0x98, 0x5a, 0xe5, 0x91, 0x8f, 0xb2, 0xfb, 0xda, 0x97, 0xe8, 0x83, 0x6f, 0x04, 0xb9, 0x5d, 0xaf, 0xe1, 0x9b,
- /* (2^332)P */ 0x8b, 0xe4, 0xe1, 0x48, 0x9c, 0xc4, 0x83, 0x89, 0xdf, 0x65, 0xd3, 0x35, 0x55, 0x13, 0xf4, 0x1f, 0x36, 0x92, 0x33, 0x38, 0xcb, 0xed, 0x15, 0xe6, 0x60, 0x2d, 0x25, 0xf5, 0x36, 0x60, 0x3a, 0x37, 0x9b, 0x71, 0x9d, 0x42, 0xb0, 0x14, 0xc8, 0xba, 0x62, 0xa3, 0x49, 0xb0, 0x88, 0xc1, 0x72, 0x73, 0xdd, 0x62, 0x40, 0xa9, 0x62, 0x88, 0x99, 0xca,
- /* (2^333)P */ 0x47, 0x7b, 0xea, 0xda, 0x46, 0x2f, 0x45, 0xc6, 0xe3, 0xb4, 0x4d, 0x8d, 0xac, 0x0b, 0x54, 0x22, 0x06, 0x31, 0x16, 0x66, 0x3e, 0xe4, 0x38, 0x12, 0xcd, 0xf3, 0xe7, 0x99, 0x37, 0xd9, 0x62, 0x24, 0x4b, 0x05, 0xf2, 0x58, 0xe6, 0x29, 0x4b, 0x0d, 0xf6, 0xc1, 0xba, 0xa0, 0x1e, 0x0f, 0xcb, 0x1f, 0xc6, 0x2b, 0x19, 0xfc, 0x82, 0x01, 0xd0, 0x86,
- /* (2^334)P */ 0xa2, 0xae, 0x77, 0x20, 0xfb, 0xa8, 0x18, 0xb4, 0x61, 0xef, 0xe8, 0x52, 0x79, 0xbb, 0x86, 0x90, 0x5d, 0x2e, 0x76, 0xed, 0x66, 0x60, 0x5d, 0x00, 0xb5, 0xa4, 0x00, 0x40, 0x89, 0xec, 0xd1, 0xd2, 0x0d, 0x26, 0xb9, 0x30, 0xb2, 0xd2, 0xb8, 0xe8, 0x0e, 0x56, 0xf9, 0x67, 0x94, 0x2e, 0x62, 0xe1, 0x79, 0x48, 0x2b, 0xa9, 0xfa, 0xea, 0xdb, 0x28,
- /* (2^335)P */ 0x35, 0xf1, 0xb0, 0x43, 0xbd, 0x27, 0xef, 0x18, 0x44, 0xa2, 0x04, 0xb4, 0x69, 0xa1, 0x97, 0x1f, 0x8c, 0x04, 0x82, 0x9b, 0x00, 0x6d, 0xf8, 0xbf, 0x7d, 0xc1, 0x5b, 0xab, 0xe8, 0xb2, 0x34, 0xbd, 0xaf, 0x7f, 0xb2, 0x0d, 0xf3, 0xed, 0xfc, 0x5b, 0x50, 0xee, 0xe7, 0x4a, 0x20, 0xd9, 0xf5, 0xc6, 0x9a, 0x97, 0x6d, 0x07, 0x2f, 0xb9, 0x31, 0x02,
- /* (2^336)P */ 0xf9, 0x54, 0x4a, 0xc5, 0x61, 0x7e, 0x1d, 0xa6, 0x0e, 0x1a, 0xa8, 0xd3, 0x8c, 0x36, 0x7d, 0xf1, 0x06, 0xb1, 0xac, 0x93, 0xcd, 0xe9, 0x8f, 0x61, 0x6c, 0x5d, 0x03, 0x23, 0xdf, 0x85, 0x53, 0x39, 0x63, 0x5e, 0xeb, 0xf3, 0xd3, 0xd3, 0x75, 0x97, 0x9b, 0x62, 0x9b, 0x01, 0xb3, 0x19, 0xd8, 0x2b, 0x36, 0xf2, 0x2c, 0x2c, 0x6f, 0x36, 0xc6, 0x3c,
- /* (2^337)P */ 0x05, 0x74, 0x43, 0x10, 0xb6, 0xb0, 0xf8, 0xbf, 0x02, 0x46, 0x9a, 0xee, 0xc1, 0xaf, 0xc1, 0xe5, 0x5a, 0x2e, 0xbb, 0xe1, 0xdc, 0xc6, 0xce, 0x51, 0x29, 0x50, 0xbf, 0x1b, 0xde, 0xff, 0xba, 0x4d, 0x8d, 0x8b, 0x7e, 0xe7, 0xbd, 0x5b, 0x8f, 0xbe, 0xe3, 0x75, 0x71, 0xff, 0x37, 0x05, 0x5a, 0x10, 0xeb, 0x54, 0x7e, 0x44, 0x72, 0x2c, 0xd4, 0xfc,
- /* (2^338)P */ 0x03, 0x12, 0x1c, 0xb2, 0x08, 0x90, 0xa1, 0x2d, 0x50, 0xa0, 0xad, 0x7f, 0x8d, 0xa6, 0x97, 0xc1, 0xbd, 0xdc, 0xc3, 0xa7, 0xad, 0x31, 0xdf, 0xb8, 0x03, 0x84, 0xc3, 0xb9, 0x29, 0x3d, 0x92, 0x2e, 0xc3, 0x90, 0x07, 0xe8, 0xa7, 0xc7, 0xbc, 0x61, 0xe9, 0x3e, 0xa0, 0x35, 0xda, 0x1d, 0xab, 0x48, 0xfe, 0x50, 0xc9, 0x25, 0x59, 0x23, 0x69, 0x3f,
- /* (2^339)P */ 0x8e, 0x91, 0xab, 0x6b, 0x91, 0x4f, 0x89, 0x76, 0x67, 0xad, 0xb2, 0x65, 0x9d, 0xad, 0x02, 0x36, 0xdc, 0xac, 0x96, 0x93, 0x97, 0x21, 0x14, 0xd0, 0xe8, 0x11, 0x60, 0x1e, 0xeb, 0x96, 0x06, 0xf2, 0x53, 0xf2, 0x6d, 0xb7, 0x93, 0x6f, 0x26, 0x91, 0x23, 0xe3, 0x34, 0x04, 0x92, 0x91, 0x37, 0x08, 0x50, 0xd6, 0x28, 0x09, 0x27, 0xa1, 0x0c, 0x00,
- /* (2^340)P */ 0x1f, 0xbb, 0x21, 0x26, 0x33, 0xcb, 0xa4, 0xd1, 0xee, 0x85, 0xf9, 0xd9, 0x3c, 0x90, 0xc3, 0xd1, 0x26, 0xa2, 0x25, 0x93, 0x43, 0x61, 0xed, 0x91, 0x6e, 0x54, 0x03, 0x2e, 0x42, 0x9d, 0xf7, 0xa6, 0x02, 0x0f, 0x2f, 0x9c, 0x7a, 0x8d, 0x12, 0xc2, 0x18, 0xfc, 0x41, 0xff, 0x85, 0x26, 0x1a, 0x44, 0x55, 0x0b, 0x89, 0xab, 0x6f, 0x62, 0x33, 0x8c,
- /* (2^341)P */ 0xe0, 0x3c, 0x5d, 0x70, 0x64, 0x87, 0x81, 0x35, 0xf2, 0x37, 0xa6, 0x24, 0x3e, 0xe0, 0x62, 0xd5, 0x71, 0xe7, 0x93, 0xfb, 0xac, 0xc3, 0xe7, 0xc7, 0x04, 0xe2, 0x70, 0xd3, 0x29, 0x5b, 0x21, 0xbf, 0xf4, 0x26, 0x5d, 0xf3, 0x95, 0xb4, 0x2a, 0x6a, 0x07, 0x55, 0xa6, 0x4b, 0x3b, 0x15, 0xf2, 0x25, 0x8a, 0x95, 0x3f, 0x63, 0x2f, 0x7a, 0x23, 0x96,
- /* (2^342)P */ 0x0d, 0x3d, 0xd9, 0x13, 0xa7, 0xb3, 0x5e, 0x67, 0xf7, 0x02, 0x23, 0xee, 0x84, 0xff, 0x99, 0xda, 0xb9, 0x53, 0xf8, 0xf0, 0x0e, 0x39, 0x2f, 0x3c, 0x64, 0x34, 0xe3, 0x09, 0xfd, 0x2b, 0x33, 0xc7, 0xfe, 0x62, 0x2b, 0x84, 0xdf, 0x2b, 0xd2, 0x7c, 0x26, 0x01, 0x70, 0x66, 0x5b, 0x85, 0xc2, 0xbe, 0x88, 0x37, 0xf1, 0x30, 0xac, 0xb8, 0x76, 0xa3,
- /* (2^343)P */ 0x6e, 0x01, 0xf0, 0x55, 0x35, 0xe4, 0xbd, 0x43, 0x62, 0x9d, 0xd6, 0x11, 0xef, 0x6f, 0xb8, 0x8c, 0xaa, 0x98, 0x87, 0xc6, 0x6d, 0xc4, 0xcc, 0x74, 0x92, 0x53, 0x4a, 0xdf, 0xe4, 0x08, 0x89, 0x17, 0xd0, 0x0f, 0xf4, 0x00, 0x60, 0x78, 0x08, 0x44, 0xb5, 0xda, 0x18, 0xed, 0x98, 0xc8, 0x61, 0x3d, 0x39, 0xdb, 0xcf, 0x1d, 0x49, 0x40, 0x65, 0x75,
- /* (2^344)P */ 0x8e, 0x10, 0xae, 0x5f, 0x06, 0xd2, 0x95, 0xfd, 0x20, 0x16, 0x49, 0x5b, 0x57, 0xbe, 0x22, 0x8b, 0x43, 0xfb, 0xe6, 0xcc, 0x26, 0xa5, 0x5d, 0xd3, 0x68, 0xc5, 0xf9, 0x5a, 0x86, 0x24, 0x87, 0x27, 0x05, 0xfd, 0xe2, 0xff, 0xb3, 0xa3, 0x7b, 0x37, 0x59, 0xc5, 0x4e, 0x14, 0x94, 0xf9, 0x3b, 0xcb, 0x7c, 0xed, 0xca, 0x1d, 0xb2, 0xac, 0x05, 0x4a,
- /* (2^345)P */ 0xf4, 0xd1, 0x81, 0xeb, 0x89, 0xbf, 0xfe, 0x1e, 0x41, 0x92, 0x29, 0xee, 0xe1, 0x43, 0xf5, 0x86, 0x1d, 0x2f, 0xbb, 0x1e, 0x84, 0x5d, 0x7b, 0x8d, 0xd5, 0xda, 0xee, 0x1e, 0x8a, 0xd0, 0x27, 0xf2, 0x60, 0x51, 0x59, 0x82, 0xf4, 0x84, 0x2b, 0x5b, 0x14, 0x2d, 0x81, 0x82, 0x3e, 0x2b, 0xb4, 0x6d, 0x51, 0x4f, 0xc5, 0xcb, 0xbf, 0x74, 0xe3, 0xb4,
- /* (2^346)P */ 0x19, 0x2f, 0x22, 0xb3, 0x04, 0x5f, 0x81, 0xca, 0x05, 0x60, 0xb9, 0xaa, 0xee, 0x0e, 0x2f, 0x48, 0x38, 0xf9, 0x91, 0xb4, 0x66, 0xe4, 0x57, 0x28, 0x54, 0x10, 0xe9, 0x61, 0x9d, 0xd4, 0x90, 0x75, 0xb1, 0x39, 0x23, 0xb6, 0xfc, 0x82, 0xe0, 0xfa, 0xbb, 0x5c, 0x6e, 0xc3, 0x44, 0x13, 0x00, 0x83, 0x55, 0x9e, 0x8e, 0x10, 0x61, 0x81, 0x91, 0x04,
- /* (2^347)P */ 0x5f, 0x2a, 0xd7, 0x81, 0xd9, 0x9c, 0xbb, 0x79, 0xbc, 0x62, 0x56, 0x98, 0x03, 0x5a, 0x18, 0x85, 0x2a, 0x9c, 0xd0, 0xfb, 0xd2, 0xb1, 0xaf, 0xef, 0x0d, 0x24, 0xc5, 0xfa, 0x39, 0xbb, 0x6b, 0xed, 0xa4, 0xdf, 0xe4, 0x87, 0xcd, 0x41, 0xd3, 0x72, 0x32, 0xc6, 0x28, 0x21, 0xb1, 0xba, 0x8b, 0xa3, 0x91, 0x79, 0x76, 0x22, 0x25, 0x10, 0x61, 0xd1,
- /* (2^348)P */ 0x73, 0xb5, 0x32, 0x97, 0xdd, 0xeb, 0xdd, 0x22, 0x22, 0xf1, 0x33, 0x3c, 0x77, 0x56, 0x7d, 0x6b, 0x48, 0x2b, 0x05, 0x81, 0x03, 0x03, 0x91, 0x9a, 0xe3, 0x5e, 0xd4, 0xee, 0x3f, 0xf8, 0xbb, 0x50, 0x21, 0x32, 0x4c, 0x4a, 0x58, 0x49, 0xde, 0x0c, 0xde, 0x30, 0x82, 0x3d, 0x92, 0xf0, 0x6c, 0xcc, 0x32, 0x3e, 0xd2, 0x78, 0x8a, 0x6e, 0x2c, 0xd0,
- /* (2^349)P */ 0xf0, 0xf7, 0xa1, 0x0b, 0xc1, 0x74, 0x85, 0xa8, 0xe9, 0xdd, 0x48, 0xa1, 0xc0, 0x16, 0xd8, 0x2b, 0x61, 0x08, 0xc2, 0x2b, 0x30, 0x26, 0x79, 0xce, 0x9e, 0xfd, 0x39, 0xd7, 0x81, 0xa4, 0x63, 0x8c, 0xd5, 0x74, 0xa0, 0x88, 0xfa, 0x03, 0x30, 0xe9, 0x7f, 0x2b, 0xc6, 0x02, 0xc9, 0x5e, 0xe4, 0xd5, 0x4d, 0x92, 0xd0, 0xf6, 0xf2, 0x5b, 0x79, 0x08,
- /* (2^350)P */ 0x34, 0x89, 0x81, 0x43, 0xd1, 0x94, 0x2c, 0x10, 0x54, 0x9b, 0xa0, 0xe5, 0x44, 0xe8, 0xc2, 0x2f, 0x3e, 0x0e, 0x74, 0xae, 0xba, 0xe2, 0xac, 0x85, 0x6b, 0xd3, 0x5c, 0x97, 0xf7, 0x90, 0xf1, 0x12, 0xc0, 0x03, 0xc8, 0x1f, 0x37, 0x72, 0x8c, 0x9b, 0x9c, 0x17, 0x96, 0x9d, 0xc7, 0xbf, 0xa3, 0x3f, 0x44, 0x3d, 0x87, 0x81, 0xbd, 0x81, 0xa6, 0x5f,
- /* (2^351)P */ 0xe4, 0xff, 0x78, 0x62, 0x82, 0x5b, 0x76, 0x58, 0xf5, 0x5b, 0xa6, 0xc4, 0x53, 0x11, 0x3b, 0x7b, 0xaa, 0x67, 0xf8, 0xea, 0x3b, 0x5d, 0x9a, 0x2e, 0x04, 0xeb, 0x4a, 0x24, 0xfb, 0x56, 0xf0, 0xa8, 0xd4, 0x14, 0xed, 0x0f, 0xfd, 0xc5, 0x26, 0x17, 0x2a, 0xf0, 0xb9, 0x13, 0x8c, 0xbd, 0x65, 0x14, 0x24, 0x95, 0x27, 0x12, 0x63, 0x2a, 0x09, 0x18,
- /* (2^352)P */ 0xe1, 0x5c, 0xe7, 0xe0, 0x00, 0x6a, 0x96, 0xf2, 0x49, 0x6a, 0x39, 0xa5, 0xe0, 0x17, 0x79, 0x4a, 0x63, 0x07, 0x62, 0x09, 0x61, 0x1b, 0x6e, 0xa9, 0xb5, 0x62, 0xb7, 0xde, 0xdf, 0x80, 0x4c, 0x5a, 0x99, 0x73, 0x59, 0x9d, 0xfb, 0xb1, 0x5e, 0xbe, 0xb8, 0xb7, 0x63, 0x93, 0xe8, 0xad, 0x5e, 0x1f, 0xae, 0x59, 0x1c, 0xcd, 0xb4, 0xc2, 0xb3, 0x8a,
- /* (2^353)P */ 0x78, 0x53, 0xa1, 0x4c, 0x70, 0x9c, 0x63, 0x7e, 0xb3, 0x12, 0x40, 0x5f, 0xbb, 0x23, 0xa7, 0xf7, 0x77, 0x96, 0x5b, 0x4d, 0x91, 0x10, 0x52, 0x85, 0x9e, 0xa5, 0x38, 0x0b, 0xfd, 0x25, 0x01, 0x4b, 0xfa, 0x4d, 0xd3, 0x3f, 0x78, 0x74, 0x42, 0xff, 0x62, 0x2d, 0x27, 0xdc, 0x9d, 0xd1, 0x29, 0x76, 0x2e, 0x78, 0xb3, 0x35, 0xfa, 0x15, 0xd5, 0x38,
- /* (2^354)P */ 0x8b, 0xc7, 0x43, 0xce, 0xf0, 0x5e, 0xf1, 0x0d, 0x02, 0x38, 0xe8, 0x82, 0xc9, 0x25, 0xad, 0x2d, 0x27, 0xa4, 0x54, 0x18, 0xb2, 0x30, 0x73, 0xa4, 0x41, 0x08, 0xe4, 0x86, 0xe6, 0x8c, 0xe9, 0x2a, 0x34, 0xb3, 0xd6, 0x61, 0x8f, 0x66, 0x26, 0x08, 0xb6, 0x06, 0x33, 0xaa, 0x12, 0xac, 0x72, 0xec, 0x2e, 0x52, 0xa3, 0x25, 0x3e, 0xd7, 0x62, 0xe8,
- /* (2^355)P */ 0xc4, 0xbb, 0x89, 0xc8, 0x40, 0xcc, 0x84, 0xec, 0x4a, 0xd9, 0xc4, 0x55, 0x78, 0x00, 0xcf, 0xd8, 0xe9, 0x24, 0x59, 0xdc, 0x5e, 0xf0, 0x66, 0xa1, 0x83, 0xae, 0x97, 0x18, 0xc5, 0x54, 0x27, 0xa2, 0x21, 0x52, 0x03, 0x31, 0x5b, 0x11, 0x67, 0xf6, 0x12, 0x00, 0x87, 0x2f, 0xff, 0x59, 0x70, 0x8f, 0x6d, 0x71, 0xab, 0xab, 0x24, 0xb8, 0xba, 0x35,
- /* (2^356)P */ 0x69, 0x43, 0xa7, 0x14, 0x06, 0x96, 0xe9, 0xc2, 0xe3, 0x2b, 0x45, 0x22, 0xc0, 0xd0, 0x2f, 0x34, 0xd1, 0x01, 0x99, 0xfc, 0x99, 0x38, 0xa1, 0x25, 0x2e, 0x59, 0x6c, 0x27, 0xc9, 0xeb, 0x7b, 0xdc, 0x4e, 0x26, 0x68, 0xba, 0xfa, 0xec, 0x02, 0x05, 0x64, 0x80, 0x30, 0x20, 0x5c, 0x26, 0x7f, 0xaf, 0x95, 0x17, 0x3d, 0x5c, 0x9e, 0x96, 0x96, 0xaf,
- /* (2^357)P */ 0xa6, 0xba, 0x21, 0x29, 0x32, 0xe2, 0x98, 0xde, 0x9b, 0x6d, 0x0b, 0x44, 0x91, 0xa8, 0x3e, 0xd4, 0xb8, 0x04, 0x6c, 0xf6, 0x04, 0x39, 0xbd, 0x52, 0x05, 0x15, 0x27, 0x78, 0x8e, 0x55, 0xac, 0x79, 0xc5, 0xe6, 0x00, 0x7f, 0x90, 0xa2, 0xdd, 0x07, 0x13, 0xe0, 0x24, 0x70, 0x5c, 0x0f, 0x4d, 0xa9, 0xf9, 0xae, 0xcb, 0x34, 0x10, 0x9d, 0x89, 0x9d,
- /* (2^358)P */ 0x12, 0xe0, 0xb3, 0x9f, 0xc4, 0x96, 0x1d, 0xcf, 0xed, 0x99, 0x64, 0x28, 0x8d, 0xc7, 0x31, 0x82, 0xee, 0x5e, 0x75, 0x48, 0xff, 0x3a, 0xf2, 0x09, 0x34, 0x03, 0x93, 0x52, 0x19, 0xb2, 0xc5, 0x81, 0x93, 0x45, 0x5e, 0x59, 0x21, 0x2b, 0xec, 0x89, 0xba, 0x36, 0x6e, 0xf9, 0x82, 0x75, 0x7e, 0x82, 0x3f, 0xaa, 0xe2, 0xe3, 0x3b, 0x94, 0xfd, 0x98,
- /* (2^359)P */ 0x7c, 0xdb, 0x75, 0x31, 0x61, 0xfb, 0x15, 0x28, 0x94, 0xd7, 0xc3, 0x5a, 0xa9, 0xa1, 0x0a, 0x66, 0x0f, 0x2b, 0x13, 0x3e, 0x42, 0xb5, 0x28, 0x3a, 0xca, 0x83, 0xf3, 0x61, 0x22, 0xf4, 0x40, 0xc5, 0xdf, 0xe7, 0x31, 0x9f, 0x7e, 0x51, 0x75, 0x06, 0x9d, 0x51, 0xc8, 0xe7, 0x9f, 0xc3, 0x71, 0x4f, 0x3d, 0x5b, 0xfb, 0xe9, 0x8e, 0x08, 0x40, 0x8e,
- /* (2^360)P */ 0xf7, 0x31, 0xad, 0x50, 0x5d, 0x25, 0x93, 0x73, 0x68, 0xf6, 0x7c, 0x89, 0x5a, 0x3d, 0x9f, 0x9b, 0x05, 0x82, 0xe7, 0x70, 0x4b, 0x19, 0xaa, 0xcf, 0xff, 0xde, 0x50, 0x8f, 0x2f, 0x69, 0xd3, 0xf0, 0x99, 0x51, 0x6b, 0x9d, 0xb6, 0x56, 0x6f, 0xf8, 0x4c, 0x74, 0x8b, 0x4c, 0x91, 0xf9, 0xa9, 0xb1, 0x3e, 0x07, 0xdf, 0x0b, 0x27, 0x8a, 0xb1, 0xed,
- /* (2^361)P */ 0xfb, 0x67, 0xd9, 0x48, 0xd2, 0xe4, 0x44, 0x9b, 0x43, 0x15, 0x8a, 0xeb, 0x00, 0x53, 0xad, 0x25, 0xc7, 0x7e, 0x19, 0x30, 0x87, 0xb7, 0xd5, 0x5f, 0x04, 0xf8, 0xaa, 0xdd, 0x57, 0xae, 0x34, 0x75, 0xe2, 0x84, 0x4b, 0x54, 0x60, 0x37, 0x95, 0xe4, 0xd3, 0xec, 0xac, 0xef, 0x47, 0x31, 0xa3, 0xc8, 0x31, 0x22, 0xdb, 0x26, 0xe7, 0x6a, 0xb5, 0xad,
- /* (2^362)P */ 0x44, 0x09, 0x5c, 0x95, 0xe4, 0x72, 0x3c, 0x1a, 0xd1, 0xac, 0x42, 0x51, 0x99, 0x6f, 0xfa, 0x1f, 0xf2, 0x22, 0xbe, 0xff, 0x7b, 0x66, 0xf5, 0x6c, 0xb3, 0x66, 0xc7, 0x4d, 0x78, 0x31, 0x83, 0x80, 0xf5, 0x41, 0xe9, 0x7f, 0xbe, 0xf7, 0x23, 0x49, 0x6b, 0x84, 0x4e, 0x7e, 0x47, 0x07, 0x6e, 0x74, 0xdf, 0xe5, 0x9d, 0x9e, 0x56, 0x2a, 0xc0, 0xbc,
- /* (2^363)P */ 0xac, 0x10, 0x80, 0x8c, 0x7c, 0xfa, 0x83, 0xdf, 0xb3, 0xd0, 0xc4, 0xbe, 0xfb, 0x9f, 0xac, 0xc9, 0xc3, 0x40, 0x95, 0x0b, 0x09, 0x23, 0xda, 0x63, 0x67, 0xcf, 0xe7, 0x9f, 0x7d, 0x7b, 0x6b, 0xe2, 0xe6, 0x6d, 0xdb, 0x87, 0x9e, 0xa6, 0xff, 0x6d, 0xab, 0xbd, 0xfb, 0x54, 0x84, 0x68, 0xcf, 0x89, 0xf1, 0xd0, 0xe2, 0x85, 0x61, 0xdc, 0x22, 0xd1,
- /* (2^364)P */ 0xa8, 0x48, 0xfb, 0x8c, 0x6a, 0x63, 0x01, 0x72, 0x43, 0x43, 0xeb, 0x21, 0xa3, 0x00, 0x8a, 0xc0, 0x87, 0x51, 0x9e, 0x86, 0x75, 0x16, 0x79, 0xf9, 0x6b, 0x11, 0x80, 0x62, 0xc2, 0x9d, 0xb8, 0x8c, 0x30, 0x8e, 0x8d, 0x03, 0x52, 0x7e, 0x31, 0x59, 0x38, 0xf9, 0x25, 0xc7, 0x0f, 0xc7, 0xa8, 0x2b, 0x5c, 0x80, 0xfa, 0x90, 0xa2, 0x63, 0xca, 0xe7,
- /* (2^365)P */ 0xf1, 0x5d, 0xb5, 0xd9, 0x20, 0x10, 0x7d, 0x0f, 0xc5, 0x50, 0x46, 0x07, 0xff, 0x02, 0x75, 0x2b, 0x4a, 0xf3, 0x39, 0x91, 0x72, 0xb7, 0xd5, 0xcc, 0x38, 0xb8, 0xe7, 0x36, 0x26, 0x5e, 0x11, 0x97, 0x25, 0xfb, 0x49, 0x68, 0xdc, 0xb4, 0x46, 0x87, 0x5c, 0xc2, 0x7f, 0xaa, 0x7d, 0x36, 0x23, 0xa6, 0xc6, 0x53, 0xec, 0xbc, 0x57, 0x47, 0xc1, 0x2b,
- /* (2^366)P */ 0x25, 0x5d, 0x7d, 0x95, 0xda, 0x0b, 0x8f, 0x78, 0x1e, 0x19, 0x09, 0xfa, 0x67, 0xe0, 0xa0, 0x17, 0x24, 0x76, 0x6c, 0x30, 0x1f, 0x62, 0x3d, 0xbe, 0x45, 0x70, 0xcc, 0xb6, 0x1e, 0x68, 0x06, 0x25, 0x68, 0x16, 0x1a, 0x33, 0x3f, 0x90, 0xc7, 0x78, 0x2d, 0x98, 0x3c, 0x2f, 0xb9, 0x2d, 0x94, 0x0b, 0xfb, 0x49, 0x56, 0x30, 0xd7, 0xc1, 0xe6, 0x48,
- /* (2^367)P */ 0x7a, 0xd1, 0xe0, 0x8e, 0x67, 0xfc, 0x0b, 0x50, 0x1f, 0x84, 0x98, 0xfa, 0xaf, 0xae, 0x2e, 0x31, 0x27, 0xcf, 0x3f, 0xf2, 0x6e, 0x8d, 0x81, 0x8f, 0xd2, 0x5f, 0xde, 0xd3, 0x5e, 0xe9, 0xe7, 0x13, 0x48, 0x83, 0x5a, 0x4e, 0x84, 0xd1, 0x58, 0xcf, 0x6b, 0x84, 0xdf, 0x13, 0x1d, 0x91, 0x85, 0xe8, 0xcb, 0x29, 0x79, 0xd2, 0xca, 0xac, 0x6a, 0x93,
- /* (2^368)P */ 0x53, 0x82, 0xce, 0x61, 0x96, 0x88, 0x6f, 0xe1, 0x4a, 0x4c, 0x1e, 0x30, 0x73, 0xe8, 0x74, 0xde, 0x40, 0x2b, 0xe0, 0xc4, 0xb5, 0xd8, 0x7c, 0x15, 0xe7, 0xe1, 0xb1, 0xe0, 0xd6, 0x88, 0xb1, 0x6a, 0x57, 0x19, 0x6a, 0x22, 0x66, 0x57, 0xf6, 0x8d, 0xfd, 0xc0, 0xf2, 0xa3, 0x03, 0x56, 0xfb, 0x2e, 0x75, 0x5e, 0xc7, 0x8e, 0x22, 0x96, 0x5c, 0x06,
- /* (2^369)P */ 0x98, 0x7e, 0xbf, 0x3e, 0xbf, 0x24, 0x9d, 0x15, 0xd3, 0xf6, 0xd3, 0xd2, 0xf0, 0x11, 0xf2, 0xdb, 0x36, 0x23, 0x38, 0xf7, 0x1d, 0x71, 0x20, 0xd2, 0x54, 0x7f, 0x1e, 0x24, 0x8f, 0xe2, 0xaa, 0xf7, 0x3f, 0x6b, 0x41, 0x4e, 0xdc, 0x0e, 0xec, 0xe8, 0x35, 0x0a, 0x08, 0x6d, 0x89, 0x5b, 0x32, 0x91, 0x01, 0xb6, 0xe0, 0x2c, 0xc6, 0xa1, 0xbe, 0xb4,
- /* (2^370)P */ 0x29, 0xf2, 0x1e, 0x1c, 0xdc, 0x68, 0x8a, 0x43, 0x87, 0x2c, 0x48, 0xb3, 0x9e, 0xed, 0xd2, 0x82, 0x46, 0xac, 0x2f, 0xef, 0x93, 0x34, 0x37, 0xca, 0x64, 0x8d, 0xc9, 0x06, 0x90, 0xbb, 0x78, 0x0a, 0x3c, 0x4c, 0xcf, 0x35, 0x7a, 0x0f, 0xf7, 0xa7, 0xf4, 0x2f, 0x45, 0x69, 0x3f, 0xa9, 0x5d, 0xce, 0x7b, 0x8a, 0x84, 0xc3, 0xae, 0xf4, 0xda, 0xd5,
- /* (2^371)P */ 0xca, 0xba, 0x95, 0x43, 0x05, 0x7b, 0x06, 0xd9, 0x5c, 0x0a, 0x18, 0x5f, 0x6a, 0x6a, 0xce, 0xc0, 0x3d, 0x95, 0x51, 0x0e, 0x1a, 0xbe, 0x85, 0x7a, 0xf2, 0x69, 0xec, 0xc0, 0x8c, 0xca, 0xa3, 0x32, 0x0a, 0x76, 0x50, 0xc6, 0x76, 0x61, 0x00, 0x89, 0xbf, 0x6e, 0x0f, 0x48, 0x90, 0x31, 0x93, 0xec, 0x34, 0x70, 0xf0, 0xc3, 0x8d, 0xf0, 0x0f, 0xb5,
- /* (2^372)P */ 0xbe, 0x23, 0xe2, 0x18, 0x99, 0xf1, 0xed, 0x8a, 0xf6, 0xc9, 0xac, 0xb8, 0x1e, 0x9a, 0x3c, 0x15, 0xae, 0xd7, 0x6d, 0xb3, 0x04, 0xee, 0x5b, 0x0d, 0x1e, 0x79, 0xb7, 0xf9, 0xf9, 0x8d, 0xad, 0xf9, 0x8f, 0x5a, 0x6a, 0x7b, 0xd7, 0x9b, 0xca, 0x62, 0xfe, 0x9c, 0xc0, 0x6f, 0x6d, 0x9d, 0x76, 0xa3, 0x69, 0xb9, 0x4c, 0xa1, 0xc4, 0x0c, 0x76, 0xaa,
- /* (2^373)P */ 0x1c, 0x06, 0xfe, 0x3f, 0x45, 0x70, 0xcd, 0x97, 0xa9, 0xa2, 0xb1, 0xd3, 0xf2, 0xa5, 0x0c, 0x49, 0x2c, 0x75, 0x73, 0x1f, 0xcf, 0x00, 0xaf, 0xd5, 0x2e, 0xde, 0x0d, 0x8f, 0x8f, 0x7c, 0xc4, 0x58, 0xce, 0xd4, 0xf6, 0x24, 0x19, 0x2e, 0xd8, 0xc5, 0x1d, 0x1a, 0x3f, 0xb8, 0x4f, 0xbc, 0x7d, 0xbd, 0x68, 0xe3, 0x81, 0x98, 0x1b, 0xa8, 0xc9, 0xd9,
- /* (2^374)P */ 0x39, 0x95, 0x78, 0x24, 0x6c, 0x38, 0xe4, 0xe7, 0xd0, 0x8d, 0xb9, 0x38, 0x71, 0x5e, 0xc1, 0x62, 0x80, 0xcc, 0xcb, 0x8c, 0x97, 0xca, 0xf8, 0xb9, 0xd9, 0x9c, 0xce, 0x72, 0x7b, 0x70, 0xee, 0x5f, 0xea, 0xa2, 0xdf, 0xa9, 0x14, 0x10, 0xf9, 0x6e, 0x59, 0x9f, 0x9c, 0xe0, 0x0c, 0xb2, 0x07, 0x97, 0xcd, 0xd2, 0x89, 0x16, 0xfd, 0x9c, 0xa8, 0xa5,
- /* (2^375)P */ 0x5a, 0x61, 0xf1, 0x59, 0x7c, 0x38, 0xda, 0xe2, 0x85, 0x99, 0x68, 0xe9, 0xc9, 0xf7, 0x32, 0x7e, 0xc4, 0xca, 0xb7, 0x11, 0x08, 0x69, 0x2b, 0x66, 0x02, 0xf7, 0x2e, 0x18, 0xc3, 0x8e, 0xe1, 0xf9, 0xc5, 0x19, 0x9a, 0x0a, 0x9c, 0x07, 0xba, 0xc7, 0x9c, 0x03, 0x34, 0x89, 0x99, 0x67, 0x0b, 0x16, 0x4b, 0x07, 0x36, 0x16, 0x36, 0x2c, 0xe2, 0xa1,
- /* (2^376)P */ 0x70, 0x10, 0x91, 0x27, 0xa8, 0x24, 0x8e, 0x29, 0x04, 0x6f, 0x79, 0x1f, 0xd3, 0xa5, 0x68, 0xd3, 0x0b, 0x7d, 0x56, 0x4d, 0x14, 0x57, 0x7b, 0x2e, 0x00, 0x9f, 0x9a, 0xfd, 0x6c, 0x63, 0x18, 0x81, 0xdb, 0x9d, 0xb7, 0xd7, 0xa4, 0x1e, 0xe8, 0x40, 0xf1, 0x4c, 0xa3, 0x01, 0xd5, 0x4b, 0x75, 0xea, 0xdd, 0x97, 0xfd, 0x5b, 0xb2, 0x66, 0x6a, 0x24,
- /* (2^377)P */ 0x72, 0x11, 0xfe, 0x73, 0x1b, 0xd3, 0xea, 0x7f, 0x93, 0x15, 0x15, 0x05, 0xfe, 0x40, 0xe8, 0x28, 0xd8, 0x50, 0x47, 0x66, 0xfa, 0xb7, 0xb5, 0x04, 0xba, 0x35, 0x1e, 0x32, 0x9f, 0x5f, 0x32, 0xba, 0x3d, 0xd1, 0xed, 0x9a, 0x76, 0xca, 0xa3, 0x3e, 0x77, 0xd8, 0xd8, 0x7c, 0x5f, 0x68, 0x42, 0xb5, 0x86, 0x7f, 0x3b, 0xc9, 0xc1, 0x89, 0x64, 0xda,
- /* (2^378)P */ 0xd5, 0xd4, 0x17, 0x31, 0xfc, 0x6a, 0xfd, 0xb8, 0xe8, 0xe5, 0x3e, 0x39, 0x06, 0xe4, 0xd1, 0x90, 0x2a, 0xca, 0xf6, 0x54, 0x6c, 0x1b, 0x2f, 0x49, 0x97, 0xb1, 0x2a, 0x82, 0x43, 0x3d, 0x1f, 0x8b, 0xe2, 0x47, 0xc5, 0x24, 0xa8, 0xd5, 0x53, 0x29, 0x7d, 0xc6, 0x87, 0xa6, 0x25, 0x3a, 0x64, 0xdd, 0x71, 0x08, 0x9e, 0xcd, 0xe9, 0x45, 0xc7, 0xba,
- /* (2^379)P */ 0x37, 0x72, 0x6d, 0x13, 0x7a, 0x8d, 0x04, 0x31, 0xe6, 0xe3, 0x9e, 0x36, 0x71, 0x3e, 0xc0, 0x1e, 0xe3, 0x71, 0xd3, 0x49, 0x4e, 0x4a, 0x36, 0x42, 0x68, 0x68, 0x61, 0xc7, 0x3c, 0xdb, 0x81, 0x49, 0xf7, 0x91, 0x4d, 0xea, 0x4c, 0x4f, 0x98, 0xc6, 0x7e, 0x60, 0x84, 0x4b, 0x6a, 0x37, 0xbb, 0x52, 0xf7, 0xce, 0x02, 0xe4, 0xad, 0xd1, 0x3c, 0xa7,
- /* (2^380)P */ 0x51, 0x06, 0x2d, 0xf8, 0x08, 0xe8, 0xf1, 0x0c, 0xe5, 0xa9, 0xac, 0x29, 0x73, 0x3b, 0xed, 0x98, 0x5f, 0x55, 0x08, 0x38, 0x51, 0x44, 0x36, 0x5d, 0xea, 0xc3, 0xb8, 0x0e, 0xa0, 0x4f, 0xd2, 0x79, 0xe9, 0x98, 0xc3, 0xf5, 0x00, 0xb9, 0x26, 0x27, 0x42, 0xa8, 0x07, 0xc1, 0x12, 0x31, 0xc1, 0xc3, 0x3c, 0x3b, 0x7a, 0x72, 0x97, 0xc2, 0x70, 0x3a,
- /* (2^381)P */ 0xf4, 0xb2, 0xba, 0x32, 0xbc, 0xa9, 0x2f, 0x87, 0xc7, 0x3c, 0x45, 0xcd, 0xae, 0xe2, 0x13, 0x6d, 0x3a, 0xf2, 0xf5, 0x66, 0x97, 0x29, 0xaf, 0x53, 0x9f, 0xda, 0xea, 0x14, 0xdf, 0x04, 0x98, 0x19, 0x95, 0x9e, 0x2a, 0x00, 0x5c, 0x9d, 0x1d, 0xf0, 0x39, 0x23, 0xff, 0xfc, 0xca, 0x36, 0xb7, 0xde, 0xdf, 0x37, 0x78, 0x52, 0x21, 0xfa, 0x19, 0x10,
- /* (2^382)P */ 0x50, 0x20, 0x73, 0x74, 0x62, 0x21, 0xf2, 0xf7, 0x9b, 0x66, 0x85, 0x34, 0x74, 0xd4, 0x9d, 0x60, 0xd7, 0xbc, 0xc8, 0x46, 0x3b, 0xb8, 0x80, 0x42, 0x15, 0x0a, 0x6c, 0x35, 0x1a, 0x69, 0xf0, 0x1d, 0x4b, 0x29, 0x54, 0x5a, 0x9a, 0x48, 0xec, 0x9f, 0x37, 0x74, 0x91, 0xd0, 0xd1, 0x9e, 0x00, 0xc2, 0x76, 0x56, 0xd6, 0xa0, 0x15, 0x14, 0x83, 0x59,
- /* (2^383)P */ 0xc2, 0xf8, 0x22, 0x20, 0x23, 0x07, 0xbd, 0x1d, 0x6f, 0x1e, 0x8c, 0x56, 0x06, 0x6a, 0x4b, 0x9f, 0xe2, 0xa9, 0x92, 0x46, 0x4b, 0x46, 0x59, 0xd7, 0xe1, 0xda, 0x14, 0x98, 0x07, 0x65, 0x7e, 0x28, 0x20, 0xf2, 0x9d, 0x4f, 0x36, 0x5c, 0x92, 0xe0, 0x9d, 0xfe, 0x3e, 0xda, 0xe4, 0x47, 0x19, 0x3c, 0x00, 0x7f, 0x22, 0xf2, 0x9e, 0x51, 0xae, 0x4d,
- /* (2^384)P */ 0xbe, 0x8c, 0x1b, 0x10, 0xb6, 0xad, 0xcc, 0xcc, 0xd8, 0x5e, 0x21, 0xa6, 0xfb, 0xf1, 0xf6, 0xbd, 0x0a, 0x24, 0x67, 0xb4, 0x57, 0x7a, 0xbc, 0xe8, 0xe9, 0xff, 0xee, 0x0a, 0x1f, 0xee, 0xbd, 0xc8, 0x44, 0xed, 0x2b, 0xbb, 0x55, 0x1f, 0xdd, 0x7c, 0xb3, 0xeb, 0x3f, 0x63, 0xa1, 0x28, 0x91, 0x21, 0xab, 0x71, 0xc6, 0x4c, 0xd0, 0xe9, 0xb0, 0x21,
- /* (2^385)P */ 0xad, 0xc9, 0x77, 0x2b, 0xee, 0x89, 0xa4, 0x7b, 0xfd, 0xf9, 0xf6, 0x14, 0xe4, 0xed, 0x1a, 0x16, 0x9b, 0x78, 0x41, 0x43, 0xa8, 0x83, 0x72, 0x06, 0x2e, 0x7c, 0xdf, 0xeb, 0x7e, 0xdd, 0xd7, 0x8b, 0xea, 0x9a, 0x2b, 0x03, 0xba, 0x57, 0xf3, 0xf1, 0xd9, 0xe5, 0x09, 0xc5, 0x98, 0x61, 0x1c, 0x51, 0x6d, 0x5d, 0x6e, 0xfb, 0x5e, 0x95, 0x9f, 0xb5,
- /* (2^386)P */ 0x23, 0xe2, 0x1e, 0x95, 0xa3, 0x5e, 0x42, 0x10, 0xc7, 0xc3, 0x70, 0xbf, 0x4b, 0x6b, 0x83, 0x36, 0x93, 0xb7, 0x68, 0x47, 0x88, 0x3a, 0x10, 0x88, 0x48, 0x7f, 0x8c, 0xae, 0x54, 0x10, 0x02, 0xa4, 0x52, 0x8f, 0x8d, 0xf7, 0x26, 0x4f, 0x50, 0xc3, 0x6a, 0xe2, 0x4e, 0x3b, 0x4c, 0xb9, 0x8a, 0x14, 0x15, 0x6d, 0x21, 0x29, 0xb3, 0x6e, 0x4e, 0xd0,
- /* (2^387)P */ 0x4c, 0x8a, 0x18, 0x3f, 0xb7, 0x20, 0xfd, 0x3e, 0x54, 0xca, 0x68, 0x3c, 0xea, 0x6f, 0xf4, 0x6b, 0xa2, 0xbd, 0x01, 0xbd, 0xfe, 0x08, 0xa8, 0xd8, 0xc2, 0x20, 0x36, 0x05, 0xcd, 0xe9, 0xf3, 0x9e, 0xfa, 0x85, 0x66, 0x8f, 0x4b, 0x1d, 0x8c, 0x64, 0x4f, 0xb8, 0xc6, 0x0f, 0x5b, 0x57, 0xd8, 0x24, 0x19, 0x5a, 0x14, 0x4b, 0x92, 0xd3, 0x96, 0xbc,
- /* (2^388)P */ 0xa9, 0x3f, 0xc9, 0x6c, 0xca, 0x64, 0x1e, 0x6f, 0xdf, 0x65, 0x7f, 0x9a, 0x47, 0x6b, 0x8a, 0x60, 0x31, 0xa6, 0x06, 0xac, 0x69, 0x30, 0xe6, 0xea, 0x63, 0x42, 0x26, 0x5f, 0xdb, 0xd0, 0xf2, 0x8e, 0x34, 0x0a, 0x3a, 0xeb, 0xf3, 0x79, 0xc8, 0xb7, 0x60, 0x56, 0x5c, 0x37, 0x95, 0x71, 0xf8, 0x7f, 0x49, 0x3e, 0x9e, 0x01, 0x26, 0x1e, 0x80, 0x9f,
- /* (2^389)P */ 0xf8, 0x16, 0x9a, 0xaa, 0xb0, 0x28, 0xb5, 0x8e, 0xd0, 0x60, 0xe5, 0x26, 0xa9, 0x47, 0xc4, 0x5c, 0xa9, 0x39, 0xfe, 0x0a, 0xd8, 0x07, 0x2b, 0xb3, 0xce, 0xf1, 0xea, 0x1a, 0xf4, 0x7b, 0x98, 0x31, 0x3d, 0x13, 0x29, 0x80, 0xe8, 0x0d, 0xcf, 0x56, 0x39, 0x86, 0x50, 0x0c, 0xb3, 0x18, 0xf4, 0xc5, 0xca, 0xf2, 0x6f, 0xcd, 0x8d, 0xd5, 0x02, 0xb0,
- /* (2^390)P */ 0xbf, 0x39, 0x3f, 0xac, 0x6d, 0x1a, 0x6a, 0xe4, 0x42, 0x24, 0xd6, 0x41, 0x9d, 0xb9, 0x5b, 0x46, 0x73, 0x93, 0x76, 0xaa, 0xb7, 0x37, 0x36, 0xa6, 0x09, 0xe5, 0x04, 0x3b, 0x66, 0xc4, 0x29, 0x3e, 0x41, 0xc2, 0xcb, 0xe5, 0x17, 0xd7, 0x34, 0x67, 0x1d, 0x2c, 0x12, 0xec, 0x24, 0x7a, 0x40, 0xa2, 0x45, 0x41, 0xf0, 0x75, 0xed, 0x43, 0x30, 0xc9,
- /* (2^391)P */ 0x80, 0xf6, 0x47, 0x5b, 0xad, 0x54, 0x02, 0xbc, 0xdd, 0xa4, 0xb2, 0xd7, 0x42, 0x95, 0xf2, 0x0d, 0x1b, 0xef, 0x37, 0xa7, 0xb4, 0x34, 0x04, 0x08, 0x71, 0x1b, 0xd3, 0xdf, 0xa1, 0xf0, 0x2b, 0xfa, 0xc0, 0x1f, 0xf3, 0x44, 0xb5, 0xc6, 0x47, 0x3d, 0x65, 0x67, 0x45, 0x4d, 0x2f, 0xde, 0x52, 0x73, 0xfc, 0x30, 0x01, 0x6b, 0xc1, 0x03, 0xd8, 0xd7,
- /* (2^392)P */ 0x1c, 0x67, 0x55, 0x3e, 0x01, 0x17, 0x0f, 0x3e, 0xe5, 0x34, 0x58, 0xfc, 0xcb, 0x71, 0x24, 0x74, 0x5d, 0x36, 0x1e, 0x89, 0x2a, 0x63, 0xf8, 0xf8, 0x9f, 0x50, 0x9f, 0x32, 0x92, 0x29, 0xd8, 0x1a, 0xec, 0x76, 0x57, 0x6c, 0x67, 0x12, 0x6a, 0x6e, 0xef, 0x97, 0x1f, 0xc3, 0x77, 0x60, 0x3c, 0x22, 0xcb, 0xc7, 0x04, 0x1a, 0x89, 0x2d, 0x10, 0xa6,
- /* (2^393)P */ 0x12, 0xf5, 0xa9, 0x26, 0x16, 0xd9, 0x3c, 0x65, 0x5d, 0x83, 0xab, 0xd1, 0x70, 0x6b, 0x1c, 0xdb, 0xe7, 0x86, 0x0d, 0xfb, 0xe7, 0xf8, 0x2a, 0x58, 0x6e, 0x7a, 0x66, 0x13, 0x53, 0x3a, 0x6f, 0x8d, 0x43, 0x5f, 0x14, 0x23, 0x14, 0xff, 0x3d, 0x52, 0x7f, 0xee, 0xbd, 0x7a, 0x34, 0x8b, 0x35, 0x24, 0xc3, 0x7a, 0xdb, 0xcf, 0x22, 0x74, 0x9a, 0x8f,
- /* (2^394)P */ 0xdb, 0x20, 0xfc, 0xe5, 0x39, 0x4e, 0x7d, 0x78, 0xee, 0x0b, 0xbf, 0x1d, 0x80, 0xd4, 0x05, 0x4f, 0xb9, 0xd7, 0x4e, 0x94, 0x88, 0x9a, 0x50, 0x78, 0x1a, 0x70, 0x8c, 0xcc, 0x25, 0xb6, 0x61, 0x09, 0xdc, 0x7b, 0xea, 0x3f, 0x7f, 0xea, 0x2a, 0x0d, 0x47, 0x1c, 0x8e, 0xa6, 0x5b, 0xd2, 0xa3, 0x61, 0x93, 0x3c, 0x68, 0x9f, 0x8b, 0xea, 0xb0, 0xcb,
- /* (2^395)P */ 0xff, 0x54, 0x02, 0x19, 0xae, 0x8b, 0x4c, 0x2c, 0x3a, 0xe0, 0xe4, 0xac, 0x87, 0xf7, 0x51, 0x45, 0x41, 0x43, 0xdc, 0xaa, 0xcd, 0xcb, 0xdc, 0x40, 0xe3, 0x44, 0x3b, 0x1d, 0x9e, 0x3d, 0xb9, 0x82, 0xcc, 0x7a, 0xc5, 0x12, 0xf8, 0x1e, 0xdd, 0xdb, 0x8d, 0xb0, 0x2a, 0xe8, 0xe6, 0x6c, 0x94, 0x3b, 0xb7, 0x2d, 0xba, 0x79, 0x3b, 0xb5, 0x86, 0xfb,
- /* (2^396)P */ 0x82, 0x88, 0x13, 0xdd, 0x6c, 0xcd, 0x85, 0x2b, 0x90, 0x86, 0xb7, 0xac, 0x16, 0xa6, 0x6e, 0x6a, 0x94, 0xd8, 0x1e, 0x4e, 0x41, 0x0f, 0xce, 0x81, 0x6a, 0xa8, 0x26, 0x56, 0x43, 0x52, 0x52, 0xe6, 0xff, 0x88, 0xcf, 0x47, 0x05, 0x1d, 0xff, 0xf3, 0xa0, 0x10, 0xb2, 0x97, 0x87, 0xeb, 0x47, 0xbb, 0xfa, 0x1f, 0xe8, 0x4c, 0xce, 0xc4, 0xcd, 0x93,
- /* (2^397)P */ 0xf4, 0x11, 0xf5, 0x8d, 0x89, 0x29, 0x79, 0xb3, 0x59, 0x0b, 0x29, 0x7d, 0x9c, 0x12, 0x4a, 0x65, 0x72, 0x3a, 0xf9, 0xec, 0x37, 0x18, 0x86, 0xef, 0x44, 0x07, 0x25, 0x74, 0x76, 0x53, 0xed, 0x51, 0x01, 0xc6, 0x28, 0xc5, 0xc3, 0x4a, 0x0f, 0x99, 0xec, 0xc8, 0x40, 0x5a, 0x83, 0x30, 0x79, 0xa2, 0x3e, 0x63, 0x09, 0x2d, 0x6f, 0x23, 0x54, 0x1c,
- /* (2^398)P */ 0x5c, 0x6f, 0x3b, 0x1c, 0x30, 0x77, 0x7e, 0x87, 0x66, 0x83, 0x2e, 0x7e, 0x85, 0x50, 0xfd, 0xa0, 0x7a, 0xc2, 0xf5, 0x0f, 0xc1, 0x64, 0xe7, 0x0b, 0xbd, 0x59, 0xa7, 0xe7, 0x65, 0x53, 0xc3, 0xf5, 0x55, 0x5b, 0xe1, 0x82, 0x30, 0x5a, 0x61, 0xcd, 0xa0, 0x89, 0x32, 0xdb, 0x87, 0xfc, 0x21, 0x8a, 0xab, 0x6d, 0x82, 0xa8, 0x42, 0x81, 0x4f, 0xf2,
- /* (2^399)P */ 0xb3, 0xeb, 0x88, 0x18, 0xf6, 0x56, 0x96, 0xbf, 0xba, 0x5d, 0x71, 0xa1, 0x5a, 0xd1, 0x04, 0x7b, 0xd5, 0x46, 0x01, 0x74, 0xfe, 0x15, 0x25, 0xb7, 0xff, 0x0c, 0x24, 0x47, 0xac, 0xfd, 0xab, 0x47, 0x32, 0xe1, 0x6a, 0x4e, 0xca, 0xcf, 0x7f, 0xdd, 0xf8, 0xd2, 0x4b, 0x3b, 0xf5, 0x17, 0xba, 0xba, 0x8b, 0xa1, 0xec, 0x28, 0x3f, 0x97, 0xab, 0x2a,
- /* (2^400)P */ 0x51, 0x38, 0xc9, 0x5e, 0xc6, 0xb3, 0x64, 0xf2, 0x24, 0x4d, 0x04, 0x7d, 0xc8, 0x39, 0x0c, 0x4a, 0xc9, 0x73, 0x74, 0x1b, 0x5c, 0xb2, 0xc5, 0x41, 0x62, 0xa0, 0x4c, 0x6d, 0x8d, 0x91, 0x9a, 0x7b, 0x88, 0xab, 0x9c, 0x7e, 0x23, 0xdb, 0x6f, 0xb5, 0x72, 0xd6, 0x47, 0x40, 0xef, 0x22, 0x58, 0x62, 0x19, 0x6c, 0x38, 0xba, 0x5b, 0x00, 0x30, 0x9f,
- /* (2^401)P */ 0x65, 0xbb, 0x3b, 0x9b, 0xe9, 0xae, 0xbf, 0xbe, 0xe4, 0x13, 0x95, 0xf3, 0xe3, 0x77, 0xcb, 0xe4, 0x9a, 0x22, 0xb5, 0x4a, 0x08, 0x9d, 0xb3, 0x9e, 0x27, 0xe0, 0x15, 0x6c, 0x9f, 0x7e, 0x9a, 0x5e, 0x15, 0x45, 0x25, 0x8d, 0x01, 0x0a, 0xd2, 0x2b, 0xbd, 0x48, 0x06, 0x0d, 0x18, 0x97, 0x4b, 0xdc, 0xbc, 0xf0, 0xcd, 0xb2, 0x52, 0x3c, 0xac, 0xf5,
- /* (2^402)P */ 0x3e, 0xed, 0x47, 0x6b, 0x5c, 0xf6, 0x76, 0xd0, 0xe9, 0x15, 0xa3, 0xcb, 0x36, 0x00, 0x21, 0xa3, 0x79, 0x20, 0xa5, 0x3e, 0x88, 0x03, 0xcb, 0x7e, 0x63, 0xbb, 0xed, 0xa9, 0x13, 0x35, 0x16, 0xaf, 0x2e, 0xb4, 0x70, 0x14, 0x93, 0xfb, 0xc4, 0x9b, 0xd8, 0xb1, 0xbe, 0x43, 0xd1, 0x85, 0xb8, 0x97, 0xef, 0xea, 0x88, 0xa1, 0x25, 0x52, 0x62, 0x75,
- /* (2^403)P */ 0x8e, 0x4f, 0xaa, 0x23, 0x62, 0x7e, 0x2b, 0x37, 0x89, 0x00, 0x11, 0x30, 0xc5, 0x33, 0x4a, 0x89, 0x8a, 0xe2, 0xfc, 0x5c, 0x6a, 0x75, 0xe5, 0xf7, 0x02, 0x4a, 0x9b, 0xf7, 0xb5, 0x6a, 0x85, 0x31, 0xd3, 0x5a, 0xcf, 0xc3, 0xf8, 0xde, 0x2f, 0xcf, 0xb5, 0x24, 0xf4, 0xe3, 0xa1, 0xad, 0x42, 0xae, 0x09, 0xb9, 0x2e, 0x04, 0x2d, 0x01, 0x22, 0x3f,
- /* (2^404)P */ 0x41, 0x16, 0xfb, 0x7d, 0x50, 0xfd, 0xb5, 0xba, 0x88, 0x24, 0xba, 0xfd, 0x3d, 0xb2, 0x90, 0x15, 0xb7, 0xfa, 0xa2, 0xe1, 0x4c, 0x7d, 0xb9, 0xc6, 0xff, 0x81, 0x57, 0xb6, 0xc2, 0x9e, 0xcb, 0xc4, 0x35, 0xbd, 0x01, 0xb7, 0xaa, 0xce, 0xd0, 0xe9, 0xb5, 0xd6, 0x72, 0xbf, 0xd2, 0xee, 0xc7, 0xac, 0x94, 0xff, 0x29, 0x57, 0x02, 0x49, 0x09, 0xad,
- /* (2^405)P */ 0x27, 0xa5, 0x78, 0x1b, 0xbf, 0x6b, 0xaf, 0x0b, 0x8c, 0xd9, 0xa8, 0x37, 0xb0, 0x67, 0x18, 0xb6, 0xc7, 0x05, 0x8a, 0x67, 0x03, 0x30, 0x62, 0x6e, 0x56, 0x82, 0xa9, 0x54, 0x3e, 0x0c, 0x4e, 0x07, 0xe1, 0x5a, 0x38, 0xed, 0xfa, 0xc8, 0x55, 0x6b, 0x08, 0xa3, 0x6b, 0x64, 0x2a, 0x15, 0xd6, 0x39, 0x6f, 0x47, 0x99, 0x42, 0x3f, 0x33, 0x84, 0x8f,
- /* (2^406)P */ 0xbc, 0x45, 0x29, 0x81, 0x0e, 0xa4, 0xc5, 0x72, 0x3a, 0x10, 0xe1, 0xc4, 0x1e, 0xda, 0xc3, 0xfe, 0xb0, 0xce, 0xd2, 0x13, 0x34, 0x67, 0x21, 0xc6, 0x7e, 0xf9, 0x8c, 0xff, 0x39, 0x50, 0xae, 0x92, 0x60, 0x35, 0x2f, 0x8b, 0x6e, 0xc9, 0xc1, 0x27, 0x3a, 0x94, 0x66, 0x3e, 0x26, 0x84, 0x93, 0xc8, 0x6c, 0xcf, 0xd2, 0x03, 0xa1, 0x10, 0xcf, 0xb7,
- /* (2^407)P */ 0x64, 0xda, 0x19, 0xf6, 0xc5, 0x73, 0x17, 0x44, 0x88, 0x81, 0x07, 0x0d, 0x34, 0xb2, 0x75, 0xf9, 0xd9, 0xe2, 0xe0, 0x8b, 0x71, 0xcf, 0x72, 0x34, 0x83, 0xb4, 0xce, 0xfc, 0xd7, 0x29, 0x09, 0x5a, 0x98, 0xbf, 0x14, 0xac, 0x77, 0x55, 0x38, 0x47, 0x5b, 0x0f, 0x40, 0x24, 0xe5, 0xa5, 0xa6, 0xac, 0x2d, 0xa6, 0xff, 0x9c, 0x73, 0xfe, 0x5c, 0x7e,
- /* (2^408)P */ 0x1e, 0x33, 0xcc, 0x68, 0xb2, 0xbc, 0x8c, 0x93, 0xaf, 0xcc, 0x38, 0xf8, 0xd9, 0x16, 0x72, 0x50, 0xac, 0xd9, 0xb5, 0x0b, 0x9a, 0xbe, 0x46, 0x7a, 0xf1, 0xee, 0xf1, 0xad, 0xec, 0x5b, 0x59, 0x27, 0x9c, 0x05, 0xa3, 0x87, 0xe0, 0x37, 0x2c, 0x83, 0xce, 0xb3, 0x65, 0x09, 0x8e, 0xc3, 0x9c, 0xbf, 0x6a, 0xa2, 0x00, 0xcc, 0x12, 0x36, 0xc5, 0x95,
- /* (2^409)P */ 0x36, 0x11, 0x02, 0x14, 0x9c, 0x3c, 0xeb, 0x2f, 0x23, 0x5b, 0x6b, 0x2b, 0x08, 0x54, 0x53, 0xac, 0xb2, 0xa3, 0xe0, 0x26, 0x62, 0x3c, 0xe4, 0xe1, 0x81, 0xee, 0x13, 0x3e, 0xa4, 0x97, 0xef, 0xf9, 0x92, 0x27, 0x01, 0xce, 0x54, 0x8b, 0x3e, 0x31, 0xbe, 0xa7, 0x88, 0xcf, 0x47, 0x99, 0x3c, 0x10, 0x6f, 0x60, 0xb3, 0x06, 0x4e, 0xee, 0x1b, 0xf0,
- /* (2^410)P */ 0x59, 0x49, 0x66, 0xcf, 0x22, 0xe6, 0xf6, 0x73, 0xfe, 0xa3, 0x1c, 0x09, 0xfa, 0x5f, 0x65, 0xa8, 0xf0, 0x82, 0xc2, 0xef, 0x16, 0x63, 0x6e, 0x79, 0x69, 0x51, 0x39, 0x07, 0x65, 0xc4, 0x81, 0xec, 0x73, 0x0f, 0x15, 0x93, 0xe1, 0x30, 0x33, 0xe9, 0x37, 0x86, 0x42, 0x4c, 0x1f, 0x9b, 0xad, 0xee, 0x3f, 0xf1, 0x2a, 0x8e, 0x6a, 0xa3, 0xc8, 0x35,
- /* (2^411)P */ 0x1e, 0x49, 0xf1, 0xdd, 0xd2, 0x9c, 0x8e, 0x78, 0xb2, 0x06, 0xe4, 0x6a, 0xab, 0x3a, 0xdc, 0xcd, 0xf4, 0xeb, 0xe1, 0xe7, 0x2f, 0xaa, 0xeb, 0x40, 0x31, 0x9f, 0xb9, 0xab, 0x13, 0xa9, 0x78, 0xbf, 0x38, 0x89, 0x0e, 0x85, 0x14, 0x8b, 0x46, 0x76, 0x14, 0xda, 0xcf, 0x33, 0xc8, 0x79, 0xd3, 0xd5, 0xa3, 0x6a, 0x69, 0x45, 0x70, 0x34, 0xc3, 0xe9,
- /* (2^412)P */ 0x5e, 0xe7, 0x78, 0xe9, 0x24, 0xcc, 0xe9, 0xf4, 0xc8, 0x6b, 0xe0, 0xfb, 0x3a, 0xbe, 0xcc, 0x42, 0x4a, 0x00, 0x22, 0xf8, 0xe6, 0x32, 0xbe, 0x6d, 0x18, 0x55, 0x60, 0xe9, 0x72, 0x69, 0x50, 0x56, 0xca, 0x04, 0x18, 0x38, 0xa1, 0xee, 0xd8, 0x38, 0x3c, 0xa7, 0x70, 0xe2, 0xb9, 0x4c, 0xa0, 0xc8, 0x89, 0x72, 0xcf, 0x49, 0x7f, 0xdf, 0xbc, 0x67,
- /* (2^413)P */ 0x1d, 0x17, 0xcb, 0x0b, 0xbd, 0xb2, 0x36, 0xe3, 0xa8, 0x99, 0x31, 0xb6, 0x26, 0x9c, 0x0c, 0x74, 0xaf, 0x4d, 0x24, 0x61, 0xcf, 0x31, 0x7b, 0xed, 0xdd, 0xc3, 0xf6, 0x32, 0x70, 0xfe, 0x17, 0xf6, 0x51, 0x37, 0x65, 0xce, 0x5d, 0xaf, 0xa5, 0x2f, 0x2a, 0xfe, 0x00, 0x71, 0x7c, 0x50, 0xbe, 0x21, 0xc7, 0xed, 0xc6, 0xfc, 0x67, 0xcf, 0x9c, 0xdd,
- /* (2^414)P */ 0x26, 0x3e, 0xf8, 0xbb, 0xd0, 0xb1, 0x01, 0xd8, 0xeb, 0x0b, 0x62, 0x87, 0x35, 0x4c, 0xde, 0xca, 0x99, 0x9c, 0x6d, 0xf7, 0xb6, 0xf0, 0x57, 0x0a, 0x52, 0x29, 0x6a, 0x3f, 0x26, 0x31, 0x04, 0x07, 0x2a, 0xc9, 0xfa, 0x9b, 0x0e, 0x62, 0x8e, 0x72, 0xf2, 0xad, 0xce, 0xb6, 0x35, 0x7a, 0xc1, 0xae, 0x35, 0xc7, 0xa3, 0x14, 0xcf, 0x0c, 0x28, 0xb7,
- /* (2^415)P */ 0xa6, 0xf1, 0x32, 0x3a, 0x20, 0xd2, 0x24, 0x97, 0xcf, 0x5d, 0x37, 0x99, 0xaf, 0x33, 0x7a, 0x5b, 0x7a, 0xcc, 0x4e, 0x41, 0x38, 0xb1, 0x4e, 0xad, 0xc9, 0xd9, 0x71, 0x7e, 0xb2, 0xf5, 0xd5, 0x01, 0x6c, 0x4d, 0xfd, 0xa1, 0xda, 0x03, 0x38, 0x9b, 0x3d, 0x92, 0x92, 0xf2, 0xca, 0xbf, 0x1f, 0x24, 0xa4, 0xbb, 0x30, 0x6a, 0x74, 0x56, 0xc8, 0xce,
- /* (2^416)P */ 0x27, 0xf4, 0xed, 0xc9, 0xc3, 0xb1, 0x79, 0x85, 0xbe, 0xf6, 0xeb, 0xf3, 0x55, 0xc7, 0xaa, 0xa6, 0xe9, 0x07, 0x5d, 0xf4, 0xeb, 0xa6, 0x81, 0xe3, 0x0e, 0xcf, 0xa3, 0xc1, 0xef, 0xe7, 0x34, 0xb2, 0x03, 0x73, 0x8a, 0x91, 0xf1, 0xad, 0x05, 0xc7, 0x0b, 0x43, 0x99, 0x12, 0x31, 0xc8, 0xc7, 0xc5, 0xa4, 0x3d, 0xcd, 0xe5, 0x4e, 0x6d, 0x24, 0xdd,
- /* (2^417)P */ 0x61, 0x54, 0xd0, 0x95, 0x2c, 0x45, 0x75, 0xac, 0xb5, 0x1a, 0x9d, 0x11, 0xeb, 0xed, 0x6b, 0x57, 0xa3, 0xe6, 0xcd, 0x77, 0xd4, 0x83, 0x8e, 0x39, 0xf1, 0x0f, 0x98, 0xcb, 0x40, 0x02, 0x6e, 0x10, 0x82, 0x9e, 0xb4, 0x93, 0x76, 0xd7, 0x97, 0xa3, 0x53, 0x12, 0x86, 0xc6, 0x15, 0x78, 0x73, 0x93, 0xe7, 0x7f, 0xcf, 0x1f, 0xbf, 0xcd, 0xd2, 0x7a,
- /* (2^418)P */ 0xc2, 0x21, 0xdc, 0xd5, 0x69, 0xff, 0xca, 0x49, 0x3a, 0xe1, 0xc3, 0x69, 0x41, 0x56, 0xc1, 0x76, 0x63, 0x24, 0xbd, 0x64, 0x1b, 0x3d, 0x92, 0xf9, 0x13, 0x04, 0x25, 0xeb, 0x27, 0xa6, 0xef, 0x39, 0x3a, 0x80, 0xe0, 0xf8, 0x27, 0xee, 0xc9, 0x49, 0x77, 0xef, 0x3f, 0x29, 0x3d, 0x5e, 0xe6, 0x66, 0x83, 0xd1, 0xf6, 0xfe, 0x9d, 0xbc, 0xf1, 0x96,
- /* (2^419)P */ 0x6b, 0xc6, 0x99, 0x26, 0x3c, 0xf3, 0x63, 0xf9, 0xc7, 0x29, 0x8c, 0x52, 0x62, 0x2d, 0xdc, 0x8a, 0x66, 0xce, 0x2c, 0xa7, 0xe4, 0xf0, 0xd7, 0x37, 0x17, 0x1e, 0xe4, 0xa3, 0x53, 0x7b, 0x29, 0x8e, 0x60, 0x99, 0xf9, 0x0c, 0x7c, 0x6f, 0xa2, 0xcc, 0x9f, 0x80, 0xdd, 0x5e, 0x46, 0xaa, 0x0d, 0x6c, 0xc9, 0x6c, 0xf7, 0x78, 0x5b, 0x38, 0xe3, 0x24,
- /* (2^420)P */ 0x4b, 0x75, 0x6a, 0x2f, 0x08, 0xe1, 0x72, 0x76, 0xab, 0x82, 0x96, 0xdf, 0x3b, 0x1f, 0x9b, 0xd8, 0xed, 0xdb, 0xcd, 0x15, 0x09, 0x5a, 0x1e, 0xb7, 0xc5, 0x26, 0x72, 0x07, 0x0c, 0x50, 0xcd, 0x3b, 0x4d, 0x3f, 0xa2, 0x67, 0xc2, 0x02, 0x61, 0x2e, 0x68, 0xe9, 0x6f, 0xf0, 0x21, 0x2a, 0xa7, 0x3b, 0x88, 0x04, 0x11, 0x64, 0x49, 0x0d, 0xb4, 0x46,
- /* (2^421)P */ 0x63, 0x85, 0xf3, 0xc5, 0x2b, 0x5a, 0x9f, 0xf0, 0x17, 0xcb, 0x45, 0x0a, 0xf3, 0x6e, 0x7e, 0xb0, 0x7c, 0xbc, 0xf0, 0x4f, 0x3a, 0xb0, 0xbc, 0x36, 0x36, 0x52, 0x51, 0xcb, 0xfe, 0x9a, 0xcb, 0xe8, 0x7e, 0x4b, 0x06, 0x7f, 0xaa, 0x35, 0xc8, 0x0e, 0x7a, 0x30, 0xa3, 0xb1, 0x09, 0xbb, 0x86, 0x4c, 0xbe, 0xb8, 0xbd, 0xe0, 0x32, 0xa5, 0xd4, 0xf7,
- /* (2^422)P */ 0x7d, 0x50, 0x37, 0x68, 0x4e, 0x22, 0xb2, 0x2c, 0xd5, 0x0f, 0x2b, 0x6d, 0xb1, 0x51, 0xf2, 0x82, 0xe9, 0x98, 0x7c, 0x50, 0xc7, 0x96, 0x7e, 0x0e, 0xdc, 0xb1, 0x0e, 0xb2, 0x63, 0x8c, 0x30, 0x37, 0x72, 0x21, 0x9c, 0x61, 0xc2, 0xa7, 0x33, 0xd9, 0xb2, 0x63, 0x93, 0xd1, 0x6b, 0x6a, 0x73, 0xa5, 0x58, 0x80, 0xff, 0x04, 0xc7, 0x83, 0x21, 0x29,
- /* (2^423)P */ 0x29, 0x04, 0xbc, 0x99, 0x39, 0xc9, 0x58, 0xc9, 0x6b, 0x17, 0xe8, 0x90, 0xb3, 0xe6, 0xa9, 0xb6, 0x28, 0x9b, 0xcb, 0x3b, 0x28, 0x90, 0x68, 0x71, 0xff, 0xcf, 0x08, 0x78, 0xc9, 0x8d, 0xa8, 0x4e, 0x43, 0xd1, 0x1c, 0x9e, 0xa4, 0xe3, 0xdf, 0xbf, 0x92, 0xf4, 0xf9, 0x41, 0xba, 0x4d, 0x1c, 0xf9, 0xdd, 0x74, 0x76, 0x1c, 0x6e, 0x3e, 0x94, 0x87,
- /* (2^424)P */ 0xe4, 0xda, 0xc5, 0xd7, 0xfb, 0x87, 0xc5, 0x4d, 0x6b, 0x19, 0xaa, 0xb9, 0xbc, 0x8c, 0xf2, 0x8a, 0xd8, 0x5d, 0xdb, 0x4d, 0xef, 0xa6, 0xf2, 0x65, 0xf1, 0x22, 0x9c, 0xf1, 0x46, 0x30, 0x71, 0x7c, 0xe4, 0x53, 0x8e, 0x55, 0x2e, 0x9c, 0x9a, 0x31, 0x2a, 0xc3, 0xab, 0x0f, 0xde, 0xe4, 0xbe, 0xd8, 0x96, 0x50, 0x6e, 0x0c, 0x54, 0x49, 0xe6, 0xec,
- /* (2^425)P */ 0x3c, 0x1d, 0x5a, 0xa5, 0xda, 0xad, 0xdd, 0xc2, 0xae, 0xac, 0x6f, 0x86, 0x75, 0x31, 0x91, 0x64, 0x45, 0x9d, 0xa4, 0xf0, 0x81, 0xf1, 0x0e, 0xba, 0x74, 0xaf, 0x7b, 0xcd, 0x6f, 0xfe, 0xac, 0x4e, 0xdb, 0x4e, 0x45, 0x35, 0x36, 0xc5, 0xc0, 0x6c, 0x3d, 0x64, 0xf4, 0xd8, 0x07, 0x62, 0xd1, 0xec, 0xf3, 0xfc, 0x93, 0xc9, 0x28, 0x0c, 0x2c, 0xf3,
- /* (2^426)P */ 0x0c, 0x69, 0x2b, 0x5c, 0xb6, 0x41, 0x69, 0xf1, 0xa4, 0xf1, 0x5b, 0x75, 0x4c, 0x42, 0x8b, 0x47, 0xeb, 0x69, 0xfb, 0xa8, 0xe6, 0xf9, 0x7b, 0x48, 0x50, 0xaf, 0xd3, 0xda, 0xb2, 0x35, 0x10, 0xb5, 0x5b, 0x40, 0x90, 0x39, 0xc9, 0x07, 0x06, 0x73, 0x26, 0x20, 0x95, 0x01, 0xa4, 0x2d, 0xf0, 0xe7, 0x2e, 0x00, 0x7d, 0x41, 0x09, 0x68, 0x13, 0xc4,
- /* (2^427)P */ 0xbe, 0x38, 0x78, 0xcf, 0xc9, 0x4f, 0x36, 0xca, 0x09, 0x61, 0x31, 0x3c, 0x57, 0x2e, 0xec, 0x17, 0xa4, 0x7d, 0x19, 0x2b, 0x9b, 0x5b, 0xbe, 0x8f, 0xd6, 0xc5, 0x2f, 0x86, 0xf2, 0x64, 0x76, 0x17, 0x00, 0x6e, 0x1a, 0x8c, 0x67, 0x1b, 0x68, 0xeb, 0x15, 0xa2, 0xd6, 0x09, 0x91, 0xdd, 0x23, 0x0d, 0x98, 0xb2, 0x10, 0x19, 0x55, 0x9b, 0x63, 0xf2,
- /* (2^428)P */ 0x51, 0x1f, 0x93, 0xea, 0x2a, 0x3a, 0xfa, 0x41, 0xc0, 0x57, 0xfb, 0x74, 0xa6, 0x65, 0x09, 0x56, 0x14, 0xb6, 0x12, 0xaa, 0xb3, 0x1a, 0x8d, 0x3b, 0x76, 0x91, 0x7a, 0x23, 0x56, 0x9c, 0x6a, 0xc0, 0xe0, 0x3c, 0x3f, 0xb5, 0x1a, 0xf4, 0x57, 0x71, 0x93, 0x2b, 0xb1, 0xa7, 0x70, 0x57, 0x22, 0x80, 0xf5, 0xb8, 0x07, 0x77, 0x87, 0x0c, 0xbe, 0x83,
- /* (2^429)P */ 0x07, 0x9b, 0x0e, 0x52, 0x38, 0x63, 0x13, 0x86, 0x6a, 0xa6, 0xb4, 0xd2, 0x60, 0x68, 0x9a, 0x99, 0x82, 0x0a, 0x04, 0x5f, 0x89, 0x7a, 0x1a, 0x2a, 0xae, 0x2d, 0x35, 0x0c, 0x1e, 0xad, 0xef, 0x4f, 0x9a, 0xfc, 0xc8, 0xd9, 0xcf, 0x9d, 0x48, 0x71, 0xa5, 0x55, 0x79, 0x73, 0x39, 0x1b, 0xd8, 0x73, 0xec, 0x9b, 0x03, 0x16, 0xd8, 0x82, 0xf7, 0x67,
- /* (2^430)P */ 0x52, 0x67, 0x42, 0x21, 0xc9, 0x40, 0x78, 0x82, 0x2b, 0x95, 0x2d, 0x20, 0x92, 0xd1, 0xe2, 0x61, 0x25, 0xb0, 0xc6, 0x9c, 0x20, 0x59, 0x8e, 0x28, 0x6f, 0xf3, 0xfd, 0xd3, 0xc1, 0x32, 0x43, 0xc9, 0xa6, 0x08, 0x7a, 0x77, 0x9c, 0x4c, 0x8c, 0x33, 0x71, 0x13, 0x69, 0xe3, 0x52, 0x30, 0xa7, 0xf5, 0x07, 0x67, 0xac, 0xad, 0x46, 0x8a, 0x26, 0x25,
- /* (2^431)P */ 0xda, 0x86, 0xc4, 0xa2, 0x71, 0x56, 0xdd, 0xd2, 0x48, 0xd3, 0xde, 0x42, 0x63, 0x01, 0xa7, 0x2c, 0x92, 0x83, 0x6f, 0x2e, 0xd8, 0x1e, 0x3f, 0xc1, 0xc5, 0x42, 0x4e, 0x34, 0x19, 0x54, 0x6e, 0x35, 0x2c, 0x51, 0x2e, 0xfd, 0x0f, 0x9a, 0x45, 0x66, 0x5e, 0x4a, 0x83, 0xda, 0x0a, 0x53, 0x68, 0x63, 0xfa, 0xce, 0x47, 0x20, 0xd3, 0x34, 0xba, 0x0d,
- /* (2^432)P */ 0xd0, 0xe9, 0x64, 0xa4, 0x61, 0x4b, 0x86, 0xe5, 0x93, 0x6f, 0xda, 0x0e, 0x31, 0x7e, 0x6e, 0xe3, 0xc6, 0x73, 0xd8, 0xa3, 0x08, 0x57, 0x52, 0xcd, 0x51, 0x63, 0x1d, 0x9f, 0x93, 0x00, 0x62, 0x91, 0x26, 0x21, 0xa7, 0xdd, 0x25, 0x0f, 0x09, 0x0d, 0x35, 0xad, 0xcf, 0x11, 0x8e, 0x6e, 0xe8, 0xae, 0x1d, 0x95, 0xcb, 0x88, 0xf8, 0x70, 0x7b, 0x91,
- /* (2^433)P */ 0x0c, 0x19, 0x5c, 0xd9, 0x8d, 0xda, 0x9d, 0x2c, 0x90, 0x54, 0x65, 0xe8, 0xb6, 0x35, 0x50, 0xae, 0xea, 0xae, 0x43, 0xb7, 0x1e, 0x99, 0x8b, 0x4c, 0x36, 0x4e, 0xe4, 0x1e, 0xc4, 0x64, 0x43, 0xb6, 0xeb, 0xd4, 0xe9, 0x60, 0x22, 0xee, 0xcf, 0xb8, 0x52, 0x1b, 0xf0, 0x04, 0xce, 0xbc, 0x2b, 0xf0, 0xbe, 0xcd, 0x44, 0x74, 0x1e, 0x1f, 0x63, 0xf9,
- /* (2^434)P */ 0xe1, 0x3f, 0x95, 0x94, 0xb2, 0xb6, 0x31, 0xa9, 0x1b, 0xdb, 0xfd, 0x0e, 0xdb, 0xdd, 0x1a, 0x22, 0x78, 0x60, 0x9f, 0x75, 0x5f, 0x93, 0x06, 0x0c, 0xd8, 0xbb, 0xa2, 0x85, 0x2b, 0x5e, 0xc0, 0x9b, 0xa8, 0x5d, 0xaf, 0x93, 0x91, 0x91, 0x47, 0x41, 0x1a, 0xfc, 0xb4, 0x51, 0x85, 0xad, 0x69, 0x4d, 0x73, 0x69, 0xd5, 0x4e, 0x82, 0xfb, 0x66, 0xcb,
- /* (2^435)P */ 0x7c, 0xbe, 0xc7, 0x51, 0xc4, 0x74, 0x6e, 0xab, 0xfd, 0x41, 0x4f, 0x76, 0x4f, 0x24, 0x03, 0xd6, 0x2a, 0xb7, 0x42, 0xb4, 0xda, 0x41, 0x2c, 0x82, 0x48, 0x4c, 0x7f, 0x6f, 0x25, 0x5d, 0x36, 0xd4, 0x69, 0xf5, 0xef, 0x02, 0x81, 0xea, 0x6f, 0x19, 0x69, 0xe8, 0x6f, 0x5b, 0x2f, 0x14, 0x0e, 0x6f, 0x89, 0xb4, 0xb5, 0xd8, 0xae, 0xef, 0x7b, 0x87,
- /* (2^436)P */ 0xe9, 0x91, 0xa0, 0x8b, 0xc9, 0xe0, 0x01, 0x90, 0x37, 0xc1, 0x6f, 0xdc, 0x5e, 0xf7, 0xbf, 0x43, 0x00, 0xaa, 0x10, 0x76, 0x76, 0x18, 0x6e, 0x19, 0x1e, 0x94, 0x50, 0x11, 0x0a, 0xd1, 0xe2, 0xdb, 0x08, 0x21, 0xa0, 0x1f, 0xdb, 0x54, 0xfe, 0xea, 0x6e, 0xa3, 0x68, 0x56, 0x87, 0x0b, 0x22, 0x4e, 0x66, 0xf3, 0x82, 0x82, 0x00, 0xcd, 0xd4, 0x12,
- /* (2^437)P */ 0x25, 0x8e, 0x24, 0x77, 0x64, 0x4c, 0xe0, 0xf8, 0x18, 0xc0, 0xdc, 0xc7, 0x1b, 0x35, 0x65, 0xde, 0x67, 0x41, 0x5e, 0x6f, 0x90, 0x82, 0xa7, 0x2e, 0x6d, 0xf1, 0x47, 0xb4, 0x92, 0x9c, 0xfd, 0x6a, 0x9a, 0x41, 0x36, 0x20, 0x24, 0x58, 0xc3, 0x59, 0x07, 0x9a, 0xfa, 0x9f, 0x03, 0xcb, 0xc7, 0x69, 0x37, 0x60, 0xe1, 0xab, 0x13, 0x72, 0xee, 0xa2,
- /* (2^438)P */ 0x74, 0x78, 0xfb, 0x13, 0xcb, 0x8e, 0x37, 0x1a, 0xf6, 0x1d, 0x17, 0x83, 0x06, 0xd4, 0x27, 0x06, 0x21, 0xe8, 0xda, 0xdf, 0x6b, 0xf3, 0x83, 0x6b, 0x34, 0x8a, 0x8c, 0xee, 0x01, 0x05, 0x5b, 0xed, 0xd3, 0x1b, 0xc9, 0x64, 0x83, 0xc9, 0x49, 0xc2, 0x57, 0x1b, 0xdd, 0xcf, 0xf1, 0x9d, 0x63, 0xee, 0x1c, 0x0d, 0xa0, 0x0a, 0x73, 0x1f, 0x5b, 0x32,
- /* (2^439)P */ 0x29, 0xce, 0x1e, 0xc0, 0x6a, 0xf5, 0xeb, 0x99, 0x5a, 0x39, 0x23, 0xe9, 0xdd, 0xac, 0x44, 0x88, 0xbc, 0x80, 0x22, 0xde, 0x2c, 0xcb, 0xa8, 0x3b, 0xff, 0xf7, 0x6f, 0xc7, 0x71, 0x72, 0xa8, 0xa3, 0xf6, 0x4d, 0xc6, 0x75, 0xda, 0x80, 0xdc, 0xd9, 0x30, 0xd9, 0x07, 0x50, 0x5a, 0x54, 0x7d, 0xda, 0x39, 0x6f, 0x78, 0x94, 0xbf, 0x25, 0x98, 0xdc,
- /* (2^440)P */ 0x01, 0x26, 0x62, 0x44, 0xfb, 0x0f, 0x11, 0x72, 0x73, 0x0a, 0x16, 0xc7, 0x16, 0x9c, 0x9b, 0x37, 0xd8, 0xff, 0x4f, 0xfe, 0x57, 0xdb, 0xae, 0xef, 0x7d, 0x94, 0x30, 0x04, 0x70, 0x83, 0xde, 0x3c, 0xd4, 0xb5, 0x70, 0xda, 0xa7, 0x55, 0xc8, 0x19, 0xe1, 0x36, 0x15, 0x61, 0xe7, 0x3b, 0x7d, 0x85, 0xbb, 0xf3, 0x42, 0x5a, 0x94, 0xf4, 0x53, 0x2a,
- /* (2^441)P */ 0x14, 0x60, 0xa6, 0x0b, 0x83, 0xe1, 0x23, 0x77, 0xc0, 0xce, 0x50, 0xed, 0x35, 0x8d, 0x98, 0x99, 0x7d, 0xf5, 0x8d, 0xce, 0x94, 0x25, 0xc8, 0x0f, 0x6d, 0xfa, 0x4a, 0xa4, 0x3a, 0x1f, 0x66, 0xfb, 0x5a, 0x64, 0xaf, 0x8b, 0x54, 0x54, 0x44, 0x3f, 0x5b, 0x88, 0x61, 0xe4, 0x48, 0x45, 0x26, 0x20, 0xbe, 0x0d, 0x06, 0xbb, 0x65, 0x59, 0xe1, 0x36,
- /* (2^442)P */ 0xb7, 0x98, 0xce, 0xa3, 0xe3, 0xee, 0x11, 0x1b, 0x9e, 0x24, 0x59, 0x75, 0x31, 0x37, 0x44, 0x6f, 0x6b, 0x9e, 0xec, 0xb7, 0x44, 0x01, 0x7e, 0xab, 0xbb, 0x69, 0x5d, 0x11, 0xb0, 0x30, 0x64, 0xea, 0x91, 0xb4, 0x7a, 0x8c, 0x02, 0x4c, 0xb9, 0x10, 0xa7, 0xc7, 0x79, 0xe6, 0xdc, 0x77, 0xe3, 0xc8, 0xef, 0x3e, 0xf9, 0x38, 0x81, 0xce, 0x9a, 0xb2,
- /* (2^443)P */ 0x91, 0x12, 0x76, 0xd0, 0x10, 0xb4, 0xaf, 0xe1, 0x89, 0x3a, 0x93, 0x6b, 0x5c, 0x19, 0x5f, 0x24, 0xed, 0x04, 0x92, 0xc7, 0xf0, 0x00, 0x08, 0xc1, 0x92, 0xff, 0x90, 0xdb, 0xb2, 0xbf, 0xdf, 0x49, 0xcd, 0xbd, 0x5c, 0x6e, 0xbf, 0x16, 0xbb, 0x61, 0xf9, 0x20, 0x33, 0x35, 0x93, 0x11, 0xbc, 0x59, 0x69, 0xce, 0x18, 0x9f, 0xf8, 0x7b, 0xa1, 0x6e,
- /* (2^444)P */ 0xa1, 0xf4, 0xaf, 0xad, 0xf8, 0xe6, 0x99, 0xd2, 0xa1, 0x4d, 0xde, 0x56, 0xc9, 0x7b, 0x0b, 0x11, 0x3e, 0xbf, 0x89, 0x1a, 0x9a, 0x90, 0xe5, 0xe2, 0xa6, 0x37, 0x88, 0xa1, 0x68, 0x59, 0xae, 0x8c, 0xec, 0x02, 0x14, 0x8d, 0xb7, 0x2e, 0x25, 0x75, 0x7f, 0x76, 0x1a, 0xd3, 0x4d, 0xad, 0x8a, 0x00, 0x6c, 0x96, 0x49, 0xa4, 0xc3, 0x2e, 0x5c, 0x7b,
- /* (2^445)P */ 0x26, 0x53, 0xf7, 0xda, 0xa8, 0x01, 0x14, 0xb1, 0x63, 0xe3, 0xc3, 0x89, 0x88, 0xb0, 0x85, 0x40, 0x2b, 0x26, 0x9a, 0x10, 0x1a, 0x70, 0x33, 0xf4, 0x50, 0x9d, 0x4d, 0xd8, 0x64, 0xc6, 0x0f, 0xe1, 0x17, 0xc8, 0x10, 0x4b, 0xfc, 0xa0, 0xc9, 0xba, 0x2c, 0x98, 0x09, 0xf5, 0x84, 0xb6, 0x7c, 0x4e, 0xa3, 0xe3, 0x81, 0x1b, 0x32, 0x60, 0x02, 0xdd,
- /* (2^446)P */ 0xa3, 0xe5, 0x86, 0xd4, 0x43, 0xa8, 0xd1, 0x98, 0x9d, 0x9d, 0xdb, 0x04, 0xcf, 0x6e, 0x35, 0x05, 0x30, 0x53, 0x3b, 0xbc, 0x90, 0x00, 0x4a, 0xc5, 0x40, 0x2a, 0x0f, 0xde, 0x1a, 0xd7, 0x36, 0x27, 0x44, 0x62, 0xa6, 0xac, 0x9d, 0xd2, 0x70, 0x69, 0x14, 0x39, 0x9b, 0xd1, 0xc3, 0x0a, 0x3a, 0x82, 0x0e, 0xf1, 0x94, 0xd7, 0x42, 0x94, 0xd5, 0x7d,
- /* (2^447)P */ 0x04, 0xc0, 0x6e, 0x12, 0x90, 0x70, 0xf9, 0xdf, 0xf7, 0xc9, 0x86, 0xc0, 0xe6, 0x92, 0x8b, 0x0a, 0xa1, 0xc1, 0x3b, 0xcc, 0x33, 0xb7, 0xf0, 0xeb, 0x51, 0x50, 0x80, 0x20, 0x69, 0x1c, 0x4f, 0x89, 0x05, 0x1e, 0xe4, 0x7a, 0x0a, 0xc2, 0xf0, 0xf5, 0x78, 0x91, 0x76, 0x34, 0x45, 0xdc, 0x24, 0x53, 0x24, 0x98, 0xe2, 0x73, 0x6f, 0xe6, 0x46, 0x67,
-}
diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/constants.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/constants.go
deleted file mode 100644
index b6b236e5d3d..00000000000
--- a/vendor/github.com/cloudflare/circl/ecc/goldilocks/constants.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package goldilocks
-
-import fp "github.com/cloudflare/circl/math/fp448"
-
-var (
- // genX is the x-coordinate of the generator of Goldilocks curve.
- genX = fp.Elt{
- 0x5e, 0xc0, 0x0c, 0xc7, 0x2b, 0xa8, 0x26, 0x26,
- 0x8e, 0x93, 0x00, 0x8b, 0xe1, 0x80, 0x3b, 0x43,
- 0x11, 0x65, 0xb6, 0x2a, 0xf7, 0x1a, 0xae, 0x12,
- 0x64, 0xa4, 0xd3, 0xa3, 0x24, 0xe3, 0x6d, 0xea,
- 0x67, 0x17, 0x0f, 0x47, 0x70, 0x65, 0x14, 0x9e,
- 0xda, 0x36, 0xbf, 0x22, 0xa6, 0x15, 0x1d, 0x22,
- 0xed, 0x0d, 0xed, 0x6b, 0xc6, 0x70, 0x19, 0x4f,
- }
- // genY is the y-coordinate of the generator of Goldilocks curve.
- genY = fp.Elt{
- 0x14, 0xfa, 0x30, 0xf2, 0x5b, 0x79, 0x08, 0x98,
- 0xad, 0xc8, 0xd7, 0x4e, 0x2c, 0x13, 0xbd, 0xfd,
- 0xc4, 0x39, 0x7c, 0xe6, 0x1c, 0xff, 0xd3, 0x3a,
- 0xd7, 0xc2, 0xa0, 0x05, 0x1e, 0x9c, 0x78, 0x87,
- 0x40, 0x98, 0xa3, 0x6c, 0x73, 0x73, 0xea, 0x4b,
- 0x62, 0xc7, 0xc9, 0x56, 0x37, 0x20, 0x76, 0x88,
- 0x24, 0xbc, 0xb6, 0x6e, 0x71, 0x46, 0x3f, 0x69,
- }
- // paramD is -39081 in Fp.
- paramD = fp.Elt{
- 0x56, 0x67, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- }
- // order is 2^446-0x8335dc163bb124b65129c96fde933d8d723a70aadc873d6d54a7bb0d,
- // which is the number of points in the prime subgroup.
- order = Scalar{
- 0xf3, 0x44, 0x58, 0xab, 0x92, 0xc2, 0x78, 0x23,
- 0x55, 0x8f, 0xc5, 0x8d, 0x72, 0xc2, 0x6c, 0x21,
- 0x90, 0x36, 0xd6, 0xae, 0x49, 0xdb, 0x4e, 0xc4,
- 0xe9, 0x23, 0xca, 0x7c, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f,
- }
- // residue448 is 2^448 mod order.
- residue448 = [4]uint64{
- 0x721cf5b5529eec34, 0x7a4cf635c8e9c2ab, 0xeec492d944a725bf, 0x20cd77058,
- }
- // invFour is 1/4 mod order.
- invFour = Scalar{
- 0x3d, 0x11, 0xd6, 0xaa, 0xa4, 0x30, 0xde, 0x48,
- 0xd5, 0x63, 0x71, 0xa3, 0x9c, 0x30, 0x5b, 0x08,
- 0xa4, 0x8d, 0xb5, 0x6b, 0xd2, 0xb6, 0x13, 0x71,
- 0xfa, 0x88, 0x32, 0xdf, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0f,
- }
- // paramDTwist is -39082 in Fp. The D parameter of the twist curve.
- paramDTwist = fp.Elt{
- 0x55, 0x67, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- }
-)
diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/curve.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/curve.go
deleted file mode 100644
index 5a939100d2c..00000000000
--- a/vendor/github.com/cloudflare/circl/ecc/goldilocks/curve.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Package goldilocks provides elliptic curve operations over the goldilocks curve.
-package goldilocks
-
-import fp "github.com/cloudflare/circl/math/fp448"
-
-// Curve is the Goldilocks curve x^2+y^2=z^2-39081x^2y^2.
-type Curve struct{}
-
-// Identity returns the identity point.
-func (Curve) Identity() *Point {
- return &Point{
- y: fp.One(),
- z: fp.One(),
- }
-}
-
-// IsOnCurve returns true if the point lies on the curve.
-func (Curve) IsOnCurve(P *Point) bool {
- x2, y2, t, t2, z2 := &fp.Elt{}, &fp.Elt{}, &fp.Elt{}, &fp.Elt{}, &fp.Elt{}
- rhs, lhs := &fp.Elt{}, &fp.Elt{}
- fp.Mul(t, &P.ta, &P.tb) // t = ta*tb
- fp.Sqr(x2, &P.x) // x^2
- fp.Sqr(y2, &P.y) // y^2
- fp.Sqr(z2, &P.z) // z^2
- fp.Sqr(t2, t) // t^2
- fp.Add(lhs, x2, y2) // x^2 + y^2
- fp.Mul(rhs, t2, ¶mD) // dt^2
- fp.Add(rhs, rhs, z2) // z^2 + dt^2
- fp.Sub(lhs, lhs, rhs) // x^2 + y^2 - (z^2 + dt^2)
- eq0 := fp.IsZero(lhs)
-
- fp.Mul(lhs, &P.x, &P.y) // xy
- fp.Mul(rhs, t, &P.z) // tz
- fp.Sub(lhs, lhs, rhs) // xy - tz
- eq1 := fp.IsZero(lhs)
- return eq0 && eq1
-}
-
-// Generator returns the generator point.
-func (Curve) Generator() *Point {
- return &Point{
- x: genX,
- y: genY,
- z: fp.One(),
- ta: genX,
- tb: genY,
- }
-}
-
-// Order returns the number of points in the prime subgroup.
-func (Curve) Order() Scalar { return order }
-
-// Double returns 2P.
-func (Curve) Double(P *Point) *Point { R := *P; R.Double(); return &R }
-
-// Add returns P+Q.
-func (Curve) Add(P, Q *Point) *Point { R := *P; R.Add(Q); return &R }
-
-// ScalarMult returns kP. This function runs in constant time.
-func (e Curve) ScalarMult(k *Scalar, P *Point) *Point {
- k4 := &Scalar{}
- k4.divBy4(k)
- return e.pull(twistCurve{}.ScalarMult(k4, e.push(P)))
-}
-
-// ScalarBaseMult returns kG where G is the generator point. This function runs in constant time.
-func (e Curve) ScalarBaseMult(k *Scalar) *Point {
- k4 := &Scalar{}
- k4.divBy4(k)
- return e.pull(twistCurve{}.ScalarBaseMult(k4))
-}
-
-// CombinedMult returns mG+nP, where G is the generator point. This function is non-constant time.
-func (e Curve) CombinedMult(m, n *Scalar, P *Point) *Point {
- m4 := &Scalar{}
- n4 := &Scalar{}
- m4.divBy4(m)
- n4.divBy4(n)
- return e.pull(twistCurve{}.CombinedMult(m4, n4, twistCurve{}.pull(P)))
-}
diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/isogeny.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/isogeny.go
deleted file mode 100644
index b1daab851c5..00000000000
--- a/vendor/github.com/cloudflare/circl/ecc/goldilocks/isogeny.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package goldilocks
-
-import fp "github.com/cloudflare/circl/math/fp448"
-
-func (Curve) pull(P *twistPoint) *Point { return twistCurve{}.push(P) }
-func (twistCurve) pull(P *Point) *twistPoint { return Curve{}.push(P) }
-
-// push sends a point on the Goldilocks curve to a point on the twist curve.
-func (Curve) push(P *Point) *twistPoint {
- Q := &twistPoint{}
- Px, Py, Pz := &P.x, &P.y, &P.z
- a, b, c, d, e, f, g, h := &Q.x, &Q.y, &Q.z, &fp.Elt{}, &Q.ta, &Q.x, &Q.y, &Q.tb
- fp.Add(e, Px, Py) // x+y
- fp.Sqr(a, Px) // A = x^2
- fp.Sqr(b, Py) // B = y^2
- fp.Sqr(c, Pz) // z^2
- fp.Add(c, c, c) // C = 2*z^2
- *d = *a // D = A
- fp.Sqr(e, e) // (x+y)^2
- fp.Sub(e, e, a) // (x+y)^2-A
- fp.Sub(e, e, b) // E = (x+y)^2-A-B
- fp.Add(h, b, d) // H = B+D
- fp.Sub(g, b, d) // G = B-D
- fp.Sub(f, c, h) // F = C-H
- fp.Mul(&Q.z, f, g) // Z = F * G
- fp.Mul(&Q.x, e, f) // X = E * F
- fp.Mul(&Q.y, g, h) // Y = G * H, // T = E * H
- return Q
-}
-
-// push sends a point on the twist curve to a point on the Goldilocks curve.
-func (twistCurve) push(P *twistPoint) *Point {
- Q := &Point{}
- Px, Py, Pz := &P.x, &P.y, &P.z
- a, b, c, d, e, f, g, h := &Q.x, &Q.y, &Q.z, &fp.Elt{}, &Q.ta, &Q.x, &Q.y, &Q.tb
- fp.Add(e, Px, Py) // x+y
- fp.Sqr(a, Px) // A = x^2
- fp.Sqr(b, Py) // B = y^2
- fp.Sqr(c, Pz) // z^2
- fp.Add(c, c, c) // C = 2*z^2
- fp.Neg(d, a) // D = -A
- fp.Sqr(e, e) // (x+y)^2
- fp.Sub(e, e, a) // (x+y)^2-A
- fp.Sub(e, e, b) // E = (x+y)^2-A-B
- fp.Add(h, b, d) // H = B+D
- fp.Sub(g, b, d) // G = B-D
- fp.Sub(f, c, h) // F = C-H
- fp.Mul(&Q.z, f, g) // Z = F * G
- fp.Mul(&Q.x, e, f) // X = E * F
- fp.Mul(&Q.y, g, h) // Y = G * H, // T = E * H
- return Q
-}
diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/point.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/point.go
deleted file mode 100644
index 11f73de0542..00000000000
--- a/vendor/github.com/cloudflare/circl/ecc/goldilocks/point.go
+++ /dev/null
@@ -1,171 +0,0 @@
-package goldilocks
-
-import (
- "errors"
- "fmt"
-
- fp "github.com/cloudflare/circl/math/fp448"
-)
-
-// Point is a point on the Goldilocks Curve.
-type Point struct{ x, y, z, ta, tb fp.Elt }
-
-func (P Point) String() string {
- return fmt.Sprintf("x: %v\ny: %v\nz: %v\nta: %v\ntb: %v", P.x, P.y, P.z, P.ta, P.tb)
-}
-
-// FromAffine creates a point from affine coordinates.
-func FromAffine(x, y *fp.Elt) (*Point, error) {
- P := &Point{
- x: *x,
- y: *y,
- z: fp.One(),
- ta: *x,
- tb: *y,
- }
- if !(Curve{}).IsOnCurve(P) {
- return P, errors.New("point not on curve")
- }
- return P, nil
-}
-
-// isLessThan returns true if 0 <= x < y, and assumes that slices are of the
-// same length and are interpreted in little-endian order.
-func isLessThan(x, y []byte) bool {
- i := len(x) - 1
- for i > 0 && x[i] == y[i] {
- i--
- }
- return x[i] < y[i]
-}
-
-// FromBytes returns a point from the input buffer.
-func FromBytes(in []byte) (*Point, error) {
- if len(in) < fp.Size+1 {
- return nil, errors.New("wrong input length")
- }
- err := errors.New("invalid decoding")
- P := &Point{}
- signX := in[fp.Size] >> 7
- copy(P.y[:], in[:fp.Size])
- p := fp.P()
- if !isLessThan(P.y[:], p[:]) {
- return nil, err
- }
-
- u, v := &fp.Elt{}, &fp.Elt{}
- one := fp.One()
- fp.Sqr(u, &P.y) // u = y^2
- fp.Mul(v, u, ¶mD) // v = dy^2
- fp.Sub(u, u, &one) // u = y^2-1
- fp.Sub(v, v, &one) // v = dy^2-1
- isQR := fp.InvSqrt(&P.x, u, v) // x = sqrt(u/v)
- if !isQR {
- return nil, err
- }
- fp.Modp(&P.x) // x = x mod p
- if fp.IsZero(&P.x) && signX == 1 {
- return nil, err
- }
- if signX != (P.x[0] & 1) {
- fp.Neg(&P.x, &P.x)
- }
- P.ta = P.x
- P.tb = P.y
- P.z = fp.One()
- return P, nil
-}
-
-// IsIdentity returns true is P is the identity Point.
-func (P *Point) IsIdentity() bool {
- return fp.IsZero(&P.x) && !fp.IsZero(&P.y) && !fp.IsZero(&P.z) && P.y == P.z
-}
-
-// IsEqual returns true if P is equivalent to Q.
-func (P *Point) IsEqual(Q *Point) bool {
- l, r := &fp.Elt{}, &fp.Elt{}
- fp.Mul(l, &P.x, &Q.z)
- fp.Mul(r, &Q.x, &P.z)
- fp.Sub(l, l, r)
- b := fp.IsZero(l)
- fp.Mul(l, &P.y, &Q.z)
- fp.Mul(r, &Q.y, &P.z)
- fp.Sub(l, l, r)
- b = b && fp.IsZero(l)
- fp.Mul(l, &P.ta, &P.tb)
- fp.Mul(l, l, &Q.z)
- fp.Mul(r, &Q.ta, &Q.tb)
- fp.Mul(r, r, &P.z)
- fp.Sub(l, l, r)
- b = b && fp.IsZero(l)
- return b
-}
-
-// Neg obtains the inverse of the Point.
-func (P *Point) Neg() { fp.Neg(&P.x, &P.x); fp.Neg(&P.ta, &P.ta) }
-
-// ToAffine returns the x,y affine coordinates of P.
-func (P *Point) ToAffine() (x, y fp.Elt) {
- fp.Inv(&P.z, &P.z) // 1/z
- fp.Mul(&P.x, &P.x, &P.z) // x/z
- fp.Mul(&P.y, &P.y, &P.z) // y/z
- fp.Modp(&P.x)
- fp.Modp(&P.y)
- fp.SetOne(&P.z)
- P.ta = P.x
- P.tb = P.y
- return P.x, P.y
-}
-
-// ToBytes stores P into a slice of bytes.
-func (P *Point) ToBytes(out []byte) error {
- if len(out) < fp.Size+1 {
- return errors.New("invalid decoding")
- }
- x, y := P.ToAffine()
- out[fp.Size] = (x[0] & 1) << 7
- return fp.ToBytes(out[:fp.Size], &y)
-}
-
-// MarshalBinary encodes the receiver into a binary form and returns the result.
-func (P *Point) MarshalBinary() (data []byte, err error) {
- data = make([]byte, fp.Size+1)
- err = P.ToBytes(data[:fp.Size+1])
- return data, err
-}
-
-// UnmarshalBinary must be able to decode the form generated by MarshalBinary.
-func (P *Point) UnmarshalBinary(data []byte) error { Q, err := FromBytes(data); *P = *Q; return err }
-
-// Double sets P = 2Q.
-func (P *Point) Double() { P.Add(P) }
-
-// Add sets P =P+Q..
-func (P *Point) Add(Q *Point) {
- // This is formula (5) from "Twisted Edwards Curves Revisited" by
- // Hisil H., Wong K.KH., Carter G., Dawson E. (2008)
- // https://doi.org/10.1007/978-3-540-89255-7_20
- x1, y1, z1, ta1, tb1 := &P.x, &P.y, &P.z, &P.ta, &P.tb
- x2, y2, z2, ta2, tb2 := &Q.x, &Q.y, &Q.z, &Q.ta, &Q.tb
- x3, y3, z3, E, H := &P.x, &P.y, &P.z, &P.ta, &P.tb
- A, B, C, D := &fp.Elt{}, &fp.Elt{}, &fp.Elt{}, &fp.Elt{}
- t1, t2, F, G := C, D, &fp.Elt{}, &fp.Elt{}
- fp.Mul(t1, ta1, tb1) // t1 = ta1*tb1
- fp.Mul(t2, ta2, tb2) // t2 = ta2*tb2
- fp.Mul(A, x1, x2) // A = x1*x2
- fp.Mul(B, y1, y2) // B = y1*y2
- fp.Mul(C, t1, t2) // t1*t2
- fp.Mul(C, C, ¶mD) // C = d*t1*t2
- fp.Mul(D, z1, z2) // D = z1*z2
- fp.Add(F, x1, y1) // x1+y1
- fp.Add(E, x2, y2) // x2+y2
- fp.Mul(E, E, F) // (x1+y1)*(x2+y2)
- fp.Sub(E, E, A) // (x1+y1)*(x2+y2)-A
- fp.Sub(E, E, B) // E = (x1+y1)*(x2+y2)-A-B
- fp.Sub(F, D, C) // F = D-C
- fp.Add(G, D, C) // G = D+C
- fp.Sub(H, B, A) // H = B-A
- fp.Mul(z3, F, G) // Z = F * G
- fp.Mul(x3, E, F) // X = E * F
- fp.Mul(y3, G, H) // Y = G * H, T = E * H
-}
diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/scalar.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/scalar.go
deleted file mode 100644
index f98117b2527..00000000000
--- a/vendor/github.com/cloudflare/circl/ecc/goldilocks/scalar.go
+++ /dev/null
@@ -1,203 +0,0 @@
-package goldilocks
-
-import (
- "encoding/binary"
- "math/bits"
-)
-
-// ScalarSize is the size (in bytes) of scalars.
-const ScalarSize = 56 // 448 / 8
-
-// _N is the number of 64-bit words to store scalars.
-const _N = 7 // 448 / 64
-
-// Scalar represents a positive integer stored in little-endian order.
-type Scalar [ScalarSize]byte
-
-type scalar64 [_N]uint64
-
-func (z *scalar64) fromScalar(x *Scalar) {
- z[0] = binary.LittleEndian.Uint64(x[0*8 : 1*8])
- z[1] = binary.LittleEndian.Uint64(x[1*8 : 2*8])
- z[2] = binary.LittleEndian.Uint64(x[2*8 : 3*8])
- z[3] = binary.LittleEndian.Uint64(x[3*8 : 4*8])
- z[4] = binary.LittleEndian.Uint64(x[4*8 : 5*8])
- z[5] = binary.LittleEndian.Uint64(x[5*8 : 6*8])
- z[6] = binary.LittleEndian.Uint64(x[6*8 : 7*8])
-}
-
-func (z *scalar64) toScalar(x *Scalar) {
- binary.LittleEndian.PutUint64(x[0*8:1*8], z[0])
- binary.LittleEndian.PutUint64(x[1*8:2*8], z[1])
- binary.LittleEndian.PutUint64(x[2*8:3*8], z[2])
- binary.LittleEndian.PutUint64(x[3*8:4*8], z[3])
- binary.LittleEndian.PutUint64(x[4*8:5*8], z[4])
- binary.LittleEndian.PutUint64(x[5*8:6*8], z[5])
- binary.LittleEndian.PutUint64(x[6*8:7*8], z[6])
-}
-
-// add calculates z = x + y. Assumes len(z) > max(len(x),len(y)).
-func add(z, x, y []uint64) uint64 {
- l, L, zz := len(x), len(y), y
- if l > L {
- l, L, zz = L, l, x
- }
- c := uint64(0)
- for i := 0; i < l; i++ {
- z[i], c = bits.Add64(x[i], y[i], c)
- }
- for i := l; i < L; i++ {
- z[i], c = bits.Add64(zz[i], 0, c)
- }
- return c
-}
-
-// sub calculates z = x - y. Assumes len(z) > max(len(x),len(y)).
-func sub(z, x, y []uint64) uint64 {
- l, L, zz := len(x), len(y), y
- if l > L {
- l, L, zz = L, l, x
- }
- c := uint64(0)
- for i := 0; i < l; i++ {
- z[i], c = bits.Sub64(x[i], y[i], c)
- }
- for i := l; i < L; i++ {
- z[i], c = bits.Sub64(zz[i], 0, c)
- }
- return c
-}
-
-// mulWord calculates z = x * y. Assumes len(z) >= len(x)+1.
-func mulWord(z, x []uint64, y uint64) {
- for i := range z {
- z[i] = 0
- }
- carry := uint64(0)
- for i := range x {
- hi, lo := bits.Mul64(x[i], y)
- lo, cc := bits.Add64(lo, z[i], 0)
- hi, _ = bits.Add64(hi, 0, cc)
- z[i], cc = bits.Add64(lo, carry, 0)
- carry, _ = bits.Add64(hi, 0, cc)
- }
- z[len(x)] = carry
-}
-
-// Cmov moves x into z if b=1.
-func (z *scalar64) Cmov(b uint64, x *scalar64) {
- m := uint64(0) - b
- for i := range z {
- z[i] = (z[i] &^ m) | (x[i] & m)
- }
-}
-
-// leftShift shifts to the left the words of z returning the more significant word.
-func (z *scalar64) leftShift(low uint64) uint64 {
- high := z[_N-1]
- for i := _N - 1; i > 0; i-- {
- z[i] = z[i-1]
- }
- z[0] = low
- return high
-}
-
-// reduceOneWord calculates z = z + 2^448*x such that the result fits in a Scalar.
-func (z *scalar64) reduceOneWord(x uint64) {
- prod := (&scalar64{})[:]
- mulWord(prod, residue448[:], x)
- cc := add(z[:], z[:], prod)
- mulWord(prod, residue448[:], cc)
- add(z[:], z[:], prod)
-}
-
-// modOrder reduces z mod order.
-func (z *scalar64) modOrder() {
- var o64, x scalar64
- o64.fromScalar(&order)
- // Performs: while (z >= order) { z = z-order }
- // At most 8 (eight) iterations reduce 3 bits by subtracting.
- for i := 0; i < 8; i++ {
- c := sub(x[:], z[:], o64[:]) // (c || x) = z-order
- z.Cmov(1-c, &x) // if c != 0 { z = x }
- }
-}
-
-// FromBytes stores z = x mod order, where x is a number stored in little-endian order.
-func (z *Scalar) FromBytes(x []byte) {
- n := len(x)
- nCeil := (n + 7) >> 3
- for i := range z {
- z[i] = 0
- }
- if nCeil < _N {
- copy(z[:], x)
- return
- }
- copy(z[:], x[8*(nCeil-_N):])
- var z64 scalar64
- z64.fromScalar(z)
- for i := nCeil - _N - 1; i >= 0; i-- {
- low := binary.LittleEndian.Uint64(x[8*i:])
- high := z64.leftShift(low)
- z64.reduceOneWord(high)
- }
- z64.modOrder()
- z64.toScalar(z)
-}
-
-// divBy4 calculates z = x/4 mod order.
-func (z *Scalar) divBy4(x *Scalar) { z.Mul(x, &invFour) }
-
-// Red reduces z mod order.
-func (z *Scalar) Red() { var t scalar64; t.fromScalar(z); t.modOrder(); t.toScalar(z) }
-
-// Neg calculates z = -z mod order.
-func (z *Scalar) Neg() { z.Sub(&order, z) }
-
-// Add calculates z = x+y mod order.
-func (z *Scalar) Add(x, y *Scalar) {
- var z64, x64, y64, t scalar64
- x64.fromScalar(x)
- y64.fromScalar(y)
- c := add(z64[:], x64[:], y64[:])
- add(t[:], z64[:], residue448[:])
- z64.Cmov(c, &t)
- z64.modOrder()
- z64.toScalar(z)
-}
-
-// Sub calculates z = x-y mod order.
-func (z *Scalar) Sub(x, y *Scalar) {
- var z64, x64, y64, t scalar64
- x64.fromScalar(x)
- y64.fromScalar(y)
- c := sub(z64[:], x64[:], y64[:])
- sub(t[:], z64[:], residue448[:])
- z64.Cmov(c, &t)
- z64.modOrder()
- z64.toScalar(z)
-}
-
-// Mul calculates z = x*y mod order.
-func (z *Scalar) Mul(x, y *Scalar) {
- var z64, x64, y64 scalar64
- prod := (&[_N + 1]uint64{})[:]
- x64.fromScalar(x)
- y64.fromScalar(y)
- mulWord(prod, x64[:], y64[_N-1])
- copy(z64[:], prod[:_N])
- z64.reduceOneWord(prod[_N])
- for i := _N - 2; i >= 0; i-- {
- h := z64.leftShift(0)
- z64.reduceOneWord(h)
- mulWord(prod, x64[:], y64[i])
- c := add(z64[:], z64[:], prod[:_N])
- z64.reduceOneWord(prod[_N] + c)
- }
- z64.modOrder()
- z64.toScalar(z)
-}
-
-// IsZero returns true if z=0.
-func (z *Scalar) IsZero() bool { z.Red(); return *z == Scalar{} }
diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist.go
deleted file mode 100644
index 83d7cdadd3e..00000000000
--- a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist.go
+++ /dev/null
@@ -1,138 +0,0 @@
-package goldilocks
-
-import (
- "crypto/subtle"
- "math/bits"
-
- "github.com/cloudflare/circl/internal/conv"
- "github.com/cloudflare/circl/math"
- fp "github.com/cloudflare/circl/math/fp448"
-)
-
-// twistCurve is -x^2+y^2=1-39082x^2y^2 and is 4-isogenous to Goldilocks.
-type twistCurve struct{}
-
-// Identity returns the identity point.
-func (twistCurve) Identity() *twistPoint {
- return &twistPoint{
- y: fp.One(),
- z: fp.One(),
- }
-}
-
-// subYDiv16 update x = (x - y) / 16.
-func subYDiv16(x *scalar64, y int64) {
- s := uint64(y >> 63)
- x0, b0 := bits.Sub64((*x)[0], uint64(y), 0)
- x1, b1 := bits.Sub64((*x)[1], s, b0)
- x2, b2 := bits.Sub64((*x)[2], s, b1)
- x3, b3 := bits.Sub64((*x)[3], s, b2)
- x4, b4 := bits.Sub64((*x)[4], s, b3)
- x5, b5 := bits.Sub64((*x)[5], s, b4)
- x6, _ := bits.Sub64((*x)[6], s, b5)
- x[0] = (x0 >> 4) | (x1 << 60)
- x[1] = (x1 >> 4) | (x2 << 60)
- x[2] = (x2 >> 4) | (x3 << 60)
- x[3] = (x3 >> 4) | (x4 << 60)
- x[4] = (x4 >> 4) | (x5 << 60)
- x[5] = (x5 >> 4) | (x6 << 60)
- x[6] = (x6 >> 4)
-}
-
-func recodeScalar(d *[113]int8, k *Scalar) {
- var k64 scalar64
- k64.fromScalar(k)
- for i := 0; i < 112; i++ {
- d[i] = int8((k64[0] & 0x1f) - 16)
- subYDiv16(&k64, int64(d[i]))
- }
- d[112] = int8(k64[0])
-}
-
-// ScalarMult returns kP.
-func (e twistCurve) ScalarMult(k *Scalar, P *twistPoint) *twistPoint {
- var TabP [8]preTwistPointProy
- var S preTwistPointProy
- var d [113]int8
-
- var isZero int
- if k.IsZero() {
- isZero = 1
- }
- subtle.ConstantTimeCopy(isZero, k[:], order[:])
-
- minusK := *k
- isEven := 1 - int(k[0]&0x1)
- minusK.Neg()
- subtle.ConstantTimeCopy(isEven, k[:], minusK[:])
- recodeScalar(&d, k)
-
- P.oddMultiples(TabP[:])
- Q := e.Identity()
- for i := 112; i >= 0; i-- {
- Q.Double()
- Q.Double()
- Q.Double()
- Q.Double()
- mask := d[i] >> 7
- absDi := (d[i] + mask) ^ mask
- inx := int32((absDi - 1) >> 1)
- sig := int((d[i] >> 7) & 0x1)
- for j := range TabP {
- S.cmov(&TabP[j], uint(subtle.ConstantTimeEq(inx, int32(j))))
- }
- S.cneg(sig)
- Q.mixAdd(&S)
- }
- Q.cneg(uint(isEven))
- return Q
-}
-
-const (
- omegaFix = 7
- omegaVar = 5
-)
-
-// CombinedMult returns mG+nP.
-func (e twistCurve) CombinedMult(m, n *Scalar, P *twistPoint) *twistPoint {
- nafFix := math.OmegaNAF(conv.BytesLe2BigInt(m[:]), omegaFix)
- nafVar := math.OmegaNAF(conv.BytesLe2BigInt(n[:]), omegaVar)
-
- if len(nafFix) > len(nafVar) {
- nafVar = append(nafVar, make([]int32, len(nafFix)-len(nafVar))...)
- } else if len(nafFix) < len(nafVar) {
- nafFix = append(nafFix, make([]int32, len(nafVar)-len(nafFix))...)
- }
-
- var TabQ [1 << (omegaVar - 2)]preTwistPointProy
- P.oddMultiples(TabQ[:])
- Q := e.Identity()
- for i := len(nafFix) - 1; i >= 0; i-- {
- Q.Double()
- // Generator point
- if nafFix[i] != 0 {
- idxM := absolute(nafFix[i]) >> 1
- R := tabVerif[idxM]
- if nafFix[i] < 0 {
- R.neg()
- }
- Q.mixAddZ1(&R)
- }
- // Variable input point
- if nafVar[i] != 0 {
- idxN := absolute(nafVar[i]) >> 1
- S := TabQ[idxN]
- if nafVar[i] < 0 {
- S.neg()
- }
- Q.mixAdd(&S)
- }
- }
- return Q
-}
-
-// absolute returns always a positive value.
-func absolute(x int32) int32 {
- mask := x >> 31
- return (x + mask) ^ mask
-}
diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistPoint.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistPoint.go
deleted file mode 100644
index c55db77b069..00000000000
--- a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistPoint.go
+++ /dev/null
@@ -1,135 +0,0 @@
-package goldilocks
-
-import (
- "fmt"
-
- fp "github.com/cloudflare/circl/math/fp448"
-)
-
-type twistPoint struct{ x, y, z, ta, tb fp.Elt }
-
-type preTwistPointAffine struct{ addYX, subYX, dt2 fp.Elt }
-
-type preTwistPointProy struct {
- preTwistPointAffine
- z2 fp.Elt
-}
-
-func (P *twistPoint) String() string {
- return fmt.Sprintf("x: %v\ny: %v\nz: %v\nta: %v\ntb: %v", P.x, P.y, P.z, P.ta, P.tb)
-}
-
-// cneg conditionally negates the point if b=1.
-func (P *twistPoint) cneg(b uint) {
- t := &fp.Elt{}
- fp.Neg(t, &P.x)
- fp.Cmov(&P.x, t, b)
- fp.Neg(t, &P.ta)
- fp.Cmov(&P.ta, t, b)
-}
-
-// Double updates P with 2P.
-func (P *twistPoint) Double() {
- // This is formula (7) from "Twisted Edwards Curves Revisited" by
- // Hisil H., Wong K.KH., Carter G., Dawson E. (2008)
- // https://doi.org/10.1007/978-3-540-89255-7_20
- Px, Py, Pz, Pta, Ptb := &P.x, &P.y, &P.z, &P.ta, &P.tb
- a, b, c, e, f, g, h := Px, Py, Pz, Pta, Px, Py, Ptb
- fp.Add(e, Px, Py) // x+y
- fp.Sqr(a, Px) // A = x^2
- fp.Sqr(b, Py) // B = y^2
- fp.Sqr(c, Pz) // z^2
- fp.Add(c, c, c) // C = 2*z^2
- fp.Add(h, a, b) // H = A+B
- fp.Sqr(e, e) // (x+y)^2
- fp.Sub(e, e, h) // E = (x+y)^2-A-B
- fp.Sub(g, b, a) // G = B-A
- fp.Sub(f, c, g) // F = C-G
- fp.Mul(Pz, f, g) // Z = F * G
- fp.Mul(Px, e, f) // X = E * F
- fp.Mul(Py, g, h) // Y = G * H, T = E * H
-}
-
-// mixAdd calculates P= P+Q, where Q is a precomputed point with Z_Q = 1.
-func (P *twistPoint) mixAddZ1(Q *preTwistPointAffine) {
- fp.Add(&P.z, &P.z, &P.z) // D = 2*z1 (z2=1)
- P.coreAddition(Q)
-}
-
-// coreAddition calculates P=P+Q for curves with A=-1.
-func (P *twistPoint) coreAddition(Q *preTwistPointAffine) {
- // This is the formula following (5) from "Twisted Edwards Curves Revisited" by
- // Hisil H., Wong K.KH., Carter G., Dawson E. (2008)
- // https://doi.org/10.1007/978-3-540-89255-7_20
- Px, Py, Pz, Pta, Ptb := &P.x, &P.y, &P.z, &P.ta, &P.tb
- addYX2, subYX2, dt2 := &Q.addYX, &Q.subYX, &Q.dt2
- a, b, c, d, e, f, g, h := Px, Py, &fp.Elt{}, Pz, Pta, Px, Py, Ptb
- fp.Mul(c, Pta, Ptb) // t1 = ta*tb
- fp.Sub(h, Py, Px) // y1-x1
- fp.Add(b, Py, Px) // y1+x1
- fp.Mul(a, h, subYX2) // A = (y1-x1)*(y2-x2)
- fp.Mul(b, b, addYX2) // B = (y1+x1)*(y2+x2)
- fp.Mul(c, c, dt2) // C = 2*D*t1*t2
- fp.Sub(e, b, a) // E = B-A
- fp.Add(h, b, a) // H = B+A
- fp.Sub(f, d, c) // F = D-C
- fp.Add(g, d, c) // G = D+C
- fp.Mul(Pz, f, g) // Z = F * G
- fp.Mul(Px, e, f) // X = E * F
- fp.Mul(Py, g, h) // Y = G * H, T = E * H
-}
-
-func (P *preTwistPointAffine) neg() {
- P.addYX, P.subYX = P.subYX, P.addYX
- fp.Neg(&P.dt2, &P.dt2)
-}
-
-func (P *preTwistPointAffine) cneg(b int) {
- t := &fp.Elt{}
- fp.Cswap(&P.addYX, &P.subYX, uint(b))
- fp.Neg(t, &P.dt2)
- fp.Cmov(&P.dt2, t, uint(b))
-}
-
-func (P *preTwistPointAffine) cmov(Q *preTwistPointAffine, b uint) {
- fp.Cmov(&P.addYX, &Q.addYX, b)
- fp.Cmov(&P.subYX, &Q.subYX, b)
- fp.Cmov(&P.dt2, &Q.dt2, b)
-}
-
-// mixAdd calculates P= P+Q, where Q is a precomputed point with Z_Q != 1.
-func (P *twistPoint) mixAdd(Q *preTwistPointProy) {
- fp.Mul(&P.z, &P.z, &Q.z2) // D = 2*z1*z2
- P.coreAddition(&Q.preTwistPointAffine)
-}
-
-// oddMultiples calculates T[i] = (2*i-1)P for 0 < i < len(T).
-func (P *twistPoint) oddMultiples(T []preTwistPointProy) {
- if n := len(T); n > 0 {
- T[0].FromTwistPoint(P)
- _2P := *P
- _2P.Double()
- R := &preTwistPointProy{}
- R.FromTwistPoint(&_2P)
- for i := 1; i < n; i++ {
- P.mixAdd(R)
- T[i].FromTwistPoint(P)
- }
- }
-}
-
-// cmov conditionally moves Q into P if b=1.
-func (P *preTwistPointProy) cmov(Q *preTwistPointProy, b uint) {
- P.preTwistPointAffine.cmov(&Q.preTwistPointAffine, b)
- fp.Cmov(&P.z2, &Q.z2, b)
-}
-
-// FromTwistPoint precomputes some coordinates of Q for missed addition.
-func (P *preTwistPointProy) FromTwistPoint(Q *twistPoint) {
- fp.Add(&P.addYX, &Q.y, &Q.x) // addYX = X + Y
- fp.Sub(&P.subYX, &Q.y, &Q.x) // subYX = Y - X
- fp.Mul(&P.dt2, &Q.ta, &Q.tb) // T = ta*tb
- fp.Mul(&P.dt2, &P.dt2, ¶mDTwist) // D*T
- fp.Add(&P.dt2, &P.dt2, &P.dt2) // dt2 = 2*D*T
- fp.Add(&P.z2, &Q.z, &Q.z) // z2 = 2*Z
-}
diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistTables.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistTables.go
deleted file mode 100644
index ed432e02c78..00000000000
--- a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistTables.go
+++ /dev/null
@@ -1,216 +0,0 @@
-package goldilocks
-
-import fp "github.com/cloudflare/circl/math/fp448"
-
-var tabFixMult = [fxV][fx2w1]preTwistPointAffine{
- {
- {
- addYX: fp.Elt{0x65, 0x4a, 0xdd, 0xdf, 0xb4, 0x79, 0x60, 0xc8, 0xa1, 0x70, 0xb4, 0x3a, 0x1e, 0x0c, 0x9b, 0x19, 0xe5, 0x48, 0x3f, 0xd7, 0x44, 0x18, 0x18, 0x14, 0x14, 0x27, 0x45, 0xd0, 0x2b, 0x24, 0xd5, 0x93, 0xc3, 0x74, 0x4c, 0x50, 0x70, 0x43, 0x26, 0x05, 0x08, 0x24, 0xca, 0x78, 0x30, 0xc1, 0x06, 0x8d, 0xd4, 0x86, 0x42, 0xf0, 0x14, 0xde, 0x08, 0x05},
- subYX: fp.Elt{0x64, 0x4a, 0xdd, 0xdf, 0xb4, 0x79, 0x60, 0xc8, 0xa1, 0x70, 0xb4, 0x3a, 0x1e, 0x0c, 0x9b, 0x19, 0xe5, 0x48, 0x3f, 0xd7, 0x44, 0x18, 0x18, 0x14, 0x14, 0x27, 0x45, 0xd0, 0x2d, 0x24, 0xd5, 0x93, 0xc3, 0x74, 0x4c, 0x50, 0x70, 0x43, 0x26, 0x05, 0x08, 0x24, 0xca, 0x78, 0x30, 0xc1, 0x06, 0x8d, 0xd4, 0x86, 0x42, 0xf0, 0x14, 0xde, 0x08, 0x05},
- dt2: fp.Elt{0x1a, 0x33, 0xea, 0x64, 0x45, 0x1c, 0xdf, 0x17, 0x1d, 0x16, 0x34, 0x28, 0xd6, 0x61, 0x19, 0x67, 0x79, 0xb4, 0x13, 0xcf, 0x3e, 0x7c, 0x0e, 0x72, 0xda, 0xf1, 0x5f, 0xda, 0xe6, 0xcf, 0x42, 0xd3, 0xb6, 0x17, 0xc2, 0x68, 0x13, 0x2d, 0xd9, 0x60, 0x3e, 0xae, 0xf0, 0x5b, 0x96, 0xf0, 0xcd, 0xaf, 0xea, 0xb7, 0x0d, 0x59, 0x16, 0xa7, 0xff, 0x55},
- },
- {
- addYX: fp.Elt{0xca, 0xd8, 0x7d, 0x86, 0x1a, 0xef, 0xad, 0x11, 0xe3, 0x27, 0x41, 0x7e, 0x7f, 0x3e, 0xa9, 0xd2, 0xb5, 0x4e, 0x50, 0xe0, 0x77, 0x91, 0xc2, 0x13, 0x52, 0x73, 0x41, 0x09, 0xa6, 0x57, 0x9a, 0xc8, 0xa8, 0x90, 0x9d, 0x26, 0x14, 0xbb, 0xa1, 0x2a, 0xf7, 0x45, 0x43, 0x4e, 0xea, 0x35, 0x62, 0xe1, 0x08, 0x85, 0x46, 0xb8, 0x24, 0x05, 0x2d, 0xab},
- subYX: fp.Elt{0x9b, 0xe6, 0xd3, 0xe5, 0xfe, 0x50, 0x36, 0x3c, 0x3c, 0x6d, 0x74, 0x1d, 0x74, 0xc0, 0xde, 0x5b, 0x45, 0x27, 0xe5, 0x12, 0xee, 0x63, 0x35, 0x6b, 0x13, 0xe2, 0x41, 0x6b, 0x3a, 0x05, 0x2b, 0xb1, 0x89, 0x26, 0xb6, 0xc6, 0xd1, 0x84, 0xff, 0x0e, 0x9b, 0xa3, 0xfb, 0x21, 0x36, 0x6b, 0x01, 0xf7, 0x9f, 0x7c, 0xeb, 0xf5, 0x18, 0x7a, 0x2a, 0x70},
- dt2: fp.Elt{0x09, 0xad, 0x99, 0x1a, 0x38, 0xd3, 0xdf, 0x22, 0x37, 0x32, 0x61, 0x8b, 0xf3, 0x19, 0x48, 0x08, 0xe8, 0x49, 0xb6, 0x4a, 0xa7, 0xed, 0xa4, 0xa2, 0xee, 0x86, 0xd7, 0x31, 0x5e, 0xce, 0x95, 0x76, 0x86, 0x42, 0x1c, 0x9d, 0x07, 0x14, 0x8c, 0x34, 0x18, 0x9c, 0x6d, 0x3a, 0xdf, 0xa9, 0xe8, 0x36, 0x7e, 0xe4, 0x95, 0xbe, 0xb5, 0x09, 0xf8, 0x9c},
- },
- {
- addYX: fp.Elt{0x51, 0xdb, 0x49, 0xa8, 0x9f, 0xe3, 0xd7, 0xec, 0x0d, 0x0f, 0x49, 0xe8, 0xb6, 0xc5, 0x0f, 0x5a, 0x1c, 0xce, 0x54, 0x0d, 0xb1, 0x8d, 0x5b, 0xbf, 0xf4, 0xaa, 0x34, 0x77, 0xc4, 0x5d, 0x59, 0xb6, 0xc5, 0x0e, 0x5a, 0xd8, 0x5b, 0x30, 0xc2, 0x1d, 0xec, 0x85, 0x1c, 0x42, 0xbe, 0x24, 0x2e, 0x50, 0x55, 0x44, 0xb2, 0x3a, 0x01, 0xaa, 0x98, 0xfb},
- subYX: fp.Elt{0xe7, 0x29, 0xb7, 0xd0, 0xaa, 0x4f, 0x32, 0x53, 0x56, 0xde, 0xbc, 0xd1, 0x92, 0x5d, 0x19, 0xbe, 0xa3, 0xe3, 0x75, 0x48, 0xe0, 0x7a, 0x1b, 0x54, 0x7a, 0xb7, 0x41, 0x77, 0x84, 0x38, 0xdd, 0x14, 0x9f, 0xca, 0x3f, 0xa3, 0xc8, 0xa7, 0x04, 0x70, 0xf1, 0x4d, 0x3d, 0xb3, 0x84, 0x79, 0xcb, 0xdb, 0xe4, 0xc5, 0x42, 0x9b, 0x57, 0x19, 0xf1, 0x2d},
- dt2: fp.Elt{0x20, 0xb4, 0x94, 0x9e, 0xdf, 0x31, 0x44, 0x0b, 0xc9, 0x7b, 0x75, 0x40, 0x9d, 0xd1, 0x96, 0x39, 0x70, 0x71, 0x15, 0xc8, 0x93, 0xd5, 0xc5, 0xe5, 0xba, 0xfe, 0xee, 0x08, 0x6a, 0x98, 0x0a, 0x1b, 0xb2, 0xaa, 0x3a, 0xf4, 0xa4, 0x79, 0xf9, 0x8e, 0x4d, 0x65, 0x10, 0x9b, 0x3a, 0x6e, 0x7c, 0x87, 0x94, 0x92, 0x11, 0x65, 0xbf, 0x1a, 0x09, 0xde},
- },
- {
- addYX: fp.Elt{0xf3, 0x84, 0x76, 0x77, 0xa5, 0x6b, 0x27, 0x3b, 0x83, 0x3d, 0xdf, 0xa0, 0xeb, 0x32, 0x6d, 0x58, 0x81, 0x57, 0x64, 0xc2, 0x21, 0x7c, 0x9b, 0xea, 0xe6, 0xb0, 0x93, 0xf9, 0xe7, 0xc3, 0xed, 0x5a, 0x8e, 0xe2, 0xb4, 0x72, 0x76, 0x66, 0x0f, 0x22, 0x29, 0x94, 0x3e, 0x63, 0x48, 0x5e, 0x80, 0xcb, 0xac, 0xfa, 0x95, 0xb6, 0x4b, 0xc4, 0x95, 0x33},
- subYX: fp.Elt{0x0c, 0x55, 0xd1, 0x5e, 0x5f, 0xbf, 0xbf, 0xe2, 0x4c, 0xfc, 0x37, 0x4a, 0xc4, 0xb1, 0xf4, 0x83, 0x61, 0x93, 0x60, 0x8e, 0x9f, 0x31, 0xf0, 0xa0, 0x41, 0xff, 0x1d, 0xe2, 0x7f, 0xca, 0x40, 0xd6, 0x88, 0xe8, 0x91, 0x61, 0xe2, 0x11, 0x18, 0x83, 0xf3, 0x25, 0x2f, 0x3f, 0x49, 0x40, 0xd4, 0x83, 0xe2, 0xd7, 0x74, 0x6a, 0x16, 0x86, 0x4e, 0xab},
- dt2: fp.Elt{0xdd, 0x58, 0x65, 0xd8, 0x9f, 0xdd, 0x70, 0x7f, 0x0f, 0xec, 0xbd, 0x5c, 0x5c, 0x9b, 0x7e, 0x1b, 0x9f, 0x79, 0x36, 0x1f, 0xfd, 0x79, 0x10, 0x1c, 0x52, 0xf3, 0x22, 0xa4, 0x1f, 0x71, 0x6e, 0x63, 0x14, 0xf4, 0xa7, 0x3e, 0xbe, 0xad, 0x43, 0x30, 0x38, 0x8c, 0x29, 0xc6, 0xcf, 0x50, 0x75, 0x21, 0xe5, 0x78, 0xfd, 0xb0, 0x9a, 0xc4, 0x6d, 0xd4},
- },
- },
- {
- {
- addYX: fp.Elt{0x7a, 0xa1, 0x38, 0xa6, 0xfd, 0x0e, 0x96, 0xd5, 0x26, 0x76, 0x86, 0x70, 0x80, 0x30, 0xa6, 0x67, 0xeb, 0xf4, 0x39, 0xdb, 0x22, 0xf5, 0x9f, 0x98, 0xe4, 0xb5, 0x3a, 0x0c, 0x59, 0xbf, 0x85, 0xc6, 0xf0, 0x0b, 0x1c, 0x41, 0x38, 0x09, 0x01, 0xdb, 0xd6, 0x3c, 0xb7, 0xf1, 0x08, 0x6b, 0x4b, 0x9e, 0x63, 0x53, 0x83, 0xd3, 0xab, 0xa3, 0x72, 0x0d},
- subYX: fp.Elt{0x84, 0x68, 0x25, 0xe8, 0xe9, 0x8f, 0x91, 0xbf, 0xf7, 0xa4, 0x30, 0xae, 0xea, 0x9f, 0xdd, 0x56, 0x64, 0x09, 0xc9, 0x54, 0x68, 0x4e, 0x33, 0xc5, 0x6f, 0x7b, 0x2d, 0x52, 0x2e, 0x42, 0xbe, 0xbe, 0xf5, 0x64, 0xbf, 0x77, 0x54, 0xdf, 0xb0, 0x10, 0xd2, 0x16, 0x5d, 0xce, 0xaf, 0x9f, 0xfb, 0xa3, 0x63, 0x50, 0xcb, 0xc0, 0xd0, 0x88, 0x44, 0xa3},
- dt2: fp.Elt{0xc3, 0x8b, 0xa5, 0xf1, 0x44, 0xe4, 0x41, 0xcd, 0x75, 0xe3, 0x17, 0x69, 0x5b, 0xb9, 0xbb, 0xee, 0x82, 0xbb, 0xce, 0x57, 0xdf, 0x2a, 0x9c, 0x12, 0xab, 0x66, 0x08, 0x68, 0x05, 0x1b, 0x87, 0xee, 0x5d, 0x1e, 0x18, 0x14, 0x22, 0x4b, 0x99, 0x61, 0x75, 0x28, 0xe7, 0x65, 0x1c, 0x36, 0xb6, 0x18, 0x09, 0xa8, 0xdf, 0xef, 0x30, 0x35, 0xbc, 0x58},
- },
- {
- addYX: fp.Elt{0xc5, 0xd3, 0x0e, 0x6f, 0xaf, 0x06, 0x69, 0xc4, 0x07, 0x9e, 0x58, 0x6e, 0x3f, 0x49, 0xd9, 0x0a, 0x3c, 0x2c, 0x37, 0xcd, 0x27, 0x4d, 0x87, 0x91, 0x7a, 0xb0, 0x28, 0xad, 0x2f, 0x68, 0x92, 0x05, 0x97, 0xf1, 0x30, 0x5f, 0x4c, 0x10, 0x20, 0x30, 0xd3, 0x08, 0x3f, 0xc1, 0xc6, 0xb7, 0xb5, 0xd1, 0x71, 0x7b, 0xa8, 0x0a, 0xd8, 0xf5, 0x17, 0xcf},
- subYX: fp.Elt{0x64, 0xd4, 0x8f, 0x91, 0x40, 0xab, 0x6e, 0x1a, 0x62, 0x83, 0xdc, 0xd7, 0x30, 0x1a, 0x4a, 0x2a, 0x4c, 0x54, 0x86, 0x19, 0x81, 0x5d, 0x04, 0x52, 0xa3, 0xca, 0x82, 0x38, 0xdc, 0x1e, 0xf0, 0x7a, 0x78, 0x76, 0x49, 0x4f, 0x71, 0xc4, 0x74, 0x2f, 0xf0, 0x5b, 0x2e, 0x5e, 0xac, 0xef, 0x17, 0xe4, 0x8e, 0x6e, 0xed, 0x43, 0x23, 0x61, 0x99, 0x49},
- dt2: fp.Elt{0x64, 0x90, 0x72, 0x76, 0xf8, 0x2c, 0x7d, 0x57, 0xf9, 0x30, 0x5e, 0x7a, 0x10, 0x74, 0x19, 0x39, 0xd9, 0xaf, 0x0a, 0xf1, 0x43, 0xed, 0x88, 0x9c, 0x8b, 0xdc, 0x9b, 0x1c, 0x90, 0xe7, 0xf7, 0xa3, 0xa5, 0x0d, 0xc6, 0xbc, 0x30, 0xfb, 0x91, 0x1a, 0x51, 0xba, 0x2d, 0xbe, 0x89, 0xdf, 0x1d, 0xdc, 0x53, 0xa8, 0x82, 0x8a, 0xd3, 0x8d, 0x16, 0x68},
- },
- {
- addYX: fp.Elt{0xef, 0x5c, 0xe3, 0x74, 0xbf, 0x13, 0x4a, 0xbf, 0x66, 0x73, 0x64, 0xb7, 0xd4, 0xce, 0x98, 0x82, 0x05, 0xfa, 0x98, 0x0c, 0x0a, 0xae, 0xe5, 0x6b, 0x9f, 0xac, 0xbb, 0x6e, 0x1f, 0xcf, 0xff, 0xa6, 0x71, 0x9a, 0xa8, 0x7a, 0x9e, 0x64, 0x1f, 0x20, 0x4a, 0x61, 0xa2, 0xd6, 0x50, 0xe3, 0xba, 0x81, 0x0c, 0x50, 0x59, 0x69, 0x59, 0x15, 0x55, 0xdb},
- subYX: fp.Elt{0xe8, 0x77, 0x4d, 0xe8, 0x66, 0x3d, 0xc1, 0x00, 0x3c, 0xf2, 0x25, 0x00, 0xdc, 0xb2, 0xe5, 0x9b, 0x12, 0x89, 0xf3, 0xd6, 0xea, 0x85, 0x60, 0xfe, 0x67, 0x91, 0xfd, 0x04, 0x7c, 0xe0, 0xf1, 0x86, 0x06, 0x11, 0x66, 0xee, 0xd4, 0xd5, 0xbe, 0x3b, 0x0f, 0xe3, 0x59, 0xb3, 0x4f, 0x00, 0xb6, 0xce, 0x80, 0xc1, 0x61, 0xf7, 0xaf, 0x04, 0x6a, 0x3c},
- dt2: fp.Elt{0x00, 0xd7, 0x32, 0x93, 0x67, 0x70, 0x6f, 0xd7, 0x69, 0xab, 0xb1, 0xd3, 0xdc, 0xd6, 0xa8, 0xdd, 0x35, 0x25, 0xca, 0xd3, 0x8a, 0x6d, 0xce, 0xfb, 0xfd, 0x2b, 0x83, 0xf0, 0xd4, 0xac, 0x66, 0xfb, 0x72, 0x87, 0x7e, 0x55, 0xb7, 0x91, 0x58, 0x10, 0xc3, 0x11, 0x7e, 0x15, 0xfe, 0x7c, 0x55, 0x90, 0xa3, 0x9e, 0xed, 0x9a, 0x7f, 0xa7, 0xb7, 0xeb},
- },
- {
- addYX: fp.Elt{0x25, 0x0f, 0xc2, 0x09, 0x9c, 0x10, 0xc8, 0x7c, 0x93, 0xa7, 0xbe, 0xe9, 0x26, 0x25, 0x7c, 0x21, 0xfe, 0xe7, 0x5f, 0x3c, 0x02, 0x83, 0xa7, 0x9e, 0xdf, 0xc0, 0x94, 0x2b, 0x7d, 0x1a, 0xd0, 0x1d, 0xcc, 0x2e, 0x7d, 0xd4, 0x85, 0xe7, 0xc1, 0x15, 0x66, 0xd6, 0xd6, 0x32, 0xb8, 0xf7, 0x63, 0xaa, 0x3b, 0xa5, 0xea, 0x49, 0xad, 0x88, 0x9b, 0x66},
- subYX: fp.Elt{0x09, 0x97, 0x79, 0x36, 0x41, 0x56, 0x9b, 0xdf, 0x15, 0xd8, 0x43, 0x28, 0x17, 0x5b, 0x96, 0xc9, 0xcf, 0x39, 0x1f, 0x13, 0xf7, 0x4d, 0x1d, 0x1f, 0xda, 0x51, 0x56, 0xe7, 0x0a, 0x5a, 0x65, 0xb6, 0x2a, 0x87, 0x49, 0x86, 0xc2, 0x2b, 0xcd, 0xfe, 0x07, 0xf6, 0x4c, 0xe2, 0x1d, 0x9b, 0xd8, 0x82, 0x09, 0x5b, 0x11, 0x10, 0x62, 0x56, 0x89, 0xbd},
- dt2: fp.Elt{0xd9, 0x15, 0x73, 0xf2, 0x96, 0x35, 0x53, 0xb0, 0xe7, 0xa8, 0x0b, 0x93, 0x35, 0x0b, 0x3a, 0x00, 0xf5, 0x18, 0xb1, 0xc3, 0x12, 0x3f, 0x91, 0x17, 0xc1, 0x4c, 0x15, 0x5a, 0x86, 0x92, 0x11, 0xbd, 0x44, 0x40, 0x5a, 0x7b, 0x15, 0x89, 0xba, 0xc1, 0xc1, 0xbc, 0x43, 0x45, 0xe6, 0x52, 0x02, 0x73, 0x0a, 0xd0, 0x2a, 0x19, 0xda, 0x47, 0xa8, 0xff},
- },
- },
-}
-
-// tabVerif contains the odd multiples of P. The entry T[i] = (2i+1)P, where
-// P = phi(G) and G is the generator of the Goldilocks curve, and phi is a
-// 4-degree isogeny.
-var tabVerif = [1 << (omegaFix - 2)]preTwistPointAffine{
- { /* 1P*/
- addYX: fp.Elt{0x65, 0x4a, 0xdd, 0xdf, 0xb4, 0x79, 0x60, 0xc8, 0xa1, 0x70, 0xb4, 0x3a, 0x1e, 0x0c, 0x9b, 0x19, 0xe5, 0x48, 0x3f, 0xd7, 0x44, 0x18, 0x18, 0x14, 0x14, 0x27, 0x45, 0xd0, 0x2b, 0x24, 0xd5, 0x93, 0xc3, 0x74, 0x4c, 0x50, 0x70, 0x43, 0x26, 0x05, 0x08, 0x24, 0xca, 0x78, 0x30, 0xc1, 0x06, 0x8d, 0xd4, 0x86, 0x42, 0xf0, 0x14, 0xde, 0x08, 0x05},
- subYX: fp.Elt{0x64, 0x4a, 0xdd, 0xdf, 0xb4, 0x79, 0x60, 0xc8, 0xa1, 0x70, 0xb4, 0x3a, 0x1e, 0x0c, 0x9b, 0x19, 0xe5, 0x48, 0x3f, 0xd7, 0x44, 0x18, 0x18, 0x14, 0x14, 0x27, 0x45, 0xd0, 0x2d, 0x24, 0xd5, 0x93, 0xc3, 0x74, 0x4c, 0x50, 0x70, 0x43, 0x26, 0x05, 0x08, 0x24, 0xca, 0x78, 0x30, 0xc1, 0x06, 0x8d, 0xd4, 0x86, 0x42, 0xf0, 0x14, 0xde, 0x08, 0x05},
- dt2: fp.Elt{0x1a, 0x33, 0xea, 0x64, 0x45, 0x1c, 0xdf, 0x17, 0x1d, 0x16, 0x34, 0x28, 0xd6, 0x61, 0x19, 0x67, 0x79, 0xb4, 0x13, 0xcf, 0x3e, 0x7c, 0x0e, 0x72, 0xda, 0xf1, 0x5f, 0xda, 0xe6, 0xcf, 0x42, 0xd3, 0xb6, 0x17, 0xc2, 0x68, 0x13, 0x2d, 0xd9, 0x60, 0x3e, 0xae, 0xf0, 0x5b, 0x96, 0xf0, 0xcd, 0xaf, 0xea, 0xb7, 0x0d, 0x59, 0x16, 0xa7, 0xff, 0x55},
- },
- { /* 3P*/
- addYX: fp.Elt{0xd1, 0xe9, 0xa8, 0x33, 0x20, 0x76, 0x18, 0x08, 0x45, 0x2a, 0xc9, 0x67, 0x2a, 0xc3, 0x15, 0x24, 0xf9, 0x74, 0x21, 0x30, 0x99, 0x59, 0x8b, 0xb2, 0xf0, 0xa4, 0x07, 0xe2, 0x6a, 0x36, 0x8d, 0xd9, 0xd2, 0x4a, 0x7f, 0x73, 0x50, 0x39, 0x3d, 0xaa, 0xa7, 0x51, 0x73, 0x0d, 0x2b, 0x8b, 0x96, 0x47, 0xac, 0x3c, 0x5d, 0xaa, 0x39, 0x9c, 0xcf, 0xd5},
- subYX: fp.Elt{0x6b, 0x11, 0x5d, 0x1a, 0xf9, 0x41, 0x9d, 0xc5, 0x30, 0x3e, 0xad, 0x25, 0x2c, 0x04, 0x45, 0xea, 0xcc, 0x67, 0x07, 0x85, 0xe9, 0xda, 0x0e, 0xb5, 0x40, 0xb7, 0x32, 0xb4, 0x49, 0xdd, 0xff, 0xaa, 0xfc, 0xbb, 0x19, 0xca, 0x8b, 0x79, 0x2b, 0x8f, 0x8d, 0x00, 0x33, 0xc2, 0xad, 0xe9, 0xd3, 0x12, 0xa8, 0xaa, 0x87, 0x62, 0xad, 0x2d, 0xff, 0xa4},
- dt2: fp.Elt{0xb0, 0xaf, 0x3b, 0xea, 0xf0, 0x42, 0x0b, 0x5e, 0x88, 0xd3, 0x98, 0x08, 0x87, 0x59, 0x72, 0x0a, 0xc2, 0xdf, 0xcb, 0x7f, 0x59, 0xb5, 0x4c, 0x63, 0x68, 0xe8, 0x41, 0x38, 0x67, 0x4f, 0xe9, 0xc6, 0xb2, 0x6b, 0x08, 0xa7, 0xf7, 0x0e, 0xcd, 0xea, 0xca, 0x3d, 0xaf, 0x8e, 0xda, 0x4b, 0x2e, 0xd2, 0x88, 0x64, 0x8d, 0xc5, 0x5f, 0x76, 0x0f, 0x3d},
- },
- { /* 5P*/
- addYX: fp.Elt{0xe5, 0x65, 0xc9, 0xe2, 0x75, 0xf0, 0x7d, 0x1a, 0xba, 0xa4, 0x40, 0x4b, 0x93, 0x12, 0xa2, 0x80, 0x95, 0x0d, 0x03, 0x93, 0xe8, 0xa5, 0x4d, 0xe2, 0x3d, 0x81, 0xf5, 0xce, 0xd4, 0x2d, 0x25, 0x59, 0x16, 0x5c, 0xe7, 0xda, 0xc7, 0x45, 0xd2, 0x7e, 0x2c, 0x38, 0xd4, 0x37, 0x64, 0xb2, 0xc2, 0x28, 0xc5, 0x72, 0x16, 0x32, 0x45, 0x36, 0x6f, 0x9f},
- subYX: fp.Elt{0x09, 0xf4, 0x7e, 0xbd, 0x89, 0xdb, 0x19, 0x58, 0xe1, 0x08, 0x00, 0x8a, 0xf4, 0x5f, 0x2a, 0x32, 0x40, 0xf0, 0x2c, 0x3f, 0x5d, 0xe4, 0xfc, 0x89, 0x11, 0x24, 0xb4, 0x2f, 0x97, 0xad, 0xac, 0x8f, 0x19, 0xab, 0xfa, 0x12, 0xe5, 0xf9, 0x50, 0x4e, 0x50, 0x6f, 0x32, 0x30, 0x88, 0xa6, 0xe5, 0x48, 0x28, 0xa2, 0x1b, 0x9f, 0xcd, 0xe2, 0x43, 0x38},
- dt2: fp.Elt{0xa9, 0xcc, 0x53, 0x39, 0x86, 0x02, 0x60, 0x75, 0x34, 0x99, 0x57, 0xbd, 0xfc, 0x5a, 0x8e, 0xce, 0x5e, 0x98, 0x22, 0xd0, 0xa5, 0x24, 0xff, 0x90, 0x28, 0x9f, 0x58, 0xf3, 0x39, 0xe9, 0xba, 0x36, 0x23, 0xfb, 0x7f, 0x41, 0xcc, 0x2b, 0x5a, 0x25, 0x3f, 0x4c, 0x2a, 0xf1, 0x52, 0x6f, 0x2f, 0x07, 0xe3, 0x88, 0x81, 0x77, 0xdd, 0x7c, 0x88, 0x82},
- },
- { /* 7P*/
- addYX: fp.Elt{0xf7, 0xee, 0x88, 0xfd, 0x3a, 0xbf, 0x7e, 0x28, 0x39, 0x23, 0x79, 0xe6, 0x5c, 0x56, 0xcb, 0xb5, 0x48, 0x6a, 0x80, 0x6d, 0x37, 0x60, 0x6c, 0x10, 0x35, 0x49, 0x4b, 0x46, 0x60, 0xd4, 0x79, 0xd4, 0x53, 0xd3, 0x67, 0x88, 0xd0, 0x41, 0xd5, 0x43, 0x85, 0xc8, 0x71, 0xe3, 0x1c, 0xb6, 0xda, 0x22, 0x64, 0x8f, 0x80, 0xac, 0xad, 0x7d, 0xd5, 0x82},
- subYX: fp.Elt{0x92, 0x40, 0xc1, 0x83, 0x21, 0x9b, 0xd5, 0x7d, 0x3f, 0x29, 0xb6, 0x26, 0xef, 0x12, 0xb9, 0x27, 0x39, 0x42, 0x37, 0x97, 0x09, 0x9a, 0x08, 0xe1, 0x68, 0xb6, 0x7a, 0x3f, 0x9f, 0x45, 0xf8, 0x37, 0x19, 0x83, 0x97, 0xe6, 0x73, 0x30, 0x32, 0x35, 0xcf, 0xae, 0x5c, 0x12, 0x68, 0xdf, 0x6e, 0x2b, 0xde, 0x83, 0xa0, 0x44, 0x74, 0x2e, 0x4a, 0xe9},
- dt2: fp.Elt{0xcb, 0x22, 0x0a, 0xda, 0x6b, 0xc1, 0x8a, 0x29, 0xa1, 0xac, 0x8b, 0x5b, 0x8b, 0x32, 0x20, 0xf2, 0x21, 0xae, 0x0c, 0x43, 0xc4, 0xd7, 0x19, 0x37, 0x3d, 0x79, 0x25, 0x98, 0x6c, 0x9c, 0x22, 0x31, 0x2a, 0x55, 0x9f, 0xda, 0x5e, 0xa8, 0x13, 0xdb, 0x8e, 0x2e, 0x16, 0x39, 0xf4, 0x91, 0x6f, 0xec, 0x71, 0x71, 0xc9, 0x10, 0xf2, 0xa4, 0x8f, 0x11},
- },
- { /* 9P*/
- addYX: fp.Elt{0x85, 0xdd, 0x37, 0x62, 0x74, 0x8e, 0x33, 0x5b, 0x25, 0x12, 0x1b, 0xe7, 0xdf, 0x47, 0xe5, 0x12, 0xfd, 0x3a, 0x3a, 0xf5, 0x5d, 0x4c, 0xa2, 0x29, 0x3c, 0x5c, 0x2f, 0xee, 0x18, 0x19, 0x0a, 0x2b, 0xef, 0x67, 0x50, 0x7a, 0x0d, 0x29, 0xae, 0x55, 0x82, 0xcd, 0xd6, 0x41, 0x90, 0xb4, 0x13, 0x31, 0x5d, 0x11, 0xb8, 0xaa, 0x12, 0x86, 0x08, 0xac},
- subYX: fp.Elt{0xcc, 0x37, 0x8d, 0x83, 0x5f, 0xfd, 0xde, 0xd5, 0xf7, 0xf1, 0xae, 0x0a, 0xa7, 0x0b, 0xeb, 0x6d, 0x19, 0x8a, 0xb6, 0x1a, 0x59, 0xd8, 0xff, 0x3c, 0xbc, 0xbc, 0xef, 0x9c, 0xda, 0x7b, 0x75, 0x12, 0xaf, 0x80, 0x8f, 0x2c, 0x3c, 0xaa, 0x0b, 0x17, 0x86, 0x36, 0x78, 0x18, 0xc8, 0x8a, 0xf6, 0xb8, 0x2c, 0x2f, 0x57, 0x2c, 0x62, 0x57, 0xf6, 0x90},
- dt2: fp.Elt{0x83, 0xbc, 0xa2, 0x07, 0xa5, 0x38, 0x96, 0xea, 0xfe, 0x11, 0x46, 0x1d, 0x3b, 0xcd, 0x42, 0xc5, 0xee, 0x67, 0x04, 0x72, 0x08, 0xd8, 0xd9, 0x96, 0x07, 0xf7, 0xac, 0xc3, 0x64, 0xf1, 0x98, 0x2c, 0x55, 0xd7, 0x7d, 0xc8, 0x6c, 0xbd, 0x2c, 0xff, 0x15, 0xd6, 0x6e, 0xb8, 0x17, 0x8e, 0xa8, 0x27, 0x66, 0xb1, 0x73, 0x79, 0x96, 0xff, 0x29, 0x10},
- },
- { /* 11P*/
- addYX: fp.Elt{0x76, 0xcb, 0x9b, 0x0c, 0x5b, 0xfe, 0xe1, 0x2a, 0xdd, 0x6f, 0x6c, 0xdd, 0x6f, 0xb4, 0xc0, 0xc2, 0x1b, 0x4b, 0x38, 0xe8, 0x66, 0x8c, 0x1e, 0x31, 0x63, 0xb9, 0x94, 0xcd, 0xc3, 0x8c, 0x44, 0x25, 0x7b, 0xd5, 0x39, 0x80, 0xfc, 0x01, 0xaa, 0xf7, 0x2a, 0x61, 0x8a, 0x25, 0xd2, 0x5f, 0xc5, 0x66, 0x38, 0xa4, 0x17, 0xcf, 0x3e, 0x11, 0x0f, 0xa3},
- subYX: fp.Elt{0xe0, 0xb6, 0xd1, 0x9c, 0x71, 0x49, 0x2e, 0x7b, 0xde, 0x00, 0xda, 0x6b, 0xf1, 0xec, 0xe6, 0x7a, 0x15, 0x38, 0x71, 0xe9, 0x7b, 0xdb, 0xf8, 0x98, 0xc0, 0x91, 0x2e, 0x53, 0xee, 0x92, 0x87, 0x25, 0xc9, 0xb0, 0xbb, 0x33, 0x15, 0x46, 0x7f, 0xfd, 0x4f, 0x8b, 0x77, 0x05, 0x96, 0xb6, 0xe2, 0x08, 0xdb, 0x0d, 0x09, 0xee, 0x5b, 0xd1, 0x2a, 0x63},
- dt2: fp.Elt{0x8f, 0x7b, 0x57, 0x8c, 0xbf, 0x06, 0x0d, 0x43, 0x21, 0x92, 0x94, 0x2d, 0x6a, 0x38, 0x07, 0x0f, 0xa0, 0xf1, 0xe3, 0xd8, 0x2a, 0xbf, 0x46, 0xc6, 0x9e, 0x1f, 0x8f, 0x2b, 0x46, 0x84, 0x0b, 0x74, 0xed, 0xff, 0xf8, 0xa5, 0x94, 0xae, 0xf1, 0x67, 0xb1, 0x9b, 0xdd, 0x4a, 0xd0, 0xdb, 0xc2, 0xb5, 0x58, 0x49, 0x0c, 0xa9, 0x1d, 0x7d, 0xa9, 0xd3},
- },
- { /* 13P*/
- addYX: fp.Elt{0x73, 0x84, 0x2e, 0x31, 0x1f, 0xdc, 0xed, 0x9f, 0x74, 0xfa, 0xe0, 0x35, 0xb1, 0x85, 0x6a, 0x8d, 0x86, 0xd0, 0xff, 0xd6, 0x08, 0x43, 0x73, 0x1a, 0xd5, 0xf8, 0x43, 0xd4, 0xb3, 0xe5, 0x3f, 0xa8, 0x84, 0x17, 0x59, 0x65, 0x4e, 0xe6, 0xee, 0x54, 0x9c, 0xda, 0x5e, 0x7e, 0x98, 0x29, 0x6d, 0x73, 0x34, 0x1f, 0x99, 0x80, 0x54, 0x54, 0x81, 0x0b},
- subYX: fp.Elt{0xb1, 0xe5, 0xbb, 0x80, 0x22, 0x9c, 0x81, 0x6d, 0xaf, 0x27, 0x65, 0x6f, 0x7e, 0x9c, 0xb6, 0x8d, 0x35, 0x5c, 0x2e, 0x20, 0x48, 0x7a, 0x28, 0xf0, 0x97, 0xfe, 0xb7, 0x71, 0xce, 0xd6, 0xad, 0x3a, 0x81, 0xf6, 0x74, 0x5e, 0xf3, 0xfd, 0x1b, 0xd4, 0x1e, 0x7c, 0xc2, 0xb7, 0xc8, 0xa6, 0xc9, 0x89, 0x03, 0x47, 0xec, 0x24, 0xd6, 0x0e, 0xec, 0x9c},
- dt2: fp.Elt{0x91, 0x0a, 0x43, 0x34, 0x20, 0xc2, 0x64, 0xf7, 0x4e, 0x48, 0xc8, 0xd2, 0x95, 0x83, 0xd1, 0xa4, 0xfb, 0x4e, 0x41, 0x3b, 0x0d, 0xd5, 0x07, 0xd9, 0xf1, 0x13, 0x16, 0x78, 0x54, 0x57, 0xd0, 0xf1, 0x4f, 0x20, 0xac, 0xcf, 0x9c, 0x3b, 0x33, 0x0b, 0x99, 0x54, 0xc3, 0x7f, 0x3e, 0x57, 0x26, 0x86, 0xd5, 0xa5, 0x2b, 0x8d, 0xe3, 0x19, 0x36, 0xf7},
- },
- { /* 15P*/
- addYX: fp.Elt{0x23, 0x69, 0x47, 0x14, 0xf9, 0x9a, 0x50, 0xff, 0x64, 0xd1, 0x50, 0x35, 0xc3, 0x11, 0xd3, 0x19, 0xcf, 0x87, 0xda, 0x30, 0x0b, 0x50, 0xda, 0xc0, 0xe0, 0x25, 0x00, 0xe5, 0x68, 0x93, 0x04, 0xc2, 0xaf, 0xbd, 0x2f, 0x36, 0x5f, 0x47, 0x96, 0x10, 0xa8, 0xbd, 0xe4, 0x88, 0xac, 0x80, 0x52, 0x61, 0x73, 0xe9, 0x63, 0xdd, 0x99, 0xad, 0x20, 0x5b},
- subYX: fp.Elt{0x1b, 0x5e, 0xa2, 0x2a, 0x25, 0x0f, 0x86, 0xc0, 0xb1, 0x2e, 0x0c, 0x13, 0x40, 0x8d, 0xf0, 0xe6, 0x00, 0x55, 0x08, 0xc5, 0x7d, 0xf4, 0xc9, 0x31, 0x25, 0x3a, 0x99, 0x69, 0xdd, 0x67, 0x63, 0x9a, 0xd6, 0x89, 0x2e, 0xa1, 0x19, 0xca, 0x2c, 0xd9, 0x59, 0x5f, 0x5d, 0xc3, 0x6e, 0x62, 0x36, 0x12, 0x59, 0x15, 0xe1, 0xdc, 0xa4, 0xad, 0xc9, 0xd0},
- dt2: fp.Elt{0xbc, 0xea, 0xfc, 0xaf, 0x66, 0x23, 0xb7, 0x39, 0x6b, 0x2a, 0x96, 0xa8, 0x54, 0x43, 0xe9, 0xaa, 0x32, 0x40, 0x63, 0x92, 0x5e, 0xdf, 0x35, 0xc2, 0x9f, 0x24, 0x0c, 0xed, 0xfc, 0xde, 0x73, 0x8f, 0xa7, 0xd5, 0xa3, 0x2b, 0x18, 0x1f, 0xb0, 0xf8, 0xeb, 0x55, 0xd9, 0xc3, 0xfd, 0x28, 0x7c, 0x4f, 0xce, 0x0d, 0xf7, 0xae, 0xc2, 0x83, 0xc3, 0x78},
- },
- { /* 17P*/
- addYX: fp.Elt{0x71, 0xe6, 0x60, 0x93, 0x37, 0xdb, 0x01, 0xa5, 0x4c, 0xba, 0xe8, 0x8e, 0xd5, 0xf9, 0xd3, 0x98, 0xe5, 0xeb, 0xab, 0x3a, 0x15, 0x8b, 0x35, 0x60, 0xbe, 0xe5, 0x9c, 0x2d, 0x10, 0x9b, 0x2e, 0xcf, 0x65, 0x64, 0xea, 0x8f, 0x72, 0xce, 0xf5, 0x18, 0xe5, 0xe2, 0xf0, 0x0e, 0xae, 0x04, 0xec, 0xa0, 0x20, 0x65, 0x63, 0x07, 0xb1, 0x9f, 0x03, 0x97},
- subYX: fp.Elt{0x9e, 0x41, 0x64, 0x30, 0x95, 0x7f, 0x3a, 0x89, 0x7b, 0x0a, 0x79, 0x59, 0x23, 0x9a, 0x3b, 0xfe, 0xa4, 0x13, 0x08, 0xb2, 0x2e, 0x04, 0x50, 0x10, 0x30, 0xcd, 0x2e, 0xa4, 0x91, 0x71, 0x50, 0x36, 0x4a, 0x02, 0xf4, 0x8d, 0xa3, 0x36, 0x1b, 0xf4, 0x52, 0xba, 0x15, 0x04, 0x8b, 0x80, 0x25, 0xd9, 0xae, 0x67, 0x20, 0xd9, 0x88, 0x8f, 0x97, 0xa6},
- dt2: fp.Elt{0xb5, 0xe7, 0x46, 0xbd, 0x55, 0x23, 0xa0, 0x68, 0xc0, 0x12, 0xd9, 0xf1, 0x0a, 0x75, 0xe2, 0xda, 0xf4, 0x6b, 0xca, 0x14, 0xe4, 0x9f, 0x0f, 0xb5, 0x3c, 0xa6, 0xa5, 0xa2, 0x63, 0x94, 0xd1, 0x1c, 0x39, 0x58, 0x57, 0x02, 0x27, 0x98, 0xb6, 0x47, 0xc6, 0x61, 0x4b, 0x5c, 0xab, 0x6f, 0x2d, 0xab, 0xe3, 0xc1, 0x69, 0xf9, 0x12, 0xb0, 0xc8, 0xd5},
- },
- { /* 19P*/
- addYX: fp.Elt{0x19, 0x7d, 0xd5, 0xac, 0x79, 0xa2, 0x82, 0x9b, 0x28, 0x31, 0x22, 0xc0, 0x73, 0x02, 0x76, 0x17, 0x10, 0x70, 0x79, 0x57, 0xc9, 0x84, 0x62, 0x8e, 0x04, 0x04, 0x61, 0x67, 0x08, 0x48, 0xb4, 0x4b, 0xde, 0x53, 0x8c, 0xff, 0x36, 0x1b, 0x62, 0x86, 0x5d, 0xe1, 0x9b, 0xb1, 0xe5, 0xe8, 0x44, 0x64, 0xa1, 0x68, 0x3f, 0xa8, 0x45, 0x52, 0x91, 0xed},
- subYX: fp.Elt{0x42, 0x1a, 0x36, 0x1f, 0x90, 0x15, 0x24, 0x8d, 0x24, 0x80, 0xe6, 0xfe, 0x1e, 0xf0, 0xad, 0xaf, 0x6a, 0x93, 0xf0, 0xa6, 0x0d, 0x5d, 0xea, 0xf6, 0x62, 0x96, 0x7a, 0x05, 0x76, 0x85, 0x74, 0x32, 0xc7, 0xc8, 0x64, 0x53, 0x62, 0xe7, 0x54, 0x84, 0xe0, 0x40, 0x66, 0x19, 0x70, 0x40, 0x95, 0x35, 0x68, 0x64, 0x43, 0xcd, 0xba, 0x29, 0x32, 0xa8},
- dt2: fp.Elt{0x3e, 0xf6, 0xd6, 0xe4, 0x99, 0xeb, 0x20, 0x66, 0x08, 0x2e, 0x26, 0x64, 0xd7, 0x76, 0xf3, 0xb4, 0xc5, 0xa4, 0x35, 0x92, 0xd2, 0x99, 0x70, 0x5a, 0x1a, 0xe9, 0xe9, 0x3d, 0x3b, 0xe1, 0xcd, 0x0e, 0xee, 0x24, 0x13, 0x03, 0x22, 0xd6, 0xd6, 0x72, 0x08, 0x2b, 0xde, 0xfd, 0x93, 0xed, 0x0c, 0x7f, 0x5e, 0x31, 0x22, 0x4d, 0x80, 0x78, 0xc0, 0x48},
- },
- { /* 21P*/
- addYX: fp.Elt{0x8f, 0x72, 0xd2, 0x9e, 0xc4, 0xcd, 0x2c, 0xbf, 0xa8, 0xd3, 0x24, 0x62, 0x28, 0xee, 0x39, 0x0a, 0x19, 0x3a, 0x58, 0xff, 0x21, 0x2e, 0x69, 0x6c, 0x6e, 0x18, 0xd0, 0xcd, 0x61, 0xc1, 0x18, 0x02, 0x5a, 0xe9, 0xe3, 0xef, 0x1f, 0x8e, 0x10, 0xe8, 0x90, 0x2b, 0x48, 0xcd, 0xee, 0x38, 0xbd, 0x3a, 0xca, 0xbc, 0x2d, 0xe2, 0x3a, 0x03, 0x71, 0x02},
- subYX: fp.Elt{0xf8, 0xa4, 0x32, 0x26, 0x66, 0xaf, 0x3b, 0x53, 0xe7, 0xb0, 0x91, 0x92, 0xf5, 0x3c, 0x74, 0xce, 0xf2, 0xdd, 0x68, 0xa9, 0xf4, 0xcd, 0x5f, 0x60, 0xab, 0x71, 0xdf, 0xcd, 0x5c, 0x5d, 0x51, 0x72, 0x3a, 0x96, 0xea, 0xd6, 0xde, 0x54, 0x8e, 0x55, 0x4c, 0x08, 0x4c, 0x60, 0xdd, 0x34, 0xa9, 0x6f, 0xf3, 0x04, 0x02, 0xa8, 0xa6, 0x4e, 0x4d, 0x62},
- dt2: fp.Elt{0x76, 0x4a, 0xae, 0x38, 0x62, 0x69, 0x72, 0xdc, 0xe8, 0x43, 0xbe, 0x1d, 0x61, 0xde, 0x31, 0xc3, 0x42, 0x8f, 0x33, 0x9d, 0xca, 0xc7, 0x9c, 0xec, 0x6a, 0xe2, 0xaa, 0x01, 0x49, 0x78, 0x8d, 0x72, 0x4f, 0x38, 0xea, 0x52, 0xc2, 0xd3, 0xc9, 0x39, 0x71, 0xba, 0xb9, 0x09, 0x9b, 0xa3, 0x7f, 0x45, 0x43, 0x65, 0x36, 0x29, 0xca, 0xe7, 0x5c, 0x5f},
- },
- { /* 23P*/
- addYX: fp.Elt{0x89, 0x42, 0x35, 0x48, 0x6d, 0x74, 0xe5, 0x1f, 0xc3, 0xdd, 0x28, 0x5b, 0x84, 0x41, 0x33, 0x9f, 0x42, 0xf3, 0x1d, 0x5d, 0x15, 0x6d, 0x76, 0x33, 0x36, 0xaf, 0xe9, 0xdd, 0xfa, 0x63, 0x4f, 0x7a, 0x9c, 0xeb, 0x1c, 0x4f, 0x34, 0x65, 0x07, 0x54, 0xbb, 0x4c, 0x8b, 0x62, 0x9d, 0xd0, 0x06, 0x99, 0xb3, 0xe9, 0xda, 0x85, 0x19, 0xb0, 0x3d, 0x3c},
- subYX: fp.Elt{0xbb, 0x99, 0xf6, 0xbf, 0xaf, 0x2c, 0x22, 0x0d, 0x7a, 0xaa, 0x98, 0x6f, 0x01, 0x82, 0x99, 0xcf, 0x88, 0xbd, 0x0e, 0x3a, 0x89, 0xe0, 0x9c, 0x8c, 0x17, 0x20, 0xc4, 0xe0, 0xcf, 0x43, 0x7a, 0xef, 0x0d, 0x9f, 0x87, 0xd4, 0xfb, 0xf2, 0x96, 0xb8, 0x03, 0xe8, 0xcb, 0x5c, 0xec, 0x65, 0x5f, 0x49, 0xa4, 0x7c, 0x85, 0xb4, 0xf6, 0xc7, 0xdb, 0xa3},
- dt2: fp.Elt{0x11, 0xf3, 0x32, 0xa3, 0xa7, 0xb2, 0x7d, 0x51, 0x82, 0x44, 0xeb, 0xa2, 0x7d, 0x72, 0xcb, 0xc6, 0xf6, 0xc7, 0xb2, 0x38, 0x0e, 0x0f, 0x4f, 0x29, 0x00, 0xe4, 0x5b, 0x94, 0x46, 0x86, 0x66, 0xa1, 0x83, 0xb3, 0xeb, 0x15, 0xb6, 0x31, 0x50, 0x28, 0xeb, 0xed, 0x0d, 0x32, 0x39, 0xe9, 0x23, 0x81, 0x99, 0x3e, 0xff, 0x17, 0x4c, 0x11, 0x43, 0xd1},
- },
- { /* 25P*/
- addYX: fp.Elt{0xce, 0xe7, 0xf8, 0x94, 0x8f, 0x96, 0xf8, 0x96, 0xe6, 0x72, 0x20, 0x44, 0x2c, 0xa7, 0xfc, 0xba, 0xc8, 0xe1, 0xbb, 0xc9, 0x16, 0x85, 0xcd, 0x0b, 0xe5, 0xb5, 0x5a, 0x7f, 0x51, 0x43, 0x63, 0x8b, 0x23, 0x8e, 0x1d, 0x31, 0xff, 0x46, 0x02, 0x66, 0xcc, 0x9e, 0x4d, 0xa2, 0xca, 0xe2, 0xc7, 0xfd, 0x22, 0xb1, 0xdb, 0xdf, 0x6f, 0xe6, 0xa5, 0x82},
- subYX: fp.Elt{0xd0, 0xf5, 0x65, 0x40, 0xec, 0x8e, 0x65, 0x42, 0x78, 0xc1, 0x65, 0xe4, 0x10, 0xc8, 0x0b, 0x1b, 0xdd, 0x96, 0x68, 0xce, 0xee, 0x45, 0x55, 0xd8, 0x6e, 0xd3, 0xe6, 0x77, 0x19, 0xae, 0xc2, 0x8d, 0x8d, 0x3e, 0x14, 0x3f, 0x6d, 0x00, 0x2f, 0x9b, 0xd1, 0x26, 0x60, 0x28, 0x0f, 0x3a, 0x47, 0xb3, 0xe6, 0x68, 0x28, 0x24, 0x25, 0xca, 0xc8, 0x06},
- dt2: fp.Elt{0x54, 0xbb, 0x60, 0x92, 0xdb, 0x8f, 0x0f, 0x38, 0xe0, 0xe6, 0xe4, 0xc9, 0xcc, 0x14, 0x62, 0x01, 0xc4, 0x2b, 0x0f, 0xcf, 0xed, 0x7d, 0x8e, 0xa4, 0xd9, 0x73, 0x0b, 0xba, 0x0c, 0xaf, 0x0c, 0xf9, 0xe2, 0xeb, 0x29, 0x2a, 0x53, 0xdf, 0x2c, 0x5a, 0xfa, 0x8f, 0xc1, 0x01, 0xd7, 0xb1, 0x45, 0x73, 0x92, 0x32, 0x83, 0x85, 0x12, 0x74, 0x89, 0x44},
- },
- { /* 27P*/
- addYX: fp.Elt{0x0b, 0x73, 0x3c, 0xc2, 0xb1, 0x2e, 0xe1, 0xa7, 0xf5, 0xc9, 0x7a, 0xfb, 0x3d, 0x2d, 0xac, 0x59, 0xdb, 0xfa, 0x36, 0x11, 0xd1, 0x13, 0x04, 0x51, 0x1d, 0xab, 0x9b, 0x6b, 0x93, 0xfe, 0xda, 0xb0, 0x8e, 0xb4, 0x79, 0x11, 0x21, 0x0f, 0x65, 0xb9, 0xbb, 0x79, 0x96, 0x2a, 0xfd, 0x30, 0xe0, 0xb4, 0x2d, 0x9a, 0x55, 0x25, 0x5d, 0xd4, 0xad, 0x2a},
- subYX: fp.Elt{0x9e, 0xc5, 0x04, 0xfe, 0xec, 0x3c, 0x64, 0x1c, 0xed, 0x95, 0xed, 0xae, 0xaf, 0x5c, 0x6e, 0x08, 0x9e, 0x02, 0x29, 0x59, 0x7e, 0x5f, 0xc4, 0x9a, 0xd5, 0x32, 0x72, 0x86, 0xe1, 0x4e, 0x3c, 0xce, 0x99, 0x69, 0x3b, 0xc4, 0xdd, 0x4d, 0xb7, 0xbb, 0xda, 0x3b, 0x1a, 0x99, 0xaa, 0x62, 0x15, 0xc1, 0xf0, 0xb6, 0x6c, 0xec, 0x56, 0xc1, 0xff, 0x0c},
- dt2: fp.Elt{0x2f, 0xf1, 0x3f, 0x7a, 0x2d, 0x56, 0x19, 0x7f, 0xea, 0xbe, 0x59, 0x2e, 0x13, 0x67, 0x81, 0xfb, 0xdb, 0xc8, 0xa3, 0x1d, 0xd5, 0xe9, 0x13, 0x8b, 0x29, 0xdf, 0xcf, 0x9f, 0xe7, 0xd9, 0x0b, 0x70, 0xd3, 0x15, 0x57, 0x4a, 0xe9, 0x50, 0x12, 0x1b, 0x81, 0x4b, 0x98, 0x98, 0xa8, 0x31, 0x1d, 0x27, 0x47, 0x38, 0xed, 0x57, 0x99, 0x26, 0xb2, 0xee},
- },
- { /* 29P*/
- addYX: fp.Elt{0x1c, 0xb2, 0xb2, 0x67, 0x3b, 0x8b, 0x3d, 0x5a, 0x30, 0x7e, 0x38, 0x7e, 0x3c, 0x3d, 0x28, 0x56, 0x59, 0xd8, 0x87, 0x53, 0x8b, 0xe6, 0x6c, 0x5d, 0xe5, 0x0a, 0x33, 0x10, 0xce, 0xa2, 0x17, 0x0d, 0xe8, 0x76, 0xee, 0x68, 0xa8, 0x72, 0x54, 0xbd, 0xa6, 0x24, 0x94, 0x6e, 0x77, 0xc7, 0x53, 0xb7, 0x89, 0x1c, 0x7a, 0xe9, 0x78, 0x9a, 0x74, 0x5f},
- subYX: fp.Elt{0x76, 0x96, 0x1c, 0xcf, 0x08, 0x55, 0xd8, 0x1e, 0x0d, 0xa3, 0x59, 0x95, 0x32, 0xf4, 0xc2, 0x8e, 0x84, 0x5e, 0x4b, 0x04, 0xda, 0x71, 0xc9, 0x78, 0x52, 0xde, 0x14, 0xb4, 0x31, 0xf4, 0xd4, 0xb8, 0x58, 0xc5, 0x20, 0xe8, 0xdd, 0x15, 0xb5, 0xee, 0xea, 0x61, 0xe0, 0xf5, 0xd6, 0xae, 0x55, 0x59, 0x05, 0x3e, 0xaf, 0x74, 0xac, 0x1f, 0x17, 0x82},
- dt2: fp.Elt{0x59, 0x24, 0xcd, 0xfc, 0x11, 0x7e, 0x85, 0x18, 0x3d, 0x69, 0xf7, 0x71, 0x31, 0x66, 0x98, 0x42, 0x95, 0x00, 0x8c, 0xb2, 0xae, 0x39, 0x7e, 0x85, 0xd6, 0xb0, 0x02, 0xec, 0xce, 0xfc, 0x25, 0xb2, 0xe3, 0x99, 0x8e, 0x5b, 0x61, 0x96, 0x2e, 0x6d, 0x96, 0x57, 0x71, 0xa5, 0x93, 0x41, 0x0e, 0x6f, 0xfd, 0x0a, 0xbf, 0xa9, 0xf7, 0x56, 0xa9, 0x3e},
- },
- { /* 31P*/
- addYX: fp.Elt{0xa2, 0x2e, 0x0c, 0x17, 0x4d, 0xcc, 0x85, 0x2c, 0x18, 0xa0, 0xd2, 0x08, 0xba, 0x11, 0xfa, 0x47, 0x71, 0x86, 0xaf, 0x36, 0x6a, 0xd7, 0xfe, 0xb9, 0xb0, 0x2f, 0x89, 0x98, 0x49, 0x69, 0xf8, 0x6a, 0xad, 0x27, 0x5e, 0x0a, 0x22, 0x60, 0x5e, 0x5d, 0xca, 0x06, 0x51, 0x27, 0x99, 0x29, 0x85, 0x68, 0x98, 0xe1, 0xc4, 0x21, 0x50, 0xa0, 0xe9, 0xc1},
- subYX: fp.Elt{0x4d, 0x70, 0xee, 0x91, 0x92, 0x3f, 0xb7, 0xd3, 0x1d, 0xdb, 0x8d, 0x6e, 0x16, 0xf5, 0x65, 0x7d, 0x5f, 0xb5, 0x6c, 0x59, 0x26, 0x70, 0x4b, 0xf2, 0xfc, 0xe7, 0xdf, 0x86, 0xfe, 0xa5, 0xa7, 0xa6, 0x5d, 0xfb, 0x06, 0xe9, 0xf9, 0xcc, 0xc0, 0x37, 0xcc, 0xd8, 0x09, 0x04, 0xd2, 0xa5, 0x1d, 0xd7, 0xb7, 0xce, 0x92, 0xac, 0x3c, 0xad, 0xfb, 0xae},
- dt2: fp.Elt{0x17, 0xa3, 0x9a, 0xc7, 0x86, 0x2a, 0x51, 0xf7, 0x96, 0x79, 0x49, 0x22, 0x2e, 0x5a, 0x01, 0x5c, 0xb5, 0x95, 0xd4, 0xe8, 0xcb, 0x00, 0xca, 0x2d, 0x55, 0xb6, 0x34, 0x36, 0x0b, 0x65, 0x46, 0xf0, 0x49, 0xfc, 0x87, 0x86, 0xe5, 0xc3, 0x15, 0xdb, 0x32, 0xcd, 0xf2, 0xd3, 0x82, 0x4c, 0xe6, 0x61, 0x8a, 0xaf, 0xd4, 0x9e, 0x0f, 0x5a, 0xf2, 0x81},
- },
- { /* 33P*/
- addYX: fp.Elt{0x88, 0x10, 0xc0, 0xcb, 0xf5, 0x77, 0xae, 0xa5, 0xbe, 0xf6, 0xcd, 0x2e, 0x8b, 0x7e, 0xbd, 0x79, 0x62, 0x4a, 0xeb, 0x69, 0xc3, 0x28, 0xaa, 0x72, 0x87, 0xa9, 0x25, 0x87, 0x46, 0xea, 0x0e, 0x62, 0xa3, 0x6a, 0x1a, 0xe2, 0xba, 0xdc, 0x81, 0x10, 0x33, 0x01, 0xf6, 0x16, 0x89, 0x80, 0xc6, 0xcd, 0xdb, 0xdc, 0xba, 0x0e, 0x09, 0x4a, 0x35, 0x4a},
- subYX: fp.Elt{0x86, 0xb2, 0x2b, 0xd0, 0xb8, 0x4a, 0x6d, 0x66, 0x7b, 0x32, 0xdf, 0x3b, 0x1a, 0x19, 0x1f, 0x63, 0xee, 0x1f, 0x3d, 0x1c, 0x5c, 0x14, 0x60, 0x5b, 0x72, 0x49, 0x07, 0xb1, 0x0d, 0x72, 0xc6, 0x35, 0xf0, 0xbc, 0x5e, 0xda, 0x80, 0x6b, 0x64, 0x5b, 0xe5, 0x34, 0x54, 0x39, 0xdd, 0xe6, 0x3c, 0xcb, 0xe5, 0x29, 0x32, 0x06, 0xc6, 0xb1, 0x96, 0x34},
- dt2: fp.Elt{0x85, 0x86, 0xf5, 0x84, 0x86, 0xe6, 0x77, 0x8a, 0x71, 0x85, 0x0c, 0x4f, 0x81, 0x5b, 0x29, 0x06, 0xb5, 0x2e, 0x26, 0x71, 0x07, 0x78, 0x07, 0xae, 0xbc, 0x95, 0x46, 0xc3, 0x65, 0xac, 0xe3, 0x76, 0x51, 0x7d, 0xd4, 0x85, 0x31, 0xe3, 0x43, 0xf3, 0x1b, 0x7c, 0xf7, 0x6b, 0x2c, 0xf8, 0x1c, 0xbb, 0x8d, 0xca, 0xab, 0x4b, 0xba, 0x7f, 0xa4, 0xe2},
- },
- { /* 35P*/
- addYX: fp.Elt{0x1a, 0xee, 0xe7, 0xa4, 0x8a, 0x9d, 0x53, 0x80, 0xc6, 0xb8, 0x4e, 0xdc, 0x89, 0xe0, 0xc4, 0x2b, 0x60, 0x52, 0x6f, 0xec, 0x81, 0xd2, 0x55, 0x6b, 0x1b, 0x6f, 0x17, 0x67, 0x8e, 0x42, 0x26, 0x4c, 0x65, 0x23, 0x29, 0xc6, 0x7b, 0xcd, 0x9f, 0xad, 0x4b, 0x42, 0xd3, 0x0c, 0x75, 0xc3, 0x8a, 0xf5, 0xbe, 0x9e, 0x55, 0xf7, 0x47, 0x5d, 0xbd, 0x3a},
- subYX: fp.Elt{0x0d, 0xa8, 0x3b, 0xf9, 0xc7, 0x7e, 0xc6, 0x86, 0x94, 0xc0, 0x01, 0xff, 0x27, 0xce, 0x43, 0xac, 0xe5, 0xe1, 0xd2, 0x8d, 0xc1, 0x22, 0x31, 0xbe, 0xe1, 0xaf, 0xf9, 0x4a, 0x78, 0xa1, 0x0c, 0xaa, 0xd4, 0x80, 0xe4, 0x09, 0x8d, 0xfb, 0x1d, 0x52, 0xc8, 0x60, 0x2d, 0xf2, 0xa2, 0x89, 0x02, 0x56, 0x3d, 0x56, 0x27, 0x85, 0xc7, 0xf0, 0x2b, 0x9a},
- dt2: fp.Elt{0x62, 0x7c, 0xc7, 0x6b, 0x2c, 0x9d, 0x0a, 0x7c, 0xe5, 0x50, 0x3c, 0xe6, 0x87, 0x1c, 0x82, 0x30, 0x67, 0x3c, 0x39, 0xb6, 0xa0, 0x31, 0xfb, 0x03, 0x7b, 0xa1, 0x58, 0xdf, 0x12, 0x76, 0x5d, 0x5d, 0x0a, 0x8f, 0x9b, 0x37, 0x32, 0xc3, 0x60, 0x33, 0xea, 0x9f, 0x0a, 0x99, 0xfa, 0x20, 0xd0, 0x33, 0x21, 0xc3, 0x94, 0xd4, 0x86, 0x49, 0x7c, 0x4e},
- },
- { /* 37P*/
- addYX: fp.Elt{0xc7, 0x0c, 0x71, 0xfe, 0x55, 0xd1, 0x95, 0x8f, 0x43, 0xbb, 0x6b, 0x74, 0x30, 0xbd, 0xe8, 0x6f, 0x1c, 0x1b, 0x06, 0x62, 0xf5, 0xfc, 0x65, 0xa0, 0xeb, 0x81, 0x12, 0xc9, 0x64, 0x66, 0x61, 0xde, 0xf3, 0x6d, 0xd4, 0xae, 0x8e, 0xb1, 0x72, 0xe0, 0xcd, 0x37, 0x01, 0x28, 0x52, 0xd7, 0x39, 0x46, 0x0c, 0x55, 0xcf, 0x47, 0x70, 0xef, 0xa1, 0x17},
- subYX: fp.Elt{0x8d, 0x58, 0xde, 0x83, 0x88, 0x16, 0x0e, 0x12, 0x42, 0x03, 0x50, 0x60, 0x4b, 0xdf, 0xbf, 0x95, 0xcc, 0x7d, 0x18, 0x17, 0x7e, 0x31, 0x5d, 0x8a, 0x66, 0xc1, 0xcf, 0x14, 0xea, 0xf4, 0xf4, 0xe5, 0x63, 0x2d, 0x32, 0x86, 0x9b, 0xed, 0x1f, 0x4f, 0x03, 0xaf, 0x33, 0x92, 0xcb, 0xaf, 0x9c, 0x05, 0x0d, 0x47, 0x1b, 0x42, 0xba, 0x13, 0x22, 0x98},
- dt2: fp.Elt{0xb5, 0x48, 0xeb, 0x7d, 0x3d, 0x10, 0x9f, 0x59, 0xde, 0xf8, 0x1c, 0x4f, 0x7d, 0x9d, 0x40, 0x4d, 0x9e, 0x13, 0x24, 0xb5, 0x21, 0x09, 0xb7, 0xee, 0x98, 0x5c, 0x56, 0xbc, 0x5e, 0x2b, 0x78, 0x38, 0x06, 0xac, 0xe3, 0xe0, 0xfa, 0x2e, 0xde, 0x4f, 0xd2, 0xb3, 0xfb, 0x2d, 0x71, 0x84, 0xd1, 0x9d, 0x12, 0x5b, 0x35, 0xc8, 0x03, 0x68, 0x67, 0xc7},
- },
- { /* 39P*/
- addYX: fp.Elt{0xb6, 0x65, 0xfb, 0xa7, 0x06, 0x35, 0xbb, 0xe0, 0x31, 0x8d, 0x91, 0x40, 0x98, 0xab, 0x30, 0xe4, 0xca, 0x12, 0x59, 0x89, 0xed, 0x65, 0x5d, 0x7f, 0xae, 0x69, 0xa0, 0xa4, 0xfa, 0x78, 0xb4, 0xf7, 0xed, 0xae, 0x86, 0x78, 0x79, 0x64, 0x24, 0xa6, 0xd4, 0xe1, 0xf6, 0xd3, 0xa0, 0x89, 0xba, 0x20, 0xf4, 0x54, 0x0d, 0x8f, 0xdb, 0x1a, 0x79, 0xdb},
- subYX: fp.Elt{0xe1, 0x82, 0x0c, 0x4d, 0xde, 0x9f, 0x40, 0xf0, 0xc1, 0xbd, 0x8b, 0xd3, 0x24, 0x03, 0xcd, 0xf2, 0x92, 0x7d, 0xe2, 0x68, 0x7f, 0xf1, 0xbe, 0x69, 0xde, 0x34, 0x67, 0x4c, 0x85, 0x3b, 0xec, 0x98, 0xcc, 0x4d, 0x3e, 0xc0, 0x96, 0x27, 0xe6, 0x75, 0xfc, 0xdf, 0x37, 0xc0, 0x1e, 0x27, 0xe0, 0xf6, 0xc2, 0xbd, 0xbc, 0x3d, 0x9b, 0x39, 0xdc, 0xe2},
- dt2: fp.Elt{0xd8, 0x29, 0xa7, 0x39, 0xe3, 0x9f, 0x2f, 0x0e, 0x4b, 0x24, 0x21, 0x70, 0xef, 0xfd, 0x91, 0xea, 0xbf, 0xe1, 0x72, 0x90, 0xcc, 0xc9, 0x84, 0x0e, 0xad, 0xd5, 0xe6, 0xbb, 0xc5, 0x99, 0x7f, 0xa4, 0xf0, 0x2e, 0xcc, 0x95, 0x64, 0x27, 0x19, 0xd8, 0x4c, 0x27, 0x0d, 0xff, 0xb6, 0x29, 0xe2, 0x6c, 0xfa, 0xbb, 0x4d, 0x9c, 0xbb, 0xaf, 0xa5, 0xec},
- },
- { /* 41P*/
- addYX: fp.Elt{0xd6, 0x33, 0x3f, 0x9f, 0xcf, 0xfd, 0x4c, 0xd1, 0xfe, 0xe5, 0xeb, 0x64, 0x27, 0xae, 0x7a, 0xa2, 0x82, 0x50, 0x6d, 0xaa, 0xe3, 0x5d, 0xe2, 0x48, 0x60, 0xb3, 0x76, 0x04, 0xd9, 0x19, 0xa7, 0xa1, 0x73, 0x8d, 0x38, 0xa9, 0xaf, 0x45, 0xb5, 0xb2, 0x62, 0x9b, 0xf1, 0x35, 0x7b, 0x84, 0x66, 0xeb, 0x06, 0xef, 0xf1, 0xb2, 0x2d, 0x6a, 0x61, 0x15},
- subYX: fp.Elt{0x86, 0x50, 0x42, 0xf7, 0xda, 0x59, 0xb2, 0xcf, 0x0d, 0x3d, 0xee, 0x8e, 0x53, 0x5d, 0xf7, 0x9e, 0x6a, 0x26, 0x2d, 0xc7, 0x8c, 0x8e, 0x18, 0x50, 0x6d, 0xb7, 0x51, 0x4c, 0xa7, 0x52, 0x6e, 0x0e, 0x0a, 0x16, 0x74, 0xb2, 0x81, 0x8b, 0x56, 0x27, 0x22, 0x84, 0xf4, 0x56, 0xc5, 0x06, 0xe1, 0x8b, 0xca, 0x2d, 0xdb, 0x9a, 0xf6, 0x10, 0x9c, 0x51},
- dt2: fp.Elt{0x1f, 0x16, 0xa2, 0x78, 0x96, 0x1b, 0x85, 0x9c, 0x76, 0x49, 0xd4, 0x0f, 0xac, 0xb0, 0xf4, 0xd0, 0x06, 0x2c, 0x7e, 0x6d, 0x6e, 0x8e, 0xc7, 0x9f, 0x18, 0xad, 0xfc, 0x88, 0x0c, 0x0c, 0x09, 0x05, 0x05, 0xa0, 0x79, 0x72, 0x32, 0x72, 0x87, 0x0f, 0x49, 0x87, 0x0c, 0xb4, 0x12, 0xc2, 0x09, 0xf8, 0x9f, 0x30, 0x72, 0xa9, 0x47, 0x13, 0x93, 0x49},
- },
- { /* 43P*/
- addYX: fp.Elt{0xcc, 0xb1, 0x4c, 0xd3, 0xc0, 0x9e, 0x9e, 0x4d, 0x6d, 0x28, 0x0b, 0xa5, 0x94, 0xa7, 0x2e, 0xc2, 0xc7, 0xaf, 0x29, 0x73, 0xc9, 0x68, 0xea, 0x0f, 0x34, 0x37, 0x8d, 0x96, 0x8f, 0x3a, 0x3d, 0x73, 0x1e, 0x6d, 0x9f, 0xcf, 0x8d, 0x83, 0xb5, 0x71, 0xb9, 0xe1, 0x4b, 0x67, 0x71, 0xea, 0xcf, 0x56, 0xe5, 0xeb, 0x72, 0x15, 0x2f, 0x9e, 0xa8, 0xaa},
- subYX: fp.Elt{0xf4, 0x3e, 0x85, 0x1c, 0x1a, 0xef, 0x50, 0xd1, 0xb4, 0x20, 0xb2, 0x60, 0x05, 0x98, 0xfe, 0x47, 0x3b, 0xc1, 0x76, 0xca, 0x2c, 0x4e, 0x5a, 0x42, 0xa3, 0xf7, 0x20, 0xaa, 0x57, 0x39, 0xee, 0x34, 0x1f, 0xe1, 0x68, 0xd3, 0x7e, 0x06, 0xc4, 0x6c, 0xc7, 0x76, 0x2b, 0xe4, 0x1c, 0x48, 0x44, 0xe6, 0xe5, 0x44, 0x24, 0x8d, 0xb3, 0xb6, 0x88, 0x32},
- dt2: fp.Elt{0x18, 0xa7, 0xba, 0xd0, 0x44, 0x6f, 0x33, 0x31, 0x00, 0xf8, 0xf6, 0x12, 0xe3, 0xc5, 0xc7, 0xb5, 0x91, 0x9c, 0x91, 0xb5, 0x75, 0x18, 0x18, 0x8a, 0xab, 0xed, 0x24, 0x11, 0x2e, 0xce, 0x5a, 0x0f, 0x94, 0x5f, 0x2e, 0xca, 0xd3, 0x80, 0xea, 0xe5, 0x34, 0x96, 0x67, 0x8b, 0x6a, 0x26, 0x5e, 0xc8, 0x9d, 0x2c, 0x5e, 0x6c, 0xa2, 0x0c, 0xbf, 0xf0},
- },
- { /* 45P*/
- addYX: fp.Elt{0xb3, 0xbf, 0xa3, 0x85, 0xee, 0xf6, 0x58, 0x02, 0x78, 0xc4, 0x30, 0xd6, 0x57, 0x59, 0x8c, 0x88, 0x08, 0x7c, 0xbc, 0xbe, 0x0a, 0x74, 0xa9, 0xde, 0x69, 0xe7, 0x41, 0xd8, 0xbf, 0x66, 0x8d, 0x3d, 0x28, 0x00, 0x8c, 0x47, 0x65, 0x34, 0xfe, 0x86, 0x9e, 0x6a, 0xf2, 0x41, 0x6a, 0x94, 0xc4, 0x88, 0x75, 0x23, 0x0d, 0x52, 0x69, 0xee, 0x07, 0x89},
- subYX: fp.Elt{0x22, 0x3c, 0xa1, 0x70, 0x58, 0x97, 0x93, 0xbe, 0x59, 0xa8, 0x0b, 0x8a, 0x46, 0x2a, 0x38, 0x1e, 0x08, 0x6b, 0x61, 0x9f, 0xf2, 0x4a, 0x8b, 0x80, 0x68, 0x6e, 0xc8, 0x92, 0x60, 0xf3, 0xc9, 0x89, 0xb2, 0x6d, 0x63, 0xb0, 0xeb, 0x83, 0x15, 0x63, 0x0e, 0x64, 0xbb, 0xb8, 0xfe, 0xb4, 0x81, 0x90, 0x01, 0x28, 0x10, 0xb9, 0x74, 0x6e, 0xde, 0xa4},
- dt2: fp.Elt{0x1a, 0x23, 0x45, 0xa8, 0x6f, 0x4e, 0xa7, 0x4a, 0x0c, 0xeb, 0xb0, 0x43, 0xf9, 0xef, 0x99, 0x60, 0x5b, 0xdb, 0x66, 0xc0, 0x86, 0x71, 0x43, 0xb1, 0x22, 0x7b, 0x1c, 0xe7, 0x8d, 0x09, 0x1d, 0x83, 0x76, 0x9c, 0xd3, 0x5a, 0xdd, 0x42, 0xd9, 0x2f, 0x2d, 0xba, 0x7a, 0xc2, 0xd9, 0x6b, 0xd4, 0x7a, 0xf1, 0xd5, 0x5f, 0x6b, 0x85, 0xbf, 0x0b, 0xf1},
- },
- { /* 47P*/
- addYX: fp.Elt{0xb2, 0x83, 0xfa, 0x1f, 0xd2, 0xce, 0xb6, 0xf2, 0x2d, 0xea, 0x1b, 0xe5, 0x29, 0xa5, 0x72, 0xf9, 0x25, 0x48, 0x4e, 0xf2, 0x50, 0x1b, 0x39, 0xda, 0x34, 0xc5, 0x16, 0x13, 0xb4, 0x0c, 0xa1, 0x00, 0x79, 0x7a, 0xf5, 0x8b, 0xf3, 0x70, 0x14, 0xb6, 0xfc, 0x9a, 0x47, 0x68, 0x1e, 0x42, 0x70, 0x64, 0x2a, 0x84, 0x3e, 0x3d, 0x20, 0x58, 0xf9, 0x6a},
- subYX: fp.Elt{0xd9, 0xee, 0xc0, 0xc4, 0xf5, 0xc2, 0x86, 0xaf, 0x45, 0xd2, 0xd2, 0x87, 0x1b, 0x64, 0xd5, 0xe0, 0x8c, 0x44, 0x00, 0x4f, 0x43, 0x89, 0x04, 0x48, 0x4a, 0x0b, 0xca, 0x94, 0x06, 0x2f, 0x23, 0x5b, 0x6c, 0x8d, 0x44, 0x66, 0x53, 0xf5, 0x5a, 0x20, 0x72, 0x28, 0x58, 0x84, 0xcc, 0x73, 0x22, 0x5e, 0xd1, 0x0b, 0x56, 0x5e, 0x6a, 0xa3, 0x11, 0x91},
- dt2: fp.Elt{0x6e, 0x9f, 0x88, 0xa8, 0x68, 0x2f, 0x12, 0x37, 0x88, 0xfc, 0x92, 0x8f, 0x24, 0xeb, 0x5b, 0x2a, 0x2a, 0xd0, 0x14, 0x40, 0x4c, 0xa9, 0xa4, 0x03, 0x0c, 0x45, 0x48, 0x13, 0xe8, 0xa6, 0x37, 0xab, 0xc0, 0x06, 0x38, 0x6c, 0x96, 0x73, 0x40, 0x6c, 0xc6, 0xea, 0x56, 0xc6, 0xe9, 0x1a, 0x69, 0xeb, 0x7a, 0xd1, 0x33, 0x69, 0x58, 0x2b, 0xea, 0x2f},
- },
- { /* 49P*/
- addYX: fp.Elt{0x58, 0xa8, 0x05, 0x41, 0x00, 0x9d, 0xaa, 0xd9, 0x98, 0xcf, 0xb9, 0x41, 0xb5, 0x4a, 0x8d, 0xe2, 0xe7, 0xc0, 0x72, 0xef, 0xc8, 0x28, 0x6b, 0x68, 0x9d, 0xc9, 0xdf, 0x05, 0x8b, 0xd0, 0x04, 0x74, 0x79, 0x45, 0x52, 0x05, 0xa3, 0x6e, 0x35, 0x3a, 0xe3, 0xef, 0xb2, 0xdc, 0x08, 0x6f, 0x4e, 0x76, 0x85, 0x67, 0xba, 0x23, 0x8f, 0xdd, 0xaf, 0x09},
- subYX: fp.Elt{0xb4, 0x38, 0xc8, 0xff, 0x4f, 0x65, 0x2a, 0x7e, 0xad, 0xb1, 0xc6, 0xb9, 0x3d, 0xd6, 0xf7, 0x14, 0xcf, 0xf6, 0x98, 0x75, 0xbb, 0x47, 0x83, 0x90, 0xe7, 0xe1, 0xf6, 0x14, 0x99, 0x7e, 0xfa, 0xe4, 0x77, 0x24, 0xe3, 0xe7, 0xf0, 0x1e, 0xdb, 0x27, 0x4e, 0x16, 0x04, 0xf2, 0x08, 0x52, 0xfc, 0xec, 0x55, 0xdb, 0x2e, 0x67, 0xe1, 0x94, 0x32, 0x89},
- dt2: fp.Elt{0x00, 0xad, 0x03, 0x35, 0x1a, 0xb1, 0x88, 0xf0, 0xc9, 0x11, 0xe4, 0x12, 0x52, 0x61, 0xfd, 0x8a, 0x1b, 0x6a, 0x0a, 0x4c, 0x42, 0x46, 0x22, 0x0e, 0xa5, 0xf9, 0xe2, 0x50, 0xf2, 0xb2, 0x1f, 0x20, 0x78, 0x10, 0xf6, 0xbf, 0x7f, 0x0c, 0x9c, 0xad, 0x40, 0x8b, 0x82, 0xd4, 0xba, 0x69, 0x09, 0xac, 0x4b, 0x6d, 0xc4, 0x49, 0x17, 0x81, 0x57, 0x3b},
- },
- { /* 51P*/
- addYX: fp.Elt{0x0d, 0xfe, 0xb4, 0x35, 0x11, 0xbd, 0x1d, 0x6b, 0xc2, 0xc5, 0x3b, 0xd2, 0x23, 0x2c, 0x72, 0xe3, 0x48, 0xb1, 0x48, 0x73, 0xfb, 0xa3, 0x21, 0x6e, 0xc0, 0x09, 0x69, 0xac, 0xe1, 0x60, 0xbc, 0x24, 0x03, 0x99, 0x63, 0x0a, 0x00, 0xf0, 0x75, 0xf6, 0x92, 0xc5, 0xd6, 0xdb, 0x51, 0xd4, 0x7d, 0xe6, 0xf4, 0x11, 0x79, 0xd7, 0xc3, 0xaf, 0x48, 0xd0},
- subYX: fp.Elt{0xf4, 0x4f, 0xaf, 0x31, 0xe3, 0x10, 0x89, 0x95, 0xf0, 0x8a, 0xf6, 0x31, 0x9f, 0x48, 0x02, 0xba, 0x42, 0x2b, 0x3c, 0x22, 0x8b, 0xcc, 0x12, 0x98, 0x6e, 0x7a, 0x64, 0x3a, 0xc4, 0xca, 0x32, 0x2a, 0x72, 0xf8, 0x2c, 0xcf, 0x78, 0x5e, 0x7a, 0x75, 0x6e, 0x72, 0x46, 0x48, 0x62, 0x28, 0xac, 0x58, 0x1a, 0xc6, 0x59, 0x88, 0x2a, 0x44, 0x9e, 0x83},
- dt2: fp.Elt{0xb3, 0xde, 0x36, 0xfd, 0xeb, 0x1b, 0xd4, 0x24, 0x1b, 0x08, 0x8c, 0xfe, 0xa9, 0x41, 0xa1, 0x64, 0xf2, 0x6d, 0xdb, 0xf9, 0x94, 0xae, 0x86, 0x71, 0xab, 0x10, 0xbf, 0xa3, 0xb2, 0xa0, 0xdf, 0x10, 0x8c, 0x74, 0xce, 0xb3, 0xfc, 0xdb, 0xba, 0x15, 0xf6, 0x91, 0x7a, 0x9c, 0x36, 0x1e, 0x45, 0x07, 0x3c, 0xec, 0x1a, 0x61, 0x26, 0x93, 0xe3, 0x50},
- },
- { /* 53P*/
- addYX: fp.Elt{0xc5, 0x50, 0xc5, 0x83, 0xb0, 0xbd, 0xd9, 0xf6, 0x6d, 0x15, 0x5e, 0xc1, 0x1a, 0x33, 0xa0, 0xce, 0x13, 0x70, 0x3b, 0xe1, 0x31, 0xc6, 0xc4, 0x02, 0xec, 0x8c, 0xd5, 0x9c, 0x97, 0xd3, 0x12, 0xc4, 0xa2, 0xf9, 0xd5, 0xfb, 0x22, 0x69, 0x94, 0x09, 0x2f, 0x59, 0xce, 0xdb, 0xf2, 0xf2, 0x00, 0xe0, 0xa9, 0x08, 0x44, 0x2e, 0x8b, 0x6b, 0xf5, 0xb3},
- subYX: fp.Elt{0x90, 0xdd, 0xec, 0xa2, 0x65, 0xb7, 0x61, 0xbc, 0xaa, 0x70, 0xa2, 0x15, 0xd8, 0xb0, 0xf8, 0x8e, 0x23, 0x3d, 0x9f, 0x46, 0xa3, 0x29, 0x20, 0xd1, 0xa1, 0x15, 0x81, 0xc6, 0xb6, 0xde, 0xbe, 0x60, 0x63, 0x24, 0xac, 0x15, 0xfb, 0xeb, 0xd3, 0xea, 0x57, 0x13, 0x86, 0x38, 0x1e, 0x22, 0xf4, 0x8c, 0x5d, 0xaf, 0x1b, 0x27, 0x21, 0x4f, 0xa3, 0x63},
- dt2: fp.Elt{0x07, 0x15, 0x87, 0xc4, 0xfd, 0xa1, 0x97, 0x7a, 0x07, 0x1f, 0x56, 0xcc, 0xe3, 0x6a, 0x01, 0x90, 0xce, 0xf9, 0xfa, 0x50, 0xb2, 0xe0, 0x87, 0x8b, 0x6c, 0x63, 0x6c, 0xf6, 0x2a, 0x09, 0xef, 0xef, 0xd2, 0x31, 0x40, 0x25, 0xf6, 0x84, 0xcb, 0xe0, 0xc4, 0x23, 0xc1, 0xcb, 0xe2, 0x02, 0x83, 0x2d, 0xed, 0x74, 0x74, 0x8b, 0xf8, 0x7c, 0x81, 0x18},
- },
- { /* 55P*/
- addYX: fp.Elt{0x9e, 0xe5, 0x59, 0x95, 0x63, 0x2e, 0xac, 0x8b, 0x03, 0x3c, 0xc1, 0x8e, 0xe1, 0x5b, 0x56, 0x3c, 0x16, 0x41, 0xe4, 0xc2, 0x60, 0x0c, 0x6d, 0x65, 0x9f, 0xfc, 0x27, 0x68, 0x43, 0x44, 0x05, 0x12, 0x6c, 0xda, 0x04, 0xef, 0xcf, 0xcf, 0xdc, 0x0a, 0x1a, 0x7f, 0x12, 0xd3, 0xeb, 0x02, 0xb6, 0x04, 0xca, 0xd6, 0xcb, 0xf0, 0x22, 0xba, 0x35, 0x6d},
- subYX: fp.Elt{0x09, 0x6d, 0xf9, 0x64, 0x4c, 0xe6, 0x41, 0xff, 0x01, 0x4d, 0xce, 0x1e, 0xfa, 0x38, 0xa2, 0x25, 0x62, 0xff, 0x03, 0x39, 0x18, 0x91, 0xbb, 0x9d, 0xce, 0x02, 0xf0, 0xf1, 0x3c, 0x55, 0x18, 0xa9, 0xab, 0x4d, 0xd2, 0x35, 0xfd, 0x8d, 0xa9, 0xb2, 0xad, 0xb7, 0x06, 0x6e, 0xc6, 0x69, 0x49, 0xd6, 0x98, 0x98, 0x0b, 0x22, 0x81, 0x6b, 0xbd, 0xa0},
- dt2: fp.Elt{0x22, 0xf4, 0x85, 0x5d, 0x2b, 0xf1, 0x55, 0xa5, 0xd6, 0x27, 0x86, 0x57, 0x12, 0x1f, 0x16, 0x0a, 0x5a, 0x9b, 0xf2, 0x38, 0xb6, 0x28, 0xd8, 0x99, 0x0c, 0x89, 0x1d, 0x7f, 0xca, 0x21, 0x17, 0x1a, 0x0b, 0x02, 0x5f, 0x77, 0x2f, 0x73, 0x30, 0x7c, 0xc8, 0xd7, 0x2b, 0xcc, 0xe7, 0xf3, 0x21, 0xac, 0x53, 0xa7, 0x11, 0x5d, 0xd8, 0x1d, 0x9b, 0xf5},
- },
- { /* 57P*/
- addYX: fp.Elt{0x94, 0x63, 0x5d, 0xef, 0xfd, 0x6d, 0x25, 0x4e, 0x6d, 0x29, 0x03, 0xed, 0x24, 0x28, 0x27, 0x57, 0x47, 0x3e, 0x6a, 0x1a, 0xfe, 0x37, 0xee, 0x5f, 0x83, 0x29, 0x14, 0xfd, 0x78, 0x25, 0x8a, 0xe1, 0x02, 0x38, 0xd8, 0xca, 0x65, 0x55, 0x40, 0x7d, 0x48, 0x2c, 0x7c, 0x7e, 0x60, 0xb6, 0x0c, 0x6d, 0xf7, 0xe8, 0xb3, 0x62, 0x53, 0xd6, 0x9c, 0x2b},
- subYX: fp.Elt{0x47, 0x25, 0x70, 0x62, 0xf5, 0x65, 0x93, 0x62, 0x08, 0xac, 0x59, 0x66, 0xdb, 0x08, 0xd9, 0x1a, 0x19, 0xaf, 0xf4, 0xef, 0x02, 0xa2, 0x78, 0xa9, 0x55, 0x1c, 0xfa, 0x08, 0x11, 0xcb, 0xa3, 0x71, 0x74, 0xb1, 0x62, 0xe7, 0xc7, 0xf3, 0x5a, 0xb5, 0x8b, 0xd4, 0xf6, 0x10, 0x57, 0x79, 0x72, 0x2f, 0x13, 0x86, 0x7b, 0x44, 0x5f, 0x48, 0xfd, 0x88},
- dt2: fp.Elt{0x10, 0x02, 0xcd, 0x05, 0x9a, 0xc3, 0x32, 0x6d, 0x10, 0x3a, 0x74, 0xba, 0x06, 0xc4, 0x3b, 0x34, 0xbc, 0x36, 0xed, 0xa3, 0xba, 0x9a, 0xdb, 0x6d, 0xd4, 0x69, 0x99, 0x97, 0xd0, 0xe4, 0xdd, 0xf5, 0xd4, 0x7c, 0xd3, 0x4e, 0xab, 0xd1, 0x3b, 0xbb, 0xe9, 0xc7, 0x6a, 0x94, 0x25, 0x61, 0xf0, 0x06, 0xc5, 0x12, 0xa8, 0x86, 0xe5, 0x35, 0x46, 0xeb},
- },
- { /* 59P*/
- addYX: fp.Elt{0x9e, 0x95, 0x11, 0xc6, 0xc7, 0xe8, 0xee, 0x5a, 0x26, 0xa0, 0x72, 0x72, 0x59, 0x91, 0x59, 0x16, 0x49, 0x99, 0x7e, 0xbb, 0xd7, 0x15, 0xb4, 0xf2, 0x40, 0xf9, 0x5a, 0x4d, 0xc8, 0xa0, 0xe2, 0x34, 0x7b, 0x34, 0xf3, 0x99, 0xbf, 0xa9, 0xf3, 0x79, 0xc1, 0x1a, 0x0c, 0xf4, 0x86, 0x74, 0x4e, 0xcb, 0xbc, 0x90, 0xad, 0xb6, 0x51, 0x6d, 0xaa, 0x33},
- subYX: fp.Elt{0x9f, 0xd1, 0xc5, 0xa2, 0x6c, 0x24, 0x88, 0x15, 0x71, 0x68, 0xf6, 0x07, 0x45, 0x02, 0xc4, 0x73, 0x7e, 0x75, 0x87, 0xca, 0x7c, 0xf0, 0x92, 0x00, 0x75, 0xd6, 0x5a, 0xdd, 0xe0, 0x64, 0x16, 0x9d, 0x62, 0x80, 0x33, 0x9f, 0xf4, 0x8e, 0x1a, 0x15, 0x1c, 0xd3, 0x0f, 0x4d, 0x4f, 0x62, 0x2d, 0xd7, 0xa5, 0x77, 0xe3, 0xea, 0xf0, 0xfb, 0x1a, 0xdb},
- dt2: fp.Elt{0x6a, 0xa2, 0xb1, 0xaa, 0xfb, 0x5a, 0x32, 0x4e, 0xff, 0x47, 0x06, 0xd5, 0x9a, 0x4f, 0xce, 0x83, 0x5b, 0x82, 0x34, 0x3e, 0x47, 0xb8, 0xf8, 0xe9, 0x7c, 0x67, 0x69, 0x8d, 0x9c, 0xb7, 0xde, 0x57, 0xf4, 0x88, 0x41, 0x56, 0x0c, 0x87, 0x1e, 0xc9, 0x2f, 0x54, 0xbf, 0x5c, 0x68, 0x2c, 0xd9, 0xc4, 0xef, 0x53, 0x73, 0x1e, 0xa6, 0x38, 0x02, 0x10},
- },
- { /* 61P*/
- addYX: fp.Elt{0x08, 0x80, 0x4a, 0xc9, 0xb7, 0xa8, 0x88, 0xd9, 0xfc, 0x6a, 0xc0, 0x3e, 0xc2, 0x33, 0x4d, 0x2b, 0x2a, 0xa3, 0x6d, 0x72, 0x3e, 0xdc, 0x34, 0x68, 0x08, 0xbf, 0x27, 0xef, 0xf4, 0xff, 0xe2, 0x0c, 0x31, 0x0c, 0xa2, 0x0a, 0x1f, 0x65, 0xc1, 0x4c, 0x61, 0xd3, 0x1b, 0xbc, 0x25, 0xb1, 0xd0, 0xd4, 0x89, 0xb2, 0x53, 0xfb, 0x43, 0xa5, 0xaf, 0x04},
- subYX: fp.Elt{0xe3, 0xe1, 0x37, 0xad, 0x58, 0xa9, 0x55, 0x81, 0xee, 0x64, 0x21, 0xb9, 0xf5, 0x4c, 0x35, 0xea, 0x4a, 0xd3, 0x26, 0xaa, 0x90, 0xd4, 0x60, 0x46, 0x09, 0x4b, 0x4a, 0x62, 0xf9, 0xcd, 0xe1, 0xee, 0xbb, 0xc2, 0x09, 0x0b, 0xb0, 0x96, 0x8e, 0x43, 0x77, 0xaf, 0x25, 0x20, 0x5e, 0x47, 0xe4, 0x1d, 0x50, 0x69, 0x74, 0x08, 0xd7, 0xb9, 0x90, 0x13},
- dt2: fp.Elt{0x51, 0x91, 0x95, 0x64, 0x03, 0x16, 0xfd, 0x6e, 0x26, 0x94, 0x6b, 0x61, 0xe7, 0xd9, 0xe0, 0x4a, 0x6d, 0x7c, 0xfa, 0xc0, 0xe2, 0x43, 0x23, 0x53, 0x70, 0xf5, 0x6f, 0x73, 0x8b, 0x81, 0xb0, 0x0c, 0xee, 0x2e, 0x46, 0xf2, 0x8d, 0xa6, 0xfb, 0xb5, 0x1c, 0x33, 0xbf, 0x90, 0x59, 0xc9, 0x7c, 0xb8, 0x6f, 0xad, 0x75, 0x02, 0x90, 0x8e, 0x59, 0x75},
- },
- { /* 63P*/
- addYX: fp.Elt{0x36, 0x4d, 0x77, 0x04, 0xb8, 0x7d, 0x4a, 0xd1, 0xc5, 0xbb, 0x7b, 0x50, 0x5f, 0x8d, 0x9d, 0x62, 0x0f, 0x66, 0x71, 0xec, 0x87, 0xc5, 0x80, 0x82, 0xc8, 0xf4, 0x6a, 0x94, 0x92, 0x5b, 0xb0, 0x16, 0x9b, 0xb2, 0xc9, 0x6f, 0x2b, 0x2d, 0xee, 0x95, 0x73, 0x2e, 0xc2, 0x1b, 0xc5, 0x55, 0x36, 0x86, 0x24, 0xf8, 0x20, 0x05, 0x0d, 0x93, 0xd7, 0x76},
- subYX: fp.Elt{0x7f, 0x01, 0xeb, 0x2e, 0x48, 0x4d, 0x1d, 0xf1, 0x06, 0x7e, 0x7c, 0x2a, 0x43, 0xbf, 0x28, 0xac, 0xe9, 0x58, 0x13, 0xc8, 0xbf, 0x8e, 0xc0, 0xef, 0xe8, 0x4f, 0x46, 0x8a, 0xe7, 0xc0, 0xf6, 0x0f, 0x0a, 0x03, 0x48, 0x91, 0x55, 0x39, 0x2a, 0xe3, 0xdc, 0xf6, 0x22, 0x9d, 0x4d, 0x71, 0x55, 0x68, 0x25, 0x6e, 0x95, 0x52, 0xee, 0x4c, 0xd9, 0x01},
- dt2: fp.Elt{0xac, 0x33, 0x3f, 0x7c, 0x27, 0x35, 0x15, 0x91, 0x33, 0x8d, 0xf9, 0xc4, 0xf4, 0xf3, 0x90, 0x09, 0x75, 0x69, 0x62, 0x9f, 0x61, 0x35, 0x83, 0x92, 0x04, 0xef, 0x96, 0x38, 0x80, 0x9e, 0x88, 0xb3, 0x67, 0x95, 0xbe, 0x79, 0x3c, 0x35, 0xd8, 0xdc, 0xb2, 0x3e, 0x2d, 0xe6, 0x46, 0xbe, 0x81, 0xf3, 0x32, 0x0e, 0x37, 0x23, 0x75, 0x2a, 0x3d, 0xa0},
- },
-}
diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist_basemult.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist_basemult.go
deleted file mode 100644
index f6ac5edbbbc..00000000000
--- a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist_basemult.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package goldilocks
-
-import (
- "crypto/subtle"
-
- mlsb "github.com/cloudflare/circl/math/mlsbset"
-)
-
-const (
- // MLSBRecoding parameters
- fxT = 448
- fxV = 2
- fxW = 3
- fx2w1 = 1 << (uint(fxW) - 1)
-)
-
-// ScalarBaseMult returns kG where G is the generator point.
-func (e twistCurve) ScalarBaseMult(k *Scalar) *twistPoint {
- m, err := mlsb.New(fxT, fxV, fxW)
- if err != nil {
- panic(err)
- }
- if m.IsExtended() {
- panic("not extended")
- }
-
- var isZero int
- if k.IsZero() {
- isZero = 1
- }
- subtle.ConstantTimeCopy(isZero, k[:], order[:])
-
- minusK := *k
- isEven := 1 - int(k[0]&0x1)
- minusK.Neg()
- subtle.ConstantTimeCopy(isEven, k[:], minusK[:])
- c, err := m.Encode(k[:])
- if err != nil {
- panic(err)
- }
-
- gP := c.Exp(groupMLSB{})
- P := gP.(*twistPoint)
- P.cneg(uint(isEven))
- return P
-}
-
-type groupMLSB struct{}
-
-func (e groupMLSB) ExtendedEltP() mlsb.EltP { return nil }
-func (e groupMLSB) Sqr(x mlsb.EltG) { x.(*twistPoint).Double() }
-func (e groupMLSB) Mul(x mlsb.EltG, y mlsb.EltP) { x.(*twistPoint).mixAddZ1(y.(*preTwistPointAffine)) }
-func (e groupMLSB) Identity() mlsb.EltG { return twistCurve{}.Identity() }
-func (e groupMLSB) NewEltP() mlsb.EltP { return &preTwistPointAffine{} }
-func (e groupMLSB) Lookup(a mlsb.EltP, v uint, s, u int32) {
- Tabj := &tabFixMult[v]
- P := a.(*preTwistPointAffine)
- for k := range Tabj {
- P.cmov(&Tabj[k], uint(subtle.ConstantTimeEq(int32(k), u)))
- }
- P.cneg(int(s >> 31))
-}
diff --git a/vendor/github.com/cloudflare/circl/internal/conv/conv.go b/vendor/github.com/cloudflare/circl/internal/conv/conv.go
deleted file mode 100644
index 649a8e931d6..00000000000
--- a/vendor/github.com/cloudflare/circl/internal/conv/conv.go
+++ /dev/null
@@ -1,140 +0,0 @@
-package conv
-
-import (
- "encoding/binary"
- "fmt"
- "math/big"
- "strings"
-)
-
-// BytesLe2Hex returns an hexadecimal string of a number stored in a
-// little-endian order slice x.
-func BytesLe2Hex(x []byte) string {
- b := &strings.Builder{}
- b.Grow(2*len(x) + 2)
- fmt.Fprint(b, "0x")
- if len(x) == 0 {
- fmt.Fprint(b, "00")
- }
- for i := len(x) - 1; i >= 0; i-- {
- fmt.Fprintf(b, "%02x", x[i])
- }
- return b.String()
-}
-
-// BytesLe2BigInt converts a little-endian slice x into a big-endian
-// math/big.Int.
-func BytesLe2BigInt(x []byte) *big.Int {
- n := len(x)
- b := new(big.Int)
- if len(x) > 0 {
- y := make([]byte, n)
- for i := 0; i < n; i++ {
- y[n-1-i] = x[i]
- }
- b.SetBytes(y)
- }
- return b
-}
-
-// BytesBe2Uint64Le converts a big-endian slice x to a little-endian slice of uint64.
-func BytesBe2Uint64Le(x []byte) []uint64 {
- l := len(x)
- z := make([]uint64, (l+7)/8)
- blocks := l / 8
- for i := 0; i < blocks; i++ {
- z[i] = binary.BigEndian.Uint64(x[l-8*(i+1):])
- }
- remBytes := l % 8
- for i := 0; i < remBytes; i++ {
- z[blocks] |= uint64(x[l-1-8*blocks-i]) << uint(8*i)
- }
- return z
-}
-
-// BigInt2BytesLe stores a positive big.Int number x into a little-endian slice z.
-// The slice is modified if the bitlength of x <= 8*len(z) (padding with zeros).
-// If x does not fit in the slice or is negative, z is not modified.
-func BigInt2BytesLe(z []byte, x *big.Int) {
- xLen := (x.BitLen() + 7) >> 3
- zLen := len(z)
- if zLen >= xLen && x.Sign() >= 0 {
- y := x.Bytes()
- for i := 0; i < xLen; i++ {
- z[i] = y[xLen-1-i]
- }
- for i := xLen; i < zLen; i++ {
- z[i] = 0
- }
- }
-}
-
-// Uint64Le2BigInt converts a little-endian slice x into a big number.
-func Uint64Le2BigInt(x []uint64) *big.Int {
- n := len(x)
- b := new(big.Int)
- var bi big.Int
- for i := n - 1; i >= 0; i-- {
- bi.SetUint64(x[i])
- b.Lsh(b, 64)
- b.Add(b, &bi)
- }
- return b
-}
-
-// Uint64Le2BytesLe converts a little-endian slice x to a little-endian slice of bytes.
-func Uint64Le2BytesLe(x []uint64) []byte {
- b := make([]byte, 8*len(x))
- n := len(x)
- for i := 0; i < n; i++ {
- binary.LittleEndian.PutUint64(b[i*8:], x[i])
- }
- return b
-}
-
-// Uint64Le2BytesBe converts a little-endian slice x to a big-endian slice of bytes.
-func Uint64Le2BytesBe(x []uint64) []byte {
- b := make([]byte, 8*len(x))
- n := len(x)
- for i := 0; i < n; i++ {
- binary.BigEndian.PutUint64(b[i*8:], x[n-1-i])
- }
- return b
-}
-
-// Uint64Le2Hex returns an hexadecimal string of a number stored in a
-// little-endian order slice x.
-func Uint64Le2Hex(x []uint64) string {
- b := new(strings.Builder)
- b.Grow(16*len(x) + 2)
- fmt.Fprint(b, "0x")
- if len(x) == 0 {
- fmt.Fprint(b, "00")
- }
- for i := len(x) - 1; i >= 0; i-- {
- fmt.Fprintf(b, "%016x", x[i])
- }
- return b.String()
-}
-
-// BigInt2Uint64Le stores a positive big.Int number x into a little-endian slice z.
-// The slice is modified if the bitlength of x <= 8*len(z) (padding with zeros).
-// If x does not fit in the slice or is negative, z is not modified.
-func BigInt2Uint64Le(z []uint64, x *big.Int) {
- xLen := (x.BitLen() + 63) >> 6 // number of 64-bit words
- zLen := len(z)
- if zLen >= xLen && x.Sign() > 0 {
- var y, yi big.Int
- y.Set(x)
- two64 := big.NewInt(1)
- two64.Lsh(two64, 64).Sub(two64, big.NewInt(1))
- for i := 0; i < xLen; i++ {
- yi.And(&y, two64)
- z[i] = yi.Uint64()
- y.Rsh(&y, 64)
- }
- }
- for i := xLen; i < zLen; i++ {
- z[i] = 0
- }
-}
diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/doc.go b/vendor/github.com/cloudflare/circl/internal/sha3/doc.go
deleted file mode 100644
index 7e023090707..00000000000
--- a/vendor/github.com/cloudflare/circl/internal/sha3/doc.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package sha3 implements the SHA-3 fixed-output-length hash functions and
-// the SHAKE variable-output-length hash functions defined by FIPS-202.
-//
-// Both types of hash function use the "sponge" construction and the Keccak
-// permutation. For a detailed specification see http://keccak.noekeon.org/
-//
-// # Guidance
-//
-// If you aren't sure what function you need, use SHAKE256 with at least 64
-// bytes of output. The SHAKE instances are faster than the SHA3 instances;
-// the latter have to allocate memory to conform to the hash.Hash interface.
-//
-// If you need a secret-key MAC (message authentication code), prepend the
-// secret key to the input, hash with SHAKE256 and read at least 32 bytes of
-// output.
-//
-// # Security strengths
-//
-// The SHA3-x (x equals 224, 256, 384, or 512) functions have a security
-// strength against preimage attacks of x bits. Since they only produce "x"
-// bits of output, their collision-resistance is only "x/2" bits.
-//
-// The SHAKE-256 and -128 functions have a generic security strength of 256 and
-// 128 bits against all attacks, provided that at least 2x bits of their output
-// is used. Requesting more than 64 or 32 bytes of output, respectively, does
-// not increase the collision-resistance of the SHAKE functions.
-//
-// # The sponge construction
-//
-// A sponge builds a pseudo-random function from a public pseudo-random
-// permutation, by applying the permutation to a state of "rate + capacity"
-// bytes, but hiding "capacity" of the bytes.
-//
-// A sponge starts out with a zero state. To hash an input using a sponge, up
-// to "rate" bytes of the input are XORed into the sponge's state. The sponge
-// is then "full" and the permutation is applied to "empty" it. This process is
-// repeated until all the input has been "absorbed". The input is then padded.
-// The digest is "squeezed" from the sponge in the same way, except that output
-// is copied out instead of input being XORed in.
-//
-// A sponge is parameterized by its generic security strength, which is equal
-// to half its capacity; capacity + rate is equal to the permutation's width.
-// Since the KeccakF-1600 permutation is 1600 bits (200 bytes) wide, this means
-// that the security strength of a sponge instance is equal to (1600 - bitrate) / 2.
-//
-// # Recommendations
-//
-// The SHAKE functions are recommended for most new uses. They can produce
-// output of arbitrary length. SHAKE256, with an output length of at least
-// 64 bytes, provides 256-bit security against all attacks. The Keccak team
-// recommends it for most applications upgrading from SHA2-512. (NIST chose a
-// much stronger, but much slower, sponge instance for SHA3-512.)
-//
-// The SHA-3 functions are "drop-in" replacements for the SHA-2 functions.
-// They produce output of the same length, with the same security strengths
-// against all attacks. This means, in particular, that SHA3-256 only has
-// 128-bit collision resistance, because its output length is 32 bytes.
-package sha3
diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/hashes.go b/vendor/github.com/cloudflare/circl/internal/sha3/hashes.go
deleted file mode 100644
index 7d2365a76ed..00000000000
--- a/vendor/github.com/cloudflare/circl/internal/sha3/hashes.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sha3
-
-// This file provides functions for creating instances of the SHA-3
-// and SHAKE hash functions, as well as utility functions for hashing
-// bytes.
-
-// New224 creates a new SHA3-224 hash.
-// Its generic security strength is 224 bits against preimage attacks,
-// and 112 bits against collision attacks.
-func New224() State {
- return State{rate: 144, outputLen: 28, dsbyte: 0x06}
-}
-
-// New256 creates a new SHA3-256 hash.
-// Its generic security strength is 256 bits against preimage attacks,
-// and 128 bits against collision attacks.
-func New256() State {
- return State{rate: 136, outputLen: 32, dsbyte: 0x06}
-}
-
-// New384 creates a new SHA3-384 hash.
-// Its generic security strength is 384 bits against preimage attacks,
-// and 192 bits against collision attacks.
-func New384() State {
- return State{rate: 104, outputLen: 48, dsbyte: 0x06}
-}
-
-// New512 creates a new SHA3-512 hash.
-// Its generic security strength is 512 bits against preimage attacks,
-// and 256 bits against collision attacks.
-func New512() State {
- return State{rate: 72, outputLen: 64, dsbyte: 0x06}
-}
-
-// Sum224 returns the SHA3-224 digest of the data.
-func Sum224(data []byte) (digest [28]byte) {
- h := New224()
- _, _ = h.Write(data)
- h.Sum(digest[:0])
- return
-}
-
-// Sum256 returns the SHA3-256 digest of the data.
-func Sum256(data []byte) (digest [32]byte) {
- h := New256()
- _, _ = h.Write(data)
- h.Sum(digest[:0])
- return
-}
-
-// Sum384 returns the SHA3-384 digest of the data.
-func Sum384(data []byte) (digest [48]byte) {
- h := New384()
- _, _ = h.Write(data)
- h.Sum(digest[:0])
- return
-}
-
-// Sum512 returns the SHA3-512 digest of the data.
-func Sum512(data []byte) (digest [64]byte) {
- h := New512()
- _, _ = h.Write(data)
- h.Sum(digest[:0])
- return
-}
diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/keccakf.go b/vendor/github.com/cloudflare/circl/internal/sha3/keccakf.go
deleted file mode 100644
index 1755fd1e6dc..00000000000
--- a/vendor/github.com/cloudflare/circl/internal/sha3/keccakf.go
+++ /dev/null
@@ -1,391 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sha3
-
-// KeccakF1600 applies the Keccak permutation to a 1600b-wide
-// state represented as a slice of 25 uint64s.
-// If turbo is true, applies the 12-round variant instead of the
-// regular 24-round variant.
-// nolint:funlen
-func KeccakF1600(a *[25]uint64, turbo bool) {
- // Implementation translated from Keccak-inplace.c
- // in the keccak reference code.
- var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64
-
- i := 0
-
- if turbo {
- i = 12
- }
-
- for ; i < 24; i += 4 {
- // Combines the 5 steps in each round into 2 steps.
- // Unrolls 4 rounds per loop and spreads some steps across rounds.
-
- // Round 1
- bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
- bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
- bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
- bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
- bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
- d0 = bc4 ^ (bc1<<1 | bc1>>63)
- d1 = bc0 ^ (bc2<<1 | bc2>>63)
- d2 = bc1 ^ (bc3<<1 | bc3>>63)
- d3 = bc2 ^ (bc4<<1 | bc4>>63)
- d4 = bc3 ^ (bc0<<1 | bc0>>63)
-
- bc0 = a[0] ^ d0
- t = a[6] ^ d1
- bc1 = t<<44 | t>>(64-44)
- t = a[12] ^ d2
- bc2 = t<<43 | t>>(64-43)
- t = a[18] ^ d3
- bc3 = t<<21 | t>>(64-21)
- t = a[24] ^ d4
- bc4 = t<<14 | t>>(64-14)
- a[0] = bc0 ^ (bc2 &^ bc1) ^ RC[i]
- a[6] = bc1 ^ (bc3 &^ bc2)
- a[12] = bc2 ^ (bc4 &^ bc3)
- a[18] = bc3 ^ (bc0 &^ bc4)
- a[24] = bc4 ^ (bc1 &^ bc0)
-
- t = a[10] ^ d0
- bc2 = t<<3 | t>>(64-3)
- t = a[16] ^ d1
- bc3 = t<<45 | t>>(64-45)
- t = a[22] ^ d2
- bc4 = t<<61 | t>>(64-61)
- t = a[3] ^ d3
- bc0 = t<<28 | t>>(64-28)
- t = a[9] ^ d4
- bc1 = t<<20 | t>>(64-20)
- a[10] = bc0 ^ (bc2 &^ bc1)
- a[16] = bc1 ^ (bc3 &^ bc2)
- a[22] = bc2 ^ (bc4 &^ bc3)
- a[3] = bc3 ^ (bc0 &^ bc4)
- a[9] = bc4 ^ (bc1 &^ bc0)
-
- t = a[20] ^ d0
- bc4 = t<<18 | t>>(64-18)
- t = a[1] ^ d1
- bc0 = t<<1 | t>>(64-1)
- t = a[7] ^ d2
- bc1 = t<<6 | t>>(64-6)
- t = a[13] ^ d3
- bc2 = t<<25 | t>>(64-25)
- t = a[19] ^ d4
- bc3 = t<<8 | t>>(64-8)
- a[20] = bc0 ^ (bc2 &^ bc1)
- a[1] = bc1 ^ (bc3 &^ bc2)
- a[7] = bc2 ^ (bc4 &^ bc3)
- a[13] = bc3 ^ (bc0 &^ bc4)
- a[19] = bc4 ^ (bc1 &^ bc0)
-
- t = a[5] ^ d0
- bc1 = t<<36 | t>>(64-36)
- t = a[11] ^ d1
- bc2 = t<<10 | t>>(64-10)
- t = a[17] ^ d2
- bc3 = t<<15 | t>>(64-15)
- t = a[23] ^ d3
- bc4 = t<<56 | t>>(64-56)
- t = a[4] ^ d4
- bc0 = t<<27 | t>>(64-27)
- a[5] = bc0 ^ (bc2 &^ bc1)
- a[11] = bc1 ^ (bc3 &^ bc2)
- a[17] = bc2 ^ (bc4 &^ bc3)
- a[23] = bc3 ^ (bc0 &^ bc4)
- a[4] = bc4 ^ (bc1 &^ bc0)
-
- t = a[15] ^ d0
- bc3 = t<<41 | t>>(64-41)
- t = a[21] ^ d1
- bc4 = t<<2 | t>>(64-2)
- t = a[2] ^ d2
- bc0 = t<<62 | t>>(64-62)
- t = a[8] ^ d3
- bc1 = t<<55 | t>>(64-55)
- t = a[14] ^ d4
- bc2 = t<<39 | t>>(64-39)
- a[15] = bc0 ^ (bc2 &^ bc1)
- a[21] = bc1 ^ (bc3 &^ bc2)
- a[2] = bc2 ^ (bc4 &^ bc3)
- a[8] = bc3 ^ (bc0 &^ bc4)
- a[14] = bc4 ^ (bc1 &^ bc0)
-
- // Round 2
- bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
- bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
- bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
- bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
- bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
- d0 = bc4 ^ (bc1<<1 | bc1>>63)
- d1 = bc0 ^ (bc2<<1 | bc2>>63)
- d2 = bc1 ^ (bc3<<1 | bc3>>63)
- d3 = bc2 ^ (bc4<<1 | bc4>>63)
- d4 = bc3 ^ (bc0<<1 | bc0>>63)
-
- bc0 = a[0] ^ d0
- t = a[16] ^ d1
- bc1 = t<<44 | t>>(64-44)
- t = a[7] ^ d2
- bc2 = t<<43 | t>>(64-43)
- t = a[23] ^ d3
- bc3 = t<<21 | t>>(64-21)
- t = a[14] ^ d4
- bc4 = t<<14 | t>>(64-14)
- a[0] = bc0 ^ (bc2 &^ bc1) ^ RC[i+1]
- a[16] = bc1 ^ (bc3 &^ bc2)
- a[7] = bc2 ^ (bc4 &^ bc3)
- a[23] = bc3 ^ (bc0 &^ bc4)
- a[14] = bc4 ^ (bc1 &^ bc0)
-
- t = a[20] ^ d0
- bc2 = t<<3 | t>>(64-3)
- t = a[11] ^ d1
- bc3 = t<<45 | t>>(64-45)
- t = a[2] ^ d2
- bc4 = t<<61 | t>>(64-61)
- t = a[18] ^ d3
- bc0 = t<<28 | t>>(64-28)
- t = a[9] ^ d4
- bc1 = t<<20 | t>>(64-20)
- a[20] = bc0 ^ (bc2 &^ bc1)
- a[11] = bc1 ^ (bc3 &^ bc2)
- a[2] = bc2 ^ (bc4 &^ bc3)
- a[18] = bc3 ^ (bc0 &^ bc4)
- a[9] = bc4 ^ (bc1 &^ bc0)
-
- t = a[15] ^ d0
- bc4 = t<<18 | t>>(64-18)
- t = a[6] ^ d1
- bc0 = t<<1 | t>>(64-1)
- t = a[22] ^ d2
- bc1 = t<<6 | t>>(64-6)
- t = a[13] ^ d3
- bc2 = t<<25 | t>>(64-25)
- t = a[4] ^ d4
- bc3 = t<<8 | t>>(64-8)
- a[15] = bc0 ^ (bc2 &^ bc1)
- a[6] = bc1 ^ (bc3 &^ bc2)
- a[22] = bc2 ^ (bc4 &^ bc3)
- a[13] = bc3 ^ (bc0 &^ bc4)
- a[4] = bc4 ^ (bc1 &^ bc0)
-
- t = a[10] ^ d0
- bc1 = t<<36 | t>>(64-36)
- t = a[1] ^ d1
- bc2 = t<<10 | t>>(64-10)
- t = a[17] ^ d2
- bc3 = t<<15 | t>>(64-15)
- t = a[8] ^ d3
- bc4 = t<<56 | t>>(64-56)
- t = a[24] ^ d4
- bc0 = t<<27 | t>>(64-27)
- a[10] = bc0 ^ (bc2 &^ bc1)
- a[1] = bc1 ^ (bc3 &^ bc2)
- a[17] = bc2 ^ (bc4 &^ bc3)
- a[8] = bc3 ^ (bc0 &^ bc4)
- a[24] = bc4 ^ (bc1 &^ bc0)
-
- t = a[5] ^ d0
- bc3 = t<<41 | t>>(64-41)
- t = a[21] ^ d1
- bc4 = t<<2 | t>>(64-2)
- t = a[12] ^ d2
- bc0 = t<<62 | t>>(64-62)
- t = a[3] ^ d3
- bc1 = t<<55 | t>>(64-55)
- t = a[19] ^ d4
- bc2 = t<<39 | t>>(64-39)
- a[5] = bc0 ^ (bc2 &^ bc1)
- a[21] = bc1 ^ (bc3 &^ bc2)
- a[12] = bc2 ^ (bc4 &^ bc3)
- a[3] = bc3 ^ (bc0 &^ bc4)
- a[19] = bc4 ^ (bc1 &^ bc0)
-
- // Round 3
- bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
- bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
- bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
- bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
- bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
- d0 = bc4 ^ (bc1<<1 | bc1>>63)
- d1 = bc0 ^ (bc2<<1 | bc2>>63)
- d2 = bc1 ^ (bc3<<1 | bc3>>63)
- d3 = bc2 ^ (bc4<<1 | bc4>>63)
- d4 = bc3 ^ (bc0<<1 | bc0>>63)
-
- bc0 = a[0] ^ d0
- t = a[11] ^ d1
- bc1 = t<<44 | t>>(64-44)
- t = a[22] ^ d2
- bc2 = t<<43 | t>>(64-43)
- t = a[8] ^ d3
- bc3 = t<<21 | t>>(64-21)
- t = a[19] ^ d4
- bc4 = t<<14 | t>>(64-14)
- a[0] = bc0 ^ (bc2 &^ bc1) ^ RC[i+2]
- a[11] = bc1 ^ (bc3 &^ bc2)
- a[22] = bc2 ^ (bc4 &^ bc3)
- a[8] = bc3 ^ (bc0 &^ bc4)
- a[19] = bc4 ^ (bc1 &^ bc0)
-
- t = a[15] ^ d0
- bc2 = t<<3 | t>>(64-3)
- t = a[1] ^ d1
- bc3 = t<<45 | t>>(64-45)
- t = a[12] ^ d2
- bc4 = t<<61 | t>>(64-61)
- t = a[23] ^ d3
- bc0 = t<<28 | t>>(64-28)
- t = a[9] ^ d4
- bc1 = t<<20 | t>>(64-20)
- a[15] = bc0 ^ (bc2 &^ bc1)
- a[1] = bc1 ^ (bc3 &^ bc2)
- a[12] = bc2 ^ (bc4 &^ bc3)
- a[23] = bc3 ^ (bc0 &^ bc4)
- a[9] = bc4 ^ (bc1 &^ bc0)
-
- t = a[5] ^ d0
- bc4 = t<<18 | t>>(64-18)
- t = a[16] ^ d1
- bc0 = t<<1 | t>>(64-1)
- t = a[2] ^ d2
- bc1 = t<<6 | t>>(64-6)
- t = a[13] ^ d3
- bc2 = t<<25 | t>>(64-25)
- t = a[24] ^ d4
- bc3 = t<<8 | t>>(64-8)
- a[5] = bc0 ^ (bc2 &^ bc1)
- a[16] = bc1 ^ (bc3 &^ bc2)
- a[2] = bc2 ^ (bc4 &^ bc3)
- a[13] = bc3 ^ (bc0 &^ bc4)
- a[24] = bc4 ^ (bc1 &^ bc0)
-
- t = a[20] ^ d0
- bc1 = t<<36 | t>>(64-36)
- t = a[6] ^ d1
- bc2 = t<<10 | t>>(64-10)
- t = a[17] ^ d2
- bc3 = t<<15 | t>>(64-15)
- t = a[3] ^ d3
- bc4 = t<<56 | t>>(64-56)
- t = a[14] ^ d4
- bc0 = t<<27 | t>>(64-27)
- a[20] = bc0 ^ (bc2 &^ bc1)
- a[6] = bc1 ^ (bc3 &^ bc2)
- a[17] = bc2 ^ (bc4 &^ bc3)
- a[3] = bc3 ^ (bc0 &^ bc4)
- a[14] = bc4 ^ (bc1 &^ bc0)
-
- t = a[10] ^ d0
- bc3 = t<<41 | t>>(64-41)
- t = a[21] ^ d1
- bc4 = t<<2 | t>>(64-2)
- t = a[7] ^ d2
- bc0 = t<<62 | t>>(64-62)
- t = a[18] ^ d3
- bc1 = t<<55 | t>>(64-55)
- t = a[4] ^ d4
- bc2 = t<<39 | t>>(64-39)
- a[10] = bc0 ^ (bc2 &^ bc1)
- a[21] = bc1 ^ (bc3 &^ bc2)
- a[7] = bc2 ^ (bc4 &^ bc3)
- a[18] = bc3 ^ (bc0 &^ bc4)
- a[4] = bc4 ^ (bc1 &^ bc0)
-
- // Round 4
- bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
- bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
- bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
- bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
- bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
- d0 = bc4 ^ (bc1<<1 | bc1>>63)
- d1 = bc0 ^ (bc2<<1 | bc2>>63)
- d2 = bc1 ^ (bc3<<1 | bc3>>63)
- d3 = bc2 ^ (bc4<<1 | bc4>>63)
- d4 = bc3 ^ (bc0<<1 | bc0>>63)
-
- bc0 = a[0] ^ d0
- t = a[1] ^ d1
- bc1 = t<<44 | t>>(64-44)
- t = a[2] ^ d2
- bc2 = t<<43 | t>>(64-43)
- t = a[3] ^ d3
- bc3 = t<<21 | t>>(64-21)
- t = a[4] ^ d4
- bc4 = t<<14 | t>>(64-14)
- a[0] = bc0 ^ (bc2 &^ bc1) ^ RC[i+3]
- a[1] = bc1 ^ (bc3 &^ bc2)
- a[2] = bc2 ^ (bc4 &^ bc3)
- a[3] = bc3 ^ (bc0 &^ bc4)
- a[4] = bc4 ^ (bc1 &^ bc0)
-
- t = a[5] ^ d0
- bc2 = t<<3 | t>>(64-3)
- t = a[6] ^ d1
- bc3 = t<<45 | t>>(64-45)
- t = a[7] ^ d2
- bc4 = t<<61 | t>>(64-61)
- t = a[8] ^ d3
- bc0 = t<<28 | t>>(64-28)
- t = a[9] ^ d4
- bc1 = t<<20 | t>>(64-20)
- a[5] = bc0 ^ (bc2 &^ bc1)
- a[6] = bc1 ^ (bc3 &^ bc2)
- a[7] = bc2 ^ (bc4 &^ bc3)
- a[8] = bc3 ^ (bc0 &^ bc4)
- a[9] = bc4 ^ (bc1 &^ bc0)
-
- t = a[10] ^ d0
- bc4 = t<<18 | t>>(64-18)
- t = a[11] ^ d1
- bc0 = t<<1 | t>>(64-1)
- t = a[12] ^ d2
- bc1 = t<<6 | t>>(64-6)
- t = a[13] ^ d3
- bc2 = t<<25 | t>>(64-25)
- t = a[14] ^ d4
- bc3 = t<<8 | t>>(64-8)
- a[10] = bc0 ^ (bc2 &^ bc1)
- a[11] = bc1 ^ (bc3 &^ bc2)
- a[12] = bc2 ^ (bc4 &^ bc3)
- a[13] = bc3 ^ (bc0 &^ bc4)
- a[14] = bc4 ^ (bc1 &^ bc0)
-
- t = a[15] ^ d0
- bc1 = t<<36 | t>>(64-36)
- t = a[16] ^ d1
- bc2 = t<<10 | t>>(64-10)
- t = a[17] ^ d2
- bc3 = t<<15 | t>>(64-15)
- t = a[18] ^ d3
- bc4 = t<<56 | t>>(64-56)
- t = a[19] ^ d4
- bc0 = t<<27 | t>>(64-27)
- a[15] = bc0 ^ (bc2 &^ bc1)
- a[16] = bc1 ^ (bc3 &^ bc2)
- a[17] = bc2 ^ (bc4 &^ bc3)
- a[18] = bc3 ^ (bc0 &^ bc4)
- a[19] = bc4 ^ (bc1 &^ bc0)
-
- t = a[20] ^ d0
- bc3 = t<<41 | t>>(64-41)
- t = a[21] ^ d1
- bc4 = t<<2 | t>>(64-2)
- t = a[22] ^ d2
- bc0 = t<<62 | t>>(64-62)
- t = a[23] ^ d3
- bc1 = t<<55 | t>>(64-55)
- t = a[24] ^ d4
- bc2 = t<<39 | t>>(64-39)
- a[20] = bc0 ^ (bc2 &^ bc1)
- a[21] = bc1 ^ (bc3 &^ bc2)
- a[22] = bc2 ^ (bc4 &^ bc3)
- a[23] = bc3 ^ (bc0 &^ bc4)
- a[24] = bc4 ^ (bc1 &^ bc0)
- }
-}
diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/rc.go b/vendor/github.com/cloudflare/circl/internal/sha3/rc.go
deleted file mode 100644
index 6a3df42f305..00000000000
--- a/vendor/github.com/cloudflare/circl/internal/sha3/rc.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package sha3
-
-// RC stores the round constants for use in the ι step.
-var RC = [24]uint64{
- 0x0000000000000001,
- 0x0000000000008082,
- 0x800000000000808A,
- 0x8000000080008000,
- 0x000000000000808B,
- 0x0000000080000001,
- 0x8000000080008081,
- 0x8000000000008009,
- 0x000000000000008A,
- 0x0000000000000088,
- 0x0000000080008009,
- 0x000000008000000A,
- 0x000000008000808B,
- 0x800000000000008B,
- 0x8000000000008089,
- 0x8000000000008003,
- 0x8000000000008002,
- 0x8000000000000080,
- 0x000000000000800A,
- 0x800000008000000A,
- 0x8000000080008081,
- 0x8000000000008080,
- 0x0000000080000001,
- 0x8000000080008008,
-}
diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/sha3.go b/vendor/github.com/cloudflare/circl/internal/sha3/sha3.go
deleted file mode 100644
index a0df5aa6c59..00000000000
--- a/vendor/github.com/cloudflare/circl/internal/sha3/sha3.go
+++ /dev/null
@@ -1,200 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sha3
-
-// spongeDirection indicates the direction bytes are flowing through the sponge.
-type spongeDirection int
-
-const (
- // spongeAbsorbing indicates that the sponge is absorbing input.
- spongeAbsorbing spongeDirection = iota
- // spongeSqueezing indicates that the sponge is being squeezed.
- spongeSqueezing
-)
-
-const (
- // maxRate is the maximum size of the internal buffer. SHAKE-256
- // currently needs the largest buffer.
- maxRate = 168
-)
-
-func (d *State) buf() []byte {
- return d.storage.asBytes()[d.bufo:d.bufe]
-}
-
-type State struct {
- // Generic sponge components.
- a [25]uint64 // main state of the hash
- rate int // the number of bytes of state to use
-
- bufo int // offset of buffer in storage
- bufe int // end of buffer in storage
-
- // dsbyte contains the "domain separation" bits and the first bit of
- // the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the
- // SHA-3 and SHAKE functions by appending bitstrings to the message.
- // Using a little-endian bit-ordering convention, these are "01" for SHA-3
- // and "1111" for SHAKE, or 00000010b and 00001111b, respectively. Then the
- // padding rule from section 5.1 is applied to pad the message to a multiple
- // of the rate, which involves adding a "1" bit, zero or more "0" bits, and
- // a final "1" bit. We merge the first "1" bit from the padding into dsbyte,
- // giving 00000110b (0x06) and 00011111b (0x1f).
- // [1] http://csrc.nist.gov/publications/drafts/fips-202/fips_202_draft.pdf
- // "Draft FIPS 202: SHA-3 Standard: Permutation-Based Hash and
- // Extendable-Output Functions (May 2014)"
- dsbyte byte
-
- storage storageBuf
-
- // Specific to SHA-3 and SHAKE.
- outputLen int // the default output size in bytes
- state spongeDirection // whether the sponge is absorbing or squeezing
- turbo bool // Whether we're using 12 rounds instead of 24
-}
-
-// BlockSize returns the rate of sponge underlying this hash function.
-func (d *State) BlockSize() int { return d.rate }
-
-// Size returns the output size of the hash function in bytes.
-func (d *State) Size() int { return d.outputLen }
-
-// Reset clears the internal state by zeroing the sponge state and
-// the byte buffer, and setting Sponge.state to absorbing.
-func (d *State) Reset() {
- // Zero the permutation's state.
- for i := range d.a {
- d.a[i] = 0
- }
- d.state = spongeAbsorbing
- d.bufo = 0
- d.bufe = 0
-}
-
-func (d *State) clone() *State {
- ret := *d
- return &ret
-}
-
-// permute applies the KeccakF-1600 permutation. It handles
-// any input-output buffering.
-func (d *State) permute() {
- switch d.state {
- case spongeAbsorbing:
- // If we're absorbing, we need to xor the input into the state
- // before applying the permutation.
- xorIn(d, d.buf())
- d.bufe = 0
- d.bufo = 0
- KeccakF1600(&d.a, d.turbo)
- case spongeSqueezing:
- // If we're squeezing, we need to apply the permutation before
- // copying more output.
- KeccakF1600(&d.a, d.turbo)
- d.bufe = d.rate
- d.bufo = 0
- copyOut(d, d.buf())
- }
-}
-
-// pads appends the domain separation bits in dsbyte, applies
-// the multi-bitrate 10..1 padding rule, and permutes the state.
-func (d *State) padAndPermute(dsbyte byte) {
- // Pad with this instance's domain-separator bits. We know that there's
- // at least one byte of space in d.buf() because, if it were full,
- // permute would have been called to empty it. dsbyte also contains the
- // first one bit for the padding. See the comment in the state struct.
- zerosStart := d.bufe + 1
- d.bufe = d.rate
- buf := d.buf()
- buf[zerosStart-1] = dsbyte
- for i := zerosStart; i < d.rate; i++ {
- buf[i] = 0
- }
- // This adds the final one bit for the padding. Because of the way that
- // bits are numbered from the LSB upwards, the final bit is the MSB of
- // the last byte.
- buf[d.rate-1] ^= 0x80
- // Apply the permutation
- d.permute()
- d.state = spongeSqueezing
- d.bufe = d.rate
- copyOut(d, buf)
-}
-
-// Write absorbs more data into the hash's state. It produces an error
-// if more data is written to the ShakeHash after writing
-func (d *State) Write(p []byte) (written int, err error) {
- if d.state != spongeAbsorbing {
- panic("sha3: write to sponge after read")
- }
- written = len(p)
-
- for len(p) > 0 {
- bufl := d.bufe - d.bufo
- if bufl == 0 && len(p) >= d.rate {
- // The fast path; absorb a full "rate" bytes of input and apply the permutation.
- xorIn(d, p[:d.rate])
- p = p[d.rate:]
- KeccakF1600(&d.a, d.turbo)
- } else {
- // The slow path; buffer the input until we can fill the sponge, and then xor it in.
- todo := d.rate - bufl
- if todo > len(p) {
- todo = len(p)
- }
- d.bufe += todo
- buf := d.buf()
- copy(buf[bufl:], p[:todo])
- p = p[todo:]
-
- // If the sponge is full, apply the permutation.
- if d.bufe == d.rate {
- d.permute()
- }
- }
- }
-
- return written, nil
-}
-
-// Read squeezes an arbitrary number of bytes from the sponge.
-func (d *State) Read(out []byte) (n int, err error) {
- // If we're still absorbing, pad and apply the permutation.
- if d.state == spongeAbsorbing {
- d.padAndPermute(d.dsbyte)
- }
-
- n = len(out)
-
- // Now, do the squeezing.
- for len(out) > 0 {
- buf := d.buf()
- n := copy(out, buf)
- d.bufo += n
- out = out[n:]
-
- // Apply the permutation if we've squeezed the sponge dry.
- if d.bufo == d.bufe {
- d.permute()
- }
- }
-
- return
-}
-
-// Sum applies padding to the hash state and then squeezes out the desired
-// number of output bytes.
-func (d *State) Sum(in []byte) []byte {
- // Make a copy of the original hash so that caller can keep writing
- // and summing.
- dup := d.clone()
- hash := make([]byte, dup.outputLen)
- _, _ = dup.Read(hash)
- return append(in, hash...)
-}
-
-func (d *State) IsAbsorbing() bool {
- return d.state == spongeAbsorbing
-}
diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/sha3_s390x.s b/vendor/github.com/cloudflare/circl/internal/sha3/sha3_s390x.s
deleted file mode 100644
index 8a4458f63f9..00000000000
--- a/vendor/github.com/cloudflare/circl/internal/sha3/sha3_s390x.s
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !gccgo,!appengine
-
-#include "textflag.h"
-
-// func kimd(function code, chain *[200]byte, src []byte)
-TEXT ·kimd(SB), NOFRAME|NOSPLIT, $0-40
- MOVD function+0(FP), R0
- MOVD chain+8(FP), R1
- LMG src+16(FP), R2, R3 // R2=base, R3=len
-
-continue:
- WORD $0xB93E0002 // KIMD --, R2
- BVS continue // continue if interrupted
- MOVD $0, R0 // reset R0 for pre-go1.8 compilers
- RET
-
-// func klmd(function code, chain *[200]byte, dst, src []byte)
-TEXT ·klmd(SB), NOFRAME|NOSPLIT, $0-64
- // TODO: SHAKE support
- MOVD function+0(FP), R0
- MOVD chain+8(FP), R1
- LMG dst+16(FP), R2, R3 // R2=base, R3=len
- LMG src+40(FP), R4, R5 // R4=base, R5=len
-
-continue:
- WORD $0xB93F0024 // KLMD R2, R4
- BVS continue // continue if interrupted
- MOVD $0, R0 // reset R0 for pre-go1.8 compilers
- RET
diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/shake.go b/vendor/github.com/cloudflare/circl/internal/sha3/shake.go
deleted file mode 100644
index 77817f758cb..00000000000
--- a/vendor/github.com/cloudflare/circl/internal/sha3/shake.go
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sha3
-
-// This file defines the ShakeHash interface, and provides
-// functions for creating SHAKE and cSHAKE instances, as well as utility
-// functions for hashing bytes to arbitrary-length output.
-//
-//
-// SHAKE implementation is based on FIPS PUB 202 [1]
-// cSHAKE implementations is based on NIST SP 800-185 [2]
-//
-// [1] https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf
-// [2] https://doi.org/10.6028/NIST.SP.800-185
-
-import (
- "io"
-)
-
-// ShakeHash defines the interface to hash functions that
-// support arbitrary-length output.
-type ShakeHash interface {
- // Write absorbs more data into the hash's state. It panics if input is
- // written to it after output has been read from it.
- io.Writer
-
- // Read reads more output from the hash; reading affects the hash's
- // state. (ShakeHash.Read is thus very different from Hash.Sum)
- // It never returns an error.
- io.Reader
-
- // Clone returns a copy of the ShakeHash in its current state.
- Clone() ShakeHash
-
- // Reset resets the ShakeHash to its initial state.
- Reset()
-}
-
-// Consts for configuring initial SHA-3 state
-const (
- dsbyteShake = 0x1f
- rate128 = 168
- rate256 = 136
-)
-
-// Clone returns copy of SHAKE context within its current state.
-func (d *State) Clone() ShakeHash {
- return d.clone()
-}
-
-// NewShake128 creates a new SHAKE128 variable-output-length ShakeHash.
-// Its generic security strength is 128 bits against all attacks if at
-// least 32 bytes of its output are used.
-func NewShake128() State {
- return State{rate: rate128, dsbyte: dsbyteShake}
-}
-
-// NewTurboShake128 creates a new TurboSHAKE128 variable-output-length ShakeHash.
-// Its generic security strength is 128 bits against all attacks if at
-// least 32 bytes of its output are used.
-// D is the domain separation byte and must be between 0x01 and 0x7f inclusive.
-func NewTurboShake128(D byte) State {
- if D == 0 || D > 0x7f {
- panic("turboshake: D out of range")
- }
- return State{rate: rate128, dsbyte: D, turbo: true}
-}
-
-// NewShake256 creates a new SHAKE256 variable-output-length ShakeHash.
-// Its generic security strength is 256 bits against all attacks if
-// at least 64 bytes of its output are used.
-func NewShake256() State {
- return State{rate: rate256, dsbyte: dsbyteShake}
-}
-
-// NewTurboShake256 creates a new TurboSHAKE256 variable-output-length ShakeHash.
-// Its generic security strength is 256 bits against all attacks if
-// at least 64 bytes of its output are used.
-// D is the domain separation byte and must be between 0x01 and 0x7f inclusive.
-func NewTurboShake256(D byte) State {
- if D == 0 || D > 0x7f {
- panic("turboshake: D out of range")
- }
- return State{rate: rate256, dsbyte: D, turbo: true}
-}
-
-// ShakeSum128 writes an arbitrary-length digest of data into hash.
-func ShakeSum128(hash, data []byte) {
- h := NewShake128()
- _, _ = h.Write(data)
- _, _ = h.Read(hash)
-}
-
-// ShakeSum256 writes an arbitrary-length digest of data into hash.
-func ShakeSum256(hash, data []byte) {
- h := NewShake256()
- _, _ = h.Write(data)
- _, _ = h.Read(hash)
-}
-
-// TurboShakeSum128 writes an arbitrary-length digest of data into hash.
-func TurboShakeSum128(hash, data []byte, D byte) {
- h := NewTurboShake128(D)
- _, _ = h.Write(data)
- _, _ = h.Read(hash)
-}
-
-// TurboShakeSum256 writes an arbitrary-length digest of data into hash.
-func TurboShakeSum256(hash, data []byte, D byte) {
- h := NewTurboShake256(D)
- _, _ = h.Write(data)
- _, _ = h.Read(hash)
-}
-
-func (d *State) SwitchDS(D byte) {
- d.dsbyte = D
-}
diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/xor.go b/vendor/github.com/cloudflare/circl/internal/sha3/xor.go
deleted file mode 100644
index 1e21337454f..00000000000
--- a/vendor/github.com/cloudflare/circl/internal/sha3/xor.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build (!amd64 && !386 && !ppc64le) || appengine
-// +build !amd64,!386,!ppc64le appengine
-
-package sha3
-
-// A storageBuf is an aligned array of maxRate bytes.
-type storageBuf [maxRate]byte
-
-func (b *storageBuf) asBytes() *[maxRate]byte {
- return (*[maxRate]byte)(b)
-}
diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/xor_generic.go b/vendor/github.com/cloudflare/circl/internal/sha3/xor_generic.go
deleted file mode 100644
index 2b0c6617906..00000000000
--- a/vendor/github.com/cloudflare/circl/internal/sha3/xor_generic.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build (!amd64 || appengine) && (!386 || appengine) && (!ppc64le || appengine)
-// +build !amd64 appengine
-// +build !386 appengine
-// +build !ppc64le appengine
-
-package sha3
-
-import "encoding/binary"
-
-// xorIn xors the bytes in buf into the state; it
-// makes no non-portable assumptions about memory layout
-// or alignment.
-func xorIn(d *State, buf []byte) {
- n := len(buf) / 8
-
- for i := 0; i < n; i++ {
- a := binary.LittleEndian.Uint64(buf)
- d.a[i] ^= a
- buf = buf[8:]
- }
-}
-
-// copyOut copies ulint64s to a byte buffer.
-func copyOut(d *State, b []byte) {
- for i := 0; len(b) >= 8; i++ {
- binary.LittleEndian.PutUint64(b, d.a[i])
- b = b[8:]
- }
-}
diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/xor_unaligned.go b/vendor/github.com/cloudflare/circl/internal/sha3/xor_unaligned.go
deleted file mode 100644
index 052fc8d32d2..00000000000
--- a/vendor/github.com/cloudflare/circl/internal/sha3/xor_unaligned.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build (amd64 || 386 || ppc64le) && !appengine
-// +build amd64 386 ppc64le
-// +build !appengine
-
-package sha3
-
-import "unsafe"
-
-// A storageBuf is an aligned array of maxRate bytes.
-type storageBuf [maxRate / 8]uint64
-
-func (b *storageBuf) asBytes() *[maxRate]byte {
- return (*[maxRate]byte)(unsafe.Pointer(b))
-}
-
-// xorInuses unaligned reads and writes to update d.a to contain d.a
-// XOR buf.
-func xorIn(d *State, buf []byte) {
- n := len(buf)
- bw := (*[maxRate / 8]uint64)(unsafe.Pointer(&buf[0]))[: n/8 : n/8]
- if n >= 72 {
- d.a[0] ^= bw[0]
- d.a[1] ^= bw[1]
- d.a[2] ^= bw[2]
- d.a[3] ^= bw[3]
- d.a[4] ^= bw[4]
- d.a[5] ^= bw[5]
- d.a[6] ^= bw[6]
- d.a[7] ^= bw[7]
- d.a[8] ^= bw[8]
- }
- if n >= 104 {
- d.a[9] ^= bw[9]
- d.a[10] ^= bw[10]
- d.a[11] ^= bw[11]
- d.a[12] ^= bw[12]
- }
- if n >= 136 {
- d.a[13] ^= bw[13]
- d.a[14] ^= bw[14]
- d.a[15] ^= bw[15]
- d.a[16] ^= bw[16]
- }
- if n >= 144 {
- d.a[17] ^= bw[17]
- }
- if n >= 168 {
- d.a[18] ^= bw[18]
- d.a[19] ^= bw[19]
- d.a[20] ^= bw[20]
- }
-}
-
-func copyOut(d *State, buf []byte) {
- ab := (*[maxRate]uint8)(unsafe.Pointer(&d.a[0]))
- copy(buf, ab[:])
-}
diff --git a/vendor/github.com/cloudflare/circl/math/fp25519/fp.go b/vendor/github.com/cloudflare/circl/math/fp25519/fp.go
deleted file mode 100644
index 57a50ff5e9b..00000000000
--- a/vendor/github.com/cloudflare/circl/math/fp25519/fp.go
+++ /dev/null
@@ -1,205 +0,0 @@
-// Package fp25519 provides prime field arithmetic over GF(2^255-19).
-package fp25519
-
-import (
- "errors"
-
- "github.com/cloudflare/circl/internal/conv"
-)
-
-// Size in bytes of an element.
-const Size = 32
-
-// Elt is a prime field element.
-type Elt [Size]byte
-
-func (e Elt) String() string { return conv.BytesLe2Hex(e[:]) }
-
-// p is the prime modulus 2^255-19.
-var p = Elt{
- 0xed, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f,
-}
-
-// P returns the prime modulus 2^255-19.
-func P() Elt { return p }
-
-// ToBytes stores in b the little-endian byte representation of x.
-func ToBytes(b []byte, x *Elt) error {
- if len(b) != Size {
- return errors.New("wrong size")
- }
- Modp(x)
- copy(b, x[:])
- return nil
-}
-
-// IsZero returns true if x is equal to 0.
-func IsZero(x *Elt) bool { Modp(x); return *x == Elt{} }
-
-// SetOne assigns x=1.
-func SetOne(x *Elt) { *x = Elt{}; x[0] = 1 }
-
-// Neg calculates z = -x.
-func Neg(z, x *Elt) { Sub(z, &p, x) }
-
-// InvSqrt calculates z = sqrt(x/y) iff x/y is a quadratic-residue, which is
-// indicated by returning isQR = true. Otherwise, when x/y is a quadratic
-// non-residue, z will have an undetermined value and isQR = false.
-func InvSqrt(z, x, y *Elt) (isQR bool) {
- sqrtMinusOne := &Elt{
- 0xb0, 0xa0, 0x0e, 0x4a, 0x27, 0x1b, 0xee, 0xc4,
- 0x78, 0xe4, 0x2f, 0xad, 0x06, 0x18, 0x43, 0x2f,
- 0xa7, 0xd7, 0xfb, 0x3d, 0x99, 0x00, 0x4d, 0x2b,
- 0x0b, 0xdf, 0xc1, 0x4f, 0x80, 0x24, 0x83, 0x2b,
- }
- t0, t1, t2, t3 := &Elt{}, &Elt{}, &Elt{}, &Elt{}
-
- Mul(t0, x, y) // t0 = u*v
- Sqr(t1, y) // t1 = v^2
- Mul(t2, t0, t1) // t2 = u*v^3
- Sqr(t0, t1) // t0 = v^4
- Mul(t1, t0, t2) // t1 = u*v^7
-
- var Tab [4]*Elt
- Tab[0] = &Elt{}
- Tab[1] = &Elt{}
- Tab[2] = t3
- Tab[3] = t1
-
- *Tab[0] = *t1
- Sqr(Tab[0], Tab[0])
- Sqr(Tab[1], Tab[0])
- Sqr(Tab[1], Tab[1])
- Mul(Tab[1], Tab[1], Tab[3])
- Mul(Tab[0], Tab[0], Tab[1])
- Sqr(Tab[0], Tab[0])
- Mul(Tab[0], Tab[0], Tab[1])
- Sqr(Tab[1], Tab[0])
- for i := 0; i < 4; i++ {
- Sqr(Tab[1], Tab[1])
- }
- Mul(Tab[1], Tab[1], Tab[0])
- Sqr(Tab[2], Tab[1])
- for i := 0; i < 4; i++ {
- Sqr(Tab[2], Tab[2])
- }
- Mul(Tab[2], Tab[2], Tab[0])
- Sqr(Tab[1], Tab[2])
- for i := 0; i < 14; i++ {
- Sqr(Tab[1], Tab[1])
- }
- Mul(Tab[1], Tab[1], Tab[2])
- Sqr(Tab[2], Tab[1])
- for i := 0; i < 29; i++ {
- Sqr(Tab[2], Tab[2])
- }
- Mul(Tab[2], Tab[2], Tab[1])
- Sqr(Tab[1], Tab[2])
- for i := 0; i < 59; i++ {
- Sqr(Tab[1], Tab[1])
- }
- Mul(Tab[1], Tab[1], Tab[2])
- for i := 0; i < 5; i++ {
- Sqr(Tab[1], Tab[1])
- }
- Mul(Tab[1], Tab[1], Tab[0])
- Sqr(Tab[2], Tab[1])
- for i := 0; i < 124; i++ {
- Sqr(Tab[2], Tab[2])
- }
- Mul(Tab[2], Tab[2], Tab[1])
- Sqr(Tab[2], Tab[2])
- Sqr(Tab[2], Tab[2])
- Mul(Tab[2], Tab[2], Tab[3])
-
- Mul(z, t3, t2) // z = xy^(p+3)/8 = xy^3*(xy^7)^(p-5)/8
- // Checking whether y z^2 == x
- Sqr(t0, z) // t0 = z^2
- Mul(t0, t0, y) // t0 = yz^2
- Sub(t1, t0, x) // t1 = t0-u
- Add(t2, t0, x) // t2 = t0+u
- if IsZero(t1) {
- return true
- } else if IsZero(t2) {
- Mul(z, z, sqrtMinusOne) // z = z*sqrt(-1)
- return true
- } else {
- return false
- }
-}
-
-// Inv calculates z = 1/x mod p.
-func Inv(z, x *Elt) {
- x0, x1, x2 := &Elt{}, &Elt{}, &Elt{}
- Sqr(x1, x)
- Sqr(x0, x1)
- Sqr(x0, x0)
- Mul(x0, x0, x)
- Mul(z, x0, x1)
- Sqr(x1, z)
- Mul(x0, x0, x1)
- Sqr(x1, x0)
- for i := 0; i < 4; i++ {
- Sqr(x1, x1)
- }
- Mul(x0, x0, x1)
- Sqr(x1, x0)
- for i := 0; i < 9; i++ {
- Sqr(x1, x1)
- }
- Mul(x1, x1, x0)
- Sqr(x2, x1)
- for i := 0; i < 19; i++ {
- Sqr(x2, x2)
- }
- Mul(x2, x2, x1)
- for i := 0; i < 10; i++ {
- Sqr(x2, x2)
- }
- Mul(x2, x2, x0)
- Sqr(x0, x2)
- for i := 0; i < 49; i++ {
- Sqr(x0, x0)
- }
- Mul(x0, x0, x2)
- Sqr(x1, x0)
- for i := 0; i < 99; i++ {
- Sqr(x1, x1)
- }
- Mul(x1, x1, x0)
- for i := 0; i < 50; i++ {
- Sqr(x1, x1)
- }
- Mul(x1, x1, x2)
- for i := 0; i < 5; i++ {
- Sqr(x1, x1)
- }
- Mul(z, z, x1)
-}
-
-// Cmov assigns y to x if n is 1.
-func Cmov(x, y *Elt, n uint) { cmov(x, y, n) }
-
-// Cswap interchanges x and y if n is 1.
-func Cswap(x, y *Elt, n uint) { cswap(x, y, n) }
-
-// Add calculates z = x+y mod p.
-func Add(z, x, y *Elt) { add(z, x, y) }
-
-// Sub calculates z = x-y mod p.
-func Sub(z, x, y *Elt) { sub(z, x, y) }
-
-// AddSub calculates (x,y) = (x+y mod p, x-y mod p).
-func AddSub(x, y *Elt) { addsub(x, y) }
-
-// Mul calculates z = x*y mod p.
-func Mul(z, x, y *Elt) { mul(z, x, y) }
-
-// Sqr calculates z = x^2 mod p.
-func Sqr(z, x *Elt) { sqr(z, x) }
-
-// Modp ensures that z is between [0,p-1].
-func Modp(z *Elt) { modp(z) }
diff --git a/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.go b/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.go
deleted file mode 100644
index 057f0d2803f..00000000000
--- a/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.go
+++ /dev/null
@@ -1,45 +0,0 @@
-//go:build amd64 && !purego
-// +build amd64,!purego
-
-package fp25519
-
-import (
- "golang.org/x/sys/cpu"
-)
-
-var hasBmi2Adx = cpu.X86.HasBMI2 && cpu.X86.HasADX
-
-var _ = hasBmi2Adx
-
-func cmov(x, y *Elt, n uint) { cmovAmd64(x, y, n) }
-func cswap(x, y *Elt, n uint) { cswapAmd64(x, y, n) }
-func add(z, x, y *Elt) { addAmd64(z, x, y) }
-func sub(z, x, y *Elt) { subAmd64(z, x, y) }
-func addsub(x, y *Elt) { addsubAmd64(x, y) }
-func mul(z, x, y *Elt) { mulAmd64(z, x, y) }
-func sqr(z, x *Elt) { sqrAmd64(z, x) }
-func modp(z *Elt) { modpAmd64(z) }
-
-//go:noescape
-func cmovAmd64(x, y *Elt, n uint)
-
-//go:noescape
-func cswapAmd64(x, y *Elt, n uint)
-
-//go:noescape
-func addAmd64(z, x, y *Elt)
-
-//go:noescape
-func subAmd64(z, x, y *Elt)
-
-//go:noescape
-func addsubAmd64(x, y *Elt)
-
-//go:noescape
-func mulAmd64(z, x, y *Elt)
-
-//go:noescape
-func sqrAmd64(z, x *Elt)
-
-//go:noescape
-func modpAmd64(z *Elt)
diff --git a/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.h b/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.h
deleted file mode 100644
index b884b584ab3..00000000000
--- a/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.h
+++ /dev/null
@@ -1,351 +0,0 @@
-// This code was imported from https://github.com/armfazh/rfc7748_precomputed
-
-// CHECK_BMI2ADX triggers bmi2adx if supported,
-// otherwise it fallbacks to legacy code.
-#define CHECK_BMI2ADX(label, legacy, bmi2adx) \
- CMPB ·hasBmi2Adx(SB), $0 \
- JE label \
- bmi2adx \
- RET \
- label: \
- legacy \
- RET
-
-// cselect is a conditional move
-// if b=1: it copies y into x;
-// if b=0: x remains with the same value;
-// if b<> 0,1: undefined.
-// Uses: AX, DX, FLAGS
-// Instr: x86_64, cmov
-#define cselect(x,y,b) \
- TESTQ b, b \
- MOVQ 0+x, AX; MOVQ 0+y, DX; CMOVQNE DX, AX; MOVQ AX, 0+x; \
- MOVQ 8+x, AX; MOVQ 8+y, DX; CMOVQNE DX, AX; MOVQ AX, 8+x; \
- MOVQ 16+x, AX; MOVQ 16+y, DX; CMOVQNE DX, AX; MOVQ AX, 16+x; \
- MOVQ 24+x, AX; MOVQ 24+y, DX; CMOVQNE DX, AX; MOVQ AX, 24+x;
-
-// cswap is a conditional swap
-// if b=1: x,y <- y,x;
-// if b=0: x,y remain with the same values;
-// if b<> 0,1: undefined.
-// Uses: AX, DX, R8, FLAGS
-// Instr: x86_64, cmov
-#define cswap(x,y,b) \
- TESTQ b, b \
- MOVQ 0+x, AX; MOVQ AX, R8; MOVQ 0+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 0+x; MOVQ DX, 0+y; \
- MOVQ 8+x, AX; MOVQ AX, R8; MOVQ 8+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 8+x; MOVQ DX, 8+y; \
- MOVQ 16+x, AX; MOVQ AX, R8; MOVQ 16+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 16+x; MOVQ DX, 16+y; \
- MOVQ 24+x, AX; MOVQ AX, R8; MOVQ 24+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 24+x; MOVQ DX, 24+y;
-
-// additionLeg adds x and y and stores in z
-// Uses: AX, DX, R8-R11, FLAGS
-// Instr: x86_64, cmov
-#define additionLeg(z,x,y) \
- MOVL $38, AX; \
- MOVL $0, DX; \
- MOVQ 0+x, R8; ADDQ 0+y, R8; \
- MOVQ 8+x, R9; ADCQ 8+y, R9; \
- MOVQ 16+x, R10; ADCQ 16+y, R10; \
- MOVQ 24+x, R11; ADCQ 24+y, R11; \
- CMOVQCS AX, DX; \
- ADDQ DX, R8; \
- ADCQ $0, R9; MOVQ R9, 8+z; \
- ADCQ $0, R10; MOVQ R10, 16+z; \
- ADCQ $0, R11; MOVQ R11, 24+z; \
- MOVL $0, DX; \
- CMOVQCS AX, DX; \
- ADDQ DX, R8; MOVQ R8, 0+z;
-
-// additionAdx adds x and y and stores in z
-// Uses: AX, DX, R8-R11, FLAGS
-// Instr: x86_64, cmov, adx
-#define additionAdx(z,x,y) \
- MOVL $38, AX; \
- XORL DX, DX; \
- MOVQ 0+x, R8; ADCXQ 0+y, R8; \
- MOVQ 8+x, R9; ADCXQ 8+y, R9; \
- MOVQ 16+x, R10; ADCXQ 16+y, R10; \
- MOVQ 24+x, R11; ADCXQ 24+y, R11; \
- CMOVQCS AX, DX ; \
- XORL AX, AX; \
- ADCXQ DX, R8; \
- ADCXQ AX, R9; MOVQ R9, 8+z; \
- ADCXQ AX, R10; MOVQ R10, 16+z; \
- ADCXQ AX, R11; MOVQ R11, 24+z; \
- MOVL $38, DX; \
- CMOVQCS DX, AX; \
- ADDQ AX, R8; MOVQ R8, 0+z;
-
-// subtraction subtracts y from x and stores in z
-// Uses: AX, DX, R8-R11, FLAGS
-// Instr: x86_64, cmov
-#define subtraction(z,x,y) \
- MOVL $38, AX; \
- MOVQ 0+x, R8; SUBQ 0+y, R8; \
- MOVQ 8+x, R9; SBBQ 8+y, R9; \
- MOVQ 16+x, R10; SBBQ 16+y, R10; \
- MOVQ 24+x, R11; SBBQ 24+y, R11; \
- MOVL $0, DX; \
- CMOVQCS AX, DX; \
- SUBQ DX, R8; \
- SBBQ $0, R9; MOVQ R9, 8+z; \
- SBBQ $0, R10; MOVQ R10, 16+z; \
- SBBQ $0, R11; MOVQ R11, 24+z; \
- MOVL $0, DX; \
- CMOVQCS AX, DX; \
- SUBQ DX, R8; MOVQ R8, 0+z;
-
-// integerMulAdx multiplies x and y and stores in z
-// Uses: AX, DX, R8-R15, FLAGS
-// Instr: x86_64, bmi2, adx
-#define integerMulAdx(z,x,y) \
- MOVL $0,R15; \
- MOVQ 0+y, DX; XORL AX, AX; \
- MULXQ 0+x, AX, R8; MOVQ AX, 0+z; \
- MULXQ 8+x, AX, R9; ADCXQ AX, R8; \
- MULXQ 16+x, AX, R10; ADCXQ AX, R9; \
- MULXQ 24+x, AX, R11; ADCXQ AX, R10; \
- MOVL $0, AX;;;;;;;;; ADCXQ AX, R11; \
- MOVQ 8+y, DX; XORL AX, AX; \
- MULXQ 0+x, AX, R12; ADCXQ R8, AX; MOVQ AX, 8+z; \
- MULXQ 8+x, AX, R13; ADCXQ R9, R12; ADOXQ AX, R12; \
- MULXQ 16+x, AX, R14; ADCXQ R10, R13; ADOXQ AX, R13; \
- MULXQ 24+x, AX, R15; ADCXQ R11, R14; ADOXQ AX, R14; \
- MOVL $0, AX;;;;;;;;; ADCXQ AX, R15; ADOXQ AX, R15; \
- MOVQ 16+y, DX; XORL AX, AX; \
- MULXQ 0+x, AX, R8; ADCXQ R12, AX; MOVQ AX, 16+z; \
- MULXQ 8+x, AX, R9; ADCXQ R13, R8; ADOXQ AX, R8; \
- MULXQ 16+x, AX, R10; ADCXQ R14, R9; ADOXQ AX, R9; \
- MULXQ 24+x, AX, R11; ADCXQ R15, R10; ADOXQ AX, R10; \
- MOVL $0, AX;;;;;;;;; ADCXQ AX, R11; ADOXQ AX, R11; \
- MOVQ 24+y, DX; XORL AX, AX; \
- MULXQ 0+x, AX, R12; ADCXQ R8, AX; MOVQ AX, 24+z; \
- MULXQ 8+x, AX, R13; ADCXQ R9, R12; ADOXQ AX, R12; MOVQ R12, 32+z; \
- MULXQ 16+x, AX, R14; ADCXQ R10, R13; ADOXQ AX, R13; MOVQ R13, 40+z; \
- MULXQ 24+x, AX, R15; ADCXQ R11, R14; ADOXQ AX, R14; MOVQ R14, 48+z; \
- MOVL $0, AX;;;;;;;;; ADCXQ AX, R15; ADOXQ AX, R15; MOVQ R15, 56+z;
-
-// integerMulLeg multiplies x and y and stores in z
-// Uses: AX, DX, R8-R15, FLAGS
-// Instr: x86_64
-#define integerMulLeg(z,x,y) \
- MOVQ 0+y, R8; \
- MOVQ 0+x, AX; MULQ R8; MOVQ AX, 0+z; MOVQ DX, R15; \
- MOVQ 8+x, AX; MULQ R8; MOVQ AX, R13; MOVQ DX, R10; \
- MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; \
- MOVQ 24+x, AX; MULQ R8; \
- ADDQ R13, R15; \
- ADCQ R14, R10; MOVQ R10, 16+z; \
- ADCQ AX, R11; MOVQ R11, 24+z; \
- ADCQ $0, DX; MOVQ DX, 32+z; \
- MOVQ 8+y, R8; \
- MOVQ 0+x, AX; MULQ R8; MOVQ AX, R12; MOVQ DX, R9; \
- MOVQ 8+x, AX; MULQ R8; MOVQ AX, R13; MOVQ DX, R10; \
- MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; \
- MOVQ 24+x, AX; MULQ R8; \
- ADDQ R12, R15; MOVQ R15, 8+z; \
- ADCQ R13, R9; \
- ADCQ R14, R10; \
- ADCQ AX, R11; \
- ADCQ $0, DX; \
- ADCQ 16+z, R9; MOVQ R9, R15; \
- ADCQ 24+z, R10; MOVQ R10, 24+z; \
- ADCQ 32+z, R11; MOVQ R11, 32+z; \
- ADCQ $0, DX; MOVQ DX, 40+z; \
- MOVQ 16+y, R8; \
- MOVQ 0+x, AX; MULQ R8; MOVQ AX, R12; MOVQ DX, R9; \
- MOVQ 8+x, AX; MULQ R8; MOVQ AX, R13; MOVQ DX, R10; \
- MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; \
- MOVQ 24+x, AX; MULQ R8; \
- ADDQ R12, R15; MOVQ R15, 16+z; \
- ADCQ R13, R9; \
- ADCQ R14, R10; \
- ADCQ AX, R11; \
- ADCQ $0, DX; \
- ADCQ 24+z, R9; MOVQ R9, R15; \
- ADCQ 32+z, R10; MOVQ R10, 32+z; \
- ADCQ 40+z, R11; MOVQ R11, 40+z; \
- ADCQ $0, DX; MOVQ DX, 48+z; \
- MOVQ 24+y, R8; \
- MOVQ 0+x, AX; MULQ R8; MOVQ AX, R12; MOVQ DX, R9; \
- MOVQ 8+x, AX; MULQ R8; MOVQ AX, R13; MOVQ DX, R10; \
- MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; \
- MOVQ 24+x, AX; MULQ R8; \
- ADDQ R12, R15; MOVQ R15, 24+z; \
- ADCQ R13, R9; \
- ADCQ R14, R10; \
- ADCQ AX, R11; \
- ADCQ $0, DX; \
- ADCQ 32+z, R9; MOVQ R9, 32+z; \
- ADCQ 40+z, R10; MOVQ R10, 40+z; \
- ADCQ 48+z, R11; MOVQ R11, 48+z; \
- ADCQ $0, DX; MOVQ DX, 56+z;
-
-// integerSqrLeg squares x and stores in z
-// Uses: AX, CX, DX, R8-R15, FLAGS
-// Instr: x86_64
-#define integerSqrLeg(z,x) \
- MOVQ 0+x, R8; \
- MOVQ 8+x, AX; MULQ R8; MOVQ AX, R9; MOVQ DX, R10; /* A[0]*A[1] */ \
- MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; /* A[0]*A[2] */ \
- MOVQ 24+x, AX; MULQ R8; MOVQ AX, R15; MOVQ DX, R12; /* A[0]*A[3] */ \
- MOVQ 24+x, R8; \
- MOVQ 8+x, AX; MULQ R8; MOVQ AX, CX; MOVQ DX, R13; /* A[3]*A[1] */ \
- MOVQ 16+x, AX; MULQ R8; /* A[3]*A[2] */ \
- \
- ADDQ R14, R10;\
- ADCQ R15, R11; MOVL $0, R15;\
- ADCQ CX, R12;\
- ADCQ AX, R13;\
- ADCQ $0, DX; MOVQ DX, R14;\
- MOVQ 8+x, AX; MULQ 16+x;\
- \
- ADDQ AX, R11;\
- ADCQ DX, R12;\
- ADCQ $0, R13;\
- ADCQ $0, R14;\
- ADCQ $0, R15;\
- \
- SHLQ $1, R14, R15; MOVQ R15, 56+z;\
- SHLQ $1, R13, R14; MOVQ R14, 48+z;\
- SHLQ $1, R12, R13; MOVQ R13, 40+z;\
- SHLQ $1, R11, R12; MOVQ R12, 32+z;\
- SHLQ $1, R10, R11; MOVQ R11, 24+z;\
- SHLQ $1, R9, R10; MOVQ R10, 16+z;\
- SHLQ $1, R9; MOVQ R9, 8+z;\
- \
- MOVQ 0+x,AX; MULQ AX; MOVQ AX, 0+z; MOVQ DX, R9;\
- MOVQ 8+x,AX; MULQ AX; MOVQ AX, R10; MOVQ DX, R11;\
- MOVQ 16+x,AX; MULQ AX; MOVQ AX, R12; MOVQ DX, R13;\
- MOVQ 24+x,AX; MULQ AX; MOVQ AX, R14; MOVQ DX, R15;\
- \
- ADDQ 8+z, R9; MOVQ R9, 8+z;\
- ADCQ 16+z, R10; MOVQ R10, 16+z;\
- ADCQ 24+z, R11; MOVQ R11, 24+z;\
- ADCQ 32+z, R12; MOVQ R12, 32+z;\
- ADCQ 40+z, R13; MOVQ R13, 40+z;\
- ADCQ 48+z, R14; MOVQ R14, 48+z;\
- ADCQ 56+z, R15; MOVQ R15, 56+z;
-
-// integerSqrAdx squares x and stores in z
-// Uses: AX, CX, DX, R8-R15, FLAGS
-// Instr: x86_64, bmi2, adx
-#define integerSqrAdx(z,x) \
- MOVQ 0+x, DX; /* A[0] */ \
- MULXQ 8+x, R8, R14; /* A[1]*A[0] */ XORL R15, R15; \
- MULXQ 16+x, R9, R10; /* A[2]*A[0] */ ADCXQ R14, R9; \
- MULXQ 24+x, AX, CX; /* A[3]*A[0] */ ADCXQ AX, R10; \
- MOVQ 24+x, DX; /* A[3] */ \
- MULXQ 8+x, R11, R12; /* A[1]*A[3] */ ADCXQ CX, R11; \
- MULXQ 16+x, AX, R13; /* A[2]*A[3] */ ADCXQ AX, R12; \
- MOVQ 8+x, DX; /* A[1] */ ADCXQ R15, R13; \
- MULXQ 16+x, AX, CX; /* A[2]*A[1] */ MOVL $0, R14; \
- ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ADCXQ R15, R14; \
- XORL R15, R15; \
- ADOXQ AX, R10; ADCXQ R8, R8; \
- ADOXQ CX, R11; ADCXQ R9, R9; \
- ADOXQ R15, R12; ADCXQ R10, R10; \
- ADOXQ R15, R13; ADCXQ R11, R11; \
- ADOXQ R15, R14; ADCXQ R12, R12; \
- ;;;;;;;;;;;;;;; ADCXQ R13, R13; \
- ;;;;;;;;;;;;;;; ADCXQ R14, R14; \
- MOVQ 0+x, DX; MULXQ DX, AX, CX; /* A[0]^2 */ \
- ;;;;;;;;;;;;;;; MOVQ AX, 0+z; \
- ADDQ CX, R8; MOVQ R8, 8+z; \
- MOVQ 8+x, DX; MULXQ DX, AX, CX; /* A[1]^2 */ \
- ADCQ AX, R9; MOVQ R9, 16+z; \
- ADCQ CX, R10; MOVQ R10, 24+z; \
- MOVQ 16+x, DX; MULXQ DX, AX, CX; /* A[2]^2 */ \
- ADCQ AX, R11; MOVQ R11, 32+z; \
- ADCQ CX, R12; MOVQ R12, 40+z; \
- MOVQ 24+x, DX; MULXQ DX, AX, CX; /* A[3]^2 */ \
- ADCQ AX, R13; MOVQ R13, 48+z; \
- ADCQ CX, R14; MOVQ R14, 56+z;
-
-// reduceFromDouble finds z congruent to x modulo p such that 0> 63)
- // PUT BIT 255 IN CARRY FLAG AND CLEAR
- x3 &^= 1 << 63
-
- x0, c0 := bits.Add64(x0, cx, 0)
- x1, c1 := bits.Add64(x1, 0, c0)
- x2, c2 := bits.Add64(x2, 0, c1)
- x3, _ = bits.Add64(x3, 0, c2)
-
- // TEST FOR BIT 255 AGAIN; ONLY TRIGGERED ON OVERFLOW MODULO 2^255-19
- // cx = C[255] ? 0 : 19
- cx = uint64(19) &^ (-(x3 >> 63))
- // CLEAR BIT 255
- x3 &^= 1 << 63
-
- x0, c0 = bits.Sub64(x0, cx, 0)
- x1, c1 = bits.Sub64(x1, 0, c0)
- x2, c2 = bits.Sub64(x2, 0, c1)
- x3, _ = bits.Sub64(x3, 0, c2)
-
- binary.LittleEndian.PutUint64(x[0*8:1*8], x0)
- binary.LittleEndian.PutUint64(x[1*8:2*8], x1)
- binary.LittleEndian.PutUint64(x[2*8:3*8], x2)
- binary.LittleEndian.PutUint64(x[3*8:4*8], x3)
-}
-
-func red64(z *Elt, x0, x1, x2, x3, x4, x5, x6, x7 uint64) {
- h0, l0 := bits.Mul64(x4, 38)
- h1, l1 := bits.Mul64(x5, 38)
- h2, l2 := bits.Mul64(x6, 38)
- h3, l3 := bits.Mul64(x7, 38)
-
- l1, c0 := bits.Add64(h0, l1, 0)
- l2, c1 := bits.Add64(h1, l2, c0)
- l3, c2 := bits.Add64(h2, l3, c1)
- l4, _ := bits.Add64(h3, 0, c2)
-
- l0, c0 = bits.Add64(l0, x0, 0)
- l1, c1 = bits.Add64(l1, x1, c0)
- l2, c2 = bits.Add64(l2, x2, c1)
- l3, c3 := bits.Add64(l3, x3, c2)
- l4, _ = bits.Add64(l4, 0, c3)
-
- _, l4 = bits.Mul64(l4, 38)
- l0, c0 = bits.Add64(l0, l4, 0)
- z1, c1 := bits.Add64(l1, 0, c0)
- z2, c2 := bits.Add64(l2, 0, c1)
- z3, c3 := bits.Add64(l3, 0, c2)
- z0, _ := bits.Add64(l0, (-c3)&38, 0)
-
- binary.LittleEndian.PutUint64(z[0*8:1*8], z0)
- binary.LittleEndian.PutUint64(z[1*8:2*8], z1)
- binary.LittleEndian.PutUint64(z[2*8:3*8], z2)
- binary.LittleEndian.PutUint64(z[3*8:4*8], z3)
-}
diff --git a/vendor/github.com/cloudflare/circl/math/fp25519/fp_noasm.go b/vendor/github.com/cloudflare/circl/math/fp25519/fp_noasm.go
deleted file mode 100644
index 26ca4d01b7e..00000000000
--- a/vendor/github.com/cloudflare/circl/math/fp25519/fp_noasm.go
+++ /dev/null
@@ -1,13 +0,0 @@
-//go:build !amd64 || purego
-// +build !amd64 purego
-
-package fp25519
-
-func cmov(x, y *Elt, n uint) { cmovGeneric(x, y, n) }
-func cswap(x, y *Elt, n uint) { cswapGeneric(x, y, n) }
-func add(z, x, y *Elt) { addGeneric(z, x, y) }
-func sub(z, x, y *Elt) { subGeneric(z, x, y) }
-func addsub(x, y *Elt) { addsubGeneric(x, y) }
-func mul(z, x, y *Elt) { mulGeneric(z, x, y) }
-func sqr(z, x *Elt) { sqrGeneric(z, x) }
-func modp(z *Elt) { modpGeneric(z) }
diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp.go b/vendor/github.com/cloudflare/circl/math/fp448/fp.go
deleted file mode 100644
index a5e36600bb6..00000000000
--- a/vendor/github.com/cloudflare/circl/math/fp448/fp.go
+++ /dev/null
@@ -1,164 +0,0 @@
-// Package fp448 provides prime field arithmetic over GF(2^448-2^224-1).
-package fp448
-
-import (
- "errors"
-
- "github.com/cloudflare/circl/internal/conv"
-)
-
-// Size in bytes of an element.
-const Size = 56
-
-// Elt is a prime field element.
-type Elt [Size]byte
-
-func (e Elt) String() string { return conv.BytesLe2Hex(e[:]) }
-
-// p is the prime modulus 2^448-2^224-1.
-var p = Elt{
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-}
-
-// P returns the prime modulus 2^448-2^224-1.
-func P() Elt { return p }
-
-// ToBytes stores in b the little-endian byte representation of x.
-func ToBytes(b []byte, x *Elt) error {
- if len(b) != Size {
- return errors.New("wrong size")
- }
- Modp(x)
- copy(b, x[:])
- return nil
-}
-
-// IsZero returns true if x is equal to 0.
-func IsZero(x *Elt) bool { Modp(x); return *x == Elt{} }
-
-// IsOne returns true if x is equal to 1.
-func IsOne(x *Elt) bool { Modp(x); return *x == Elt{1} }
-
-// SetOne assigns x=1.
-func SetOne(x *Elt) { *x = Elt{1} }
-
-// One returns the 1 element.
-func One() (x Elt) { x = Elt{1}; return }
-
-// Neg calculates z = -x.
-func Neg(z, x *Elt) { Sub(z, &p, x) }
-
-// Modp ensures that z is between [0,p-1].
-func Modp(z *Elt) { Sub(z, z, &p) }
-
-// InvSqrt calculates z = sqrt(x/y) iff x/y is a quadratic-residue. If so,
-// isQR = true; otherwise, isQR = false, since x/y is a quadratic non-residue,
-// and z = sqrt(-x/y).
-func InvSqrt(z, x, y *Elt) (isQR bool) {
- // First note that x^(2(k+1)) = x^(p-1)/2 * x = legendre(x) * x
- // so that's x if x is a quadratic residue and -x otherwise.
- // Next, y^(6k+3) = y^(4k+2) * y^(2k+1) = y^(p-1) * y^((p-1)/2) = legendre(y).
- // So the z we compute satisfies z^2 y = x^(2(k+1)) y^(6k+3) = legendre(x)*legendre(y).
- // Thus if x and y are quadratic residues, then z is indeed sqrt(x/y).
- t0, t1 := &Elt{}, &Elt{}
- Mul(t0, x, y) // x*y
- Sqr(t1, y) // y^2
- Mul(t1, t0, t1) // x*y^3
- powPminus3div4(z, t1) // (x*y^3)^k
- Mul(z, z, t0) // z = x*y*(x*y^3)^k = x^(k+1) * y^(3k+1)
-
- // Check if x/y is a quadratic residue
- Sqr(t0, z) // z^2
- Mul(t0, t0, y) // y*z^2
- Sub(t0, t0, x) // y*z^2-x
- return IsZero(t0)
-}
-
-// Inv calculates z = 1/x mod p.
-func Inv(z, x *Elt) {
- // Calculates z = x^(4k+1) = x^(p-3+1) = x^(p-2) = x^-1, where k = (p-3)/4.
- t := &Elt{}
- powPminus3div4(t, x) // t = x^k
- Sqr(t, t) // t = x^2k
- Sqr(t, t) // t = x^4k
- Mul(z, t, x) // z = x^(4k+1)
-}
-
-// powPminus3div4 calculates z = x^k mod p, where k = (p-3)/4.
-func powPminus3div4(z, x *Elt) {
- x0, x1 := &Elt{}, &Elt{}
- Sqr(z, x)
- Mul(z, z, x)
- Sqr(x0, z)
- Mul(x0, x0, x)
- Sqr(z, x0)
- Sqr(z, z)
- Sqr(z, z)
- Mul(z, z, x0)
- Sqr(x1, z)
- for i := 0; i < 5; i++ {
- Sqr(x1, x1)
- }
- Mul(x1, x1, z)
- Sqr(z, x1)
- for i := 0; i < 11; i++ {
- Sqr(z, z)
- }
- Mul(z, z, x1)
- Sqr(z, z)
- Sqr(z, z)
- Sqr(z, z)
- Mul(z, z, x0)
- Sqr(x1, z)
- for i := 0; i < 26; i++ {
- Sqr(x1, x1)
- }
- Mul(x1, x1, z)
- Sqr(z, x1)
- for i := 0; i < 53; i++ {
- Sqr(z, z)
- }
- Mul(z, z, x1)
- Sqr(z, z)
- Sqr(z, z)
- Sqr(z, z)
- Mul(z, z, x0)
- Sqr(x1, z)
- for i := 0; i < 110; i++ {
- Sqr(x1, x1)
- }
- Mul(x1, x1, z)
- Sqr(z, x1)
- Mul(z, z, x)
- for i := 0; i < 223; i++ {
- Sqr(z, z)
- }
- Mul(z, z, x1)
-}
-
-// Cmov assigns y to x if n is 1.
-func Cmov(x, y *Elt, n uint) { cmov(x, y, n) }
-
-// Cswap interchanges x and y if n is 1.
-func Cswap(x, y *Elt, n uint) { cswap(x, y, n) }
-
-// Add calculates z = x+y mod p.
-func Add(z, x, y *Elt) { add(z, x, y) }
-
-// Sub calculates z = x-y mod p.
-func Sub(z, x, y *Elt) { sub(z, x, y) }
-
-// AddSub calculates (x,y) = (x+y mod p, x-y mod p).
-func AddSub(x, y *Elt) { addsub(x, y) }
-
-// Mul calculates z = x*y mod p.
-func Mul(z, x, y *Elt) { mul(z, x, y) }
-
-// Sqr calculates z = x^2 mod p.
-func Sqr(z, x *Elt) { sqr(z, x) }
diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.go b/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.go
deleted file mode 100644
index 6a12209a704..00000000000
--- a/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.go
+++ /dev/null
@@ -1,43 +0,0 @@
-//go:build amd64 && !purego
-// +build amd64,!purego
-
-package fp448
-
-import (
- "golang.org/x/sys/cpu"
-)
-
-var hasBmi2Adx = cpu.X86.HasBMI2 && cpu.X86.HasADX
-
-var _ = hasBmi2Adx
-
-func cmov(x, y *Elt, n uint) { cmovAmd64(x, y, n) }
-func cswap(x, y *Elt, n uint) { cswapAmd64(x, y, n) }
-func add(z, x, y *Elt) { addAmd64(z, x, y) }
-func sub(z, x, y *Elt) { subAmd64(z, x, y) }
-func addsub(x, y *Elt) { addsubAmd64(x, y) }
-func mul(z, x, y *Elt) { mulAmd64(z, x, y) }
-func sqr(z, x *Elt) { sqrAmd64(z, x) }
-
-/* Functions defined in fp_amd64.s */
-
-//go:noescape
-func cmovAmd64(x, y *Elt, n uint)
-
-//go:noescape
-func cswapAmd64(x, y *Elt, n uint)
-
-//go:noescape
-func addAmd64(z, x, y *Elt)
-
-//go:noescape
-func subAmd64(z, x, y *Elt)
-
-//go:noescape
-func addsubAmd64(x, y *Elt)
-
-//go:noescape
-func mulAmd64(z, x, y *Elt)
-
-//go:noescape
-func sqrAmd64(z, x *Elt)
diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.h b/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.h
deleted file mode 100644
index 536fe5bdfe0..00000000000
--- a/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.h
+++ /dev/null
@@ -1,591 +0,0 @@
-// This code was imported from https://github.com/armfazh/rfc7748_precomputed
-
-// CHECK_BMI2ADX triggers bmi2adx if supported,
-// otherwise it fallbacks to legacy code.
-#define CHECK_BMI2ADX(label, legacy, bmi2adx) \
- CMPB ·hasBmi2Adx(SB), $0 \
- JE label \
- bmi2adx \
- RET \
- label: \
- legacy \
- RET
-
-// cselect is a conditional move
-// if b=1: it copies y into x;
-// if b=0: x remains with the same value;
-// if b<> 0,1: undefined.
-// Uses: AX, DX, FLAGS
-// Instr: x86_64, cmov
-#define cselect(x,y,b) \
- TESTQ b, b \
- MOVQ 0+x, AX; MOVQ 0+y, DX; CMOVQNE DX, AX; MOVQ AX, 0+x; \
- MOVQ 8+x, AX; MOVQ 8+y, DX; CMOVQNE DX, AX; MOVQ AX, 8+x; \
- MOVQ 16+x, AX; MOVQ 16+y, DX; CMOVQNE DX, AX; MOVQ AX, 16+x; \
- MOVQ 24+x, AX; MOVQ 24+y, DX; CMOVQNE DX, AX; MOVQ AX, 24+x; \
- MOVQ 32+x, AX; MOVQ 32+y, DX; CMOVQNE DX, AX; MOVQ AX, 32+x; \
- MOVQ 40+x, AX; MOVQ 40+y, DX; CMOVQNE DX, AX; MOVQ AX, 40+x; \
- MOVQ 48+x, AX; MOVQ 48+y, DX; CMOVQNE DX, AX; MOVQ AX, 48+x;
-
-// cswap is a conditional swap
-// if b=1: x,y <- y,x;
-// if b=0: x,y remain with the same values;
-// if b<> 0,1: undefined.
-// Uses: AX, DX, R8, FLAGS
-// Instr: x86_64, cmov
-#define cswap(x,y,b) \
- TESTQ b, b \
- MOVQ 0+x, AX; MOVQ AX, R8; MOVQ 0+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 0+x; MOVQ DX, 0+y; \
- MOVQ 8+x, AX; MOVQ AX, R8; MOVQ 8+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 8+x; MOVQ DX, 8+y; \
- MOVQ 16+x, AX; MOVQ AX, R8; MOVQ 16+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 16+x; MOVQ DX, 16+y; \
- MOVQ 24+x, AX; MOVQ AX, R8; MOVQ 24+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 24+x; MOVQ DX, 24+y; \
- MOVQ 32+x, AX; MOVQ AX, R8; MOVQ 32+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 32+x; MOVQ DX, 32+y; \
- MOVQ 40+x, AX; MOVQ AX, R8; MOVQ 40+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 40+x; MOVQ DX, 40+y; \
- MOVQ 48+x, AX; MOVQ AX, R8; MOVQ 48+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 48+x; MOVQ DX, 48+y;
-
-// additionLeg adds x and y and stores in z
-// Uses: AX, DX, R8-R14, FLAGS
-// Instr: x86_64
-#define additionLeg(z,x,y) \
- MOVQ 0+x, R8; ADDQ 0+y, R8; \
- MOVQ 8+x, R9; ADCQ 8+y, R9; \
- MOVQ 16+x, R10; ADCQ 16+y, R10; \
- MOVQ 24+x, R11; ADCQ 24+y, R11; \
- MOVQ 32+x, R12; ADCQ 32+y, R12; \
- MOVQ 40+x, R13; ADCQ 40+y, R13; \
- MOVQ 48+x, R14; ADCQ 48+y, R14; \
- MOVQ $0, AX; ADCQ $0, AX; \
- MOVQ AX, DX; \
- SHLQ $32, DX; \
- ADDQ AX, R8; MOVQ $0, AX; \
- ADCQ $0, R9; \
- ADCQ $0, R10; \
- ADCQ DX, R11; \
- ADCQ $0, R12; \
- ADCQ $0, R13; \
- ADCQ $0, R14; \
- ADCQ $0, AX; \
- MOVQ AX, DX; \
- SHLQ $32, DX; \
- ADDQ AX, R8; MOVQ R8, 0+z; \
- ADCQ $0, R9; MOVQ R9, 8+z; \
- ADCQ $0, R10; MOVQ R10, 16+z; \
- ADCQ DX, R11; MOVQ R11, 24+z; \
- ADCQ $0, R12; MOVQ R12, 32+z; \
- ADCQ $0, R13; MOVQ R13, 40+z; \
- ADCQ $0, R14; MOVQ R14, 48+z;
-
-
-// additionAdx adds x and y and stores in z
-// Uses: AX, DX, R8-R15, FLAGS
-// Instr: x86_64, adx
-#define additionAdx(z,x,y) \
- MOVL $32, R15; \
- XORL DX, DX; \
- MOVQ 0+x, R8; ADCXQ 0+y, R8; \
- MOVQ 8+x, R9; ADCXQ 8+y, R9; \
- MOVQ 16+x, R10; ADCXQ 16+y, R10; \
- MOVQ 24+x, R11; ADCXQ 24+y, R11; \
- MOVQ 32+x, R12; ADCXQ 32+y, R12; \
- MOVQ 40+x, R13; ADCXQ 40+y, R13; \
- MOVQ 48+x, R14; ADCXQ 48+y, R14; \
- ;;;;;;;;;;;;;;; ADCXQ DX, DX; \
- XORL AX, AX; \
- ADCXQ DX, R8; SHLXQ R15, DX, DX; \
- ADCXQ AX, R9; \
- ADCXQ AX, R10; \
- ADCXQ DX, R11; \
- ADCXQ AX, R12; \
- ADCXQ AX, R13; \
- ADCXQ AX, R14; \
- ADCXQ AX, AX; \
- XORL DX, DX; \
- ADCXQ AX, R8; MOVQ R8, 0+z; SHLXQ R15, AX, AX; \
- ADCXQ DX, R9; MOVQ R9, 8+z; \
- ADCXQ DX, R10; MOVQ R10, 16+z; \
- ADCXQ AX, R11; MOVQ R11, 24+z; \
- ADCXQ DX, R12; MOVQ R12, 32+z; \
- ADCXQ DX, R13; MOVQ R13, 40+z; \
- ADCXQ DX, R14; MOVQ R14, 48+z;
-
-// subtraction subtracts y from x and stores in z
-// Uses: AX, DX, R8-R14, FLAGS
-// Instr: x86_64
-#define subtraction(z,x,y) \
- MOVQ 0+x, R8; SUBQ 0+y, R8; \
- MOVQ 8+x, R9; SBBQ 8+y, R9; \
- MOVQ 16+x, R10; SBBQ 16+y, R10; \
- MOVQ 24+x, R11; SBBQ 24+y, R11; \
- MOVQ 32+x, R12; SBBQ 32+y, R12; \
- MOVQ 40+x, R13; SBBQ 40+y, R13; \
- MOVQ 48+x, R14; SBBQ 48+y, R14; \
- MOVQ $0, AX; SETCS AX; \
- MOVQ AX, DX; \
- SHLQ $32, DX; \
- SUBQ AX, R8; MOVQ $0, AX; \
- SBBQ $0, R9; \
- SBBQ $0, R10; \
- SBBQ DX, R11; \
- SBBQ $0, R12; \
- SBBQ $0, R13; \
- SBBQ $0, R14; \
- SETCS AX; \
- MOVQ AX, DX; \
- SHLQ $32, DX; \
- SUBQ AX, R8; MOVQ R8, 0+z; \
- SBBQ $0, R9; MOVQ R9, 8+z; \
- SBBQ $0, R10; MOVQ R10, 16+z; \
- SBBQ DX, R11; MOVQ R11, 24+z; \
- SBBQ $0, R12; MOVQ R12, 32+z; \
- SBBQ $0, R13; MOVQ R13, 40+z; \
- SBBQ $0, R14; MOVQ R14, 48+z;
-
-// maddBmi2Adx multiplies x and y and accumulates in z
-// Uses: AX, DX, R15, FLAGS
-// Instr: x86_64, bmi2, adx
-#define maddBmi2Adx(z,x,y,i,r0,r1,r2,r3,r4,r5,r6) \
- MOVQ i+y, DX; XORL AX, AX; \
- MULXQ 0+x, AX, R8; ADOXQ AX, r0; ADCXQ R8, r1; MOVQ r0,i+z; \
- MULXQ 8+x, AX, r0; ADOXQ AX, r1; ADCXQ r0, r2; MOVQ $0, R8; \
- MULXQ 16+x, AX, r0; ADOXQ AX, r2; ADCXQ r0, r3; \
- MULXQ 24+x, AX, r0; ADOXQ AX, r3; ADCXQ r0, r4; \
- MULXQ 32+x, AX, r0; ADOXQ AX, r4; ADCXQ r0, r5; \
- MULXQ 40+x, AX, r0; ADOXQ AX, r5; ADCXQ r0, r6; \
- MULXQ 48+x, AX, r0; ADOXQ AX, r6; ADCXQ R8, r0; \
- ;;;;;;;;;;;;;;;;;;; ADOXQ R8, r0;
-
-// integerMulAdx multiplies x and y and stores in z
-// Uses: AX, DX, R8-R15, FLAGS
-// Instr: x86_64, bmi2, adx
-#define integerMulAdx(z,x,y) \
- MOVL $0,R15; \
- MOVQ 0+y, DX; XORL AX, AX; MOVQ $0, R8; \
- MULXQ 0+x, AX, R9; MOVQ AX, 0+z; \
- MULXQ 8+x, AX, R10; ADCXQ AX, R9; \
- MULXQ 16+x, AX, R11; ADCXQ AX, R10; \
- MULXQ 24+x, AX, R12; ADCXQ AX, R11; \
- MULXQ 32+x, AX, R13; ADCXQ AX, R12; \
- MULXQ 40+x, AX, R14; ADCXQ AX, R13; \
- MULXQ 48+x, AX, R15; ADCXQ AX, R14; \
- ;;;;;;;;;;;;;;;;;;;; ADCXQ R8, R15; \
- maddBmi2Adx(z,x,y, 8, R9,R10,R11,R12,R13,R14,R15) \
- maddBmi2Adx(z,x,y,16,R10,R11,R12,R13,R14,R15, R9) \
- maddBmi2Adx(z,x,y,24,R11,R12,R13,R14,R15, R9,R10) \
- maddBmi2Adx(z,x,y,32,R12,R13,R14,R15, R9,R10,R11) \
- maddBmi2Adx(z,x,y,40,R13,R14,R15, R9,R10,R11,R12) \
- maddBmi2Adx(z,x,y,48,R14,R15, R9,R10,R11,R12,R13) \
- MOVQ R15, 56+z; \
- MOVQ R9, 64+z; \
- MOVQ R10, 72+z; \
- MOVQ R11, 80+z; \
- MOVQ R12, 88+z; \
- MOVQ R13, 96+z; \
- MOVQ R14, 104+z;
-
-// maddLegacy multiplies x and y and accumulates in z
-// Uses: AX, DX, R15, FLAGS
-// Instr: x86_64
-#define maddLegacy(z,x,y,i) \
- MOVQ i+y, R15; \
- MOVQ 0+x, AX; MULQ R15; MOVQ AX, R8; ;;;;;;;;;;;; MOVQ DX, R9; \
- MOVQ 8+x, AX; MULQ R15; ADDQ AX, R9; ADCQ $0, DX; MOVQ DX, R10; \
- MOVQ 16+x, AX; MULQ R15; ADDQ AX, R10; ADCQ $0, DX; MOVQ DX, R11; \
- MOVQ 24+x, AX; MULQ R15; ADDQ AX, R11; ADCQ $0, DX; MOVQ DX, R12; \
- MOVQ 32+x, AX; MULQ R15; ADDQ AX, R12; ADCQ $0, DX; MOVQ DX, R13; \
- MOVQ 40+x, AX; MULQ R15; ADDQ AX, R13; ADCQ $0, DX; MOVQ DX, R14; \
- MOVQ 48+x, AX; MULQ R15; ADDQ AX, R14; ADCQ $0, DX; \
- ADDQ 0+i+z, R8; MOVQ R8, 0+i+z; \
- ADCQ 8+i+z, R9; MOVQ R9, 8+i+z; \
- ADCQ 16+i+z, R10; MOVQ R10, 16+i+z; \
- ADCQ 24+i+z, R11; MOVQ R11, 24+i+z; \
- ADCQ 32+i+z, R12; MOVQ R12, 32+i+z; \
- ADCQ 40+i+z, R13; MOVQ R13, 40+i+z; \
- ADCQ 48+i+z, R14; MOVQ R14, 48+i+z; \
- ADCQ $0, DX; MOVQ DX, 56+i+z;
-
-// integerMulLeg multiplies x and y and stores in z
-// Uses: AX, DX, R8-R15, FLAGS
-// Instr: x86_64
-#define integerMulLeg(z,x,y) \
- MOVQ 0+y, R15; \
- MOVQ 0+x, AX; MULQ R15; MOVQ AX, 0+z; ;;;;;;;;;;;; MOVQ DX, R8; \
- MOVQ 8+x, AX; MULQ R15; ADDQ AX, R8; ADCQ $0, DX; MOVQ DX, R9; MOVQ R8, 8+z; \
- MOVQ 16+x, AX; MULQ R15; ADDQ AX, R9; ADCQ $0, DX; MOVQ DX, R10; MOVQ R9, 16+z; \
- MOVQ 24+x, AX; MULQ R15; ADDQ AX, R10; ADCQ $0, DX; MOVQ DX, R11; MOVQ R10, 24+z; \
- MOVQ 32+x, AX; MULQ R15; ADDQ AX, R11; ADCQ $0, DX; MOVQ DX, R12; MOVQ R11, 32+z; \
- MOVQ 40+x, AX; MULQ R15; ADDQ AX, R12; ADCQ $0, DX; MOVQ DX, R13; MOVQ R12, 40+z; \
- MOVQ 48+x, AX; MULQ R15; ADDQ AX, R13; ADCQ $0, DX; MOVQ DX,56+z; MOVQ R13, 48+z; \
- maddLegacy(z,x,y, 8) \
- maddLegacy(z,x,y,16) \
- maddLegacy(z,x,y,24) \
- maddLegacy(z,x,y,32) \
- maddLegacy(z,x,y,40) \
- maddLegacy(z,x,y,48)
-
-// integerSqrLeg squares x and stores in z
-// Uses: AX, CX, DX, R8-R15, FLAGS
-// Instr: x86_64
-#define integerSqrLeg(z,x) \
- XORL R15, R15; \
- MOVQ 0+x, CX; \
- MOVQ CX, AX; MULQ CX; MOVQ AX, 0+z; MOVQ DX, R8; \
- ADDQ CX, CX; ADCQ $0, R15; \
- MOVQ 8+x, AX; MULQ CX; ADDQ AX, R8; ADCQ $0, DX; MOVQ DX, R9; MOVQ R8, 8+z; \
- MOVQ 16+x, AX; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; MOVQ DX, R10; \
- MOVQ 24+x, AX; MULQ CX; ADDQ AX, R10; ADCQ $0, DX; MOVQ DX, R11; \
- MOVQ 32+x, AX; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; MOVQ DX, R12; \
- MOVQ 40+x, AX; MULQ CX; ADDQ AX, R12; ADCQ $0, DX; MOVQ DX, R13; \
- MOVQ 48+x, AX; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; MOVQ DX, R14; \
- \
- MOVQ 8+x, CX; \
- MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \
- ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; MOVQ R9,16+z; \
- MOVQ R15, AX; NEGQ AX; ANDQ 8+x, AX; ADDQ AX, DX; ADCQ $0, R11; MOVQ DX, R8; \
- ADDQ 8+x, CX; ADCQ $0, R15; \
- MOVQ 16+x, AX; MULQ CX; ADDQ AX, R10; ADCQ $0, DX; ADDQ R8, R10; ADCQ $0, DX; MOVQ DX, R8; MOVQ R10, 24+z; \
- MOVQ 24+x, AX; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; ADDQ R8, R11; ADCQ $0, DX; MOVQ DX, R8; \
- MOVQ 32+x, AX; MULQ CX; ADDQ AX, R12; ADCQ $0, DX; ADDQ R8, R12; ADCQ $0, DX; MOVQ DX, R8; \
- MOVQ 40+x, AX; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; ADDQ R8, R13; ADCQ $0, DX; MOVQ DX, R8; \
- MOVQ 48+x, AX; MULQ CX; ADDQ AX, R14; ADCQ $0, DX; ADDQ R8, R14; ADCQ $0, DX; MOVQ DX, R9; \
- \
- MOVQ 16+x, CX; \
- MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \
- ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; MOVQ R11, 32+z; \
- MOVQ R15, AX; NEGQ AX; ANDQ 16+x,AX; ADDQ AX, DX; ADCQ $0, R13; MOVQ DX, R8; \
- ADDQ 16+x, CX; ADCQ $0, R15; \
- MOVQ 24+x, AX; MULQ CX; ADDQ AX, R12; ADCQ $0, DX; ADDQ R8, R12; ADCQ $0, DX; MOVQ DX, R8; MOVQ R12, 40+z; \
- MOVQ 32+x, AX; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; ADDQ R8, R13; ADCQ $0, DX; MOVQ DX, R8; \
- MOVQ 40+x, AX; MULQ CX; ADDQ AX, R14; ADCQ $0, DX; ADDQ R8, R14; ADCQ $0, DX; MOVQ DX, R8; \
- MOVQ 48+x, AX; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; ADDQ R8, R9; ADCQ $0, DX; MOVQ DX,R10; \
- \
- MOVQ 24+x, CX; \
- MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \
- ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; MOVQ R13, 48+z; \
- MOVQ R15, AX; NEGQ AX; ANDQ 24+x,AX; ADDQ AX, DX; ADCQ $0, R9; MOVQ DX, R8; \
- ADDQ 24+x, CX; ADCQ $0, R15; \
- MOVQ 32+x, AX; MULQ CX; ADDQ AX, R14; ADCQ $0, DX; ADDQ R8, R14; ADCQ $0, DX; MOVQ DX, R8; MOVQ R14, 56+z; \
- MOVQ 40+x, AX; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; ADDQ R8, R9; ADCQ $0, DX; MOVQ DX, R8; \
- MOVQ 48+x, AX; MULQ CX; ADDQ AX, R10; ADCQ $0, DX; ADDQ R8, R10; ADCQ $0, DX; MOVQ DX,R11; \
- \
- MOVQ 32+x, CX; \
- MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \
- ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; MOVQ R9, 64+z; \
- MOVQ R15, AX; NEGQ AX; ANDQ 32+x,AX; ADDQ AX, DX; ADCQ $0, R11; MOVQ DX, R8; \
- ADDQ 32+x, CX; ADCQ $0, R15; \
- MOVQ 40+x, AX; MULQ CX; ADDQ AX, R10; ADCQ $0, DX; ADDQ R8, R10; ADCQ $0, DX; MOVQ DX, R8; MOVQ R10, 72+z; \
- MOVQ 48+x, AX; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; ADDQ R8, R11; ADCQ $0, DX; MOVQ DX,R12; \
- \
- XORL R13, R13; \
- XORL R14, R14; \
- MOVQ 40+x, CX; \
- MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \
- ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; MOVQ R11, 80+z; \
- MOVQ R15, AX; NEGQ AX; ANDQ 40+x,AX; ADDQ AX, DX; ADCQ $0, R13; MOVQ DX, R8; \
- ADDQ 40+x, CX; ADCQ $0, R15; \
- MOVQ 48+x, AX; MULQ CX; ADDQ AX, R12; ADCQ $0, DX; ADDQ R8, R12; ADCQ $0, DX; MOVQ DX, R8; MOVQ R12, 88+z; \
- ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ADDQ R8, R13; ADCQ $0,R14; \
- \
- XORL R9, R9; \
- MOVQ 48+x, CX; \
- MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \
- ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; MOVQ R13, 96+z; \
- MOVQ R15, AX; NEGQ AX; ANDQ 48+x,AX; ADDQ AX, DX; ADCQ $0, R9; MOVQ DX, R8; \
- ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ADDQ R8,R14; ADCQ $0, R9; MOVQ R14, 104+z;
-
-
-// integerSqrAdx squares x and stores in z
-// Uses: AX, CX, DX, R8-R15, FLAGS
-// Instr: x86_64, bmi2, adx
-#define integerSqrAdx(z,x) \
- XORL R15, R15; \
- MOVQ 0+x, DX; \
- ;;;;;;;;;;;;;; MULXQ DX, AX, R8; MOVQ AX, 0+z; \
- ADDQ DX, DX; ADCQ $0, R15; CLC; \
- MULXQ 8+x, AX, R9; ADCXQ AX, R8; MOVQ R8, 8+z; \
- MULXQ 16+x, AX, R10; ADCXQ AX, R9; MOVQ $0, R8;\
- MULXQ 24+x, AX, R11; ADCXQ AX, R10; \
- MULXQ 32+x, AX, R12; ADCXQ AX, R11; \
- MULXQ 40+x, AX, R13; ADCXQ AX, R12; \
- MULXQ 48+x, AX, R14; ADCXQ AX, R13; \
- ;;;;;;;;;;;;;;;;;;;; ADCXQ R8, R14; \
- \
- MOVQ 8+x, DX; \
- MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \
- MULXQ AX, AX, CX; \
- MOVQ R15, R8; NEGQ R8; ANDQ 8+x, R8; \
- ADDQ AX, R9; MOVQ R9, 16+z; \
- ADCQ CX, R8; \
- ADCQ $0, R11; \
- ADDQ 8+x, DX; \
- ADCQ $0, R15; \
- XORL R9, R9; ;;;;;;;;;;;;;;;;;;;;; ADOXQ R8, R10; \
- MULXQ 16+x, AX, CX; ADCXQ AX, R10; ADOXQ CX, R11; MOVQ R10, 24+z; \
- MULXQ 24+x, AX, CX; ADCXQ AX, R11; ADOXQ CX, R12; MOVQ $0, R10; \
- MULXQ 32+x, AX, CX; ADCXQ AX, R12; ADOXQ CX, R13; \
- MULXQ 40+x, AX, CX; ADCXQ AX, R13; ADOXQ CX, R14; \
- MULXQ 48+x, AX, CX; ADCXQ AX, R14; ADOXQ CX, R9; \
- ;;;;;;;;;;;;;;;;;;; ADCXQ R10, R9; \
- \
- MOVQ 16+x, DX; \
- MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \
- MULXQ AX, AX, CX; \
- MOVQ R15, R8; NEGQ R8; ANDQ 16+x, R8; \
- ADDQ AX, R11; MOVQ R11, 32+z; \
- ADCQ CX, R8; \
- ADCQ $0, R13; \
- ADDQ 16+x, DX; \
- ADCQ $0, R15; \
- XORL R11, R11; ;;;;;;;;;;;;;;;;;;; ADOXQ R8, R12; \
- MULXQ 24+x, AX, CX; ADCXQ AX, R12; ADOXQ CX, R13; MOVQ R12, 40+z; \
- MULXQ 32+x, AX, CX; ADCXQ AX, R13; ADOXQ CX, R14; MOVQ $0, R12; \
- MULXQ 40+x, AX, CX; ADCXQ AX, R14; ADOXQ CX, R9; \
- MULXQ 48+x, AX, CX; ADCXQ AX, R9; ADOXQ CX, R10; \
- ;;;;;;;;;;;;;;;;;;; ADCXQ R11,R10; \
- \
- MOVQ 24+x, DX; \
- MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \
- MULXQ AX, AX, CX; \
- MOVQ R15, R8; NEGQ R8; ANDQ 24+x, R8; \
- ADDQ AX, R13; MOVQ R13, 48+z; \
- ADCQ CX, R8; \
- ADCQ $0, R9; \
- ADDQ 24+x, DX; \
- ADCQ $0, R15; \
- XORL R13, R13; ;;;;;;;;;;;;;;;;;;; ADOXQ R8, R14; \
- MULXQ 32+x, AX, CX; ADCXQ AX, R14; ADOXQ CX, R9; MOVQ R14, 56+z; \
- MULXQ 40+x, AX, CX; ADCXQ AX, R9; ADOXQ CX, R10; MOVQ $0, R14; \
- MULXQ 48+x, AX, CX; ADCXQ AX, R10; ADOXQ CX, R11; \
- ;;;;;;;;;;;;;;;;;;; ADCXQ R12,R11; \
- \
- MOVQ 32+x, DX; \
- MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \
- MULXQ AX, AX, CX; \
- MOVQ R15, R8; NEGQ R8; ANDQ 32+x, R8; \
- ADDQ AX, R9; MOVQ R9, 64+z; \
- ADCQ CX, R8; \
- ADCQ $0, R11; \
- ADDQ 32+x, DX; \
- ADCQ $0, R15; \
- XORL R9, R9; ;;;;;;;;;;;;;;;;;;;;; ADOXQ R8, R10; \
- MULXQ 40+x, AX, CX; ADCXQ AX, R10; ADOXQ CX, R11; MOVQ R10, 72+z; \
- MULXQ 48+x, AX, CX; ADCXQ AX, R11; ADOXQ CX, R12; \
- ;;;;;;;;;;;;;;;;;;; ADCXQ R13,R12; \
- \
- MOVQ 40+x, DX; \
- MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \
- MULXQ AX, AX, CX; \
- MOVQ R15, R8; NEGQ R8; ANDQ 40+x, R8; \
- ADDQ AX, R11; MOVQ R11, 80+z; \
- ADCQ CX, R8; \
- ADCQ $0, R13; \
- ADDQ 40+x, DX; \
- ADCQ $0, R15; \
- XORL R11, R11; ;;;;;;;;;;;;;;;;;;; ADOXQ R8, R12; \
- MULXQ 48+x, AX, CX; ADCXQ AX, R12; ADOXQ CX, R13; MOVQ R12, 88+z; \
- ;;;;;;;;;;;;;;;;;;; ADCXQ R14,R13; \
- \
- MOVQ 48+x, DX; \
- MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \
- MULXQ AX, AX, CX; \
- MOVQ R15, R8; NEGQ R8; ANDQ 48+x, R8; \
- XORL R10, R10; ;;;;;;;;;;;;;; ADOXQ CX, R14; \
- ;;;;;;;;;;;;;; ADCXQ AX, R13; ;;;;;;;;;;;;;; MOVQ R13, 96+z; \
- ;;;;;;;;;;;;;; ADCXQ R8, R14; MOVQ R14, 104+z;
-
-// reduceFromDoubleLeg finds a z=x modulo p such that z<2^448 and stores in z
-// Uses: AX, R8-R15, FLAGS
-// Instr: x86_64
-#define reduceFromDoubleLeg(z,x) \
- /* ( ,2C13,2C12,2C11,2C10|C10,C9,C8, C7) + (C6,...,C0) */ \
- /* (r14, r13, r12, r11, r10,r9,r8,r15) */ \
- MOVQ 80+x,AX; MOVQ AX,R10; \
- MOVQ $0xFFFFFFFF00000000, R8; \
- ANDQ R8,R10; \
- \
- MOVQ $0,R14; \
- MOVQ 104+x,R13; SHLQ $1,R13,R14; \
- MOVQ 96+x,R12; SHLQ $1,R12,R13; \
- MOVQ 88+x,R11; SHLQ $1,R11,R12; \
- MOVQ 72+x, R9; SHLQ $1,R10,R11; \
- MOVQ 64+x, R8; SHLQ $1,R10; \
- MOVQ $0xFFFFFFFF,R15; ANDQ R15,AX; ORQ AX,R10; \
- MOVQ 56+x,R15; \
- \
- ADDQ 0+x,R15; MOVQ R15, 0+z; MOVQ 56+x,R15; \
- ADCQ 8+x, R8; MOVQ R8, 8+z; MOVQ 64+x, R8; \
- ADCQ 16+x, R9; MOVQ R9,16+z; MOVQ 72+x, R9; \
- ADCQ 24+x,R10; MOVQ R10,24+z; MOVQ 80+x,R10; \
- ADCQ 32+x,R11; MOVQ R11,32+z; MOVQ 88+x,R11; \
- ADCQ 40+x,R12; MOVQ R12,40+z; MOVQ 96+x,R12; \
- ADCQ 48+x,R13; MOVQ R13,48+z; MOVQ 104+x,R13; \
- ADCQ $0,R14; \
- /* (c10c9,c9c8,c8c7,c7c13,c13c12,c12c11,c11c10) + (c6,...,c0) */ \
- /* ( r9, r8, r15, r13, r12, r11, r10) */ \
- MOVQ R10, AX; \
- SHRQ $32,R11,R10; \
- SHRQ $32,R12,R11; \
- SHRQ $32,R13,R12; \
- SHRQ $32,R15,R13; \
- SHRQ $32, R8,R15; \
- SHRQ $32, R9, R8; \
- SHRQ $32, AX, R9; \
- \
- ADDQ 0+z,R10; \
- ADCQ 8+z,R11; \
- ADCQ 16+z,R12; \
- ADCQ 24+z,R13; \
- ADCQ 32+z,R15; \
- ADCQ 40+z, R8; \
- ADCQ 48+z, R9; \
- ADCQ $0,R14; \
- /* ( c7) + (c6,...,c0) */ \
- /* (r14) */ \
- MOVQ R14, AX; SHLQ $32, AX; \
- ADDQ R14,R10; MOVQ $0,R14; \
- ADCQ $0,R11; \
- ADCQ $0,R12; \
- ADCQ AX,R13; \
- ADCQ $0,R15; \
- ADCQ $0, R8; \
- ADCQ $0, R9; \
- ADCQ $0,R14; \
- /* ( c7) + (c6,...,c0) */ \
- /* (r14) */ \
- MOVQ R14, AX; SHLQ $32,AX; \
- ADDQ R14,R10; MOVQ R10, 0+z; \
- ADCQ $0,R11; MOVQ R11, 8+z; \
- ADCQ $0,R12; MOVQ R12,16+z; \
- ADCQ AX,R13; MOVQ R13,24+z; \
- ADCQ $0,R15; MOVQ R15,32+z; \
- ADCQ $0, R8; MOVQ R8,40+z; \
- ADCQ $0, R9; MOVQ R9,48+z;
-
-// reduceFromDoubleAdx finds a z=x modulo p such that z<2^448 and stores in z
-// Uses: AX, R8-R15, FLAGS
-// Instr: x86_64, adx
-#define reduceFromDoubleAdx(z,x) \
- /* ( ,2C13,2C12,2C11,2C10|C10,C9,C8, C7) + (C6,...,C0) */ \
- /* (r14, r13, r12, r11, r10,r9,r8,r15) */ \
- MOVQ 80+x,AX; MOVQ AX,R10; \
- MOVQ $0xFFFFFFFF00000000, R8; \
- ANDQ R8,R10; \
- \
- MOVQ $0,R14; \
- MOVQ 104+x,R13; SHLQ $1,R13,R14; \
- MOVQ 96+x,R12; SHLQ $1,R12,R13; \
- MOVQ 88+x,R11; SHLQ $1,R11,R12; \
- MOVQ 72+x, R9; SHLQ $1,R10,R11; \
- MOVQ 64+x, R8; SHLQ $1,R10; \
- MOVQ $0xFFFFFFFF,R15; ANDQ R15,AX; ORQ AX,R10; \
- MOVQ 56+x,R15; \
- \
- XORL AX,AX; \
- ADCXQ 0+x,R15; MOVQ R15, 0+z; MOVQ 56+x,R15; \
- ADCXQ 8+x, R8; MOVQ R8, 8+z; MOVQ 64+x, R8; \
- ADCXQ 16+x, R9; MOVQ R9,16+z; MOVQ 72+x, R9; \
- ADCXQ 24+x,R10; MOVQ R10,24+z; MOVQ 80+x,R10; \
- ADCXQ 32+x,R11; MOVQ R11,32+z; MOVQ 88+x,R11; \
- ADCXQ 40+x,R12; MOVQ R12,40+z; MOVQ 96+x,R12; \
- ADCXQ 48+x,R13; MOVQ R13,48+z; MOVQ 104+x,R13; \
- ADCXQ AX,R14; \
- /* (c10c9,c9c8,c8c7,c7c13,c13c12,c12c11,c11c10) + (c6,...,c0) */ \
- /* ( r9, r8, r15, r13, r12, r11, r10) */ \
- MOVQ R10, AX; \
- SHRQ $32,R11,R10; \
- SHRQ $32,R12,R11; \
- SHRQ $32,R13,R12; \
- SHRQ $32,R15,R13; \
- SHRQ $32, R8,R15; \
- SHRQ $32, R9, R8; \
- SHRQ $32, AX, R9; \
- \
- XORL AX,AX; \
- ADCXQ 0+z,R10; \
- ADCXQ 8+z,R11; \
- ADCXQ 16+z,R12; \
- ADCXQ 24+z,R13; \
- ADCXQ 32+z,R15; \
- ADCXQ 40+z, R8; \
- ADCXQ 48+z, R9; \
- ADCXQ AX,R14; \
- /* ( c7) + (c6,...,c0) */ \
- /* (r14) */ \
- MOVQ R14, AX; SHLQ $32, AX; \
- CLC; \
- ADCXQ R14,R10; MOVQ $0,R14; \
- ADCXQ R14,R11; \
- ADCXQ R14,R12; \
- ADCXQ AX,R13; \
- ADCXQ R14,R15; \
- ADCXQ R14, R8; \
- ADCXQ R14, R9; \
- ADCXQ R14,R14; \
- /* ( c7) + (c6,...,c0) */ \
- /* (r14) */ \
- MOVQ R14, AX; SHLQ $32, AX; \
- CLC; \
- ADCXQ R14,R10; MOVQ R10, 0+z; MOVQ $0,R14; \
- ADCXQ R14,R11; MOVQ R11, 8+z; \
- ADCXQ R14,R12; MOVQ R12,16+z; \
- ADCXQ AX,R13; MOVQ R13,24+z; \
- ADCXQ R14,R15; MOVQ R15,32+z; \
- ADCXQ R14, R8; MOVQ R8,40+z; \
- ADCXQ R14, R9; MOVQ R9,48+z;
-
-// addSub calculates two operations: x,y = x+y,x-y
-// Uses: AX, DX, R8-R15, FLAGS
-#define addSub(x,y) \
- MOVQ 0+x, R8; ADDQ 0+y, R8; \
- MOVQ 8+x, R9; ADCQ 8+y, R9; \
- MOVQ 16+x, R10; ADCQ 16+y, R10; \
- MOVQ 24+x, R11; ADCQ 24+y, R11; \
- MOVQ 32+x, R12; ADCQ 32+y, R12; \
- MOVQ 40+x, R13; ADCQ 40+y, R13; \
- MOVQ 48+x, R14; ADCQ 48+y, R14; \
- MOVQ $0, AX; ADCQ $0, AX; \
- MOVQ AX, DX; \
- SHLQ $32, DX; \
- ADDQ AX, R8; MOVQ $0, AX; \
- ADCQ $0, R9; \
- ADCQ $0, R10; \
- ADCQ DX, R11; \
- ADCQ $0, R12; \
- ADCQ $0, R13; \
- ADCQ $0, R14; \
- ADCQ $0, AX; \
- MOVQ AX, DX; \
- SHLQ $32, DX; \
- ADDQ AX, R8; MOVQ 0+x,AX; MOVQ R8, 0+x; MOVQ AX, R8; \
- ADCQ $0, R9; MOVQ 8+x,AX; MOVQ R9, 8+x; MOVQ AX, R9; \
- ADCQ $0, R10; MOVQ 16+x,AX; MOVQ R10, 16+x; MOVQ AX, R10; \
- ADCQ DX, R11; MOVQ 24+x,AX; MOVQ R11, 24+x; MOVQ AX, R11; \
- ADCQ $0, R12; MOVQ 32+x,AX; MOVQ R12, 32+x; MOVQ AX, R12; \
- ADCQ $0, R13; MOVQ 40+x,AX; MOVQ R13, 40+x; MOVQ AX, R13; \
- ADCQ $0, R14; MOVQ 48+x,AX; MOVQ R14, 48+x; MOVQ AX, R14; \
- SUBQ 0+y, R8; \
- SBBQ 8+y, R9; \
- SBBQ 16+y, R10; \
- SBBQ 24+y, R11; \
- SBBQ 32+y, R12; \
- SBBQ 40+y, R13; \
- SBBQ 48+y, R14; \
- MOVQ $0, AX; SETCS AX; \
- MOVQ AX, DX; \
- SHLQ $32, DX; \
- SUBQ AX, R8; MOVQ $0, AX; \
- SBBQ $0, R9; \
- SBBQ $0, R10; \
- SBBQ DX, R11; \
- SBBQ $0, R12; \
- SBBQ $0, R13; \
- SBBQ $0, R14; \
- SETCS AX; \
- MOVQ AX, DX; \
- SHLQ $32, DX; \
- SUBQ AX, R8; MOVQ R8, 0+y; \
- SBBQ $0, R9; MOVQ R9, 8+y; \
- SBBQ $0, R10; MOVQ R10, 16+y; \
- SBBQ DX, R11; MOVQ R11, 24+y; \
- SBBQ $0, R12; MOVQ R12, 32+y; \
- SBBQ $0, R13; MOVQ R13, 40+y; \
- SBBQ $0, R14; MOVQ R14, 48+y;
diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.s b/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.s
deleted file mode 100644
index 3f1f07c9862..00000000000
--- a/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.s
+++ /dev/null
@@ -1,75 +0,0 @@
-//go:build amd64 && !purego
-// +build amd64,!purego
-
-#include "textflag.h"
-#include "fp_amd64.h"
-
-// func cmovAmd64(x, y *Elt, n uint)
-TEXT ·cmovAmd64(SB),NOSPLIT,$0-24
- MOVQ x+0(FP), DI
- MOVQ y+8(FP), SI
- MOVQ n+16(FP), BX
- cselect(0(DI),0(SI),BX)
- RET
-
-// func cswapAmd64(x, y *Elt, n uint)
-TEXT ·cswapAmd64(SB),NOSPLIT,$0-24
- MOVQ x+0(FP), DI
- MOVQ y+8(FP), SI
- MOVQ n+16(FP), BX
- cswap(0(DI),0(SI),BX)
- RET
-
-// func subAmd64(z, x, y *Elt)
-TEXT ·subAmd64(SB),NOSPLIT,$0-24
- MOVQ z+0(FP), DI
- MOVQ x+8(FP), SI
- MOVQ y+16(FP), BX
- subtraction(0(DI),0(SI),0(BX))
- RET
-
-// func addsubAmd64(x, y *Elt)
-TEXT ·addsubAmd64(SB),NOSPLIT,$0-16
- MOVQ x+0(FP), DI
- MOVQ y+8(FP), SI
- addSub(0(DI),0(SI))
- RET
-
-#define addLegacy \
- additionLeg(0(DI),0(SI),0(BX))
-#define addBmi2Adx \
- additionAdx(0(DI),0(SI),0(BX))
-
-#define mulLegacy \
- integerMulLeg(0(SP),0(SI),0(BX)) \
- reduceFromDoubleLeg(0(DI),0(SP))
-#define mulBmi2Adx \
- integerMulAdx(0(SP),0(SI),0(BX)) \
- reduceFromDoubleAdx(0(DI),0(SP))
-
-#define sqrLegacy \
- integerSqrLeg(0(SP),0(SI)) \
- reduceFromDoubleLeg(0(DI),0(SP))
-#define sqrBmi2Adx \
- integerSqrAdx(0(SP),0(SI)) \
- reduceFromDoubleAdx(0(DI),0(SP))
-
-// func addAmd64(z, x, y *Elt)
-TEXT ·addAmd64(SB),NOSPLIT,$0-24
- MOVQ z+0(FP), DI
- MOVQ x+8(FP), SI
- MOVQ y+16(FP), BX
- CHECK_BMI2ADX(LADD, addLegacy, addBmi2Adx)
-
-// func mulAmd64(z, x, y *Elt)
-TEXT ·mulAmd64(SB),NOSPLIT,$112-24
- MOVQ z+0(FP), DI
- MOVQ x+8(FP), SI
- MOVQ y+16(FP), BX
- CHECK_BMI2ADX(LMUL, mulLegacy, mulBmi2Adx)
-
-// func sqrAmd64(z, x *Elt)
-TEXT ·sqrAmd64(SB),NOSPLIT,$112-16
- MOVQ z+0(FP), DI
- MOVQ x+8(FP), SI
- CHECK_BMI2ADX(LSQR, sqrLegacy, sqrBmi2Adx)
diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp_generic.go b/vendor/github.com/cloudflare/circl/math/fp448/fp_generic.go
deleted file mode 100644
index 47a0b63205f..00000000000
--- a/vendor/github.com/cloudflare/circl/math/fp448/fp_generic.go
+++ /dev/null
@@ -1,339 +0,0 @@
-package fp448
-
-import (
- "encoding/binary"
- "math/bits"
-)
-
-func cmovGeneric(x, y *Elt, n uint) {
- m := -uint64(n & 0x1)
- x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8])
- x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8])
- x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8])
- x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8])
- x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8])
- x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8])
- x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8])
-
- y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8])
- y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8])
- y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8])
- y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8])
- y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8])
- y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8])
- y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8])
-
- x0 = (x0 &^ m) | (y0 & m)
- x1 = (x1 &^ m) | (y1 & m)
- x2 = (x2 &^ m) | (y2 & m)
- x3 = (x3 &^ m) | (y3 & m)
- x4 = (x4 &^ m) | (y4 & m)
- x5 = (x5 &^ m) | (y5 & m)
- x6 = (x6 &^ m) | (y6 & m)
-
- binary.LittleEndian.PutUint64(x[0*8:1*8], x0)
- binary.LittleEndian.PutUint64(x[1*8:2*8], x1)
- binary.LittleEndian.PutUint64(x[2*8:3*8], x2)
- binary.LittleEndian.PutUint64(x[3*8:4*8], x3)
- binary.LittleEndian.PutUint64(x[4*8:5*8], x4)
- binary.LittleEndian.PutUint64(x[5*8:6*8], x5)
- binary.LittleEndian.PutUint64(x[6*8:7*8], x6)
-}
-
-func cswapGeneric(x, y *Elt, n uint) {
- m := -uint64(n & 0x1)
- x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8])
- x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8])
- x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8])
- x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8])
- x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8])
- x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8])
- x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8])
-
- y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8])
- y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8])
- y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8])
- y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8])
- y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8])
- y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8])
- y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8])
-
- t0 := m & (x0 ^ y0)
- t1 := m & (x1 ^ y1)
- t2 := m & (x2 ^ y2)
- t3 := m & (x3 ^ y3)
- t4 := m & (x4 ^ y4)
- t5 := m & (x5 ^ y5)
- t6 := m & (x6 ^ y6)
- x0 ^= t0
- x1 ^= t1
- x2 ^= t2
- x3 ^= t3
- x4 ^= t4
- x5 ^= t5
- x6 ^= t6
- y0 ^= t0
- y1 ^= t1
- y2 ^= t2
- y3 ^= t3
- y4 ^= t4
- y5 ^= t5
- y6 ^= t6
-
- binary.LittleEndian.PutUint64(x[0*8:1*8], x0)
- binary.LittleEndian.PutUint64(x[1*8:2*8], x1)
- binary.LittleEndian.PutUint64(x[2*8:3*8], x2)
- binary.LittleEndian.PutUint64(x[3*8:4*8], x3)
- binary.LittleEndian.PutUint64(x[4*8:5*8], x4)
- binary.LittleEndian.PutUint64(x[5*8:6*8], x5)
- binary.LittleEndian.PutUint64(x[6*8:7*8], x6)
-
- binary.LittleEndian.PutUint64(y[0*8:1*8], y0)
- binary.LittleEndian.PutUint64(y[1*8:2*8], y1)
- binary.LittleEndian.PutUint64(y[2*8:3*8], y2)
- binary.LittleEndian.PutUint64(y[3*8:4*8], y3)
- binary.LittleEndian.PutUint64(y[4*8:5*8], y4)
- binary.LittleEndian.PutUint64(y[5*8:6*8], y5)
- binary.LittleEndian.PutUint64(y[6*8:7*8], y6)
-}
-
-func addGeneric(z, x, y *Elt) {
- x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8])
- x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8])
- x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8])
- x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8])
- x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8])
- x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8])
- x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8])
-
- y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8])
- y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8])
- y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8])
- y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8])
- y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8])
- y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8])
- y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8])
-
- z0, c0 := bits.Add64(x0, y0, 0)
- z1, c1 := bits.Add64(x1, y1, c0)
- z2, c2 := bits.Add64(x2, y2, c1)
- z3, c3 := bits.Add64(x3, y3, c2)
- z4, c4 := bits.Add64(x4, y4, c3)
- z5, c5 := bits.Add64(x5, y5, c4)
- z6, z7 := bits.Add64(x6, y6, c5)
-
- z0, c0 = bits.Add64(z0, z7, 0)
- z1, c1 = bits.Add64(z1, 0, c0)
- z2, c2 = bits.Add64(z2, 0, c1)
- z3, c3 = bits.Add64(z3, z7<<32, c2)
- z4, c4 = bits.Add64(z4, 0, c3)
- z5, c5 = bits.Add64(z5, 0, c4)
- z6, z7 = bits.Add64(z6, 0, c5)
-
- z0, c0 = bits.Add64(z0, z7, 0)
- z1, c1 = bits.Add64(z1, 0, c0)
- z2, c2 = bits.Add64(z2, 0, c1)
- z3, c3 = bits.Add64(z3, z7<<32, c2)
- z4, c4 = bits.Add64(z4, 0, c3)
- z5, c5 = bits.Add64(z5, 0, c4)
- z6, _ = bits.Add64(z6, 0, c5)
-
- binary.LittleEndian.PutUint64(z[0*8:1*8], z0)
- binary.LittleEndian.PutUint64(z[1*8:2*8], z1)
- binary.LittleEndian.PutUint64(z[2*8:3*8], z2)
- binary.LittleEndian.PutUint64(z[3*8:4*8], z3)
- binary.LittleEndian.PutUint64(z[4*8:5*8], z4)
- binary.LittleEndian.PutUint64(z[5*8:6*8], z5)
- binary.LittleEndian.PutUint64(z[6*8:7*8], z6)
-}
-
-func subGeneric(z, x, y *Elt) {
- x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8])
- x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8])
- x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8])
- x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8])
- x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8])
- x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8])
- x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8])
-
- y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8])
- y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8])
- y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8])
- y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8])
- y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8])
- y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8])
- y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8])
-
- z0, c0 := bits.Sub64(x0, y0, 0)
- z1, c1 := bits.Sub64(x1, y1, c0)
- z2, c2 := bits.Sub64(x2, y2, c1)
- z3, c3 := bits.Sub64(x3, y3, c2)
- z4, c4 := bits.Sub64(x4, y4, c3)
- z5, c5 := bits.Sub64(x5, y5, c4)
- z6, z7 := bits.Sub64(x6, y6, c5)
-
- z0, c0 = bits.Sub64(z0, z7, 0)
- z1, c1 = bits.Sub64(z1, 0, c0)
- z2, c2 = bits.Sub64(z2, 0, c1)
- z3, c3 = bits.Sub64(z3, z7<<32, c2)
- z4, c4 = bits.Sub64(z4, 0, c3)
- z5, c5 = bits.Sub64(z5, 0, c4)
- z6, z7 = bits.Sub64(z6, 0, c5)
-
- z0, c0 = bits.Sub64(z0, z7, 0)
- z1, c1 = bits.Sub64(z1, 0, c0)
- z2, c2 = bits.Sub64(z2, 0, c1)
- z3, c3 = bits.Sub64(z3, z7<<32, c2)
- z4, c4 = bits.Sub64(z4, 0, c3)
- z5, c5 = bits.Sub64(z5, 0, c4)
- z6, _ = bits.Sub64(z6, 0, c5)
-
- binary.LittleEndian.PutUint64(z[0*8:1*8], z0)
- binary.LittleEndian.PutUint64(z[1*8:2*8], z1)
- binary.LittleEndian.PutUint64(z[2*8:3*8], z2)
- binary.LittleEndian.PutUint64(z[3*8:4*8], z3)
- binary.LittleEndian.PutUint64(z[4*8:5*8], z4)
- binary.LittleEndian.PutUint64(z[5*8:6*8], z5)
- binary.LittleEndian.PutUint64(z[6*8:7*8], z6)
-}
-
-func addsubGeneric(x, y *Elt) {
- z := &Elt{}
- addGeneric(z, x, y)
- subGeneric(y, x, y)
- *x = *z
-}
-
-func mulGeneric(z, x, y *Elt) {
- x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8])
- x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8])
- x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8])
- x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8])
- x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8])
- x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8])
- x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8])
-
- y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8])
- y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8])
- y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8])
- y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8])
- y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8])
- y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8])
- y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8])
-
- yy := [7]uint64{y0, y1, y2, y3, y4, y5, y6}
- zz := [7]uint64{}
-
- yi := yy[0]
- h0, l0 := bits.Mul64(x0, yi)
- h1, l1 := bits.Mul64(x1, yi)
- h2, l2 := bits.Mul64(x2, yi)
- h3, l3 := bits.Mul64(x3, yi)
- h4, l4 := bits.Mul64(x4, yi)
- h5, l5 := bits.Mul64(x5, yi)
- h6, l6 := bits.Mul64(x6, yi)
-
- zz[0] = l0
- a0, c0 := bits.Add64(h0, l1, 0)
- a1, c1 := bits.Add64(h1, l2, c0)
- a2, c2 := bits.Add64(h2, l3, c1)
- a3, c3 := bits.Add64(h3, l4, c2)
- a4, c4 := bits.Add64(h4, l5, c3)
- a5, c5 := bits.Add64(h5, l6, c4)
- a6, _ := bits.Add64(h6, 0, c5)
-
- for i := 1; i < 7; i++ {
- yi = yy[i]
- h0, l0 = bits.Mul64(x0, yi)
- h1, l1 = bits.Mul64(x1, yi)
- h2, l2 = bits.Mul64(x2, yi)
- h3, l3 = bits.Mul64(x3, yi)
- h4, l4 = bits.Mul64(x4, yi)
- h5, l5 = bits.Mul64(x5, yi)
- h6, l6 = bits.Mul64(x6, yi)
-
- zz[i], c0 = bits.Add64(a0, l0, 0)
- a0, c1 = bits.Add64(a1, l1, c0)
- a1, c2 = bits.Add64(a2, l2, c1)
- a2, c3 = bits.Add64(a3, l3, c2)
- a3, c4 = bits.Add64(a4, l4, c3)
- a4, c5 = bits.Add64(a5, l5, c4)
- a5, a6 = bits.Add64(a6, l6, c5)
-
- a0, c0 = bits.Add64(a0, h0, 0)
- a1, c1 = bits.Add64(a1, h1, c0)
- a2, c2 = bits.Add64(a2, h2, c1)
- a3, c3 = bits.Add64(a3, h3, c2)
- a4, c4 = bits.Add64(a4, h4, c3)
- a5, c5 = bits.Add64(a5, h5, c4)
- a6, _ = bits.Add64(a6, h6, c5)
- }
- red64(z, &zz, &[7]uint64{a0, a1, a2, a3, a4, a5, a6})
-}
-
-func sqrGeneric(z, x *Elt) { mulGeneric(z, x, x) }
-
-func red64(z *Elt, l, h *[7]uint64) {
- /* (2C13, 2C12, 2C11, 2C10|C10, C9, C8, C7) + (C6,...,C0) */
- h0 := h[0]
- h1 := h[1]
- h2 := h[2]
- h3 := ((h[3] & (0xFFFFFFFF << 32)) << 1) | (h[3] & 0xFFFFFFFF)
- h4 := (h[3] >> 63) | (h[4] << 1)
- h5 := (h[4] >> 63) | (h[5] << 1)
- h6 := (h[5] >> 63) | (h[6] << 1)
- h7 := (h[6] >> 63)
-
- l0, c0 := bits.Add64(h0, l[0], 0)
- l1, c1 := bits.Add64(h1, l[1], c0)
- l2, c2 := bits.Add64(h2, l[2], c1)
- l3, c3 := bits.Add64(h3, l[3], c2)
- l4, c4 := bits.Add64(h4, l[4], c3)
- l5, c5 := bits.Add64(h5, l[5], c4)
- l6, c6 := bits.Add64(h6, l[6], c5)
- l7, _ := bits.Add64(h7, 0, c6)
-
- /* (C10C9, C9C8,C8C7,C7C13,C13C12,C12C11,C11C10) + (C6,...,C0) */
- h0 = (h[3] >> 32) | (h[4] << 32)
- h1 = (h[4] >> 32) | (h[5] << 32)
- h2 = (h[5] >> 32) | (h[6] << 32)
- h3 = (h[6] >> 32) | (h[0] << 32)
- h4 = (h[0] >> 32) | (h[1] << 32)
- h5 = (h[1] >> 32) | (h[2] << 32)
- h6 = (h[2] >> 32) | (h[3] << 32)
-
- l0, c0 = bits.Add64(l0, h0, 0)
- l1, c1 = bits.Add64(l1, h1, c0)
- l2, c2 = bits.Add64(l2, h2, c1)
- l3, c3 = bits.Add64(l3, h3, c2)
- l4, c4 = bits.Add64(l4, h4, c3)
- l5, c5 = bits.Add64(l5, h5, c4)
- l6, c6 = bits.Add64(l6, h6, c5)
- l7, _ = bits.Add64(l7, 0, c6)
-
- /* (C7) + (C6,...,C0) */
- l0, c0 = bits.Add64(l0, l7, 0)
- l1, c1 = bits.Add64(l1, 0, c0)
- l2, c2 = bits.Add64(l2, 0, c1)
- l3, c3 = bits.Add64(l3, l7<<32, c2)
- l4, c4 = bits.Add64(l4, 0, c3)
- l5, c5 = bits.Add64(l5, 0, c4)
- l6, l7 = bits.Add64(l6, 0, c5)
-
- /* (C7) + (C6,...,C0) */
- l0, c0 = bits.Add64(l0, l7, 0)
- l1, c1 = bits.Add64(l1, 0, c0)
- l2, c2 = bits.Add64(l2, 0, c1)
- l3, c3 = bits.Add64(l3, l7<<32, c2)
- l4, c4 = bits.Add64(l4, 0, c3)
- l5, c5 = bits.Add64(l5, 0, c4)
- l6, _ = bits.Add64(l6, 0, c5)
-
- binary.LittleEndian.PutUint64(z[0*8:1*8], l0)
- binary.LittleEndian.PutUint64(z[1*8:2*8], l1)
- binary.LittleEndian.PutUint64(z[2*8:3*8], l2)
- binary.LittleEndian.PutUint64(z[3*8:4*8], l3)
- binary.LittleEndian.PutUint64(z[4*8:5*8], l4)
- binary.LittleEndian.PutUint64(z[5*8:6*8], l5)
- binary.LittleEndian.PutUint64(z[6*8:7*8], l6)
-}
diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp_noasm.go b/vendor/github.com/cloudflare/circl/math/fp448/fp_noasm.go
deleted file mode 100644
index a62225d2962..00000000000
--- a/vendor/github.com/cloudflare/circl/math/fp448/fp_noasm.go
+++ /dev/null
@@ -1,12 +0,0 @@
-//go:build !amd64 || purego
-// +build !amd64 purego
-
-package fp448
-
-func cmov(x, y *Elt, n uint) { cmovGeneric(x, y, n) }
-func cswap(x, y *Elt, n uint) { cswapGeneric(x, y, n) }
-func add(z, x, y *Elt) { addGeneric(z, x, y) }
-func sub(z, x, y *Elt) { subGeneric(z, x, y) }
-func addsub(x, y *Elt) { addsubGeneric(x, y) }
-func mul(z, x, y *Elt) { mulGeneric(z, x, y) }
-func sqr(z, x *Elt) { sqrGeneric(z, x) }
diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fuzzer.go b/vendor/github.com/cloudflare/circl/math/fp448/fuzzer.go
deleted file mode 100644
index 2d7afc80598..00000000000
--- a/vendor/github.com/cloudflare/circl/math/fp448/fuzzer.go
+++ /dev/null
@@ -1,75 +0,0 @@
-//go:build gofuzz
-// +build gofuzz
-
-// How to run the fuzzer:
-//
-// $ go get -u github.com/dvyukov/go-fuzz/go-fuzz
-// $ go get -u github.com/dvyukov/go-fuzz/go-fuzz-build
-// $ go-fuzz-build -libfuzzer -func FuzzReduction -o lib.a
-// $ clang -fsanitize=fuzzer lib.a -o fu.exe
-// $ ./fu.exe
-package fp448
-
-import (
- "encoding/binary"
- "fmt"
- "math/big"
-
- "github.com/cloudflare/circl/internal/conv"
-)
-
-// FuzzReduction is a fuzzer target for red64 function, which reduces t
-// (112 bits) to a number t' (56 bits) congruent modulo p448.
-func FuzzReduction(data []byte) int {
- if len(data) != 2*Size {
- return -1
- }
- var got, want Elt
- var lo, hi [7]uint64
- a := data[:Size]
- b := data[Size:]
- lo[0] = binary.LittleEndian.Uint64(a[0*8 : 1*8])
- lo[1] = binary.LittleEndian.Uint64(a[1*8 : 2*8])
- lo[2] = binary.LittleEndian.Uint64(a[2*8 : 3*8])
- lo[3] = binary.LittleEndian.Uint64(a[3*8 : 4*8])
- lo[4] = binary.LittleEndian.Uint64(a[4*8 : 5*8])
- lo[5] = binary.LittleEndian.Uint64(a[5*8 : 6*8])
- lo[6] = binary.LittleEndian.Uint64(a[6*8 : 7*8])
-
- hi[0] = binary.LittleEndian.Uint64(b[0*8 : 1*8])
- hi[1] = binary.LittleEndian.Uint64(b[1*8 : 2*8])
- hi[2] = binary.LittleEndian.Uint64(b[2*8 : 3*8])
- hi[3] = binary.LittleEndian.Uint64(b[3*8 : 4*8])
- hi[4] = binary.LittleEndian.Uint64(b[4*8 : 5*8])
- hi[5] = binary.LittleEndian.Uint64(b[5*8 : 6*8])
- hi[6] = binary.LittleEndian.Uint64(b[6*8 : 7*8])
-
- red64(&got, &lo, &hi)
-
- t := conv.BytesLe2BigInt(data[:2*Size])
-
- two448 := big.NewInt(1)
- two448.Lsh(two448, 448) // 2^448
- mask448 := big.NewInt(1)
- mask448.Sub(two448, mask448) // 2^448-1
- two224plus1 := big.NewInt(1)
- two224plus1.Lsh(two224plus1, 224)
- two224plus1.Add(two224plus1, big.NewInt(1)) // 2^224+1
-
- var loBig, hiBig big.Int
- for t.Cmp(two448) >= 0 {
- loBig.And(t, mask448)
- hiBig.Rsh(t, 448)
- t.Mul(&hiBig, two224plus1)
- t.Add(t, &loBig)
- }
- conv.BigInt2BytesLe(want[:], t)
-
- if got != want {
- fmt.Printf("in: %v\n", conv.BytesLe2BigInt(data[:2*Size]))
- fmt.Printf("got: %v\n", got)
- fmt.Printf("want: %v\n", want)
- panic("error found")
- }
- return 1
-}
diff --git a/vendor/github.com/cloudflare/circl/math/mlsbset/mlsbset.go b/vendor/github.com/cloudflare/circl/math/mlsbset/mlsbset.go
deleted file mode 100644
index a43851b8bb2..00000000000
--- a/vendor/github.com/cloudflare/circl/math/mlsbset/mlsbset.go
+++ /dev/null
@@ -1,122 +0,0 @@
-// Package mlsbset provides a constant-time exponentiation method with precomputation.
-//
-// References: "Efficient and secure algorithms for GLV-based scalar
-// multiplication and their implementation on GLV–GLS curves" by (Faz-Hernandez et al.)
-// - https://doi.org/10.1007/s13389-014-0085-7
-// - https://eprint.iacr.org/2013/158
-package mlsbset
-
-import (
- "errors"
- "fmt"
- "math/big"
-
- "github.com/cloudflare/circl/internal/conv"
-)
-
-// EltG is a group element.
-type EltG interface{}
-
-// EltP is a precomputed group element.
-type EltP interface{}
-
-// Group defines the operations required by MLSBSet exponentiation method.
-type Group interface {
- Identity() EltG // Returns the identity of the group.
- Sqr(x EltG) // Calculates x = x^2.
- Mul(x EltG, y EltP) // Calculates x = x*y.
- NewEltP() EltP // Returns an arbitrary precomputed element.
- ExtendedEltP() EltP // Returns the precomputed element x^(2^(w*d)).
- Lookup(a EltP, v uint, s, u int32) // Sets a = s*T[v][u].
-}
-
-// Params contains the parameters of the encoding.
-type Params struct {
- T uint // T is the maximum size (in bits) of exponents.
- V uint // V is the number of tables.
- W uint // W is the window size.
- E uint // E is the number of digits per table.
- D uint // D is the number of digits in total.
- L uint // L is the length of the code.
-}
-
-// Encoder allows to convert integers into valid powers.
-type Encoder struct{ p Params }
-
-// New produces an encoder of the MLSBSet algorithm.
-func New(t, v, w uint) (Encoder, error) {
- if !(t > 1 && v >= 1 && w >= 2) {
- return Encoder{}, errors.New("t>1, v>=1, w>=2")
- }
- e := (t + w*v - 1) / (w * v)
- d := e * v
- l := d * w
- return Encoder{Params{t, v, w, e, d, l}}, nil
-}
-
-// Encode converts an odd integer k into a valid power for exponentiation.
-func (m Encoder) Encode(k []byte) (*Power, error) {
- if len(k) == 0 {
- return nil, errors.New("empty slice")
- }
- if !(len(k) <= int(m.p.L+7)>>3) {
- return nil, errors.New("k too big")
- }
- if k[0]%2 == 0 {
- return nil, errors.New("k must be odd")
- }
- ap := int((m.p.L+7)/8) - len(k)
- k = append(k, make([]byte, ap)...)
- s := m.signs(k)
- b := make([]int32, m.p.L-m.p.D)
- c := conv.BytesLe2BigInt(k)
- c.Rsh(c, m.p.D)
- var bi big.Int
- for i := m.p.D; i < m.p.L; i++ {
- c0 := int32(c.Bit(0))
- b[i-m.p.D] = s[i%m.p.D] * c0
- bi.SetInt64(int64(b[i-m.p.D] >> 1))
- c.Rsh(c, 1)
- c.Sub(c, &bi)
- }
- carry := int(c.Int64())
- return &Power{m, s, b, carry}, nil
-}
-
-// signs calculates the set of signs.
-func (m Encoder) signs(k []byte) []int32 {
- s := make([]int32, m.p.D)
- s[m.p.D-1] = 1
- for i := uint(1); i < m.p.D; i++ {
- ki := int32((k[i>>3] >> (i & 0x7)) & 0x1)
- s[i-1] = 2*ki - 1
- }
- return s
-}
-
-// GetParams returns the complementary parameters of the encoding.
-func (m Encoder) GetParams() Params { return m.p }
-
-// tableSize returns the size of each table.
-func (m Encoder) tableSize() uint { return 1 << (m.p.W - 1) }
-
-// Elts returns the total number of elements that must be precomputed.
-func (m Encoder) Elts() uint { return m.p.V * m.tableSize() }
-
-// IsExtended returns true if the element x^(2^(wd)) must be calculated.
-func (m Encoder) IsExtended() bool { q := m.p.T / (m.p.V * m.p.W); return m.p.T == q*m.p.V*m.p.W }
-
-// Ops returns the number of squares and multiplications executed during an exponentiation.
-func (m Encoder) Ops() (S uint, M uint) {
- S = m.p.E
- M = m.p.E * m.p.V
- if m.IsExtended() {
- M++
- }
- return
-}
-
-func (m Encoder) String() string {
- return fmt.Sprintf("T: %v W: %v V: %v e: %v d: %v l: %v wv|t: %v",
- m.p.T, m.p.W, m.p.V, m.p.E, m.p.D, m.p.L, m.IsExtended())
-}
diff --git a/vendor/github.com/cloudflare/circl/math/mlsbset/power.go b/vendor/github.com/cloudflare/circl/math/mlsbset/power.go
deleted file mode 100644
index 3f214c3046a..00000000000
--- a/vendor/github.com/cloudflare/circl/math/mlsbset/power.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package mlsbset
-
-import "fmt"
-
-// Power is a valid exponent produced by the MLSBSet encoding algorithm.
-type Power struct {
- set Encoder // parameters of code.
- s []int32 // set of signs.
- b []int32 // set of digits.
- c int // carry is {0,1}.
-}
-
-// Exp is calculates x^k, where x is a predetermined element of a group G.
-func (p *Power) Exp(G Group) EltG {
- a, b := G.Identity(), G.NewEltP()
- for e := int(p.set.p.E - 1); e >= 0; e-- {
- G.Sqr(a)
- for v := uint(0); v < p.set.p.V; v++ {
- sgnElt, idElt := p.Digit(v, uint(e))
- G.Lookup(b, v, sgnElt, idElt)
- G.Mul(a, b)
- }
- }
- if p.set.IsExtended() && p.c == 1 {
- G.Mul(a, G.ExtendedEltP())
- }
- return a
-}
-
-// Digit returns the (v,e)-th digit and its sign.
-func (p *Power) Digit(v, e uint) (sgn, dig int32) {
- sgn = p.bit(0, v, e)
- dig = 0
- for i := p.set.p.W - 1; i > 0; i-- {
- dig = 2*dig + p.bit(i, v, e)
- }
- mask := dig >> 31
- dig = (dig + mask) ^ mask
- return sgn, dig
-}
-
-// bit returns the (w,v,e)-th bit of the code.
-func (p *Power) bit(w, v, e uint) int32 {
- if !(w < p.set.p.W &&
- v < p.set.p.V &&
- e < p.set.p.E) {
- panic(fmt.Errorf("indexes outside (%v,%v,%v)", w, v, e))
- }
- if w == 0 {
- return p.s[p.set.p.E*v+e]
- }
- return p.b[p.set.p.D*(w-1)+p.set.p.E*v+e]
-}
-
-func (p *Power) String() string {
- dig := ""
- for j := uint(0); j < p.set.p.V; j++ {
- for i := uint(0); i < p.set.p.E; i++ {
- s, d := p.Digit(j, i)
- dig += fmt.Sprintf("(%2v,%2v) = %+2v %+2v\n", j, i, s, d)
- }
- }
- return fmt.Sprintf("len: %v\ncarry: %v\ndigits:\n%v", len(p.b)+len(p.s), p.c, dig)
-}
diff --git a/vendor/github.com/cloudflare/circl/math/primes.go b/vendor/github.com/cloudflare/circl/math/primes.go
deleted file mode 100644
index 158fd83a7aa..00000000000
--- a/vendor/github.com/cloudflare/circl/math/primes.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package math
-
-import (
- "crypto/rand"
- "io"
- "math/big"
-)
-
-// IsSafePrime reports whether p is (probably) a safe prime.
-// The prime p=2*q+1 is safe prime if both p and q are primes.
-// Note that ProbablyPrime is not suitable for judging primes
-// that an adversary may have crafted to fool the test.
-func IsSafePrime(p *big.Int) bool {
- pdiv2 := new(big.Int).Rsh(p, 1)
- return p.ProbablyPrime(20) && pdiv2.ProbablyPrime(20)
-}
-
-// SafePrime returns a number of the given bit length that is a safe prime with high probability.
-// The number returned p=2*q+1 is a safe prime if both p and q are primes.
-// SafePrime will return error for any error returned by rand.Read or if bits < 2.
-func SafePrime(random io.Reader, bits int) (*big.Int, error) {
- one := big.NewInt(1)
- p := new(big.Int)
- for {
- q, err := rand.Prime(random, bits-1)
- if err != nil {
- return nil, err
- }
- p.Lsh(q, 1).Add(p, one)
- if p.ProbablyPrime(20) {
- return p, nil
- }
- }
-}
diff --git a/vendor/github.com/cloudflare/circl/math/wnaf.go b/vendor/github.com/cloudflare/circl/math/wnaf.go
deleted file mode 100644
index 94a1ec50429..00000000000
--- a/vendor/github.com/cloudflare/circl/math/wnaf.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Package math provides some utility functions for big integers.
-package math
-
-import "math/big"
-
-// SignedDigit obtains the signed-digit recoding of n and returns a list L of
-// digits such that n = sum( L[i]*2^(i*(w-1)) ), and each L[i] is an odd number
-// in the set {±1, ±3, ..., ±2^(w-1)-1}. The third parameter ensures that the
-// output has ceil(l/(w-1)) digits.
-//
-// Restrictions:
-// - n is odd and n > 0.
-// - 1 < w < 32.
-// - l >= bit length of n.
-//
-// References:
-// - Alg.6 in "Exponent Recoding and Regular Exponentiation Algorithms"
-// by Joye-Tunstall. http://doi.org/10.1007/978-3-642-02384-2_21
-// - Alg.6 in "Selecting Elliptic Curves for Cryptography: An Efficiency and
-// Security Analysis" by Bos et al. http://doi.org/10.1007/s13389-015-0097-y
-func SignedDigit(n *big.Int, w, l uint) []int32 {
- if n.Sign() <= 0 || n.Bit(0) == 0 {
- panic("n must be non-zero, odd, and positive")
- }
- if w <= 1 || w >= 32 {
- panic("Verify that 1 < w < 32")
- }
- if uint(n.BitLen()) > l {
- panic("n is too big to fit in l digits")
- }
- lenN := (l + (w - 1) - 1) / (w - 1) // ceil(l/(w-1))
- L := make([]int32, lenN+1)
- var k, v big.Int
- k.Set(n)
-
- var i uint
- for i = 0; i < lenN; i++ {
- words := k.Bits()
- value := int32(words[0] & ((1 << w) - 1))
- value -= int32(1) << (w - 1)
- L[i] = value
- v.SetInt64(int64(value))
- k.Sub(&k, &v)
- k.Rsh(&k, w-1)
- }
- L[i] = int32(k.Int64())
- return L
-}
-
-// OmegaNAF obtains the window-w Non-Adjacent Form of a positive number n and
-// 1 < w < 32. The returned slice L holds n = sum( L[i]*2^i ).
-//
-// Reference:
-// - Alg.9 "Efficient arithmetic on Koblitz curves" by Solinas.
-// http://doi.org/10.1023/A:1008306223194
-func OmegaNAF(n *big.Int, w uint) (L []int32) {
- if n.Sign() < 0 {
- panic("n must be positive")
- }
- if w <= 1 || w >= 32 {
- panic("Verify that 1 < w < 32")
- }
-
- L = make([]int32, n.BitLen()+1)
- var k, v big.Int
- k.Set(n)
-
- i := 0
- for ; k.Sign() > 0; i++ {
- value := int32(0)
- if k.Bit(0) == 1 {
- words := k.Bits()
- value = int32(words[0] & ((1 << w) - 1))
- if value >= (int32(1) << (w - 1)) {
- value -= int32(1) << w
- }
- v.SetInt64(int64(value))
- k.Sub(&k, &v)
- }
- L[i] = value
- k.Rsh(&k, 1)
- }
- return L[:i]
-}
diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/ed25519.go b/vendor/github.com/cloudflare/circl/sign/ed25519/ed25519.go
deleted file mode 100644
index 2c73c26fb1f..00000000000
--- a/vendor/github.com/cloudflare/circl/sign/ed25519/ed25519.go
+++ /dev/null
@@ -1,453 +0,0 @@
-// Package ed25519 implements Ed25519 signature scheme as described in RFC-8032.
-//
-// This package provides optimized implementations of the three signature
-// variants and maintaining closer compatibility with crypto/ed25519.
-//
-// | Scheme Name | Sign Function | Verification | Context |
-// |-------------|-------------------|---------------|-------------------|
-// | Ed25519 | Sign | Verify | None |
-// | Ed25519Ph | SignPh | VerifyPh | Yes, can be empty |
-// | Ed25519Ctx | SignWithCtx | VerifyWithCtx | Yes, non-empty |
-// | All above | (PrivateKey).Sign | VerifyAny | As above |
-//
-// Specific functions for sign and verify are defined. A generic signing
-// function for all schemes is available through the crypto.Signer interface,
-// which is implemented by the PrivateKey type. A correspond all-in-one
-// verification method is provided by the VerifyAny function.
-//
-// Signing with Ed25519Ph or Ed25519Ctx requires a context string for domain
-// separation. This parameter is passed using a SignerOptions struct defined
-// in this package. While Ed25519Ph accepts an empty context, Ed25519Ctx
-// enforces non-empty context strings.
-//
-// # Compatibility with crypto.ed25519
-//
-// These functions are compatible with the “Ed25519” function defined in
-// RFC-8032. However, unlike RFC 8032's formulation, this package's private
-// key representation includes a public key suffix to make multiple signing
-// operations with the same key more efficient. This package refers to the
-// RFC-8032 private key as the “seed”.
-//
-// References
-//
-// - RFC-8032: https://rfc-editor.org/rfc/rfc8032.txt
-// - Ed25519: https://ed25519.cr.yp.to/
-// - EdDSA: High-speed high-security signatures. https://doi.org/10.1007/s13389-012-0027-1
-package ed25519
-
-import (
- "bytes"
- "crypto"
- cryptoRand "crypto/rand"
- "crypto/sha512"
- "crypto/subtle"
- "errors"
- "fmt"
- "io"
- "strconv"
-
- "github.com/cloudflare/circl/sign"
-)
-
-const (
- // ContextMaxSize is the maximum length (in bytes) allowed for context.
- ContextMaxSize = 255
- // PublicKeySize is the size, in bytes, of public keys as used in this package.
- PublicKeySize = 32
- // PrivateKeySize is the size, in bytes, of private keys as used in this package.
- PrivateKeySize = 64
- // SignatureSize is the size, in bytes, of signatures generated and verified by this package.
- SignatureSize = 64
- // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
- SeedSize = 32
-)
-
-const (
- paramB = 256 / 8 // Size of keys in bytes.
-)
-
-// SignerOptions implements crypto.SignerOpts and augments with parameters
-// that are specific to the Ed25519 signature schemes.
-type SignerOptions struct {
- // Hash must be crypto.Hash(0) for Ed25519/Ed25519ctx, or crypto.SHA512
- // for Ed25519ph.
- crypto.Hash
-
- // Context is an optional domain separation string for Ed25519ph and a
- // must for Ed25519ctx. Its length must be less or equal than 255 bytes.
- Context string
-
- // Scheme is an identifier for choosing a signature scheme. The zero value
- // is ED25519.
- Scheme SchemeID
-}
-
-// SchemeID is an identifier for each signature scheme.
-type SchemeID uint
-
-const (
- ED25519 SchemeID = iota
- ED25519Ph
- ED25519Ctx
-)
-
-// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
-type PrivateKey []byte
-
-// Equal reports whether priv and x have the same value.
-func (priv PrivateKey) Equal(x crypto.PrivateKey) bool {
- xx, ok := x.(PrivateKey)
- return ok && subtle.ConstantTimeCompare(priv, xx) == 1
-}
-
-// Public returns the PublicKey corresponding to priv.
-func (priv PrivateKey) Public() crypto.PublicKey {
- publicKey := make(PublicKey, PublicKeySize)
- copy(publicKey, priv[SeedSize:])
- return publicKey
-}
-
-// Seed returns the private key seed corresponding to priv. It is provided for
-// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds
-// in this package.
-func (priv PrivateKey) Seed() []byte {
- seed := make([]byte, SeedSize)
- copy(seed, priv[:SeedSize])
- return seed
-}
-
-func (priv PrivateKey) Scheme() sign.Scheme { return sch }
-
-func (pub PublicKey) Scheme() sign.Scheme { return sch }
-
-func (priv PrivateKey) MarshalBinary() (data []byte, err error) {
- privateKey := make(PrivateKey, PrivateKeySize)
- copy(privateKey, priv)
- return privateKey, nil
-}
-
-func (pub PublicKey) MarshalBinary() (data []byte, err error) {
- publicKey := make(PublicKey, PublicKeySize)
- copy(publicKey, pub)
- return publicKey, nil
-}
-
-// Equal reports whether pub and x have the same value.
-func (pub PublicKey) Equal(x crypto.PublicKey) bool {
- xx, ok := x.(PublicKey)
- return ok && bytes.Equal(pub, xx)
-}
-
-// Sign creates a signature of a message with priv key.
-// This function is compatible with crypto.ed25519 and also supports the
-// three signature variants defined in RFC-8032, namely Ed25519 (or pure
-// EdDSA), Ed25519Ph, and Ed25519Ctx.
-// The opts.HashFunc() must return zero to specify either Ed25519 or Ed25519Ctx
-// variant. This can be achieved by passing crypto.Hash(0) as the value for
-// opts.
-// The opts.HashFunc() must return SHA512 to specify the Ed25519Ph variant.
-// This can be achieved by passing crypto.SHA512 as the value for opts.
-// Use a SignerOptions struct (defined in this package) to pass a context
-// string for signing.
-func (priv PrivateKey) Sign(
- rand io.Reader,
- message []byte,
- opts crypto.SignerOpts,
-) (signature []byte, err error) {
- var ctx string
- var scheme SchemeID
- if o, ok := opts.(SignerOptions); ok {
- ctx = o.Context
- scheme = o.Scheme
- }
-
- switch true {
- case scheme == ED25519 && opts.HashFunc() == crypto.Hash(0):
- return Sign(priv, message), nil
- case scheme == ED25519Ph && opts.HashFunc() == crypto.SHA512:
- return SignPh(priv, message, ctx), nil
- case scheme == ED25519Ctx && opts.HashFunc() == crypto.Hash(0) && len(ctx) > 0:
- return SignWithCtx(priv, message, ctx), nil
- default:
- return nil, errors.New("ed25519: bad hash algorithm")
- }
-}
-
-// GenerateKey generates a public/private key pair using entropy from rand.
-// If rand is nil, crypto/rand.Reader will be used.
-func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
- if rand == nil {
- rand = cryptoRand.Reader
- }
-
- seed := make([]byte, SeedSize)
- if _, err := io.ReadFull(rand, seed); err != nil {
- return nil, nil, err
- }
-
- privateKey := NewKeyFromSeed(seed)
- publicKey := make(PublicKey, PublicKeySize)
- copy(publicKey, privateKey[SeedSize:])
-
- return publicKey, privateKey, nil
-}
-
-// NewKeyFromSeed calculates a private key from a seed. It will panic if
-// len(seed) is not SeedSize. This function is provided for interoperability
-// with RFC 8032. RFC 8032's private keys correspond to seeds in this
-// package.
-func NewKeyFromSeed(seed []byte) PrivateKey {
- privateKey := make(PrivateKey, PrivateKeySize)
- newKeyFromSeed(privateKey, seed)
- return privateKey
-}
-
-func newKeyFromSeed(privateKey, seed []byte) {
- if l := len(seed); l != SeedSize {
- panic("ed25519: bad seed length: " + strconv.Itoa(l))
- }
- var P pointR1
- k := sha512.Sum512(seed)
- clamp(k[:])
- reduceModOrder(k[:paramB], false)
- P.fixedMult(k[:paramB])
- copy(privateKey[:SeedSize], seed)
- _ = P.ToBytes(privateKey[SeedSize:])
-}
-
-func signAll(signature []byte, privateKey PrivateKey, message, ctx []byte, preHash bool) {
- if l := len(privateKey); l != PrivateKeySize {
- panic("ed25519: bad private key length: " + strconv.Itoa(l))
- }
-
- H := sha512.New()
- var PHM []byte
-
- if preHash {
- _, _ = H.Write(message)
- PHM = H.Sum(nil)
- H.Reset()
- } else {
- PHM = message
- }
-
- // 1. Hash the 32-byte private key using SHA-512.
- _, _ = H.Write(privateKey[:SeedSize])
- h := H.Sum(nil)
- clamp(h[:])
- prefix, s := h[paramB:], h[:paramB]
-
- // 2. Compute SHA-512(dom2(F, C) || prefix || PH(M))
- H.Reset()
-
- writeDom(H, ctx, preHash)
-
- _, _ = H.Write(prefix)
- _, _ = H.Write(PHM)
- r := H.Sum(nil)
- reduceModOrder(r[:], true)
-
- // 3. Compute the point [r]B.
- var P pointR1
- P.fixedMult(r[:paramB])
- R := (&[paramB]byte{})[:]
- if err := P.ToBytes(R); err != nil {
- panic(err)
- }
-
- // 4. Compute SHA512(dom2(F, C) || R || A || PH(M)).
- H.Reset()
-
- writeDom(H, ctx, preHash)
-
- _, _ = H.Write(R)
- _, _ = H.Write(privateKey[SeedSize:])
- _, _ = H.Write(PHM)
- hRAM := H.Sum(nil)
-
- reduceModOrder(hRAM[:], true)
-
- // 5. Compute S = (r + k * s) mod order.
- S := (&[paramB]byte{})[:]
- calculateS(S, r[:paramB], hRAM[:paramB], s)
-
- // 6. The signature is the concatenation of R and S.
- copy(signature[:paramB], R[:])
- copy(signature[paramB:], S[:])
-}
-
-// Sign signs the message with privateKey and returns a signature.
-// This function supports the signature variant defined in RFC-8032: Ed25519,
-// also known as the pure version of EdDSA.
-// It will panic if len(privateKey) is not PrivateKeySize.
-func Sign(privateKey PrivateKey, message []byte) []byte {
- signature := make([]byte, SignatureSize)
- signAll(signature, privateKey, message, []byte(""), false)
- return signature
-}
-
-// SignPh creates a signature of a message with private key and context.
-// This function supports the signature variant defined in RFC-8032: Ed25519ph,
-// meaning it internally hashes the message using SHA-512, and optionally
-// accepts a context string.
-// It will panic if len(privateKey) is not PrivateKeySize.
-// Context could be passed to this function, which length should be no more than
-// ContextMaxSize=255. It can be empty.
-func SignPh(privateKey PrivateKey, message []byte, ctx string) []byte {
- if len(ctx) > ContextMaxSize {
- panic(fmt.Errorf("ed25519: bad context length: %v", len(ctx)))
- }
-
- signature := make([]byte, SignatureSize)
- signAll(signature, privateKey, message, []byte(ctx), true)
- return signature
-}
-
-// SignWithCtx creates a signature of a message with private key and context.
-// This function supports the signature variant defined in RFC-8032: Ed25519ctx,
-// meaning it accepts a non-empty context string.
-// It will panic if len(privateKey) is not PrivateKeySize.
-// Context must be passed to this function, which length should be no more than
-// ContextMaxSize=255 and cannot be empty.
-func SignWithCtx(privateKey PrivateKey, message []byte, ctx string) []byte {
- if len(ctx) == 0 || len(ctx) > ContextMaxSize {
- panic(fmt.Errorf("ed25519: bad context length: %v > %v", len(ctx), ContextMaxSize))
- }
-
- signature := make([]byte, SignatureSize)
- signAll(signature, privateKey, message, []byte(ctx), false)
- return signature
-}
-
-func verify(public PublicKey, message, signature, ctx []byte, preHash bool) bool {
- if len(public) != PublicKeySize ||
- len(signature) != SignatureSize ||
- !isLessThanOrder(signature[paramB:]) {
- return false
- }
-
- var P pointR1
- if ok := P.FromBytes(public); !ok {
- return false
- }
-
- H := sha512.New()
- var PHM []byte
-
- if preHash {
- _, _ = H.Write(message)
- PHM = H.Sum(nil)
- H.Reset()
- } else {
- PHM = message
- }
-
- R := signature[:paramB]
-
- writeDom(H, ctx, preHash)
-
- _, _ = H.Write(R)
- _, _ = H.Write(public)
- _, _ = H.Write(PHM)
- hRAM := H.Sum(nil)
- reduceModOrder(hRAM[:], true)
-
- var Q pointR1
- encR := (&[paramB]byte{})[:]
- P.neg()
- Q.doubleMult(&P, signature[paramB:], hRAM[:paramB])
- _ = Q.ToBytes(encR)
- return bytes.Equal(R, encR)
-}
-
-// VerifyAny returns true if the signature is valid. Failure cases are invalid
-// signature, or when the public key cannot be decoded.
-// This function supports all the three signature variants defined in RFC-8032,
-// namely Ed25519 (or pure EdDSA), Ed25519Ph, and Ed25519Ctx.
-// The opts.HashFunc() must return zero to specify either Ed25519 or Ed25519Ctx
-// variant. This can be achieved by passing crypto.Hash(0) as the value for opts.
-// The opts.HashFunc() must return SHA512 to specify the Ed25519Ph variant.
-// This can be achieved by passing crypto.SHA512 as the value for opts.
-// Use a SignerOptions struct to pass a context string for signing.
-func VerifyAny(public PublicKey, message, signature []byte, opts crypto.SignerOpts) bool {
- var ctx string
- var scheme SchemeID
- if o, ok := opts.(SignerOptions); ok {
- ctx = o.Context
- scheme = o.Scheme
- }
-
- switch true {
- case scheme == ED25519 && opts.HashFunc() == crypto.Hash(0):
- return Verify(public, message, signature)
- case scheme == ED25519Ph && opts.HashFunc() == crypto.SHA512:
- return VerifyPh(public, message, signature, ctx)
- case scheme == ED25519Ctx && opts.HashFunc() == crypto.Hash(0) && len(ctx) > 0:
- return VerifyWithCtx(public, message, signature, ctx)
- default:
- return false
- }
-}
-
-// Verify returns true if the signature is valid. Failure cases are invalid
-// signature, or when the public key cannot be decoded.
-// This function supports the signature variant defined in RFC-8032: Ed25519,
-// also known as the pure version of EdDSA.
-func Verify(public PublicKey, message, signature []byte) bool {
- return verify(public, message, signature, []byte(""), false)
-}
-
-// VerifyPh returns true if the signature is valid. Failure cases are invalid
-// signature, or when the public key cannot be decoded.
-// This function supports the signature variant defined in RFC-8032: Ed25519ph,
-// meaning it internally hashes the message using SHA-512.
-// Context could be passed to this function, which length should be no more than
-// 255. It can be empty.
-func VerifyPh(public PublicKey, message, signature []byte, ctx string) bool {
- return verify(public, message, signature, []byte(ctx), true)
-}
-
-// VerifyWithCtx returns true if the signature is valid. Failure cases are invalid
-// signature, or when the public key cannot be decoded, or when context is
-// not provided.
-// This function supports the signature variant defined in RFC-8032: Ed25519ctx,
-// meaning it does not handle prehashed messages. Non-empty context string must be
-// provided, and must not be more than 255 of length.
-func VerifyWithCtx(public PublicKey, message, signature []byte, ctx string) bool {
- if len(ctx) == 0 || len(ctx) > ContextMaxSize {
- return false
- }
-
- return verify(public, message, signature, []byte(ctx), false)
-}
-
-func clamp(k []byte) {
- k[0] &= 248
- k[paramB-1] = (k[paramB-1] & 127) | 64
-}
-
-// isLessThanOrder returns true if 0 <= x < order.
-func isLessThanOrder(x []byte) bool {
- i := len(order) - 1
- for i > 0 && x[i] == order[i] {
- i--
- }
- return x[i] < order[i]
-}
-
-func writeDom(h io.Writer, ctx []byte, preHash bool) {
- dom2 := "SigEd25519 no Ed25519 collisions"
-
- if len(ctx) > 0 {
- _, _ = h.Write([]byte(dom2))
- if preHash {
- _, _ = h.Write([]byte{byte(0x01), byte(len(ctx))})
- } else {
- _, _ = h.Write([]byte{byte(0x00), byte(len(ctx))})
- }
- _, _ = h.Write(ctx)
- } else if preHash {
- _, _ = h.Write([]byte(dom2))
- _, _ = h.Write([]byte{0x01, 0x00})
- }
-}
diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/modular.go b/vendor/github.com/cloudflare/circl/sign/ed25519/modular.go
deleted file mode 100644
index 10efafdcafb..00000000000
--- a/vendor/github.com/cloudflare/circl/sign/ed25519/modular.go
+++ /dev/null
@@ -1,175 +0,0 @@
-package ed25519
-
-import (
- "encoding/binary"
- "math/bits"
-)
-
-var order = [paramB]byte{
- 0xed, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58,
- 0xd6, 0x9c, 0xf7, 0xa2, 0xde, 0xf9, 0xde, 0x14,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,
-}
-
-// isLessThan returns true if 0 <= x < y, and assumes that slices have the same length.
-func isLessThan(x, y []byte) bool {
- i := len(x) - 1
- for i > 0 && x[i] == y[i] {
- i--
- }
- return x[i] < y[i]
-}
-
-// reduceModOrder calculates k = k mod order of the curve.
-func reduceModOrder(k []byte, is512Bit bool) {
- var X [((2 * paramB) * 8) / 64]uint64
- numWords := len(k) >> 3
- for i := 0; i < numWords; i++ {
- X[i] = binary.LittleEndian.Uint64(k[i*8 : (i+1)*8])
- }
- red512(&X, is512Bit)
- for i := 0; i < numWords; i++ {
- binary.LittleEndian.PutUint64(k[i*8:(i+1)*8], X[i])
- }
-}
-
-// red512 calculates x = x mod Order of the curve.
-func red512(x *[8]uint64, full bool) {
- // Implementation of Algs.(14.47)+(14.52) of Handbook of Applied
- // Cryptography, by A. Menezes, P. van Oorschot, and S. Vanstone.
- const (
- ell0 = uint64(0x5812631a5cf5d3ed)
- ell1 = uint64(0x14def9dea2f79cd6)
- ell160 = uint64(0x812631a5cf5d3ed0)
- ell161 = uint64(0x4def9dea2f79cd65)
- ell162 = uint64(0x0000000000000001)
- )
-
- var c0, c1, c2, c3 uint64
- r0, r1, r2, r3, r4 := x[0], x[1], x[2], x[3], uint64(0)
-
- if full {
- q0, q1, q2, q3 := x[4], x[5], x[6], x[7]
-
- for i := 0; i < 3; i++ {
- h0, s0 := bits.Mul64(q0, ell160)
- h1, s1 := bits.Mul64(q1, ell160)
- h2, s2 := bits.Mul64(q2, ell160)
- h3, s3 := bits.Mul64(q3, ell160)
-
- s1, c0 = bits.Add64(h0, s1, 0)
- s2, c1 = bits.Add64(h1, s2, c0)
- s3, c2 = bits.Add64(h2, s3, c1)
- s4, _ := bits.Add64(h3, 0, c2)
-
- h0, l0 := bits.Mul64(q0, ell161)
- h1, l1 := bits.Mul64(q1, ell161)
- h2, l2 := bits.Mul64(q2, ell161)
- h3, l3 := bits.Mul64(q3, ell161)
-
- l1, c0 = bits.Add64(h0, l1, 0)
- l2, c1 = bits.Add64(h1, l2, c0)
- l3, c2 = bits.Add64(h2, l3, c1)
- l4, _ := bits.Add64(h3, 0, c2)
-
- s1, c0 = bits.Add64(s1, l0, 0)
- s2, c1 = bits.Add64(s2, l1, c0)
- s3, c2 = bits.Add64(s3, l2, c1)
- s4, c3 = bits.Add64(s4, l3, c2)
- s5, s6 := bits.Add64(l4, 0, c3)
-
- s2, c0 = bits.Add64(s2, q0, 0)
- s3, c1 = bits.Add64(s3, q1, c0)
- s4, c2 = bits.Add64(s4, q2, c1)
- s5, c3 = bits.Add64(s5, q3, c2)
- s6, s7 := bits.Add64(s6, 0, c3)
-
- q := q0 | q1 | q2 | q3
- m := -((q | -q) >> 63) // if q=0 then m=0...0 else m=1..1
- s0 &= m
- s1 &= m
- s2 &= m
- s3 &= m
- q0, q1, q2, q3 = s4, s5, s6, s7
-
- if (i+1)%2 == 0 {
- r0, c0 = bits.Add64(r0, s0, 0)
- r1, c1 = bits.Add64(r1, s1, c0)
- r2, c2 = bits.Add64(r2, s2, c1)
- r3, c3 = bits.Add64(r3, s3, c2)
- r4, _ = bits.Add64(r4, 0, c3)
- } else {
- r0, c0 = bits.Sub64(r0, s0, 0)
- r1, c1 = bits.Sub64(r1, s1, c0)
- r2, c2 = bits.Sub64(r2, s2, c1)
- r3, c3 = bits.Sub64(r3, s3, c2)
- r4, _ = bits.Sub64(r4, 0, c3)
- }
- }
-
- m := -(r4 >> 63)
- r0, c0 = bits.Add64(r0, m&ell160, 0)
- r1, c1 = bits.Add64(r1, m&ell161, c0)
- r2, c2 = bits.Add64(r2, m&ell162, c1)
- r3, c3 = bits.Add64(r3, 0, c2)
- r4, _ = bits.Add64(r4, m&1, c3)
- x[4], x[5], x[6], x[7] = 0, 0, 0, 0
- }
-
- q0 := (r4 << 4) | (r3 >> 60)
- r3 &= (uint64(1) << 60) - 1
-
- h0, s0 := bits.Mul64(ell0, q0)
- h1, s1 := bits.Mul64(ell1, q0)
- s1, c0 = bits.Add64(h0, s1, 0)
- s2, _ := bits.Add64(h1, 0, c0)
-
- r0, c0 = bits.Sub64(r0, s0, 0)
- r1, c1 = bits.Sub64(r1, s1, c0)
- r2, c2 = bits.Sub64(r2, s2, c1)
- r3, _ = bits.Sub64(r3, 0, c2)
-
- x[0], x[1], x[2], x[3] = r0, r1, r2, r3
-}
-
-// calculateS performs s = r+k*a mod Order of the curve.
-func calculateS(s, r, k, a []byte) {
- K := [4]uint64{
- binary.LittleEndian.Uint64(k[0*8 : 1*8]),
- binary.LittleEndian.Uint64(k[1*8 : 2*8]),
- binary.LittleEndian.Uint64(k[2*8 : 3*8]),
- binary.LittleEndian.Uint64(k[3*8 : 4*8]),
- }
- S := [8]uint64{
- binary.LittleEndian.Uint64(r[0*8 : 1*8]),
- binary.LittleEndian.Uint64(r[1*8 : 2*8]),
- binary.LittleEndian.Uint64(r[2*8 : 3*8]),
- binary.LittleEndian.Uint64(r[3*8 : 4*8]),
- }
- var c3 uint64
- for i := range K {
- ai := binary.LittleEndian.Uint64(a[i*8 : (i+1)*8])
-
- h0, l0 := bits.Mul64(K[0], ai)
- h1, l1 := bits.Mul64(K[1], ai)
- h2, l2 := bits.Mul64(K[2], ai)
- h3, l3 := bits.Mul64(K[3], ai)
-
- l1, c0 := bits.Add64(h0, l1, 0)
- l2, c1 := bits.Add64(h1, l2, c0)
- l3, c2 := bits.Add64(h2, l3, c1)
- l4, _ := bits.Add64(h3, 0, c2)
-
- S[i+0], c0 = bits.Add64(S[i+0], l0, 0)
- S[i+1], c1 = bits.Add64(S[i+1], l1, c0)
- S[i+2], c2 = bits.Add64(S[i+2], l2, c1)
- S[i+3], c3 = bits.Add64(S[i+3], l3, c2)
- S[i+4], _ = bits.Add64(S[i+4], l4, c3)
- }
- red512(&S, true)
- binary.LittleEndian.PutUint64(s[0*8:1*8], S[0])
- binary.LittleEndian.PutUint64(s[1*8:2*8], S[1])
- binary.LittleEndian.PutUint64(s[2*8:3*8], S[2])
- binary.LittleEndian.PutUint64(s[3*8:4*8], S[3])
-}
diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/mult.go b/vendor/github.com/cloudflare/circl/sign/ed25519/mult.go
deleted file mode 100644
index 3216aae303c..00000000000
--- a/vendor/github.com/cloudflare/circl/sign/ed25519/mult.go
+++ /dev/null
@@ -1,180 +0,0 @@
-package ed25519
-
-import (
- "crypto/subtle"
- "encoding/binary"
- "math/bits"
-
- "github.com/cloudflare/circl/internal/conv"
- "github.com/cloudflare/circl/math"
- fp "github.com/cloudflare/circl/math/fp25519"
-)
-
-var paramD = fp.Elt{
- 0xa3, 0x78, 0x59, 0x13, 0xca, 0x4d, 0xeb, 0x75,
- 0xab, 0xd8, 0x41, 0x41, 0x4d, 0x0a, 0x70, 0x00,
- 0x98, 0xe8, 0x79, 0x77, 0x79, 0x40, 0xc7, 0x8c,
- 0x73, 0xfe, 0x6f, 0x2b, 0xee, 0x6c, 0x03, 0x52,
-}
-
-// mLSBRecoding parameters.
-const (
- fxT = 257
- fxV = 2
- fxW = 3
- fx2w1 = 1 << (uint(fxW) - 1)
- numWords64 = (paramB * 8 / 64)
-)
-
-// mLSBRecoding is the odd-only modified LSB-set.
-//
-// Reference:
-//
-// "Efficient and secure algorithms for GLV-based scalar multiplication and
-// their implementation on GLV–GLS curves" by (Faz-Hernandez et al.)
-// http://doi.org/10.1007/s13389-014-0085-7.
-func mLSBRecoding(L []int8, k []byte) {
- const ee = (fxT + fxW*fxV - 1) / (fxW * fxV)
- const dd = ee * fxV
- const ll = dd * fxW
- if len(L) == (ll + 1) {
- var m [numWords64 + 1]uint64
- for i := 0; i < numWords64; i++ {
- m[i] = binary.LittleEndian.Uint64(k[8*i : 8*i+8])
- }
- condAddOrderN(&m)
- L[dd-1] = 1
- for i := 0; i < dd-1; i++ {
- kip1 := (m[(i+1)/64] >> (uint(i+1) % 64)) & 0x1
- L[i] = int8(kip1<<1) - 1
- }
- { // right-shift by d
- right := uint(dd % 64)
- left := uint(64) - right
- lim := ((numWords64+1)*64 - dd) / 64
- j := dd / 64
- for i := 0; i < lim; i++ {
- m[i] = (m[i+j] >> right) | (m[i+j+1] << left)
- }
- m[lim] = m[lim+j] >> right
- }
- for i := dd; i < ll; i++ {
- L[i] = L[i%dd] * int8(m[0]&0x1)
- div2subY(m[:], int64(L[i]>>1), numWords64)
- }
- L[ll] = int8(m[0])
- }
-}
-
-// absolute returns always a positive value.
-func absolute(x int32) int32 {
- mask := x >> 31
- return (x + mask) ^ mask
-}
-
-// condAddOrderN updates x = x+order if x is even, otherwise x remains unchanged.
-func condAddOrderN(x *[numWords64 + 1]uint64) {
- isOdd := (x[0] & 0x1) - 1
- c := uint64(0)
- for i := 0; i < numWords64; i++ {
- orderWord := binary.LittleEndian.Uint64(order[8*i : 8*i+8])
- o := isOdd & orderWord
- x0, c0 := bits.Add64(x[i], o, c)
- x[i] = x0
- c = c0
- }
- x[numWords64], _ = bits.Add64(x[numWords64], 0, c)
-}
-
-// div2subY update x = (x/2) - y.
-func div2subY(x []uint64, y int64, l int) {
- s := uint64(y >> 63)
- for i := 0; i < l-1; i++ {
- x[i] = (x[i] >> 1) | (x[i+1] << 63)
- }
- x[l-1] = (x[l-1] >> 1)
-
- b := uint64(0)
- x0, b0 := bits.Sub64(x[0], uint64(y), b)
- x[0] = x0
- b = b0
- for i := 1; i < l-1; i++ {
- x0, b0 := bits.Sub64(x[i], s, b)
- x[i] = x0
- b = b0
- }
- x[l-1], _ = bits.Sub64(x[l-1], s, b)
-}
-
-func (P *pointR1) fixedMult(scalar []byte) {
- if len(scalar) != paramB {
- panic("wrong scalar size")
- }
- const ee = (fxT + fxW*fxV - 1) / (fxW * fxV)
- const dd = ee * fxV
- const ll = dd * fxW
-
- L := make([]int8, ll+1)
- mLSBRecoding(L[:], scalar)
- S := &pointR3{}
- P.SetIdentity()
- for ii := ee - 1; ii >= 0; ii-- {
- P.double()
- for j := 0; j < fxV; j++ {
- dig := L[fxW*dd-j*ee+ii-ee]
- for i := (fxW-1)*dd - j*ee + ii - ee; i >= (2*dd - j*ee + ii - ee); i = i - dd {
- dig = 2*dig + L[i]
- }
- idx := absolute(int32(dig))
- sig := L[dd-j*ee+ii-ee]
- Tabj := &tabSign[fxV-j-1]
- for k := 0; k < fx2w1; k++ {
- S.cmov(&Tabj[k], subtle.ConstantTimeEq(int32(k), idx))
- }
- S.cneg(subtle.ConstantTimeEq(int32(sig), -1))
- P.mixAdd(S)
- }
- }
-}
-
-const (
- omegaFix = 7
- omegaVar = 5
-)
-
-// doubleMult returns P=mG+nQ.
-func (P *pointR1) doubleMult(Q *pointR1, m, n []byte) {
- nafFix := math.OmegaNAF(conv.BytesLe2BigInt(m), omegaFix)
- nafVar := math.OmegaNAF(conv.BytesLe2BigInt(n), omegaVar)
-
- if len(nafFix) > len(nafVar) {
- nafVar = append(nafVar, make([]int32, len(nafFix)-len(nafVar))...)
- } else if len(nafFix) < len(nafVar) {
- nafFix = append(nafFix, make([]int32, len(nafVar)-len(nafFix))...)
- }
-
- var TabQ [1 << (omegaVar - 2)]pointR2
- Q.oddMultiples(TabQ[:])
- P.SetIdentity()
- for i := len(nafFix) - 1; i >= 0; i-- {
- P.double()
- // Generator point
- if nafFix[i] != 0 {
- idxM := absolute(nafFix[i]) >> 1
- R := tabVerif[idxM]
- if nafFix[i] < 0 {
- R.neg()
- }
- P.mixAdd(&R)
- }
- // Variable input point
- if nafVar[i] != 0 {
- idxN := absolute(nafVar[i]) >> 1
- S := TabQ[idxN]
- if nafVar[i] < 0 {
- S.neg()
- }
- P.add(&S)
- }
- }
-}
diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/point.go b/vendor/github.com/cloudflare/circl/sign/ed25519/point.go
deleted file mode 100644
index 374a69503c3..00000000000
--- a/vendor/github.com/cloudflare/circl/sign/ed25519/point.go
+++ /dev/null
@@ -1,195 +0,0 @@
-package ed25519
-
-import fp "github.com/cloudflare/circl/math/fp25519"
-
-type (
- pointR1 struct{ x, y, z, ta, tb fp.Elt }
- pointR2 struct {
- pointR3
- z2 fp.Elt
- }
-)
-type pointR3 struct{ addYX, subYX, dt2 fp.Elt }
-
-func (P *pointR1) neg() {
- fp.Neg(&P.x, &P.x)
- fp.Neg(&P.ta, &P.ta)
-}
-
-func (P *pointR1) SetIdentity() {
- P.x = fp.Elt{}
- fp.SetOne(&P.y)
- fp.SetOne(&P.z)
- P.ta = fp.Elt{}
- P.tb = fp.Elt{}
-}
-
-func (P *pointR1) toAffine() {
- fp.Inv(&P.z, &P.z)
- fp.Mul(&P.x, &P.x, &P.z)
- fp.Mul(&P.y, &P.y, &P.z)
- fp.Modp(&P.x)
- fp.Modp(&P.y)
- fp.SetOne(&P.z)
- P.ta = P.x
- P.tb = P.y
-}
-
-func (P *pointR1) ToBytes(k []byte) error {
- P.toAffine()
- var x [fp.Size]byte
- err := fp.ToBytes(k[:fp.Size], &P.y)
- if err != nil {
- return err
- }
- err = fp.ToBytes(x[:], &P.x)
- if err != nil {
- return err
- }
- b := x[0] & 1
- k[paramB-1] = k[paramB-1] | (b << 7)
- return nil
-}
-
-func (P *pointR1) FromBytes(k []byte) bool {
- if len(k) != paramB {
- panic("wrong size")
- }
- signX := k[paramB-1] >> 7
- copy(P.y[:], k[:fp.Size])
- P.y[fp.Size-1] &= 0x7F
- p := fp.P()
- if !isLessThan(P.y[:], p[:]) {
- return false
- }
-
- one, u, v := &fp.Elt{}, &fp.Elt{}, &fp.Elt{}
- fp.SetOne(one)
- fp.Sqr(u, &P.y) // u = y^2
- fp.Mul(v, u, ¶mD) // v = dy^2
- fp.Sub(u, u, one) // u = y^2-1
- fp.Add(v, v, one) // v = dy^2+1
- isQR := fp.InvSqrt(&P.x, u, v) // x = sqrt(u/v)
- if !isQR {
- return false
- }
- fp.Modp(&P.x) // x = x mod p
- if fp.IsZero(&P.x) && signX == 1 {
- return false
- }
- if signX != (P.x[0] & 1) {
- fp.Neg(&P.x, &P.x)
- }
- P.ta = P.x
- P.tb = P.y
- fp.SetOne(&P.z)
- return true
-}
-
-// double calculates 2P for curves with A=-1.
-func (P *pointR1) double() {
- Px, Py, Pz, Pta, Ptb := &P.x, &P.y, &P.z, &P.ta, &P.tb
- a, b, c, e, f, g, h := Px, Py, Pz, Pta, Px, Py, Ptb
- fp.Add(e, Px, Py) // x+y
- fp.Sqr(a, Px) // A = x^2
- fp.Sqr(b, Py) // B = y^2
- fp.Sqr(c, Pz) // z^2
- fp.Add(c, c, c) // C = 2*z^2
- fp.Add(h, a, b) // H = A+B
- fp.Sqr(e, e) // (x+y)^2
- fp.Sub(e, e, h) // E = (x+y)^2-A-B
- fp.Sub(g, b, a) // G = B-A
- fp.Sub(f, c, g) // F = C-G
- fp.Mul(Pz, f, g) // Z = F * G
- fp.Mul(Px, e, f) // X = E * F
- fp.Mul(Py, g, h) // Y = G * H, T = E * H
-}
-
-func (P *pointR1) mixAdd(Q *pointR3) {
- fp.Add(&P.z, &P.z, &P.z) // D = 2*z1
- P.coreAddition(Q)
-}
-
-func (P *pointR1) add(Q *pointR2) {
- fp.Mul(&P.z, &P.z, &Q.z2) // D = 2*z1*z2
- P.coreAddition(&Q.pointR3)
-}
-
-// coreAddition calculates P=P+Q for curves with A=-1.
-func (P *pointR1) coreAddition(Q *pointR3) {
- Px, Py, Pz, Pta, Ptb := &P.x, &P.y, &P.z, &P.ta, &P.tb
- addYX2, subYX2, dt2 := &Q.addYX, &Q.subYX, &Q.dt2
- a, b, c, d, e, f, g, h := Px, Py, &fp.Elt{}, Pz, Pta, Px, Py, Ptb
- fp.Mul(c, Pta, Ptb) // t1 = ta*tb
- fp.Sub(h, Py, Px) // y1-x1
- fp.Add(b, Py, Px) // y1+x1
- fp.Mul(a, h, subYX2) // A = (y1-x1)*(y2-x2)
- fp.Mul(b, b, addYX2) // B = (y1+x1)*(y2+x2)
- fp.Mul(c, c, dt2) // C = 2*D*t1*t2
- fp.Sub(e, b, a) // E = B-A
- fp.Add(h, b, a) // H = B+A
- fp.Sub(f, d, c) // F = D-C
- fp.Add(g, d, c) // G = D+C
- fp.Mul(Pz, f, g) // Z = F * G
- fp.Mul(Px, e, f) // X = E * F
- fp.Mul(Py, g, h) // Y = G * H, T = E * H
-}
-
-func (P *pointR1) oddMultiples(T []pointR2) {
- var R pointR2
- n := len(T)
- T[0].fromR1(P)
- _2P := *P
- _2P.double()
- R.fromR1(&_2P)
- for i := 1; i < n; i++ {
- P.add(&R)
- T[i].fromR1(P)
- }
-}
-
-func (P *pointR1) isEqual(Q *pointR1) bool {
- l, r := &fp.Elt{}, &fp.Elt{}
- fp.Mul(l, &P.x, &Q.z)
- fp.Mul(r, &Q.x, &P.z)
- fp.Sub(l, l, r)
- b := fp.IsZero(l)
- fp.Mul(l, &P.y, &Q.z)
- fp.Mul(r, &Q.y, &P.z)
- fp.Sub(l, l, r)
- b = b && fp.IsZero(l)
- fp.Mul(l, &P.ta, &P.tb)
- fp.Mul(l, l, &Q.z)
- fp.Mul(r, &Q.ta, &Q.tb)
- fp.Mul(r, r, &P.z)
- fp.Sub(l, l, r)
- b = b && fp.IsZero(l)
- return b
-}
-
-func (P *pointR3) neg() {
- P.addYX, P.subYX = P.subYX, P.addYX
- fp.Neg(&P.dt2, &P.dt2)
-}
-
-func (P *pointR2) fromR1(Q *pointR1) {
- fp.Add(&P.addYX, &Q.y, &Q.x)
- fp.Sub(&P.subYX, &Q.y, &Q.x)
- fp.Mul(&P.dt2, &Q.ta, &Q.tb)
- fp.Mul(&P.dt2, &P.dt2, ¶mD)
- fp.Add(&P.dt2, &P.dt2, &P.dt2)
- fp.Add(&P.z2, &Q.z, &Q.z)
-}
-
-func (P *pointR3) cneg(b int) {
- t := &fp.Elt{}
- fp.Cswap(&P.addYX, &P.subYX, uint(b))
- fp.Neg(t, &P.dt2)
- fp.Cmov(&P.dt2, t, uint(b))
-}
-
-func (P *pointR3) cmov(Q *pointR3, b int) {
- fp.Cmov(&P.addYX, &Q.addYX, uint(b))
- fp.Cmov(&P.subYX, &Q.subYX, uint(b))
- fp.Cmov(&P.dt2, &Q.dt2, uint(b))
-}
diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey.go b/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey.go
deleted file mode 100644
index c3505b67ace..00000000000
--- a/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey.go
+++ /dev/null
@@ -1,9 +0,0 @@
-//go:build go1.13
-// +build go1.13
-
-package ed25519
-
-import cryptoEd25519 "crypto/ed25519"
-
-// PublicKey is the type of Ed25519 public keys.
-type PublicKey cryptoEd25519.PublicKey
diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey112.go b/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey112.go
deleted file mode 100644
index d57d86eff08..00000000000
--- a/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey112.go
+++ /dev/null
@@ -1,7 +0,0 @@
-//go:build !go1.13
-// +build !go1.13
-
-package ed25519
-
-// PublicKey is the type of Ed25519 public keys.
-type PublicKey []byte
diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/signapi.go b/vendor/github.com/cloudflare/circl/sign/ed25519/signapi.go
deleted file mode 100644
index e4520f52034..00000000000
--- a/vendor/github.com/cloudflare/circl/sign/ed25519/signapi.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package ed25519
-
-import (
- "crypto/rand"
- "encoding/asn1"
-
- "github.com/cloudflare/circl/sign"
-)
-
-var sch sign.Scheme = &scheme{}
-
-// Scheme returns a signature interface.
-func Scheme() sign.Scheme { return sch }
-
-type scheme struct{}
-
-func (*scheme) Name() string { return "Ed25519" }
-func (*scheme) PublicKeySize() int { return PublicKeySize }
-func (*scheme) PrivateKeySize() int { return PrivateKeySize }
-func (*scheme) SignatureSize() int { return SignatureSize }
-func (*scheme) SeedSize() int { return SeedSize }
-func (*scheme) TLSIdentifier() uint { return 0x0807 }
-func (*scheme) SupportsContext() bool { return false }
-func (*scheme) Oid() asn1.ObjectIdentifier {
- return asn1.ObjectIdentifier{1, 3, 101, 112}
-}
-
-func (*scheme) GenerateKey() (sign.PublicKey, sign.PrivateKey, error) {
- return GenerateKey(rand.Reader)
-}
-
-func (*scheme) Sign(
- sk sign.PrivateKey,
- message []byte,
- opts *sign.SignatureOpts,
-) []byte {
- priv, ok := sk.(PrivateKey)
- if !ok {
- panic(sign.ErrTypeMismatch)
- }
- if opts != nil && opts.Context != "" {
- panic(sign.ErrContextNotSupported)
- }
- return Sign(priv, message)
-}
-
-func (*scheme) Verify(
- pk sign.PublicKey,
- message, signature []byte,
- opts *sign.SignatureOpts,
-) bool {
- pub, ok := pk.(PublicKey)
- if !ok {
- panic(sign.ErrTypeMismatch)
- }
- if opts != nil {
- if opts.Context != "" {
- panic(sign.ErrContextNotSupported)
- }
- }
- return Verify(pub, message, signature)
-}
-
-func (*scheme) DeriveKey(seed []byte) (sign.PublicKey, sign.PrivateKey) {
- privateKey := NewKeyFromSeed(seed)
- publicKey := make(PublicKey, PublicKeySize)
- copy(publicKey, privateKey[SeedSize:])
- return publicKey, privateKey
-}
-
-func (*scheme) UnmarshalBinaryPublicKey(buf []byte) (sign.PublicKey, error) {
- if len(buf) < PublicKeySize {
- return nil, sign.ErrPubKeySize
- }
- pub := make(PublicKey, PublicKeySize)
- copy(pub, buf[:PublicKeySize])
- return pub, nil
-}
-
-func (*scheme) UnmarshalBinaryPrivateKey(buf []byte) (sign.PrivateKey, error) {
- if len(buf) < PrivateKeySize {
- return nil, sign.ErrPrivKeySize
- }
- priv := make(PrivateKey, PrivateKeySize)
- copy(priv, buf[:PrivateKeySize])
- return priv, nil
-}
diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/tables.go b/vendor/github.com/cloudflare/circl/sign/ed25519/tables.go
deleted file mode 100644
index 8763b426fc0..00000000000
--- a/vendor/github.com/cloudflare/circl/sign/ed25519/tables.go
+++ /dev/null
@@ -1,213 +0,0 @@
-package ed25519
-
-import fp "github.com/cloudflare/circl/math/fp25519"
-
-var tabSign = [fxV][fx2w1]pointR3{
- {
- pointR3{
- addYX: fp.Elt{0x85, 0x3b, 0x8c, 0xf5, 0xc6, 0x93, 0xbc, 0x2f, 0x19, 0x0e, 0x8c, 0xfb, 0xc6, 0x2d, 0x93, 0xcf, 0xc2, 0x42, 0x3d, 0x64, 0x98, 0x48, 0x0b, 0x27, 0x65, 0xba, 0xd4, 0x33, 0x3a, 0x9d, 0xcf, 0x07},
- subYX: fp.Elt{0x3e, 0x91, 0x40, 0xd7, 0x05, 0x39, 0x10, 0x9d, 0xb3, 0xbe, 0x40, 0xd1, 0x05, 0x9f, 0x39, 0xfd, 0x09, 0x8a, 0x8f, 0x68, 0x34, 0x84, 0xc1, 0xa5, 0x67, 0x12, 0xf8, 0x98, 0x92, 0x2f, 0xfd, 0x44},
- dt2: fp.Elt{0x68, 0xaa, 0x7a, 0x87, 0x05, 0x12, 0xc9, 0xab, 0x9e, 0xc4, 0xaa, 0xcc, 0x23, 0xe8, 0xd9, 0x26, 0x8c, 0x59, 0x43, 0xdd, 0xcb, 0x7d, 0x1b, 0x5a, 0xa8, 0x65, 0x0c, 0x9f, 0x68, 0x7b, 0x11, 0x6f},
- },
- {
- addYX: fp.Elt{0x7c, 0xb0, 0x9e, 0xe6, 0xc5, 0xbf, 0xfa, 0x13, 0x8e, 0x0d, 0x22, 0xde, 0xc8, 0xd1, 0xce, 0x52, 0x02, 0xd5, 0x62, 0x31, 0x71, 0x0e, 0x8e, 0x9d, 0xb0, 0xd6, 0x00, 0xa5, 0x5a, 0x0e, 0xce, 0x72},
- subYX: fp.Elt{0x1a, 0x8e, 0x5c, 0xdc, 0xa4, 0xb3, 0x6c, 0x51, 0x18, 0xa0, 0x09, 0x80, 0x9a, 0x46, 0x33, 0xd5, 0xe0, 0x3c, 0x4d, 0x3b, 0xfc, 0x49, 0xa2, 0x43, 0x29, 0xe1, 0x29, 0xa9, 0x93, 0xea, 0x7c, 0x35},
- dt2: fp.Elt{0x08, 0x46, 0x6f, 0x68, 0x7f, 0x0b, 0x7c, 0x9e, 0xad, 0xba, 0x07, 0x61, 0x74, 0x83, 0x2f, 0xfc, 0x26, 0xd6, 0x09, 0xb9, 0x00, 0x34, 0x36, 0x4f, 0x01, 0xf3, 0x48, 0xdb, 0x43, 0xba, 0x04, 0x44},
- },
- {
- addYX: fp.Elt{0x4c, 0xda, 0x0d, 0x13, 0x66, 0xfd, 0x82, 0x84, 0x9f, 0x75, 0x5b, 0xa2, 0x17, 0xfe, 0x34, 0xbf, 0x1f, 0xcb, 0xba, 0x90, 0x55, 0x80, 0x83, 0xfd, 0x63, 0xb9, 0x18, 0xf8, 0x5b, 0x5d, 0x94, 0x1e},
- subYX: fp.Elt{0xb9, 0xdb, 0x6c, 0x04, 0x88, 0x22, 0xd8, 0x79, 0x83, 0x2f, 0x8d, 0x65, 0x6b, 0xd2, 0xab, 0x1b, 0xdd, 0x65, 0xe5, 0x93, 0x63, 0xf8, 0xa2, 0xd8, 0x3c, 0xf1, 0x4b, 0xc5, 0x99, 0xd1, 0xf2, 0x12},
- dt2: fp.Elt{0x05, 0x4c, 0xb8, 0x3b, 0xfe, 0xf5, 0x9f, 0x2e, 0xd1, 0xb2, 0xb8, 0xff, 0xfe, 0x6d, 0xd9, 0x37, 0xe0, 0xae, 0xb4, 0x5a, 0x51, 0x80, 0x7e, 0x9b, 0x1d, 0xd1, 0x8d, 0x8c, 0x56, 0xb1, 0x84, 0x35},
- },
- {
- addYX: fp.Elt{0x39, 0x71, 0x43, 0x34, 0xe3, 0x42, 0x45, 0xa1, 0xf2, 0x68, 0x71, 0xa7, 0xe8, 0x23, 0xfd, 0x9f, 0x86, 0x48, 0xff, 0xe5, 0x96, 0x74, 0xcf, 0x05, 0x49, 0xe2, 0xb3, 0x6c, 0x17, 0x77, 0x2f, 0x6d},
- subYX: fp.Elt{0x73, 0x3f, 0xc1, 0xc7, 0x6a, 0x66, 0xa1, 0x20, 0xdd, 0x11, 0xfb, 0x7a, 0x6e, 0xa8, 0x51, 0xb8, 0x3f, 0x9d, 0xa2, 0x97, 0x84, 0xb5, 0xc7, 0x90, 0x7c, 0xab, 0x48, 0xd6, 0x84, 0xa3, 0xd5, 0x1a},
- dt2: fp.Elt{0x63, 0x27, 0x3c, 0x49, 0x4b, 0xfc, 0x22, 0xf2, 0x0b, 0x50, 0xc2, 0x0f, 0xb4, 0x1f, 0x31, 0x0c, 0x2f, 0x53, 0xab, 0xaa, 0x75, 0x6f, 0xe0, 0x69, 0x39, 0x56, 0xe0, 0x3b, 0xb7, 0xa8, 0xbf, 0x45},
- },
- },
- {
- {
- addYX: fp.Elt{0x00, 0x45, 0xd9, 0x0d, 0x58, 0x03, 0xfc, 0x29, 0x93, 0xec, 0xbb, 0x6f, 0xa4, 0x7a, 0xd2, 0xec, 0xf8, 0xa7, 0xe2, 0xc2, 0x5f, 0x15, 0x0a, 0x13, 0xd5, 0xa1, 0x06, 0xb7, 0x1a, 0x15, 0x6b, 0x41},
- subYX: fp.Elt{0x85, 0x8c, 0xb2, 0x17, 0xd6, 0x3b, 0x0a, 0xd3, 0xea, 0x3b, 0x77, 0x39, 0xb7, 0x77, 0xd3, 0xc5, 0xbf, 0x5c, 0x6a, 0x1e, 0x8c, 0xe7, 0xc6, 0xc6, 0xc4, 0xb7, 0x2a, 0x8b, 0xf7, 0xb8, 0x61, 0x0d},
- dt2: fp.Elt{0xb0, 0x36, 0xc1, 0xe9, 0xef, 0xd7, 0xa8, 0x56, 0x20, 0x4b, 0xe4, 0x58, 0xcd, 0xe5, 0x07, 0xbd, 0xab, 0xe0, 0x57, 0x1b, 0xda, 0x2f, 0xe6, 0xaf, 0xd2, 0xe8, 0x77, 0x42, 0xf7, 0x2a, 0x1a, 0x19},
- },
- {
- addYX: fp.Elt{0x6a, 0x6d, 0x6d, 0xd1, 0xfa, 0xf5, 0x03, 0x30, 0xbd, 0x6d, 0xc2, 0xc8, 0xf5, 0x38, 0x80, 0x4f, 0xb2, 0xbe, 0xa1, 0x76, 0x50, 0x1a, 0x73, 0xf2, 0x78, 0x2b, 0x8e, 0x3a, 0x1e, 0x34, 0x47, 0x7b},
- subYX: fp.Elt{0xc3, 0x2c, 0x36, 0xdc, 0xc5, 0x45, 0xbc, 0xef, 0x1b, 0x64, 0xd6, 0x65, 0x28, 0xe9, 0xda, 0x84, 0x13, 0xbe, 0x27, 0x8e, 0x3f, 0x98, 0x2a, 0x37, 0xee, 0x78, 0x97, 0xd6, 0xc0, 0x6f, 0xb4, 0x53},
- dt2: fp.Elt{0x58, 0x5d, 0xa7, 0xa3, 0x68, 0xbb, 0x20, 0x30, 0x2e, 0x03, 0xe9, 0xb1, 0xd4, 0x90, 0x72, 0xe3, 0x71, 0xb2, 0x36, 0x3e, 0x73, 0xa0, 0x2e, 0x3d, 0xd1, 0x85, 0x33, 0x62, 0x4e, 0xa7, 0x7b, 0x31},
- },
- {
- addYX: fp.Elt{0xbf, 0xc4, 0x38, 0x53, 0xfb, 0x68, 0xa9, 0x77, 0xce, 0x55, 0xf9, 0x05, 0xcb, 0xeb, 0xfb, 0x8c, 0x46, 0xc2, 0x32, 0x7c, 0xf0, 0xdb, 0xd7, 0x2c, 0x62, 0x8e, 0xdd, 0x54, 0x75, 0xcf, 0x3f, 0x33},
- subYX: fp.Elt{0x49, 0x50, 0x1f, 0x4e, 0x6e, 0x55, 0x55, 0xde, 0x8c, 0x4e, 0x77, 0x96, 0x38, 0x3b, 0xfe, 0xb6, 0x43, 0x3c, 0x86, 0x69, 0xc2, 0x72, 0x66, 0x1f, 0x6b, 0xf9, 0x87, 0xbc, 0x4f, 0x37, 0x3e, 0x3c},
- dt2: fp.Elt{0xd2, 0x2f, 0x06, 0x6b, 0x08, 0x07, 0x69, 0x77, 0xc0, 0x94, 0xcc, 0xae, 0x43, 0x00, 0x59, 0x6e, 0xa3, 0x63, 0xa8, 0xdd, 0xfa, 0x24, 0x18, 0xd0, 0x35, 0xc7, 0x78, 0xf7, 0x0d, 0xd4, 0x5a, 0x1e},
- },
- {
- addYX: fp.Elt{0x45, 0xc1, 0x17, 0x51, 0xf8, 0xed, 0x7e, 0xc7, 0xa9, 0x1a, 0x11, 0x6e, 0x2d, 0xef, 0x0b, 0xd5, 0x3f, 0x98, 0xb0, 0xa3, 0x9d, 0x65, 0xf1, 0xcd, 0x53, 0x4a, 0x8a, 0x18, 0x70, 0x0a, 0x7f, 0x23},
- subYX: fp.Elt{0xdd, 0xef, 0xbe, 0x3a, 0x31, 0xe0, 0xbc, 0xbe, 0x6d, 0x5d, 0x79, 0x87, 0xd6, 0xbe, 0x68, 0xe3, 0x59, 0x76, 0x8c, 0x86, 0x0e, 0x7a, 0x92, 0x13, 0x14, 0x8f, 0x67, 0xb3, 0xcb, 0x1a, 0x76, 0x76},
- dt2: fp.Elt{0x56, 0x7a, 0x1c, 0x9d, 0xca, 0x96, 0xf9, 0xf9, 0x03, 0x21, 0xd4, 0xe8, 0xb3, 0xd5, 0xe9, 0x52, 0xc8, 0x54, 0x1e, 0x1b, 0x13, 0xb6, 0xfd, 0x47, 0x7d, 0x02, 0x32, 0x33, 0x27, 0xe2, 0x1f, 0x19},
- },
- },
-}
-
-var tabVerif = [1 << (omegaFix - 2)]pointR3{
- { /* 1P */
- addYX: fp.Elt{0x85, 0x3b, 0x8c, 0xf5, 0xc6, 0x93, 0xbc, 0x2f, 0x19, 0x0e, 0x8c, 0xfb, 0xc6, 0x2d, 0x93, 0xcf, 0xc2, 0x42, 0x3d, 0x64, 0x98, 0x48, 0x0b, 0x27, 0x65, 0xba, 0xd4, 0x33, 0x3a, 0x9d, 0xcf, 0x07},
- subYX: fp.Elt{0x3e, 0x91, 0x40, 0xd7, 0x05, 0x39, 0x10, 0x9d, 0xb3, 0xbe, 0x40, 0xd1, 0x05, 0x9f, 0x39, 0xfd, 0x09, 0x8a, 0x8f, 0x68, 0x34, 0x84, 0xc1, 0xa5, 0x67, 0x12, 0xf8, 0x98, 0x92, 0x2f, 0xfd, 0x44},
- dt2: fp.Elt{0x68, 0xaa, 0x7a, 0x87, 0x05, 0x12, 0xc9, 0xab, 0x9e, 0xc4, 0xaa, 0xcc, 0x23, 0xe8, 0xd9, 0x26, 0x8c, 0x59, 0x43, 0xdd, 0xcb, 0x7d, 0x1b, 0x5a, 0xa8, 0x65, 0x0c, 0x9f, 0x68, 0x7b, 0x11, 0x6f},
- },
- { /* 3P */
- addYX: fp.Elt{0x30, 0x97, 0xee, 0x4c, 0xa8, 0xb0, 0x25, 0xaf, 0x8a, 0x4b, 0x86, 0xe8, 0x30, 0x84, 0x5a, 0x02, 0x32, 0x67, 0x01, 0x9f, 0x02, 0x50, 0x1b, 0xc1, 0xf4, 0xf8, 0x80, 0x9a, 0x1b, 0x4e, 0x16, 0x7a},
- subYX: fp.Elt{0x65, 0xd2, 0xfc, 0xa4, 0xe8, 0x1f, 0x61, 0x56, 0x7d, 0xba, 0xc1, 0xe5, 0xfd, 0x53, 0xd3, 0x3b, 0xbd, 0xd6, 0x4b, 0x21, 0x1a, 0xf3, 0x31, 0x81, 0x62, 0xda, 0x5b, 0x55, 0x87, 0x15, 0xb9, 0x2a},
- dt2: fp.Elt{0x89, 0xd8, 0xd0, 0x0d, 0x3f, 0x93, 0xae, 0x14, 0x62, 0xda, 0x35, 0x1c, 0x22, 0x23, 0x94, 0x58, 0x4c, 0xdb, 0xf2, 0x8c, 0x45, 0xe5, 0x70, 0xd1, 0xc6, 0xb4, 0xb9, 0x12, 0xaf, 0x26, 0x28, 0x5a},
- },
- { /* 5P */
- addYX: fp.Elt{0x33, 0xbb, 0xa5, 0x08, 0x44, 0xbc, 0x12, 0xa2, 0x02, 0xed, 0x5e, 0xc7, 0xc3, 0x48, 0x50, 0x8d, 0x44, 0xec, 0xbf, 0x5a, 0x0c, 0xeb, 0x1b, 0xdd, 0xeb, 0x06, 0xe2, 0x46, 0xf1, 0xcc, 0x45, 0x29},
- subYX: fp.Elt{0xba, 0xd6, 0x47, 0xa4, 0xc3, 0x82, 0x91, 0x7f, 0xb7, 0x29, 0x27, 0x4b, 0xd1, 0x14, 0x00, 0xd5, 0x87, 0xa0, 0x64, 0xb8, 0x1c, 0xf1, 0x3c, 0xe3, 0xf3, 0x55, 0x1b, 0xeb, 0x73, 0x7e, 0x4a, 0x15},
- dt2: fp.Elt{0x85, 0x82, 0x2a, 0x81, 0xf1, 0xdb, 0xbb, 0xbc, 0xfc, 0xd1, 0xbd, 0xd0, 0x07, 0x08, 0x0e, 0x27, 0x2d, 0xa7, 0xbd, 0x1b, 0x0b, 0x67, 0x1b, 0xb4, 0x9a, 0xb6, 0x3b, 0x6b, 0x69, 0xbe, 0xaa, 0x43},
- },
- { /* 7P */
- addYX: fp.Elt{0xbf, 0xa3, 0x4e, 0x94, 0xd0, 0x5c, 0x1a, 0x6b, 0xd2, 0xc0, 0x9d, 0xb3, 0x3a, 0x35, 0x70, 0x74, 0x49, 0x2e, 0x54, 0x28, 0x82, 0x52, 0xb2, 0x71, 0x7e, 0x92, 0x3c, 0x28, 0x69, 0xea, 0x1b, 0x46},
- subYX: fp.Elt{0xb1, 0x21, 0x32, 0xaa, 0x9a, 0x2c, 0x6f, 0xba, 0xa7, 0x23, 0xba, 0x3b, 0x53, 0x21, 0xa0, 0x6c, 0x3a, 0x2c, 0x19, 0x92, 0x4f, 0x76, 0xea, 0x9d, 0xe0, 0x17, 0x53, 0x2e, 0x5d, 0xdd, 0x6e, 0x1d},
- dt2: fp.Elt{0xa2, 0xb3, 0xb8, 0x01, 0xc8, 0x6d, 0x83, 0xf1, 0x9a, 0xa4, 0x3e, 0x05, 0x47, 0x5f, 0x03, 0xb3, 0xf3, 0xad, 0x77, 0x58, 0xba, 0x41, 0x9c, 0x52, 0xa7, 0x90, 0x0f, 0x6a, 0x1c, 0xbb, 0x9f, 0x7a},
- },
- { /* 9P */
- addYX: fp.Elt{0x2f, 0x63, 0xa8, 0xa6, 0x8a, 0x67, 0x2e, 0x9b, 0xc5, 0x46, 0xbc, 0x51, 0x6f, 0x9e, 0x50, 0xa6, 0xb5, 0xf5, 0x86, 0xc6, 0xc9, 0x33, 0xb2, 0xce, 0x59, 0x7f, 0xdd, 0x8a, 0x33, 0xed, 0xb9, 0x34},
- subYX: fp.Elt{0x64, 0x80, 0x9d, 0x03, 0x7e, 0x21, 0x6e, 0xf3, 0x9b, 0x41, 0x20, 0xf5, 0xb6, 0x81, 0xa0, 0x98, 0x44, 0xb0, 0x5e, 0xe7, 0x08, 0xc6, 0xcb, 0x96, 0x8f, 0x9c, 0xdc, 0xfa, 0x51, 0x5a, 0xc0, 0x49},
- dt2: fp.Elt{0x1b, 0xaf, 0x45, 0x90, 0xbf, 0xe8, 0xb4, 0x06, 0x2f, 0xd2, 0x19, 0xa7, 0xe8, 0x83, 0xff, 0xe2, 0x16, 0xcf, 0xd4, 0x93, 0x29, 0xfc, 0xf6, 0xaa, 0x06, 0x8b, 0x00, 0x1b, 0x02, 0x72, 0xc1, 0x73},
- },
- { /* 11P */
- addYX: fp.Elt{0xde, 0x2a, 0x80, 0x8a, 0x84, 0x00, 0xbf, 0x2f, 0x27, 0x2e, 0x30, 0x02, 0xcf, 0xfe, 0xd9, 0xe5, 0x06, 0x34, 0x70, 0x17, 0x71, 0x84, 0x3e, 0x11, 0xaf, 0x8f, 0x6d, 0x54, 0xe2, 0xaa, 0x75, 0x42},
- subYX: fp.Elt{0x48, 0x43, 0x86, 0x49, 0x02, 0x5b, 0x5f, 0x31, 0x81, 0x83, 0x08, 0x77, 0x69, 0xb3, 0xd6, 0x3e, 0x95, 0xeb, 0x8d, 0x6a, 0x55, 0x75, 0xa0, 0xa3, 0x7f, 0xc7, 0xd5, 0x29, 0x80, 0x59, 0xab, 0x18},
- dt2: fp.Elt{0xe9, 0x89, 0x60, 0xfd, 0xc5, 0x2c, 0x2b, 0xd8, 0xa4, 0xe4, 0x82, 0x32, 0xa1, 0xb4, 0x1e, 0x03, 0x22, 0x86, 0x1a, 0xb5, 0x99, 0x11, 0x31, 0x44, 0x48, 0xf9, 0x3d, 0xb5, 0x22, 0x55, 0xc6, 0x3d},
- },
- { /* 13P */
- addYX: fp.Elt{0x6d, 0x7f, 0x00, 0xa2, 0x22, 0xc2, 0x70, 0xbf, 0xdb, 0xde, 0xbc, 0xb5, 0x9a, 0xb3, 0x84, 0xbf, 0x07, 0xba, 0x07, 0xfb, 0x12, 0x0e, 0x7a, 0x53, 0x41, 0xf2, 0x46, 0xc3, 0xee, 0xd7, 0x4f, 0x23},
- subYX: fp.Elt{0x93, 0xbf, 0x7f, 0x32, 0x3b, 0x01, 0x6f, 0x50, 0x6b, 0x6f, 0x77, 0x9b, 0xc9, 0xeb, 0xfc, 0xae, 0x68, 0x59, 0xad, 0xaa, 0x32, 0xb2, 0x12, 0x9d, 0xa7, 0x24, 0x60, 0x17, 0x2d, 0x88, 0x67, 0x02},
- dt2: fp.Elt{0x78, 0xa3, 0x2e, 0x73, 0x19, 0xa1, 0x60, 0x53, 0x71, 0xd4, 0x8d, 0xdf, 0xb1, 0xe6, 0x37, 0x24, 0x33, 0xe5, 0xa7, 0x91, 0xf8, 0x37, 0xef, 0xa2, 0x63, 0x78, 0x09, 0xaa, 0xfd, 0xa6, 0x7b, 0x49},
- },
- { /* 15P */
- addYX: fp.Elt{0xa0, 0xea, 0xcf, 0x13, 0x03, 0xcc, 0xce, 0x24, 0x6d, 0x24, 0x9c, 0x18, 0x8d, 0xc2, 0x48, 0x86, 0xd0, 0xd4, 0xf2, 0xc1, 0xfa, 0xbd, 0xbd, 0x2d, 0x2b, 0xe7, 0x2d, 0xf1, 0x17, 0x29, 0xe2, 0x61},
- subYX: fp.Elt{0x0b, 0xcf, 0x8c, 0x46, 0x86, 0xcd, 0x0b, 0x04, 0xd6, 0x10, 0x99, 0x2a, 0xa4, 0x9b, 0x82, 0xd3, 0x92, 0x51, 0xb2, 0x07, 0x08, 0x30, 0x08, 0x75, 0xbf, 0x5e, 0xd0, 0x18, 0x42, 0xcd, 0xb5, 0x43},
- dt2: fp.Elt{0x16, 0xb5, 0xd0, 0x9b, 0x2f, 0x76, 0x9a, 0x5d, 0xee, 0xde, 0x3f, 0x37, 0x4e, 0xaf, 0x38, 0xeb, 0x70, 0x42, 0xd6, 0x93, 0x7d, 0x5a, 0x2e, 0x03, 0x42, 0xd8, 0xe4, 0x0a, 0x21, 0x61, 0x1d, 0x51},
- },
- { /* 17P */
- addYX: fp.Elt{0x81, 0x9d, 0x0e, 0x95, 0xef, 0x76, 0xc6, 0x92, 0x4f, 0x04, 0xd7, 0xc0, 0xcd, 0x20, 0x46, 0xa5, 0x48, 0x12, 0x8f, 0x6f, 0x64, 0x36, 0x9b, 0xaa, 0xe3, 0x55, 0xb8, 0xdd, 0x24, 0x59, 0x32, 0x6d},
- subYX: fp.Elt{0x87, 0xde, 0x20, 0x44, 0x48, 0x86, 0x13, 0x08, 0xb4, 0xed, 0x92, 0xb5, 0x16, 0xf0, 0x1c, 0x8a, 0x25, 0x2d, 0x94, 0x29, 0x27, 0x4e, 0xfa, 0x39, 0x10, 0x28, 0x48, 0xe2, 0x6f, 0xfe, 0xa7, 0x71},
- dt2: fp.Elt{0x54, 0xc8, 0xc8, 0xa5, 0xb8, 0x82, 0x71, 0x6c, 0x03, 0x2a, 0x5f, 0xfe, 0x79, 0x14, 0xfd, 0x33, 0x0c, 0x8d, 0x77, 0x83, 0x18, 0x59, 0xcf, 0x72, 0xa9, 0xea, 0x9e, 0x55, 0xb6, 0xc4, 0x46, 0x47},
- },
- { /* 19P */
- addYX: fp.Elt{0x2b, 0x9a, 0xc6, 0x6d, 0x3c, 0x7b, 0x77, 0xd3, 0x17, 0xf6, 0x89, 0x6f, 0x27, 0xb2, 0xfa, 0xde, 0xb5, 0x16, 0x3a, 0xb5, 0xf7, 0x1c, 0x65, 0x45, 0xb7, 0x9f, 0xfe, 0x34, 0xde, 0x51, 0x9a, 0x5c},
- subYX: fp.Elt{0x47, 0x11, 0x74, 0x64, 0xc8, 0x46, 0x85, 0x34, 0x49, 0xc8, 0xfc, 0x0e, 0xdd, 0xae, 0x35, 0x7d, 0x32, 0xa3, 0x72, 0x06, 0x76, 0x9a, 0x93, 0xff, 0xd6, 0xe6, 0xb5, 0x7d, 0x49, 0x63, 0x96, 0x21},
- dt2: fp.Elt{0x67, 0x0e, 0xf1, 0x79, 0xcf, 0xf1, 0x10, 0xf5, 0x5b, 0x51, 0x58, 0xe6, 0xa1, 0xda, 0xdd, 0xff, 0x77, 0x22, 0x14, 0x10, 0x17, 0xa7, 0xc3, 0x09, 0xbb, 0x23, 0x82, 0x60, 0x3c, 0x50, 0x04, 0x48},
- },
- { /* 21P */
- addYX: fp.Elt{0xc7, 0x7f, 0xa3, 0x2c, 0xd0, 0x9e, 0x24, 0xc4, 0xab, 0xac, 0x15, 0xa6, 0xe3, 0xa0, 0x59, 0xa0, 0x23, 0x0e, 0x6e, 0xc9, 0xd7, 0x6e, 0xa9, 0x88, 0x6d, 0x69, 0x50, 0x16, 0xa5, 0x98, 0x33, 0x55},
- subYX: fp.Elt{0x75, 0xd1, 0x36, 0x3a, 0xd2, 0x21, 0x68, 0x3b, 0x32, 0x9e, 0x9b, 0xe9, 0xa7, 0x0a, 0xb4, 0xbb, 0x47, 0x8a, 0x83, 0x20, 0xe4, 0x5c, 0x9e, 0x5d, 0x5e, 0x4c, 0xde, 0x58, 0x88, 0x09, 0x1e, 0x77},
- dt2: fp.Elt{0xdf, 0x1e, 0x45, 0x78, 0xd2, 0xf5, 0x12, 0x9a, 0xcb, 0x9c, 0x89, 0x85, 0x79, 0x5d, 0xda, 0x3a, 0x08, 0x95, 0xa5, 0x9f, 0x2d, 0x4a, 0x7f, 0x47, 0x11, 0xa6, 0xf5, 0x8f, 0xd6, 0xd1, 0x5e, 0x5a},
- },
- { /* 23P */
- addYX: fp.Elt{0x83, 0x0e, 0x15, 0xfe, 0x2a, 0x12, 0x95, 0x11, 0xd8, 0x35, 0x4b, 0x7e, 0x25, 0x9a, 0x20, 0xcf, 0x20, 0x1e, 0x71, 0x1e, 0x29, 0xf8, 0x87, 0x73, 0xf0, 0x92, 0xbf, 0xd8, 0x97, 0xb8, 0xac, 0x44},
- subYX: fp.Elt{0x59, 0x73, 0x52, 0x58, 0xc5, 0xe0, 0xe5, 0xba, 0x7e, 0x9d, 0xdb, 0xca, 0x19, 0x5c, 0x2e, 0x39, 0xe9, 0xab, 0x1c, 0xda, 0x1e, 0x3c, 0x65, 0x28, 0x44, 0xdc, 0xef, 0x5f, 0x13, 0x60, 0x9b, 0x01},
- dt2: fp.Elt{0x83, 0x4b, 0x13, 0x5e, 0x14, 0x68, 0x60, 0x1e, 0x16, 0x4c, 0x30, 0x24, 0x4f, 0xe6, 0xf5, 0xc4, 0xd7, 0x3e, 0x1a, 0xfc, 0xa8, 0x88, 0x6e, 0x50, 0x92, 0x2f, 0xad, 0xe6, 0xfd, 0x49, 0x0c, 0x15},
- },
- { /* 25P */
- addYX: fp.Elt{0x38, 0x11, 0x47, 0x09, 0x95, 0xf2, 0x7b, 0x8e, 0x51, 0xa6, 0x75, 0x4f, 0x39, 0xef, 0x6f, 0x5d, 0xad, 0x08, 0xa7, 0x25, 0xc4, 0x79, 0xaf, 0x10, 0x22, 0x99, 0xb9, 0x5b, 0x07, 0x5a, 0x2b, 0x6b},
- subYX: fp.Elt{0x68, 0xa8, 0xdc, 0x9c, 0x3c, 0x86, 0x49, 0xb8, 0xd0, 0x4a, 0x71, 0xb8, 0xdb, 0x44, 0x3f, 0xc8, 0x8d, 0x16, 0x36, 0x0c, 0x56, 0xe3, 0x3e, 0xfe, 0xc1, 0xfb, 0x05, 0x1e, 0x79, 0xd7, 0xa6, 0x78},
- dt2: fp.Elt{0x76, 0xb9, 0xa0, 0x47, 0x4b, 0x70, 0xbf, 0x58, 0xd5, 0x48, 0x17, 0x74, 0x55, 0xb3, 0x01, 0xa6, 0x90, 0xf5, 0x42, 0xd5, 0xb1, 0x1f, 0x2b, 0xaa, 0x00, 0x5d, 0xd5, 0x4a, 0xfc, 0x7f, 0x5c, 0x72},
- },
- { /* 27P */
- addYX: fp.Elt{0xb2, 0x99, 0xcf, 0xd1, 0x15, 0x67, 0x42, 0xe4, 0x34, 0x0d, 0xa2, 0x02, 0x11, 0xd5, 0x52, 0x73, 0x9f, 0x10, 0x12, 0x8b, 0x7b, 0x15, 0xd1, 0x23, 0xa3, 0xf3, 0xb1, 0x7c, 0x27, 0xc9, 0x4c, 0x79},
- subYX: fp.Elt{0xc0, 0x98, 0xd0, 0x1c, 0xf7, 0x2b, 0x80, 0x91, 0x66, 0x63, 0x5e, 0xed, 0xa4, 0x6c, 0x41, 0xfe, 0x4c, 0x99, 0x02, 0x49, 0x71, 0x5d, 0x58, 0xdf, 0xe7, 0xfa, 0x55, 0xf8, 0x25, 0x46, 0xd5, 0x4c},
- dt2: fp.Elt{0x53, 0x50, 0xac, 0xc2, 0x26, 0xc4, 0xf6, 0x4a, 0x58, 0x72, 0xf6, 0x32, 0xad, 0xed, 0x9a, 0xbc, 0x21, 0x10, 0x31, 0x0a, 0xf1, 0x32, 0xd0, 0x2a, 0x85, 0x8e, 0xcc, 0x6f, 0x7b, 0x35, 0x08, 0x70},
- },
- { /* 29P */
- addYX: fp.Elt{0x01, 0x3f, 0x77, 0x38, 0x27, 0x67, 0x88, 0x0b, 0xfb, 0xcc, 0xfb, 0x95, 0xfa, 0xc8, 0xcc, 0xb8, 0xb6, 0x29, 0xad, 0xb9, 0xa3, 0xd5, 0x2d, 0x8d, 0x6a, 0x0f, 0xad, 0x51, 0x98, 0x7e, 0xef, 0x06},
- subYX: fp.Elt{0x34, 0x4a, 0x58, 0x82, 0xbb, 0x9f, 0x1b, 0xd0, 0x2b, 0x79, 0xb4, 0xd2, 0x63, 0x64, 0xab, 0x47, 0x02, 0x62, 0x53, 0x48, 0x9c, 0x63, 0x31, 0xb6, 0x28, 0xd4, 0xd6, 0x69, 0x36, 0x2a, 0xa9, 0x13},
- dt2: fp.Elt{0xe5, 0x7d, 0x57, 0xc0, 0x1c, 0x77, 0x93, 0xca, 0x5c, 0xdc, 0x35, 0x50, 0x1e, 0xe4, 0x40, 0x75, 0x71, 0xe0, 0x02, 0xd8, 0x01, 0x0f, 0x68, 0x24, 0x6a, 0xf8, 0x2a, 0x8a, 0xdf, 0x6d, 0x29, 0x3c},
- },
- { /* 31P */
- addYX: fp.Elt{0x13, 0xa7, 0x14, 0xd9, 0xf9, 0x15, 0xad, 0xae, 0x12, 0xf9, 0x8f, 0x8c, 0xf9, 0x7b, 0x2f, 0xa9, 0x30, 0xd7, 0x53, 0x9f, 0x17, 0x23, 0xf8, 0xaf, 0xba, 0x77, 0x0c, 0x49, 0x93, 0xd3, 0x99, 0x7a},
- subYX: fp.Elt{0x41, 0x25, 0x1f, 0xbb, 0x2e, 0x4d, 0xeb, 0xfc, 0x1f, 0xb9, 0xad, 0x40, 0xc7, 0x10, 0x95, 0xb8, 0x05, 0xad, 0xa1, 0xd0, 0x7d, 0xa3, 0x71, 0xfc, 0x7b, 0x71, 0x47, 0x07, 0x70, 0x2c, 0x89, 0x0a},
- dt2: fp.Elt{0xe8, 0xa3, 0xbd, 0x36, 0x24, 0xed, 0x52, 0x8f, 0x94, 0x07, 0xe8, 0x57, 0x41, 0xc8, 0xa8, 0x77, 0xe0, 0x9c, 0x2f, 0x26, 0x63, 0x65, 0xa9, 0xa5, 0xd2, 0xf7, 0x02, 0x83, 0xd2, 0x62, 0x67, 0x28},
- },
- { /* 33P */
- addYX: fp.Elt{0x25, 0x5b, 0xe3, 0x3c, 0x09, 0x36, 0x78, 0x4e, 0x97, 0xaa, 0x6b, 0xb2, 0x1d, 0x18, 0xe1, 0x82, 0x3f, 0xb8, 0xc7, 0xcb, 0xd3, 0x92, 0xc1, 0x0c, 0x3a, 0x9d, 0x9d, 0x6a, 0x04, 0xda, 0xf1, 0x32},
- subYX: fp.Elt{0xbd, 0xf5, 0x2e, 0xce, 0x2b, 0x8e, 0x55, 0x7c, 0x63, 0xbc, 0x47, 0x67, 0xb4, 0x6c, 0x98, 0xe4, 0xb8, 0x89, 0xbb, 0x3b, 0x9f, 0x17, 0x4a, 0x15, 0x7a, 0x76, 0xf1, 0xd6, 0xa3, 0xf2, 0x86, 0x76},
- dt2: fp.Elt{0x6a, 0x7c, 0x59, 0x6d, 0xa6, 0x12, 0x8d, 0xaa, 0x2b, 0x85, 0xd3, 0x04, 0x03, 0x93, 0x11, 0x8f, 0x22, 0xb0, 0x09, 0xc2, 0x73, 0xdc, 0x91, 0x3f, 0xa6, 0x28, 0xad, 0xa9, 0xf8, 0x05, 0x13, 0x56},
- },
- { /* 35P */
- addYX: fp.Elt{0xd1, 0xae, 0x92, 0xec, 0x8d, 0x97, 0x0c, 0x10, 0xe5, 0x73, 0x6d, 0x4d, 0x43, 0xd5, 0x43, 0xca, 0x48, 0xba, 0x47, 0xd8, 0x22, 0x1b, 0x13, 0x83, 0x2c, 0x4d, 0x5d, 0xe3, 0x53, 0xec, 0xaa},
- subYX: fp.Elt{0xd5, 0xc0, 0xb0, 0xe7, 0x28, 0xcc, 0x22, 0x67, 0x53, 0x5c, 0x07, 0xdb, 0xbb, 0xe9, 0x9d, 0x70, 0x61, 0x0a, 0x01, 0xd7, 0xa7, 0x8d, 0xf6, 0xca, 0x6c, 0xcc, 0x57, 0x2c, 0xef, 0x1a, 0x0a, 0x03},
- dt2: fp.Elt{0xaa, 0xd2, 0x3a, 0x00, 0x73, 0xf7, 0xb1, 0x7b, 0x08, 0x66, 0x21, 0x2b, 0x80, 0x29, 0x3f, 0x0b, 0x3e, 0xd2, 0x0e, 0x52, 0x86, 0xdc, 0x21, 0x78, 0x80, 0x54, 0x06, 0x24, 0x1c, 0x9c, 0xbe, 0x20},
- },
- { /* 37P */
- addYX: fp.Elt{0xa6, 0x73, 0x96, 0x24, 0xd8, 0x87, 0x53, 0xe1, 0x93, 0xe4, 0x46, 0xf5, 0x2d, 0xbc, 0x43, 0x59, 0xb5, 0x63, 0x6f, 0xc3, 0x81, 0x9a, 0x7f, 0x1c, 0xde, 0xc1, 0x0a, 0x1f, 0x36, 0xb3, 0x0a, 0x75},
- subYX: fp.Elt{0x60, 0x5e, 0x02, 0xe2, 0x4a, 0xe4, 0xe0, 0x20, 0x38, 0xb9, 0xdc, 0xcb, 0x2f, 0x3b, 0x3b, 0xb0, 0x1c, 0x0d, 0x5a, 0xf9, 0x9c, 0x63, 0x5d, 0x10, 0x11, 0xe3, 0x67, 0x50, 0x54, 0x4c, 0x76, 0x69},
- dt2: fp.Elt{0x37, 0x10, 0xf8, 0xa2, 0x83, 0x32, 0x8a, 0x1e, 0xf1, 0xcb, 0x7f, 0xbd, 0x23, 0xda, 0x2e, 0x6f, 0x63, 0x25, 0x2e, 0xac, 0x5b, 0xd1, 0x2f, 0xb7, 0x40, 0x50, 0x07, 0xb7, 0x3f, 0x6b, 0xf9, 0x54},
- },
- { /* 39P */
- addYX: fp.Elt{0x79, 0x92, 0x66, 0x29, 0x04, 0xf2, 0xad, 0x0f, 0x4a, 0x72, 0x7d, 0x7d, 0x04, 0xa2, 0xdd, 0x3a, 0xf1, 0x60, 0x57, 0x8c, 0x82, 0x94, 0x3d, 0x6f, 0x9e, 0x53, 0xb7, 0x2b, 0xc5, 0xe9, 0x7f, 0x3d},
- subYX: fp.Elt{0xcd, 0x1e, 0xb1, 0x16, 0xc6, 0xaf, 0x7d, 0x17, 0x79, 0x64, 0x57, 0xfa, 0x9c, 0x4b, 0x76, 0x89, 0x85, 0xe7, 0xec, 0xe6, 0x10, 0xa1, 0xa8, 0xb7, 0xf0, 0xdb, 0x85, 0xbe, 0x9f, 0x83, 0xe6, 0x78},
- dt2: fp.Elt{0x6b, 0x85, 0xb8, 0x37, 0xf7, 0x2d, 0x33, 0x70, 0x8a, 0x17, 0x1a, 0x04, 0x43, 0x5d, 0xd0, 0x75, 0x22, 0x9e, 0xe5, 0xa0, 0x4a, 0xf7, 0x0f, 0x32, 0x42, 0x82, 0x08, 0x50, 0xf3, 0x68, 0xf2, 0x70},
- },
- { /* 41P */
- addYX: fp.Elt{0x47, 0x5f, 0x80, 0xb1, 0x83, 0x45, 0x86, 0x66, 0x19, 0x7c, 0xdd, 0x60, 0xd1, 0xc5, 0x35, 0xf5, 0x06, 0xb0, 0x4c, 0x1e, 0xb7, 0x4e, 0x87, 0xe9, 0xd9, 0x89, 0xd8, 0xfa, 0x5c, 0x34, 0x0d, 0x7c},
- subYX: fp.Elt{0x55, 0xf3, 0xdc, 0x70, 0x20, 0x11, 0x24, 0x23, 0x17, 0xe1, 0xfc, 0xe7, 0x7e, 0xc9, 0x0c, 0x38, 0x98, 0xb6, 0x52, 0x35, 0xed, 0xde, 0x1d, 0xb3, 0xb9, 0xc4, 0xb8, 0x39, 0xc0, 0x56, 0x4e, 0x40},
- dt2: fp.Elt{0x8a, 0x33, 0x78, 0x8c, 0x4b, 0x1f, 0x1f, 0x59, 0xe1, 0xb5, 0xe0, 0x67, 0xb1, 0x6a, 0x36, 0xa0, 0x44, 0x3d, 0x5f, 0xb4, 0x52, 0x41, 0xbc, 0x5c, 0x77, 0xc7, 0xae, 0x2a, 0x76, 0x54, 0xd7, 0x20},
- },
- { /* 43P */
- addYX: fp.Elt{0x58, 0xb7, 0x3b, 0xc7, 0x6f, 0xc3, 0x8f, 0x5e, 0x9a, 0xbb, 0x3c, 0x36, 0xa5, 0x43, 0xe5, 0xac, 0x22, 0xc9, 0x3b, 0x90, 0x7d, 0x4a, 0x93, 0xa9, 0x62, 0xec, 0xce, 0xf3, 0x46, 0x1e, 0x8f, 0x2b},
- subYX: fp.Elt{0x43, 0xf5, 0xb9, 0x35, 0xb1, 0xfe, 0x74, 0x9d, 0x6c, 0x95, 0x8c, 0xde, 0xf1, 0x7d, 0xb3, 0x84, 0xa9, 0x8b, 0x13, 0x57, 0x07, 0x2b, 0x32, 0xe9, 0xe1, 0x4c, 0x0b, 0x79, 0xa8, 0xad, 0xb8, 0x38},
- dt2: fp.Elt{0x5d, 0xf9, 0x51, 0xdf, 0x9c, 0x4a, 0xc0, 0xb5, 0xac, 0xde, 0x1f, 0xcb, 0xae, 0x52, 0x39, 0x2b, 0xda, 0x66, 0x8b, 0x32, 0x8b, 0x6d, 0x10, 0x1d, 0x53, 0x19, 0xba, 0xce, 0x32, 0xeb, 0x9a, 0x04},
- },
- { /* 45P */
- addYX: fp.Elt{0x31, 0x79, 0xfc, 0x75, 0x0b, 0x7d, 0x50, 0xaa, 0xd3, 0x25, 0x67, 0x7a, 0x4b, 0x92, 0xef, 0x0f, 0x30, 0x39, 0x6b, 0x39, 0x2b, 0x54, 0x82, 0x1d, 0xfc, 0x74, 0xf6, 0x30, 0x75, 0xe1, 0x5e, 0x79},
- subYX: fp.Elt{0x7e, 0xfe, 0xdc, 0x63, 0x3c, 0x7d, 0x76, 0xd7, 0x40, 0x6e, 0x85, 0x97, 0x48, 0x59, 0x9c, 0x20, 0x13, 0x7c, 0x4f, 0xe1, 0x61, 0x68, 0x67, 0xb6, 0xfc, 0x25, 0xd6, 0xc8, 0xe0, 0x65, 0xc6, 0x51},
- dt2: fp.Elt{0x81, 0xbd, 0xec, 0x52, 0x0a, 0x5b, 0x4a, 0x25, 0xe7, 0xaf, 0x34, 0xe0, 0x6e, 0x1f, 0x41, 0x5d, 0x31, 0x4a, 0xee, 0xca, 0x0d, 0x4d, 0xa2, 0xe6, 0x77, 0x44, 0xc5, 0x9d, 0xf4, 0x9b, 0xd1, 0x6c},
- },
- { /* 47P */
- addYX: fp.Elt{0x86, 0xc3, 0xaf, 0x65, 0x21, 0x61, 0xfe, 0x1f, 0x10, 0x1b, 0xd5, 0xb8, 0x88, 0x2a, 0x2a, 0x08, 0xaa, 0x0b, 0x99, 0x20, 0x7e, 0x62, 0xf6, 0x76, 0xe7, 0x43, 0x9e, 0x42, 0xa7, 0xb3, 0x01, 0x5e},
- subYX: fp.Elt{0xa3, 0x9c, 0x17, 0x52, 0x90, 0x61, 0x87, 0x7e, 0x85, 0x9f, 0x2c, 0x0b, 0x06, 0x0a, 0x1d, 0x57, 0x1e, 0x71, 0x99, 0x84, 0xa8, 0xba, 0xa2, 0x80, 0x38, 0xe6, 0xb2, 0x40, 0xdb, 0xf3, 0x20, 0x75},
- dt2: fp.Elt{0xa1, 0x57, 0x93, 0xd3, 0xe3, 0x0b, 0xb5, 0x3d, 0xa5, 0x94, 0x9e, 0x59, 0xdd, 0x6c, 0x7b, 0x96, 0x6e, 0x1e, 0x31, 0xdf, 0x64, 0x9a, 0x30, 0x1a, 0x86, 0xc9, 0xf3, 0xce, 0x9c, 0x2c, 0x09, 0x71},
- },
- { /* 49P */
- addYX: fp.Elt{0xcf, 0x1d, 0x05, 0x74, 0xac, 0xd8, 0x6b, 0x85, 0x1e, 0xaa, 0xb7, 0x55, 0x08, 0xa4, 0xf6, 0x03, 0xeb, 0x3c, 0x74, 0xc9, 0xcb, 0xe7, 0x4a, 0x3a, 0xde, 0xab, 0x37, 0x71, 0xbb, 0xa5, 0x73, 0x41},
- subYX: fp.Elt{0x8c, 0x91, 0x64, 0x03, 0x3f, 0x52, 0xd8, 0x53, 0x1c, 0x6b, 0xab, 0x3f, 0xf4, 0x04, 0xb4, 0xa2, 0xa4, 0xe5, 0x81, 0x66, 0x9e, 0x4a, 0x0b, 0x08, 0xa7, 0x7b, 0x25, 0xd0, 0x03, 0x5b, 0xa1, 0x0e},
- dt2: fp.Elt{0x8a, 0x21, 0xf9, 0xf0, 0x31, 0x6e, 0xc5, 0x17, 0x08, 0x47, 0xfc, 0x1a, 0x2b, 0x6e, 0x69, 0x5a, 0x76, 0xf1, 0xb2, 0xf4, 0x68, 0x16, 0x93, 0xf7, 0x67, 0x3a, 0x4e, 0x4a, 0x61, 0x65, 0xc5, 0x5f},
- },
- { /* 51P */
- addYX: fp.Elt{0x8e, 0x98, 0x90, 0x77, 0xe6, 0xe1, 0x92, 0x48, 0x22, 0xd7, 0x5c, 0x1c, 0x0f, 0x95, 0xd5, 0x01, 0xed, 0x3e, 0x92, 0xe5, 0x9a, 0x81, 0xb0, 0xe3, 0x1b, 0x65, 0x46, 0x9d, 0x40, 0xc7, 0x14, 0x32},
- subYX: fp.Elt{0xe5, 0x7a, 0x6d, 0xc4, 0x0d, 0x57, 0x6e, 0x13, 0x8f, 0xdc, 0xf8, 0x54, 0xcc, 0xaa, 0xd0, 0x0f, 0x86, 0xad, 0x0d, 0x31, 0x03, 0x9f, 0x54, 0x59, 0xa1, 0x4a, 0x45, 0x4c, 0x41, 0x1c, 0x71, 0x62},
- dt2: fp.Elt{0x70, 0x17, 0x65, 0x06, 0x74, 0x82, 0x29, 0x13, 0x36, 0x94, 0x27, 0x8a, 0x66, 0xa0, 0xa4, 0x3b, 0x3c, 0x22, 0x5d, 0x18, 0xec, 0xb8, 0xb6, 0xd9, 0x3c, 0x83, 0xcb, 0x3e, 0x07, 0x94, 0xea, 0x5b},
- },
- { /* 53P */
- addYX: fp.Elt{0xf8, 0xd2, 0x43, 0xf3, 0x63, 0xce, 0x70, 0xb4, 0xf1, 0xe8, 0x43, 0x05, 0x8f, 0xba, 0x67, 0x00, 0x6f, 0x7b, 0x11, 0xa2, 0xa1, 0x51, 0xda, 0x35, 0x2f, 0xbd, 0xf1, 0x44, 0x59, 0x78, 0xd0, 0x4a},
- subYX: fp.Elt{0xe4, 0x9b, 0xc8, 0x12, 0x09, 0xbf, 0x1d, 0x64, 0x9c, 0x57, 0x6e, 0x7d, 0x31, 0x8b, 0xf3, 0xac, 0x65, 0xb0, 0x97, 0xf6, 0x02, 0x9e, 0xfe, 0xab, 0xec, 0x1e, 0xf6, 0x48, 0xc1, 0xd5, 0xac, 0x3a},
- dt2: fp.Elt{0x01, 0x83, 0x31, 0xc3, 0x34, 0x3b, 0x8e, 0x85, 0x26, 0x68, 0x31, 0x07, 0x47, 0xc0, 0x99, 0xdc, 0x8c, 0xa8, 0x9d, 0xd3, 0x2e, 0x5b, 0x08, 0x34, 0x3d, 0x85, 0x02, 0xd9, 0xb1, 0x0c, 0xff, 0x3a},
- },
- { /* 55P */
- addYX: fp.Elt{0x05, 0x35, 0xc5, 0xf4, 0x0b, 0x43, 0x26, 0x92, 0x83, 0x22, 0x1f, 0x26, 0x13, 0x9c, 0xe4, 0x68, 0xc6, 0x27, 0xd3, 0x8f, 0x78, 0x33, 0xef, 0x09, 0x7f, 0x9e, 0xd9, 0x2b, 0x73, 0x9f, 0xcf, 0x2c},
- subYX: fp.Elt{0x5e, 0x40, 0x20, 0x3a, 0xeb, 0xc7, 0xc5, 0x87, 0xc9, 0x56, 0xad, 0xed, 0xef, 0x11, 0xe3, 0x8e, 0xf9, 0xd5, 0x29, 0xad, 0x48, 0x2e, 0x25, 0x29, 0x1d, 0x25, 0xcd, 0xf4, 0x86, 0x7e, 0x0e, 0x11},
- dt2: fp.Elt{0xe4, 0xf5, 0x03, 0xd6, 0x9e, 0xd8, 0xc0, 0x57, 0x0c, 0x20, 0xb0, 0xf0, 0x28, 0x86, 0x88, 0x12, 0xb7, 0x3b, 0x2e, 0xa0, 0x09, 0x27, 0x17, 0x53, 0x37, 0x3a, 0x69, 0xb9, 0xe0, 0x57, 0xc5, 0x05},
- },
- { /* 57P */
- addYX: fp.Elt{0xb0, 0x0e, 0xc2, 0x89, 0xb0, 0xbb, 0x76, 0xf7, 0x5c, 0xd8, 0x0f, 0xfa, 0xf6, 0x5b, 0xf8, 0x61, 0xfb, 0x21, 0x44, 0x63, 0x4e, 0x3f, 0xb9, 0xb6, 0x05, 0x12, 0x86, 0x41, 0x08, 0xef, 0x9f, 0x28},
- subYX: fp.Elt{0x6f, 0x7e, 0xc9, 0x1f, 0x31, 0xce, 0xf9, 0xd8, 0xae, 0xfd, 0xf9, 0x11, 0x30, 0x26, 0x3f, 0x7a, 0xdd, 0x25, 0xed, 0x8b, 0xa0, 0x7e, 0x5b, 0xe1, 0x5a, 0x87, 0xe9, 0x8f, 0x17, 0x4c, 0x15, 0x6e},
- dt2: fp.Elt{0xbf, 0x9a, 0xd6, 0xfe, 0x36, 0x63, 0x61, 0xcf, 0x4f, 0xc9, 0x35, 0x83, 0xe7, 0xe4, 0x16, 0x9b, 0xe7, 0x7f, 0x3a, 0x75, 0x65, 0x97, 0x78, 0x13, 0x19, 0xa3, 0x5c, 0xa9, 0x42, 0xf6, 0xfb, 0x6a},
- },
- { /* 59P */
- addYX: fp.Elt{0xcc, 0xa8, 0x13, 0xf9, 0x70, 0x50, 0xe5, 0x5d, 0x61, 0xf5, 0x0c, 0x2b, 0x7b, 0x16, 0x1d, 0x7d, 0x89, 0xd4, 0xea, 0x90, 0xb6, 0x56, 0x29, 0xda, 0xd9, 0x1e, 0x80, 0xdb, 0xce, 0x93, 0xc0, 0x12},
- subYX: fp.Elt{0xc1, 0xd2, 0xf5, 0x62, 0x0c, 0xde, 0xa8, 0x7d, 0x9a, 0x7b, 0x0e, 0xb0, 0xa4, 0x3d, 0xfc, 0x98, 0xe0, 0x70, 0xad, 0x0d, 0xda, 0x6a, 0xeb, 0x7d, 0xc4, 0x38, 0x50, 0xb9, 0x51, 0xb8, 0xb4, 0x0d},
- dt2: fp.Elt{0x0f, 0x19, 0xb8, 0x08, 0x93, 0x7f, 0x14, 0xfc, 0x10, 0xe3, 0x1a, 0xa1, 0xa0, 0x9d, 0x96, 0x06, 0xfd, 0xd7, 0xc7, 0xda, 0x72, 0x55, 0xe7, 0xce, 0xe6, 0x5c, 0x63, 0xc6, 0x99, 0x87, 0xaa, 0x33},
- },
- { /* 61P */
- addYX: fp.Elt{0xb1, 0x6c, 0x15, 0xfc, 0x88, 0xf5, 0x48, 0x83, 0x27, 0x6d, 0x0a, 0x1a, 0x9b, 0xba, 0xa2, 0x6d, 0xb6, 0x5a, 0xca, 0x87, 0x5c, 0x2d, 0x26, 0xe2, 0xa6, 0x89, 0xd5, 0xc8, 0xc1, 0xd0, 0x2c, 0x21},
- subYX: fp.Elt{0xf2, 0x5c, 0x08, 0xbd, 0x1e, 0xf5, 0x0f, 0xaf, 0x1f, 0x3f, 0xd3, 0x67, 0x89, 0x1a, 0xf5, 0x78, 0x3c, 0x03, 0x60, 0x50, 0xe1, 0xbf, 0xc2, 0x6e, 0x86, 0x1a, 0xe2, 0xe8, 0x29, 0x6f, 0x3c, 0x23},
- dt2: fp.Elt{0x81, 0xc7, 0x18, 0x7f, 0x10, 0xd5, 0xf4, 0xd2, 0x28, 0x9d, 0x7e, 0x52, 0xf2, 0xcd, 0x2e, 0x12, 0x41, 0x33, 0x3d, 0x3d, 0x2a, 0x86, 0x0a, 0xa7, 0xe3, 0x4c, 0x91, 0x11, 0x89, 0x77, 0xb7, 0x1d},
- },
- { /* 63P */
- addYX: fp.Elt{0xb6, 0x1a, 0x70, 0xdd, 0x69, 0x47, 0x39, 0xb3, 0xa5, 0x8d, 0xcf, 0x19, 0xd4, 0xde, 0xb8, 0xe2, 0x52, 0xc8, 0x2a, 0xfd, 0x61, 0x41, 0xdf, 0x15, 0xbe, 0x24, 0x7d, 0x01, 0x8a, 0xca, 0xe2, 0x7a},
- subYX: fp.Elt{0x6f, 0xc2, 0x6b, 0x7c, 0x39, 0x52, 0xf3, 0xdd, 0x13, 0x01, 0xd5, 0x53, 0xcc, 0xe2, 0x97, 0x7a, 0x30, 0xa3, 0x79, 0xbf, 0x3a, 0xf4, 0x74, 0x7c, 0xfc, 0xad, 0xe2, 0x26, 0xad, 0x97, 0xad, 0x31},
- dt2: fp.Elt{0x62, 0xb9, 0x20, 0x09, 0xed, 0x17, 0xe8, 0xb7, 0x9d, 0xda, 0x19, 0x3f, 0xcc, 0x18, 0x85, 0x1e, 0x64, 0x0a, 0x56, 0x25, 0x4f, 0xc1, 0x91, 0xe4, 0x83, 0x2c, 0x62, 0xa6, 0x53, 0xfc, 0xd1, 0x1e},
- },
-}
diff --git a/vendor/github.com/cloudflare/circl/sign/ed448/ed448.go b/vendor/github.com/cloudflare/circl/sign/ed448/ed448.go
deleted file mode 100644
index 324bd8f3346..00000000000
--- a/vendor/github.com/cloudflare/circl/sign/ed448/ed448.go
+++ /dev/null
@@ -1,411 +0,0 @@
-// Package ed448 implements Ed448 signature scheme as described in RFC-8032.
-//
-// This package implements two signature variants.
-//
-// | Scheme Name | Sign Function | Verification | Context |
-// |-------------|-------------------|---------------|-------------------|
-// | Ed448 | Sign | Verify | Yes, can be empty |
-// | Ed448Ph | SignPh | VerifyPh | Yes, can be empty |
-// | All above | (PrivateKey).Sign | VerifyAny | As above |
-//
-// Specific functions for sign and verify are defined. A generic signing
-// function for all schemes is available through the crypto.Signer interface,
-// which is implemented by the PrivateKey type. A correspond all-in-one
-// verification method is provided by the VerifyAny function.
-//
-// Both schemes require a context string for domain separation. This parameter
-// is passed using a SignerOptions struct defined in this package.
-//
-// References:
-//
-// - RFC8032: https://rfc-editor.org/rfc/rfc8032.txt
-// - EdDSA for more curves: https://eprint.iacr.org/2015/677
-// - High-speed high-security signatures: https://doi.org/10.1007/s13389-012-0027-1
-package ed448
-
-import (
- "bytes"
- "crypto"
- cryptoRand "crypto/rand"
- "crypto/subtle"
- "errors"
- "fmt"
- "io"
- "strconv"
-
- "github.com/cloudflare/circl/ecc/goldilocks"
- "github.com/cloudflare/circl/internal/sha3"
- "github.com/cloudflare/circl/sign"
-)
-
-const (
- // ContextMaxSize is the maximum length (in bytes) allowed for context.
- ContextMaxSize = 255
- // PublicKeySize is the length in bytes of Ed448 public keys.
- PublicKeySize = 57
- // PrivateKeySize is the length in bytes of Ed448 private keys.
- PrivateKeySize = 114
- // SignatureSize is the length in bytes of signatures.
- SignatureSize = 114
- // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
- SeedSize = 57
-)
-
-const (
- paramB = 456 / 8 // Size of keys in bytes.
- hashSize = 2 * paramB // Size of the hash function's output.
-)
-
-// SignerOptions implements crypto.SignerOpts and augments with parameters
-// that are specific to the Ed448 signature schemes.
-type SignerOptions struct {
- // Hash must be crypto.Hash(0) for both Ed448 and Ed448Ph.
- crypto.Hash
-
- // Context is an optional domain separation string for signing.
- // Its length must be less or equal than 255 bytes.
- Context string
-
- // Scheme is an identifier for choosing a signature scheme.
- Scheme SchemeID
-}
-
-// SchemeID is an identifier for each signature scheme.
-type SchemeID uint
-
-const (
- ED448 SchemeID = iota
- ED448Ph
-)
-
-// PublicKey is the type of Ed448 public keys.
-type PublicKey []byte
-
-// Equal reports whether pub and x have the same value.
-func (pub PublicKey) Equal(x crypto.PublicKey) bool {
- xx, ok := x.(PublicKey)
- return ok && bytes.Equal(pub, xx)
-}
-
-// PrivateKey is the type of Ed448 private keys. It implements crypto.Signer.
-type PrivateKey []byte
-
-// Equal reports whether priv and x have the same value.
-func (priv PrivateKey) Equal(x crypto.PrivateKey) bool {
- xx, ok := x.(PrivateKey)
- return ok && subtle.ConstantTimeCompare(priv, xx) == 1
-}
-
-// Public returns the PublicKey corresponding to priv.
-func (priv PrivateKey) Public() crypto.PublicKey {
- publicKey := make([]byte, PublicKeySize)
- copy(publicKey, priv[SeedSize:])
- return PublicKey(publicKey)
-}
-
-// Seed returns the private key seed corresponding to priv. It is provided for
-// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds
-// in this package.
-func (priv PrivateKey) Seed() []byte {
- seed := make([]byte, SeedSize)
- copy(seed, priv[:SeedSize])
- return seed
-}
-
-func (priv PrivateKey) Scheme() sign.Scheme { return sch }
-
-func (pub PublicKey) Scheme() sign.Scheme { return sch }
-
-func (priv PrivateKey) MarshalBinary() (data []byte, err error) {
- privateKey := make(PrivateKey, PrivateKeySize)
- copy(privateKey, priv)
- return privateKey, nil
-}
-
-func (pub PublicKey) MarshalBinary() (data []byte, err error) {
- publicKey := make(PublicKey, PublicKeySize)
- copy(publicKey, pub)
- return publicKey, nil
-}
-
-// Sign creates a signature of a message given a key pair.
-// This function supports all the two signature variants defined in RFC-8032,
-// namely Ed448 (or pure EdDSA) and Ed448Ph.
-// The opts.HashFunc() must return zero to the specify Ed448 variant. This can
-// be achieved by passing crypto.Hash(0) as the value for opts.
-// Use an Options struct to pass a bool indicating that the ed448Ph variant
-// should be used.
-// The struct can also be optionally used to pass a context string for signing.
-func (priv PrivateKey) Sign(
- rand io.Reader,
- message []byte,
- opts crypto.SignerOpts,
-) (signature []byte, err error) {
- var ctx string
- var scheme SchemeID
-
- if o, ok := opts.(SignerOptions); ok {
- ctx = o.Context
- scheme = o.Scheme
- }
-
- switch true {
- case scheme == ED448 && opts.HashFunc() == crypto.Hash(0):
- return Sign(priv, message, ctx), nil
- case scheme == ED448Ph && opts.HashFunc() == crypto.Hash(0):
- return SignPh(priv, message, ctx), nil
- default:
- return nil, errors.New("ed448: bad hash algorithm")
- }
-}
-
-// GenerateKey generates a public/private key pair using entropy from rand.
-// If rand is nil, crypto/rand.Reader will be used.
-func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
- if rand == nil {
- rand = cryptoRand.Reader
- }
-
- seed := make(PrivateKey, SeedSize)
- if _, err := io.ReadFull(rand, seed); err != nil {
- return nil, nil, err
- }
-
- privateKey := NewKeyFromSeed(seed)
- publicKey := make([]byte, PublicKeySize)
- copy(publicKey, privateKey[SeedSize:])
-
- return publicKey, privateKey, nil
-}
-
-// NewKeyFromSeed calculates a private key from a seed. It will panic if
-// len(seed) is not SeedSize. This function is provided for interoperability
-// with RFC 8032. RFC 8032's private keys correspond to seeds in this
-// package.
-func NewKeyFromSeed(seed []byte) PrivateKey {
- privateKey := make([]byte, PrivateKeySize)
- newKeyFromSeed(privateKey, seed)
- return privateKey
-}
-
-func newKeyFromSeed(privateKey, seed []byte) {
- if l := len(seed); l != SeedSize {
- panic("ed448: bad seed length: " + strconv.Itoa(l))
- }
-
- var h [hashSize]byte
- H := sha3.NewShake256()
- _, _ = H.Write(seed)
- _, _ = H.Read(h[:])
- s := &goldilocks.Scalar{}
- deriveSecretScalar(s, h[:paramB])
-
- copy(privateKey[:SeedSize], seed)
- _ = goldilocks.Curve{}.ScalarBaseMult(s).ToBytes(privateKey[SeedSize:])
-}
-
-func signAll(signature []byte, privateKey PrivateKey, message, ctx []byte, preHash bool) {
- if len(ctx) > ContextMaxSize {
- panic(fmt.Errorf("ed448: bad context length: " + strconv.Itoa(len(ctx))))
- }
-
- H := sha3.NewShake256()
- var PHM []byte
-
- if preHash {
- var h [64]byte
- _, _ = H.Write(message)
- _, _ = H.Read(h[:])
- PHM = h[:]
- H.Reset()
- } else {
- PHM = message
- }
-
- // 1. Hash the 57-byte private key using SHAKE256(x, 114).
- var h [hashSize]byte
- _, _ = H.Write(privateKey[:SeedSize])
- _, _ = H.Read(h[:])
- s := &goldilocks.Scalar{}
- deriveSecretScalar(s, h[:paramB])
- prefix := h[paramB:]
-
- // 2. Compute SHAKE256(dom4(F, C) || prefix || PH(M), 114).
- var rPM [hashSize]byte
- H.Reset()
-
- writeDom(&H, ctx, preHash)
-
- _, _ = H.Write(prefix)
- _, _ = H.Write(PHM)
- _, _ = H.Read(rPM[:])
-
- // 3. Compute the point [r]B.
- r := &goldilocks.Scalar{}
- r.FromBytes(rPM[:])
- R := (&[paramB]byte{})[:]
- if err := (goldilocks.Curve{}.ScalarBaseMult(r).ToBytes(R)); err != nil {
- panic(err)
- }
- // 4. Compute SHAKE256(dom4(F, C) || R || A || PH(M), 114)
- var hRAM [hashSize]byte
- H.Reset()
-
- writeDom(&H, ctx, preHash)
-
- _, _ = H.Write(R)
- _, _ = H.Write(privateKey[SeedSize:])
- _, _ = H.Write(PHM)
- _, _ = H.Read(hRAM[:])
-
- // 5. Compute S = (r + k * s) mod order.
- k := &goldilocks.Scalar{}
- k.FromBytes(hRAM[:])
- S := &goldilocks.Scalar{}
- S.Mul(k, s)
- S.Add(S, r)
-
- // 6. The signature is the concatenation of R and S.
- copy(signature[:paramB], R[:])
- copy(signature[paramB:], S[:])
-}
-
-// Sign signs the message with privateKey and returns a signature.
-// This function supports the signature variant defined in RFC-8032: Ed448,
-// also known as the pure version of EdDSA.
-// It will panic if len(privateKey) is not PrivateKeySize.
-func Sign(priv PrivateKey, message []byte, ctx string) []byte {
- signature := make([]byte, SignatureSize)
- signAll(signature, priv, message, []byte(ctx), false)
- return signature
-}
-
-// SignPh creates a signature of a message given a keypair.
-// This function supports the signature variant defined in RFC-8032: Ed448ph,
-// meaning it internally hashes the message using SHAKE-256.
-// Context could be passed to this function, which length should be no more than
-// 255. It can be empty.
-func SignPh(priv PrivateKey, message []byte, ctx string) []byte {
- signature := make([]byte, SignatureSize)
- signAll(signature, priv, message, []byte(ctx), true)
- return signature
-}
-
-func verify(public PublicKey, message, signature, ctx []byte, preHash bool) bool {
- if len(public) != PublicKeySize ||
- len(signature) != SignatureSize ||
- len(ctx) > ContextMaxSize ||
- !isLessThanOrder(signature[paramB:]) {
- return false
- }
-
- P, err := goldilocks.FromBytes(public)
- if err != nil {
- return false
- }
-
- H := sha3.NewShake256()
- var PHM []byte
-
- if preHash {
- var h [64]byte
- _, _ = H.Write(message)
- _, _ = H.Read(h[:])
- PHM = h[:]
- H.Reset()
- } else {
- PHM = message
- }
-
- var hRAM [hashSize]byte
- R := signature[:paramB]
-
- writeDom(&H, ctx, preHash)
-
- _, _ = H.Write(R)
- _, _ = H.Write(public)
- _, _ = H.Write(PHM)
- _, _ = H.Read(hRAM[:])
-
- k := &goldilocks.Scalar{}
- k.FromBytes(hRAM[:])
- S := &goldilocks.Scalar{}
- S.FromBytes(signature[paramB:])
-
- encR := (&[paramB]byte{})[:]
- P.Neg()
- _ = goldilocks.Curve{}.CombinedMult(S, k, P).ToBytes(encR)
- return bytes.Equal(R, encR)
-}
-
-// VerifyAny returns true if the signature is valid. Failure cases are invalid
-// signature, or when the public key cannot be decoded.
-// This function supports all the two signature variants defined in RFC-8032,
-// namely Ed448 (or pure EdDSA) and Ed448Ph.
-// The opts.HashFunc() must return zero, this can be achieved by passing
-// crypto.Hash(0) as the value for opts.
-// Use a SignerOptions struct to pass a context string for signing.
-func VerifyAny(public PublicKey, message, signature []byte, opts crypto.SignerOpts) bool {
- var ctx string
- var scheme SchemeID
- if o, ok := opts.(SignerOptions); ok {
- ctx = o.Context
- scheme = o.Scheme
- }
-
- switch true {
- case scheme == ED448 && opts.HashFunc() == crypto.Hash(0):
- return Verify(public, message, signature, ctx)
- case scheme == ED448Ph && opts.HashFunc() == crypto.Hash(0):
- return VerifyPh(public, message, signature, ctx)
- default:
- return false
- }
-}
-
-// Verify returns true if the signature is valid. Failure cases are invalid
-// signature, or when the public key cannot be decoded.
-// This function supports the signature variant defined in RFC-8032: Ed448,
-// also known as the pure version of EdDSA.
-func Verify(public PublicKey, message, signature []byte, ctx string) bool {
- return verify(public, message, signature, []byte(ctx), false)
-}
-
-// VerifyPh returns true if the signature is valid. Failure cases are invalid
-// signature, or when the public key cannot be decoded.
-// This function supports the signature variant defined in RFC-8032: Ed448ph,
-// meaning it internally hashes the message using SHAKE-256.
-// Context could be passed to this function, which length should be no more than
-// 255. It can be empty.
-func VerifyPh(public PublicKey, message, signature []byte, ctx string) bool {
- return verify(public, message, signature, []byte(ctx), true)
-}
-
-func deriveSecretScalar(s *goldilocks.Scalar, h []byte) {
- h[0] &= 0xFC // The two least significant bits of the first octet are cleared,
- h[paramB-1] = 0x00 // all eight bits the last octet are cleared, and
- h[paramB-2] |= 0x80 // the highest bit of the second to last octet is set.
- s.FromBytes(h[:paramB])
-}
-
-// isLessThanOrder returns true if 0 <= x < order and if the last byte of x is zero.
-func isLessThanOrder(x []byte) bool {
- order := goldilocks.Curve{}.Order()
- i := len(order) - 1
- for i > 0 && x[i] == order[i] {
- i--
- }
- return x[paramB-1] == 0 && x[i] < order[i]
-}
-
-func writeDom(h io.Writer, ctx []byte, preHash bool) {
- dom4 := "SigEd448"
- _, _ = h.Write([]byte(dom4))
-
- if preHash {
- _, _ = h.Write([]byte{byte(0x01), byte(len(ctx))})
- } else {
- _, _ = h.Write([]byte{byte(0x00), byte(len(ctx))})
- }
- _, _ = h.Write(ctx)
-}
diff --git a/vendor/github.com/cloudflare/circl/sign/ed448/signapi.go b/vendor/github.com/cloudflare/circl/sign/ed448/signapi.go
deleted file mode 100644
index 22da8bc0a57..00000000000
--- a/vendor/github.com/cloudflare/circl/sign/ed448/signapi.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package ed448
-
-import (
- "crypto/rand"
- "encoding/asn1"
-
- "github.com/cloudflare/circl/sign"
-)
-
-var sch sign.Scheme = &scheme{}
-
-// Scheme returns a signature interface.
-func Scheme() sign.Scheme { return sch }
-
-type scheme struct{}
-
-func (*scheme) Name() string { return "Ed448" }
-func (*scheme) PublicKeySize() int { return PublicKeySize }
-func (*scheme) PrivateKeySize() int { return PrivateKeySize }
-func (*scheme) SignatureSize() int { return SignatureSize }
-func (*scheme) SeedSize() int { return SeedSize }
-func (*scheme) TLSIdentifier() uint { return 0x0808 }
-func (*scheme) SupportsContext() bool { return true }
-func (*scheme) Oid() asn1.ObjectIdentifier {
- return asn1.ObjectIdentifier{1, 3, 101, 113}
-}
-
-func (*scheme) GenerateKey() (sign.PublicKey, sign.PrivateKey, error) {
- return GenerateKey(rand.Reader)
-}
-
-func (*scheme) Sign(
- sk sign.PrivateKey,
- message []byte,
- opts *sign.SignatureOpts,
-) []byte {
- priv, ok := sk.(PrivateKey)
- if !ok {
- panic(sign.ErrTypeMismatch)
- }
- ctx := ""
- if opts != nil {
- ctx = opts.Context
- }
- return Sign(priv, message, ctx)
-}
-
-func (*scheme) Verify(
- pk sign.PublicKey,
- message, signature []byte,
- opts *sign.SignatureOpts,
-) bool {
- pub, ok := pk.(PublicKey)
- if !ok {
- panic(sign.ErrTypeMismatch)
- }
- ctx := ""
- if opts != nil {
- ctx = opts.Context
- }
- return Verify(pub, message, signature, ctx)
-}
-
-func (*scheme) DeriveKey(seed []byte) (sign.PublicKey, sign.PrivateKey) {
- privateKey := NewKeyFromSeed(seed)
- publicKey := make(PublicKey, PublicKeySize)
- copy(publicKey, privateKey[SeedSize:])
- return publicKey, privateKey
-}
-
-func (*scheme) UnmarshalBinaryPublicKey(buf []byte) (sign.PublicKey, error) {
- if len(buf) < PublicKeySize {
- return nil, sign.ErrPubKeySize
- }
- pub := make(PublicKey, PublicKeySize)
- copy(pub, buf[:PublicKeySize])
- return pub, nil
-}
-
-func (*scheme) UnmarshalBinaryPrivateKey(buf []byte) (sign.PrivateKey, error) {
- if len(buf) < PrivateKeySize {
- return nil, sign.ErrPrivKeySize
- }
- priv := make(PrivateKey, PrivateKeySize)
- copy(priv, buf[:PrivateKeySize])
- return priv, nil
-}
diff --git a/vendor/github.com/cloudflare/circl/sign/sign.go b/vendor/github.com/cloudflare/circl/sign/sign.go
deleted file mode 100644
index 13b20fa4b04..00000000000
--- a/vendor/github.com/cloudflare/circl/sign/sign.go
+++ /dev/null
@@ -1,110 +0,0 @@
-// Package sign provides unified interfaces for signature schemes.
-//
-// A register of schemes is available in the package
-//
-// github.com/cloudflare/circl/sign/schemes
-package sign
-
-import (
- "crypto"
- "encoding"
- "errors"
-)
-
-type SignatureOpts struct {
- // If non-empty, includes the given context in the signature if supported
- // and will cause an error during signing otherwise.
- Context string
-}
-
-// A public key is used to verify a signature set by the corresponding private
-// key.
-type PublicKey interface {
- // Returns the signature scheme for this public key.
- Scheme() Scheme
- Equal(crypto.PublicKey) bool
- encoding.BinaryMarshaler
- crypto.PublicKey
-}
-
-// A private key allows one to create signatures.
-type PrivateKey interface {
- // Returns the signature scheme for this private key.
- Scheme() Scheme
- Equal(crypto.PrivateKey) bool
- // For compatibility with Go standard library
- crypto.Signer
- crypto.PrivateKey
- encoding.BinaryMarshaler
-}
-
-// A Scheme represents a specific instance of a signature scheme.
-type Scheme interface {
- // Name of the scheme.
- Name() string
-
- // GenerateKey creates a new key-pair.
- GenerateKey() (PublicKey, PrivateKey, error)
-
- // Creates a signature using the PrivateKey on the given message and
- // returns the signature. opts are additional options which can be nil.
- //
- // Panics if key is nil or wrong type or opts context is not supported.
- Sign(sk PrivateKey, message []byte, opts *SignatureOpts) []byte
-
- // Checks whether the given signature is a valid signature set by
- // the private key corresponding to the given public key on the
- // given message. opts are additional options which can be nil.
- //
- // Panics if key is nil or wrong type or opts context is not supported.
- Verify(pk PublicKey, message []byte, signature []byte, opts *SignatureOpts) bool
-
- // Deterministically derives a keypair from a seed. If you're unsure,
- // you're better off using GenerateKey().
- //
- // Panics if seed is not of length SeedSize().
- DeriveKey(seed []byte) (PublicKey, PrivateKey)
-
- // Unmarshals a PublicKey from the provided buffer.
- UnmarshalBinaryPublicKey([]byte) (PublicKey, error)
-
- // Unmarshals a PublicKey from the provided buffer.
- UnmarshalBinaryPrivateKey([]byte) (PrivateKey, error)
-
- // Size of binary marshalled public keys.
- PublicKeySize() int
-
- // Size of binary marshalled public keys.
- PrivateKeySize() int
-
- // Size of signatures.
- SignatureSize() int
-
- // Size of seeds.
- SeedSize() int
-
- // Returns whether contexts are supported.
- SupportsContext() bool
-}
-
-var (
- // ErrTypeMismatch is the error used if types of, for instance, private
- // and public keys don't match.
- ErrTypeMismatch = errors.New("types mismatch")
-
- // ErrSeedSize is the error used if the provided seed is of the wrong
- // size.
- ErrSeedSize = errors.New("wrong seed size")
-
- // ErrPubKeySize is the error used if the provided public key is of
- // the wrong size.
- ErrPubKeySize = errors.New("wrong size for public key")
-
- // ErrPrivKeySize is the error used if the provided private key is of
- // the wrong size.
- ErrPrivKeySize = errors.New("wrong size for private key")
-
- // ErrContextNotSupported is the error used if a context is not
- // supported.
- ErrContextNotSupported = errors.New("context not supported")
-)
diff --git a/vendor/github.com/cyphar/filepath-securejoin/LICENSE b/vendor/github.com/cyphar/filepath-securejoin/LICENSE
deleted file mode 100644
index bec842f294f..00000000000
--- a/vendor/github.com/cyphar/filepath-securejoin/LICENSE
+++ /dev/null
@@ -1,28 +0,0 @@
-Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved.
-Copyright (C) 2017 SUSE LLC. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/cyphar/filepath-securejoin/README.md b/vendor/github.com/cyphar/filepath-securejoin/README.md
deleted file mode 100644
index 4eca0f23550..00000000000
--- a/vendor/github.com/cyphar/filepath-securejoin/README.md
+++ /dev/null
@@ -1,79 +0,0 @@
-## `filepath-securejoin` ##
-
-[](https://github.com/cyphar/filepath-securejoin/actions/workflows/ci.yml)
-
-An implementation of `SecureJoin`, a [candidate for inclusion in the Go
-standard library][go#20126]. The purpose of this function is to be a "secure"
-alternative to `filepath.Join`, and in particular it provides certain
-guarantees that are not provided by `filepath.Join`.
-
-> **NOTE**: This code is *only* safe if you are not at risk of other processes
-> modifying path components after you've used `SecureJoin`. If it is possible
-> for a malicious process to modify path components of the resolved path, then
-> you will be vulnerable to some fairly trivial TOCTOU race conditions. [There
-> are some Linux kernel patches I'm working on which might allow for a better
-> solution.][lwn-obeneath]
->
-> In addition, with a slightly modified API it might be possible to use
-> `O_PATH` and verify that the opened path is actually the resolved one -- but
-> I have not done that yet. I might add it in the future as a helper function
-> to help users verify the path (we can't just return `/proc/self/fd/`
-> because that doesn't always work transparently for all users).
-
-This is the function prototype:
-
-```go
-func SecureJoin(root, unsafePath string) (string, error)
-```
-
-This library **guarantees** the following:
-
-* If no error is set, the resulting string **must** be a child path of
- `root` and will not contain any symlink path components (they will all be
- expanded).
-
-* When expanding symlinks, all symlink path components **must** be resolved
- relative to the provided root. In particular, this can be considered a
- userspace implementation of how `chroot(2)` operates on file paths. Note that
- these symlinks will **not** be expanded lexically (`filepath.Clean` is not
- called on the input before processing).
-
-* Non-existent path components are unaffected by `SecureJoin` (similar to
- `filepath.EvalSymlinks`'s semantics).
-
-* The returned path will always be `filepath.Clean`ed and thus not contain any
- `..` components.
-
-A (trivial) implementation of this function on GNU/Linux systems could be done
-with the following (note that this requires root privileges and is far more
-opaque than the implementation in this library, and also requires that
-`readlink` is inside the `root` path):
-
-```go
-package securejoin
-
-import (
- "os/exec"
- "path/filepath"
-)
-
-func SecureJoin(root, unsafePath string) (string, error) {
- unsafePath = string(filepath.Separator) + unsafePath
- cmd := exec.Command("chroot", root,
- "readlink", "--canonicalize-missing", "--no-newline", unsafePath)
- output, err := cmd.CombinedOutput()
- if err != nil {
- return "", err
- }
- expanded := string(output)
- return filepath.Join(root, expanded), nil
-}
-```
-
-[lwn-obeneath]: https://lwn.net/Articles/767547/
-[go#20126]: https://github.com/golang/go/issues/20126
-
-### License ###
-
-The license of this project is the same as Go, which is a BSD 3-clause license
-available in the `LICENSE` file.
diff --git a/vendor/github.com/cyphar/filepath-securejoin/VERSION b/vendor/github.com/cyphar/filepath-securejoin/VERSION
deleted file mode 100644
index 3a4036fb450..00000000000
--- a/vendor/github.com/cyphar/filepath-securejoin/VERSION
+++ /dev/null
@@ -1 +0,0 @@
-0.2.5
diff --git a/vendor/github.com/cyphar/filepath-securejoin/join.go b/vendor/github.com/cyphar/filepath-securejoin/join.go
deleted file mode 100644
index 5ac23b99831..00000000000
--- a/vendor/github.com/cyphar/filepath-securejoin/join.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved.
-// Copyright (C) 2017 SUSE LLC. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package securejoin is an implementation of the hopefully-soon-to-be-included
-// SecureJoin helper that is meant to be part of the "path/filepath" package.
-// The purpose of this project is to provide a PoC implementation to make the
-// SecureJoin proposal (https://github.com/golang/go/issues/20126) more
-// tangible.
-package securejoin
-
-import (
- "errors"
- "os"
- "path/filepath"
- "strings"
- "syscall"
-)
-
-const maxSymlinkLimit = 255
-
-// IsNotExist tells you if err is an error that implies that either the path
-// accessed does not exist (or path components don't exist). This is
-// effectively a more broad version of os.IsNotExist.
-func IsNotExist(err error) bool {
- // Check that it's not actually an ENOTDIR, which in some cases is a more
- // convoluted case of ENOENT (usually involving weird paths).
- return errors.Is(err, os.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) || errors.Is(err, syscall.ENOENT)
-}
-
-// SecureJoinVFS joins the two given path components (similar to Join) except
-// that the returned path is guaranteed to be scoped inside the provided root
-// path (when evaluated). Any symbolic links in the path are evaluated with the
-// given root treated as the root of the filesystem, similar to a chroot. The
-// filesystem state is evaluated through the given VFS interface (if nil, the
-// standard os.* family of functions are used).
-//
-// Note that the guarantees provided by this function only apply if the path
-// components in the returned string are not modified (in other words are not
-// replaced with symlinks on the filesystem) after this function has returned.
-// Such a symlink race is necessarily out-of-scope of SecureJoin.
-//
-// Volume names in unsafePath are always discarded, regardless if they are
-// provided via direct input or when evaluating symlinks. Therefore:
-//
-// "C:\Temp" + "D:\path\to\file.txt" results in "C:\Temp\path\to\file.txt"
-func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) {
- // Use the os.* VFS implementation if none was specified.
- if vfs == nil {
- vfs = osVFS{}
- }
-
- unsafePath = filepath.FromSlash(unsafePath)
- var (
- currentPath string
- remainingPath = unsafePath
- linksWalked int
- )
- for remainingPath != "" {
- if v := filepath.VolumeName(remainingPath); v != "" {
- remainingPath = remainingPath[len(v):]
- }
-
- // Get the next path component.
- var part string
- if i := strings.IndexRune(remainingPath, filepath.Separator); i == -1 {
- part, remainingPath = remainingPath, ""
- } else {
- part, remainingPath = remainingPath[:i], remainingPath[i+1:]
- }
-
- // Apply the component lexically to the path we are building.
- // currentPath does not contain any symlinks, and we are lexically
- // dealing with a single component, so it's okay to do a filepath.Clean
- // here.
- nextPath := filepath.Join(string(filepath.Separator), currentPath, part)
- if nextPath == string(filepath.Separator) {
- currentPath = ""
- continue
- }
- fullPath := root + string(filepath.Separator) + nextPath
-
- // Figure out whether the path is a symlink.
- fi, err := vfs.Lstat(fullPath)
- if err != nil && !IsNotExist(err) {
- return "", err
- }
- // Treat non-existent path components the same as non-symlinks (we
- // can't do any better here).
- if IsNotExist(err) || fi.Mode()&os.ModeSymlink == 0 {
- currentPath = nextPath
- continue
- }
-
- // It's a symlink, so get its contents and expand it by prepending it
- // to the yet-unparsed path.
- linksWalked++
- if linksWalked > maxSymlinkLimit {
- return "", &os.PathError{Op: "SecureJoin", Path: root + string(filepath.Separator) + unsafePath, Err: syscall.ELOOP}
- }
-
- dest, err := vfs.Readlink(fullPath)
- if err != nil {
- return "", err
- }
- remainingPath = dest + string(filepath.Separator) + remainingPath
- // Absolute symlinks reset any work we've already done.
- if filepath.IsAbs(dest) {
- currentPath = ""
- }
- }
-
- // There should be no lexical components like ".." left in the path here,
- // but for safety clean up the path before joining it to the root.
- finalPath := filepath.Join(string(filepath.Separator), currentPath)
- return filepath.Join(root, finalPath), nil
-}
-
-// SecureJoin is a wrapper around SecureJoinVFS that just uses the os.* library
-// of functions as the VFS. If in doubt, use this function over SecureJoinVFS.
-func SecureJoin(root, unsafePath string) (string, error) {
- return SecureJoinVFS(root, unsafePath, nil)
-}
diff --git a/vendor/github.com/cyphar/filepath-securejoin/vfs.go b/vendor/github.com/cyphar/filepath-securejoin/vfs.go
deleted file mode 100644
index a82a5eae11e..00000000000
--- a/vendor/github.com/cyphar/filepath-securejoin/vfs.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright (C) 2017 SUSE LLC. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package securejoin
-
-import "os"
-
-// In future this should be moved into a separate package, because now there
-// are several projects (umoci and go-mtree) that are using this sort of
-// interface.
-
-// VFS is the minimal interface necessary to use SecureJoinVFS. A nil VFS is
-// equivalent to using the standard os.* family of functions. This is mainly
-// used for the purposes of mock testing, but also can be used to otherwise use
-// SecureJoin with VFS-like system.
-type VFS interface {
- // Lstat returns a FileInfo describing the named file. If the file is a
- // symbolic link, the returned FileInfo describes the symbolic link. Lstat
- // makes no attempt to follow the link. These semantics are identical to
- // os.Lstat.
- Lstat(name string) (os.FileInfo, error)
-
- // Readlink returns the destination of the named symbolic link. These
- // semantics are identical to os.Readlink.
- Readlink(name string) (string, error)
-}
-
-// osVFS is the "nil" VFS, in that it just passes everything through to the os
-// module.
-type osVFS struct{}
-
-// Lstat returns a FileInfo describing the named file. If the file is a
-// symbolic link, the returned FileInfo describes the symbolic link. Lstat
-// makes no attempt to follow the link. These semantics are identical to
-// os.Lstat.
-func (o osVFS) Lstat(name string) (os.FileInfo, error) { return os.Lstat(name) }
-
-// Readlink returns the destination of the named symbolic link. These
-// semantics are identical to os.Readlink.
-func (o osVFS) Readlink(name string) (string, error) { return os.Readlink(name) }
diff --git a/vendor/github.com/diskfs/go-diskfs/.golangci.yml b/vendor/github.com/diskfs/go-diskfs/.golangci.yml
index 2ec29ec289d..5434216b268 100644
--- a/vendor/github.com/diskfs/go-diskfs/.golangci.yml
+++ b/vendor/github.com/diskfs/go-diskfs/.golangci.yml
@@ -21,7 +21,6 @@ linters:
disable-all: true
enable:
- bodyclose
- - depguard
- dogsled
- dupl
- errcheck
@@ -49,9 +48,6 @@ linters:
- unconvert
- unparam
- whitespace
- # - wsl # лишние пустые строки и т.д., чистый стиль
- # - goconst # проверка на наличие переменных, которых следовало бы вынести в const
- # - gomnd # поиск всяких "магических" чисел, переменных
run:
issues-exit-code: 1
diff --git a/vendor/github.com/diskfs/go-diskfs/Makefile b/vendor/github.com/diskfs/go-diskfs/Makefile
index 7aacf67c928..24bbc2f446a 100644
--- a/vendor/github.com/diskfs/go-diskfs/Makefile
+++ b/vendor/github.com/diskfs/go-diskfs/Makefile
@@ -6,7 +6,7 @@ GOENV ?= GO111MODULE=on CGO_ENABLED=0
GO_FILES ?= $(shell $(GOENV) go list ./...)
GOBIN ?= $(shell go env GOPATH)/bin
LINTER ?= $(GOBIN)/golangci-lint
-LINTER_VERSION ?= v1.51.2
+LINTER_VERSION ?= v1.55.2
# BUILDARCH is the host architecture
# ARCH is the target architecture
diff --git a/vendor/github.com/diskfs/go-diskfs/README.md b/vendor/github.com/diskfs/go-diskfs/README.md
index aa290f7b7a4..e230077b823 100644
--- a/vendor/github.com/diskfs/go-diskfs/README.md
+++ b/vendor/github.com/diskfs/go-diskfs/README.md
@@ -85,49 +85,7 @@ Some filesystem types are intended to be created once, after which they are read
### Example
-There are examples in the [examples/](./examples/) directory. Here is one to get you started.
-
-The following example will create a fully bootable EFI disk image. It assumes you have a bootable EFI file (any modern Linux kernel compiled with `CONFIG_EFI_STUB=y` will work) available.
-
-```go
-import diskfs "github.com/diskfs/go-diskfs"
-
-espSize int := 100*1024*1024 // 100 MB
-diskSize int := espSize + 4*1024*1024 // 104 MB
-
-
-// create a disk image
-diskImg := "/tmp/disk.img"
-disk := diskfs.Create(diskImg, diskSize, diskfs.Raw, diskfs.SectorSizeDefault)
-// create a partition table
-blkSize int := 512
-partitionSectors int := espSize / blkSize
-partitionStart int := 2048
-partitionEnd int := partitionSectors - partitionStart + 1
-table := PartitionTable{
- type: partition.GPT,
- partitions:[
- Partition{Start: partitionStart, End: partitionEnd, Type: partition.EFISystemPartition, Name: "EFI System"}
- ]
-}
-// apply the partition table
-err = disk.Partition(table)
-
-
-/*
- * create an ESP partition with some contents
- */
-kernel, err := os.ReadFile("/some/kernel/file")
-
-fs, err := disk.CreateFilesystem(0, diskfs.TypeFat32)
-
-// make our directories
-err = fs.Mkdir("/EFI/BOOT")
-rw, err := fs.OpenFile("/EFI/BOOT/BOOTX64.EFI", os.O_CREATE|os.O_RDRWR)
-
-err = rw.Write(kernel)
-
-```
+There are examples in the [examples/](./examples/) directory. See for example how to [create a fully bootable EFI disk image](./examples/efi_create.go).
## Tests
There are two ways to run tests: unit and integration (somewhat loosely defined).
@@ -151,7 +109,6 @@ cat $PWD/foo.img | docker run -i --rm $INT_IMAGE mdir -i /file.img /abc
Future plans are to add the following:
* embed boot code in `mbr` e.g. `altmbr.bin` (no need for `gpt` since an ESP with `/EFI/BOOT/BOOT.EFI` will boot)
-* `ext4` filesystem
* `Joliet` extensions to `iso9660`
* `Rock Ridge` sparse file support - supports the flag, but not yet reading or writing
* `squashfs` sparse file support - currently treats sparse files as regular files
diff --git a/vendor/github.com/diskfs/go-diskfs/disk/disk.go b/vendor/github.com/diskfs/go-diskfs/disk/disk.go
index 08c7dfe11fa..796565b864b 100644
--- a/vendor/github.com/diskfs/go-diskfs/disk/disk.go
+++ b/vendor/github.com/diskfs/go-diskfs/disk/disk.go
@@ -13,6 +13,7 @@ import (
log "github.com/sirupsen/logrus"
"github.com/diskfs/go-diskfs/filesystem"
+ "github.com/diskfs/go-diskfs/filesystem/ext4"
"github.com/diskfs/go-diskfs/filesystem/fat32"
"github.com/diskfs/go-diskfs/filesystem/iso9660"
"github.com/diskfs/go-diskfs/filesystem/squashfs"
@@ -185,6 +186,8 @@ func (d *Disk) CreateFilesystem(spec FilesystemSpec) (filesystem.FileSystem, err
return fat32.Create(d.File, size, start, d.LogicalBlocksize, spec.VolumeLabel)
case filesystem.TypeISO9660:
return iso9660.Create(d.File, size, start, d.LogicalBlocksize, spec.WorkDir)
+ case filesystem.TypeExt4:
+ return ext4.Create(d.File, size, start, d.LogicalBlocksize, nil)
case filesystem.TypeSquashfs:
return nil, errors.New("squashfs is a read-only filesystem")
default:
@@ -244,5 +247,20 @@ func (d *Disk) GetFilesystem(part int) (filesystem.FileSystem, error) {
if err == nil {
return squashFS, nil
}
+ log.Debug("trying ext4")
+ ext4FS, err := ext4.Read(d.File, size, start, d.LogicalBlocksize)
+ if err == nil {
+ return ext4FS, nil
+ }
+ log.Debugf("ext4 failed: %v", err)
return nil, fmt.Errorf("unknown filesystem on partition %d", part)
}
+
+// Close the disk. Once successfully closed, it can no longer be used.
+func (d *Disk) Close() error {
+ if err := d.File.Close(); err != nil {
+ return err
+ }
+ *d = Disk{}
+ return nil
+}
diff --git a/vendor/github.com/diskfs/go-diskfs/disk/disk_wasip1.go b/vendor/github.com/diskfs/go-diskfs/disk/disk_wasip1.go
new file mode 100644
index 00000000000..6a290d1999a
--- /dev/null
+++ b/vendor/github.com/diskfs/go-diskfs/disk/disk_wasip1.go
@@ -0,0 +1,10 @@
+//go:build wasip1
+// +build wasip1
+
+package disk
+
+import "errors"
+
+func (d *Disk) ReReadPartitionTable() error {
+ return errors.New("not implemented")
+}
diff --git a/vendor/github.com/diskfs/go-diskfs/diskfs.go b/vendor/github.com/diskfs/go-diskfs/diskfs.go
index f2e39cf2c9e..f0952a09650 100644
--- a/vendor/github.com/diskfs/go-diskfs/diskfs.go
+++ b/vendor/github.com/diskfs/go-diskfs/diskfs.go
@@ -335,7 +335,7 @@ func Open(device string, opts ...OpenOpt) (*disk.Disk, error) {
// Create a Disk from a path to a device
// Should pass a path to a block device e.g. /dev/sda or a path to a file /tmp/foo.img
// The provided device must not exist at the time you call Create()
-func Create(device string, size int64, format Format, sectorSize SectorSize) (*disk.Disk, error) {
+func Create(device string, size int64, _ Format, sectorSize SectorSize) (*disk.Disk, error) {
if device == "" {
return nil, errors.New("must pass device name")
}
@@ -344,11 +344,11 @@ func Create(device string, size int64, format Format, sectorSize SectorSize) (*d
}
f, err := os.OpenFile(device, os.O_RDWR|os.O_EXCL|os.O_CREATE, 0o666)
if err != nil {
- return nil, fmt.Errorf("could not create device %s: %v", device, errors.Unwrap(err))
+ return nil, fmt.Errorf("could not create device %s: %w", device, err)
}
err = os.Truncate(device, size)
if err != nil {
- return nil, fmt.Errorf("could not expand device %s to size %d: %v", device, size, errors.Unwrap(err))
+ return nil, fmt.Errorf("could not expand device %s to size %d: %w", device, size, err)
}
// return our disk
return initDisk(f, ReadWriteExclusive, sectorSize)
diff --git a/vendor/github.com/diskfs/go-diskfs/diskfs_other.go b/vendor/github.com/diskfs/go-diskfs/diskfs_other.go
index 95f1f487f9f..16208aea9cc 100644
--- a/vendor/github.com/diskfs/go-diskfs/diskfs_other.go
+++ b/vendor/github.com/diskfs/go-diskfs/diskfs_other.go
@@ -8,11 +8,11 @@ import (
)
// getBlockDeviceSize get the size of an opened block device in Bytes.
-func getBlockDeviceSize(f *os.File) (int64, error) {
+func getBlockDeviceSize(_ *os.File) (int64, error) {
return 0, errors.New("block devices not supported on this platform")
}
// getSectorSizes get the logical and physical sector sizes for a block device
-func getSectorSizes(f *os.File) (logicalSectorSize, physicalSectorSize int64, err error) {
+func getSectorSizes(_ *os.File) (logicalSectorSize, physicalSectorSize int64, err error) {
return 0, 0, errors.New("block devices not supported on this platform")
}
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/blockgroup.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/blockgroup.go
new file mode 100644
index 00000000000..bf3b426d9af
--- /dev/null
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/blockgroup.go
@@ -0,0 +1,55 @@
+package ext4
+
+import (
+ "fmt"
+
+ "github.com/diskfs/go-diskfs/util"
+)
+
+// blockGroup is a structure holding the data about a single block group
+//
+//nolint:unused // will be used in the future, not yet
+type blockGroup struct {
+ inodeBitmap *util.Bitmap
+ blockBitmap *util.Bitmap
+ blockSize int
+ number int
+ inodeTableSize int
+ firstDataBlock int
+}
+
+// blockGroupFromBytes create a blockGroup struct from bytes
+// it does not load the inode table or data blocks into memory, rather holding pointers to where they are
+//
+//nolint:unused // will be used in the future, not yet
+func blockGroupFromBytes(b []byte, blockSize, groupNumber int) (*blockGroup, error) {
+ expectedSize := 2 * blockSize
+ actualSize := len(b)
+ if actualSize != expectedSize {
+ return nil, fmt.Errorf("expected to be passed %d bytes for 2 blocks of size %d, instead received %d", expectedSize, blockSize, actualSize)
+ }
+ inodeBitmap := util.BitmapFromBytes(b[0:blockSize])
+ blockBitmap := util.BitmapFromBytes(b[blockSize : 2*blockSize])
+
+ bg := blockGroup{
+ inodeBitmap: inodeBitmap,
+ blockBitmap: blockBitmap,
+ number: groupNumber,
+ blockSize: blockSize,
+ }
+ return &bg, nil
+}
+
+// toBytes returns bitmaps ready to be written to disk
+//
+//nolint:unused // will be used in the future, not yet
+func (bg *blockGroup) toBytes() ([]byte, error) {
+ b := make([]byte, 2*bg.blockSize)
+ inodeBitmapBytes := bg.inodeBitmap.ToBytes()
+ blockBitmapBytes := bg.blockBitmap.ToBytes()
+
+ b = append(b, inodeBitmapBytes...)
+ b = append(b, blockBitmapBytes...)
+
+ return b, nil
+}
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/checksum.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/checksum.go
new file mode 100644
index 00000000000..d7ffea43749
--- /dev/null
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/checksum.go
@@ -0,0 +1,50 @@
+package ext4
+
+import (
+ "encoding/binary"
+
+ "github.com/diskfs/go-diskfs/filesystem/ext4/crc"
+)
+
+// checksumAppender is a function that takes a byte slice and returns a byte slice with a checksum appended
+type checksumAppender func([]byte) []byte
+type checksummer func([]byte) uint32
+
+// directoryChecksummer returns a function that implements checksumAppender for a directory entries block
+// original calculations can be seen for e2fsprogs https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git/tree/lib/ext2fs/csum.c#n301
+// and in the linux tree https://github.com/torvalds/linux/blob/master/fs/ext4/namei.c#L376-L384
+func directoryChecksummer(seed, inodeNumber, inodeGeneration uint32) checksummer {
+ numBytes := make([]byte, 4)
+ binary.LittleEndian.PutUint32(numBytes, inodeNumber)
+ crcResult := crc.CRC32c(seed, numBytes)
+ genBytes := make([]byte, 4)
+ binary.LittleEndian.PutUint32(genBytes, inodeGeneration)
+ crcResult = crc.CRC32c(crcResult, genBytes)
+ return func(b []byte) uint32 {
+ checksum := crc.CRC32c(crcResult, b)
+ return checksum
+ }
+}
+
+// directoryChecksumAppender returns a function that implements checksumAppender for a directory entries block
+// original calculations can be seen for e2fsprogs https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git/tree/lib/ext2fs/csum.c#n301
+// and in the linux tree https://github.com/torvalds/linux/blob/master/fs/ext4/namei.c#L376-L384
+//
+//nolint:unparam // inodeGeneration is always 0
+func directoryChecksumAppender(seed, inodeNumber, inodeGeneration uint32) checksumAppender {
+ fn := directoryChecksummer(seed, inodeNumber, inodeGeneration)
+ return func(b []byte) []byte {
+ checksum := fn(b)
+ checksumBytes := make([]byte, 12)
+ checksumBytes[4] = 12
+ checksumBytes[7] = 0xde
+ binary.LittleEndian.PutUint32(checksumBytes[8:12], checksum)
+ b = append(b, checksumBytes...)
+ return b
+ }
+}
+
+// nullDirectoryChecksummer does not change anything
+func nullDirectoryChecksummer(b []byte) []byte {
+ return b
+}
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/consts.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/consts.go
new file mode 100644
index 00000000000..2295aa08264
--- /dev/null
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/consts.go
@@ -0,0 +1,5 @@
+package ext4
+
+const (
+ maxUint16 uint64 = 1<<16 - 1
+)
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/crc/crc16.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/crc/crc16.go
new file mode 100644
index 00000000000..b8c37882fa9
--- /dev/null
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/crc/crc16.go
@@ -0,0 +1,44 @@
+package crc
+
+var crc16tab = [256]uint16{
+ 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
+ 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
+ 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
+ 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
+ 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
+ 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
+ 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
+ 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
+ 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
+ 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
+ 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
+ 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
+ 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
+ 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
+ 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
+ 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
+ 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
+ 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
+ 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
+ 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
+ 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
+ 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
+ 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
+ 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
+ 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
+ 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
+ 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
+ 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
+ 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
+ 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
+ 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
+ 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0}
+
+func CRC16(crc uint16, bs []byte) uint16 {
+ l := len(bs)
+ for i := 0; i < l; i++ {
+ crc = ((crc << 8) & 0xff00) ^ crc16tab[((crc>>8)&0xff)^uint16(bs[i])]
+ }
+
+ return crc
+}
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/crc/crc32.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/crc/crc32.go
new file mode 100644
index 00000000000..70b44b0e33e
--- /dev/null
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/crc/crc32.go
@@ -0,0 +1,74 @@
+package crc
+
+import (
+ "encoding/binary"
+ "hash/crc32"
+)
+
+// Define the CRC32C table using the Castagnoli polynomial
+var (
+ crc32cTable = crc32.MakeTable(crc32.Castagnoli)
+ crc32cTables = generateTables(crc32cTable)
+)
+
+func generateTables(poly *crc32.Table) [8][256]uint32 {
+ var tab [8][256]uint32
+ tab[0] = *poly
+
+ for i := 0; i < 256; i++ {
+ crc := tab[0][i]
+ for j := 1; j < 8; j++ {
+ crc = (crc >> 8) ^ tab[0][crc&0xff]
+ tab[j][i] = crc
+ }
+ }
+
+ return tab
+}
+
+func CRC32c(base uint32, b []byte) uint32 {
+ // Compute the CRC32C checksum
+ // for reasons unknown, the checksum from go package hash/crc32, using crc32.Update(), is different from the one calculated by the kernel
+ // so we use this
+ return crc32Body(base, b, &crc32cTables)
+}
+
+// doCRC processes a single byte
+func doCRC(crc uint32, x byte, tab *[256]uint32) uint32 {
+ return tab[(crc^uint32(x))&0xff] ^ (crc >> 8)
+}
+
+// doCRC4 processes 4 bytes
+func doCRC4(q uint32, tab *[8][256]uint32) uint32 {
+ return tab[3][q&0xff] ^ tab[2][(q>>8)&0xff] ^ tab[1][(q>>16)&0xff] ^ tab[0][(q>>24)&0xff]
+}
+
+// doCRC8 processes 8 bytes
+func doCRC8(q uint32, tab *[8][256]uint32) uint32 {
+ return tab[7][q&0xff] ^ tab[6][(q>>8)&0xff] ^ tab[5][(q>>16)&0xff] ^ tab[4][(q>>24)&0xff]
+}
+
+func crc32Body(crc uint32, buf []byte, tab *[8][256]uint32) uint32 {
+ // Align it
+ for len(buf) > 0 && (uintptr(len(buf))&3) != 0 {
+ crc = doCRC(crc, buf[0], &tab[0])
+ buf = buf[1:]
+ }
+
+ // Process in chunks of 8 bytes
+ remLen := len(buf) % 8
+ for len(buf) >= 8 {
+ q := crc ^ binary.LittleEndian.Uint32(buf[:4])
+ crc = doCRC8(q, tab)
+ q = binary.LittleEndian.Uint32(buf[4:8])
+ crc ^= doCRC4(q, tab)
+ buf = buf[8:]
+ }
+
+ // Process remaining bytes
+ for _, b := range buf[:remLen] {
+ crc = doCRC(crc, b, &tab[0])
+ }
+
+ return crc
+}
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/directory.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/directory.go
new file mode 100644
index 00000000000..24535f0297d
--- /dev/null
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/directory.go
@@ -0,0 +1,211 @@
+package ext4
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+)
+
+const (
+ directoryHashTreeRootMinSize = 0x28
+ directoryHashTreeNodeMinSize = 0x12
+)
+
+// Directory represents a single directory in an ext4 filesystem
+type Directory struct {
+ directoryEntry
+ root bool
+ entries []*directoryEntry
+}
+
+// toBytes convert our entries to raw bytes. Provides checksum as well. Final returned byte slice will be a multiple of bytesPerBlock.
+func (d *Directory) toBytes(bytesPerBlock uint32, checksumFunc checksumAppender) []byte {
+ b := make([]byte, 0)
+ var (
+ previousLength int
+ previousEntry *directoryEntry
+ lastEntryCount int
+ block []byte
+ )
+ if len(d.entries) == 0 {
+ return b
+ }
+ lastEntryCount = len(d.entries) - 1
+ for i, de := range d.entries {
+ b2 := de.toBytes(0)
+ switch {
+ case len(block)+len(b2) > int(bytesPerBlock)-minDirEntryLength:
+ // if adding this one will go past the end of the block, pad out the previous
+ block = block[:len(block)-previousLength]
+ previousB := previousEntry.toBytes(uint16(int(bytesPerBlock) - len(block) - minDirEntryLength))
+ block = append(block, previousB...)
+ // add the checksum
+ block = checksumFunc(block)
+ b = append(b, block...)
+ // start a new block
+ block = make([]byte, 0)
+ case i == lastEntryCount:
+ // if this is the last one, pad it out
+ b2 = de.toBytes(uint16(int(bytesPerBlock) - len(block) - minDirEntryLength))
+ block = append(block, b2...)
+ // add the checksum
+ block = checksumFunc(block)
+ b = append(b, block...)
+ // start a new block
+ block = make([]byte, 0)
+ default:
+ block = append(block, b2...)
+ }
+ previousLength = len(b2)
+ previousEntry = de
+ }
+ remainder := len(b) % int(bytesPerBlock)
+ if remainder > 0 {
+ extra := int(bytesPerBlock) - remainder
+ zeroes := make([]byte, extra)
+ b = append(b, zeroes...)
+ }
+ return b
+}
+
+type directoryHashEntry struct {
+ hash uint32
+ block uint32
+}
+
+type dxNode interface {
+ entries() []directoryHashEntry
+}
+
+type directoryHashNode struct {
+ childEntries []directoryHashEntry
+}
+
+func (d *directoryHashNode) entries() []directoryHashEntry {
+ return d.childEntries
+}
+
+type directoryHashRoot struct {
+ inodeDir uint32
+ inodeParent uint32
+ hashVersion hashVersion
+ depth uint8
+ hashAlgorithm hashAlgorithm
+ childEntries []directoryHashEntry
+ dotEntry *directoryEntry
+ dotDotEntry *directoryEntry
+}
+
+func (d *directoryHashRoot) entries() []directoryHashEntry {
+ return d.childEntries
+}
+
+// parseDirectoryTreeRoot parses the directory hash tree root from the given byte slice. Reads only the root node.
+func parseDirectoryTreeRoot(b []byte, largeDir bool) (node *directoryHashRoot, err error) {
+ // min size
+ if len(b) < directoryHashTreeRootMinSize {
+ return nil, fmt.Errorf("directory hash tree root is too small")
+ }
+
+ // dot parameters
+ dotInode := binary.LittleEndian.Uint32(b[0x0:0x4])
+ dotSize := binary.LittleEndian.Uint16(b[0x4:0x6])
+ if dotSize != 12 {
+ return nil, fmt.Errorf("directory hash tree root dot size is %d and not 12", dotSize)
+ }
+ dotNameSize := b[0x6]
+ if dotNameSize != 1 {
+ return nil, fmt.Errorf("directory hash tree root dot name length is %d and not 1", dotNameSize)
+ }
+ dotFileType := directoryFileType(b[0x7])
+ if dotFileType != dirFileTypeDirectory {
+ return nil, fmt.Errorf("directory hash tree root dot file type is %d and not %v", dotFileType, dirFileTypeDirectory)
+ }
+ dotName := b[0x8:0xc]
+ if !bytes.Equal(dotName, []byte{'.', 0, 0, 0}) {
+ return nil, fmt.Errorf("directory hash tree root dot name is %s and not '.'", dotName)
+ }
+
+ // dotdot parameters
+ dotdotInode := binary.LittleEndian.Uint32(b[0xc:0x10])
+ dotdotNameSize := b[0x12]
+ if dotdotNameSize != 2 {
+ return nil, fmt.Errorf("directory hash tree root dotdot name length is %d and not 2", dotdotNameSize)
+ }
+ dotdotFileType := directoryFileType(b[0x13])
+ if dotdotFileType != dirFileTypeDirectory {
+ return nil, fmt.Errorf("directory hash tree root dotdot file type is %d and not %v", dotdotFileType, dirFileTypeDirectory)
+ }
+ dotdotName := b[0x14:0x18]
+ if !bytes.Equal(dotdotName, []byte{'.', '.', 0, 0}) {
+ return nil, fmt.Errorf("directory hash tree root dotdot name is %s and not '..'", dotdotName)
+ }
+
+ treeInformation := b[0x1d]
+ if treeInformation != 8 {
+ return nil, fmt.Errorf("directory hash tree root tree information is %d and not 8", treeInformation)
+ }
+ treeDepth := b[0x1e]
+ // there are maximums for this
+ maxTreeDepth := uint8(2)
+ if largeDir {
+ maxTreeDepth = 3
+ }
+ if treeDepth > maxTreeDepth {
+ return nil, fmt.Errorf("directory hash tree root tree depth is %d and not between 0 and %d", treeDepth, maxTreeDepth)
+ }
+
+ dxEntriesCount := binary.LittleEndian.Uint16(b[0x22:0x24])
+
+ node = &directoryHashRoot{
+ inodeDir: binary.LittleEndian.Uint32(b[0x0:0x4]),
+ inodeParent: binary.LittleEndian.Uint32(b[0xC:0x10]),
+ hashAlgorithm: hashAlgorithm(b[0x1c]), // what hashing algorithm is used?
+ depth: treeDepth,
+ childEntries: make([]directoryHashEntry, 0, int(dxEntriesCount)),
+ dotEntry: &directoryEntry{
+ inode: dotInode,
+ fileType: dotFileType,
+ filename: ".",
+ },
+ dotDotEntry: &directoryEntry{
+ inode: dotdotInode,
+ fileType: dotdotFileType,
+ filename: "..",
+ },
+ }
+
+ // remove 1, because the count includes the one in the dx_root itself
+ node.childEntries = append(node.childEntries, directoryHashEntry{hash: 0, block: binary.LittleEndian.Uint32(b[0x24:0x28])})
+ for i := 0; i < int(dxEntriesCount)-1; i++ {
+ entryOffset := 0x28 + (i * 8)
+ hash := binary.LittleEndian.Uint32(b[entryOffset : entryOffset+4])
+ block := binary.LittleEndian.Uint32(b[entryOffset+4 : entryOffset+8])
+ node.childEntries = append(node.childEntries, directoryHashEntry{hash: hash, block: block})
+ }
+
+ return node, nil
+}
+
+// parseDirectoryTreeNode parses an internal directory hash tree node from the given byte slice. Reads only the node.
+func parseDirectoryTreeNode(b []byte) (node *directoryHashNode, err error) {
+ // min size
+ if len(b) < directoryHashTreeNodeMinSize {
+ return nil, fmt.Errorf("directory hash tree root is too small")
+ }
+
+ dxEntriesCount := binary.LittleEndian.Uint16(b[0xa:0xc])
+
+ node = &directoryHashNode{
+ childEntries: make([]directoryHashEntry, 0, int(dxEntriesCount)),
+ }
+ node.childEntries = append(node.childEntries, directoryHashEntry{hash: 0, block: binary.LittleEndian.Uint32(b[0xc:0x10])})
+ for i := 0; i < int(dxEntriesCount)-1; i++ {
+ entryOffset := 0x10 + (i * 8)
+ hash := binary.LittleEndian.Uint32(b[entryOffset : entryOffset+4])
+ block := binary.LittleEndian.Uint32(b[entryOffset+4 : entryOffset+8])
+ node.childEntries = append(node.childEntries, directoryHashEntry{hash: hash, block: block})
+ }
+
+ return node, nil
+}
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/directoryentry.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/directoryentry.go
new file mode 100644
index 00000000000..295469348e2
--- /dev/null
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/directoryentry.go
@@ -0,0 +1,176 @@
+package ext4
+
+import (
+ "encoding/binary"
+ "fmt"
+)
+
+// directoryFileType uses different constants than the file type property in the inode
+type directoryFileType uint8
+
+const (
+ minDirEntryLength int = 12 // actually 9 for 1-byte file length, but must be multiple of 4 bytes
+ maxDirEntryLength int = 263
+
+ // directory file types
+ dirFileTypeUnknown directoryFileType = 0x0
+ dirFileTypeRegular directoryFileType = 0x1
+ dirFileTypeDirectory directoryFileType = 0x2
+ dirFileTypeCharacter directoryFileType = 0x3
+ dirFileTypeBlock directoryFileType = 0x4
+ dirFileTypeFifo directoryFileType = 0x5
+ dirFileTypeSocket directoryFileType = 0x6
+ dirFileTypeSymlink directoryFileType = 0x7
+)
+
+// directoryEntry is a single directory entry
+type directoryEntry struct {
+ inode uint32
+ filename string
+ fileType directoryFileType
+}
+
+func (de *directoryEntry) equal(other *directoryEntry) bool {
+ return de.inode == other.inode && de.filename == other.filename && de.fileType == other.fileType
+}
+
+func directoryEntryFromBytes(b []byte) (*directoryEntry, error) {
+ if len(b) < minDirEntryLength {
+ return nil, fmt.Errorf("directory entry of length %d is less than minimum %d", len(b), minDirEntryLength)
+ }
+ if len(b) > maxDirEntryLength {
+ b = b[:maxDirEntryLength]
+ }
+
+ //nolint:gocritic // keep this here for future reference
+ // length := binary.LittleEndian.Uint16(b[0x4:0x6])
+ nameLength := b[0x6]
+ name := b[0x8 : 0x8+nameLength]
+ de := directoryEntry{
+ inode: binary.LittleEndian.Uint32(b[0x0:0x4]),
+ fileType: directoryFileType(b[0x7]),
+ filename: string(name),
+ }
+ return &de, nil
+}
+
+func directoryEntriesChecksumFromBytes(b []byte) (checksum uint32, err error) {
+ if len(b) != minDirEntryLength {
+ return checksum, fmt.Errorf("directory entry checksum of length %d is not required %d", len(b), minDirEntryLength)
+ }
+ inode := binary.LittleEndian.Uint32(b[0x0:0x4])
+ if inode != 0 {
+ return checksum, fmt.Errorf("directory entry checksum inode is not 0")
+ }
+ length := binary.LittleEndian.Uint16(b[0x4:0x6])
+ if int(length) != minDirEntryLength {
+ return checksum, fmt.Errorf("directory entry checksum length is not %d", minDirEntryLength)
+ }
+ nameLength := b[0x6]
+ if nameLength != 0 {
+ return checksum, fmt.Errorf("directory entry checksum name length is not 0")
+ }
+ fileType := b[0x7]
+ if fileType != 0xde {
+ return checksum, fmt.Errorf("directory entry checksum file type is not set to reserved 0xde")
+ }
+ return binary.LittleEndian.Uint32(b[0x8:0xc]), nil
+}
+
+// toBytes convert a directoryEntry to bytes. If isLast, then the size recorded is the number of bytes
+// from beginning of directory entry to end of block, minus the amount left for the checksum.
+func (de *directoryEntry) toBytes(withSize uint16) []byte {
+ // it must be the header length + filename length rounded up to nearest multiple of 4
+ nameLength := uint8(len(de.filename))
+ entryLength := uint16(nameLength) + 8
+ if leftover := entryLength % 4; leftover > 0 {
+ entryLength += (4 - leftover)
+ }
+
+ if withSize > 0 {
+ entryLength = withSize
+ }
+ b := make([]byte, entryLength)
+ binary.LittleEndian.PutUint32(b[0x0:0x4], de.inode)
+ binary.LittleEndian.PutUint16(b[0x4:0x6], entryLength)
+ b[0x6] = nameLength
+ b[0x7] = byte(de.fileType)
+ copy(b[0x8:], de.filename)
+
+ return b
+}
+
+func parseDirEntriesLinear(b []byte, withChecksums bool, blocksize, inodeNumber, inodeGeneration, checksumSeed uint32) ([]*directoryEntry, error) {
+ // checksum if needed
+ if withChecksums {
+ var (
+ newb []byte
+ checksumEntryOffset = int(blocksize) - minDirEntryLength
+ checksumOffset = int(blocksize) - 4
+ )
+ checksummer := directoryChecksummer(checksumSeed, inodeNumber, inodeGeneration)
+ for i := 0; i < len(b); i += int(blocksize) {
+ block := b[i : i+int(blocksize)]
+ inBlockChecksum := block[checksumOffset:]
+ block = block[:checksumEntryOffset]
+ // save everything except the checksum
+ newb = append(newb, block...)
+ // checksum the entire block
+ checksumValue := binary.LittleEndian.Uint32(inBlockChecksum)
+ // checksum the block
+ actualChecksum := checksummer(block)
+ if actualChecksum != checksumValue {
+ return nil, fmt.Errorf("directory block checksum mismatch: expected %x, got %x", checksumValue, actualChecksum)
+ }
+ }
+ b = newb
+ }
+
+ // convert into directory entries
+ entries := make([]*directoryEntry, 0, 4)
+ count := 0
+ for i := 0; i < len(b); count++ {
+ // read the length of the entry
+ length := binary.LittleEndian.Uint16(b[i+0x4 : i+0x6])
+ de, err := directoryEntryFromBytes(b[i : i+int(length)])
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse directory entry %d: %v", count, err)
+ }
+ entries = append(entries, de)
+ i += int(length)
+ }
+ return entries, nil
+}
+
+// parseDirEntriesHashed parse hashed data blocks to get directory entries.
+// If hashedName is 0, returns all directory entries; otherwise, returns a slice with a single entry with the given name.
+func parseDirEntriesHashed(b []byte, depth uint8, node dxNode, blocksize uint32, withChecksums bool, inodeNumber, inodeGeneration, checksumSeed uint32) (dirEntries []*directoryEntry, err error) {
+ for _, entry := range node.entries() {
+ var (
+ addDirEntries []*directoryEntry
+ start = entry.block * blocksize
+ end = start + blocksize
+ )
+
+ nextBlock := b[start:end]
+ if depth == 0 {
+ addDirEntries, err = parseDirEntriesLinear(nextBlock, withChecksums, blocksize, inodeNumber, inodeGeneration, checksumSeed)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing linear directory entries: %w", err)
+ }
+ } else {
+ // recursively parse the next level of the tree
+ // read the next level down
+ node, err := parseDirectoryTreeNode(nextBlock)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing directory tree node: %w", err)
+ }
+ addDirEntries, err = parseDirEntriesHashed(b, depth-1, node, blocksize, withChecksums, inodeNumber, inodeGeneration, checksumSeed)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing hashed directory entries: %w", err)
+ }
+ }
+ dirEntries = append(dirEntries, addDirEntries...)
+ }
+ return dirEntries, nil
+}
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/dirhash.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/dirhash.go
new file mode 100644
index 00000000000..8717c34136e
--- /dev/null
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/dirhash.go
@@ -0,0 +1,157 @@
+package ext4
+
+import (
+ "github.com/diskfs/go-diskfs/filesystem/ext4/md4"
+)
+
+const (
+ teaDelta uint32 = 0x9E3779B9
+ k1 uint32 = 0
+ k2 uint32 = 0o13240474631
+ k3 uint32 = 0o15666365641
+ ext4HtreeEOF32 uint32 = ((1 << (32 - 1)) - 1)
+ ext4HtreeEOF64 uint64 = ((1 << (64 - 1)) - 1)
+)
+
+type hashVersion uint8
+
+const (
+ HashVersionLegacy = 0
+ HashVersionHalfMD4 = 1
+ HashVersionTEA = 2
+ HashVersionLegacyUnsigned = 3
+ HashVersionHalfMD4Unsigned = 4
+ HashVersionTEAUnsigned = 5
+ HashVersionSIP = 6
+)
+
+func TEATransform(buf [4]uint32, in []uint32) [4]uint32 {
+ var sum uint32
+ var b0, b1 = buf[0], buf[1]
+ var a, b, c, d = in[0], in[1], in[2], in[3]
+ var n = 16
+
+ for ; n > 0; n-- {
+ sum += teaDelta
+ b0 += ((b1 << 4) + a) ^ (b1 + sum) ^ ((b1 >> 5) + b)
+ b1 += ((b0 << 4) + c) ^ (b0 + sum) ^ ((b0 >> 5) + d)
+ }
+
+ buf[0] += b0
+ buf[1] += b1
+ return buf
+}
+
+// the old legacy hash
+//
+//nolint:unparam,revive // we do not used signed, but we probably should, so leaving until we are sure
+func dxHackHash(name string, signed bool) uint32 {
+ var hash uint32
+ var hash0, hash1 uint32 = 0x12a3fe2d, 0x37abe8f9
+ b := []byte(name)
+
+ for i := len(b); i > 0; i-- {
+ // get the specific character
+ c := int(b[i-1])
+ // the value of the individual character depends on if it is signed or not
+ hash = hash1 + (hash0 ^ uint32(c*7152373))
+
+ if hash&0x80000000 != 0 {
+ hash -= 0x7fffffff
+ }
+ hash1 = hash0
+ hash0 = hash
+ }
+ return hash0 << 1
+}
+
+//nolint:unparam,revive // we do not used signed, but we probably should, so leaving until we are sure
+func str2hashbuf(msg string, num int, signed bool) []uint32 {
+ var buf [8]uint32
+ var pad, val uint32
+ b := []byte(msg)
+ size := len(b)
+
+ pad = uint32(size) | (uint32(size) << 8)
+ pad |= pad << 16
+
+ val = pad
+ if size > num*4 {
+ size = num * 4
+ }
+ var j int
+ for i := 0; i < size; i++ {
+ c := int(b[i])
+ val = uint32(c) + (val << 8)
+ if (i % 4) == 3 {
+ buf[j] = val
+ val = pad
+ num--
+ j++
+ }
+ }
+ num--
+ if num >= 0 {
+ buf[j] = val
+ j++
+ }
+ for num--; num >= 0; num-- {
+ buf[j] = pad
+ j++
+ }
+ return buf[:]
+}
+
+func ext4fsDirhash(name string, version hashVersion, seed []uint32) (hash, minorHash uint32) {
+ /* Initialize the default seed for the hash checksum functions */
+ var buf = [4]uint32{0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476}
+
+ // Check to see if the seed is all zero, and if so, use the default
+ for i, val := range seed {
+ if val != 0 {
+ buf[i] = val
+ }
+ }
+
+ switch version {
+ case HashVersionLegacyUnsigned:
+ hash = dxHackHash(name, false)
+ case HashVersionLegacy:
+ hash = dxHackHash(name, true)
+ case HashVersionHalfMD4Unsigned:
+ for i := 0; i < len(name); i += 32 {
+ in := str2hashbuf(name[i:], 8, false)
+ buf[1] = md4.HalfMD4Transform(buf, in)
+ }
+ minorHash = buf[2]
+ hash = buf[1]
+ case HashVersionHalfMD4:
+ for i := 0; i < len(name); i += 32 {
+ in := str2hashbuf(name[i:], 8, true)
+ buf[1] = md4.HalfMD4Transform(buf, in)
+ }
+ minorHash = buf[2]
+ hash = buf[1]
+ case HashVersionTEAUnsigned:
+ for i := 0; i < len(name); i += 16 {
+ in := str2hashbuf(name[i:], 4, false)
+ buf = TEATransform(buf, in)
+ }
+ hash = buf[0]
+ minorHash = buf[1]
+ case HashVersionTEA:
+ for i := 0; i < len(name); i += 16 {
+ in := str2hashbuf(name[i:], 4, true)
+ buf = TEATransform(buf, in)
+ }
+ hash = buf[0]
+ minorHash = buf[1]
+ default:
+ return 0, 0
+ }
+ hash &= ^uint32(1)
+ if hash == (ext4HtreeEOF32 << 1) {
+ hash = (ext4HtreeEOF32 - 1) << 1
+ }
+ return hash, minorHash
+}
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/ext4.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/ext4.go
new file mode 100644
index 00000000000..b322dd7356d
--- /dev/null
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/ext4.go
@@ -0,0 +1,1748 @@
+package ext4
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ iofs "io/fs"
+ "math"
+ "os"
+ "path"
+ "sort"
+ "strings"
+ "time"
+
+ "github.com/diskfs/go-diskfs/filesystem"
+ "github.com/diskfs/go-diskfs/filesystem/ext4/crc"
+ "github.com/diskfs/go-diskfs/util"
+ "github.com/google/uuid"
+)
+
+// SectorSize indicates what the sector size in bytes is
+type SectorSize uint16
+
+// BlockSize indicates how many sectors are in a block
+type BlockSize uint8
+
+// BlockGroupSize indicates how many blocks are in a group, standardly 8*block_size_in_bytes
+
+const (
+ // SectorSize512 is a sector size of 512 bytes, used as the logical size for all ext4 filesystems
+ SectorSize512 SectorSize = 512
+ minBlocksPerGroup uint32 = 256
+ BootSectorSize SectorSize = 2 * SectorSize512
+ SuperblockSize SectorSize = 2 * SectorSize512
+ BlockGroupFactor int = 8
+ DefaultInodeRatio int64 = 8192
+ DefaultInodeSize int64 = 256
+ DefaultReservedBlocksPercent uint8 = 5
+ DefaultVolumeName = "diskfs_ext4"
+ minClusterSize int = 128
+ maxClusterSize int = 65529
+ bytesPerSlot int = 32
+ maxCharsLongFilename int = 13
+ maxBlocksPerExtent uint16 = 32768
+ million int = 1000000
+ billion int = 1000 * million
+ firstNonReservedInode uint32 = 11 // traditional
+
+ minBlockLogSize int = 10 /* 1024 */
+ maxBlockLogSize int = 16 /* 65536 */
+ minBlockSize int = (1 << minBlockLogSize)
+ maxBlockSize int = (1 << maxBlockLogSize)
+
+ max32Num uint64 = math.MaxUint32
+ max64Num uint64 = math.MaxUint64
+
+ maxFilesystemSize32Bit uint64 = 16*2 ^ 40
+ maxFilesystemSize64Bit uint64 = 1*2 ^ 60
+
+ checksumType uint8 = 1
+
+ // default for log groups per flex group
+ defaultLogGroupsPerFlex int = 3
+
+ // fixed inodes
+ rootInode uint32 = 2
+ userQuotaInode uint32 = 3
+ groupQuotaInode uint32 = 4
+ journalInode uint32 = 8
+ lostFoundInode = 11 // traditional
+)
+
+type Params struct {
+ UUID *uuid.UUID
+ SectorsPerBlock uint8
+ BlocksPerGroup uint32
+ InodeRatio int64
+ InodeCount uint32
+ SparseSuperVersion uint8
+ Checksum bool
+ ClusterSize int64
+ ReservedBlocksPercent uint8
+ VolumeName string
+ // JournalDevice external journal device, only checked if WithFeatureSeparateJournalDevice(true) is set
+ JournalDevice string
+ LogFlexBlockGroups int
+ Features []FeatureOpt
+ DefaultMountOpts []MountOpt
+}
+
+// FileSystem implememnts the FileSystem interface
+type FileSystem struct {
+ bootSector []byte
+ superblock *superblock
+ groupDescriptors *groupDescriptors
+ blockGroups int64
+ size int64
+ start int64
+ file util.File
+}
+
+// Equal compare if two filesystems are equal
+func (fs *FileSystem) Equal(a *FileSystem) bool {
+ localMatch := fs.file == a.file
+ sbMatch := fs.superblock.equal(a.superblock)
+ gdMatch := fs.groupDescriptors.equal(a.groupDescriptors)
+ return localMatch && sbMatch && gdMatch
+}
+
+// Create creates an ext4 filesystem in a given file or device
+//
+// requires the util.File where to create the filesystem, size is the size of the filesystem in bytes,
+// start is how far in bytes from the beginning of the util.File to create the filesystem,
+// and blocksize is is the logical blocksize to use for creating the filesystem
+//
+// note that you are *not* required to create the filesystem on the entire disk. You could have a disk of size
+// 20GB, and create a small filesystem of size 50MB that begins 2GB into the disk.
+// This is extremely useful for creating filesystems on disk partitions.
+//
+// Note, however, that it is much easier to do this using the higher-level APIs at github.com/diskfs/go-diskfs
+// which allow you to work directly with partitions, rather than having to calculate (and hopefully not make any errors)
+// where a partition starts and ends.
+//
+// If the provided blocksize is 0, it will use the default of 512 bytes. If it is any number other than 0
+// or 512, it will return an error.
+//
+//nolint:gocyclo // yes, this has high cyclomatic complexity, but we can accept it
+func Create(f util.File, size, start, sectorsize int64, p *Params) (*FileSystem, error) {
+ // be safe about the params pointer
+ if p == nil {
+ p = &Params{}
+ }
+
+ // sectorsize must be <=0 or exactly SectorSize512 or error
+ // because of this, we know we can scale it down to a uint32, since it only can be 512 bytes
+ if sectorsize != int64(SectorSize512) && sectorsize > 0 {
+ return nil, fmt.Errorf("sectorsize for ext4 must be either 512 bytes or 0, not %d", sectorsize)
+ }
+ var sectorsize32 = uint32(sectorsize)
+ // there almost are no limits on an ext4 fs - theoretically up to 1 YB
+ // but we do have to check the max and min size per the requested parameters
+ // if size < minSizeGivenParameters {
+ // return nil, fmt.Errorf("requested size is smaller than minimum allowed ext4 size %d for given parameters", minSizeGivenParameters*4)
+ // }
+ // if size > maxSizeGivenParameters {
+ // return nil, fmt.Errorf("requested size is bigger than maximum ext4 size %d for given parameters", maxSizeGivenParameters*4)
+ // }
+
+ // uuid
+ fsuuid := p.UUID
+ if fsuuid == nil {
+ fsuuid2, _ := uuid.NewRandom()
+ fsuuid = &fsuuid2
+ }
+
+ // blocksize
+ sectorsPerBlock := p.SectorsPerBlock
+ userProvidedBlocksize := false
+ switch {
+ case sectorsPerBlock > 128 || sectorsPerBlock < 2:
+ return nil, fmt.Errorf("invalid sectors per block %d, must be between %d and %d sectors", sectorsPerBlock, 2, 128)
+ case sectorsPerBlock < 1:
+ sectorsPerBlock = 2
+ default:
+ userProvidedBlocksize = true
+ }
+ blocksize := uint32(sectorsPerBlock) * sectorsize32
+
+ // how many whole blocks is that?
+ numblocks := size / int64(blocksize)
+
+ // recalculate if it was not user provided
+ if !userProvidedBlocksize {
+ sectorsPerBlockR, blocksizeR, numblocksR := recalculateBlocksize(numblocks, size)
+ _, blocksize, numblocks = uint8(sectorsPerBlockR), blocksizeR, numblocksR
+ }
+
+ // how many blocks in each block group (and therefore how many block groups)
+ // if not provided, by default it is 8*blocksize (in bytes)
+ blocksPerGroup := p.BlocksPerGroup
+ switch {
+ case blocksPerGroup <= 0:
+ blocksPerGroup = blocksize * 8
+ case blocksPerGroup < minBlocksPerGroup:
+ return nil, fmt.Errorf("invalid number of blocks per group %d, must be at least %d", blocksPerGroup, minBlocksPerGroup)
+ case blocksPerGroup > 8*blocksize:
+ return nil, fmt.Errorf("invalid number of blocks per group %d, must be no larger than 8*blocksize of %d", blocksPerGroup, blocksize)
+ case blocksPerGroup%8 != 0:
+ return nil, fmt.Errorf("invalid number of blocks per group %d, must be divisible by 8", blocksPerGroup)
+ }
+
+ // how many block groups do we have?
+ blockGroups := numblocks / int64(blocksPerGroup)
+
+ // track how many free blocks we have
+ freeBlocks := numblocks
+
+ clusterSize := p.ClusterSize
+
+ // use our inode ratio to determine how many inodes we should have
+ inodeRatio := p.InodeRatio
+ if inodeRatio <= 0 {
+ inodeRatio = DefaultInodeRatio
+ }
+ if inodeRatio < int64(blocksize) {
+ inodeRatio = int64(blocksize)
+ }
+ if inodeRatio < clusterSize {
+ inodeRatio = clusterSize
+ }
+
+ inodeCount := p.InodeCount
+ switch {
+ case inodeCount <= 0:
+ // calculate how many inodes are needed
+ inodeCount64 := (numblocks * int64(blocksize)) / inodeRatio
+ if uint64(inodeCount64) > max32Num {
+ return nil, fmt.Errorf("requested %d inodes, greater than max %d", inodeCount64, max32Num)
+ }
+ inodeCount = uint32(inodeCount64)
+ case uint64(inodeCount) > max32Num:
+ return nil, fmt.Errorf("requested %d inodes, greater than max %d", inodeCount, max32Num)
+ }
+
+ inodesPerGroup := int64(inodeCount) / blockGroups
+
+ // track how many free inodes we have
+ freeInodes := inodeCount
+
+ // which blocks have superblock and GDT?
+ var (
+ backupSuperblocks []int64
+ backupSuperblockGroupsSparse [2]uint32
+ )
+ // 0 - primary
+ // ?? - backups
+ switch p.SparseSuperVersion {
+ case 2:
+ // backups in first and last block group
+ backupSuperblockGroupsSparse = [2]uint32{0, uint32(blockGroups) - 1}
+ backupSuperblocks = []int64{0, 1, blockGroups - 1}
+ default:
+ backupSuperblockGroups := calculateBackupSuperblockGroups(blockGroups)
+ backupSuperblocks = []int64{0}
+ for _, bg := range backupSuperblockGroups {
+ backupSuperblocks = append(backupSuperblocks, bg*int64(blocksPerGroup))
+ }
+ }
+
+ freeBlocks -= int64(len(backupSuperblocks))
+
+ var firstDataBlock uint32
+ if blocksize == 1024 {
+ firstDataBlock = 1
+ }
+
+ /*
+ size calculations
+ we have the total size of the disk from `size uint64`
+ we have the sectorsize fixed at SectorSize512
+
+ what do we need to determine or calculate?
+ - block size
+ - number of blocks
+ - number of block groups
+ - block groups for superblock and gdt backups
+ - in each block group:
+ - number of blocks in gdt
+ - number of reserved blocks in gdt
+ - number of blocks in inode table
+ - number of data blocks
+
+ config info:
+
+ [defaults]
+ base_features = sparse_super,large_file,filetype,resize_inode,dir_index,ext_attr
+ default_mntopts = acl,user_xattr
+ enable_periodic_fsck = 0
+ blocksize = 4096
+ inode_size = 256
+ inode_ratio = 16384
+
+ [fs_types]
+ ext3 = {
+ features = has_journal
+ }
+ ext4 = {
+ features = has_journal,extent,huge_file,flex_bg,uninit_bg,64bit,dir_nlink,extra_isize
+ inode_size = 256
+ }
+ ext4dev = {
+ features = has_journal,extent,huge_file,flex_bg,uninit_bg,inline_data,64bit,dir_nlink,extra_isize
+ inode_size = 256
+ options = test_fs=1
+ }
+ small = {
+ blocksize = 1024
+ inode_size = 128
+ inode_ratio = 4096
+ }
+ floppy = {
+ blocksize = 1024
+ inode_size = 128
+ inode_ratio = 8192
+ }
+ big = {
+ inode_ratio = 32768
+ }
+ huge = {
+ inode_ratio = 65536
+ }
+ news = {
+ inode_ratio = 4096
+ }
+ largefile = {
+ inode_ratio = 1048576
+ blocksize = -1
+ }
+ largefile4 = {
+ inode_ratio = 4194304
+ blocksize = -1
+ }
+ hurd = {
+ blocksize = 4096
+ inode_size = 128
+ }
+ */
+
+ // allocate root directory, single inode
+ freeInodes--
+
+ // how many reserved blocks?
+ reservedBlocksPercent := p.ReservedBlocksPercent
+ if reservedBlocksPercent <= 0 {
+ reservedBlocksPercent = DefaultReservedBlocksPercent
+ }
+
+ // are checksums enabled?
+ gdtChecksumType := gdtChecksumNone
+ if p.Checksum {
+ gdtChecksumType = gdtChecksumMetadata
+ }
+
+ // we do not yet support bigalloc
+ var clustersPerGroup = blocksPerGroup
+
+ // inodesPerGroup: once we know how many inodes per group, and how many groups
+ // we will have the total inode count
+
+ volumeName := p.VolumeName
+ if volumeName == "" {
+ volumeName = DefaultVolumeName
+ }
+
+ fflags := defaultFeatureFlags
+ for _, flagopt := range p.Features {
+ flagopt(&fflags)
+ }
+
+ mflags := defaultMiscFlags
+
+ // generate hash seed
+ hashSeed, _ := uuid.NewRandom()
+ hashSeedBytes := hashSeed[:]
+ htreeSeed := make([]uint32, 0, 4)
+ htreeSeed = append(htreeSeed,
+ binary.LittleEndian.Uint32(hashSeedBytes[:4]),
+ binary.LittleEndian.Uint32(hashSeedBytes[4:8]),
+ binary.LittleEndian.Uint32(hashSeedBytes[8:12]),
+ binary.LittleEndian.Uint32(hashSeedBytes[12:16]),
+ )
+
+ // create a UUID for the journal
+ journalSuperblockUUID, _ := uuid.NewRandom()
+
+ // group descriptor size could be 32 or 64, depending on option
+ var gdSize uint16
+ if fflags.fs64Bit {
+ gdSize = groupDescriptorSize64Bit
+ }
+
+ var firstMetaBG uint32
+ if fflags.metaBlockGroups {
+ return nil, fmt.Errorf("meta block groups not yet supported")
+ }
+
+ // calculate the maximum number of block groups
+ // maxBlockGroups = (maxFSSize) / (blocksPerGroup * blocksize)
+ var (
+ maxBlockGroups uint64
+ )
+ if fflags.fs64Bit {
+ maxBlockGroups = maxFilesystemSize64Bit / (uint64(blocksPerGroup) * uint64(blocksize))
+ } else {
+ maxBlockGroups = maxFilesystemSize32Bit / (uint64(blocksPerGroup) * uint64(blocksize))
+ }
+ reservedGDTBlocks := maxBlockGroups * 32 / maxBlockGroups
+ if reservedGDTBlocks > math.MaxUint16 {
+ return nil, fmt.Errorf("too many reserved blocks calculated for group descriptor table")
+ }
+
+ var (
+ journalDeviceNumber uint32
+ err error
+ )
+ if fflags.separateJournalDevice && p.JournalDevice != "" {
+ journalDeviceNumber, err = journalDevice(p.JournalDevice)
+ if err != nil {
+ return nil, fmt.Errorf("unable to get journal device: %w", err)
+ }
+ }
+
+ // get default mount options
+ mountOptions := defaultMountOptionsFromOpts(p.DefaultMountOpts)
+
+ // initial KB written. This must be adjusted over time to include:
+ // - superblock itself (1KB bytes)
+ // - GDT
+ // - block bitmap (1KB per block group)
+ // - inode bitmap (1KB per block group)
+ // - inode tables (inodes per block group * bytes per inode)
+ // - root directory
+
+ // for now, we just make it 1024 = 1 KB
+ initialKB := 1024
+
+ // only set a project quota inode if the feature was enabled
+ var projectQuotaInode uint32
+ if fflags.projectQuotas {
+ projectQuotaInode = lostFoundInode + 1
+ freeInodes--
+ }
+
+ // how many log groups per flex group? Depends on if we have flex groups
+ logGroupsPerFlex := 0
+ if fflags.flexBlockGroups {
+ logGroupsPerFlex = defaultLogGroupsPerFlex
+ if p.LogFlexBlockGroups > 0 {
+ logGroupsPerFlex = p.LogFlexBlockGroups
+ }
+ }
+
+ // create the superblock - MUST ADD IN OPTIONS
+ now, epoch := time.Now(), time.Unix(0, 0)
+ sb := superblock{
+ inodeCount: inodeCount,
+ blockCount: uint64(numblocks),
+ reservedBlocks: uint64(reservedBlocksPercent) / 100 * uint64(numblocks),
+ freeBlocks: uint64(freeBlocks),
+ freeInodes: freeInodes,
+ firstDataBlock: firstDataBlock,
+ blockSize: blocksize,
+ clusterSize: uint64(clusterSize),
+ blocksPerGroup: blocksPerGroup,
+ clustersPerGroup: clustersPerGroup,
+ inodesPerGroup: uint32(inodesPerGroup),
+ mountTime: now,
+ writeTime: now,
+ mountCount: 0,
+ mountsToFsck: 0,
+ filesystemState: fsStateCleanlyUnmounted,
+ errorBehaviour: errorsContinue,
+ minorRevision: 0,
+ lastCheck: now,
+ checkInterval: 0,
+ creatorOS: osLinux,
+ revisionLevel: 1,
+ reservedBlocksDefaultUID: 0,
+ reservedBlocksDefaultGID: 0,
+ firstNonReservedInode: firstNonReservedInode,
+ inodeSize: uint16(DefaultInodeSize),
+ blockGroup: 0,
+ features: fflags,
+ uuid: fsuuid,
+ volumeLabel: volumeName,
+ lastMountedDirectory: "/",
+ algorithmUsageBitmap: 0, // not used in Linux e2fsprogs
+ preallocationBlocks: 0, // not used in Linux e2fsprogs
+ preallocationDirectoryBlocks: 0, // not used in Linux e2fsprogs
+ reservedGDTBlocks: uint16(reservedGDTBlocks),
+ journalSuperblockUUID: &journalSuperblockUUID,
+ journalInode: journalInode,
+ journalDeviceNumber: journalDeviceNumber,
+ orphanedInodesStart: 0,
+ hashTreeSeed: htreeSeed,
+ hashVersion: hashHalfMD4,
+ groupDescriptorSize: gdSize,
+ defaultMountOptions: *mountOptions,
+ firstMetablockGroup: firstMetaBG,
+ mkfsTime: now,
+ journalBackup: nil,
+ // 64-bit mode features
+ inodeMinBytes: minInodeExtraSize,
+ inodeReserveBytes: wantInodeExtraSize,
+ miscFlags: mflags,
+ raidStride: 0,
+ multiMountPreventionInterval: 0,
+ multiMountProtectionBlock: 0,
+ raidStripeWidth: 0,
+ checksumType: checksumType,
+ totalKBWritten: uint64(initialKB),
+ errorCount: 0,
+ errorFirstTime: epoch,
+ errorFirstInode: 0,
+ errorFirstBlock: 0,
+ errorFirstFunction: "",
+ errorFirstLine: 0,
+ errorLastTime: epoch,
+ errorLastInode: 0,
+ errorLastLine: 0,
+ errorLastBlock: 0,
+ errorLastFunction: "",
+ mountOptions: "", // no mount options until it is mounted
+ backupSuperblockBlockGroups: backupSuperblockGroupsSparse,
+ lostFoundInode: lostFoundInode,
+ overheadBlocks: 0,
+ checksumSeed: crc.CRC32c(0, fsuuid[:]), // according to docs, this should be crc32c(~0, $orig_fs_uuid)
+ snapshotInodeNumber: 0,
+ snapshotID: 0,
+ snapshotReservedBlocks: 0,
+ snapshotStartInode: 0,
+ userQuotaInode: userQuotaInode,
+ groupQuotaInode: groupQuotaInode,
+ projectQuotaInode: projectQuotaInode,
+ logGroupsPerFlex: uint64(logGroupsPerFlex),
+ }
+ gdt := groupDescriptors{}
+
+ b, err := sb.toBytes()
+ if err != nil {
+ return nil, fmt.Errorf("error converting Superblock to bytes: %v", err)
+ }
+
+ g := gdt.toBytes(gdtChecksumType, sb.checksumSeed)
+ // how big should the GDT be?
+ gdSize = groupDescriptorSize
+ if sb.features.fs64Bit {
+ gdSize = groupDescriptorSize64Bit
+ }
+ gdtSize := int64(gdSize) * numblocks
+ // write the superblock and GDT to the various locations on disk
+ for _, bg := range backupSuperblocks {
+ block := bg * int64(blocksPerGroup)
+ blockStart := block * int64(blocksize)
+ // allow that the first one requires an offset
+ incr := int64(0)
+ if block == 0 {
+ incr = int64(SectorSize512) * 2
+ }
+
+ // write the superblock
+ count, err := f.WriteAt(b, incr+blockStart+start)
+ if err != nil {
+ return nil, fmt.Errorf("error writing Superblock for block %d to disk: %v", block, err)
+ }
+ if count != int(SuperblockSize) {
+ return nil, fmt.Errorf("wrote %d bytes of Superblock for block %d to disk instead of expected %d", count, block, SuperblockSize)
+ }
+
+ // write the GDT
+ count, err = f.WriteAt(g, incr+blockStart+int64(SuperblockSize)+start)
+ if err != nil {
+ return nil, fmt.Errorf("error writing GDT for block %d to disk: %v", block, err)
+ }
+ if count != int(gdtSize) {
+ return nil, fmt.Errorf("wrote %d bytes of GDT for block %d to disk instead of expected %d", count, block, gdtSize)
+ }
+ }
+
+ // create root directory
+ // there is nothing in there
+ return &FileSystem{
+ bootSector: []byte{},
+ superblock: &sb,
+ groupDescriptors: &gdt,
+ blockGroups: blockGroups,
+ size: size,
+ start: start,
+ file: f,
+ }, nil
+}
+
+// Read reads a filesystem from a given disk.
+//
+// requires the util.File where to read the filesystem, size is the size of the filesystem in bytes,
+// start is how far in bytes from the beginning of the util.File the filesystem is expected to begin,
+// and blocksize is is the logical blocksize to use for creating the filesystem
+//
+// note that you are *not* required to read a filesystem on the entire disk. You could have a disk of size
+// 20GB, and a small filesystem of size 50MB that begins 2GB into the disk.
+// This is extremely useful for working with filesystems on disk partitions.
+//
+// Note, however, that it is much easier to do this using the higher-level APIs at github.com/diskfs/go-diskfs
+// which allow you to work directly with partitions, rather than having to calculate (and hopefully not make any errors)
+// where a partition starts and ends.
+//
+// If the provided blocksize is 0, it will use the default of 512 bytes. If it is any number other than 0
+// or 512, it will return an error.
+func Read(file util.File, size, start, sectorsize int64) (*FileSystem, error) {
+ // blocksize must be <=0 or exactly SectorSize512 or error
+ if sectorsize != int64(SectorSize512) && sectorsize > 0 {
+ return nil, fmt.Errorf("sectorsize for ext4 must be either 512 bytes or 0, not %d", sectorsize)
+ }
+ // we do not check for ext4 max size because it is theoreticallt 1YB, which is bigger than an int64! Even 1ZB is!
+ if size < Ext4MinSize {
+ return nil, fmt.Errorf("requested size is smaller than minimum allowed ext4 size %d", Ext4MinSize)
+ }
+
+ // load the information from the disk
+ // read boot sector code
+ bs := make([]byte, BootSectorSize)
+ n, err := file.ReadAt(bs, start)
+ if err != nil {
+ return nil, fmt.Errorf("could not read boot sector bytes from file: %v", err)
+ }
+ if uint16(n) < uint16(BootSectorSize) {
+ return nil, fmt.Errorf("only could read %d boot sector bytes from file", n)
+ }
+
+ // read the superblock
+ // the superblock is one minimal block, i.e. 2 sectors
+ superblockBytes := make([]byte, SuperblockSize)
+ n, err = file.ReadAt(superblockBytes, start+int64(BootSectorSize))
+ if err != nil {
+ return nil, fmt.Errorf("could not read superblock bytes from file: %v", err)
+ }
+ if uint16(n) < uint16(SuperblockSize) {
+ return nil, fmt.Errorf("only could read %d superblock bytes from file", n)
+ }
+
+ // convert the bytes into a superblock structure
+ sb, err := superblockFromBytes(superblockBytes)
+ if err != nil {
+ return nil, fmt.Errorf("could not interpret superblock data: %v", err)
+ }
+
+ // now read the GDT
+ // how big should the GDT be?
+ gdtSize := uint64(sb.groupDescriptorSize) * sb.blockGroupCount()
+
+ gdtBytes := make([]byte, gdtSize)
+ // where do we find the GDT?
+ // - if blocksize is 1024, then 1024 padding for BootSector is block 0, 1024 for superblock is block 1
+ // and then the GDT starts at block 2
+ // - if blocksize is larger than 1024, then 1024 padding for BootSector followed by 1024 for superblock
+ // is block 0, and then the GDT starts at block 1
+ gdtBlock := 1
+ if sb.blockSize == 1024 {
+ gdtBlock = 2
+ }
+ n, err = file.ReadAt(gdtBytes, start+int64(gdtBlock)*int64(sb.blockSize))
+ if err != nil {
+ return nil, fmt.Errorf("could not read Group Descriptor Table bytes from file: %v", err)
+ }
+ if uint64(n) < gdtSize {
+ return nil, fmt.Errorf("only could read %d Group Descriptor Table bytes from file instead of %d", n, gdtSize)
+ }
+ gdt, err := groupDescriptorsFromBytes(gdtBytes, sb.groupDescriptorSize, sb.checksumSeed, sb.gdtChecksumType())
+ if err != nil {
+ return nil, fmt.Errorf("could not interpret Group Descriptor Table data: %v", err)
+ }
+
+ return &FileSystem{
+ bootSector: bs,
+ superblock: sb,
+ groupDescriptors: gdt,
+ blockGroups: int64(sb.blockGroupCount()),
+ size: size,
+ start: start,
+ file: file,
+ }, nil
+}
+
+// Type returns the type code for the filesystem. Always returns filesystem.TypeExt4
+func (fs *FileSystem) Type() filesystem.Type {
+ return filesystem.TypeExt4
+}
+
+// Mkdir make a directory at the given path. It is equivalent to `mkdir -p`, i.e. idempotent, in that:
+//
+// * It will make the entire tree path if it does not exist
+// * It will not return an error if the path already exists
+func (fs *FileSystem) Mkdir(p string) error {
+ _, err := fs.readDirWithMkdir(p, true)
+ // we are not interesting in returning the entries
+ return err
+}
+
+// ReadDir return the contents of a given directory in a given filesystem.
+//
+// Returns a slice of os.FileInfo with all of the entries in the directory.
+//
+// Will return an error if the directory does not exist or is a regular file and not a directory
+func (fs *FileSystem) ReadDir(p string) ([]os.FileInfo, error) {
+ dir, err := fs.readDirWithMkdir(p, false)
+ if err != nil {
+ return nil, fmt.Errorf("error reading directory %s: %v", p, err)
+ }
+ // once we have made it here, looping is done. We have found the final entry
+ // we need to return all of the file info
+ count := len(dir.entries)
+ ret := make([]os.FileInfo, count)
+ for i, e := range dir.entries {
+ in, err := fs.readInode(e.inode)
+ if err != nil {
+ return nil, fmt.Errorf("could not read inode %d at position %d in directory: %v", e.inode, i, err)
+ }
+ ret[i] = &FileInfo{
+ modTime: in.modifyTime,
+ name: e.filename,
+ size: int64(in.size),
+ isDir: e.fileType == dirFileTypeDirectory,
+ }
+ }
+
+ return ret, nil
+}
+
+// OpenFile returns an io.ReadWriter from which you can read the contents of a file
+// or write contents to the file
+//
+// accepts normal os.OpenFile flags
+//
+// returns an error if the file does not exist
+func (fs *FileSystem) OpenFile(p string, flag int) (filesystem.File, error) {
+ filename := path.Base(p)
+ dir := path.Dir(p)
+ parentDir, entry, err := fs.getEntryAndParent(p)
+ if err != nil {
+ return nil, err
+ }
+ if entry != nil && entry.fileType == dirFileTypeDirectory {
+ return nil, fmt.Errorf("cannot open directory %s as file", p)
+ }
+
+ // see if the file exists
+ // if the file does not exist, and is not opened for os.O_CREATE, return an error
+ if entry == nil {
+ if flag&os.O_CREATE == 0 {
+ return nil, fmt.Errorf("target file %s does not exist and was not asked to create", p)
+ }
+ // else create it
+ entry, err = fs.mkFile(parentDir, filename)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create file %s: %v", p, err)
+ }
+ }
+ // get the inode
+ inodeNumber := entry.inode
+ inode, err := fs.readInode(inodeNumber)
+ if err != nil {
+ return nil, fmt.Errorf("could not read inode number %d: %v", inodeNumber, err)
+ }
+
+ // if a symlink, read the target, rather than the inode itself, which does not point to anything
+ if inode.fileType == fileTypeSymbolicLink {
+ // is the symlink relative or absolute?
+ linkTarget := inode.linkTarget
+ if !path.IsAbs(linkTarget) {
+ // convert it into an absolute path
+ // and start the process again
+ linkTarget = path.Join(dir, linkTarget)
+ // we probably could make this more efficient by checking if the final linkTarget
+ // is in the same directory as we already are parsing, rather than walking the whole thing again
+ // leave that for the future.
+ linkTarget = path.Clean(linkTarget)
+ }
+ return fs.OpenFile(linkTarget, flag)
+ }
+ offset := int64(0)
+ if flag&os.O_APPEND == os.O_APPEND {
+ offset = int64(inode.size)
+ }
+ // when we open a file, we load the inode but also all of the extents
+ extents, err := inode.extents.blocks(fs)
+ if err != nil {
+ return nil, fmt.Errorf("could not read extent tree for inode %d: %v", inodeNumber, err)
+ }
+ return &File{
+ directoryEntry: entry,
+ inode: inode,
+ isReadWrite: flag&os.O_RDWR != 0,
+ isAppend: flag&os.O_APPEND != 0,
+ offset: offset,
+ filesystem: fs,
+ extents: extents,
+ }, nil
+}
+
+// Label read the volume label
+func (fs *FileSystem) Label() string {
+ if fs.superblock == nil {
+ return ""
+ }
+ return fs.superblock.volumeLabel
+}
+
+// Rm remove file or directory at path.
+// If path is directory, it only will remove if it is empty.
+// If path is a file, it will remove the file.
+// Will not remove any parents.
+// Error if the file does not exist or is not an empty directory
+func (fs *FileSystem) Rm(p string) error {
+ parentDir, entry, err := fs.getEntryAndParent(p)
+ if err != nil {
+ return err
+ }
+ if parentDir.root && entry == &parentDir.directoryEntry {
+ return fmt.Errorf("cannot remove root directory")
+ }
+ if entry == nil {
+ return fmt.Errorf("file does not exist: %s", p)
+ }
+ // if it is a directory, it must be empty
+ if entry.fileType == dirFileTypeDirectory {
+ // read the directory
+ entries, err := fs.readDirectory(entry.inode)
+ if err != nil {
+ return fmt.Errorf("could not read directory %s: %v", p, err)
+ }
+ if len(entries) > 2 {
+ return fmt.Errorf("directory not empty: %s", p)
+ }
+ }
+ // at this point, it is either a file or an empty directory, so remove it
+
+ // free up the blocks
+ // read the inode to find the blocks
+ removedInode, err := fs.readInode(entry.inode)
+ if err != nil {
+ return fmt.Errorf("could not read inode %d for %s: %v", entry.inode, p, err)
+ }
+ extents, err := removedInode.extents.blocks(fs)
+ if err != nil {
+ return fmt.Errorf("could not read extents for inode %d for %s: %v", entry.inode, p, err)
+ }
+ // clear the inode from the inode bitmap
+ inodeBG := blockGroupForInode(int(entry.inode), fs.superblock.inodesPerGroup)
+ inodeBitmap, err := fs.readInodeBitmap(inodeBG)
+ if err != nil {
+ return fmt.Errorf("could not read inode bitmap: %v", err)
+ }
+ // clear up the blocks from the block bitmap. We are not clearing the block content, just the bitmap.
+ // keep a cache of bitmaps, so we do not have to read them again and again
+ blockBitmaps := make(map[int]*util.Bitmap)
+ for _, e := range extents {
+ for i := e.startingBlock; i < e.startingBlock+uint64(e.count); i++ {
+ // determine what block group this block is in, and read the bitmap for that blockgroup
+ bg := blockGroupForBlock(int(i), fs.superblock.blocksPerGroup)
+ dataBlockBitmap, ok := blockBitmaps[bg]
+ if !ok {
+ dataBlockBitmap, err = fs.readBlockBitmap(bg)
+ if err != nil {
+ return fmt.Errorf("could not read block bitmap: %v", err)
+ }
+ blockBitmaps[bg] = dataBlockBitmap
+ }
+ // the extent lists the absolute block number, but the bitmap is relative to the block group
+ blockInBG := int(i) - int(fs.superblock.blocksPerGroup)*bg
+ if err := dataBlockBitmap.Clear(blockInBG); err != nil {
+ return fmt.Errorf("could not clear block bitmap for block %d: %v", i, err)
+ }
+ }
+ }
+ for bg, dataBlockBitmap := range blockBitmaps {
+ if err := fs.writeBlockBitmap(dataBlockBitmap, bg); err != nil {
+ return fmt.Errorf("could not write block bitmap back to disk: %v", err)
+ }
+ }
+
+ // remove the directory entry from the parent
+ newEntries := make([]*directoryEntry, 0, len(parentDir.entries)-1)
+ for _, e := range parentDir.entries {
+ if e.inode == entry.inode {
+ continue
+ }
+ newEntries = append(newEntries, e)
+ }
+ parentDir.entries = newEntries
+ // write the parent directory back
+ dirBytes := parentDir.toBytes(fs.superblock.blockSize, directoryChecksumAppender(fs.superblock.checksumSeed, parentDir.inode, 0))
+ parentInode, err := fs.readInode(parentDir.inode)
+ if err != nil {
+ return fmt.Errorf("could not read inode %d for %s: %v", entry.inode, path.Base(p), err)
+ }
+ extents, err = parentInode.extents.blocks(fs)
+ if err != nil {
+ return fmt.Errorf("could not read extents for inode %d for %s: %v", entry.inode, path.Base(p), err)
+ }
+ for _, e := range extents {
+ for i := 0; i < int(e.count); i++ {
+ b := dirBytes[i:fs.superblock.blockSize]
+ if _, err := fs.file.WriteAt(b, (int64(i)+int64(e.startingBlock))*int64(fs.superblock.blockSize)); err != nil {
+ return fmt.Errorf("could not write inode bitmap back to disk: %v", err)
+ }
+ }
+ }
+
+ // remove the inode from the bitmap and write the inode bitmap back
+ // inode is absolute, but bitmap is relative to block group
+ inodeInBG := int(entry.inode) - int(fs.superblock.inodesPerGroup)*inodeBG
+ if err := inodeBitmap.Clear(inodeInBG); err != nil {
+ return fmt.Errorf("could not clear inode bitmap for inode %d: %v", entry.inode, err)
+ }
+
+ // write the inode bitmap back
+ if err := fs.writeInodeBitmap(inodeBitmap, inodeBG); err != nil {
+ return fmt.Errorf("could not write inode bitmap back to disk: %v", err)
+ }
+ // update the group descriptor
+ gd := fs.groupDescriptors.descriptors[inodeBG]
+
+ // update the group descriptor inodes and blocks
+ gd.freeInodes++
+ gd.freeBlocks += uint32(removedInode.blocks)
+ // write the group descriptor back
+ gdBytes := gd.toBytes(fs.superblock.gdtChecksumType(), fs.superblock.uuid.ID())
+ gdtBlock := 1
+ if fs.superblock.blockSize == 1024 {
+ gdtBlock = 2
+ }
+ if _, err := fs.file.WriteAt(gdBytes, fs.start+int64(gdtBlock)*int64(fs.superblock.blockSize)+int64(gd.number)*int64(fs.superblock.groupDescriptorSize)); err != nil {
+ return fmt.Errorf("could not write Group Descriptor bytes to file: %v", err)
+ }
+
+ // we could remove the inode from the inode table in the group descriptor,
+ // but we do not need to do so. Since we are not reusing the inode, we can just leave it there,
+ // the bitmap always is checked before reusing an inode location.
+ fs.superblock.freeInodes++
+ fs.superblock.freeBlocks += removedInode.blocks
+ return fs.writeSuperblock()
+}
+
+func (fs *FileSystem) Truncate(p string, size int64) error {
+ _, entry, err := fs.getEntryAndParent(p)
+ if err != nil {
+ return err
+ }
+ if entry == nil {
+ return fmt.Errorf("file does not exist: %s", p)
+ }
+ if entry.fileType == dirFileTypeDirectory {
+ return fmt.Errorf("cannot truncate directory %s", p)
+ }
+ // it is not a directory, and it exists, so truncate it
+ inode, err := fs.readInode(entry.inode)
+ if err != nil {
+ return fmt.Errorf("could not read inode %d in directory: %v", entry.inode, err)
+ }
+ // change the file size
+ inode.size = uint64(size)
+
+ // free used blocks if shrank, or reserve new blocks if grew
+ // both of which mean updating the superblock, and the extents tree in the inode
+
+ // write the inode back
+ return fs.writeInode(inode)
+}
+
+// getEntryAndParent given a path, get the Directory for the parent and the directory entry for the file.
+// If the directory does not exist, returns an error.
+// If the file does not exist, does not return an error, but rather returns a nil entry.
+func (fs *FileSystem) getEntryAndParent(p string) (parent *Directory, entry *directoryEntry, err error) {
+ dir := path.Dir(p)
+ filename := path.Base(p)
+ // get the directory entries
+ parentDir, err := fs.readDirWithMkdir(dir, false)
+ if err != nil {
+ return nil, nil, fmt.Errorf("could not read directory entries for %s", dir)
+ }
+ // we now know that the directory exists, see if the file exists
+ var targetEntry *directoryEntry
+ if parentDir.root && filename == "/" {
+ // root directory
+ return parentDir, &parentDir.directoryEntry, nil
+ }
+
+ for _, e := range parentDir.entries {
+ if e.filename != filename {
+ continue
+ }
+ // if we got this far, we have found the file
+ targetEntry = e
+ break
+ }
+ return parentDir, targetEntry, nil
+}
+
+// Stat return fs.FileInfo about a specific file path.
+func (fs *FileSystem) Stat(p string) (iofs.FileInfo, error) {
+ _, entry, err := fs.getEntryAndParent(p)
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, fmt.Errorf("file does not exist: %s", p)
+ }
+ in, err := fs.readInode(entry.inode)
+ if err != nil {
+ return nil, fmt.Errorf("could not read inode %d in directory: %v", entry.inode, err)
+ }
+ return &FileInfo{
+ modTime: in.modifyTime,
+ name: entry.filename,
+ size: int64(in.size),
+ isDir: entry.fileType == dirFileTypeDirectory,
+ }, nil
+}
+
+// SetLabel changes the label on the writable filesystem. Different file system may hav different
+// length constraints.
+func (fs *FileSystem) SetLabel(label string) error {
+ fs.superblock.volumeLabel = label
+ return fs.writeSuperblock()
+}
+
+// readInode read a single inode from disk
+func (fs *FileSystem) readInode(inodeNumber uint32) (*inode, error) {
+ if inodeNumber == 0 {
+ return nil, fmt.Errorf("cannot read inode 0")
+ }
+ sb := fs.superblock
+ inodeSize := sb.inodeSize
+ inodesPerGroup := sb.inodesPerGroup
+ // figure out which block group the inode is on
+ bg := (inodeNumber - 1) / inodesPerGroup
+ // read the group descriptor to find out the location of the inode table
+ gd := fs.groupDescriptors.descriptors[bg]
+ inodeTableBlock := gd.inodeTableLocation
+ inodeBytes := make([]byte, inodeSize)
+ // bytesStart is beginning byte for the inodeTableBlock
+ byteStart := inodeTableBlock * uint64(sb.blockSize)
+ // offsetInode is how many inodes in our inode is
+ offsetInode := (inodeNumber - 1) % inodesPerGroup
+ // offset is how many bytes in our inode is
+ offset := offsetInode * uint32(inodeSize)
+ read, err := fs.file.ReadAt(inodeBytes, int64(byteStart)+int64(offset))
+ if err != nil {
+ return nil, fmt.Errorf("failed to read inode %d from offset %d of block %d from block group %d: %v", inodeNumber, offset, inodeTableBlock, bg, err)
+ }
+ if read != int(inodeSize) {
+ return nil, fmt.Errorf("read %d bytes for inode %d instead of inode size of %d", read, inodeNumber, inodeSize)
+ }
+ inode, err := inodeFromBytes(inodeBytes, sb, inodeNumber)
+ if err != nil {
+ return nil, fmt.Errorf("could not interpret inode data: %v", err)
+ }
+ // fill in symlink target if needed
+ if inode.fileType == fileTypeSymbolicLink && inode.linkTarget == "" {
+ // read the symlink target
+ extents, err := inode.extents.blocks(fs)
+ if err != nil {
+ return nil, fmt.Errorf("could not read extent tree for symlink inode %d: %v", inodeNumber, err)
+ }
+ b, err := fs.readFileBytes(extents, inode.size)
+ if err != nil {
+ return nil, fmt.Errorf("could not read symlink target for inode %d: %v", inodeNumber, err)
+ }
+ inode.linkTarget = string(b)
+ }
+ return inode, nil
+}
+
+// writeInode write a single inode to disk
+func (fs *FileSystem) writeInode(i *inode) error {
+ sb := fs.superblock
+ inodeSize := sb.inodeSize
+ inodesPerGroup := sb.inodesPerGroup
+ // figure out which block group the inode is on
+ bg := (i.number - 1) / inodesPerGroup
+ // read the group descriptor to find out the location of the inode table
+ gd := fs.groupDescriptors.descriptors[bg]
+ inodeTableBlock := gd.inodeTableLocation
+ // bytesStart is beginning byte for the inodeTableBlock
+ // byteStart := inodeTableBlock * sb.blockSize
+ // offsetInode is how many inodes in our inode is
+ offsetInode := (i.number - 1) % inodesPerGroup
+ byteStart := inodeTableBlock * uint64(sb.blockSize)
+ // offsetInode is how many inodes in our inode is
+ // offset is how many bytes in our inode is
+ // offset is how many bytes in our inode is
+ offset := int64(offsetInode) * int64(inodeSize)
+ inodeBytes := i.toBytes(sb)
+ wrote, err := fs.file.WriteAt(inodeBytes, int64(byteStart)+offset)
+ if err != nil {
+ return fmt.Errorf("failed to write inode %d at offset %d of block %d from block group %d: %v", i.number, offset, inodeTableBlock, bg, err)
+ }
+ if wrote != int(inodeSize) {
+ return fmt.Errorf("wrote %d bytes for inode %d instead of inode size of %d", wrote, i.number, inodeSize)
+ }
+ return nil
+}
+
+// read directory entries for a given directory
+func (fs *FileSystem) readDirectory(inodeNumber uint32) ([]*directoryEntry, error) {
+ // read the inode for the directory
+ in, err := fs.readInode(inodeNumber)
+ if err != nil {
+ return nil, fmt.Errorf("could not read inode %d for directory: %v", inodeNumber, err)
+ }
+ // convert the extent tree into a sorted list of extents
+ extents, err := in.extents.blocks(fs)
+ if err != nil {
+ return nil, fmt.Errorf("unable to get blocks for inode %d: %w", in.number, err)
+ }
+ // read the contents of the file across all blocks
+ b, err := fs.readFileBytes(extents, in.size)
+ if err != nil {
+ return nil, fmt.Errorf("error reading file bytes for inode %d: %v", inodeNumber, err)
+ }
+
+ var dirEntries []*directoryEntry
+ // TODO: none of this works for hashed dir entries, indicated by in.flags.hashedDirectoryIndexes == true
+ if in.flags.hashedDirectoryIndexes {
+ treeRoot, err := parseDirectoryTreeRoot(b[:fs.superblock.blockSize], fs.superblock.features.largeDirectory)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse directory tree root: %v", err)
+ }
+ subDirEntries, err := parseDirEntriesHashed(b, treeRoot.depth, treeRoot, fs.superblock.blockSize, fs.superblock.features.metadataChecksums, in.number, in.nfsFileVersion, fs.superblock.checksumSeed)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse hashed directory entries: %v", err)
+ }
+ // include the dot and dotdot entries from treeRoot; they do not show up in the hashed entries
+ dirEntries = []*directoryEntry{treeRoot.dotEntry, treeRoot.dotDotEntry}
+ dirEntries = append(dirEntries, subDirEntries...)
+ } else {
+ // convert into directory entries
+ dirEntries, err = parseDirEntriesLinear(b, fs.superblock.features.metadataChecksums, fs.superblock.blockSize, in.number, in.nfsFileVersion, fs.superblock.checksumSeed)
+ }
+
+ return dirEntries, err
+}
+
+// readFileBytes read all of the bytes for an individual file pointed at by a given inode
+// normally not very useful, but helpful when reading an entire directory.
+func (fs *FileSystem) readFileBytes(extents extents, filesize uint64) ([]byte, error) {
+ // walk through each one, gobbling up the bytes
+ b := make([]byte, 0, fs.superblock.blockSize)
+ for i, e := range extents {
+ start := e.startingBlock * uint64(fs.superblock.blockSize)
+ count := uint64(e.count) * uint64(fs.superblock.blockSize)
+ if uint64(len(b))+count > filesize {
+ count = filesize - uint64(len(b))
+ }
+ b2 := make([]byte, count)
+ read, err := fs.file.ReadAt(b2, int64(start))
+ if err != nil {
+ return nil, fmt.Errorf("failed to read bytes for extent %d: %v", i, err)
+ }
+ if read != int(count) {
+ return nil, fmt.Errorf("read %d bytes instead of %d for extent %d", read, count, i)
+ }
+ b = append(b, b2...)
+ if uint64(len(b)) >= filesize {
+ break
+ }
+ }
+ return b, nil
+}
+
+// mkFile make a file with a given name in the given directory.
+func (fs *FileSystem) mkFile(parent *Directory, name string) (*directoryEntry, error) {
+ return fs.mkDirEntry(parent, name, false)
+}
+
+// readDirWithMkdir - walks down a directory tree to the last entry in p.
+// For example, if p is /a/b/c, it will walk down to c.
+// Expects c to be a directory.
+// If each step in the tree does not exist, it will either make it if doMake is true, or return an error.
+func (fs *FileSystem) readDirWithMkdir(p string, doMake bool) (*Directory, error) {
+ paths := splitPath(p)
+
+ // walk down the directory tree until all paths have been walked or we cannot find something
+ // start with the root directory
+ var entries []*directoryEntry
+ currentDir := &Directory{
+ directoryEntry: directoryEntry{
+ inode: rootInode,
+ filename: "",
+ fileType: dirFileTypeDirectory,
+ },
+ root: true,
+ }
+ entries, err := fs.readDirectory(rootInode)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read directory %s", "/")
+ }
+ currentDir.entries = entries
+ for i, subp := range paths {
+ // do we have an entry whose name is the same as this name?
+ found := false
+ for _, e := range entries {
+ if e.filename != subp {
+ continue
+ }
+ if e.fileType != dirFileTypeDirectory {
+ return nil, fmt.Errorf("cannot create directory at %s since it is a file", "/"+strings.Join(paths[0:i+1], "/"))
+ }
+ // the filename matches, and it is a subdirectory, so we can break after saving the directory entry, which contains the inode
+ found = true
+ currentDir = &Directory{
+ directoryEntry: *e,
+ }
+ break
+ }
+
+ // if not, either make it, retrieve its cluster and entries, and loop;
+ // or error out
+ if !found {
+ if doMake {
+ var subdirEntry *directoryEntry
+ subdirEntry, err = fs.mkSubdir(currentDir, subp)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create subdirectory %s", "/"+strings.Join(paths[0:i+1], "/"))
+ }
+ // save where we are to search next
+ currentDir = &Directory{
+ directoryEntry: *subdirEntry,
+ }
+ } else {
+ return nil, fmt.Errorf("path %s not found", "/"+strings.Join(paths[0:i+1], "/"))
+ }
+ }
+ // get all of the entries in this directory
+ entries, err = fs.readDirectory(currentDir.inode)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read directory %s", "/"+strings.Join(paths[0:i+1], "/"))
+ }
+ currentDir.entries = entries
+ }
+ // once we have made it here, looping is done; we have found the final entry
+ currentDir.entries = entries
+ return currentDir, nil
+}
+
+// readBlock read a single block from disk
+func (fs *FileSystem) readBlock(blockNumber uint64) ([]byte, error) {
+ sb := fs.superblock
+ // bytesStart is beginning byte for the inodeTableBlock
+ byteStart := blockNumber * uint64(sb.blockSize)
+ blockBytes := make([]byte, sb.blockSize)
+ read, err := fs.file.ReadAt(blockBytes, int64(byteStart))
+ if err != nil {
+ return nil, fmt.Errorf("failed to read block %d: %v", blockNumber, err)
+ }
+ if read != int(sb.blockSize) {
+ return nil, fmt.Errorf("read %d bytes for block %d instead of size of %d", read, blockNumber, sb.blockSize)
+ }
+ return blockBytes, nil
+}
+
+// recalculate blocksize based on the existing number of blocks
+// - 0 <= blocks < 3MM : floppy - blocksize = 1024
+// - 3MM <= blocks < 512MM : small - blocksize = 1024
+// - 512MM <= blocks < 4*1024*1024MM : default - blocksize =
+// - 4*1024*1024MM <= blocks < 16*1024*1024MM : big - blocksize =
+// - 16*1024*1024MM <= blocks : huge - blocksize =
+//
+// the original code from e2fsprogs https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git/tree/misc/mke2fs.c
+func recalculateBlocksize(numblocks, size int64) (sectorsPerBlock int, blocksize uint32, numBlocksAdjusted int64) {
+ var (
+ million64 = int64(million)
+ sectorSize512 = uint32(SectorSize512)
+ )
+ switch {
+ case 0 <= numblocks && numblocks < 3*million64:
+ sectorsPerBlock = 2
+ blocksize = 2 * sectorSize512
+ case 3*million64 <= numblocks && numblocks < 512*million64:
+ sectorsPerBlock = 2
+ blocksize = 2 * sectorSize512
+ case 512*million64 <= numblocks && numblocks < 4*1024*1024*million64:
+ sectorsPerBlock = 2
+ blocksize = 2 * sectorSize512
+ case 4*1024*1024*million64 <= numblocks && numblocks < 16*1024*1024*million64:
+ sectorsPerBlock = 2
+ blocksize = 2 * sectorSize512
+ case numblocks > 16*1024*1024*million64:
+ sectorsPerBlock = 2
+ blocksize = 2 * sectorSize512
+ }
+ return sectorsPerBlock, blocksize, size / int64(blocksize)
+}
+
+// mkSubdir make a subdirectory of a given name inside the parent
+// 1- allocate a single data block for the directory
+// 2- create an inode in the inode table pointing to that data block
+// 3- mark the inode in the inode bitmap
+// 4- mark the data block in the data block bitmap
+// 5- create a directory entry in the parent directory data blocks
+func (fs *FileSystem) mkSubdir(parent *Directory, name string) (*directoryEntry, error) {
+ return fs.mkDirEntry(parent, name, true)
+}
+
+func (fs *FileSystem) mkDirEntry(parent *Directory, name string, isDir bool) (*directoryEntry, error) {
+ // still to do:
+ // - write directory entry in parent
+ // - write inode to disk
+
+ // create an inode
+ inodeNumber, err := fs.allocateInode(parent.inode)
+ if err != nil {
+ return nil, fmt.Errorf("could not allocate inode for file %s: %w", name, err)
+ }
+ // get extents for the file - prefer in the same block group as the inode, if possible
+ newExtents, err := fs.allocateExtents(1, nil)
+ if err != nil {
+ return nil, fmt.Errorf("could not allocate disk space for file %s: %w", name, err)
+ }
+ extentTreeParsed, err := extendExtentTree(nil, newExtents, fs, nil)
+ if err != nil {
+ return nil, fmt.Errorf("could not convert extents into tree: %w", err)
+ }
+ // normally, after getting a tree from extents, you would need to then allocate all of the blocks
+ // in the extent tree - leafs and intermediate. However, because we are allocating a new directory
+ // with a single extent, we *know* it can fit in the inode itself (which has a max of 4), so no need
+
+ // create a directory entry for the file
+ deFileType := dirFileTypeRegular
+ fileType := fileTypeRegularFile
+ var contentSize uint64
+ if isDir {
+ deFileType = dirFileTypeDirectory
+ fileType = fileTypeDirectory
+ contentSize = uint64(fs.superblock.blockSize)
+ }
+ de := directoryEntry{
+ inode: inodeNumber,
+ filename: name,
+ fileType: deFileType,
+ }
+ parent.entries = append(parent.entries, &de)
+ // write the parent out to disk
+ bytesPerBlock := fs.superblock.blockSize
+ parentDirBytes := parent.toBytes(bytesPerBlock, directoryChecksumAppender(fs.superblock.checksumSeed, parent.inode, 0))
+ // check if parent has increased in size beyond allocated blocks
+ parentInode, err := fs.readInode(parent.inode)
+ if err != nil {
+ return nil, fmt.Errorf("could not read inode %d of parent directory: %w", parent.inode, err)
+ }
+
+ // write the directory entry in the parent
+ // figure out which block it goes into, and possibly rebalance the directory entries hash tree
+ parentExtents, err := parentInode.extents.blocks(fs)
+ if err != nil {
+ return nil, fmt.Errorf("could not read parent extents for directory: %w", err)
+ }
+ dirFile := &File{
+ inode: parentInode,
+ directoryEntry: &directoryEntry{
+ inode: parent.inode,
+ filename: name,
+ fileType: dirFileTypeDirectory,
+ },
+ filesystem: fs,
+ isReadWrite: true,
+ isAppend: true,
+ offset: 0,
+ extents: parentExtents,
+ }
+ wrote, err := dirFile.Write(parentDirBytes)
+ if err != nil && err != io.EOF {
+ return nil, fmt.Errorf("unable to write new directory: %w", err)
+ }
+ if wrote != len(parentDirBytes) {
+ return nil, fmt.Errorf("wrote only %d bytes instead of expected %d for new directory", wrote, len(parentDirBytes))
+ }
+
+ // write the inode for the new entry out
+ now := time.Now()
+ in := inode{
+ number: inodeNumber,
+ permissionsGroup: parentInode.permissionsGroup,
+ permissionsOwner: parentInode.permissionsOwner,
+ permissionsOther: parentInode.permissionsOther,
+ fileType: fileType,
+ owner: parentInode.owner,
+ group: parentInode.group,
+ size: contentSize,
+ hardLinks: 2,
+ blocks: newExtents.blockCount(),
+ flags: &inodeFlags{},
+ nfsFileVersion: 0,
+ version: 0,
+ inodeSize: parentInode.inodeSize,
+ deletionTime: 0,
+ accessTime: now,
+ changeTime: now,
+ createTime: now,
+ modifyTime: now,
+ extendedAttributeBlock: 0,
+ project: 0,
+ extents: extentTreeParsed,
+ }
+ // write the inode to disk
+ if err := fs.writeInode(&in); err != nil {
+ return nil, fmt.Errorf("could not write inode for new directory: %w", err)
+ }
+ // if a directory, put entries for . and .. in the first block for the new directory
+ if isDir {
+ initialEntries := []*directoryEntry{
+ {
+ inode: inodeNumber,
+ filename: ".",
+ fileType: dirFileTypeDirectory,
+ },
+ {
+ inode: parent.inode,
+ filename: "..",
+ fileType: dirFileTypeDirectory,
+ },
+ }
+ newDir := Directory{
+ directoryEntry: de,
+ root: false,
+ entries: initialEntries,
+ }
+ dirBytes := newDir.toBytes(fs.superblock.blockSize, directoryChecksumAppender(fs.superblock.checksumSeed, inodeNumber, 0))
+ // write the bytes out to disk
+ dirFile = &File{
+ inode: &in,
+ directoryEntry: &directoryEntry{
+ inode: inodeNumber,
+ filename: name,
+ fileType: dirFileTypeDirectory,
+ },
+ filesystem: fs,
+ isReadWrite: true,
+ isAppend: true,
+ offset: 0,
+ extents: *newExtents,
+ }
+ wrote, err := dirFile.Write(dirBytes)
+ if err != nil && err != io.EOF {
+ return nil, fmt.Errorf("unable to write new directory: %w", err)
+ }
+ if wrote != len(dirBytes) {
+ return nil, fmt.Errorf("wrote only %d bytes instead of expected %d for new entry", wrote, len(dirBytes))
+ }
+ }
+
+ // return
+ return &de, nil
+}
+
+// allocateInode allocate a single inode
+// passed the parent, so it can know where to allocate it
+// logic:
+// - parent is 0 : root inode, will allocate at 2
+// - parent is 2 : child of root, will try to spread out
+// - else : try to collocate with parent, if possible
+func (fs *FileSystem) allocateInode(parent uint32) (uint32, error) {
+ var (
+ inodeNumber = -1
+ )
+ if parent == 0 {
+ inodeNumber = 2
+ }
+ // load the inode bitmap
+ var (
+ bg int
+ gd groupDescriptor
+ )
+
+ for _, gd = range fs.groupDescriptors.descriptors {
+ if inodeNumber != -1 {
+ break
+ }
+ bg := int(gd.number)
+ bm, err := fs.readInodeBitmap(bg)
+ if err != nil {
+ return 0, fmt.Errorf("could not read inode bitmap: %w", err)
+ }
+ // get first free inode
+ inodeNumber = bm.FirstFree(0)
+ // if we found a
+ if inodeNumber == -1 {
+ continue
+ }
+ // set it as marked
+ if err := bm.Set(inodeNumber); err != nil {
+ return 0, fmt.Errorf("could not set inode bitmap: %w", err)
+ }
+ // write the inode bitmap bytes
+ if err := fs.writeInodeBitmap(bm, bg); err != nil {
+ return 0, fmt.Errorf("could not write inode bitmap: %w", err)
+ }
+ }
+ if inodeNumber == -1 {
+ return 0, errors.New("no free inodes available")
+ }
+
+ // reduce number of free inodes in that descriptor in the group descriptor table
+ gd.freeInodes--
+
+ // get the group descriptor as bytes
+ gdBytes := gd.toBytes(fs.superblock.gdtChecksumType(), fs.superblock.uuid.ID())
+
+ // write the group descriptor bytes
+ // gdt starts in block 1 of any redundant copies, specifically in BG 0
+ gdtBlock := 1
+ blockByteLocation := gdtBlock * int(fs.superblock.blockSize)
+ gdOffset := fs.start + int64(blockByteLocation) + int64(bg)*int64(fs.superblock.groupDescriptorSize)
+ wrote, err := fs.file.WriteAt(gdBytes, gdOffset)
+ if err != nil {
+ return 0, fmt.Errorf("unable to write group descriptor bytes for blockgroup %d: %v", bg, err)
+ }
+ if wrote != len(gdBytes) {
+ return 0, fmt.Errorf("wrote only %d bytes instead of expected %d for group descriptor of block group %d", wrote, len(gdBytes), bg)
+ }
+
+ return uint32(inodeNumber), nil
+}
+
+// allocateExtents allocate the data blocks in extents that are
+// to be used for a file of a given size
+// arguments are file size in bytes and existing extents
+// if previous is nil, then we are not (re)sizing an existing file but creating a new one
+// returns the extents to be used in order
+func (fs *FileSystem) allocateExtents(size uint64, previous *extents) (*extents, error) {
+ // 1- calculate how many blocks are needed
+ required := size / uint64(fs.superblock.blockSize)
+ remainder := size % uint64(fs.superblock.blockSize)
+ if remainder > 0 {
+ required++
+ }
+ // 2- see how many blocks already are allocated
+ var allocated uint64
+ if previous != nil {
+ allocated = previous.blockCount()
+ }
+ // 3- if needed, allocate new blocks in extents
+ extraBlockCount := required - allocated
+ // if we have enough, do not add anything
+ if extraBlockCount <= 0 {
+ return previous, nil
+ }
+
+ // if there are not enough blocks left on the filesystem, return an error
+ if fs.superblock.freeBlocks < extraBlockCount {
+ return nil, fmt.Errorf("only %d blocks free, requires additional %d", fs.superblock.freeBlocks, extraBlockCount)
+ }
+
+ // now we need to look for as many contiguous blocks as possible
+ // first calculate the minimum number of extents needed
+
+ // if all of the extents, except possibly the last, are maximum size, then we need minExtents extents
+ // we loop through, trying to allocate an extent as large as our remaining blocks or maxBlocksPerExtent,
+ // whichever is smaller
+ blockGroupCount := fs.blockGroups
+ // TODO: instead of starting with BG 0, should start with BG where the inode for this file/dir is located
+ var (
+ newExtents []extent
+ datablockBitmaps = map[int]*util.Bitmap{}
+ blocksPerGroup = fs.superblock.blocksPerGroup
+ )
+
+ var i int64
+ for i = 0; i < blockGroupCount && allocated < extraBlockCount; i++ {
+ // keep track if we allocated anything in this blockgroup
+ // 1- read the GDT for this blockgroup to find the location of the block bitmap
+ // and total free blocks
+ // 2- read the block bitmap from disk
+ // 3- find the maximum contiguous space available
+ bs, err := fs.readBlockBitmap(int(i))
+ if err != nil {
+ return nil, fmt.Errorf("could not read block bitmap for block group %d: %v", i, err)
+ }
+ // now find our unused blocks and how many there are in a row as potential extents
+ if extraBlockCount > maxUint16 {
+ return nil, fmt.Errorf("cannot allocate more than %d blocks in a single extent", maxUint16)
+ }
+ // get the list of free blocks
+ blockList := bs.FreeList()
+
+ // create possible extents by size
+ // Step 3: Group contiguous blocks into extents
+ var extents []extent
+ for _, freeBlock := range blockList {
+ start, length := freeBlock.Position, freeBlock.Count
+ for length > 0 {
+ extentLength := min(length, int(maxBlocksPerExtent))
+ extents = append(extents, extent{startingBlock: uint64(start) + uint64(i)*uint64(blocksPerGroup), count: uint16(extentLength)})
+ start += extentLength
+ length -= extentLength
+ }
+ }
+
+ // sort in descending order
+ sort.Slice(extents, func(i, j int) bool {
+ return extents[i].count > extents[j].count
+ })
+
+ var allocatedBlocks uint64
+ for _, ext := range extents {
+ if extraBlockCount <= 0 {
+ break
+ }
+ extentToAdd := ext
+ if uint64(ext.count) >= extraBlockCount {
+ extentToAdd = extent{startingBlock: ext.startingBlock, count: uint16(extraBlockCount)}
+ }
+ newExtents = append(newExtents, extentToAdd)
+ allocatedBlocks += uint64(extentToAdd.count)
+ extraBlockCount -= uint64(extentToAdd.count)
+ // set the marked blocks in the bitmap, and save the bitmap
+ for block := extentToAdd.startingBlock; block < extentToAdd.startingBlock+uint64(extentToAdd.count); block++ {
+ // determine what block group this block is in, and read the bitmap for that blockgroup
+ // the extent lists the absolute block number, but the bitmap is relative to the block group
+ blockInGroup := block - uint64(i)*uint64(blocksPerGroup)
+ if err := bs.Set(int(blockInGroup)); err != nil {
+ return nil, fmt.Errorf("could not clear block bitmap for block %d: %v", i, err)
+ }
+ }
+
+ // do *not* write the bitmap back yet, as we do not yet know if we will be able to fulfill the entire request.
+ // instead save it for later
+ datablockBitmaps[int(i)] = bs
+ }
+ }
+ if extraBlockCount > 0 {
+ return nil, fmt.Errorf("could not allocate %d blocks", extraBlockCount)
+ }
+
+ // write the block bitmaps back to disk
+ for bg, bs := range datablockBitmaps {
+ if err := fs.writeBlockBitmap(bs, bg); err != nil {
+ return nil, fmt.Errorf("could not write block bitmap for block group %d: %v", bg, err)
+ }
+ }
+
+ // need to update the total blocks used/free in superblock
+ fs.superblock.freeBlocks -= allocated
+ // update the blockBitmapChecksum for any updated block groups in GDT
+ // write updated superblock and GDT to disk
+ if err := fs.writeSuperblock(); err != nil {
+ return nil, fmt.Errorf("could not write superblock: %w", err)
+ }
+ // write backup copies
+ var exten extents = newExtents
+ return &exten, nil
+}
+
+// readInodeBitmap read the inode bitmap off the disk.
+// This would be more efficient if we just read one group descriptor's bitmap
+// but for now we are about functionality, not efficiency, so it will read the whole thing.
+func (fs *FileSystem) readInodeBitmap(group int) (*util.Bitmap, error) {
+ if group >= len(fs.groupDescriptors.descriptors) {
+ return nil, fmt.Errorf("block group %d does not exist", group)
+ }
+ gd := fs.groupDescriptors.descriptors[group]
+ bitmapLocation := gd.inodeBitmapLocation
+ bitmapByteCount := fs.superblock.inodesPerGroup / 8
+ b := make([]byte, bitmapByteCount)
+ offset := int64(bitmapLocation*uint64(fs.superblock.blockSize) + uint64(fs.start))
+ read, err := fs.file.ReadAt(b, offset)
+ if err != nil {
+ return nil, fmt.Errorf("unable to read inode bitmap for blockgroup %d: %w", gd.number, err)
+ }
+ if read != int(bitmapByteCount) {
+ return nil, fmt.Errorf("Read %d bytes instead of expected %d for inode bitmap of block group %d", read, bitmapByteCount, gd.number)
+ }
+ // only take bytes corresponding to the number of inodes per group
+
+ // create a bitmap
+ bs := util.NewBitmap(int(fs.superblock.blockSize) * len(fs.groupDescriptors.descriptors))
+ bs.FromBytes(b)
+ return bs, nil
+}
+
+// writeInodeBitmap write the inode bitmap to the disk.
+func (fs *FileSystem) writeInodeBitmap(bm *util.Bitmap, group int) error {
+ if group >= len(fs.groupDescriptors.descriptors) {
+ return fmt.Errorf("block group %d does not exist", group)
+ }
+ b := bm.ToBytes()
+ gd := fs.groupDescriptors.descriptors[group]
+ bitmapByteCount := fs.superblock.inodesPerGroup / 8
+ bitmapLocation := gd.inodeBitmapLocation
+ offset := int64(bitmapLocation*uint64(fs.superblock.blockSize) + uint64(fs.start))
+ wrote, err := fs.file.WriteAt(b, offset)
+ if err != nil {
+ return fmt.Errorf("unable to write inode bitmap for blockgroup %d: %w", gd.number, err)
+ }
+ if wrote != int(bitmapByteCount) {
+ return fmt.Errorf("wrote %d bytes instead of expected %d for inode bitmap of block group %d", wrote, bitmapByteCount, gd.number)
+ }
+
+ return nil
+}
+
+func (fs *FileSystem) readBlockBitmap(group int) (*util.Bitmap, error) {
+ if group >= len(fs.groupDescriptors.descriptors) {
+ return nil, fmt.Errorf("block group %d does not exist", group)
+ }
+ gd := fs.groupDescriptors.descriptors[group]
+ bitmapLocation := gd.blockBitmapLocation
+ b := make([]byte, fs.superblock.blockSize)
+ offset := int64(bitmapLocation*uint64(fs.superblock.blockSize) + uint64(fs.start))
+ read, err := fs.file.ReadAt(b, offset)
+ if err != nil {
+ return nil, fmt.Errorf("unable to read block bitmap for blockgroup %d: %w", gd.number, err)
+ }
+ if read != int(fs.superblock.blockSize) {
+ return nil, fmt.Errorf("Read %d bytes instead of expected %d for block bitmap of block group %d", read, fs.superblock.blockSize, gd.number)
+ }
+ // create a bitmap
+ bs := util.NewBitmap(int(fs.superblock.blockSize) * len(fs.groupDescriptors.descriptors))
+ bs.FromBytes(b)
+ return bs, nil
+}
+
+// writeBlockBitmap write the inode bitmap to the disk.
+func (fs *FileSystem) writeBlockBitmap(bm *util.Bitmap, group int) error {
+ if group >= len(fs.groupDescriptors.descriptors) {
+ return fmt.Errorf("block group %d does not exist", group)
+ }
+ b := bm.ToBytes()
+ gd := fs.groupDescriptors.descriptors[group]
+ bitmapLocation := gd.blockBitmapLocation
+ offset := int64(bitmapLocation*uint64(fs.superblock.blockSize) + uint64(fs.start))
+ wrote, err := fs.file.WriteAt(b, offset)
+ if err != nil {
+ return fmt.Errorf("unable to write block bitmap for blockgroup %d: %w", gd.number, err)
+ }
+ if wrote != int(fs.superblock.blockSize) {
+ return fmt.Errorf("wrote %d bytes instead of expected %d for block bitmap of block group %d", wrote, fs.superblock.blockSize, gd.number)
+ }
+
+ return nil
+}
+
+func (fs *FileSystem) writeSuperblock() error {
+ superblockBytes, err := fs.superblock.toBytes()
+ if err != nil {
+ return fmt.Errorf("could not convert superblock to bytes: %v", err)
+ }
+ _, err = fs.file.WriteAt(superblockBytes, fs.start+int64(BootSectorSize))
+ return err
+}
+
+func blockGroupForInode(inodeNumber int, inodesPerGroup uint32) int {
+ return (inodeNumber - 1) / int(inodesPerGroup)
+}
+func blockGroupForBlock(blockNumber int, blocksPerGroup uint32) int {
+ return (blockNumber - 1) / int(blocksPerGroup)
+}
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/ext4.md b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/ext4.md
new file mode 100644
index 00000000000..0cfa7191f73
--- /dev/null
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/ext4.md
@@ -0,0 +1,335 @@
+# ext4
+This file describes the layout on disk of ext4. It is a living document and probably will be deleted rather than committed to git.
+
+The primary reference document is [here](https://ext4.wiki.kernel.org/index.php/Ext4_Disk_Layout).
+
+Also useful are:
+
+* https://blogs.oracle.com/linux/post/understanding-ext4-disk-layout-part-2
+* https://www.sans.org/blog/understanding-ext4-part-6-directories/ - blog series
+* https://digital-forensics.sans.org/blog/2017/06/07/understanding-ext4-part-6-directories
+* https://metebalci.com/blog/a-minimum-complete-tutorial-of-linux-ext4-file-system/
+
+## Concepts
+
+* Sector: a section of 512 bytes
+* Block: a contiguous group of sectors. Block size usually is either 4K (4096 bytes) or 1K (1024 bytes), i.e. 8 sectors or 2 sectors. Block size minimum is 1KB (2 sectors), max is 64KB (128 sectors). Each block is associated with exactly one file. A file may contain more than one block - e.g. if a file is larger than the size of a single block - but each block belongs to exactly one file.
+* inode: metadata about a file or directory. Each inode contains metadata about exactly one file. The number of inodes in a system is identical to the number of blocks for 32-bit, or far fewer for 64-bit.
+* Block group: a contiguous group of blocks. Each block group is (`8*block_size_in_bytes`) blocks. So if block size is 4K, or 4096 bytes, then a block group is `8*4096` = 32,768 blocks, each of size 4096 bytes, for a block group of 128MB. If block size is 1K, a block group is 8192 blocks, or 8MB.
+* 64-bit feature: ext4 filesystems normally uses 32-bit, which means the maximum blocks per filesystem is 2^32. If the 64-bit feature is enabled, then the maximum blocks per filesystem is 2^64.
+* Superblock: A block that contains information about the entire filesystem. Exists in block group 0 and sometimes is backed up to other block groups. The superblock contains information about the filesystem as a whole: inode size, block size, last mount time, etc.
+* Block Group Descriptor: Block Group Descriptors contain information about each block group: start block, end block, inodes, etc. One Descriptor per Group. But it is stored next to the Superblock (and backups), not with each Group.
+* Extent: an extent is a contiguous group of blocks. Extents are used to store files. Extents are mapped beginning with the inode, and provide the way of getting from an inode to the blocks that contain the file's data.
+
+
+### Block Group
+
+Each block group is built in the following order. There is a distinction between Group 0 - the first one
+in the filesystem - and all others.
+
+Block groups come in one of several types. It isn't necessary to list all of them here. The key elements are as follows.
+
+Block 0:
+
+1. Padding: 1024 bytes, used for boot sector
+
+Block 0 (above 1024 bytes, if blocksize >1024) or Block 1; all backup blocks:
+
+2. Superblock: One block
+3. Group Descriptors: Many blocks
+4. Reserved GDT Blocks: Many blocks, reserved in case we need to expand to more Group Descriptors in the future
+
+All blocks:
+
+5. Data block bitmap: 1 block. One bit per block in the block group. Set to 1 if a data block is in use, 0 if not.
+6. inode bitmap: 1 block. One bit per inode in the block group. Set to 1 if an inode is in use, 0 if not.
+7. inode table: many blocks. Calculated by `(inodes_per_group)*(size_of_inode)`. Remember that `inodes_per_group` = `blocks_per_group` = `8*block_size_in_bytes`. The original `size_of_inode` in ext2 was 128 bytes. In ext4 it uses 156 bytes, but is stored in 256 bytes of space, so `inode_size_in_bytes` = 256 bytes.
+8. Data blocks: all of the rest of the blocks in the block group
+
+The variant on the above is with Flexible Block Groups. If flexbg is enabled, then block groups are grouped together, normally
+groups of 16 (but the actual number is in the superblock). The data block bitmap, inode bitmap and inode table are
+in the first block group for each flexible block group.
+
+This means you can have all sorts of combinations:
+
+* block that is both first in a block group (contains block bitmap, inode bitmap, inode table) and superblock/backup (contains superblock, GDT, reserved GDT blocks)
+* block that is first in a block group (block bitmap, inode bitmap, inode table) but not first in a block group or Flex BG
+* block that is superblock/backup (superblock, GDT, reserved GDT blocks) but not first in a block group or Flex BG
+* neither of the above (contains just data blocks)
+
+Summary: block bitmap, inode bitmap and inode table are in the first block in a blockgroup or Flex BG, which is a consistent
+number. Superblock backups are in specific blocks, calculated by being a block number that is a power of 3, 5 or 7.
+
+## How to
+
+Different actions. These all will be replaced by actual code. Things we need to be able to do:
+
+* walk the tree to a particular directory or file
+* inode to data blocks
+* read directory entries
+* create a new directory entry
+* read contents of a file
+* write contents to a file
+
+### Walk the Tree
+
+In order to get to any particular file or directory in the ext4 filesystem, you need to "walk the tree".
+For example, say you want to read the contents of directory `/usr/local/bin/`.
+
+1. Find the inode of the root directory in the inode table. This **always** is inode 2.
+1. Read inode of the root directory to get the data blocks that contain the contents of the root directory. See [inode to data blocks](#inode-to-data-blocks).
+1. Read the directory entries in the data blocks to get the names of the files and directories in root. This can be linear or hash.
+ * linear: read sequentially until you find the one whose name matches the desired subdirectory, for example `usr`
+ * hash: hash the name and use that to get the correct location
+1. Using the matched directory entry, get the inode number for that subdirectory.
+1. Use the superblock to read how many inodes are in each block group, e.g. 8144
+1. Calculate which block group contains the inode you are looking for. Using the above example, 0-8143 are in group 0, 8144-16287 are in group 1, etc.
+1. Read the inode of that subdirectory in the inode table of the given block group to get the data blocks that contain the contents of that directory.
+1. Repeat until you have read the data blocks for the desired entry.
+
+### Inode to Data Blocks
+
+Start with the inode
+
+1. Read the inode
+1. Read the `i_block` value, 60 bytes at location 0x28 (= 40)
+1. The first 12 bytes are an extent header:
+ * magic number 0xf30a (little endian) - 2 bytes
+ * number of entries following the header - 2 bytes - in the inode, always 1, 2, 3, or 4
+ * maximum number of entries that could follow the header - 2 bytes - in the inode, always 4
+ * depth of this node in the extent tree, where 0 = leaf, parent to that is 1, etc. - 2 bytes
+ * generation (unused) - 4 bytes
+1. Read the entries that follow.
+
+If the data inside the inode is a leaf node (header depth = 0), then the entries will be leaf entries of 12 bytes:
+
+* first block in the file that this extent covers - 4 bytes
+* number of blocks in this extent - 2 bytes - If the value of this field is <= 32768, the extent is initialized. If the value of the field is > 32768, the extent is uninitialized and the actual extent length is ee_len - 32768. Therefore, the maximum length of a initialized extent is 32768 blocks, and the maximum length of an uninitialized extent is 32767.
+* upper 16 bits of the block location - 2 bytes
+* lower 32 bits of the block location - 4 bytes
+
+For example, if a file has 1,000 blocks, and a particular extent entry points to blocks 100-299 of the file, and it starts
+at filesystem block 10000, then the entry will be:
+
+* 100 (4 bytes)
+* 200 (2 bytes) - is this correct? This would indicate uninitialized
+* 0 (2 bytes)
+* 10000 (4 bytes)
+
+If the data inside the inode is an internal node (header depth > 0), then the entries will be internal entries of 12 bytes:
+
+* first file block that this extent and all its children cover - 4 bytes
+* lower 32 bits of the block number os the extent node on the next lower level - 4 bytes
+* upper 16 bits of the block number of the extent node on the next lower level - 2 bytes
+* unused - 2 bytes
+
+For example, if a file has 10,000 blocks, covered in 15 extents, then there will be 15 level 0 extents, and 1 level 1 extent,
+and the 15 extents are stored in filesystem block 20000.
+
+The lower level 0 extent will look like our leaf node example above.
+The upper level 1 extent will look like:
+
+* 0 (4 bytes) - because this starts from file block 0
+* 20000 (4 bytes) - the block number of the extent node on the next lower level
+* 0 (2 bytes) - because lower 4 bytes were enough to cover
+
+You can find all of the blocks simply by looking at the root of the extent tree in the inode.
+
+* If the extents for the file are 4 or fewer, then the extent tree is stored in the inode itself.
+* If the extents for the file are more than 4, but enough to fit the extents in 1-4 blocks, then:
+ * level 0 extents are stored in a single separate block
+ * level 1 extents are stored in the inode, with up to 4 entries pointing to the level 0 extents blocks
+* If the extents for the file are more than fit in 4 blocks, then:
+ * level 0 extents are stored in as many blocks as needed
+ * level 1 extents are stored in other blocks pointing to level 0 extent blocks
+ * level 2 extents - up to 4 - are stored in the inode
+
+Each of these is repeated upwards. The maximum at the top of the tree is 4, the maximum in each block is `(blocksize-12)/12`.
+Because:
+
+- each block of extent nodes needs a header of 12 bytes
+- each extent node is 12 bytes
+
+### Read Directory Entries
+To read directory entries
+
+1. Walk the tree until you find the inode for the directory you want.
+2. Read the data blocks pointed to by that inode, see [inode to data blocks](#inode-to-data-blocks).
+3. Interpret the data blocks.
+
+The directory itself is just a single "file". It has an inode that indicates the file "length", which is the number of bytes that the listing takes up.
+
+There are two types of directories: Classic and Hash Tree. Classic are just linear, unsorted, unordered lists of files. They work fine for shorter lists, but large directories can be slow to traverse if they grow too large. Once the contents of the directory "file" will be larger than a single block, ext4 switches it to a Hash Tree Directory Entry.
+
+Which directory type it is - classical linear or hash tree - does not affect the inode, for which it is just a file, but the contents of the directory entry "file". You can tell if it is linear or hash tree by checking the inode flag `EXT4_INDEX_FL`. If it is set (i.e. `& 0x1000`), then it is a hash tree.
+
+#### Classic Directory Entry
+Each directory entry is at most 263 bytes long. They are arranged in sequential order in the file. The contents are:
+
+* first four bytes are a `uint32` giving the inode number
+* next 2 bytes give the length of the directory entry (max 263)
+* next 1 byte gives the length of the file name (which could be calculated from the directory entry length...)
+* next 1 byte gives type: unknown, file, directory, char device, block device, FIFO, socket, symlink
+* next (up to 255) bytes contain chars with the file or directory name
+
+The above is for the second version of ext4 directory entry (`ext4_dir_entry_2`). The slightly older version (`ext4_dir_entry`) is similar, except it does not give the file type, which in any case is in the inode. Instead it uses 2 bytes for the file name length.
+
+#### Hash Tree Directory Entry
+Entries in the block are structured as follows:
+
+* `.` and `..` are the first two entries, and are classic `ext4_dir_entry_2`
+* Look in byte `0x1c` to find the hash algorithm
+* take the desired file/subdirectory name (just the `basename`) and hash it, see [Calculating the hash value][Calculating the hash value]
+* look in the root directory entry in the hashmap to find the relative block number. Note that the block number is relative to the block in the directory, not the filesystem or block group.
+* Next step depends on the hash tree depth:
+ * Depth = 0: read directory entry from the given block.
+ * Depth > 0: use the block as another lookup table, repeating the steps above, until we come to the depth.
+* Once we have the final leaf block given by the hash table, we just read the block sequentially; it will be full of classical directory entries linearly.
+
+When reading the hashmap, it may not match precisely. Instead, it will fit within a range. The hashmap is sorted by `>=` to `<`. So if the table has entries as follows:
+
+| Hash | Block |
+| -------|-------|
+| 0 | 1 |
+| 100 | 25 |
+| 300 | 16 |
+
+Then:
+
+* all hash values from `0`-`99` will be in block `1`
+* all hash values from `100-299` will be in block `25`
+* all hash values from `300` to infinite will be in block `16`
+
+##### Calculating the hash value
+
+The hashing uses one of several algorithms. Most commonly, it is Half MD4.
+
+MD4 gives a digest length of 128 bits = 16 bytes.
+
+The "half md4" algorithm is given by the transformation code
+[here](https://elixir.bootlin.com/linux/v4.6/source/lib/halfmd4.c#L26). The result
+of it is 4 bytes. Those 4 bytes are the input to the hash.
+
+### Create a Directory Entry
+
+To create a directory, you need to go through the following steps:
+
+1. "Walk the tree" to find the parent directory. E.g. if you are creating `/usr/local/foo`, then you need to walk the tree to get to the directory "file" for `/usr/local`. If the parent directory is just the root `/`, e.g. you are creating `/foo`, then you use the root directory, whose inode always is `2`.
+2. Determine if the parent directory is classical linear or hash tree, by checking the flag `EXT4_INDEX_FL` in the parent directory's inode.
+ * if hash:
+ 1. find a block in the "directory" file with space to add a linear entry
+ 1. create and add the entry
+ 1. calculate the hash of the filename
+ 1. add the `hash:block_number` entry into the tree
+ 1. rebalance if needed
+ * if linear, create the entry:
+ * if adding one will not exceed the size for linear, write it and done
+ * if adding one will exceed the size for linear, convert to hash, then write it
+
+#### Hash Tree
+
+1. Calculate the hash of the new directory entry name
+2. Determine which block in the parent directory "file" the new entry should live, based on the hash table.
+3. Find the block.
+4. Add a classical linear entry at the end of it.
+5. Update the inode for the parent directory with the new file size.
+
+If there is no room at the end of the block, you need to rebalance the hash tree. See below.
+
+#### Classical Linear
+
+1. Find the last block in the parent directory "file"
+ * if there is no room for another entry, extend the file size by another block, and update the inode for the file with the block map
+2. Add a classical linear directory entry at the end of it.
+3. Update the inode for the parent directory with the new file size, if any. E.g. if the entry fit within padding, there is no change in size.
+
+If this entry will cause the directory "file" to extend beyond a single block, convert to a hash tree. See below.
+
+### Rebalance Hash Tree
+
+Rebalancing the hash tree is rebalancing a btree, where the keys are the hash values.
+You only ever need to rebalance when you add or remove an entry.
+
+#### Adding an entry
+
+When adding an entry, you only ever need to rebalance the node to which you add it, and parents up to the root.
+
+1. Calculate the hash of the entry
+1. Determine the leaf node into which it should go
+1. If the leaf node has less than the maximum number of elements, add it and done
+1. If the lead node has the maximum number of elements:
+ 1. Add the new node in the right place
+ 1. Find the median
+ 1. Move the median up to the parent node
+ 1. If necessary, rebalance the parent node
+
+#### Removing an entry
+
+When removing an entry, you only ever need to rebalance the node from which you remove it, and parents up to the root.
+
+1. Calculate the hash of the entry
+1. Determine the leaf node in which it exists
+1. If the leaf node has less than the maximum number of elements, add it and done
+1. If the lead node has the maximum number of elements:
+ 1. Add the new node in the right place
+ 1. Find the median
+ 1. Move the median up to the parent node
+ 1. If necessary, rebalance the parent node
+
+### Convert Classical Linear Directory Entries to Hash Tree
+
+The conversion usually happens when a single entry will exceed the capacity of a single block.
+
+1. Switch the flag in the inode to hash-tree
+1. Calculate the hash of each entry
+1. Create 2 new blocks:
+ * 1 for the bottom half of the entries
+ * 1 for the top half of the entries
+1. Move the bottom half of the entries into the bottom block
+1. Move the top half of the entries into the top block
+1. Zero out the current single file block, which previously had the classic linear directory entries
+1. Write the header into the tree block, with the 0-hash-value pointing to the bottom block
+1. Write one entry after the header, for the lowest hash value of the upper block, pointing to the upper block
+
+### Read File Contents
+
+1. Walk the tree until you find the inode for the file you want.
+1. Find the data blocks for that inode, see [inode to data blocks](#inode-to-data-blocks).
+1. Interpret the data blocks.
+
+### Create File
+
+1. Walk the tree until you find the inode for the parent directory.
+1. Find a free inode using the inode bitmap.
+1. Find a free block using the block bitmap.
+1. Create the inode for the new file in the inode table. Be sure to update all the dependencies:
+ * inode bitmap
+ * inode table
+ * inode count in the block group table
+ * inode count in the superblock
+1. Reserve a data block for the new file in the block group table. Be sure to update all the dependencies:
+ * block bitmap
+ * block count in the block group table
+ * block count in the superblock
+1. Create the file entry in the parent directory. Depends on if this is classic linear directory or hash tree directory. Note that if it is classic linear, calculate the new size before writing the entry. If it is bigger than a single block, convert to hash tree. TODO: is this the right boundary, single block?
+ * Classic linear directory:
+ 1. Find the last block in the parent directory "file"
+ 1. Add a classical linear directory entry at the end of it
+ 1. Update the inode for the parent directory with the new file size
+ * Hash tree directory:
+ 1. Calculate the hash of the new directory entry name
+ 1. Determine which block in the parent directory "file" the new entry should live, based on the hash table
+ 1. Find the block
+ 1. Add a classical linear entry at the end of it
+ 1. Update the inode for the parent directory with the new file size
+
+
+### Write File Contents
+
+1. Walk the tree until you find the inode for the file you want.
+1. Find the data blocks for that inode, see [inode to data blocks](#inode-to-data-blocks).
+1. Write the data to the data blocks.
+1. If the data written exceeds the end of the last block, reserve a new block, update the inode extent tree, and write the data to the new block.
+1. Update the inode with the filesize
+1. Update the block group table with the used blocks
+1. Update the superblock with the used blocks
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/extent.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/extent.go
new file mode 100644
index 00000000000..e5d456e0068
--- /dev/null
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/extent.go
@@ -0,0 +1,733 @@
+package ext4
+
+import (
+ "encoding/binary"
+ "fmt"
+ "sort"
+)
+
+const (
+ extentTreeHeaderLength int = 12
+ extentTreeEntryLength int = 12
+ extentHeaderSignature uint16 = 0xf30a
+ extentTreeMaxDepth int = 5
+)
+
+// extens a structure holding multiple extents
+type extents []extent
+
+// extent a structure with information about a single contiguous run of blocks containing file data
+type extent struct {
+ // fileBlock block number relative to the file. E.g. if the file is composed of 5 blocks, this could be 0-4
+ fileBlock uint32
+ // startingBlock the first block on disk that contains the data in this extent. E.g. if the file is made up of data from blocks 100-104 on the disk, this would be 100
+ startingBlock uint64
+ // count how many contiguous blocks are covered by this extent
+ count uint16
+}
+
+// equal if 2 extents are equal
+//
+//nolint:unused // useful function for future
+func (e *extent) equal(a *extent) bool {
+ if (e == nil && a != nil) || (a == nil && e != nil) {
+ return false
+ }
+ if e == nil && a == nil {
+ return true
+ }
+ return *e == *a
+}
+
+// blockCount how many blocks are covered in the extents
+//
+//nolint:unused // useful function for future
+func (e extents) blockCount() uint64 {
+ var count uint64
+ for _, ext := range e {
+ count += uint64(ext.count)
+ }
+ return count
+}
+
+// extentBlockFinder provides a way of finding the blocks on disk that represent the block range of a given file.
+// Arguments are the starting and ending blocks in the file. Returns a slice of blocks to read on disk.
+// These blocks are in order. For example, if you ask to read file blocks starting at 20 for a count of 25, then you might
+// get a single fileToBlocks{block: 100, count: 25} if the file is contiguous on disk. Or you might get
+// fileToBlocks{block: 100, count: 10}, fileToBlocks{block: 200, count: 15} if the file is fragmented on disk.
+// The slice should be read in order.
+type extentBlockFinder interface {
+ // findBlocks find the actual blocks for a range in the file, given the start block in the file and how many blocks
+ findBlocks(start, count uint64, fs *FileSystem) ([]uint64, error)
+ // blocks get all of the blocks for a file, in sequential order, essentially unravels the tree into a slice of extents
+ blocks(fs *FileSystem) (extents, error)
+ // toBytes convert this extentBlockFinder to bytes to be stored in a block or inode
+ toBytes() []byte
+ getDepth() uint16
+ getMax() uint16
+ getBlockSize() uint32
+ getFileBlock() uint32
+ getCount() uint32
+}
+
+var (
+ _ extentBlockFinder = &extentInternalNode{}
+ _ extentBlockFinder = &extentLeafNode{}
+)
+
+// extentNodeHeader represents the header of an extent node
+type extentNodeHeader struct {
+ depth uint16 // the depth of tree below here; for leaf nodes, will be 0
+ entries uint16 // number of entries
+ max uint16 // maximum number of entries allowed at this level
+ blockSize uint32 // block size for this tree
+}
+
+func (e extentNodeHeader) toBytes() []byte {
+ b := make([]byte, 12)
+ binary.LittleEndian.PutUint16(b[0:2], extentHeaderSignature)
+ binary.LittleEndian.PutUint16(b[2:4], e.entries)
+ binary.LittleEndian.PutUint16(b[4:6], e.max)
+ binary.LittleEndian.PutUint16(b[6:8], e.depth)
+ return b
+}
+
+// extentChildPtr represents a child pointer in an internal node of extents
+// the child could be a leaf node or another internal node. We only would know
+// after parsing diskBlock to see its header.
+type extentChildPtr struct {
+ fileBlock uint32 // extents or children of this cover from file block fileBlock onwards
+ count uint32 // how many blocks are covered by this extent
+ diskBlock uint64 // block number where the children live
+}
+
+// extentLeafNode represents a leaf node of extents
+// it includes the information in the header and the extents (leaf nodes).
+// By definition, this is a leaf node, so depth=0
+type extentLeafNode struct {
+ extentNodeHeader
+ extents extents // the actual extents
+}
+
+// findBlocks find the actual blocks for a range in the file. leaf nodes already have all of the data inside,
+// so the FileSystem reference is unused.
+func (e extentLeafNode) findBlocks(start, count uint64, _ *FileSystem) ([]uint64, error) {
+ var ret []uint64
+
+ // before anything, figure out which file block is the start and end of the desired range
+ end := start + count - 1
+
+ // we are at the bottom of the tree, so we can just return the extents
+ for _, ext := range e.extents {
+ extentStart := uint64(ext.fileBlock)
+ extentEnd := uint64(ext.fileBlock + uint32(ext.count) - 1)
+
+ // Check if the extent does not overlap with the given block range
+ if extentEnd < start || extentStart > end {
+ continue
+ }
+
+ // Calculate the overlapping range
+ overlapStart := max(start, extentStart)
+ overlapEnd := min(end, extentEnd)
+
+ // Calculate the starting disk block for the overlap
+ diskBlockStart := ext.startingBlock + (overlapStart - extentStart)
+
+ // Append the corresponding disk blocks to the result
+ for i := uint64(0); i <= overlapEnd-overlapStart; i++ {
+ ret = append(ret, diskBlockStart+i)
+ }
+ }
+ return ret, nil
+}
+
+// blocks find the actual blocks for a range in the file. leaf nodes already have all of the data inside,
+// so the FileSystem reference is unused.
+func (e extentLeafNode) blocks(_ *FileSystem) (extents, error) {
+ return e.extents, nil
+}
+
+// toBytes convert the node to raw bytes to be stored, either in a block or in an inode
+func (e extentLeafNode) toBytes() []byte {
+ // 12 byte header, 12 bytes per child
+ b := make([]byte, 12+12*e.max)
+ copy(b[0:12], e.extentNodeHeader.toBytes())
+
+ for i, ext := range e.extents {
+ base := (i + 1) * 12
+ binary.LittleEndian.PutUint32(b[base:base+4], ext.fileBlock)
+ binary.LittleEndian.PutUint16(b[base+4:base+6], ext.count)
+ diskBlock := make([]byte, 8)
+ binary.LittleEndian.PutUint64(diskBlock, ext.startingBlock)
+ copy(b[base+6:base+8], diskBlock[4:6])
+ copy(b[base+8:base+12], diskBlock[0:4])
+ }
+ return b
+}
+
+func (e *extentLeafNode) getDepth() uint16 {
+ return e.depth
+}
+
+func (e *extentLeafNode) getMax() uint16 {
+ return e.max
+}
+
+func (e *extentLeafNode) getBlockSize() uint32 {
+ return e.blockSize
+}
+
+func (e *extentLeafNode) getFileBlock() uint32 {
+ return e.extents[0].fileBlock
+}
+
+func (e *extentLeafNode) getCount() uint32 {
+ return uint32(len(e.extents))
+}
+
+// extentInternalNode represents an internal node in a tree of extents
+// it includes the information in the header and the internal nodes
+// By definition, this is an internal node, so depth>0
+type extentInternalNode struct {
+ extentNodeHeader
+ children []*extentChildPtr // the children
+}
+
+// findBlocks find the actual blocks for a range in the file. internal nodes need to read the filesystem to
+// get the child nodes, so the FileSystem reference is used.
+func (e extentInternalNode) findBlocks(start, count uint64, fs *FileSystem) ([]uint64, error) {
+ var ret []uint64
+
+ // before anything, figure out which file block is the start and end of the desired range
+ end := start + count - 1
+
+ // we are not depth 0, so we have children extent tree nodes. Figure out which ranges we are in.
+ // the hard part here is that each child has start but not end or count. You only know it from reading the next one.
+ // So if the one we are looking at is in the range, we get it from the children, and keep going
+ for _, child := range e.children {
+ extentStart := uint64(child.fileBlock)
+ extentEnd := uint64(child.fileBlock + child.count - 1)
+
+ // Check if the extent does not overlap with the given block range
+ if extentEnd < start || extentStart > end {
+ continue
+ }
+
+ // read the extent block from the disk
+ b, err := fs.readBlock(child.diskBlock)
+ if err != nil {
+ return nil, err
+ }
+ ebf, err := parseExtents(b, e.blockSize, uint32(extentStart), uint32(extentEnd))
+ if err != nil {
+ return nil, err
+ }
+ blocks, err := ebf.findBlocks(extentStart, uint64(child.count), fs)
+ if err != nil {
+ return nil, err
+ }
+ if len(blocks) > 0 {
+ ret = append(ret, blocks...)
+ }
+ }
+ return ret, nil
+}
+
+// blocks find the actual blocks for a range in the file. leaf nodes already have all of the data inside,
+// so the FileSystem reference is unused.
+func (e extentInternalNode) blocks(fs *FileSystem) (extents, error) {
+ var ret extents
+
+ // we are not depth 0, so we have children extent tree nodes. Walk the tree below us and find all of the blocks
+ for _, child := range e.children {
+ // read the extent block from the disk
+ b, err := fs.readBlock(child.diskBlock)
+ if err != nil {
+ return nil, err
+ }
+ ebf, err := parseExtents(b, e.blockSize, child.fileBlock, child.fileBlock+child.count-1)
+ if err != nil {
+ return nil, err
+ }
+ blocks, err := ebf.blocks(fs)
+ if err != nil {
+ return nil, err
+ }
+ if len(blocks) > 0 {
+ ret = append(ret, blocks...)
+ }
+ }
+ return ret, nil
+}
+
+// toBytes convert the node to raw bytes to be stored, either in a block or in an inode
+func (e extentInternalNode) toBytes() []byte {
+ // 12 byte header, 12 bytes per child
+ b := make([]byte, 12+12*e.max)
+ copy(b[0:12], e.extentNodeHeader.toBytes())
+
+ for i, child := range e.children {
+ base := (i + 1) * 12
+ binary.LittleEndian.PutUint32(b[base:base+4], child.fileBlock)
+ diskBlock := make([]byte, 8)
+ binary.LittleEndian.PutUint64(diskBlock, child.diskBlock)
+ copy(b[base+4:base+8], diskBlock[0:4])
+ copy(b[base+8:base+10], diskBlock[4:6])
+ }
+ return b
+}
+func (e *extentInternalNode) getDepth() uint16 {
+ return e.depth
+}
+
+func (e *extentInternalNode) getMax() uint16 {
+ return e.max
+}
+
+func (e *extentInternalNode) getBlockSize() uint32 {
+ return e.blockSize
+}
+
+func (e *extentInternalNode) getFileBlock() uint32 {
+ return e.children[0].fileBlock
+}
+
+func (e *extentInternalNode) getCount() uint32 {
+ return uint32(len(e.children))
+}
+
+// parseExtents takes bytes, parses them to find the actual extents or the next blocks down.
+// It does not recurse down the tree, as we do not want to do that until we actually are ready
+// to read those blocks. This is similar to how ext4 driver in the Linux kernel does it.
+// totalBlocks is the total number of blocks covered in this given section of the extent tree.
+func parseExtents(b []byte, blocksize, start, count uint32) (extentBlockFinder, error) {
+ var ret extentBlockFinder
+ // must have at least header and one entry
+ minLength := extentTreeHeaderLength + extentTreeEntryLength
+ if len(b) < minLength {
+ return nil, fmt.Errorf("cannot parse extent tree from %d bytes, minimum required %d", len(b), minLength)
+ }
+ // check magic signature
+ if binary.LittleEndian.Uint16(b[0:2]) != extentHeaderSignature {
+ return nil, fmt.Errorf("invalid extent tree signature: %x", b[0x0:0x2])
+ }
+ e := extentNodeHeader{
+ entries: binary.LittleEndian.Uint16(b[0x2:0x4]),
+ max: binary.LittleEndian.Uint16(b[0x4:0x6]),
+ depth: binary.LittleEndian.Uint16(b[0x6:0x8]),
+ blockSize: blocksize,
+ }
+ // b[0x8:0xc] is used for the generation by Lustre but not standard ext4, so we ignore
+
+ // we have parsed the header, now read either the leaf entries or the intermediate nodes
+ switch e.depth {
+ case 0:
+ leafNode := extentLeafNode{
+ extentNodeHeader: e,
+ }
+ // read the leaves
+ for i := 0; i < int(e.entries); i++ {
+ start := i*extentTreeEntryLength + extentTreeHeaderLength
+ diskBlock := make([]byte, 8)
+ copy(diskBlock[0:4], b[start+8:start+12])
+ copy(diskBlock[4:6], b[start+6:start+8])
+ leafNode.extents = append(leafNode.extents, extent{
+ fileBlock: binary.LittleEndian.Uint32(b[start : start+4]),
+ count: binary.LittleEndian.Uint16(b[start+4 : start+6]),
+ startingBlock: binary.LittleEndian.Uint64(diskBlock),
+ })
+ }
+ ret = &leafNode
+ default:
+ internalNode := extentInternalNode{
+ extentNodeHeader: e,
+ }
+ for i := 0; i < int(e.entries); i++ {
+ start := i*extentTreeEntryLength + extentTreeHeaderLength
+ diskBlock := make([]byte, 8)
+ copy(diskBlock[0:4], b[start+4:start+8])
+ copy(diskBlock[4:6], b[start+8:start+10])
+ ptr := &extentChildPtr{
+ diskBlock: binary.LittleEndian.Uint64(diskBlock),
+ fileBlock: binary.LittleEndian.Uint32(b[start : start+4]),
+ }
+ internalNode.children = append(internalNode.children, ptr)
+ if i > 0 {
+ internalNode.children[i-1].count = ptr.fileBlock - internalNode.children[i-1].fileBlock
+ }
+ }
+ if len(internalNode.children) > 0 {
+ internalNode.children[len(internalNode.children)-1].count = start + count - internalNode.children[len(internalNode.children)-1].fileBlock
+ }
+ ret = &internalNode
+ }
+
+ return ret, nil
+}
+
+// extendExtentTree extends extent tree with a slice of new extents
+// if the existing tree is nil, create a new one.
+// For example, if the input is an extent tree - like the kind found in an inode - and you want to add more extents to it,
+// you add the provided extents, and it expands the tree, including creating new internal nodes and writing them to disk, as needed.
+
+func extendExtentTree(existing extentBlockFinder, added *extents, fs *FileSystem, parent *extentInternalNode) (extentBlockFinder, error) {
+ // Check if existing is a leaf or internal node
+ switch node := existing.(type) {
+ case *extentLeafNode:
+ return extendLeafNode(node, added, fs, parent)
+ case *extentInternalNode:
+ return extendInternalNode(node, added, fs, parent)
+ case nil:
+ // brand new extent tree. The root is in the inode, which has a max of 4 extents.
+ return createRootExtentTree(added, fs)
+ default:
+ return nil, fmt.Errorf("unsupported extentBlockFinder type")
+ }
+}
+
+func createRootExtentTree(added *extents, fs *FileSystem) (extentBlockFinder, error) {
+ // the root always is in the inode, which has a maximum of 4 extents. If it fits within that, we can just create a leaf node.
+ if len(*added) <= 4 {
+ return &extentLeafNode{
+ extentNodeHeader: extentNodeHeader{
+ depth: 0,
+ entries: uint16(len(*added)),
+ max: 4,
+ blockSize: fs.superblock.blockSize,
+ },
+ extents: *added,
+ }, nil
+ }
+ // in theory, we never should be creating a root internal node. We always should be starting with an extent or two,
+ // and later expanding the file.
+ // It might be theoretically possible, though, so we will handle it in the future.
+ return nil, fmt.Errorf("cannot create root internal node")
+}
+
+func extendLeafNode(node *extentLeafNode, added *extents, fs *FileSystem, parent *extentInternalNode) (extentBlockFinder, error) {
+ // Check if the leaf node has enough space for the added extents
+ if len(node.extents)+len(*added) <= int(node.max) {
+ // Simply append the extents if there's enough space
+ node.extents = append(node.extents, *added...)
+ node.entries = uint16(len(node.extents))
+
+ // Write the updated node back to the disk
+ err := writeNodeToDisk(node, fs, parent)
+ if err != nil {
+ return nil, err
+ }
+
+ return node, nil
+ }
+
+ // If not enough space, split the node
+ newNodes, err := splitLeafNode(node, added, fs, parent)
+ if err != nil {
+ return nil, err
+ }
+
+ // Check if the original node was the root
+ if parent == nil {
+ // Create a new internal node to reference the split leaf nodes
+ var newNodesAsBlockFinder []extentBlockFinder
+ for _, n := range newNodes {
+ newNodesAsBlockFinder = append(newNodesAsBlockFinder, n)
+ }
+ newRoot := createInternalNode(newNodesAsBlockFinder, nil, fs)
+ return newRoot, nil
+ }
+
+ // If the original node was not the root, handle the parent internal node
+ parentNode, err := getParentNode(node, fs)
+ if err != nil {
+ return nil, err
+ }
+
+ return extendInternalNode(parentNode, added, fs, parent)
+}
+
+func splitLeafNode(node *extentLeafNode, added *extents, fs *FileSystem, parent *extentInternalNode) ([]*extentLeafNode, error) {
+ // Combine existing and new extents
+ allExtents := node.extents
+ allExtents = append(allExtents, *added...)
+ // Sort extents by fileBlock to maintain order
+ sort.Slice(allExtents, func(i, j int) bool {
+ return allExtents[i].fileBlock < allExtents[j].fileBlock
+ })
+
+ // Calculate the midpoint to split the extents
+ mid := len(allExtents) / 2
+
+ // Create the first new leaf node
+ firstLeaf := &extentLeafNode{
+ extentNodeHeader: extentNodeHeader{
+ depth: 0,
+ entries: uint16(mid),
+ max: node.max,
+ blockSize: node.blockSize,
+ },
+ extents: allExtents[:mid],
+ }
+
+ // Create the second new leaf node
+ secondLeaf := &extentLeafNode{
+ extentNodeHeader: extentNodeHeader{
+ depth: 0,
+ entries: uint16(len(allExtents) - mid),
+ max: node.max,
+ blockSize: node.blockSize,
+ },
+ extents: allExtents[mid:],
+ }
+
+ // Write new leaf nodes to the disk
+ err := writeNodeToDisk(firstLeaf, fs, parent)
+ if err != nil {
+ return nil, err
+ }
+ err = writeNodeToDisk(secondLeaf, fs, parent)
+ if err != nil {
+ return nil, err
+ }
+
+ return []*extentLeafNode{firstLeaf, secondLeaf}, nil
+}
+
+func createInternalNode(nodes []extentBlockFinder, parent *extentInternalNode, fs *FileSystem) *extentInternalNode {
+ internalNode := &extentInternalNode{
+ extentNodeHeader: extentNodeHeader{
+ depth: nodes[0].getDepth() + 1, // Depth is 1 more than the children
+ entries: uint16(len(nodes)),
+ max: nodes[0].getMax(), // Assuming uniform max for all nodes
+ blockSize: nodes[0].getBlockSize(),
+ },
+ children: make([]*extentChildPtr, len(nodes)),
+ }
+
+ for i, node := range nodes {
+ internalNode.children[i] = &extentChildPtr{
+ fileBlock: node.getFileBlock(),
+ count: node.getCount(),
+ diskBlock: getBlockNumberFromNode(node, parent),
+ }
+ }
+
+ // Write the new internal node to the disk
+ err := writeNodeToDisk(internalNode, fs, parent)
+ if err != nil {
+ return nil
+ }
+
+ return internalNode
+}
+
+func getBlockNumberFromNode(node extentBlockFinder, parent *extentInternalNode) uint64 {
+ for _, childPtr := range parent.children {
+ if childPtrMatchesNode(childPtr, node) {
+ return childPtr.diskBlock
+ }
+ }
+ return 0 // Return 0 or an appropriate error value if the block number is not found
+}
+
+// Helper function to match a child pointer to a node
+func childPtrMatchesNode(childPtr *extentChildPtr, node extentBlockFinder) bool {
+ switch n := node.(type) {
+ case *extentLeafNode:
+ return childPtr.fileBlock == n.extents[0].fileBlock
+ case *extentInternalNode:
+ // Logic to determine if the childPtr matches the internal node
+ // Placeholder: Implement based on your specific matching criteria
+ return true
+ default:
+ return false
+ }
+}
+
+func extendInternalNode(node *extentInternalNode, added *extents, fs *FileSystem, parent *extentInternalNode) (extentBlockFinder, error) {
+ // Find the appropriate child node to extend
+ childIndex := findChildNode(node, added)
+ childPtr := node.children[childIndex]
+
+ // Load the actual child node from the disk
+ childNode, err := loadChildNode(childPtr, fs)
+ if err != nil {
+ return nil, err
+ }
+
+ // Recursively extend the child node
+ updatedChild, err := extendExtentTree(childNode, added, fs, node)
+ if err != nil {
+ return nil, err
+ }
+
+ // Update the current internal node to reference the updated child
+ switch updatedChild := updatedChild.(type) {
+ case *extentLeafNode:
+ node.children[childIndex] = &extentChildPtr{
+ fileBlock: updatedChild.extents[0].fileBlock,
+ count: uint32(len(updatedChild.extents)),
+ diskBlock: getBlockNumberFromNode(updatedChild, node),
+ }
+ case *extentInternalNode:
+ node.children[childIndex] = &extentChildPtr{
+ fileBlock: updatedChild.children[0].fileBlock,
+ count: uint32(len(updatedChild.children)),
+ diskBlock: getBlockNumberFromNode(updatedChild, node),
+ }
+ default:
+ return nil, fmt.Errorf("unsupported updatedChild type")
+ }
+
+ // Check if the internal node is at capacity
+ if len(node.children) > int(node.max) {
+ // Split the internal node if it's at capacity
+ newInternalNodes, err := splitInternalNode(node, node.children[childIndex], fs, parent)
+ if err != nil {
+ return nil, err
+ }
+
+ // Check if the original node was the root
+ if parent == nil {
+ // Create a new internal node as the new root
+ var newNodesAsBlockFinder []extentBlockFinder
+ for _, n := range newInternalNodes {
+ newNodesAsBlockFinder = append(newNodesAsBlockFinder, n)
+ }
+ newRoot := createInternalNode(newNodesAsBlockFinder, nil, fs)
+ return newRoot, nil
+ }
+
+ // If the original node was not the root, handle the parent internal node
+ return extendInternalNode(parent, added, fs, parent)
+ }
+
+ // Write the updated node back to the disk
+ err = writeNodeToDisk(node, fs, parent)
+ if err != nil {
+ return nil, err
+ }
+
+ return node, nil
+}
+
+// Helper function to get the parent node of a given internal node
+//
+//nolint:revive // this parameter will be used eventually
+func getParentNode(node extentBlockFinder, fs *FileSystem) (*extentInternalNode, error) {
+ // Logic to find and return the parent node of the given node
+ // This is a placeholder and needs to be implemented based on your specific tree structure
+ return nil, fmt.Errorf("getParentNode not implemented")
+}
+
+func splitInternalNode(node *extentInternalNode, newChild *extentChildPtr, fs *FileSystem, parent *extentInternalNode) ([]*extentInternalNode, error) {
+ // Combine existing children with the new child
+ allChildren := node.children
+ allChildren = append(allChildren, newChild)
+ // Sort children by fileBlock to maintain order
+ sort.Slice(allChildren, func(i, j int) bool {
+ return allChildren[i].fileBlock < allChildren[j].fileBlock
+ })
+
+ // Calculate the midpoint to split the children
+ mid := len(allChildren) / 2
+
+ // Create the first new internal node
+ firstInternal := &extentInternalNode{
+ extentNodeHeader: extentNodeHeader{
+ depth: node.depth,
+ entries: uint16(mid),
+ max: node.max,
+ blockSize: node.blockSize,
+ },
+ children: allChildren[:mid],
+ }
+
+ // Create the second new internal node
+ secondInternal := &extentInternalNode{
+ extentNodeHeader: extentNodeHeader{
+ depth: node.depth,
+ entries: uint16(len(allChildren) - mid),
+ max: node.max,
+ blockSize: node.blockSize,
+ },
+ children: allChildren[mid:],
+ }
+
+ // Write new internal nodes to the disk
+ err := writeNodeToDisk(firstInternal, fs, parent)
+ if err != nil {
+ return nil, err
+ }
+ err = writeNodeToDisk(secondInternal, fs, parent)
+ if err != nil {
+ return nil, err
+ }
+
+ return []*extentInternalNode{firstInternal, secondInternal}, nil
+}
+
+func writeNodeToDisk(node extentBlockFinder, fs *FileSystem, parent *extentInternalNode) error {
+ var blockNumber uint64
+ if parent != nil {
+ blockNumber = getBlockNumberFromNode(node, parent)
+ } else {
+ blockNumber = getNewBlockNumber(fs)
+ }
+
+ if blockNumber == 0 {
+ return fmt.Errorf("block number not found for node")
+ }
+
+ data := node.toBytes()
+ _, err := fs.file.WriteAt(data, int64(blockNumber)*int64(fs.superblock.blockSize))
+ return err
+}
+
+// Helper function to get a new block number when there is no parent
+//
+//nolint:revive // this parameter will be used eventually
+func getNewBlockNumber(fs *FileSystem) uint64 {
+ // Logic to allocate a new block
+ // This is a placeholder and needs to be implemented based on your specific filesystem structure
+ return 0 // Placeholder: Replace with actual implementation
+}
+
+// Helper function to find the block number of a child node from its parent
+func findChildBlockNumber(parent *extentInternalNode, child extentBlockFinder) uint64 {
+ for _, childPtr := range parent.children {
+ if childPtrMatchesNode(childPtr, child) {
+ return childPtr.diskBlock
+ }
+ }
+ return 0
+}
+
+func findChildNode(node *extentInternalNode, added *extents) int {
+ // Assuming added extents are sorted, find the correct child node to extend
+ addedSlice := *added
+ for i, child := range node.children {
+ if addedSlice[0].fileBlock < child.fileBlock {
+ return i - 1
+ }
+ }
+ return len(node.children) - 1
+}
+
+// loadChildNode load up a child node from the disk
+//
+//nolint:unparam // this parameter will be used eventually
+func loadChildNode(childPtr *extentChildPtr, fs *FileSystem) (extentBlockFinder, error) {
+ data := make([]byte, fs.superblock.blockSize)
+ _, err := fs.file.ReadAt(data, int64(childPtr.diskBlock)*int64(fs.superblock.blockSize))
+ if err != nil {
+ return nil, err
+ }
+
+ // Logic to decode data into an extentBlockFinder (extentLeafNode or extentInternalNode)
+ // This is a placeholder and needs to be implemented based on your specific encoding scheme
+ var node extentBlockFinder
+ // Implement the logic to decode the node from the data
+ return node, nil
+}
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/features.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/features.go
new file mode 100644
index 00000000000..9a8baa9ebbe
--- /dev/null
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/features.go
@@ -0,0 +1,451 @@
+package ext4
+
+// features are defined
+// beginning at https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git/tree/lib/ext2fs/ext2_fs.h#n820
+
+// featureFlags is a structure holding which flags are set - compatible, incompatible and read-only compatible
+type featureFlags struct {
+ // compatible, incompatible, and compatibleReadOnly feature flags
+ directoryPreAllocate bool
+ imagicInodes bool
+ hasJournal bool
+ extendedAttributes bool
+ reservedGDTBlocksForExpansion bool
+ directoryIndices bool
+ lazyBlockGroup bool
+ excludeInode bool
+ excludeBitmap bool
+ sparseSuperBlockV2 bool
+ fastCommit bool
+ stableInodes bool
+ orphanFile bool
+ compression bool
+ directoryEntriesRecordFileType bool
+ recoveryNeeded bool
+ separateJournalDevice bool
+ metaBlockGroups bool
+ extents bool
+ fs64Bit bool
+ multipleMountProtection bool
+ flexBlockGroups bool
+ extendedAttributeInodes bool
+ dataInDirectoryEntries bool
+ metadataChecksumSeedInSuperblock bool
+ largeDirectory bool
+ dataInInode bool
+ encryptInodes bool
+ sparseSuperblock bool
+ largeFile bool
+ btreeDirectory bool
+ hugeFile bool
+ gdtChecksum bool
+ largeSubdirectoryCount bool
+ largeInodes bool
+ snapshot bool
+ quota bool
+ bigalloc bool
+ metadataChecksums bool
+ replicas bool
+ readOnly bool
+ projectQuotas bool
+}
+
+func parseFeatureFlags(compatFlags, incompatFlags, roCompatFlags uint32) featureFlags {
+ f := featureFlags{
+ directoryPreAllocate: compatFeatureDirectoryPreAllocate.included(compatFlags),
+ imagicInodes: compatFeatureImagicInodes.included(compatFlags),
+ hasJournal: compatFeatureHasJournal.included(compatFlags),
+ extendedAttributes: compatFeatureExtendedAttributes.included(compatFlags),
+ reservedGDTBlocksForExpansion: compatFeatureReservedGDTBlocksForExpansion.included(compatFlags),
+ directoryIndices: compatFeatureDirectoryIndices.included(compatFlags),
+ lazyBlockGroup: compatFeatureLazyBlockGroup.included(compatFlags),
+ excludeInode: compatFeatureExcludeInode.included(compatFlags),
+ excludeBitmap: compatFeatureExcludeBitmap.included(compatFlags),
+ sparseSuperBlockV2: compatFeatureSparseSuperBlockV2.included(compatFlags),
+ fastCommit: compatFeatureFastCommit.included(compatFlags),
+ stableInodes: compatFeatureStableInodes.included(compatFlags),
+ orphanFile: compatFeatureOrphanFile.included(compatFlags),
+ compression: incompatFeatureCompression.included(incompatFlags),
+ directoryEntriesRecordFileType: incompatFeatureDirectoryEntriesRecordFileType.included(incompatFlags),
+ recoveryNeeded: incompatFeatureRecoveryNeeded.included(incompatFlags),
+ separateJournalDevice: incompatFeatureSeparateJournalDevice.included(incompatFlags),
+ metaBlockGroups: incompatFeatureMetaBlockGroups.included(incompatFlags),
+ extents: incompatFeatureExtents.included(incompatFlags),
+ fs64Bit: incompatFeature64Bit.included(incompatFlags),
+ multipleMountProtection: incompatFeatureMultipleMountProtection.included(incompatFlags),
+ flexBlockGroups: incompatFeatureFlexBlockGroups.included(incompatFlags),
+ extendedAttributeInodes: incompatFeatureExtendedAttributeInodes.included(incompatFlags),
+ dataInDirectoryEntries: incompatFeatureDataInDirectoryEntries.included(incompatFlags),
+ metadataChecksumSeedInSuperblock: incompatFeatureMetadataChecksumSeedInSuperblock.included(incompatFlags),
+ largeDirectory: incompatFeatureLargeDirectory.included(incompatFlags),
+ dataInInode: incompatFeatureDataInInode.included(incompatFlags),
+ encryptInodes: incompatFeatureEncryptInodes.included(incompatFlags),
+ sparseSuperblock: roCompatFeatureSparseSuperblock.included(roCompatFlags),
+ largeFile: roCompatFeatureLargeFile.included(roCompatFlags),
+ btreeDirectory: roCompatFeatureBtreeDirectory.included(roCompatFlags),
+ hugeFile: roCompatFeatureHugeFile.included(roCompatFlags),
+ gdtChecksum: roCompatFeatureGDTChecksum.included(roCompatFlags),
+ largeSubdirectoryCount: roCompatFeatureLargeSubdirectoryCount.included(roCompatFlags),
+ largeInodes: roCompatFeatureLargeInodes.included(roCompatFlags),
+ snapshot: roCompatFeatureSnapshot.included(roCompatFlags),
+ quota: roCompatFeatureQuota.included(roCompatFlags),
+ bigalloc: roCompatFeatureBigalloc.included(roCompatFlags),
+ metadataChecksums: roCompatFeatureMetadataChecksums.included(roCompatFlags),
+ replicas: roCompatFeatureReplicas.included(roCompatFlags),
+ readOnly: roCompatFeatureReadOnly.included(roCompatFlags),
+ projectQuotas: roCompatFeatureProjectQuotas.included(roCompatFlags),
+ }
+
+ return f
+}
+
+//nolint:gocyclo // we know this has cyclomatic complexity, but not worth breaking apart
+func (f *featureFlags) toInts() (compatFlags, incompatFlags, roCompatFlags uint32) {
+ // compatible flags
+ if f.directoryPreAllocate {
+ compatFlags |= uint32(compatFeatureDirectoryPreAllocate)
+ }
+ if f.imagicInodes {
+ compatFlags |= uint32(compatFeatureImagicInodes)
+ }
+ if f.hasJournal {
+ compatFlags |= uint32(compatFeatureHasJournal)
+ }
+ if f.extendedAttributes {
+ compatFlags |= uint32(compatFeatureExtendedAttributes)
+ }
+ if f.reservedGDTBlocksForExpansion {
+ compatFlags |= uint32(compatFeatureReservedGDTBlocksForExpansion)
+ }
+ if f.directoryIndices {
+ compatFlags |= uint32(compatFeatureDirectoryIndices)
+ }
+ if f.lazyBlockGroup {
+ compatFlags |= uint32(compatFeatureLazyBlockGroup)
+ }
+ if f.excludeInode {
+ compatFlags |= uint32(compatFeatureExcludeInode)
+ }
+ if f.excludeBitmap {
+ compatFlags |= uint32(compatFeatureExcludeBitmap)
+ }
+ if f.sparseSuperBlockV2 {
+ compatFlags |= uint32(compatFeatureSparseSuperBlockV2)
+ }
+ if f.fastCommit {
+ compatFlags |= uint32(compatFeatureFastCommit)
+ }
+ if f.stableInodes {
+ compatFlags |= uint32(compatFeatureStableInodes)
+ }
+ if f.orphanFile {
+ compatFlags |= uint32(compatFeatureOrphanFile)
+ }
+
+ // incompatible flags
+ if f.compression {
+ incompatFlags |= uint32(incompatFeatureCompression)
+ }
+ if f.directoryEntriesRecordFileType {
+ incompatFlags |= uint32(incompatFeatureDirectoryEntriesRecordFileType)
+ }
+ if f.recoveryNeeded {
+ incompatFlags |= uint32(incompatFeatureRecoveryNeeded)
+ }
+ if f.separateJournalDevice {
+ incompatFlags |= uint32(incompatFeatureSeparateJournalDevice)
+ }
+ if f.metaBlockGroups {
+ incompatFlags |= uint32(incompatFeatureMetaBlockGroups)
+ }
+ if f.extents {
+ incompatFlags |= uint32(incompatFeatureExtents)
+ }
+ if f.fs64Bit {
+ incompatFlags |= uint32(incompatFeature64Bit)
+ }
+ if f.multipleMountProtection {
+ incompatFlags |= uint32(incompatFeatureMultipleMountProtection)
+ }
+ if f.flexBlockGroups {
+ incompatFlags |= uint32(incompatFeatureFlexBlockGroups)
+ }
+ if f.extendedAttributeInodes {
+ incompatFlags |= uint32(incompatFeatureExtendedAttributeInodes)
+ }
+ if f.dataInDirectoryEntries {
+ incompatFlags |= uint32(incompatFeatureDataInDirectoryEntries)
+ }
+ if f.metadataChecksumSeedInSuperblock {
+ incompatFlags |= uint32(incompatFeatureMetadataChecksumSeedInSuperblock)
+ }
+ if f.largeDirectory {
+ incompatFlags |= uint32(incompatFeatureLargeDirectory)
+ }
+ if f.dataInInode {
+ incompatFlags |= uint32(incompatFeatureDataInInode)
+ }
+ if f.encryptInodes {
+ incompatFlags |= uint32(incompatFeatureEncryptInodes)
+ }
+
+ // read only compatible flags
+ if f.sparseSuperblock {
+ roCompatFlags |= uint32(roCompatFeatureSparseSuperblock)
+ }
+ if f.largeFile {
+ roCompatFlags |= uint32(roCompatFeatureLargeFile)
+ }
+ if f.btreeDirectory {
+ roCompatFlags |= uint32(roCompatFeatureBtreeDirectory)
+ }
+ if f.hugeFile {
+ roCompatFlags |= uint32(roCompatFeatureHugeFile)
+ }
+ if f.gdtChecksum {
+ roCompatFlags |= uint32(roCompatFeatureGDTChecksum)
+ }
+ if f.largeSubdirectoryCount {
+ roCompatFlags |= uint32(roCompatFeatureLargeSubdirectoryCount)
+ }
+ if f.largeInodes {
+ roCompatFlags |= uint32(roCompatFeatureLargeInodes)
+ }
+ if f.snapshot {
+ roCompatFlags |= uint32(roCompatFeatureSnapshot)
+ }
+ if f.quota {
+ roCompatFlags |= uint32(roCompatFeatureQuota)
+ }
+ if f.bigalloc {
+ roCompatFlags |= uint32(roCompatFeatureBigalloc)
+ }
+ if f.metadataChecksums {
+ roCompatFlags |= uint32(roCompatFeatureMetadataChecksums)
+ }
+ if f.replicas {
+ roCompatFlags |= uint32(roCompatFeatureReplicas)
+ }
+ if f.readOnly {
+ roCompatFlags |= uint32(roCompatFeatureReadOnly)
+ }
+ if f.projectQuotas {
+ roCompatFlags |= uint32(roCompatFeatureProjectQuotas)
+ }
+
+ return compatFlags, incompatFlags, roCompatFlags
+}
+
+// default features
+/*
+ base_features = sparse_super,large_file,filetype,resize_inode,dir_index,ext_attr
+ features = has_journal,extent,huge_file,flex_bg,uninit_bg,64bit,dir_nlink,extra_isize
+*/
+var defaultFeatureFlags = featureFlags{
+ largeFile: true,
+ hugeFile: true,
+ sparseSuperblock: true,
+ flexBlockGroups: true,
+ hasJournal: true,
+ extents: true,
+ fs64Bit: true,
+ extendedAttributes: true,
+}
+
+type FeatureOpt func(*featureFlags)
+
+func WithFeatureDirectoryPreAllocate(enable bool) FeatureOpt {
+ return func(o *featureFlags) {
+ o.directoryPreAllocate = enable
+ }
+}
+func WithFeatureImagicInodes(enable bool) FeatureOpt {
+ return func(o *featureFlags) {
+ o.imagicInodes = enable
+ }
+}
+func WithFeatureHasJournal(enable bool) FeatureOpt {
+ return func(o *featureFlags) {
+ o.hasJournal = enable
+ }
+}
+func WithFeatureExtendedAttributes(enable bool) FeatureOpt {
+ return func(o *featureFlags) {
+ o.extendedAttributes = enable
+ }
+}
+func WithFeatureReservedGDTBlocksForExpansion(enable bool) FeatureOpt {
+ return func(o *featureFlags) {
+ o.reservedGDTBlocksForExpansion = enable
+ }
+}
+func WithFeatureDirectoryIndices(enable bool) FeatureOpt {
+ return func(o *featureFlags) {
+ o.directoryIndices = enable
+ }
+}
+func WithFeatureLazyBlockGroup(enable bool) FeatureOpt {
+ return func(o *featureFlags) {
+ o.lazyBlockGroup = enable
+ }
+}
+func WithFeatureExcludeInode(enable bool) FeatureOpt {
+ return func(o *featureFlags) {
+ o.excludeInode = enable
+ }
+}
+func WithFeatureExcludeBitmap(enable bool) FeatureOpt {
+ return func(o *featureFlags) {
+ o.excludeBitmap = enable
+ }
+}
+func WithFeatureSparseSuperBlockV2(enable bool) FeatureOpt {
+ return func(o *featureFlags) {
+ o.sparseSuperBlockV2 = enable
+ }
+}
+func WithFeatureCompression(enable bool) FeatureOpt {
+ return func(o *featureFlags) {
+ o.compression = enable
+ }
+}
+func WithFeatureDirectoryEntriesRecordFileType(enable bool) FeatureOpt {
+ return func(o *featureFlags) {
+ o.directoryEntriesRecordFileType = enable
+ }
+}
+func WithFeatureRecoveryNeeded(enable bool) FeatureOpt {
+ return func(o *featureFlags) {
+ o.recoveryNeeded = enable
+ }
+}
+func WithFeatureSeparateJournalDevice(enable bool) FeatureOpt {
+ return func(o *featureFlags) {
+ o.separateJournalDevice = enable
+ }
+}
+func WithFeatureMetaBlockGroups(enable bool) FeatureOpt {
+ return func(o *featureFlags) {
+ o.metaBlockGroups = enable
+ }
+}
+func WithFeatureExtents(enable bool) FeatureOpt {
+ return func(o *featureFlags) {
+ o.extents = enable
+ }
+}
+func WithFeatureFS64Bit(enable bool) FeatureOpt {
+ return func(o *featureFlags) {
+ o.fs64Bit = enable
+ }
+}
+func WithFeatureMultipleMountProtection(enable bool) FeatureOpt {
+ return func(o *featureFlags) {
+ o.multipleMountProtection = enable
+ }
+}
+func WithFeatureFlexBlockGroups(enable bool) FeatureOpt {
+ return func(o *featureFlags) {
+ o.flexBlockGroups = enable
+ }
+}
+func WithFeatureExtendedAttributeInodes(enable bool) FeatureOpt {
+ return func(o *featureFlags) {
+ o.extendedAttributeInodes = enable
+ }
+}
+func WithFeatureDataInDirectoryEntries(enable bool) FeatureOpt {
+ return func(o *featureFlags) {
+ o.dataInDirectoryEntries = enable
+ }
+}
+func WithFeatureMetadataChecksumSeedInSuperblock(enable bool) FeatureOpt {
+ return func(o *featureFlags) {
+ o.metadataChecksumSeedInSuperblock = enable
+ }
+}
+func WithFeatureLargeDirectory(enable bool) FeatureOpt {
+ return func(o *featureFlags) {
+ o.largeDirectory = enable
+ }
+}
+func WithFeatureDataInInode(enable bool) FeatureOpt {
+ return func(o *featureFlags) {
+ o.dataInInode = enable
+ }
+}
+func WithFeatureEncryptInodes(enable bool) FeatureOpt {
+ return func(o *featureFlags) {
+ o.encryptInodes = enable
+ }
+}
+func WithFeatureSparseSuperblock(enable bool) FeatureOpt {
+ return func(o *featureFlags) {
+ o.sparseSuperblock = enable
+ }
+}
+func WithFeatureLargeFile(enable bool) FeatureOpt {
+ return func(o *featureFlags) {
+ o.largeFile = enable
+ }
+}
+func WithFeatureBTreeDirectory(enable bool) FeatureOpt {
+ return func(o *featureFlags) {
+ o.btreeDirectory = enable
+ }
+}
+func WithFeatureHugeFile(enable bool) FeatureOpt {
+ return func(o *featureFlags) {
+ o.hugeFile = enable
+ }
+}
+func WithFeatureGDTChecksum(enable bool) FeatureOpt {
+ return func(o *featureFlags) {
+ o.gdtChecksum = enable
+ }
+}
+func WithFeatureLargeSubdirectoryCount(enable bool) FeatureOpt {
+ return func(o *featureFlags) {
+ o.largeSubdirectoryCount = enable
+ }
+}
+func WithFeatureLargeInodes(enable bool) FeatureOpt {
+ return func(o *featureFlags) {
+ o.largeInodes = enable
+ }
+}
+func WithFeatureSnapshot(enable bool) FeatureOpt {
+ return func(o *featureFlags) {
+ o.snapshot = enable
+ }
+}
+func WithFeatureQuota(enable bool) FeatureOpt {
+ return func(o *featureFlags) {
+ o.quota = enable
+ }
+}
+func WithFeatureBigalloc(enable bool) FeatureOpt {
+ return func(o *featureFlags) {
+ o.bigalloc = enable
+ }
+}
+func WithFeatureMetadataChecksums(enable bool) FeatureOpt {
+ return func(o *featureFlags) {
+ o.metadataChecksums = enable
+ }
+}
+func WithFeatureReplicas(enable bool) FeatureOpt {
+ return func(o *featureFlags) {
+ o.replicas = enable
+ }
+}
+func WithFeatureReadOnly(enable bool) FeatureOpt {
+ return func(o *featureFlags) {
+ o.readOnly = enable
+ }
+}
+func WithFeatureProjectQuotas(enable bool) FeatureOpt {
+ return func(o *featureFlags) {
+ o.projectQuotas = enable
+ }
+}
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/file.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/file.go
new file mode 100644
index 00000000000..4dc653956c6
--- /dev/null
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/file.go
@@ -0,0 +1,208 @@
+package ext4
+
+import (
+ "fmt"
+ "io"
+)
+
+// File represents a single file in an ext4 filesystem
+type File struct {
+ *directoryEntry
+ *inode
+ isReadWrite bool
+ isAppend bool
+ offset int64
+ filesystem *FileSystem
+ extents extents
+}
+
+// Read reads up to len(b) bytes from the File.
+// It returns the number of bytes read and any error encountered.
+// At end of file, Read returns 0, io.EOF
+// reads from the last known offset in the file from last read or write
+// use Seek() to set at a particular point
+func (fl *File) Read(b []byte) (int, error) {
+ var (
+ fileSize = int64(fl.size)
+ blocksize = uint64(fl.filesystem.superblock.blockSize)
+ )
+ if fl.offset >= fileSize {
+ return 0, io.EOF
+ }
+
+ // Calculate the number of bytes to read
+ bytesToRead := int64(len(b))
+ if fl.offset+bytesToRead > fileSize {
+ bytesToRead = fileSize - fl.offset
+ }
+
+ // Create a buffer to hold the bytes to be read
+ readBytes := int64(0)
+ b = b[:bytesToRead]
+
+ // the offset given for reading is relative to the file, so we need to calculate
+ // where these are in the extents relative to the file
+ readStartBlock := uint64(fl.offset) / blocksize
+ for _, e := range fl.extents {
+ // if the last block of the extent is before the first block we want to read, skip it
+ if uint64(e.fileBlock)+uint64(e.count) < readStartBlock {
+ continue
+ }
+ // extentSize is the number of bytes on the disk for the extent
+ extentSize := int64(e.count) * int64(blocksize)
+ // where do we start and end in the extent?
+ startPositionInExtent := fl.offset - int64(e.fileBlock)*int64(blocksize)
+ leftInExtent := extentSize - startPositionInExtent
+ // how many bytes are left to read
+ toReadInOffset := bytesToRead - readBytes
+ if toReadInOffset > leftInExtent {
+ toReadInOffset = leftInExtent
+ }
+ // read those bytes
+ startPosOnDisk := e.startingBlock*blocksize + uint64(startPositionInExtent)
+ b2 := make([]byte, toReadInOffset)
+ read, err := fl.filesystem.file.ReadAt(b2, int64(startPosOnDisk))
+ if err != nil {
+ return int(readBytes), fmt.Errorf("failed to read bytes: %v", err)
+ }
+ copy(b[readBytes:], b2[:read])
+ readBytes += int64(read)
+ fl.offset += int64(read)
+
+ if readBytes >= bytesToRead {
+ break
+ }
+ }
+ var err error
+ if fl.offset >= fileSize {
+ err = io.EOF
+ }
+
+ return int(readBytes), err
+}
+
+// Write writes len(b) bytes to the File.
+// It returns the number of bytes written and an error, if any.
+// returns a non-nil error when n != len(b)
+// writes to the last known offset in the file from last read or write
+// use Seek() to set at a particular point
+func (fl *File) Write(b []byte) (int, error) {
+ var (
+ fileSize = int64(fl.size)
+ originalFileSize = int64(fl.size)
+ blockCount = fl.blocks
+ originalBlockCount = fl.blocks
+ blocksize = uint64(fl.filesystem.superblock.blockSize)
+ )
+ if !fl.isReadWrite {
+ return 0, fmt.Errorf("file is not open for writing")
+ }
+
+ // if adding these bytes goes past the filesize, update the inode filesize to the new size and write the inode
+ // if adding these bytes goes past the total number of blocks, add more blocks, update the inode block count and write the inode
+ // if the offset is greater than the filesize, update the inode filesize to the offset
+ if fl.offset >= fileSize {
+ fl.size = uint64(fl.offset)
+ }
+
+ // Calculate the number of bytes to write
+ bytesToWrite := int64(len(b))
+
+ offsetAfterWrite := fl.offset + bytesToWrite
+ if offsetAfterWrite > int64(fl.size) {
+ fl.size = uint64(fl.offset + bytesToWrite)
+ }
+
+ // calculate the number of blocks in the file post-write
+ newBlockCount := fl.size / blocksize
+ if fl.size%blocksize > 0 {
+ newBlockCount++
+ }
+ blocksNeeded := newBlockCount - blockCount
+ bytesNeeded := blocksNeeded * blocksize
+ if newBlockCount > blockCount {
+ newExtents, err := fl.filesystem.allocateExtents(bytesNeeded, &fl.extents)
+ if err != nil {
+ return 0, fmt.Errorf("could not allocate disk space for file %w", err)
+ }
+ extentTreeParsed, err := extendExtentTree(fl.inode.extents, newExtents, fl.filesystem, nil)
+ if err != nil {
+ return 0, fmt.Errorf("could not convert extents into tree: %w", err)
+ }
+ fl.inode.extents = extentTreeParsed
+ fl.blocks = newBlockCount
+ }
+
+ if originalFileSize != int64(fl.size) || originalBlockCount != fl.blocks {
+ err := fl.filesystem.writeInode(fl.inode)
+ if err != nil {
+ return 0, fmt.Errorf("could not write inode: %w", err)
+ }
+ }
+
+ writtenBytes := int64(0)
+
+ // the offset given for reading is relative to the file, so we need to calculate
+ // where these are in the extents relative to the file
+ writeStartBlock := uint64(fl.offset) / blocksize
+ for _, e := range fl.extents {
+ // if the last block of the extent is before the first block we want to write, skip it
+ if uint64(e.fileBlock)+uint64(e.count) < writeStartBlock {
+ continue
+ }
+ // extentSize is the number of bytes on the disk for the extent
+ extentSize := int64(e.count) * int64(blocksize)
+ // where do we start and end in the extent?
+ startPositionInExtent := fl.offset - int64(e.fileBlock)*int64(blocksize)
+ leftInExtent := extentSize - startPositionInExtent
+ // how many bytes are left in the extent?
+ toWriteInOffset := bytesToWrite - writtenBytes
+ if toWriteInOffset > leftInExtent {
+ toWriteInOffset = leftInExtent
+ }
+ // read those bytes
+ startPosOnDisk := e.startingBlock*blocksize + uint64(startPositionInExtent)
+ b2 := make([]byte, toWriteInOffset)
+ copy(b2, b[writtenBytes:])
+ written, err := fl.filesystem.file.WriteAt(b2, int64(startPosOnDisk))
+ if err != nil {
+ return int(writtenBytes), fmt.Errorf("failed to read bytes: %v", err)
+ }
+ writtenBytes += int64(written)
+ fl.offset += int64(written)
+
+ if written >= len(b) {
+ break
+ }
+ }
+ var err error
+ if fl.offset >= fileSize {
+ err = io.EOF
+ }
+
+ return int(writtenBytes), err
+}
+
+// Seek set the offset to a particular point in the file
+func (fl *File) Seek(offset int64, whence int) (int64, error) {
+ newOffset := int64(0)
+ switch whence {
+ case io.SeekStart:
+ newOffset = offset
+ case io.SeekEnd:
+ newOffset = int64(fl.size) + offset
+ case io.SeekCurrent:
+ newOffset = fl.offset + offset
+ }
+ if newOffset < 0 {
+ return fl.offset, fmt.Errorf("cannot set offset %d before start of file", offset)
+ }
+ fl.offset = newOffset
+ return fl.offset, nil
+}
+
+// Close close a file that is being read
+func (fl *File) Close() error {
+ *fl = File{}
+ return nil
+}
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/fileinfo.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/fileinfo.go
new file mode 100644
index 00000000000..4a6e5c3aacb
--- /dev/null
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/fileinfo.go
@@ -0,0 +1,48 @@
+package ext4
+
+import (
+ "os"
+ "time"
+)
+
+// FileInfo represents the information for an individual file
+// it fulfills os.FileInfo interface
+type FileInfo struct {
+ modTime time.Time
+ mode os.FileMode
+ name string
+ size int64
+ isDir bool
+}
+
+// IsDir abbreviation for Mode().IsDir()
+func (fi *FileInfo) IsDir() bool {
+ return fi.isDir
+}
+
+// ModTime modification time
+func (fi *FileInfo) ModTime() time.Time {
+ return fi.modTime
+}
+
+// Mode returns file mode
+func (fi *FileInfo) Mode() os.FileMode {
+ return fi.mode
+}
+
+// Name base name of the file
+//
+// will return the long name of the file. If none exists, returns the shortname and extension
+func (fi *FileInfo) Name() string {
+ return fi.name
+}
+
+// Size length in bytes for regular files
+func (fi *FileInfo) Size() int64 {
+ return fi.size
+}
+
+// Sys underlying data source - not supported yet and so will return nil
+func (fi *FileInfo) Sys() interface{} {
+ return nil
+}
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/groupdescriptors.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/groupdescriptors.go
new file mode 100644
index 00000000000..995cda05e86
--- /dev/null
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/groupdescriptors.go
@@ -0,0 +1,327 @@
+package ext4
+
+import (
+ "cmp"
+ "encoding/binary"
+ "fmt"
+ "slices"
+
+ "github.com/diskfs/go-diskfs/filesystem/ext4/crc"
+)
+
+type blockGroupFlag uint16
+type gdtChecksumType uint8
+
+func (b blockGroupFlag) included(a uint16) bool {
+ return a&uint16(b) == uint16(b)
+}
+
+//nolint:unused // will be used in the future, not yet
+func (g gdtChecksumType) included(a uint8) bool {
+ return a&uint8(g) == uint8(g)
+}
+
+const (
+ groupDescriptorSize uint16 = 32
+ groupDescriptorSize64Bit uint16 = 64
+ blockGroupFlagInodesUninitialized blockGroupFlag = 0x1
+ blockGroupFlagBlockBitmapUninitialized blockGroupFlag = 0x2
+ blockGroupFlagInodeTableZeroed blockGroupFlag = 0x4
+ gdtChecksumNone gdtChecksumType = 0
+ gdtChecksumGdt gdtChecksumType = 1
+ gdtChecksumMetadata gdtChecksumType = 2
+)
+
+type blockGroupFlags struct {
+ inodesUninitialized bool
+ blockBitmapUninitialized bool
+ inodeTableZeroed bool
+}
+
+// groupdescriptors is a structure holding all of the group descriptors for all of the block groups
+type groupDescriptors struct {
+ descriptors []groupDescriptor
+}
+
+// groupDescriptor is a structure holding the data about a single block group
+type groupDescriptor struct {
+ blockBitmapLocation uint64
+ inodeBitmapLocation uint64
+ inodeTableLocation uint64
+ freeBlocks uint32
+ freeInodes uint32
+ usedDirectories uint32
+ flags blockGroupFlags
+ snapshotExclusionBitmapLocation uint64
+ blockBitmapChecksum uint32
+ inodeBitmapChecksum uint32
+ unusedInodes uint32
+ size uint16
+ number uint16
+}
+
+func (gd *groupDescriptor) equal(other *groupDescriptor) bool {
+ if other == nil {
+ return gd == nil
+ }
+ return *gd == *other
+}
+
+func (gds *groupDescriptors) equal(a *groupDescriptors) bool {
+ if gds == nil && a == nil {
+ return true
+ }
+ if (gds == nil && a != nil) || (a == nil && gds != nil) || len(gds.descriptors) != len(a.descriptors) {
+ return false
+ }
+
+ // both not nil, same size, so compare them
+ for i, g := range gds.descriptors {
+ if g != a.descriptors[i] {
+ return false
+ }
+ }
+ // if we made it this far, all the same
+ return true
+}
+
+// groupDescriptorsFromBytes create a groupDescriptors struct from bytes
+func groupDescriptorsFromBytes(b []byte, gdSize uint16, hashSeed uint32, checksumType gdtChecksumType) (*groupDescriptors, error) {
+ gds := groupDescriptors{}
+ gdSlice := make([]groupDescriptor, 0, 10)
+
+ count := len(b) / int(gdSize)
+
+ // go through them gdSize bytes at a time
+ for i := 0; i < count; i++ {
+ start := i * int(gdSize)
+ end := start + int(gdSize)
+ gd, err := groupDescriptorFromBytes(b[start:end], gdSize, i, checksumType, hashSeed)
+ if err != nil || gd == nil {
+ return nil, fmt.Errorf("error creating group descriptor from bytes: %w", err)
+ }
+ gdSlice = append(gdSlice, *gd)
+ }
+ gds.descriptors = gdSlice
+
+ return &gds, nil
+}
+
+// toBytes returns groupDescriptors ready to be written to disk
+func (gds *groupDescriptors) toBytes(checksumType gdtChecksumType, hashSeed uint32) []byte {
+ b := make([]byte, 0, 10*groupDescriptorSize)
+ for _, gd := range gds.descriptors {
+ b2 := gd.toBytes(checksumType, hashSeed)
+ b = append(b, b2...)
+ }
+
+ return b
+}
+
+// byFreeBlocks provides a sorted list of groupDescriptors by free blocks, descending.
+// If you want them ascending, sort if.
+func (gds *groupDescriptors) byFreeBlocks() []groupDescriptor {
+ // make a copy of the slice
+ gdSlice := make([]groupDescriptor, len(gds.descriptors))
+ copy(gdSlice, gds.descriptors)
+
+ // sort the slice
+ slices.SortFunc(gdSlice, func(a, b groupDescriptor) int {
+ return cmp.Compare(a.freeBlocks, b.freeBlocks)
+ })
+
+ return gdSlice
+}
+
+// groupDescriptorFromBytes create a groupDescriptor struct from bytes
+func groupDescriptorFromBytes(b []byte, gdSize uint16, number int, checksumType gdtChecksumType, hashSeed uint32) (*groupDescriptor, error) {
+ // block count, reserved block count and free blocks depends on whether the fs is 64-bit or not
+ blockBitmapLocation := make([]byte, 8)
+ inodeBitmapLocation := make([]byte, 8)
+ inodeTableLocation := make([]byte, 8)
+ freeBlocks := make([]byte, 4)
+ freeInodes := make([]byte, 4)
+ usedirectories := make([]byte, 4)
+ snapshotExclusionBitmapLocation := make([]byte, 8)
+ blockBitmapChecksum := make([]byte, 4)
+ inodeBitmapChecksum := make([]byte, 4)
+ unusedInodes := make([]byte, 4)
+
+ copy(blockBitmapLocation[0:4], b[0x0:0x4])
+ copy(inodeBitmapLocation[0:4], b[0x4:0x8])
+ copy(inodeTableLocation[0:4], b[0x8:0xc])
+ copy(freeBlocks[0:2], b[0xc:0xe])
+ copy(freeInodes[0:2], b[0xe:0x10])
+ copy(usedirectories[0:2], b[0x10:0x12])
+ copy(snapshotExclusionBitmapLocation[0:4], b[0x14:0x18])
+ copy(blockBitmapChecksum[0:2], b[0x18:0x1a])
+ copy(inodeBitmapChecksum[0:2], b[0x1a:0x1c])
+ copy(unusedInodes[0:2], b[0x1c:0x1e])
+
+ if gdSize == 64 {
+ copy(blockBitmapLocation[4:8], b[0x20:0x24])
+ copy(inodeBitmapLocation[4:8], b[0x24:0x28])
+ copy(inodeTableLocation[4:8], b[0x28:0x2c])
+ copy(freeBlocks[2:4], b[0x2c:0x2e])
+ copy(freeInodes[2:4], b[0x2e:0x30])
+ copy(usedirectories[2:4], b[0x30:0x32])
+ copy(unusedInodes[2:4], b[0x32:0x34])
+ copy(snapshotExclusionBitmapLocation[4:8], b[0x34:0x38])
+ copy(blockBitmapChecksum[2:4], b[0x38:0x3a])
+ copy(inodeBitmapChecksum[2:4], b[0x3a:0x3c])
+ }
+
+ gdNumber := uint16(number)
+ // only bother with checking the checksum if it was not type none (pre-checksums)
+ if checksumType != gdtChecksumNone {
+ checksum := binary.LittleEndian.Uint16(b[0x1e:0x20])
+ actualChecksum := groupDescriptorChecksum(b[0x0:0x40], hashSeed, gdNumber, checksumType)
+ if checksum != actualChecksum {
+ return nil, fmt.Errorf("checksum mismatch, passed %x, actual %x", checksum, actualChecksum)
+ }
+ }
+
+ gd := groupDescriptor{
+ size: gdSize,
+ number: gdNumber,
+ blockBitmapLocation: binary.LittleEndian.Uint64(blockBitmapLocation),
+ inodeBitmapLocation: binary.LittleEndian.Uint64(inodeBitmapLocation),
+ inodeTableLocation: binary.LittleEndian.Uint64(inodeTableLocation),
+ freeBlocks: binary.LittleEndian.Uint32(freeBlocks),
+ freeInodes: binary.LittleEndian.Uint32(freeInodes),
+ usedDirectories: binary.LittleEndian.Uint32(usedirectories),
+ snapshotExclusionBitmapLocation: binary.LittleEndian.Uint64(snapshotExclusionBitmapLocation),
+ blockBitmapChecksum: binary.LittleEndian.Uint32(blockBitmapChecksum),
+ inodeBitmapChecksum: binary.LittleEndian.Uint32(inodeBitmapChecksum),
+ unusedInodes: binary.LittleEndian.Uint32(unusedInodes),
+ flags: parseBlockGroupFlags(binary.LittleEndian.Uint16(b[0x12:0x14])),
+ }
+
+ return &gd, nil
+}
+
+// toBytes returns a groupDescriptor ready to be written to disk
+func (gd *groupDescriptor) toBytes(checksumType gdtChecksumType, hashSeed uint32) []byte {
+ gdSize := gd.size
+
+ b := make([]byte, gdSize)
+
+ blockBitmapLocation := make([]byte, 8)
+ inodeBitmapLocation := make([]byte, 8)
+ inodeTableLocation := make([]byte, 8)
+ freeBlocks := make([]byte, 4)
+ freeInodes := make([]byte, 4)
+ usedirectories := make([]byte, 4)
+ snapshotExclusionBitmapLocation := make([]byte, 8)
+ blockBitmapChecksum := make([]byte, 4)
+ inodeBitmapChecksum := make([]byte, 4)
+ unusedInodes := make([]byte, 4)
+
+ binary.LittleEndian.PutUint64(blockBitmapLocation, gd.blockBitmapLocation)
+ binary.LittleEndian.PutUint64(inodeTableLocation, gd.inodeTableLocation)
+ binary.LittleEndian.PutUint64(inodeBitmapLocation, gd.inodeBitmapLocation)
+ binary.LittleEndian.PutUint32(freeBlocks, gd.freeBlocks)
+ binary.LittleEndian.PutUint32(freeInodes, gd.freeInodes)
+ binary.LittleEndian.PutUint32(usedirectories, gd.usedDirectories)
+ binary.LittleEndian.PutUint64(snapshotExclusionBitmapLocation, gd.snapshotExclusionBitmapLocation)
+ binary.LittleEndian.PutUint32(blockBitmapChecksum, gd.blockBitmapChecksum)
+ binary.LittleEndian.PutUint32(inodeBitmapChecksum, gd.inodeBitmapChecksum)
+ binary.LittleEndian.PutUint32(unusedInodes, gd.unusedInodes)
+
+ // copy the lower 32 bytes in
+ copy(b[0x0:0x4], blockBitmapLocation[0:4])
+ copy(b[0x4:0x8], inodeBitmapLocation[0:4])
+ copy(b[0x8:0xc], inodeTableLocation[0:4])
+ copy(b[0xc:0xe], freeBlocks[0:2])
+ copy(b[0xe:0x10], freeInodes[0:2])
+ copy(b[0x10:0x12], usedirectories[0:2])
+ binary.LittleEndian.PutUint16(b[0x12:0x14], gd.flags.toInt())
+ copy(b[0x14:0x18], snapshotExclusionBitmapLocation[0:4])
+ copy(b[0x18:0x1a], blockBitmapChecksum[0:2])
+ copy(b[0x1a:0x1c], inodeBitmapChecksum[0:2])
+ copy(b[0x1c:0x1e], unusedInodes[0:2])
+
+ // now for the upper 32 bytes
+ if gd.size == 64 {
+ copy(b[0x20:0x24], blockBitmapLocation[4:8])
+ copy(b[0x24:0x28], inodeBitmapLocation[4:8])
+ copy(b[0x28:0x2c], inodeTableLocation[4:8])
+ copy(b[0x2c:0x2e], freeBlocks[2:4])
+ copy(b[0x2e:0x30], freeInodes[2:4])
+ copy(b[0x30:0x32], usedirectories[2:4])
+ copy(b[0x32:0x34], unusedInodes[2:4])
+ copy(b[0x34:0x38], snapshotExclusionBitmapLocation[4:8])
+ copy(b[0x38:0x3a], blockBitmapChecksum[2:4])
+ copy(b[0x3a:0x3c], inodeBitmapChecksum[2:4])
+ }
+
+ checksum := groupDescriptorChecksum(b[0x0:0x40], hashSeed, gd.number, checksumType)
+ binary.LittleEndian.PutUint16(b[0x1e:0x20], checksum)
+
+ return b
+}
+
+func parseBlockGroupFlags(flags uint16) blockGroupFlags {
+ f := blockGroupFlags{
+ inodeTableZeroed: blockGroupFlagInodeTableZeroed.included(flags),
+ inodesUninitialized: blockGroupFlagInodesUninitialized.included(flags),
+ blockBitmapUninitialized: blockGroupFlagBlockBitmapUninitialized.included(flags),
+ }
+
+ return f
+}
+
+func (f *blockGroupFlags) toInt() uint16 {
+ var (
+ flags uint16
+ )
+
+ // compatible flags
+ if f.inodeTableZeroed {
+ flags |= uint16(blockGroupFlagInodeTableZeroed)
+ }
+ if f.inodesUninitialized {
+ flags |= uint16(blockGroupFlagInodesUninitialized)
+ }
+ if f.blockBitmapUninitialized {
+ flags |= uint16(blockGroupFlagBlockBitmapUninitialized)
+ }
+ return flags
+}
+
+// groupDescriptorChecksum calculate the checksum for a block group descriptor
+// NOTE: we are assuming that the block group number is uint64, but we do not know that to be true
+//
+// it might be uint32 or uint64, and it might be in BigEndian as opposed to LittleEndian
+// just have to start with this and see
+// we do know that the maximum number of block groups in 32-bit mode is 2^19, which must be uint32
+// and in 64-bit mode it is 2^51 which must be uint64
+// So we start with uint32 = [4]byte{} for regular mode and [8]byte{} for mod32
+func groupDescriptorChecksum(b []byte, hashSeed uint32, groupNumber uint16, checksumType gdtChecksumType) uint16 {
+ var checksum uint16
+
+ numBytes := make([]byte, 4)
+ binary.LittleEndian.PutUint16(numBytes, groupNumber)
+ switch checksumType {
+ case gdtChecksumNone:
+ checksum = 0
+ case gdtChecksumMetadata:
+ // metadata checksum applies groupNumber to seed, then zeroes out checksum bytes from entire descriptor, then applies descriptor bytes
+ crcResult := crc.CRC32c(hashSeed, numBytes)
+ b2 := make([]byte, len(b))
+ copy(b2, b)
+ b2[0x1e] = 0
+ b2[0x1f] = 0
+ crcResult = crc.CRC32c(crcResult, b2)
+ checksum = uint16(crcResult & 0xffff)
+ case gdtChecksumGdt:
+ hashSeed16 := uint16(hashSeed & 0xffff)
+ crcResult := crc.CRC16(hashSeed16, numBytes)
+ b2 := make([]byte, len(b))
+ copy(b2, b)
+ b2[0x1e] = 0
+ b2[0x1f] = 0
+ checksum = crc.CRC16(crcResult, b)
+ }
+ return checksum
+}
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/inode.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/inode.go
new file mode 100644
index 00000000000..b760c0cf672
--- /dev/null
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/inode.go
@@ -0,0 +1,588 @@
+package ext4
+
+import (
+ "encoding/binary"
+ "fmt"
+ "time"
+
+ "github.com/diskfs/go-diskfs/filesystem/ext4/crc"
+)
+
+type inodeFlag uint32
+type fileType uint16
+
+func (i inodeFlag) included(a uint32) bool {
+ return a&uint32(i) == uint32(i)
+}
+
+const (
+ ext2InodeSize uint16 = 128
+ // minInodeSize is ext2 + the extra min 32 bytes in ext4
+ minInodeExtraSize uint16 = 32
+ wantInodeExtraSize uint16 = 128
+ minInodeSize uint16 = ext2InodeSize + minInodeExtraSize
+ extentInodeMaxEntries int = 4
+ inodeFlagSecureDeletion inodeFlag = 0x1
+ inodeFlagPreserveForUndeletion inodeFlag = 0x2
+ inodeFlagCompressed inodeFlag = 0x4
+ inodeFlagSynchronous inodeFlag = 0x8
+ inodeFlagImmutable inodeFlag = 0x10
+ inodeFlagAppendOnly inodeFlag = 0x20
+ inodeFlagNoDump inodeFlag = 0x40
+ inodeFlagNoAccessTimeUpdate inodeFlag = 0x80
+ inodeFlagDirtyCompressed inodeFlag = 0x100
+ inodeFlagCompressedClusters inodeFlag = 0x200
+ inodeFlagNoCompress inodeFlag = 0x400
+ inodeFlagEncryptedInode inodeFlag = 0x800
+ inodeFlagHashedDirectoryIndexes inodeFlag = 0x1000
+ inodeFlagAFSMagicDirectory inodeFlag = 0x2000
+ inodeFlagAlwaysJournal inodeFlag = 0x4000
+ inodeFlagNoMergeTail inodeFlag = 0x8000
+ inodeFlagSyncDirectoryData inodeFlag = 0x10000
+ inodeFlagTopDirectory inodeFlag = 0x20000
+ inodeFlagHugeFile inodeFlag = 0x40000
+ inodeFlagUsesExtents inodeFlag = 0x80000
+ inodeFlagExtendedAttributes inodeFlag = 0x200000
+ inodeFlagBlocksPastEOF inodeFlag = 0x400000
+ inodeFlagSnapshot inodeFlag = 0x1000000
+ inodeFlagDeletingSnapshot inodeFlag = 0x4000000
+ inodeFlagCompletedSnapshotShrink inodeFlag = 0x8000000
+ inodeFlagInlineData inodeFlag = 0x10000000
+ inodeFlagInheritProject inodeFlag = 0x20000000
+
+ fileTypeFifo fileType = 0x1000
+ fileTypeCharacterDevice fileType = 0x2000
+ fileTypeDirectory fileType = 0x4000
+ fileTypeBlockDevice fileType = 0x6000
+ fileTypeRegularFile fileType = 0x8000
+ fileTypeSymbolicLink fileType = 0xA000
+ fileTypeSocket fileType = 0xC000
+
+ filePermissionsOwnerExecute uint16 = 0x40
+ filePermissionsOwnerWrite uint16 = 0x80
+ filePermissionsOwnerRead uint16 = 0x100
+ filePermissionsGroupExecute uint16 = 0x8
+ filePermissionsGroupWrite uint16 = 0x10
+ filePermissionsGroupRead uint16 = 0x20
+ filePermissionsOtherExecute uint16 = 0x1
+ filePermissionsOtherWrite uint16 = 0x2
+ filePermissionsOtherRead uint16 = 0x4
+)
+
+// mountOptions is a structure holding flags for an inode
+type inodeFlags struct {
+ secureDeletion bool
+ preserveForUndeletion bool
+ compressed bool
+ synchronous bool
+ immutable bool
+ appendOnly bool
+ noDump bool
+ noAccessTimeUpdate bool
+ dirtyCompressed bool
+ compressedClusters bool
+ noCompress bool
+ encryptedInode bool
+ hashedDirectoryIndexes bool
+ AFSMagicDirectory bool
+ alwaysJournal bool
+ noMergeTail bool
+ syncDirectoryData bool
+ topDirectory bool
+ hugeFile bool
+ usesExtents bool
+ extendedAttributes bool
+ blocksPastEOF bool
+ snapshot bool
+ deletingSnapshot bool
+ completedSnapshotShrink bool
+ inlineData bool
+ inheritProject bool
+}
+
+type filePermissions struct {
+ read bool
+ write bool
+ execute bool
+}
+
+// inode is a structure holding the data about an inode
+type inode struct {
+ number uint32
+ permissionsOther filePermissions
+ permissionsGroup filePermissions
+ permissionsOwner filePermissions
+ fileType fileType
+ owner uint32
+ group uint32
+ size uint64
+ accessTime time.Time
+ changeTime time.Time
+ modifyTime time.Time
+ createTime time.Time
+ deletionTime uint32
+ hardLinks uint16
+ blocks uint64
+ filesystemBlocks bool
+ flags *inodeFlags
+ version uint64
+ nfsFileVersion uint32
+ extendedAttributeBlock uint64
+ inodeSize uint16
+ project uint32
+ extents extentBlockFinder
+ linkTarget string
+}
+
+//nolint:unused // will be used in the future, not yet
+func (i *inode) equal(a *inode) bool {
+ if (i == nil && a != nil) || (a == nil && i != nil) {
+ return false
+ }
+ if i == nil && a == nil {
+ return true
+ }
+ return *i == *a
+}
+
+// inodeFromBytes create an inode struct from bytes
+func inodeFromBytes(b []byte, sb *superblock, number uint32) (*inode, error) {
+ // safely make sure it is the min size
+ if len(b) < int(minInodeSize) {
+ return nil, fmt.Errorf("inode data too short: %d bytes, must be min %d bytes", len(b), minInodeSize)
+ }
+
+ // checksum before using the data
+ checksumBytes := make([]byte, 4)
+
+ // checksum before using the data
+ copy(checksumBytes[0:2], b[0x7c:0x7e])
+ copy(checksumBytes[2:4], b[0x82:0x84])
+ // zero out checksum fields before calculating the checksum
+ b[0x7c] = 0
+ b[0x7d] = 0
+ b[0x82] = 0
+ b[0x83] = 0
+
+ // block count, reserved block count and free blocks depends on whether the fs is 64-bit or not
+ owner := make([]byte, 4)
+ fileSize := make([]byte, 8)
+ group := make([]byte, 4)
+ accessTime := make([]byte, 8)
+ changeTime := make([]byte, 8)
+ modifyTime := make([]byte, 8)
+ createTime := make([]byte, 8)
+ version := make([]byte, 8)
+ extendedAttributeBlock := make([]byte, 8)
+
+ mode := binary.LittleEndian.Uint16(b[0x0:0x2])
+
+ copy(owner[0:2], b[0x2:0x4])
+ copy(owner[2:4], b[0x78:0x7a])
+ copy(group[0:2], b[0x18:0x20])
+ copy(group[2:4], b[0x7a:0x7c])
+ copy(fileSize[0:4], b[0x4:0x8])
+ copy(fileSize[4:8], b[0x6c:0x70])
+ copy(version[0:4], b[0x24:0x28])
+ copy(version[4:8], b[0x98:0x9c])
+ copy(extendedAttributeBlock[0:4], b[0x88:0x8c])
+ copy(extendedAttributeBlock[4:6], b[0x76:0x78])
+
+ // get the the times
+ // the structure is as follows:
+ // original 32 bits (0:4) are seconds. Add (to the left) 2 more bits from the 32
+ // the remaining 30 bites are nanoseconds
+ copy(accessTime[0:4], b[0x8:0xc])
+ // take the two bits relevant and add to fifth byte
+ accessTime[4] = b[0x8c] & 0x3
+ copy(changeTime[0:4], b[0xc:0x10])
+ changeTime[4] = b[0x84] & 0x3
+ copy(modifyTime[0:4], b[0x10:0x14])
+ modifyTime[4] = b[0x88] & 0x3
+ copy(createTime[0:4], b[0x90:0x94])
+ createTime[4] = b[0x94] & 0x3
+
+ accessTimeSeconds := binary.LittleEndian.Uint64(accessTime)
+ changeTimeSeconds := binary.LittleEndian.Uint64(changeTime)
+ modifyTimeSeconds := binary.LittleEndian.Uint64(modifyTime)
+ createTimeSeconds := binary.LittleEndian.Uint64(createTime)
+
+ // now get the nanoseconds by using the upper 30 bites
+ accessTimeNanoseconds := binary.LittleEndian.Uint32(b[0x8c:0x90]) >> 2
+ changeTimeNanoseconds := binary.LittleEndian.Uint32(b[0x84:0x88]) >> 2
+ modifyTimeNanoseconds := binary.LittleEndian.Uint32(b[0x88:0x8c]) >> 2
+ createTimeNanoseconds := binary.LittleEndian.Uint32(b[0x94:0x98]) >> 2
+
+ flagsNum := binary.LittleEndian.Uint32(b[0x20:0x24])
+
+ flags := parseInodeFlags(flagsNum)
+
+ blocksLow := binary.LittleEndian.Uint32(b[0x1c:0x20])
+ blocksHigh := binary.LittleEndian.Uint16(b[0x74:0x76])
+ var (
+ blocks uint64
+ filesystemBlocks bool
+ )
+
+ hugeFile := sb.features.hugeFile
+ switch {
+ case !hugeFile:
+ // just 512-byte blocks
+ blocks = uint64(blocksLow)
+ filesystemBlocks = false
+ case hugeFile && !flags.hugeFile:
+ // larger number of 512-byte blocks
+ blocks = uint64(blocksHigh)<<32 + uint64(blocksLow)
+ filesystemBlocks = false
+ default:
+ // larger number of filesystem blocks
+ blocks = uint64(blocksHigh)<<32 + uint64(blocksLow)
+ filesystemBlocks = true
+ }
+ fileType := parseFileType(mode)
+ fileSizeNum := binary.LittleEndian.Uint64(fileSize)
+
+ extentInfo := make([]byte, 60)
+ copy(extentInfo, b[0x28:0x64])
+ // symlinks might store link target in extentInfo, or might store them elsewhere
+ var (
+ linkTarget string
+ allExtents extentBlockFinder
+ err error
+ )
+ if fileType == fileTypeSymbolicLink && fileSizeNum < 60 {
+ linkTarget = string(extentInfo[:fileSizeNum])
+ } else {
+ // parse the extent information in the inode to get the root of the extents tree
+ // we do not walk the entire tree, to get a slice of blocks for the file.
+ // If we want to do that, we call the extentBlockFinder.blocks() method
+ allExtents, err = parseExtents(extentInfo, sb.blockSize, 0, uint32(blocks))
+ if err != nil {
+ return nil, fmt.Errorf("error parsing extent tree: %v", err)
+ }
+ }
+
+ i := inode{
+ number: number,
+ permissionsGroup: parseGroupPermissions(mode),
+ permissionsOwner: parseOwnerPermissions(mode),
+ permissionsOther: parseOtherPermissions(mode),
+ fileType: fileType,
+ owner: binary.LittleEndian.Uint32(owner),
+ group: binary.LittleEndian.Uint32(group),
+ size: fileSizeNum,
+ hardLinks: binary.LittleEndian.Uint16(b[0x1a:0x1c]),
+ blocks: blocks,
+ filesystemBlocks: filesystemBlocks,
+ flags: &flags,
+ nfsFileVersion: binary.LittleEndian.Uint32(b[0x64:0x68]),
+ version: binary.LittleEndian.Uint64(version),
+ inodeSize: binary.LittleEndian.Uint16(b[0x80:0x82]) + minInodeSize,
+ deletionTime: binary.LittleEndian.Uint32(b[0x14:0x18]),
+ accessTime: time.Unix(int64(accessTimeSeconds), int64(accessTimeNanoseconds)),
+ changeTime: time.Unix(int64(changeTimeSeconds), int64(changeTimeNanoseconds)),
+ modifyTime: time.Unix(int64(modifyTimeSeconds), int64(modifyTimeNanoseconds)),
+ createTime: time.Unix(int64(createTimeSeconds), int64(createTimeNanoseconds)),
+ extendedAttributeBlock: binary.LittleEndian.Uint64(extendedAttributeBlock),
+ project: binary.LittleEndian.Uint32(b[0x9c:0x100]),
+ extents: allExtents,
+ linkTarget: linkTarget,
+ }
+ checksum := binary.LittleEndian.Uint32(checksumBytes)
+ actualChecksum := inodeChecksum(b, sb.checksumSeed, number, i.nfsFileVersion)
+
+ if actualChecksum != checksum {
+ return nil, fmt.Errorf("checksum mismatch, on-disk %x vs calculated %x", checksum, actualChecksum)
+ }
+
+ return &i, nil
+}
+
+// toBytes returns an inode ready to be written to disk
+//
+//nolint:unused // will be used in the future, not yet
+func (i *inode) toBytes(sb *superblock) []byte {
+ iSize := sb.inodeSize
+
+ b := make([]byte, iSize)
+
+ mode := make([]byte, 2)
+ owner := make([]byte, 4)
+ fileSize := make([]byte, 8)
+ group := make([]byte, 4)
+ accessTime := make([]byte, 8)
+ changeTime := make([]byte, 8)
+ modifyTime := make([]byte, 8)
+ createTime := make([]byte, 8)
+ version := make([]byte, 8)
+ extendedAttributeBlock := make([]byte, 8)
+
+ binary.LittleEndian.PutUint16(mode, i.permissionsGroup.toGroupInt()|i.permissionsOther.toOtherInt()|i.permissionsOwner.toOwnerInt()|uint16(i.fileType))
+ binary.LittleEndian.PutUint32(owner, i.owner)
+ binary.LittleEndian.PutUint32(group, i.group)
+ binary.LittleEndian.PutUint64(fileSize, i.size)
+ binary.LittleEndian.PutUint64(version, i.version)
+ binary.LittleEndian.PutUint64(extendedAttributeBlock, i.extendedAttributeBlock)
+
+ // there is some odd stuff that ext4 does with nanoseconds. We might need this in the future.
+ // See https://ext4.wiki.kernel.org/index.php/Ext4_Disk_Layout#Inode_Timestamps
+ // binary.LittleEndian.PutUint32(accessTime[4:8], (i.accessTimeNanoseconds<<2)&accessTime[4])
+ binary.LittleEndian.PutUint64(accessTime, uint64(i.accessTime.Unix()))
+ binary.LittleEndian.PutUint32(accessTime[4:8], uint32(i.accessTime.Nanosecond()))
+ binary.LittleEndian.PutUint64(createTime, uint64(i.createTime.Unix()))
+ binary.LittleEndian.PutUint32(createTime[4:8], uint32(i.createTime.Nanosecond()))
+ binary.LittleEndian.PutUint64(changeTime, uint64(i.changeTime.Unix()))
+ binary.LittleEndian.PutUint32(changeTime[4:8], uint32(i.changeTime.Nanosecond()))
+ binary.LittleEndian.PutUint64(modifyTime, uint64(i.modifyTime.Unix()))
+ binary.LittleEndian.PutUint32(modifyTime[4:8], uint32(i.modifyTime.Nanosecond()))
+
+ blocks := make([]byte, 8)
+ binary.LittleEndian.PutUint64(blocks, i.blocks)
+
+ copy(b[0x0:0x2], mode)
+ copy(b[0x2:0x4], owner[0:2])
+ copy(b[0x4:0x8], fileSize[0:4])
+ copy(b[0x8:0xc], accessTime[0:4])
+ copy(b[0xc:0x10], changeTime[0:4])
+ copy(b[0x10:0x14], modifyTime[0:4])
+
+ binary.LittleEndian.PutUint32(b[0x14:0x18], i.deletionTime)
+ copy(b[0x18:0x1a], group[0:2])
+ binary.LittleEndian.PutUint16(b[0x1a:0x1c], i.hardLinks)
+ copy(b[0x1c:0x20], blocks[0:4])
+ binary.LittleEndian.PutUint32(b[0x20:0x24], i.flags.toInt())
+ copy(b[0x24:0x28], version[0:4])
+ copy(b[0x28:0x64], i.extents.toBytes())
+ binary.LittleEndian.PutUint32(b[0x64:0x68], i.nfsFileVersion)
+ copy(b[0x68:0x6c], extendedAttributeBlock[0:4])
+ copy(b[0x6c:0x70], fileSize[4:8])
+ // b[0x70:0x74] is obsolete
+ copy(b[0x74:0x76], blocks[4:8])
+ copy(b[0x76:0x78], extendedAttributeBlock[4:6])
+ copy(b[0x78:0x7a], owner[2:4])
+ copy(b[0x7a:0x7c], group[2:4])
+ // b[0x7c:0x7e] is for checkeum
+ // b[0x7e:0x80] is unused
+ binary.LittleEndian.PutUint16(b[0x80:0x82], i.inodeSize-minInodeSize)
+ // b[0x82:0x84] is for checkeum
+ copy(b[0x84:0x88], changeTime[4:8])
+ copy(b[0x88:0x8c], modifyTime[4:8])
+ copy(b[0x8c:0x90], accessTime[4:8])
+ copy(b[0x90:0x94], createTime[0:4])
+ copy(b[0x94:0x98], createTime[4:8])
+
+ actualChecksum := inodeChecksum(b, sb.checksumSeed, i.number, i.nfsFileVersion)
+ checksum := make([]byte, 4)
+ binary.LittleEndian.PutUint32(checksum, actualChecksum)
+ copy(b[0x7c:0x7e], checksum[0:2])
+ copy(b[0x82:0x84], checksum[2:4])
+
+ return b
+}
+
+func parseOwnerPermissions(mode uint16) filePermissions {
+ return filePermissions{
+ execute: mode&filePermissionsOwnerExecute == filePermissionsOwnerExecute,
+ write: mode&filePermissionsOwnerWrite == filePermissionsOwnerWrite,
+ read: mode&filePermissionsOwnerRead == filePermissionsOwnerRead,
+ }
+}
+func parseGroupPermissions(mode uint16) filePermissions {
+ return filePermissions{
+ execute: mode&filePermissionsGroupExecute == filePermissionsGroupExecute,
+ write: mode&filePermissionsGroupWrite == filePermissionsGroupWrite,
+ read: mode&filePermissionsGroupRead == filePermissionsGroupRead,
+ }
+}
+func parseOtherPermissions(mode uint16) filePermissions {
+ return filePermissions{
+ execute: mode&filePermissionsOtherExecute == filePermissionsOtherExecute,
+ write: mode&filePermissionsOtherWrite == filePermissionsOtherWrite,
+ read: mode&filePermissionsOtherRead == filePermissionsOtherRead,
+ }
+}
+
+//nolint:unused // will be used in the future, not yet
+func (fp *filePermissions) toOwnerInt() uint16 {
+ var mode uint16
+ if fp.execute {
+ mode |= filePermissionsOwnerExecute
+ }
+ if fp.write {
+ mode |= filePermissionsOwnerWrite
+ }
+ if fp.read {
+ mode |= filePermissionsOwnerRead
+ }
+ return mode
+}
+
+//nolint:unused // will be used in the future, not yet
+func (fp *filePermissions) toOtherInt() uint16 {
+ var mode uint16
+ if fp.execute {
+ mode |= filePermissionsOtherExecute
+ }
+ if fp.write {
+ mode |= filePermissionsOtherWrite
+ }
+ if fp.read {
+ mode |= filePermissionsOtherRead
+ }
+ return mode
+}
+
+//nolint:unused // will be used in the future, not yet
+func (fp *filePermissions) toGroupInt() uint16 {
+ var mode uint16
+ if fp.execute {
+ mode |= filePermissionsGroupExecute
+ }
+ if fp.write {
+ mode |= filePermissionsGroupWrite
+ }
+ if fp.read {
+ mode |= filePermissionsGroupRead
+ }
+ return mode
+}
+
+// parseFileType from the uint16 mode. The mode is built of bottom 12 bits
+// being "any of" several permissions, and thus resolved via AND,
+// while the top 4 bits are "only one of" several types, and thus resolved via just equal.
+func parseFileType(mode uint16) fileType {
+ return fileType(mode & 0xF000)
+}
+
+func parseInodeFlags(flags uint32) inodeFlags {
+ return inodeFlags{
+ secureDeletion: inodeFlagSecureDeletion.included(flags),
+ preserveForUndeletion: inodeFlagPreserveForUndeletion.included(flags),
+ compressed: inodeFlagCompressed.included(flags),
+ synchronous: inodeFlagSynchronous.included(flags),
+ immutable: inodeFlagImmutable.included(flags),
+ appendOnly: inodeFlagAppendOnly.included(flags),
+ noDump: inodeFlagNoDump.included(flags),
+ noAccessTimeUpdate: inodeFlagNoAccessTimeUpdate.included(flags),
+ dirtyCompressed: inodeFlagDirtyCompressed.included(flags),
+ compressedClusters: inodeFlagCompressedClusters.included(flags),
+ noCompress: inodeFlagNoCompress.included(flags),
+ encryptedInode: inodeFlagEncryptedInode.included(flags),
+ hashedDirectoryIndexes: inodeFlagHashedDirectoryIndexes.included(flags),
+ AFSMagicDirectory: inodeFlagAFSMagicDirectory.included(flags),
+ alwaysJournal: inodeFlagAlwaysJournal.included(flags),
+ noMergeTail: inodeFlagNoMergeTail.included(flags),
+ syncDirectoryData: inodeFlagSyncDirectoryData.included(flags),
+ topDirectory: inodeFlagTopDirectory.included(flags),
+ hugeFile: inodeFlagHugeFile.included(flags),
+ usesExtents: inodeFlagUsesExtents.included(flags),
+ extendedAttributes: inodeFlagExtendedAttributes.included(flags),
+ blocksPastEOF: inodeFlagBlocksPastEOF.included(flags),
+ snapshot: inodeFlagSnapshot.included(flags),
+ deletingSnapshot: inodeFlagDeletingSnapshot.included(flags),
+ completedSnapshotShrink: inodeFlagCompletedSnapshotShrink.included(flags),
+ inlineData: inodeFlagInlineData.included(flags),
+ inheritProject: inodeFlagInheritProject.included(flags),
+ }
+}
+
+//nolint:unused // will be used in the future, not yet
+func (i *inodeFlags) toInt() uint32 {
+ var flags uint32
+
+ if i.secureDeletion {
+ flags |= uint32(inodeFlagSecureDeletion)
+ }
+ if i.preserveForUndeletion {
+ flags |= uint32(inodeFlagPreserveForUndeletion)
+ }
+ if i.compressed {
+ flags |= uint32(inodeFlagCompressed)
+ }
+ if i.synchronous {
+ flags |= uint32(inodeFlagSynchronous)
+ }
+ if i.immutable {
+ flags |= uint32(inodeFlagImmutable)
+ }
+ if i.appendOnly {
+ flags |= uint32(inodeFlagAppendOnly)
+ }
+ if i.noDump {
+ flags |= uint32(inodeFlagNoDump)
+ }
+ if i.noAccessTimeUpdate {
+ flags |= uint32(inodeFlagNoAccessTimeUpdate)
+ }
+ if i.dirtyCompressed {
+ flags |= uint32(inodeFlagDirtyCompressed)
+ }
+ if i.compressedClusters {
+ flags |= uint32(inodeFlagCompressedClusters)
+ }
+ if i.noCompress {
+ flags |= uint32(inodeFlagNoCompress)
+ }
+ if i.encryptedInode {
+ flags |= uint32(inodeFlagEncryptedInode)
+ }
+ if i.hashedDirectoryIndexes {
+ flags |= uint32(inodeFlagHashedDirectoryIndexes)
+ }
+ if i.AFSMagicDirectory {
+ flags |= uint32(inodeFlagAFSMagicDirectory)
+ }
+ if i.alwaysJournal {
+ flags |= uint32(inodeFlagAlwaysJournal)
+ }
+ if i.noMergeTail {
+ flags |= uint32(inodeFlagNoMergeTail)
+ }
+ if i.syncDirectoryData {
+ flags |= uint32(inodeFlagSyncDirectoryData)
+ }
+ if i.topDirectory {
+ flags |= uint32(inodeFlagTopDirectory)
+ }
+ if i.hugeFile {
+ flags |= uint32(inodeFlagHugeFile)
+ }
+ if i.usesExtents {
+ flags |= uint32(inodeFlagUsesExtents)
+ }
+ if i.extendedAttributes {
+ flags |= uint32(inodeFlagExtendedAttributes)
+ }
+ if i.blocksPastEOF {
+ flags |= uint32(inodeFlagBlocksPastEOF)
+ }
+ if i.snapshot {
+ flags |= uint32(inodeFlagSnapshot)
+ }
+ if i.deletingSnapshot {
+ flags |= uint32(inodeFlagDeletingSnapshot)
+ }
+ if i.completedSnapshotShrink {
+ flags |= uint32(inodeFlagCompletedSnapshotShrink)
+ }
+ if i.inlineData {
+ flags |= uint32(inodeFlagInlineData)
+ }
+ if i.inheritProject {
+ flags |= uint32(inodeFlagInheritProject)
+ }
+
+ return flags
+}
+
+// inodeChecksum calculate the checksum for an inode
+func inodeChecksum(b []byte, checksumSeed, inodeNumber, inodeGeneration uint32) uint32 {
+ numberBytes := make([]byte, 4)
+ binary.LittleEndian.PutUint32(numberBytes, inodeNumber)
+ crcResult := crc.CRC32c(checksumSeed, numberBytes)
+ genBytes := make([]byte, 4)
+ binary.LittleEndian.PutUint32(genBytes, inodeGeneration)
+ crcResult = crc.CRC32c(crcResult, genBytes)
+ checksum := crc.CRC32c(crcResult, b)
+ return checksum
+}
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/journaldevice_other.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/journaldevice_other.go
new file mode 100644
index 00000000000..09a61488e6d
--- /dev/null
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/journaldevice_other.go
@@ -0,0 +1,12 @@
+//go:build !linux && !unix && !darwin && !windows
+
+package ext4
+
+import (
+ "fmt"
+ "runtime"
+)
+
+func journalDevice(devicePath string) (deviceNumber uint32, err error) {
+ return 0, fmt.Errorf("external journal device unsupported on filesystem %s", runtime.GOOS)
+}
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/journaldevice_shared.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/journaldevice_shared.go
new file mode 100644
index 00000000000..00a91da9b85
--- /dev/null
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/journaldevice_shared.go
@@ -0,0 +1,40 @@
+//go:build linux || unix || freebsd || netbsd || openbsd || darwin
+
+package ext4
+
+import (
+ "fmt"
+ "math"
+
+ "golang.org/x/sys/unix"
+)
+
+func journalDevice(devicePath string) (deviceNumber uint32, err error) {
+ // Use unix.Stat to get file status
+ var stat unix.Stat_t
+ err = unix.Stat(devicePath, &stat)
+ if err != nil {
+ return deviceNumber, err
+ }
+
+ // Extract major and minor device numbers
+ //nolint:unconvert,nolintlint // lint stumbles on this, thinks it is an unnecessary conversion, which is true
+ // on Linux, but not on others. So we will be explicit about this, and add a nolint flag
+ major := unix.Major(uint64(stat.Rdev))
+ //nolint:unconvert,nolintlint // lint stumbles on this, thinks it is an unnecessary conversion, which is true
+ // on Linux, but not on others. So we will be explicit about this, and add a nolint flag
+ minor := unix.Minor(uint64(stat.Rdev))
+
+ // Combine major and minor numbers using unix.Mkdev
+ // interestingly, this does not 100% align with what I read about linux mkdev works, which would be:
+ // const minorbits = 20
+ // func mkdev(major, minor uint32) uint32 {
+ // return (((major) << minorbits) | (minor))
+ // }
+ // we leave this here for a future potential fix
+ journalDeviceNumber64 := unix.Mkdev(major, minor)
+ if journalDeviceNumber64 > math.MaxUint32 {
+ return deviceNumber, fmt.Errorf("journal device number %d is too large", journalDeviceNumber64)
+ }
+ return uint32(journalDeviceNumber64), nil
+}
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/journaldevice_windows.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/journaldevice_windows.go
new file mode 100644
index 00000000000..bf36fb2e38a
--- /dev/null
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/journaldevice_windows.go
@@ -0,0 +1,11 @@
+//go:build windows
+
+package ext4
+
+import (
+ "errors"
+)
+
+func journalDevice(devicePath string) (deviceNumber uint32, err error) {
+ return 0, errors.New("external journal device unsupported on Windows")
+}
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/md4/md4.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/md4/md4.go
new file mode 100644
index 00000000000..77df42700b7
--- /dev/null
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/md4/md4.go
@@ -0,0 +1,73 @@
+package md4
+
+// rotateLeft rotates a 32-bit integer to the left
+func rotateLeft(x uint32, s uint) uint32 {
+ return (x << s) | (x >> (32 - s))
+}
+
+// basic MD4 functions
+func f(x, y, z uint32) uint32 {
+ return z ^ (x & (y ^ z))
+}
+
+func g(x, y, z uint32) uint32 {
+ return (x & y) + ((x ^ y) & z)
+}
+
+func h(x, y, z uint32) uint32 {
+ return x ^ y ^ z
+}
+
+// MD4 constants
+const (
+ k1 uint32 = 0
+ k2 uint32 = 0x5A827999
+ k3 uint32 = 0x6ED9EBA1
+)
+
+// round applies the round function as a macro
+func round(f func(uint32, uint32, uint32) uint32, a, b, c, d, x uint32, s uint) uint32 {
+ return rotateLeft(a+f(b, c, d)+x, s)
+}
+
+// halfMD4Transform basic cut-down MD4 transform. Returns only 32 bits of result.
+func HalfMD4Transform(buf [4]uint32, in []uint32) uint32 {
+ var a, b, c, d = buf[0], buf[1], buf[2], buf[3]
+
+ /* Round 1 */
+ a = round(f, a, b, c, d, in[0]+k1, 3)
+ d = round(f, d, a, b, c, in[1]+k1, 7)
+ c = round(f, c, d, a, b, in[2]+k1, 11)
+ b = round(f, b, c, d, a, in[3]+k1, 19)
+ a = round(f, a, b, c, d, in[4]+k1, 3)
+ d = round(f, d, a, b, c, in[5]+k1, 7)
+ c = round(f, c, d, a, b, in[6]+k1, 11)
+ b = round(f, b, c, d, a, in[7]+k1, 19)
+
+ /* Round 2 */
+ a = round(g, a, b, c, d, in[1]+k2, 3)
+ d = round(g, d, a, b, c, in[3]+k2, 5)
+ c = round(g, c, d, a, b, in[5]+k2, 9)
+ b = round(g, b, c, d, a, in[7]+k2, 13)
+ a = round(g, a, b, c, d, in[0]+k2, 3)
+ d = round(g, d, a, b, c, in[2]+k2, 5)
+ c = round(g, c, d, a, b, in[4]+k2, 9)
+ b = round(g, b, c, d, a, in[6]+k2, 13)
+
+ /* Round 3 */
+ a = round(h, a, b, c, d, in[3]+k3, 3)
+ d = round(h, d, a, b, c, in[7]+k3, 9)
+ c = round(h, c, d, a, b, in[2]+k3, 11)
+ b = round(h, b, c, d, a, in[6]+k3, 15)
+ a = round(h, a, b, c, d, in[1]+k3, 3)
+ d = round(h, d, a, b, c, in[5]+k3, 9)
+ c = round(h, c, d, a, b, in[0]+k3, 11)
+ b = round(h, b, c, d, a, in[4]+k3, 15)
+
+ buf[0] += a
+ buf[1] += b
+ buf[2] += c
+ buf[3] += d
+
+ return buf[1]
+}
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/miscflags.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/miscflags.go
new file mode 100644
index 00000000000..d2a22368e38
--- /dev/null
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/miscflags.go
@@ -0,0 +1,34 @@
+package ext4
+
+// miscFlags is a structure holding various miscellaneous flags
+type miscFlags struct {
+ signedDirectoryHash bool
+ unsignedDirectoryHash bool
+ developmentTest bool
+}
+
+func parseMiscFlags(flags uint32) miscFlags {
+ m := miscFlags{
+ signedDirectoryHash: flagSignedDirectoryHash.included(flags),
+ unsignedDirectoryHash: flagUnsignedDirectoryHash.included(flags),
+ developmentTest: flagTestDevCode.included(flags),
+ }
+ return m
+}
+
+func (m *miscFlags) toInt() uint32 {
+ var flags uint32
+
+ if m.signedDirectoryHash {
+ flags |= uint32(flagSignedDirectoryHash)
+ }
+ if m.unsignedDirectoryHash {
+ flags |= uint32(flagUnsignedDirectoryHash)
+ }
+ if m.developmentTest {
+ flags |= uint32(flagTestDevCode)
+ }
+ return flags
+}
+
+var defaultMiscFlags = miscFlags{}
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/mountoptions.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/mountoptions.go
new file mode 100644
index 00000000000..a93a21a11d1
--- /dev/null
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/mountoptions.go
@@ -0,0 +1,182 @@
+package ext4
+
+const (
+ // default mount options
+ mountPrintDebugInfo mountOption = 0x1
+ mountNewFilesGIDContainingDirectory mountOption = 0x2
+ mountUserspaceExtendedAttributes mountOption = 0x4
+ mountPosixACLs mountOption = 0x8
+ mount16BitUIDs mountOption = 0x10
+ mountJournalDataAndMetadata mountOption = 0x20
+ mountFlushBeforeJournal mountOption = 0x40
+ mountUnorderingDataMetadata mountOption = 0x60
+ mountDisableWriteFlushes mountOption = 0x100
+ mountTrackMetadataBlocks mountOption = 0x200
+ mountDiscardDeviceSupport mountOption = 0x400
+ mountDisableDelayedAllocation mountOption = 0x800
+)
+
+// mountOptions is a structure holding which default mount options are set
+type mountOptions struct {
+ printDebugInfo bool
+ newFilesGIDContainingDirectory bool
+ userspaceExtendedAttributes bool
+ posixACLs bool
+ use16BitUIDs bool
+ journalDataAndMetadata bool
+ flushBeforeJournal bool
+ unorderingDataMetadata bool
+ disableWriteFlushes bool
+ trackMetadataBlocks bool
+ discardDeviceSupport bool
+ disableDelayedAllocation bool
+}
+
+type mountOption uint32
+
+func (m mountOption) included(a uint32) bool {
+ return a&uint32(m) == uint32(m)
+}
+
+type MountOpt func(*mountOptions)
+
+func WithDefaultMountOptionPrintDebuggingInfo(enable bool) MountOpt {
+ return func(o *mountOptions) {
+ o.printDebugInfo = enable
+ }
+}
+
+func WithDefaultMountOptionGIDFromDirectory(enable bool) MountOpt {
+ return func(o *mountOptions) {
+ o.newFilesGIDContainingDirectory = enable
+ }
+}
+
+func WithDefaultMountOptionUserspaceXattrs(enable bool) MountOpt {
+ return func(o *mountOptions) {
+ o.userspaceExtendedAttributes = enable
+ }
+}
+
+func WithDefaultMountOptionPOSIXACLs(enable bool) MountOpt {
+ return func(o *mountOptions) {
+ o.posixACLs = enable
+ }
+}
+
+func WithDefaultMountOptionUID16Bit(enable bool) MountOpt {
+ return func(o *mountOptions) {
+ o.use16BitUIDs = enable
+ }
+}
+
+func WithDefaultMountOptionJournalModeData(enable bool) MountOpt {
+ return func(o *mountOptions) {
+ o.journalDataAndMetadata = enable
+ }
+}
+
+func WithDefaultMountOptionJournalModeOrdered(enable bool) MountOpt {
+ return func(o *mountOptions) {
+ o.flushBeforeJournal = enable
+ }
+}
+
+func WithDefaultMountOptionJournalModeWriteback(enable bool) MountOpt {
+ return func(o *mountOptions) {
+ o.unorderingDataMetadata = enable
+ }
+}
+
+func WithDefaultMountOptionDisableWriteFlushes(enable bool) MountOpt {
+ return func(o *mountOptions) {
+ o.disableWriteFlushes = enable
+ }
+}
+
+func WithDefaultMountOptionBlockValidity(enable bool) MountOpt {
+ return func(o *mountOptions) {
+ o.trackMetadataBlocks = enable
+ }
+}
+
+func WithDefaultMountOptionDiscardSupport(enable bool) MountOpt {
+ return func(o *mountOptions) {
+ o.discardDeviceSupport = enable
+ }
+}
+
+func WithDefaultMountOptionDisableDelayedAllocation(enable bool) MountOpt {
+ return func(o *mountOptions) {
+ o.disableDelayedAllocation = enable
+ }
+}
+
+func defaultMountOptionsFromOpts(opts []MountOpt) *mountOptions {
+ o := &mountOptions{}
+ for _, opt := range opts {
+ opt(o)
+ }
+ return o
+}
+
+func parseMountOptions(flags uint32) mountOptions {
+ m := mountOptions{
+ printDebugInfo: mountPrintDebugInfo.included(flags),
+ newFilesGIDContainingDirectory: mountNewFilesGIDContainingDirectory.included(flags),
+ userspaceExtendedAttributes: mountUserspaceExtendedAttributes.included(flags),
+ posixACLs: mountPosixACLs.included(flags),
+ use16BitUIDs: mount16BitUIDs.included(flags),
+ journalDataAndMetadata: mountJournalDataAndMetadata.included(flags),
+ flushBeforeJournal: mountFlushBeforeJournal.included(flags),
+ unorderingDataMetadata: mountUnorderingDataMetadata.included(flags),
+ disableWriteFlushes: mountDisableWriteFlushes.included(flags),
+ trackMetadataBlocks: mountTrackMetadataBlocks.included(flags),
+ discardDeviceSupport: mountDiscardDeviceSupport.included(flags),
+ disableDelayedAllocation: mountDisableDelayedAllocation.included(flags),
+ }
+ return m
+}
+
+func (m *mountOptions) toInt() uint32 {
+ var flags uint32
+
+ if m.printDebugInfo {
+ flags |= uint32(mountPrintDebugInfo)
+ }
+ if m.newFilesGIDContainingDirectory {
+ flags |= uint32(mountNewFilesGIDContainingDirectory)
+ }
+ if m.userspaceExtendedAttributes {
+ flags |= uint32(mountUserspaceExtendedAttributes)
+ }
+ if m.posixACLs {
+ flags |= uint32(mountPosixACLs)
+ }
+ if m.use16BitUIDs {
+ flags |= uint32(mount16BitUIDs)
+ }
+ if m.journalDataAndMetadata {
+ flags |= uint32(mountJournalDataAndMetadata)
+ }
+ if m.flushBeforeJournal {
+ flags |= uint32(mountFlushBeforeJournal)
+ }
+ if m.unorderingDataMetadata {
+ flags |= uint32(mountUnorderingDataMetadata)
+ }
+ if m.disableWriteFlushes {
+ flags |= uint32(mountDisableWriteFlushes)
+ }
+ if m.trackMetadataBlocks {
+ flags |= uint32(mountTrackMetadataBlocks)
+ }
+ if m.discardDeviceSupport {
+ flags |= uint32(mountDiscardDeviceSupport)
+ }
+ if m.disableDelayedAllocation {
+ flags |= uint32(mountDisableDelayedAllocation)
+ }
+
+ return flags
+}
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/superblock.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/superblock.go
new file mode 100644
index 00000000000..fcafda9390b
--- /dev/null
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/superblock.go
@@ -0,0 +1,768 @@
+package ext4
+
+import (
+ "encoding/binary"
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+ "time"
+
+ "github.com/diskfs/go-diskfs/filesystem/ext4/crc"
+ "github.com/diskfs/go-diskfs/util"
+ "github.com/google/uuid"
+)
+
+type filesystemState uint16
+type errorBehaviour uint16
+type osFlag uint32
+type feature uint32
+type hashAlgorithm byte
+type flag uint32
+type encryptionAlgorithm byte
+
+func (f feature) included(a uint32) bool {
+ return a&uint32(f) == uint32(f)
+}
+
+//nolint:unused // we know this is unused, but it will be needed in future
+func (f flag) equal(a flag) bool {
+ return f == a
+}
+func (f flag) included(a uint32) bool {
+ return a&uint32(f) == uint32(f)
+}
+
+const (
+ // superblockSignature is the signature for every superblock
+ superblockSignature uint16 = 0xef53
+ // optional states for the filesystem
+ fsStateCleanlyUnmounted filesystemState = 0x0001
+ fsStateErrors filesystemState = 0x0002
+ fsStateOrphansRecovered filesystemState = 0x0004
+ // how to handle erorrs
+ errorsContinue errorBehaviour = 1
+ errorsRemountReadOnly errorBehaviour = 2
+ errorsPanic errorBehaviour = 3
+ // checksum type
+ checkSumTypeCRC32c byte = 1
+ // oses
+ osLinux osFlag = 0
+ osHurd osFlag = 1
+ osMasix osFlag = 2
+ osFreeBSD osFlag = 3
+ osLites osFlag = 4
+ // compatible, incompatible, and compatibleReadOnly feature flags
+ compatFeatureDirectoryPreAllocate feature = 0x1
+ compatFeatureImagicInodes feature = 0x2
+ compatFeatureHasJournal feature = 0x4
+ compatFeatureExtendedAttributes feature = 0x8
+ compatFeatureReservedGDTBlocksForExpansion feature = 0x10
+ compatFeatureDirectoryIndices feature = 0x20
+ compatFeatureLazyBlockGroup feature = 0x40
+ compatFeatureExcludeInode feature = 0x80
+ compatFeatureExcludeBitmap feature = 0x100
+ compatFeatureSparseSuperBlockV2 feature = 0x200
+ compatFeatureFastCommit feature = 0x400
+ compatFeatureStableInodes feature = 0x800
+ compatFeatureOrphanFile feature = 0x1000
+ incompatFeatureCompression feature = 0x1
+ incompatFeatureDirectoryEntriesRecordFileType feature = 0x2
+ incompatFeatureRecoveryNeeded feature = 0x4
+ incompatFeatureSeparateJournalDevice feature = 0x8
+ incompatFeatureMetaBlockGroups feature = 0x10
+ incompatFeatureExtents feature = 0x40
+ incompatFeature64Bit feature = 0x80
+ incompatFeatureMultipleMountProtection feature = 0x100
+ incompatFeatureFlexBlockGroups feature = 0x200
+ incompatFeatureExtendedAttributeInodes feature = 0x400
+ incompatFeatureDataInDirectoryEntries feature = 0x1000
+ incompatFeatureMetadataChecksumSeedInSuperblock feature = 0x2000
+ incompatFeatureLargeDirectory feature = 0x4000
+ incompatFeatureDataInInode feature = 0x8000
+ incompatFeatureEncryptInodes feature = 0x10000
+ roCompatFeatureSparseSuperblock feature = 0x1
+ roCompatFeatureLargeFile feature = 0x2
+ roCompatFeatureBtreeDirectory feature = 0x4
+ roCompatFeatureHugeFile feature = 0x8
+ roCompatFeatureGDTChecksum feature = 0x10
+ roCompatFeatureLargeSubdirectoryCount feature = 0x20
+ roCompatFeatureLargeInodes feature = 0x40
+ roCompatFeatureSnapshot feature = 0x80
+ roCompatFeatureQuota feature = 0x100
+ roCompatFeatureBigalloc feature = 0x200
+ roCompatFeatureMetadataChecksums feature = 0x400
+ roCompatFeatureReplicas feature = 0x800
+ roCompatFeatureReadOnly feature = 0x1000
+ roCompatFeatureProjectQuotas feature = 0x2000
+ // hash algorithms for htree directory entries
+ hashLegacy hashAlgorithm = 0x0
+ hashHalfMD4 hashAlgorithm = 0x1
+ hashTea hashAlgorithm = 0x2
+ hashLegacyUnsigned hashAlgorithm = 0x3
+ hashHalfMD4Unsigned hashAlgorithm = 0x4
+ hashTeaUnsigned hashAlgorithm = 0x5
+ // miscellaneous flags
+ flagSignedDirectoryHash flag = 0x0001
+ flagUnsignedDirectoryHash flag = 0x0002
+ flagTestDevCode flag = 0x0004
+ // encryption algorithms
+ //nolint:unused // we know these are unused, but they will be needed in the future
+ encryptionAlgorithmInvalid encryptionAlgorithm = 0
+ encryptionAlgorithm256AESXTS encryptionAlgorithm = 1
+ encryptionAlgorithm256AESGCM encryptionAlgorithm = 2
+ encryptionAlgorithm256AESCBC encryptionAlgorithm = 3
+)
+
+// journalBackup is a backup in the superblock of the journal's inode i_block[] array and size
+type journalBackup struct {
+ iBlocks [15]uint32
+ iSize uint64
+}
+
+// Superblock is a structure holding the ext4 superblock
+type superblock struct {
+ inodeCount uint32
+ blockCount uint64
+ reservedBlocks uint64
+ freeBlocks uint64
+ freeInodes uint32
+ firstDataBlock uint32
+ blockSize uint32
+ clusterSize uint64
+ blocksPerGroup uint32
+ clustersPerGroup uint32
+ inodesPerGroup uint32
+ mountTime time.Time
+ writeTime time.Time
+ mountCount uint16
+ mountsToFsck uint16
+ filesystemState filesystemState
+ errorBehaviour errorBehaviour
+ minorRevision uint16
+ lastCheck time.Time
+ checkInterval uint32
+ creatorOS osFlag
+ revisionLevel uint32
+ reservedBlocksDefaultUID uint16
+ reservedBlocksDefaultGID uint16
+ firstNonReservedInode uint32
+ inodeSize uint16
+ blockGroup uint16
+ features featureFlags
+ uuid *uuid.UUID
+ volumeLabel string
+ lastMountedDirectory string
+ algorithmUsageBitmap uint32
+ preallocationBlocks byte
+ preallocationDirectoryBlocks byte
+ reservedGDTBlocks uint16
+ journalSuperblockUUID *uuid.UUID
+ journalInode uint32
+ journalDeviceNumber uint32
+ orphanedInodesStart uint32
+ hashTreeSeed []uint32
+ hashVersion hashAlgorithm
+ groupDescriptorSize uint16
+ defaultMountOptions mountOptions
+ firstMetablockGroup uint32
+ mkfsTime time.Time
+ journalBackup *journalBackup
+ // 64-bit mode features
+ inodeMinBytes uint16
+ inodeReserveBytes uint16
+ miscFlags miscFlags
+ raidStride uint16
+ multiMountPreventionInterval uint16
+ multiMountProtectionBlock uint64
+ raidStripeWidth uint32
+ logGroupsPerFlex uint64
+ checksumType byte
+ totalKBWritten uint64
+ snapshotInodeNumber uint32
+ snapshotID uint32
+ snapshotReservedBlocks uint64
+ snapshotStartInode uint32
+ errorCount uint32
+ errorFirstTime time.Time
+ errorFirstInode uint32
+ errorFirstBlock uint64
+ errorFirstFunction string
+ errorFirstLine uint32
+ errorLastTime time.Time
+ errorLastInode uint32
+ errorLastLine uint32
+ errorLastBlock uint64
+ errorLastFunction string
+ errorFirstCode byte
+ errorLastCode byte
+ mountOptions string
+ userQuotaInode uint32
+ groupQuotaInode uint32
+ overheadBlocks uint32
+ backupSuperblockBlockGroups [2]uint32
+ encryptionAlgorithms [4]encryptionAlgorithm
+ encryptionSalt [16]byte
+ lostFoundInode uint32
+ projectQuotaInode uint32
+ checksumSeed uint32
+ // encoding
+ filenameCharsetEncoding uint16
+ filenameCharsetEncodingFlags uint16
+ // inode for tracking orphaned inodes
+ orphanedInodeInodeNumber uint32
+}
+
+func (sb *superblock) equal(o *superblock) bool {
+ if (sb == nil && o != nil) || (o == nil && sb != nil) {
+ return false
+ }
+ if sb == nil && o == nil {
+ return true
+ }
+ return reflect.DeepEqual(sb, o)
+}
+
+// FSInformationSectorFromBytes create an FSInformationSector struct from bytes
+func superblockFromBytes(b []byte) (*superblock, error) {
+ bLen := len(b)
+ if bLen != int(SuperblockSize) {
+ return nil, fmt.Errorf("cannot read superblock from %d bytes instead of expected %d", bLen, SuperblockSize)
+ }
+
+ // check the magic signature
+ actualSignature := binary.LittleEndian.Uint16(b[0x38:0x3a])
+ if actualSignature != superblockSignature {
+ return nil, fmt.Errorf("erroneous signature at location 0x38 was %x instead of expected %x", actualSignature, superblockSignature)
+ }
+
+ sb := superblock{}
+
+ // first read feature flags of various types
+ compatFlags := binary.LittleEndian.Uint32(b[0x5c:0x60])
+ incompatFlags := binary.LittleEndian.Uint32(b[0x60:0x64])
+ roCompatFlags := binary.LittleEndian.Uint32(b[0x64:0x68])
+ // track which ones are set
+ sb.features = parseFeatureFlags(compatFlags, incompatFlags, roCompatFlags)
+
+ sb.inodeCount = binary.LittleEndian.Uint32(b[0:4])
+
+ // block count, reserved block count and free blocks depends on whether the fs is 64-bit or not
+ blockCount := make([]byte, 8)
+ reservedBlocks := make([]byte, 8)
+ freeBlocks := make([]byte, 8)
+
+ copy(blockCount[0:4], b[0x4:0x8])
+ copy(reservedBlocks[0:4], b[0x8:0xc])
+ copy(freeBlocks[0:4], b[0xc:0x10])
+
+ if sb.features.fs64Bit {
+ copy(blockCount[4:8], b[0x150:0x154])
+ copy(reservedBlocks[4:8], b[0x154:0x158])
+ copy(freeBlocks[4:8], b[0x158:0x15c])
+ }
+ sb.blockCount = binary.LittleEndian.Uint64(blockCount)
+ sb.reservedBlocks = binary.LittleEndian.Uint64(reservedBlocks)
+ sb.freeBlocks = binary.LittleEndian.Uint64(freeBlocks)
+
+ sb.freeInodes = binary.LittleEndian.Uint32(b[0x10:0x14])
+ sb.firstDataBlock = binary.LittleEndian.Uint32(b[0x14:0x18])
+ sb.blockSize = uint32(math.Exp2(float64(10 + binary.LittleEndian.Uint32(b[0x18:0x1c]))))
+ sb.clusterSize = uint64(math.Exp2(float64(binary.LittleEndian.Uint32(b[0x1c:0x20]))))
+ sb.blocksPerGroup = binary.LittleEndian.Uint32(b[0x20:0x24])
+ if sb.features.bigalloc {
+ sb.clustersPerGroup = binary.LittleEndian.Uint32(b[0x24:0x28])
+ }
+ sb.inodesPerGroup = binary.LittleEndian.Uint32(b[0x28:0x2c])
+ // these higher bits are listed as reserved in https://ext4.wiki.kernel.org/index.php/Ext4_Disk_Layout
+ // but looking at the source to mke2fs, we see that some are used, see
+ // https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git/tree/lib/ext2fs/ext2_fs.h#n653
+ //
+ // mount time has low 32 bits at 0x2c and high 8 bits at 0x274
+ // write time has low 32 bits at 0x30 and high 8 bits at 0x275
+ // mkfs time has low 32 bits at 0x108 and high 8 bits at 0x276
+ // lastcheck time has low 32 bits at 0x40 and high 8 bits at 0x277
+ // firsterror time has low 32 bits at 0x198 and high 8 bits at 0x278
+ // lasterror time has low 32 bits at 0x1cc and high 8 bits at 0x279
+ // firsterror code is 8 bits at 0x27a
+ // lasterror code is 8 bits at 0x27b
+ sb.mountTime = bytesToTime(b[0x2c:0x30], b[0x274:0x275])
+ sb.writeTime = bytesToTime(b[0x30:0x34], b[0x275:0x276])
+ sb.mkfsTime = bytesToTime(b[0x108:0x10c], b[0x276:0x277])
+ sb.lastCheck = bytesToTime(b[0x40:0x44], b[0x277:0x278])
+ sb.errorFirstTime = bytesToTime(b[0x198:0x19c], b[0x278:0x279])
+ sb.errorLastTime = bytesToTime(b[0x1cc:0x1d0], b[0x279:0x280])
+
+ sb.errorFirstCode = b[0x27a]
+ sb.errorLastCode = b[0x27b]
+
+ sb.mountCount = binary.LittleEndian.Uint16(b[0x34:0x36])
+ sb.mountsToFsck = binary.LittleEndian.Uint16(b[0x36:0x38])
+
+ sb.filesystemState = filesystemState(binary.LittleEndian.Uint16(b[0x3a:0x3c]))
+ sb.errorBehaviour = errorBehaviour(binary.LittleEndian.Uint16(b[0x3c:0x3e]))
+
+ sb.minorRevision = binary.LittleEndian.Uint16(b[0x3e:0x40])
+ sb.checkInterval = binary.LittleEndian.Uint32(b[0x44:0x48])
+
+ sb.creatorOS = osFlag(binary.LittleEndian.Uint32(b[0x48:0x4c]))
+ sb.revisionLevel = binary.LittleEndian.Uint32(b[0x4c:0x50])
+ sb.reservedBlocksDefaultUID = binary.LittleEndian.Uint16(b[0x50:0x52])
+ sb.reservedBlocksDefaultGID = binary.LittleEndian.Uint16(b[0x52:0x54])
+
+ sb.firstNonReservedInode = binary.LittleEndian.Uint32(b[0x54:0x58])
+ sb.inodeSize = binary.LittleEndian.Uint16(b[0x58:0x5a])
+ sb.blockGroup = binary.LittleEndian.Uint16(b[0x5a:0x5c])
+
+ voluuid, err := uuid.FromBytes(b[0x68:0x78])
+ if err != nil {
+ return nil, fmt.Errorf("unable to read volume UUID: %v", err)
+ }
+ sb.uuid = &voluuid
+ sb.volumeLabel = minString(b[0x78:0x88])
+ sb.lastMountedDirectory = minString(b[0x88:0xc8])
+ sb.algorithmUsageBitmap = binary.LittleEndian.Uint32(b[0xc8:0xcc])
+
+ sb.preallocationBlocks = b[0xcc]
+ sb.preallocationDirectoryBlocks = b[0xcd]
+ sb.reservedGDTBlocks = binary.LittleEndian.Uint16(b[0xce:0xd0])
+
+ journaluuid, err := uuid.FromBytes(b[0xd0:0xe0])
+ if err != nil {
+ return nil, fmt.Errorf("unable to read journal UUID: %v", err)
+ }
+ sb.journalSuperblockUUID = &journaluuid
+ sb.journalInode = binary.LittleEndian.Uint32(b[0xe0:0xe4])
+ sb.journalDeviceNumber = binary.LittleEndian.Uint32(b[0xe4:0xe8])
+ sb.orphanedInodesStart = binary.LittleEndian.Uint32(b[0xe8:0xec])
+
+ htreeSeed := make([]uint32, 0, 4)
+ htreeSeed = append(htreeSeed,
+ binary.LittleEndian.Uint32(b[0xec:0xf0]),
+ binary.LittleEndian.Uint32(b[0xf0:0xf4]),
+ binary.LittleEndian.Uint32(b[0xf4:0xf8]),
+ binary.LittleEndian.Uint32(b[0xf8:0xfc]),
+ )
+ sb.hashTreeSeed = htreeSeed
+
+ sb.hashVersion = hashAlgorithm(b[0xfc])
+
+ sb.groupDescriptorSize = binary.LittleEndian.Uint16(b[0xfe:0x100])
+
+ sb.defaultMountOptions = parseMountOptions(binary.LittleEndian.Uint32(b[0x100:0x104]))
+ sb.firstMetablockGroup = binary.LittleEndian.Uint32(b[0x104:0x108])
+
+ journalBackupType := b[0xfd]
+ if journalBackupType == 0 || journalBackupType == 1 {
+ journalBackupArray := [15]uint32{}
+ startJournalBackup := 0x10c
+ for i := 0; i < 15; i++ {
+ start := startJournalBackup + 4*i
+ end := startJournalBackup + 4*i + 4
+ journalBackupArray[i] = binary.LittleEndian.Uint32(b[start:end])
+ }
+ iSizeBytes := make([]byte, 8)
+
+ copy(iSizeBytes[0:4], b[startJournalBackup+4*16:startJournalBackup+4*17])
+ copy(iSizeBytes[4:8], b[startJournalBackup+4*15:startJournalBackup+4*16])
+
+ sb.journalBackup = &journalBackup{
+ iSize: binary.LittleEndian.Uint64(iSizeBytes),
+ iBlocks: journalBackupArray,
+ }
+ }
+
+ sb.inodeMinBytes = binary.LittleEndian.Uint16(b[0x15c:0x15e])
+ sb.inodeReserveBytes = binary.LittleEndian.Uint16(b[0x15e:0x160])
+ sb.miscFlags = parseMiscFlags(binary.LittleEndian.Uint32(b[0x160:0x164]))
+
+ sb.raidStride = binary.LittleEndian.Uint16(b[0x164:0x166])
+ sb.raidStripeWidth = binary.LittleEndian.Uint32(b[0x170:0x174])
+
+ sb.multiMountPreventionInterval = binary.LittleEndian.Uint16(b[0x166:0x168])
+ sb.multiMountProtectionBlock = binary.LittleEndian.Uint64(b[0x168:0x170])
+
+ sb.logGroupsPerFlex = uint64(math.Exp2(float64(b[0x174])))
+
+ sb.checksumType = b[0x175] // only valid one is 1
+ if sb.checksumType != checkSumTypeCRC32c {
+ return nil, fmt.Errorf("cannot read superblock: invalid checksum type %d, only valid is %d", sb.checksumType, checkSumTypeCRC32c)
+ }
+
+ // b[0x176:0x178] are reserved padding
+
+ sb.totalKBWritten = binary.LittleEndian.Uint64(b[0x178:0x180])
+
+ sb.snapshotInodeNumber = binary.LittleEndian.Uint32(b[0x180:0x184])
+ sb.snapshotID = binary.LittleEndian.Uint32(b[0x184:0x188])
+ sb.snapshotReservedBlocks = binary.LittleEndian.Uint64(b[0x188:0x190])
+ sb.snapshotStartInode = binary.LittleEndian.Uint32(b[0x190:0x194])
+
+ // errors
+ sb.errorCount = binary.LittleEndian.Uint32(b[0x194:0x198])
+ sb.errorFirstInode = binary.LittleEndian.Uint32(b[0x19c:0x1a0])
+ sb.errorFirstBlock = binary.LittleEndian.Uint64(b[0x1a0:0x1a8])
+ sb.errorFirstFunction = minString(b[0x1a8:0x1c8])
+ sb.errorFirstLine = binary.LittleEndian.Uint32(b[0x1c8:0x1cc])
+ sb.errorLastInode = binary.LittleEndian.Uint32(b[0x1d0:0x1d4])
+ sb.errorLastLine = binary.LittleEndian.Uint32(b[0x1d4:0x1d8])
+ sb.errorLastBlock = binary.LittleEndian.Uint64(b[0x1d8:0x1e0])
+ sb.errorLastFunction = minString(b[0x1e0:0x200])
+
+ sb.mountOptions = minString(b[0x200:0x240])
+ sb.userQuotaInode = binary.LittleEndian.Uint32(b[0x240:0x244])
+ sb.groupQuotaInode = binary.LittleEndian.Uint32(b[0x244:0x248])
+ // overheadBlocks *always* is 0
+ sb.overheadBlocks = binary.LittleEndian.Uint32(b[0x248:0x24c])
+ sb.backupSuperblockBlockGroups = [2]uint32{
+ binary.LittleEndian.Uint32(b[0x24c:0x250]),
+ binary.LittleEndian.Uint32(b[0x250:0x254]),
+ }
+ for i := 0; i < 4; i++ {
+ sb.encryptionAlgorithms[i] = encryptionAlgorithm(b[0x254+i])
+ }
+ for i := 0; i < 16; i++ {
+ sb.encryptionSalt[i] = b[0x258+i]
+ }
+ sb.lostFoundInode = binary.LittleEndian.Uint32(b[0x268:0x26c])
+ sb.projectQuotaInode = binary.LittleEndian.Uint32(b[0x26c:0x270])
+
+ sb.checksumSeed = binary.LittleEndian.Uint32(b[0x270:0x274])
+ // what if the seed is missing? It can be.
+ if sb.features.metadataChecksums && sb.checksumSeed == 0 {
+ sb.checksumSeed = crc.CRC32c(0xffffffff, sb.uuid[:])
+ }
+
+ sb.filenameCharsetEncoding = binary.LittleEndian.Uint16(b[0x27c:0x27e])
+ sb.filenameCharsetEncodingFlags = binary.LittleEndian.Uint16(b[0x27e:0x280])
+ sb.orphanedInodeInodeNumber = binary.LittleEndian.Uint32(b[0x280:0x284])
+
+ // b[0x288:0x3fc] are reserved for zero padding
+
+ // checksum
+ checksum := binary.LittleEndian.Uint32(b[0x3fc:0x400])
+
+ // calculate the checksum and validate - we use crc32c
+ if sb.features.metadataChecksums {
+ actualChecksum := crc.CRC32c(0xffffffff, b[0:0x3fc])
+ if actualChecksum != checksum {
+ return nil, fmt.Errorf("invalid superblock checksum, actual was %x, on disk was %x, inverted on disk was %x", actualChecksum, checksum, 0xffffffff-checksum)
+ }
+ }
+
+ return &sb, nil
+}
+
+// toBytes returns a superblock ready to be written to disk
+func (sb *superblock) toBytes() ([]byte, error) {
+ b := make([]byte, SuperblockSize)
+
+ binary.LittleEndian.PutUint16(b[0x38:0x3a], superblockSignature)
+ compatFlags, incompatFlags, roCompatFlags := sb.features.toInts()
+ binary.LittleEndian.PutUint32(b[0x5c:0x60], compatFlags)
+ binary.LittleEndian.PutUint32(b[0x60:0x64], incompatFlags)
+ binary.LittleEndian.PutUint32(b[0x64:0x68], roCompatFlags)
+
+ binary.LittleEndian.PutUint32(b[0:4], sb.inodeCount)
+
+ // block count, reserved block count and free blocks depends on whether the fs is 64-bit or not
+ blockCount := make([]byte, 8)
+ reservedBlocks := make([]byte, 8)
+ freeBlocks := make([]byte, 8)
+
+ binary.LittleEndian.PutUint64(blockCount, sb.blockCount)
+ binary.LittleEndian.PutUint64(reservedBlocks, sb.reservedBlocks)
+ binary.LittleEndian.PutUint64(freeBlocks, sb.freeBlocks)
+
+ copy(b[0x4:0x8], blockCount[0:4])
+ copy(b[0x8:0xc], reservedBlocks[0:4])
+ copy(b[0xc:0x10], freeBlocks[0:4])
+
+ if sb.features.fs64Bit {
+ copy(b[0x150:0x154], blockCount[4:8])
+ copy(b[0x154:0x158], reservedBlocks[4:8])
+ copy(b[0x158:0x15c], freeBlocks[4:8])
+ }
+
+ binary.LittleEndian.PutUint32(b[0x10:0x14], sb.freeInodes)
+ binary.LittleEndian.PutUint32(b[0x14:0x18], sb.firstDataBlock)
+ binary.LittleEndian.PutUint32(b[0x18:0x1c], uint32(math.Log2(float64(sb.blockSize))-10))
+ binary.LittleEndian.PutUint32(b[0x1c:0x20], uint32(math.Log2(float64(sb.clusterSize))))
+
+ binary.LittleEndian.PutUint32(b[0x20:0x24], sb.blocksPerGroup)
+ if sb.features.bigalloc {
+ binary.LittleEndian.PutUint32(b[0x24:0x28], sb.clustersPerGroup)
+ } else {
+ binary.LittleEndian.PutUint32(b[0x24:0x28], sb.blocksPerGroup)
+ }
+ binary.LittleEndian.PutUint32(b[0x28:0x2c], sb.inodesPerGroup)
+ mountTime := timeToBytes(sb.mountTime)
+ writeTime := timeToBytes(sb.writeTime)
+ mkfsTime := timeToBytes(sb.mkfsTime)
+ lastCheck := timeToBytes(sb.lastCheck)
+ errorFirstTime := timeToBytes(sb.errorFirstTime)
+ errorLastTime := timeToBytes(sb.errorLastTime)
+
+ // mount time low bits, high bit
+ copy(b[0x2c:0x30], mountTime[0:4])
+ b[0x274] = mountTime[4]
+ // write time low bits, high bit
+ copy(b[0x30:0x34], writeTime[0:4])
+ b[0x275] = writeTime[4]
+ // mkfs time low bits, high bit
+ copy(b[0x108:0x10c], mkfsTime[0:4])
+ b[0x276] = mkfsTime[4]
+ // last check time low bits, high bit
+ copy(b[0x40:0x44], lastCheck[0:4])
+ b[0x277] = lastCheck[4]
+ // first error time low bits, high bit
+ copy(b[0x198:0x19c], errorFirstTime[0:4])
+ b[0x278] = errorFirstTime[4]
+ // last error time low bits, high bit
+ copy(b[0x1cc:0x1d0], errorLastTime[0:4])
+ b[0x279] = errorLastTime[4]
+
+ // error codes
+ b[0x27a] = sb.errorFirstCode
+ b[0x27b] = sb.errorLastCode
+
+ binary.LittleEndian.PutUint16(b[0x34:0x36], sb.mountCount)
+ binary.LittleEndian.PutUint16(b[0x36:0x38], sb.mountsToFsck)
+
+ binary.LittleEndian.PutUint16(b[0x3a:0x3c], uint16(sb.filesystemState))
+ binary.LittleEndian.PutUint16(b[0x3c:0x3e], uint16(sb.errorBehaviour))
+
+ binary.LittleEndian.PutUint16(b[0x3e:0x40], sb.minorRevision)
+ binary.LittleEndian.PutUint32(b[0x40:0x44], uint32(sb.lastCheck.Unix()))
+ binary.LittleEndian.PutUint32(b[0x44:0x48], sb.checkInterval)
+
+ binary.LittleEndian.PutUint32(b[0x48:0x4c], uint32(sb.creatorOS))
+ binary.LittleEndian.PutUint32(b[0x4c:0x50], sb.revisionLevel)
+ binary.LittleEndian.PutUint16(b[0x50:0x52], sb.reservedBlocksDefaultUID)
+ binary.LittleEndian.PutUint16(b[0x52:0x54], sb.reservedBlocksDefaultGID)
+
+ binary.LittleEndian.PutUint32(b[0x54:0x58], sb.firstNonReservedInode)
+ binary.LittleEndian.PutUint16(b[0x58:0x5a], sb.inodeSize)
+ binary.LittleEndian.PutUint16(b[0x5a:0x5c], sb.blockGroup)
+
+ if sb.uuid != nil {
+ copy(b[0x68:0x78], sb.uuid[:])
+ }
+
+ ab, err := stringToASCIIBytes(sb.volumeLabel, 16)
+ if err != nil {
+ return nil, fmt.Errorf("error converting volume label to bytes: %v", err)
+ }
+ copy(b[0x78:0x88], ab[0:16])
+ ab, err = stringToASCIIBytes(sb.lastMountedDirectory, 64)
+ if err != nil {
+ return nil, fmt.Errorf("error last mounted directory to bytes: %v", err)
+ }
+ copy(b[0x88:0xc8], ab[0:64])
+
+ binary.LittleEndian.PutUint32(b[0xc8:0xcc], sb.algorithmUsageBitmap)
+
+ b[0xcc] = sb.preallocationBlocks
+ b[0xcd] = sb.preallocationDirectoryBlocks
+ binary.LittleEndian.PutUint16(b[0xce:0xd0], sb.reservedGDTBlocks)
+
+ if sb.journalSuperblockUUID != nil {
+ copy(b[0xd0:0xe0], sb.journalSuperblockUUID[:])
+ }
+
+ binary.LittleEndian.PutUint32(b[0xe0:0xe4], sb.journalInode)
+ binary.LittleEndian.PutUint32(b[0xe4:0xe8], sb.journalDeviceNumber)
+ binary.LittleEndian.PutUint32(b[0xe8:0xec], sb.orphanedInodesStart)
+
+ // to be safe
+ if len(sb.hashTreeSeed) < 4 {
+ sb.hashTreeSeed = append(sb.hashTreeSeed, 0, 0, 0, 0)
+ }
+ binary.LittleEndian.PutUint32(b[0xec:0xf0], sb.hashTreeSeed[0])
+ binary.LittleEndian.PutUint32(b[0xf0:0xf4], sb.hashTreeSeed[1])
+ binary.LittleEndian.PutUint32(b[0xf4:0xf8], sb.hashTreeSeed[2])
+ binary.LittleEndian.PutUint32(b[0xf8:0xfc], sb.hashTreeSeed[3])
+
+ b[0xfc] = byte(sb.hashVersion)
+
+ binary.LittleEndian.PutUint16(b[0xfe:0x100], sb.groupDescriptorSize)
+
+ binary.LittleEndian.PutUint32(b[0x100:0x104], sb.defaultMountOptions.toInt())
+ binary.LittleEndian.PutUint32(b[0x104:0x108], sb.firstMetablockGroup)
+
+ if sb.journalBackup != nil {
+ b[0xfd] = 1
+ startJournalBackup := 0x10c
+ for i := 0; i < 15; i++ {
+ start := startJournalBackup + 4*i
+ end := startJournalBackup + 4*i + 4
+ binary.LittleEndian.PutUint32(b[start:end], sb.journalBackup.iBlocks[i])
+ }
+
+ iSizeBytes := make([]byte, 8)
+ binary.LittleEndian.PutUint64(iSizeBytes, sb.journalBackup.iSize)
+ copy(b[startJournalBackup+4*16:startJournalBackup+4*17], iSizeBytes[0:4])
+ copy(b[startJournalBackup+4*15:startJournalBackup+4*16], iSizeBytes[4:8])
+ }
+
+ binary.LittleEndian.PutUint16(b[0x15c:0x15e], sb.inodeMinBytes)
+ binary.LittleEndian.PutUint16(b[0x15e:0x160], sb.inodeReserveBytes)
+ binary.LittleEndian.PutUint32(b[0x160:0x164], sb.miscFlags.toInt())
+
+ binary.LittleEndian.PutUint16(b[0x164:0x166], sb.raidStride)
+ binary.LittleEndian.PutUint32(b[0x170:0x174], sb.raidStripeWidth)
+
+ binary.LittleEndian.PutUint16(b[0x166:0x168], sb.multiMountPreventionInterval)
+ binary.LittleEndian.PutUint64(b[0x168:0x170], sb.multiMountProtectionBlock)
+
+ b[0x174] = uint8(math.Log2(float64(sb.logGroupsPerFlex)))
+
+ b[0x175] = sb.checksumType // only valid one is 1
+
+ // b[0x176:0x178] are reserved padding
+
+ binary.LittleEndian.PutUint64(b[0x178:0x180], sb.totalKBWritten)
+
+ binary.LittleEndian.PutUint32(b[0x180:0x184], sb.snapshotInodeNumber)
+ binary.LittleEndian.PutUint32(b[0x184:0x188], sb.snapshotID)
+ binary.LittleEndian.PutUint64(b[0x188:0x190], sb.snapshotReservedBlocks)
+ binary.LittleEndian.PutUint32(b[0x190:0x194], sb.snapshotStartInode)
+
+ // errors
+ binary.LittleEndian.PutUint32(b[0x194:0x198], sb.errorCount)
+ binary.LittleEndian.PutUint32(b[0x19c:0x1a0], sb.errorFirstInode)
+ binary.LittleEndian.PutUint64(b[0x1a0:0x1a8], sb.errorFirstBlock)
+ errorFirstFunctionBytes, err := stringToASCIIBytes(sb.errorFirstFunction, 32)
+ if err != nil {
+ return nil, fmt.Errorf("error converting errorFirstFunction to bytes: %v", err)
+ }
+ copy(b[0x1a8:0x1c8], errorFirstFunctionBytes)
+ binary.LittleEndian.PutUint32(b[0x1c8:0x1cc], sb.errorFirstLine)
+ binary.LittleEndian.PutUint32(b[0x1d0:0x1d4], sb.errorLastInode)
+ binary.LittleEndian.PutUint32(b[0x1d4:0x1d8], sb.errorLastLine)
+ binary.LittleEndian.PutUint64(b[0x1d8:0x1e0], sb.errorLastBlock)
+ errorLastFunctionBytes, err := stringToASCIIBytes(sb.errorLastFunction, 32)
+ if err != nil {
+ return nil, fmt.Errorf("error converting errorLastFunction to bytes: %v", err)
+ }
+ copy(b[0x1e0:0x200], errorLastFunctionBytes)
+
+ mountOptionsBytes, err := stringToASCIIBytes(sb.mountOptions, 64)
+ if err != nil {
+ return nil, fmt.Errorf("error converting mountOptions to bytes: %v", err)
+ }
+ copy(b[0x200:0x240], mountOptionsBytes)
+ binary.LittleEndian.PutUint32(b[0x240:0x244], sb.userQuotaInode)
+ binary.LittleEndian.PutUint32(b[0x244:0x248], sb.groupQuotaInode)
+ // overheadBlocks *always* is 0
+ binary.LittleEndian.PutUint32(b[0x248:0x24c], sb.overheadBlocks)
+ binary.LittleEndian.PutUint32(b[0x24c:0x250], sb.backupSuperblockBlockGroups[0])
+ binary.LittleEndian.PutUint32(b[0x250:0x254], sb.backupSuperblockBlockGroups[1])
+ // safety check of encryption algorithms
+
+ for i := 0; i < 4; i++ {
+ b[0x254+i] = byte(sb.encryptionAlgorithms[i])
+ }
+ for i := 0; i < 16; i++ {
+ b[0x258+i] = sb.encryptionSalt[i]
+ }
+ binary.LittleEndian.PutUint32(b[0x268:0x26c], sb.lostFoundInode)
+ binary.LittleEndian.PutUint32(b[0x26c:0x270], sb.projectQuotaInode)
+
+ binary.LittleEndian.PutUint32(b[0x270:0x274], sb.checksumSeed)
+
+ binary.LittleEndian.PutUint16(b[0x27c:0x27e], sb.filenameCharsetEncoding)
+ binary.LittleEndian.PutUint16(b[0x27e:0x280], sb.filenameCharsetEncodingFlags)
+ binary.LittleEndian.PutUint32(b[0x280:0x284], sb.orphanedInodeInodeNumber)
+
+ // b[0x288:0x3fc] are reserved for zero padding
+
+ // calculate the checksum and validate - we use crc32c
+ if sb.features.metadataChecksums {
+ actualChecksum := crc.CRC32c(0xffffffff, b[0:0x3fc])
+ binary.LittleEndian.PutUint32(b[0x3fc:0x400], actualChecksum)
+ }
+
+ return b, nil
+}
+
+func (sb *superblock) gdtChecksumType() gdtChecksumType {
+ var gdtChecksumTypeInFS gdtChecksumType
+ switch {
+ case sb.features.metadataChecksums:
+ gdtChecksumTypeInFS = gdtChecksumMetadata
+ case sb.features.gdtChecksum:
+ gdtChecksumTypeInFS = gdtChecksumGdt
+ default:
+ gdtChecksumTypeInFS = gdtChecksumNone
+ }
+ return gdtChecksumTypeInFS
+}
+
+func (sb *superblock) blockGroupCount() uint64 {
+ whole := sb.blockCount / uint64(sb.blocksPerGroup)
+ part := sb.blockCount % uint64(sb.blocksPerGroup)
+ if part > 0 {
+ whole++
+ }
+ return whole
+}
+
+// calculateBackupSuperblocks calculate which block groups should have backup superblocks.
+func calculateBackupSuperblockGroups(bgs int64) []int64 {
+ // calculate which block groups should have backup superblocks
+ // these are if the block group number is a power of 3, 5, or 7
+ var backupGroups []int64
+ for i := float64(0); ; i++ {
+ bg := int64(math.Pow(3, i))
+ if bg >= bgs {
+ break
+ }
+ backupGroups = append(backupGroups, bg)
+ }
+ for i := float64(0); ; i++ {
+ bg := int64(math.Pow(5, i))
+ if bg >= bgs {
+ break
+ }
+ backupGroups = append(backupGroups, bg)
+ }
+ for i := float64(0); ; i++ {
+ bg := int64(math.Pow(7, i))
+ if bg >= bgs {
+ break
+ }
+ backupGroups = append(backupGroups, bg)
+ }
+ // sort the backup groups
+ uniqBackupGroups := util.Uniqify[int64](backupGroups)
+ sort.Slice(uniqBackupGroups, func(i, j int) bool {
+ return uniqBackupGroups[i] < uniqBackupGroups[j]
+ })
+ return uniqBackupGroups
+}
+
+func bytesToTime(b ...[]byte) time.Time {
+ // ensure it is at least 8 bytes
+ var (
+ in [8]byte
+ count int
+ )
+ for _, v := range b {
+ toCopy := len(v)
+ if toCopy+count > len(in) {
+ toCopy = len(in) - count
+ }
+ copied := copy(in[count:], v[:toCopy])
+ count += copied
+ }
+ return time.Unix(int64(binary.LittleEndian.Uint64(in[:])), 0).UTC()
+}
+
+// timeToBytes convert a time.Time to an 8 byte slice. Guarantees 8 bytes
+func timeToBytes(t time.Time) []byte {
+ timestamp := t.Unix()
+ var b = make([]byte, 8)
+ binary.LittleEndian.PutUint64(b, uint64(timestamp))
+ return b
+}
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/util.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/util.go
new file mode 100644
index 00000000000..ae229430264
--- /dev/null
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/util.go
@@ -0,0 +1,106 @@
+package ext4
+
+import (
+ "fmt"
+ "strings"
+)
+
+const (
+ // KB represents one KB
+ KB int64 = 1024
+ // MB represents one MB
+ MB int64 = 1024 * KB
+ // GB represents one GB
+ GB int64 = 1024 * MB
+ // TB represents one TB
+ TB int64 = 1024 * GB
+ // PB represents one TB
+ PB int64 = 1024 * TB
+ // XB represents one Exabyte
+ XB int64 = 1024 * PB
+ // these because they are larger than int64 or uint64 can handle
+ // ZB represents one Zettabyte
+ // ZB int64 = 1024 * XB
+ // YB represents one Yottabyte
+ // YB int64 = 1024 * ZB
+ // Ext4MaxSize is maximum size of an ext4 filesystem in bytes
+ // it varies based on the block size and if we are 64-bit or 32-bit mode, but the absolute complete max
+ // is 64KB per block (128 sectors) in 64-bit mode
+ // for a max filesystem size of 1YB (yottabyte)
+ // Ext4MaxSize int64 = YB
+ // if we ever actually care, we will use math/big to do it
+ // var xb, ZB, kb, YB big.Int
+ // kb.SetUint64(1024)
+ // xb.SetUint64(uint64(XB))
+ // ZB.Mul(&xb, &kb)
+ // YB.Mul(&ZB, &kb)
+
+ // Ext4MinSize is minimum size for an ext4 filesystem
+ // it assumes a single block group with:
+ // blocksize = 2 sectors = 1KB
+ // 1 block for boot code
+ // 1 block for superblock
+ // 1 block for block group descriptors
+ // 1 block for bock and inode bitmaps and inode table
+ // 1 block for data
+ // total = 5 blocks
+ Ext4MinSize int64 = 5 * int64(SectorSize512)
+
+ // volume
+)
+
+func splitPath(p string) []string {
+ // we need to split such that each one ends in "/", except possibly the last one
+ parts := strings.Split(p, "/")
+ // eliminate empty parts
+ ret := make([]string, 0)
+ for _, sub := range parts {
+ if sub != "" {
+ ret = append(ret, sub)
+ }
+ }
+ return ret
+}
+
+// convert a string to a byte array, if all characters are valid ascii
+// always pads to the full length provided in padding. If size is less than the length of the string, it will be truncated
+func stringToASCIIBytes(s string, size int) ([]byte, error) {
+ length := len(s)
+ b := make([]byte, length)
+ // convert the name into 11 bytes
+ r := []rune(s)
+ // take the first 8 characters
+ for i := 0; i < length; i++ {
+ val := int(r[i])
+ // we only can handle values less than max byte = 255
+ if val > 255 {
+ return nil, fmt.Errorf("Non-ASCII character in name: %s", s)
+ }
+ b[i] = byte(val)
+ }
+ if len(b) < size {
+ // pad with nulls
+ for i := len(b); i < size; i++ {
+ b = append(b, 0)
+ }
+ }
+ if len(b) > size {
+ b = b[:size]
+ }
+ return b, nil
+}
+
+// minString convert []byte to string, but drop extraneous 0x0
+func minString(b []byte) string {
+ // find the last byte that is not 0x0
+ if len(b) == 0 {
+ return ""
+ }
+ index := len(b) - 1
+ for ; index >= 0; index-- {
+ if b[index] != 0 {
+ break
+ }
+ }
+ return string(b[:index+1])
+}
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/fat32/fat32.go b/vendor/github.com/diskfs/go-diskfs/filesystem/fat32/fat32.go
index 84961587205..ae6531734f8 100644
--- a/vendor/github.com/diskfs/go-diskfs/filesystem/fat32/fat32.go
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/fat32/fat32.go
@@ -375,7 +375,7 @@ func Read(file util.File, size, start, blocksize int64) (*FileSystem, error) {
_, _ = file.ReadAt(b, int64(fatSecondaryStart)+start)
fat2 := tableFromBytes(b)
if !fat.equal(fat2) {
- return nil, errors.New("fat tables did not much")
+ return nil, errors.New("fat tables did not match")
}
dataStart := uint32(fatSecondaryStart) + fat.size
@@ -493,9 +493,12 @@ func (fs *FileSystem) ReadDir(p string) ([]os.FileInfo, error) {
}
// once we have made it here, looping is done. We have found the final entry
// we need to return all of the file info
- count := len(entries)
- ret := make([]os.FileInfo, count)
- for i, e := range entries {
+ //nolint:prealloc // because the following loop may omit some entry
+ var ret []os.FileInfo
+ for _, e := range entries {
+ if e.isVolumeLabel {
+ continue
+ }
shortName := e.filenameShort
if e.lowercaseShortname {
shortName = strings.ToLower(shortName)
@@ -507,13 +510,13 @@ func (fs *FileSystem) ReadDir(p string) ([]os.FileInfo, error) {
if fileExtension != "" {
shortName = fmt.Sprintf("%s.%s", shortName, fileExtension)
}
- ret[i] = FileInfo{
+ ret = append(ret, FileInfo{
modTime: e.modifyTime,
name: e.filenameLong,
shortName: shortName,
size: int64(e.fileSize),
isDir: e.isSubdirectory,
- }
+ })
}
return ret, nil
}
@@ -850,6 +853,7 @@ func (fs *FileSystem) readDirWithMkdir(p string, doMake bool) (*Directory, []*di
if err != nil {
return nil, nil, fmt.Errorf("failed to create subdirectory %s", "/"+strings.Join(paths[0:i+1], "/"))
}
+ currentDir.modifyTime = subdirEntry.createTime
// make a basic entry for the new subdir
parentDirectoryCluster := currentDir.clusterLocation
if parentDirectoryCluster == 2 {
@@ -859,8 +863,22 @@ func (fs *FileSystem) readDirWithMkdir(p string, doMake bool) (*Directory, []*di
dir := &Directory{
directoryEntry: directoryEntry{clusterLocation: subdirEntry.clusterLocation},
entries: []*directoryEntry{
- {filenameShort: ".", isSubdirectory: true, clusterLocation: subdirEntry.clusterLocation},
- {filenameShort: "..", isSubdirectory: true, clusterLocation: parentDirectoryCluster},
+ {
+ filenameShort: ".",
+ isSubdirectory: true,
+ clusterLocation: subdirEntry.clusterLocation,
+ createTime: subdirEntry.createTime,
+ modifyTime: subdirEntry.modifyTime,
+ accessTime: subdirEntry.accessTime,
+ },
+ {
+ filenameShort: "..",
+ isSubdirectory: true,
+ clusterLocation: parentDirectoryCluster,
+ createTime: currentDir.createTime,
+ modifyTime: currentDir.modifyTime,
+ accessTime: currentDir.accessTime,
+ },
},
}
// write the new directory entries to disk
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/filesystem.go b/vendor/github.com/diskfs/go-diskfs/filesystem/filesystem.go
index fdd35313d01..2c1acfa6c36 100644
--- a/vendor/github.com/diskfs/go-diskfs/filesystem/filesystem.go
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/filesystem.go
@@ -34,4 +34,6 @@ const (
TypeISO9660
// TypeSquashfs is a squashfs filesystem
TypeSquashfs
+ // TypeExt4 is an ext4 compatible filesystem
+ TypeExt4
)
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/directoryentry.go b/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/directoryentry.go
index 1f6947811b1..d9877673041 100644
--- a/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/directoryentry.go
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/directoryentry.go
@@ -119,7 +119,7 @@ func (de *directoryEntry) toBytes(skipExt bool, ceBlocks []uint32) ([][]byte, er
filenameBytes = []byte{0x01}
default:
// first validate the filename
- err = validateFilename(de.filename, de.isSubdirectory)
+ err = validateFilename(de.filename, de.isSubdirectory, de.filesystem.suspEnabled)
if err != nil {
nametype := "filename"
if de.isSubdirectory {
@@ -189,9 +189,8 @@ func dirEntryExtensionsToBytes(extensions []directoryEntrySystemUseExtension, ma
}
b = append(b, ce.Bytes()...)
break
- } else {
- b = append(b, b2...)
}
+ b = append(b, b2...)
}
ret = append(ret, b)
if len(continuedBytes) > 0 {
@@ -482,9 +481,24 @@ func (de *directoryEntry) Size() int64 {
// Mode() FileMode // file mode bits
func (de *directoryEntry) Mode() os.FileMode {
+ for _, ext := range de.extensions {
+ if s, ok := ext.(rockRidgeSymlink); ok && !s.continued {
+ return 0o755 | os.ModeSymlink
+ }
+ }
return 0o755
}
+// Readlink tries to return the target link, only valid for symlinks
+func (de *directoryEntry) ReadLink() (string, bool) {
+ for _, ext := range de.extensions {
+ if s, ok := ext.(rockRidgeSymlink); ok && !s.continued {
+ return s.name, true
+ }
+ }
+ return "", false
+}
+
// ModTime() time.Time // modification time
func (de *directoryEntry) ModTime() time.Time {
return de.creation
@@ -534,16 +548,27 @@ func timeToBytes(t time.Time) []byte {
}
// convert a string to ascii bytes, but only accept valid d-characters
-func validateFilename(s string, isDir bool) error {
+func validateFilename(s string, isDir, suspExtension bool) error {
+ var err error
+ if suspExtension {
+ err = validateSUSPFilename(s, isDir)
+ } else {
+ err = validateISOFilename(s, isDir)
+ }
+ return err
+}
+
+// validateISOFilename validates a filename that is plain ISO9660-compliant (levels 2 & 3)
+func validateISOFilename(s string, isDir bool) error {
var err error
+ // all allowed up to 30 characters, of A-Z,0-9,_
if isDir {
- // directory only allowed up to 8 characters of A-Z,0-9,_
re := regexp.MustCompile("^[A-Z0-9_]{1,30}$")
if !re.MatchString(s) {
err = fmt.Errorf("directory name must be of up to 30 characters from A-Z0-9_")
}
} else {
- // filename only allowed up to 8 characters of A-Z,0-9,_, plus an optional '.' plus up to 3 characters of A-Z,0-9,_, plus must have ";1"
+ // filename also allowed an optional '.' plus up to 3 characters of A-Z,0-9,_, plus must have ";1"
re := regexp.MustCompile("^[A-Z0-9_]+(.[A-Z0-9_]*)?;1$")
switch {
case !re.MatchString(s):
@@ -555,6 +580,20 @@ func validateFilename(s string, isDir bool) error {
return err
}
+// validateSUSPFilename validates a filename that is Rock Ridge compliant
+func validateSUSPFilename(s string, _ bool) error {
+ var err error
+ // all allowed up to 255 characters of any kind, except null (0x0) and '/'
+ re := regexp.MustCompile(`^[^\x00/]*$`)
+ switch {
+ case len(s) > 255:
+ err = fmt.Errorf("filename must be at most 255 characters")
+ case !re.MatchString(s):
+ err = fmt.Errorf("filename must not include / or null characters")
+ }
+ return err
+}
+
// convert a string to a byte array, if all characters are valid ascii
func stringToASCIIBytes(s string) ([]byte, error) {
length := len(s)
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/directoryentrysystemuseextension.go b/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/directoryentrysystemuseextension.go
index 1d10ec54eef..8bef9f6c27d 100644
--- a/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/directoryentrysystemuseextension.go
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/directoryentrysystemuseextension.go
@@ -37,7 +37,7 @@ type suspExtension interface {
Descriptor() string
Source() string
Version() uint8
- GetFileExtensions(string, bool, bool) ([]directoryEntrySystemUseExtension, error)
+ GetFileExtensions(*finalizeFileInfo, bool, bool) ([]directoryEntrySystemUseExtension, error)
GetFinalizeExtensions(*finalizeFileInfo) ([]directoryEntrySystemUseExtension, error)
Relocatable() bool
Relocate(map[string]*finalizeFileInfo) ([]*finalizeFileInfo, map[string]*finalizeFileInfo, error)
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/eltorito.go b/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/eltorito.go
index a2cb818be66..a598ef6695f 100644
--- a/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/eltorito.go
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/eltorito.go
@@ -33,7 +33,7 @@ const (
elToritoDefaultCatalogRR = "boot.catalog"
)
-// Emulation what emulation should be used for booting, normally none
+// Emulation that should be used for booting, normally none
type Emulation uint8
const (
@@ -55,7 +55,7 @@ type ElTorito struct {
BootCatalog string
// HideBootCatalog if the boot catalog should be hidden in the file system. Defaults to false
HideBootCatalog bool
- // Entries list of ElToritoEntry boot entires
+ // Entries list of ElToritoEntry boot entries
Entries []*ElToritoEntry
// Platform supported platform
Platform Platform
@@ -72,11 +72,11 @@ type ElToritoEntry struct {
// option `-boot-info-table`. Unlike genisoimage, does not modify the file in the
// filesystem, but inserts it on the fly.
BootTable bool
- // SystemType type of system the partition is, accordinng to the MBR standard
+ // SystemType type of system the partition is, according to the MBR standard
SystemType mbr.Type
// LoadSize how many blocks of BootFile to load, equivalent to genisoimage option `-boot-load-size`
LoadSize uint16
- size uint16
+ size uint32
location uint32
}
@@ -127,7 +127,7 @@ func (e *ElToritoEntry) headerBytes(last bool, entries uint16) []byte {
func (e *ElToritoEntry) entryBytes() []byte {
blocks := e.LoadSize
if blocks == 0 {
- blocks = e.size / 512
+ blocks = uint16(e.size / 512)
if e.size%512 > 1 {
blocks++
}
@@ -153,7 +153,7 @@ func (e *ElToritoEntry) generateBootTable(pvdSector uint32, p string) ([]byte, e
b := make([]byte, 56)
binary.LittleEndian.PutUint32(b[0:4], pvdSector)
binary.LittleEndian.PutUint32(b[4:8], e.location)
- binary.LittleEndian.PutUint32(b[8:12], uint32(e.size))
+ binary.LittleEndian.PutUint32(b[8:12], e.size)
// Checksum - simply add up all 32-bit words beginning at byte position 64
f, err := os.Open(p)
if err != nil {
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/file.go b/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/file.go
index 7d6bb519357..83dff8b7966 100644
--- a/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/file.go
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/file.go
@@ -64,7 +64,7 @@ func (fl *File) Read(b []byte) (int, error) {
// Write writes len(b) bytes to the File.
//
// you cannot write to an iso, so this returns an error
-func (fl *File) Write(p []byte) (int, error) {
+func (fl *File) Write(_ []byte) (int, error) {
return 0, fmt.Errorf("cannot write to a read-only iso filesystem")
}
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/finalize.go b/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/finalize.go
index c055cf4d350..aaa32629b9f 100644
--- a/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/finalize.go
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/finalize.go
@@ -3,6 +3,7 @@ package iso9660
import (
"fmt"
"io"
+ "io/fs"
"os"
"path"
"path/filepath"
@@ -12,11 +13,13 @@ import (
"time"
"github.com/diskfs/go-diskfs/util"
+ "github.com/djherbis/times"
)
const (
dataStartSector = 16
defaultVolumeIdentifier = "ISOIMAGE"
+ elToritoBootTableOffset = 8
)
// FinalizeOptions options to pass to finalize
@@ -41,6 +44,14 @@ type FinalizeOptions struct {
// IsDir() bool // abbreviation for Mode().IsDir()
// Sys() interface{} // underlying data source (can return nil)
//
+// Also supports:
+//
+// AccessTime() time.Time
+// ChangeTime() time.Time
+// Nlink() uint32 // number of hardlinks, if supported
+// Uid() uint32 // uid, if supported
+// Gid() uint32 // gid, if supported
+//
//nolint:structcheck // keep unused members so that we can know their references
type finalizeFileInfo struct {
path string
@@ -56,6 +67,8 @@ type finalizeFileInfo struct {
size int64
mode os.FileMode
modTime time.Time
+ accessTime time.Time
+ changeTime time.Time
isDir bool
isRoot bool
bytes [][]byte
@@ -64,7 +77,55 @@ type finalizeFileInfo struct {
trueParent *finalizeFileInfo
trueChild *finalizeFileInfo
elToritoEntry *ElToritoEntry
- content []byte
+ linkTarget string
+ uid uint32
+ gid uint32
+ nlink uint32
+ // content in memory content of file. If this is anything other than nil, including a zero-length slice,
+ // then this content is used, rather than anything on disk.
+ content []byte
+ serial uint64
+}
+
+func finalizeFileInfoFromFile(p, fullPath string, fi fs.FileInfo) (*finalizeFileInfo, error) {
+ isRoot := p == "."
+ name := fi.Name()
+ shortname, _ := calculateShortnameExtension(name)
+
+ if isRoot {
+ name = string([]byte{0x00})
+ shortname = name
+ }
+ t, err := times.Lstat(fullPath)
+ if err != nil {
+ return nil, fmt.Errorf("could not get times information for %s: %w", fullPath, err)
+ }
+ mode := fi.Mode()
+ var target string
+ if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
+ target, err = os.Readlink(fullPath)
+ if err != nil {
+ return nil, fmt.Errorf("unable to read link for %s: %w", fullPath, err)
+ }
+ }
+ nlink, uid, gid := statt(fi)
+
+ return &finalizeFileInfo{
+ path: p,
+ name: name,
+ isDir: fi.IsDir(),
+ isRoot: isRoot,
+ modTime: fi.ModTime(),
+ accessTime: t.AccessTime(),
+ changeTime: t.ChangeTime(),
+ mode: mode,
+ size: fi.Size(),
+ shortname: shortname,
+ linkTarget: target,
+ uid: uid,
+ gid: gid,
+ nlink: nlink,
+ }, nil
}
func (fi *finalizeFileInfo) Name() string {
@@ -100,8 +161,26 @@ func (fi *finalizeFileInfo) updateDepth(depth int) {
}
}
}
+func (fi *finalizeFileInfo) AccessTime() time.Time {
+ return fi.accessTime
+}
+func (fi *finalizeFileInfo) ChangeTime() time.Time {
+ return fi.changeTime
+}
+func (fi *finalizeFileInfo) LinkTarget() string {
+ return fi.linkTarget
+}
+func (fi *finalizeFileInfo) Nlink() uint32 {
+ return fi.nlink
+}
+func (fi *finalizeFileInfo) UID() uint32 {
+ return fi.uid
+}
+func (fi *finalizeFileInfo) GID() uint32 {
+ return fi.gid
+}
-func (fi *finalizeFileInfo) toDirectoryEntry(fs *FileSystem, isSelf, isParent bool) (*directoryEntry, error) {
+func (fi *finalizeFileInfo) toDirectoryEntry(fsm *FileSystem, isSelf, isParent bool) (*directoryEntry, error) {
de := &directoryEntry{
extAttrSize: 0,
location: fi.location,
@@ -116,18 +195,22 @@ func (fi *finalizeFileInfo) toDirectoryEntry(fs *FileSystem, isSelf, isParent bo
isSelf: isSelf,
isParent: isParent,
volumeSequence: 1,
- filesystem: fs,
+ filesystem: fsm,
// we keep the full filename until after processing
filename: fi.Name(),
}
// if it is root, and we have susp enabled, add the necessary entries
- if fs.suspEnabled {
+ if fsm.suspEnabled {
if fi.isRoot && isSelf {
de.extensions = append(de.extensions, directoryEntrySystemUseExtensionSharingProtocolIndicator{skipBytes: 0})
}
// add appropriate PX, TF, SL, NM extensions
- for _, e := range fs.suspExtensions {
- ext, err := e.GetFileExtensions(path.Join(fs.workspace, fi.path), isSelf, isParent)
+ for _, e := range fsm.suspExtensions {
+ var (
+ ext []directoryEntrySystemUseExtension
+ err error
+ )
+ ext, err = e.GetFileExtensions(fi, isSelf, isParent)
if err != nil {
return nil, fmt.Errorf("error getting extensions for %s at path %s: %v", e.ID(), fi.path, err)
}
@@ -140,14 +223,14 @@ func (fi *finalizeFileInfo) toDirectoryEntry(fs *FileSystem, isSelf, isParent bo
}
if fi.isRoot && isSelf {
- for _, e := range fs.suspExtensions {
+ for _, e := range fsm.suspExtensions {
de.extensions = append(de.extensions, directoryEntrySystemUseExtensionReference{id: e.ID(), descriptor: e.Descriptor(), source: e.Source(), extensionVersion: e.Version()})
}
}
}
return de, nil
}
-func (fi *finalizeFileInfo) toDirectory(fs *FileSystem) (*Directory, error) {
+func (fi *finalizeFileInfo) toDirectory(fsm *FileSystem) (*Directory, error) {
// also need to add self and parent to it
var (
self, parent, dirEntry *directoryEntry
@@ -156,7 +239,7 @@ func (fi *finalizeFileInfo) toDirectory(fs *FileSystem) (*Directory, error) {
if !fi.IsDir() {
return nil, fmt.Errorf("cannot convert a file entry to a directtory")
}
- self, err = fi.toDirectoryEntry(fs, true, false)
+ self, err = fi.toDirectoryEntry(fsm, true, false)
if err != nil {
return nil, fmt.Errorf("could not convert self entry %s to dirEntry: %v", fi.path, err)
}
@@ -167,14 +250,14 @@ func (fi *finalizeFileInfo) toDirectory(fs *FileSystem) (*Directory, error) {
if fi.isRoot {
parentEntry = fi
}
- parent, err = parentEntry.toDirectoryEntry(fs, false, true)
+ parent, err = parentEntry.toDirectoryEntry(fsm, false, true)
if err != nil {
return nil, fmt.Errorf("could not convert parent entry %s to dirEntry: %v", fi.parent.path, err)
}
entries := []*directoryEntry{self, parent}
for _, child := range fi.children {
- dirEntry, err = child.toDirectoryEntry(fs, false, false)
+ dirEntry, err = child.toDirectoryEntry(fsm, false, false)
if err != nil {
return nil, fmt.Errorf("could not convert child entry %s to dirEntry: %v", child.path, err)
}
@@ -188,10 +271,10 @@ func (fi *finalizeFileInfo) toDirectory(fs *FileSystem) (*Directory, error) {
}
// calculate the size of a directory entry single record
-func (fi *finalizeFileInfo) calculateRecordSize(fs *FileSystem, isSelf, isParent bool) (dirEntrySize, continuationBlocksSize int, err error) {
+func (fi *finalizeFileInfo) calculateRecordSize(fsm *FileSystem, isSelf, isParent bool) (dirEntrySize, continuationBlocksSize int, err error) {
// we do not actually need the the continuation blocks to calculate size, just length, so use an empty slice
extTmpBlocks := make([]uint32, 100)
- dirEntry, err := fi.toDirectoryEntry(fs, isSelf, isParent)
+ dirEntry, err := fi.toDirectoryEntry(fsm, isSelf, isParent)
if err != nil {
return 0, 0, fmt.Errorf("could not convert to dirEntry: %v", err)
}
@@ -205,21 +288,21 @@ func (fi *finalizeFileInfo) calculateRecordSize(fs *FileSystem, isSelf, isParent
}
// calculate the size of a directory, similar to a file size
-func (fi *finalizeFileInfo) calculateDirectorySize(fs *FileSystem) (dirEntrySize, continuationBlocksSize int, err error) {
+func (fi *finalizeFileInfo) calculateDirectorySize(fsm *FileSystem) (dirEntrySize, continuationBlocksSize int, err error) {
var (
recSize, recCE int
)
if !fi.IsDir() {
- return 0, 0, fmt.Errorf("cannot convert a file entry to a directtory")
+ return 0, 0, fmt.Errorf("cannot convert a file entry to a directory")
}
- recSize, recCE, err = fi.calculateRecordSize(fs, true, false)
+ recSize, recCE, err = fi.calculateRecordSize(fsm, true, false)
if err != nil {
return 0, 0, fmt.Errorf("could not calculate self entry size %s: %v", fi.path, err)
}
dirEntrySize += recSize
continuationBlocksSize += recCE
- recSize, recCE, err = fi.calculateRecordSize(fs, false, true)
+ recSize, recCE, err = fi.calculateRecordSize(fsm, false, true)
if err != nil {
return 0, 0, fmt.Errorf("could not calculate parent entry size %s: %v", fi.path, err)
}
@@ -228,13 +311,13 @@ func (fi *finalizeFileInfo) calculateDirectorySize(fs *FileSystem) (dirEntrySize
for _, e := range fi.children {
// get size of data and CE blocks
- recSize, recCE, err = e.calculateRecordSize(fs, false, false)
+ recSize, recCE, err = e.calculateRecordSize(fsm, false, false)
if err != nil {
return 0, 0, fmt.Errorf("could not calculate child %s entry size %s: %v", e.path, fi.path, err)
}
// do not go over a block boundary; pad if necessary
newSize := dirEntrySize + recSize
- blocksize := int(fs.blocksize)
+ blocksize := int(fsm.blocksize)
left := blocksize - dirEntrySize%blocksize
if left != 0 && newSize/blocksize > dirEntrySize/blocksize {
dirEntrySize += left
@@ -341,21 +424,21 @@ func (fi *finalizeFileInfo) addChild(entry *finalizeFileInfo) {
// Finalize finalize a read-only filesystem by writing it out to a read-only format
//
//nolint:gocyclo // this finalize function is complex and needs to be. We might be better off refactoring it to multiple functions, but it does not buy all that much.
-func (fs *FileSystem) Finalize(options FinalizeOptions) error {
- if fs.workspace == "" {
+func (fsm *FileSystem) Finalize(options FinalizeOptions) error {
+ if fsm.workspace == "" {
return fmt.Errorf("cannot finalize an already finalized filesystem")
}
// did we ask for susp?
if options.RockRidge {
- fs.suspEnabled = true
- fs.suspExtensions = append(fs.suspExtensions, getRockRidgeExtension(rockRidge112))
+ fsm.suspEnabled = true
+ fsm.suspExtensions = append(fsm.suspExtensions, getRockRidgeExtension(rockRidge112))
}
/*
There is nothing in the iso9660 spec about the order of directories and files,
other than that they must be accessible in the location specified in directory entry and/or path table
- However, most implementations seem to it as follows:
+ However, most implementations seem to do it as follows:
- each directory follows its parent
- data (i.e. file) sectors in each directory are immediately after its directory and immediately before the next sibling directory to its parent
@@ -380,11 +463,11 @@ func (fs *FileSystem) Finalize(options FinalizeOptions) error {
10- write volume descriptor set terminator
*/
- f := fs.file
- blocksize := int(fs.blocksize)
+ f := fsm.file
+ blocksize := int(fsm.blocksize)
// 1- blank out sectors 0-15
- b := make([]byte, dataStartSector*fs.blocksize)
+ b := make([]byte, dataStartSector*fsm.blocksize)
n, err := f.WriteAt(b, 0)
if err != nil {
return fmt.Errorf("could not write blank system area: %v", err)
@@ -394,7 +477,7 @@ func (fs *FileSystem) Finalize(options FinalizeOptions) error {
}
// 3- build out file tree
- fileList, dirList, err := walkTree(fs.Workspace())
+ fileList, dirList, err := walkTree(fsm.Workspace())
if err != nil {
return fmt.Errorf("error walking tree: %v", err)
}
@@ -406,9 +489,9 @@ func (fs *FileSystem) Finalize(options FinalizeOptions) error {
// if we need to relocate directories, must do them here, before finalizing order and sizes
// do not bother if enabled DeepDirectories, i.e. non-ISO9660 compliant
if !options.DeepDirectories {
- if fs.suspEnabled {
+ if fsm.suspEnabled {
var handler suspExtension
- for _, e := range fs.suspExtensions {
+ for _, e := range fsm.suspExtensions {
if e.Relocatable() {
handler = e
break
@@ -431,7 +514,7 @@ func (fs *FileSystem) Finalize(options FinalizeOptions) error {
// convert sizes to required blocks for files
for _, e := range fileList {
- e.blocks = calculateBlocks(e.size, fs.blocksize)
+ e.blocks = calculateBlocks(e.size, fsm.blocksize)
}
// we now have list of all of the files and directories and their properties, as well as children of every directory
@@ -467,14 +550,18 @@ func (fs *FileSystem) Finalize(options FinalizeOptions) error {
shortname, extension := calculateShortnameExtension(path.Base(catname))
// break down the catalog basename from the parent dir
catSize := int64(len(bootcat))
+ now := time.Now()
catEntry = &finalizeFileInfo{
- content: bootcat,
- size: catSize,
- path: catname,
- name: path.Base(catname),
- shortname: shortname,
- extension: extension,
- blocks: calculateBlocks(catSize, fs.blocksize),
+ content: bootcat,
+ size: catSize,
+ path: catname,
+ name: path.Base(catname),
+ shortname: shortname,
+ extension: extension,
+ blocks: calculateBlocks(catSize, fsm.blocksize),
+ modTime: now,
+ accessTime: now,
+ changeTime: now,
}
// make it the first file
files = append([]*finalizeFileInfo{catEntry}, files...)
@@ -503,8 +590,11 @@ func (fs *FileSystem) Finalize(options FinalizeOptions) error {
return fmt.Errorf("unable to find image child %s: %v", e.BootFile, err)
}
}
+ if child == nil {
+ return fmt.Errorf("unable to find image child %s: %v", e.BootFile, err)
+ }
// save the child so we can add location late
- e.size = uint16(child.size)
+ e.size = uint32(child.size)
child.elToritoEntry = e
}
}
@@ -512,7 +602,7 @@ func (fs *FileSystem) Finalize(options FinalizeOptions) error {
var size, ceBlocks int
for _, dir := range dirs {
dir.location = location
- size, ceBlocks, err = dir.calculateDirectorySize(fs)
+ size, ceBlocks, err = dir.calculateDirectorySize(fsm)
if err != nil {
return fmt.Errorf("unable to calculate size of directory for %s: %v", dir.path, err)
}
@@ -566,7 +656,7 @@ func (fs *FileSystem) Finalize(options FinalizeOptions) error {
for _, e := range dirs {
writeAt := int64(e.location) * int64(blocksize)
var d *Directory
- d, err = e.toDirectory(fs)
+ d, err = e.toDirectory(fsm)
if err != nil {
return fmt.Errorf("unable to convert entry to directory: %v", err)
}
@@ -601,13 +691,14 @@ func (fs *FileSystem) Finalize(options FinalizeOptions) error {
}()
for _, e := range files {
var (
- from *os.File
- copied int
+ from *os.File
+ copied int
+ bootTableMinSize int
)
writeAt := int64(e.location) * int64(blocksize)
if e.content == nil {
// for file, just copy the data across
- from, err = os.Open(path.Join(fs.workspace, e.path))
+ from, err = os.Open(path.Join(fsm.workspace, e.path))
if err != nil {
return fmt.Errorf("failed to open file for reading %s: %v", e.path, err)
}
@@ -617,21 +708,23 @@ func (fs *FileSystem) Finalize(options FinalizeOptions) error {
var count int
// first 8 bytes
- count, err = copyFileData(from, f, 0, writeAt, 8)
+ count, err = copyFileData(from, f, 0, writeAt, elToritoBootTableOffset)
if err != nil {
return fmt.Errorf("failed to copy first bytes 0-8 of boot file to disk %s: %v", e.path, err)
}
copied += count
// insert El Torito Boot Information Table
- bootTable, err := e.elToritoEntry.generateBootTable(dataStartSector, path.Join(fs.workspace, e.path))
+ bootTable, err := e.elToritoEntry.generateBootTable(dataStartSector, path.Join(fsm.workspace, e.path))
if err != nil {
return fmt.Errorf("failed to generate boot table for %s: %v", e.path, err)
}
- count, err = f.WriteAt(bootTable, writeAt+8)
+ count, err = f.WriteAt(bootTable, writeAt+elToritoBootTableOffset)
if err != nil {
return fmt.Errorf("failed to write 56 byte boot table to disk %s: %v", e.path, err)
}
copied += count
+ // file with boot table file must be a minimum of boot table size and the offset
+ bootTableMinSize = count
// remainder of file
count, err = copyFileData(from, f, 64, writeAt+64, 0)
if err != nil {
@@ -644,8 +737,12 @@ func (fs *FileSystem) Finalize(options FinalizeOptions) error {
return fmt.Errorf("failed to copy file to disk %s: %v", e.path, err)
}
}
- if copied != int(e.Size()) {
- return fmt.Errorf("error copying file %s to disk, copied %d bytes, expected %d", e.path, copied, e.Size())
+ targetSize := e.Size()
+ if targetSize < int64(bootTableMinSize) {
+ targetSize = int64(bootTableMinSize)
+ }
+ if copied != int(targetSize) {
+ return fmt.Errorf("error copying file %s to disk, copied %d bytes, expected %d", e.path, copied, targetSize)
}
} else {
copied = len(e.content)
@@ -665,7 +762,7 @@ func (fs *FileSystem) Finalize(options FinalizeOptions) error {
location = dataStartSector
// create and write the primary volume descriptor, supplementary and boot, and volume descriptor set terminator
now := time.Now()
- rootDE, err := root.toDirectoryEntry(fs, true, false)
+ rootDE, err := root.toDirectoryEntry(fsm, true, false)
if err != nil {
return fmt.Errorf("could not convert root entry for primary volume descriptor to dirEntry: %v", err)
}
@@ -676,7 +773,7 @@ func (fs *FileSystem) Finalize(options FinalizeOptions) error {
volumeSize: totalSize,
setSize: 1,
sequenceNumber: 1,
- blocksize: uint16(fs.blocksize),
+ blocksize: uint16(fsm.blocksize),
pathTableSize: uint32(pathTableSize),
pathTableLLocation: pathTableLLocation,
pathTableLOptionalLocation: 0,
@@ -710,10 +807,10 @@ func (fs *FileSystem) Finalize(options FinalizeOptions) error {
b = terminator.toBytes()
_, _ = f.WriteAt(b, int64(location)*int64(blocksize))
- _ = os.RemoveAll(fs.workspace)
+ _ = os.RemoveAll(fsm.workspace)
// finish by setting as finalized
- fs.workspace = ""
+ fsm.workspace = ""
return nil
}
@@ -772,16 +869,16 @@ func sortFinalizeFileInfoPathTable(left, right *finalizeFileInfo) bool {
// create a path table from a slice of *finalizeFileInfo that are directories
func createPathTable(fi []*finalizeFileInfo) *pathTable {
// copy so we do not modify the original
- fs := make([]*finalizeFileInfo, len(fi))
- copy(fs, fi)
+ fis := make([]*finalizeFileInfo, len(fi))
+ copy(fis, fi)
// sort via the rules
- sort.Slice(fs, func(i, j int) bool {
- return sortFinalizeFileInfoPathTable(fs[i], fs[j])
+ sort.Slice(fis, func(i, j int) bool {
+ return sortFinalizeFileInfoPathTable(fis[i], fis[j])
})
indexMap := make(map[*finalizeFileInfo]int)
// now that it is sorted, create the ordered path table entries
entries := make([]*pathTableEntry, 0)
- for i, e := range fs {
+ for i, e := range fis {
name := e.Name()
nameSize := len(name)
size := 8 + uint16(nameSize)
@@ -811,28 +908,34 @@ func createPathTable(fi []*finalizeFileInfo) *pathTable {
}
func walkTree(workspace string) ([]*finalizeFileInfo, map[string]*finalizeFileInfo, error) {
- cwd, err := os.Getwd()
- if err != nil {
- return nil, nil, fmt.Errorf("could not get pwd: %v", err)
- }
- // make everything relative to the workspace
- _ = os.Chdir(workspace)
- dirList := make(map[string]*finalizeFileInfo)
- fileList := make([]*finalizeFileInfo, 0)
- var entry *finalizeFileInfo
- err = filepath.Walk(".", func(fp string, fi os.FileInfo, err error) error {
+ var (
+ dirList = make(map[string]*finalizeFileInfo)
+ fileList = make([]*finalizeFileInfo, 0)
+ entry *finalizeFileInfo
+ serial uint64
+ )
+ err := filepath.WalkDir(workspace, func(actualPath string, d fs.DirEntry, err error) error {
if err != nil {
- return fmt.Errorf("error walking path %s: %v", fp, err)
+ return fmt.Errorf("error walking path %s: %v", actualPath, err)
+ }
+ fp := strings.TrimPrefix(actualPath, workspace)
+ fp = strings.TrimPrefix(fp, string(filepath.Separator))
+ if fp == "" {
+ fp = "."
}
- isRoot := fp == "."
- name := fi.Name()
- shortname, extension := calculateShortnameExtension(name)
+ name := d.Name()
+ _, extension := calculateShortnameExtension(name)
- if isRoot {
- name = string([]byte{0x00})
- shortname = name
+ fi, err := d.Info()
+ if err != nil {
+ return fmt.Errorf("could not get file info for %s: %v", fp, err)
+ }
+ entry, err = finalizeFileInfoFromFile(fp, actualPath, fi)
+ if err != nil {
+ return err
}
- entry = &finalizeFileInfo{path: fp, name: name, isDir: fi.IsDir(), isRoot: isRoot, modTime: fi.ModTime(), mode: fi.Mode(), size: fi.Size(), shortname: shortname}
+ entry.serial = serial
+ serial++
// we will have to save it as its parent
parentDir := filepath.Dir(fp)
@@ -841,7 +944,7 @@ func walkTree(workspace string) ([]*finalizeFileInfo, map[string]*finalizeFileIn
if fi.IsDir() {
entry.children = make([]*finalizeFileInfo, 0, 20)
dirList[fp] = entry
- if !isRoot {
+ if !entry.isRoot {
parentDirInfo.children = append(parentDirInfo.children, entry)
dirList[parentDir] = parentDirInfo
}
@@ -858,8 +961,6 @@ func walkTree(workspace string) ([]*finalizeFileInfo, map[string]*finalizeFileIn
if err != nil {
return nil, nil, err
}
- // reset the workspace
- _ = os.Chdir(cwd)
return fileList, dirList, nil
}
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/iso9660.go b/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/iso9660.go
index c6c1321c646..64b28ed2ee1 100644
--- a/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/iso9660.go
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/iso9660.go
@@ -34,15 +34,15 @@ type FileSystem struct {
}
// Equal compare if two filesystems are equal
-func (fs *FileSystem) Equal(a *FileSystem) bool {
- localMatch := fs.file == a.file && fs.size == a.size
- vdMatch := fs.volumes.equal(&a.volumes)
+func (fsm *FileSystem) Equal(a *FileSystem) bool {
+ localMatch := fsm.file == a.file && fsm.size == a.size
+ vdMatch := fsm.volumes.equal(&a.volumes)
return localMatch && vdMatch
}
// Workspace get the workspace path
-func (fs *FileSystem) Workspace() string {
- return fs.workspace
+func (fsm *FileSystem) Workspace() string {
+ return fsm.workspace
}
// Create creates an ISO9660 filesystem in a given directory
@@ -283,7 +283,7 @@ func Read(file util.File, size, start, blocksize int64) (*FileSystem, error) {
}
// Type returns the type code for the filesystem. Always returns filesystem.TypeFat32
-func (fs *FileSystem) Type() filesystem.Type {
+func (fsm *FileSystem) Type() filesystem.Type {
return filesystem.TypeISO9660
}
@@ -293,11 +293,11 @@ func (fs *FileSystem) Type() filesystem.Type {
// * It will not return an error if the path already exists
//
// if readonly and not in workspace, will return an error
-func (fs *FileSystem) Mkdir(p string) error {
- if fs.workspace == "" {
+func (fsm *FileSystem) Mkdir(p string) error {
+ if fsm.workspace == "" {
return fmt.Errorf("cannot write to read-only filesystem")
}
- err := os.MkdirAll(path.Join(fs.workspace, p), 0o755)
+ err := os.MkdirAll(path.Join(fsm.workspace, p), 0o755)
if err != nil {
return fmt.Errorf("could not create directory %s: %v", p, err)
}
@@ -310,12 +310,12 @@ func (fs *FileSystem) Mkdir(p string) error {
// Returns a slice of os.FileInfo with all of the entries in the directory.
//
// Will return an error if the directory does not exist or is a regular file and not a directory
-func (fs *FileSystem) ReadDir(p string) ([]os.FileInfo, error) {
+func (fsm *FileSystem) ReadDir(p string) ([]os.FileInfo, error) {
var fi []os.FileInfo
// non-workspace: read from iso9660
// workspace: read from regular filesystem
- if fs.workspace != "" {
- fullPath := path.Join(fs.workspace, p)
+ if fsm.workspace != "" {
+ fullPath := path.Join(fsm.workspace, p)
// read the entries
dirEntries, err := os.ReadDir(fullPath)
if err != nil {
@@ -329,7 +329,7 @@ func (fs *FileSystem) ReadDir(p string) ([]os.FileInfo, error) {
fi = append(fi, info)
}
} else {
- dirEntries, err := fs.readDirectory(p)
+ dirEntries, err := fsm.readDirectory(p)
if err != nil {
return nil, fmt.Errorf("error reading directory %s: %v", p, err)
}
@@ -351,7 +351,7 @@ func (fs *FileSystem) ReadDir(p string) ([]os.FileInfo, error) {
// accepts normal os.OpenFile flags
//
// returns an error if the file does not exist
-func (fs *FileSystem) OpenFile(p string, flag int) (filesystem.File, error) {
+func (fsm *FileSystem) OpenFile(p string, flag int) (filesystem.File, error) {
var f filesystem.File
var err error
@@ -366,14 +366,14 @@ func (fs *FileSystem) OpenFile(p string, flag int) (filesystem.File, error) {
// cannot open to write or append or create if we do not have a workspace
writeMode := flag&os.O_WRONLY != 0 || flag&os.O_RDWR != 0 || flag&os.O_APPEND != 0 || flag&os.O_CREATE != 0 || flag&os.O_TRUNC != 0 || flag&os.O_EXCL != 0
- if fs.workspace == "" {
+ if fsm.workspace == "" {
if writeMode {
return nil, fmt.Errorf("cannot write to read-only filesystem")
}
// get the directory entries
var entries []*directoryEntry
- entries, err = fs.readDirectory(dir)
+ entries, err = fsm.readDirectory(dir)
if err != nil {
return nil, fmt.Errorf("could not read directory entries for %s", dir)
}
@@ -405,7 +405,7 @@ func (fs *FileSystem) OpenFile(p string, flag int) (filesystem.File, error) {
offset: 0,
}
} else {
- f, err = os.OpenFile(path.Join(fs.workspace, p), flag, 0o644)
+ f, err = os.OpenFile(path.Join(fsm.workspace, p), flag, 0o644)
if err != nil {
return nil, fmt.Errorf("target file %s does not exist: %v", p, err)
}
@@ -415,7 +415,7 @@ func (fs *FileSystem) OpenFile(p string, flag int) (filesystem.File, error) {
}
// readDirectory - read directory entry on iso only (not workspace)
-func (fs *FileSystem) readDirectory(p string) ([]*directoryEntry, error) {
+func (fsm *FileSystem) readDirectory(p string) ([]*directoryEntry, error) {
var (
location, size uint32
err error
@@ -424,7 +424,7 @@ func (fs *FileSystem) readDirectory(p string) ([]*directoryEntry, error) {
// try from path table, then walk the directory tree, unless we were told explicitly not to
usePathtable := true
- for _, e := range fs.suspExtensions {
+ for _, e := range fsm.suspExtensions {
usePathtable = e.UsePathtable()
if !usePathtable {
break
@@ -432,14 +432,14 @@ func (fs *FileSystem) readDirectory(p string) ([]*directoryEntry, error) {
}
if usePathtable {
- location = fs.pathTable.getLocation(p)
+ location = fsm.pathTable.getLocation(p)
}
// if we found it, read the first directory entry to get the size
if location != 0 {
// we need 4 bytes to read the size of the directory; it is at offset 10 from beginning
dirb := make([]byte, 4)
- n, err = fs.file.ReadAt(dirb, int64(location)*fs.blocksize+10)
+ n, err = fsm.file.ReadAt(dirb, int64(location)*fsm.blocksize+10)
if err != nil {
return nil, fmt.Errorf("could not read directory %s: %v", p, err)
}
@@ -451,7 +451,7 @@ func (fs *FileSystem) readDirectory(p string) ([]*directoryEntry, error) {
} else {
// if we could not find the location in the path table, try reading directly from the disk
// it is slow, but this is how Unix does it, since many iso creators *do* create illegitimate disks
- location, size, err = fs.rootDir.getLocation(p)
+ location, size, err = fsm.rootDir.getLocation(p)
if err != nil {
return nil, fmt.Errorf("unable to read directory tree for %s: %v", p, err)
}
@@ -464,7 +464,7 @@ func (fs *FileSystem) readDirectory(p string) ([]*directoryEntry, error) {
// we have a location, let's read the directories from it
b := make([]byte, size)
- n, err = fs.file.ReadAt(b, int64(location)*fs.blocksize)
+ n, err = fsm.file.ReadAt(b, int64(location)*fsm.blocksize)
if err != nil {
return nil, fmt.Errorf("could not read directory entries for %s: %v", p, err)
}
@@ -472,7 +472,7 @@ func (fs *FileSystem) readDirectory(p string) ([]*directoryEntry, error) {
return nil, fmt.Errorf("reading directory %s returned %d bytes read instead of expected %d", p, n, size)
}
// parse the entries
- entries, err := parseDirEntries(b, fs)
+ entries, err := parseDirEntries(b, fsm)
if err != nil {
return nil, fmt.Errorf("could not parse directory entries for %s: %v", p, err)
}
@@ -488,13 +488,13 @@ func validateBlocksize(blocksize int64) error {
}
}
-func (fs *FileSystem) Label() string {
- if fs.volumes.primary == nil {
+func (fsm *FileSystem) Label() string {
+ if fsm.volumes.primary == nil {
return ""
}
- return fs.volumes.primary.volumeIdentifier
+ return fsm.volumes.primary.volumeIdentifier
}
-func (fs *FileSystem) SetLabel(string) error {
+func (fsm *FileSystem) SetLabel(string) error {
return fmt.Errorf("ISO9660 filesystem is read-only")
}
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/rockridge.go b/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/rockridge.go
index 8a8c80a1340..ed20a1d7971 100644
--- a/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/rockridge.go
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/rockridge.go
@@ -6,8 +6,6 @@ import (
"os"
"sort"
"time"
-
- "gopkg.in/djherbis/times.v1"
)
const (
@@ -96,53 +94,37 @@ func (r *rockRidgeExtension) GetFilename(de *directoryEntry) (string, error) {
}
return name, nil
}
-func (r *rockRidgeExtension) GetFileExtensions(fp string, isSelf, isParent bool) ([]directoryEntrySystemUseExtension, error) {
+func (r *rockRidgeExtension) GetFileExtensions(ffi *finalizeFileInfo, isSelf, isParent bool) ([]directoryEntrySystemUseExtension, error) {
// we always do PX, TF, NM, SL order
ret := []directoryEntrySystemUseExtension{}
- // do not follow symlinks
- fi, err := os.Lstat(fp)
- if err != nil {
- return nil, fmt.Errorf("error reading file %s: %v", fp, err)
- }
-
- t, err := times.Lstat(fp)
- if err != nil {
- return nil, fmt.Errorf("error reading times %s: %v", fp, err)
- }
// PX
- nlink, uid, gid := statt(fi)
- mtime := fi.ModTime()
- atime := t.AccessTime()
- ctime := t.ChangeTime()
+ mtime := ffi.ModTime()
ret = append(ret, rockRidgePosixAttributes{
- mode: fi.Mode(),
- linkCount: nlink,
- uid: uid,
- gid: gid,
+ mode: ffi.Mode(),
+ linkCount: ffi.Nlink(),
+ uid: ffi.UID(),
+ gid: ffi.GID(),
length: r.pxLength,
+ serial: ffi.serial,
})
// TF
tf := rockRidgeTimestamps{longForm: false, stamps: []rockRidgeTimestamp{
{timestampType: rockRidgeTimestampModify, time: mtime},
- {timestampType: rockRidgeTimestampAccess, time: atime},
- {timestampType: rockRidgeTimestampAttribute, time: ctime},
+ {timestampType: rockRidgeTimestampAccess, time: ffi.AccessTime()},
+ {timestampType: rockRidgeTimestampAttribute, time: ffi.ChangeTime()},
}}
ret = append(ret, tf)
// NM
if !isSelf && !isParent {
- ret = append(ret, rockRidgeName{name: fi.Name()})
+ ret = append(ret, rockRidgeName{name: ffi.name})
}
// SL
- if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
+ if ffi.Mode()&os.ModeSymlink == os.ModeSymlink {
// need the target if it is a symlink
- target, err := os.Readlink(fp)
- if err != nil {
- return nil, fmt.Errorf("error reading symlink target at %s", fp)
- }
- ret = append(ret, rockRidgeSymlink{continued: false, name: target})
+ ret = append(ret, rockRidgeSymlink{continued: false, name: ffi.LinkTarget()})
}
return ret, nil
@@ -293,7 +275,7 @@ type rockRidgePosixAttributes struct {
linkCount uint32
uid uint32
gid uint32
- serial uint32
+ serial uint64
}
func (d rockRidgePosixAttributes) Equal(o directoryEntrySystemUseExtension) bool {
@@ -362,8 +344,7 @@ func (d rockRidgePosixAttributes) Data() []byte {
binary.LittleEndian.PutUint32(ret[24:28], d.gid)
binary.BigEndian.PutUint32(ret[28:32], d.gid)
if d.length == 44 {
- binary.LittleEndian.PutUint32(ret[32:36], d.serial)
- binary.BigEndian.PutUint32(ret[36:40], d.serial)
+ binary.LittleEndian.PutUint64(ret[32:40], d.serial)
}
return ret
}
@@ -427,9 +408,9 @@ func (r *rockRidgeExtension) parsePosixAttributes(b []byte) (directoryEntrySyste
m |= uint32(os.ModeNamedPipe)
}
- var serial uint32
+ var serial uint64
if len(b) == 44 {
- serial = binary.LittleEndian.Uint32(b[36:40])
+ serial = binary.LittleEndian.Uint64(b[36:44])
}
return rockRidgePosixAttributes{
mode: os.FileMode(m),
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/compressor.go b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/compressor.go
index b608d551604..173edabf80f 100644
--- a/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/compressor.go
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/compressor.go
@@ -7,6 +7,7 @@ import (
"fmt"
"io"
+ "github.com/klauspost/compress/zstd"
"github.com/pierrec/lz4/v4"
"github.com/ulikunitz/xz"
"github.com/ulikunitz/xz/lzma"
@@ -51,6 +52,8 @@ func (c *CompressorLzma) decompress(in []byte) ([]byte, error) {
}
return p, nil
}
+
+//nolint:unused,revive // it is important to implement the interface
func (c *CompressorLzma) loadOptions(b []byte) error {
// lzma has no supported optiosn
return nil
@@ -107,6 +110,9 @@ func (c *CompressorGzip) decompress(in []byte) ([]byte, error) {
if err != nil {
return nil, fmt.Errorf("error decompressing: %v", err)
}
+ if err := gz.Close(); err != nil {
+ return nil, err
+ }
return p, nil
}
@@ -326,6 +332,32 @@ func (c *CompressorZstd) optionsBytes() []byte {
func (c *CompressorZstd) flavour() compression {
return compressionZstd
}
+func (c *CompressorZstd) compress(in []byte) ([]byte, error) {
+ var b bytes.Buffer
+ z, err := zstd.NewWriter(&b)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create zstd compressor: %w", err)
+ }
+ if _, err := z.Write(in); err != nil {
+ return nil, err
+ }
+ if err := z.Close(); err != nil {
+ return nil, err
+ }
+ return b.Bytes(), nil
+}
+func (c *CompressorZstd) decompress(in []byte) ([]byte, error) {
+ z, err := zstd.NewReader(nil)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create zstd decompressor: %w", err)
+ }
+ defer z.Close()
+ p, err := z.DecodeAll(in, nil)
+ if err != nil {
+ return nil, fmt.Errorf("error decompressing zstd: %w", err)
+ }
+ return p, nil
+}
func newCompressor(flavour compression) (Compressor, error) {
var c Compressor
@@ -343,7 +375,7 @@ func newCompressor(flavour compression) (Compressor, error) {
case compressionLz4:
c = &CompressorLz4{}
case compressionZstd:
- return nil, fmt.Errorf("zstd compression not yet supported")
+ c = &CompressorZstd{}
default:
return nil, fmt.Errorf("unknown compression type: %d", flavour)
}
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/directory.go b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/directory.go
index 0f5bd8d5c34..ce7627b382d 100644
--- a/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/directory.go
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/directory.go
@@ -54,18 +54,17 @@ type directoryEntryGroup struct {
// parse raw bytes of a directory to get the contents
func parseDirectory(b []byte) (*directory, error) {
- // must have at least one header
- if _, err := parseDirectoryHeader(b); err != nil {
- return nil, fmt.Errorf("could not parse directory header: %v", err)
- }
entries := make([]*directoryEntryRaw, 0)
- for pos := 0; pos+dirHeaderSize < len(b); {
+ for pos := 0; pos+dirHeaderSize <= len(b); {
directoryHeader, err := parseDirectoryHeader(b[pos:])
if err != nil {
return nil, fmt.Errorf("could not parse directory header: %v", err)
}
- if directoryHeader.count+1 > maxDirEntries {
- return nil, fmt.Errorf("corrupted directory, had %d entries instead of max %d", directoryHeader.count+1, maxDirEntries)
+ if directoryHeader.count == 0 {
+ return nil, fmt.Errorf("corrupted directory, must have at least one entry")
+ }
+ if directoryHeader.count > maxDirEntries {
+ return nil, fmt.Errorf("corrupted directory, had %d entries instead of max %d", directoryHeader.count, maxDirEntries)
}
pos += dirHeaderSize
for count := uint32(0); count < directoryHeader.count; count++ {
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/directoryentry.go b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/directoryentry.go
index cc5beb75de4..040212a5825 100644
--- a/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/directoryentry.go
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/directoryentry.go
@@ -1,50 +1,16 @@
package squashfs
import (
+ "fmt"
+ "io/fs"
"os"
"time"
+
+ "github.com/diskfs/go-diskfs/filesystem"
)
// FileStat is the extended data underlying a single file, similar to https://golang.org/pkg/syscall/#Stat_t
-type FileStat struct {
- uid uint32
- gid uint32
- xattrs map[string]string
-}
-
-func (f *FileStat) equal(o *FileStat) bool {
- if f.uid != o.uid || f.gid != o.gid {
- return false
- }
- if len(f.xattrs) != len(o.xattrs) {
- return false
- }
- for k, v := range f.xattrs {
- ov, ok := o.xattrs[k]
- if !ok {
- return false
- }
- if ov != v {
- return false
- }
- }
- return true
-}
-
-// UID get uid of file
-func (f *FileStat) UID() uint32 {
- return f.uid
-}
-
-// GID get gid of file
-func (f *FileStat) GID() uint32 {
- return f.gid
-}
-
-// Xattrs get extended attributes of file
-func (f *FileStat) Xattrs() map[string]string {
- return f.xattrs
-}
+type FileStat = *directoryEntry
// directoryEntry is a single directory entry
// it combines information from inode and the actual entry
@@ -57,22 +23,22 @@ func (f *FileStat) Xattrs() map[string]string {
// IsDir() bool // abbreviation for Mode().IsDir()
// Sys() interface{} // underlying data source (can return nil)
type directoryEntry struct {
- isSubdirectory bool
+ fs *FileSystem // the FileSystem this entry is part of
name string
size int64
modTime time.Time
mode os.FileMode
inode inode
- sys FileStat
+ uid uint32
+ gid uint32
+ xattrs map[string]string
+ isSubdirectory bool
}
func (d *directoryEntry) equal(o *directoryEntry) bool {
if o == nil {
return false
}
- if !d.sys.equal(&o.sys) {
- return false
- }
if d.inode == nil && o.inode == nil {
return true
}
@@ -107,10 +73,127 @@ func (d *directoryEntry) ModTime() time.Time {
// Mode FileMode // file mode bits
func (d *directoryEntry) Mode() os.FileMode {
- return d.mode
+ mode := d.mode
+
+ // We need to adjust the Linux mode into a Go mode
+ // The bottom 3*3 bits are the traditional unix permissions.
+
+ // Clear the non permissions bits
+ mode &= os.ModePerm
+
+ if d.inode == nil {
+ return mode
+ }
+ switch d.inode.inodeType() {
+ case inodeBasicDirectory, inodeExtendedDirectory:
+ mode |= os.ModeDir // d: is a directory
+ case inodeBasicFile, inodeExtendedFile:
+ // zero mode
+ case inodeBasicSymlink, inodeExtendedSymlink:
+ mode |= os.ModeSymlink // L: symbolic link
+ case inodeBasicBlock, inodeExtendedBlock:
+ mode |= os.ModeDevice // D: device file
+ case inodeBasicChar, inodeExtendedChar:
+ mode |= os.ModeDevice // D: device file
+ mode |= os.ModeCharDevice // c: Unix character device, when ModeDevice is set
+ case inodeBasicFifo, inodeExtendedFifo:
+ mode |= os.ModeNamedPipe // p: named pipe (FIFO)
+ case inodeBasicSocket, inodeExtendedSocket:
+ mode |= os.ModeSocket // S: Unix domain socket
+ default:
+ mode |= os.ModeIrregular // ?: non-regular file; nothing else is known about this file
+ }
+
+ // Not currently translated
+ // mode |= os.ModeAppend // a: append-only
+ // mode |= os.ModeExclusive // l: exclusive use
+ // mode |= os.ModeTemporary // T: temporary file; Plan 9 only
+ // mode |= os.ModeSetuid // u: setuid
+ // mode |= os.ModeSetgid // g: setgid
+ // mode |= os.ModeSticky // t: sticky
+
+ return mode
}
// Sys interface{} // underlying data source (can return nil)
func (d *directoryEntry) Sys() interface{} {
- return d.sys
+ return d
+}
+
+// UID get uid of file
+func (d *directoryEntry) UID() uint32 {
+ return d.uid
+}
+
+// GID get gid of file
+func (d *directoryEntry) GID() uint32 {
+ return d.gid
+}
+
+// Xattrs get extended attributes of file
+func (d *directoryEntry) Xattrs() map[string]string {
+ return d.xattrs
+}
+
+// Readlink returns the destination of the symbolic link if this entry
+// is a symbolic link.
+//
+// If this entry is not a symbolic link then it will return fs.ErrNotExist
+func (d *directoryEntry) Readlink() (string, error) {
+ var target string
+ body := d.inode.getBody()
+ //nolint:exhaustive // all other cases fall under default
+ switch d.inode.inodeType() {
+ case inodeBasicSymlink:
+ link, ok := body.(*basicSymlink)
+ if !ok {
+ return "", fmt.Errorf("internal error: inode wasn't basic symlink: %T", body)
+ }
+ target = link.target
+ case inodeExtendedSymlink:
+ link, ok := body.(*extendedSymlink)
+ if !ok {
+ return "", fmt.Errorf("internal error: inode wasn't extended symlink: %T", body)
+ }
+ target = link.target
+ default:
+ return "", fs.ErrNotExist
+ }
+ return target, nil
+}
+
+// Open returns an filesystem.File from which you can read the
+// contents of a file.
+//
+// Calling this on anything but a file will return an error.
+//
+// Calling this Open method is more efficient than calling
+// FileSystem.OpenFile as it doesn't have to find the file by
+// traversing the directory entries first.
+func (d *directoryEntry) Open() (filesystem.File, error) {
+ // get the inode data for this file
+ // now open the file
+ // get the inode for the file
+ var eFile *extendedFile
+ in := d.inode
+ iType := in.inodeType()
+ body := in.getBody()
+ //nolint:exhaustive // all other cases fall under default
+ switch iType {
+ case inodeBasicFile:
+ extFile := body.(*basicFile).toExtended()
+ eFile = &extFile
+ case inodeExtendedFile:
+ eFile, _ = body.(*extendedFile)
+ default:
+ return nil, fmt.Errorf("inode is of type %d, neither basic nor extended file", iType)
+ }
+
+ return &File{
+ extendedFile: eFile,
+ isReadWrite: false,
+ isAppend: false,
+ offset: 0,
+ filesystem: d.fs,
+ }, nil
}
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/file.go b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/file.go
index 93487dd3ec2..1b77d9698b7 100644
--- a/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/file.go
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/file.go
@@ -13,10 +13,12 @@ import (
// include all of the data
type File struct {
*extendedFile
- isReadWrite bool
- isAppend bool
- offset int64
- filesystem *FileSystem
+ isReadWrite bool
+ isAppend bool
+ offset int64
+ filesystem *FileSystem
+ blockLocation int64 // the position of the last block decompressed
+ block []byte // the actual last block decompressed
}
// Read reads up to len(b) bytes from the File.
@@ -42,9 +44,9 @@ func (fl *File) Read(b []byte) (int, error) {
// e.g. if starting block is at position 10245, then we want blocks 27,28,29 from the disk
// 5- read in and uncompress the necessary blocks
fs := fl.filesystem
- size := int(fl.size()) - int(fl.offset)
+ size := fl.size() - fl.offset
location := int64(fl.startBlock)
- maxRead := size
+ maxRead := len(b)
// if there is nothing left to read, just return EOF
if size <= 0 {
@@ -54,14 +56,14 @@ func (fl *File) Read(b []byte) (int, error) {
// we stop when we hit the lesser of
// 1- len(b)
// 2- file end
- if len(b) < maxRead {
- maxRead = len(b)
+ if size < int64(maxRead) {
+ maxRead = int(size)
}
// just read the requested number of bytes and change our offset
// figure out which block number has the bytes we are looking for
startBlock := int(fl.offset / fs.blocksize)
- endBlock := int((fl.offset + int64(maxRead)) / fs.blocksize)
+ endBlock := int((fl.offset + int64(maxRead) - 1) / fs.blocksize)
// do we end in fragment territory?
fragments := false
@@ -71,46 +73,80 @@ func (fl *File) Read(b []byte) (int, error) {
}
read := 0
- offset := fl.offset
+ offsetEnd := fl.offset + int64(maxRead)
+ pos := int64(0)
+
+ // send input to b, clipping as appropriate
+ outputBlock := func(input []byte) {
+ inputSize := int64(len(input))
+ start := fl.offset - pos
+ end := offsetEnd - pos
+ if start >= 0 && start < inputSize {
+ if end > inputSize {
+ end = inputSize
+ }
+ n := copy(b[read:], input[start:end])
+ read += n
+ fl.offset += int64(n)
+ }
+ }
+
// we need to cycle through all of the blocks to find where the desired one starts
for i, block := range fl.blockSizes {
- if i > endBlock || read > maxRead {
+ if i > endBlock || read >= maxRead {
break
}
// if we are in the range of desired ones, read it in
if i >= startBlock {
- input, err := fs.readBlock(location, block.compressed, block.size)
- if err != nil {
- return read, fmt.Errorf("error reading data block %d from squashfs: %v", i, err)
+ if int64(block.size) > fs.blocksize {
+ return read, fmt.Errorf("unexpected block.size=%d > fs.blocksize=%d", block.size, fs.blocksize)
}
- // we do not need to limit it to the remaining space of b, since copy() only will copy
- // to what space it has in b
- copy(b[read:], input[offset:])
- read += len(input)
- fl.offset += int64(read)
- offset = 0
+ var input []byte
+ if fl.blockLocation == location && fl.block != nil {
+ // Read last block from cache
+ input = fl.block
+ } else {
+ var err error
+ input, err = fs.readBlock(location, block.compressed, block.size)
+ if err != nil {
+ return read, fmt.Errorf("error reading data block %d from squashfs: %v", i, err)
+ }
+ // Cache the last block
+ fl.blockLocation = location
+ fl.block = input
+ }
+ outputBlock(input)
}
location += int64(block.size)
+ pos += fs.blocksize
}
+
// did we have a fragment to read?
- if fragments {
+ if read < maxRead && fragments {
+ if fl.fragmentBlockIndex == 0xffffffff {
+ return read, fmt.Errorf("expecting fragment to read %d bytes but no fragment found", maxRead-read)
+ }
input, err := fs.readFragment(fl.fragmentBlockIndex, fl.fragmentOffset, fl.size()%fs.blocksize)
if err != nil {
return read, fmt.Errorf("error reading fragment block %d from squashfs: %v", fl.fragmentBlockIndex, err)
}
- copy(b[read:], input)
+ pos = int64(len(fl.blockSizes)) * fs.blocksize
+ outputBlock(input)
}
- fl.offset += int64(maxRead)
var retErr error
- if fl.offset >= int64(size) {
+ if fl.offset >= fl.size() {
retErr = io.EOF
+ } else if read == 0 {
+ retErr = fmt.Errorf("internal error: read no bytes")
}
- return maxRead, retErr
+ return read, retErr
}
// Write writes len(b) bytes to the File.
//
// you cannot write to a finished squashfs, so this returns an error
+//
+//nolint:unused,revive // but it is important to implement the interface
func (fl *File) Write(p []byte) (int, error) {
return 0, fmt.Errorf("cannot write to a read-only squashfs filesystem")
}
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/finalize.go b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/finalize.go
index f85c28febf3..7c99e4f2af9 100644
--- a/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/finalize.go
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/finalize.go
@@ -4,6 +4,7 @@ import (
"encoding/binary"
"fmt"
"io"
+ iofs "io/fs"
"os"
"path"
"path/filepath"
@@ -415,21 +416,24 @@ func finalizeFragment(buf []byte, to util.File, toOffset int64, c Compressor) (r
// because the inode data is different.
// The first entry in the return always will be the root
func walkTree(workspace string) ([]*finalizeFileInfo, error) {
- cwd, err := os.Getwd()
- if err != nil {
- return nil, fmt.Errorf("could not get pwd: %v", err)
- }
- // make everything relative to the workspace
- _ = os.Chdir(workspace)
dirMap := make(map[string]*finalizeFileInfo)
fileList := make([]*finalizeFileInfo, 0)
var entry *finalizeFileInfo
- _ = filepath.Walk(".", func(fp string, fi os.FileInfo, err error) error {
+ err := filepath.WalkDir(workspace, func(actualPath string, d iofs.DirEntry, err error) error {
if err != nil {
return err
}
+ fp := strings.TrimPrefix(actualPath, workspace)
+ fp = strings.TrimPrefix(fp, string(filepath.Separator))
+ if fp == "" {
+ fp = "."
+ }
isRoot := fp == "."
- name := fi.Name()
+ name := d.Name()
+ fi, err := d.Info()
+ if err != nil {
+ return fmt.Errorf("could not get file info for %s: %v", fp, err)
+ }
m := fi.Mode()
var fType fileType
switch {
@@ -448,7 +452,7 @@ func walkTree(workspace string) ([]*finalizeFileInfo, error) {
default:
fType = fileRegular
}
- xattrNames, err := xattr.List(fp)
+ xattrNames, err := xattr.List(actualPath)
if err != nil {
return fmt.Errorf("unable to list xattrs for %s: %v", fp, err)
}
@@ -495,8 +499,9 @@ func walkTree(workspace string) ([]*finalizeFileInfo, error) {
fileList = append(fileList, entry)
return nil
})
- // reset the workspace
- _ = os.Chdir(cwd)
+ if err != nil {
+ return nil, err
+ }
return fileList, nil
}
@@ -750,7 +755,7 @@ func writeDirectories(dirs []*finalizeFileInfo, f util.File, compressor Compress
// writeFragmentTable write the fragment table
//
-//nolint:unparam // this does not use fragmentBlocksStart yet, but only because we have not yet added support
+//nolint:unparam,unused,revive // this does not use fragmentBlocksStart yet, but only because we have not yet added support
func writeFragmentTable(fragmentBlocks []fragmentBlock, fragmentBlocksStart int64, f util.File, compressor Compressor, location int64) (fragmentsWritten int, finalLocation uint64, err error) {
// now write the actual fragment table entries
var (
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/finalize_wasip1.go b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/finalize_wasip1.go
new file mode 100644
index 00000000000..f848c987f0f
--- /dev/null
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/finalize_wasip1.go
@@ -0,0 +1,18 @@
+//go:build wasip1
+// +build wasip1
+
+//nolint:unconvert // linter gets confused in this file
+package squashfs
+
+import (
+ "errors"
+ "os"
+)
+
+func getDeviceNumbers(path string) (major, minor uint32, err error) {
+ return 0, 0, errors.New("not implemented")
+}
+
+func getFileProperties(fi os.FileInfo) (links, uid, gid uint32) {
+ return 0, 0, 0
+}
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/inode.go b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/inode.go
index 7a690ca686c..a08852b8ba2 100644
--- a/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/inode.go
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/inode.go
@@ -143,11 +143,10 @@ func parseBlockData(u uint32) *blockData {
size: u & 0x00ffffff,
}
}
-func parseFileBlockSizes(b []byte, fileSize, blocksize int) []*blockData {
- count := fileSize / blocksize
- blocks := make([]*blockData, 0)
- for j := 0; j < count && j < len(b); j += 4 {
- blocks = append(blocks, parseBlockData(binary.LittleEndian.Uint32(b[j:j+4])))
+func parseFileBlockSizes(b []byte, blockListSize int) []*blockData {
+ blocks := make([]*blockData, 0, blockListSize)
+ for j := 0; j < blockListSize && j < len(b); j++ {
+ blocks = append(blocks, parseBlockData(binary.LittleEndian.Uint32(b[4*j:4*j+4])))
}
return blocks
}
@@ -440,14 +439,14 @@ func parseBasicFile(b []byte, blocksize int) (*basicFile, int, error) {
fileSize: fileSize,
}
// see how many other bytes we need to read
- blockListSize := int(d.fileSize) / blocksize
- if int(d.fileSize)%blocksize > 0 && d.fragmentBlockIndex != 0xffffffff {
+ blockListSize := int(d.fileSize / uint32(blocksize))
+ if d.fileSize%uint32(blocksize) > 0 && d.fragmentBlockIndex == 0xffffffff {
blockListSize++
}
// do we have enough data left to read those?
extra = blockListSize * 4
if len(b[16:]) >= extra {
- d.blockSizes = parseFileBlockSizes(b[16:], int(fileSize), blocksize)
+ d.blockSizes = parseFileBlockSizes(b[16:], blockListSize)
extra = 0
}
@@ -531,14 +530,14 @@ func parseExtendedFile(b []byte, blocksize int) (*extendedFile, int, error) {
xAttrIndex: binary.LittleEndian.Uint32(b[36:40]),
}
// see how many other bytes we need to read
- blockListSize := int(d.fileSize) / blocksize
- if int(d.fileSize)%blocksize > 0 && d.fragmentBlockIndex != 0xffffffff {
+ blockListSize := int(d.fileSize / uint64(blocksize))
+ if d.fileSize%uint64(blocksize) > 0 && d.fragmentBlockIndex == 0xffffffff {
blockListSize++
}
// do we have enough data left to read those?
extra = blockListSize * 4
- if len(b[16:]) >= extra {
- d.blockSizes = parseFileBlockSizes(b[16:], int(fileSize), blocksize)
+ if len(b[40:]) >= extra {
+ d.blockSizes = parseFileBlockSizes(b[40:], blockListSize)
extra = 0
}
return d, extra, nil
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/lru.go b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/lru.go
new file mode 100644
index 00000000000..9185d44c375
--- /dev/null
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/lru.go
@@ -0,0 +1,138 @@
+package squashfs
+
+import (
+ "sync"
+)
+
+// A simple least recently used cache
+type lru struct {
+ mu sync.Mutex
+ cache map[int64]*lruBlock // cache keyed on block position in file
+ maxBlocks int // max number of blocks in cache
+ root lruBlock // root block in LRU circular list
+}
+
+// A data block to store in the lru cache
+type lruBlock struct {
+ mu sync.Mutex // lock while fetching
+ data []byte // data block - nil while being fetched
+ prev *lruBlock // prev block in LRU list
+ next *lruBlock // next block in LRU list
+ pos int64 // position it was read off disk
+ size uint16 // compressed size on disk
+}
+
+// Create a new LRU cache of a maximum of maxBlocks blocks of size
+func newLRU(maxBlocks int) *lru {
+ l := &lru{
+ cache: make(map[int64]*lruBlock),
+ maxBlocks: maxBlocks,
+ root: lruBlock{
+ pos: -1,
+ },
+ }
+ l.root.prev = &l.root // circularly link the root node
+ l.root.next = &l.root
+ return l
+}
+
+// Unlink the block from the list
+func (l *lru) unlink(block *lruBlock) {
+ block.prev.next = block.next
+ block.next.prev = block.prev
+ block.prev = nil
+ block.next = nil
+}
+
+// Pop a block from the end of the list
+func (l *lru) pop() *lruBlock {
+ block := l.root.prev
+ if block == &l.root {
+ panic("internal error: list empty")
+ }
+ l.unlink(block)
+ return block
+}
+
+// Add a block to the start of the list
+func (l *lru) push(block *lruBlock) {
+ oldHead := l.root.next
+ l.root.next = block
+ block.prev = &l.root
+ block.next = oldHead
+ oldHead.prev = block
+}
+
+// ensure there are no more than n blocks in the cache
+func (l *lru) trim(maxBlocks int) {
+ for len(l.cache) > maxBlocks && len(l.cache) > 0 {
+ // Remove a block from the cache
+ block := l.pop()
+ delete(l.cache, block.pos)
+ }
+}
+
+// add block to the cache, pruning the cache as appropriate
+func (l *lru) add(block *lruBlock) {
+ l.trim(l.maxBlocks - 1)
+ l.cache[block.pos] = block
+ l.push(block)
+}
+
+// Fetch data returning size used from input and error
+//
+// data should be a subslice of buf
+type fetchFn func() (data []byte, size uint16, err error)
+
+// Get the block at pos from the cache.
+//
+// If it isn't found in the cache then fetch() is called to get it.
+//
+// This does read through caching and takes care not to block parallel
+// calls to the fetch() function.
+func (l *lru) get(pos int64, fetch fetchFn) (data []byte, size uint16, err error) {
+ if l == nil {
+ return fetch()
+ }
+ l.mu.Lock()
+ block, found := l.cache[pos]
+ if !found {
+ // Add an empty block with data == nil
+ block = &lruBlock{
+ pos: pos,
+ }
+ // Add it to the cache and the tail of the list
+ l.add(block)
+ } else {
+ // Remove the block from the list
+ l.unlink(block)
+ // Add it back to the start
+ l.push(block)
+ }
+ block.mu.Lock() // transfer the lock to the block
+ l.mu.Unlock()
+ defer block.mu.Unlock()
+
+ if block.data != nil {
+ return block.data, block.size, nil
+ }
+
+ // Fetch the block
+ data, size, err = fetch()
+ if err != nil {
+ return nil, 0, err
+ }
+ block.data = data
+ block.size = size
+ return data, size, nil
+}
+
+// Sets the number of blocks to be used in the cache
+//
+// It makes sure that there are no more than maxBlocks in the cache.
+func (l *lru) setMaxBlocks(maxBlocks int) {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ l.maxBlocks = maxBlocks
+ l.trim(l.maxBlocks)
+}
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/metadatablock.go b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/metadatablock.go
index 6711f4ef0f5..8ef65098fd7 100644
--- a/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/metadatablock.go
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/metadatablock.go
@@ -26,6 +26,7 @@ func getMetadataSize(b []byte) (size uint16, compressed bool, err error) {
return size, compressed, nil
}
+// FIXME this isn't used anywhere except in the test code
func parseMetadata(b []byte, c Compressor) (block *metadatablock, err error) {
if len(b) < minMetadataBlockSize {
return nil, fmt.Errorf("metadata block was of len %d, less than minimum %d", len(b), minMetadataBlockSize)
@@ -71,33 +72,35 @@ func (m *metadatablock) toBytes(c Compressor) ([]byte, error) {
return b, nil
}
-func readMetaBlock(r io.ReaderAt, c Compressor, location int64) (data []byte, size uint16, err error) {
- // read bytes off the reader to determine how big it is and if compressed
- b := make([]byte, 2)
- _, _ = r.ReadAt(b, location)
- size, compressed, err := getMetadataSize(b)
- if err != nil {
- return nil, 0, fmt.Errorf("error getting size and compression for metadata block at %d: %v", location, err)
- }
- b = make([]byte, size)
- read, err := r.ReadAt(b, location+2)
- if err != nil && err != io.EOF {
- return nil, 0, fmt.Errorf("unable to read metadata block of size %d at location %d: %v", size, location, err)
- }
- if read != len(b) {
- return nil, 0, fmt.Errorf("read %d instead of expected %d bytes for metadata block at location %d", read, size, location)
- }
- data = b
- if compressed {
- if c == nil {
- return nil, 0, fmt.Errorf("metadata block at %d compressed, but no compressor provided", location)
- }
- data, err = c.decompress(b)
+func (fs *FileSystem) readMetaBlock(r io.ReaderAt, c Compressor, location int64) (data []byte, size uint16, err error) {
+ return fs.cache.get(location, func() (data []byte, size uint16, err error) {
+ // read bytes off the reader to determine how big it is and if compressed
+ b := make([]byte, 2)
+ _, _ = r.ReadAt(b, location)
+ size, compressed, err := getMetadataSize(b)
if err != nil {
- return nil, 0, fmt.Errorf("decompress error: %v", err)
+ return nil, 0, fmt.Errorf("error getting size and compression for metadata block at %d: %v", location, err)
}
- }
- return data, size + 2, nil
+ b = make([]byte, size)
+ read, err := r.ReadAt(b, location+2)
+ if err != nil && err != io.EOF {
+ return nil, 0, fmt.Errorf("unable to read metadata block of size %d at location %d: %v", size, location, err)
+ }
+ if read != len(b) {
+ return nil, 0, fmt.Errorf("read %d instead of expected %d bytes for metadata block at location %d", read, size, location)
+ }
+ data = b
+ if compressed {
+ if c == nil {
+ return nil, 0, fmt.Errorf("metadata block at %d compressed, but no compressor provided", location)
+ }
+ data, err = c.decompress(b)
+ if err != nil {
+ return nil, 0, fmt.Errorf("decompress error: %v", err)
+ }
+ }
+ return data, size + 2, nil
+ })
}
// readMetadata read as many bytes of metadata as required for the given size, with the byteOffset provided as a starting
@@ -105,13 +108,13 @@ func readMetaBlock(r io.ReaderAt, c Compressor, location int64) (data []byte, si
// requests to read 500 bytes beginning at offset 8000 into the first block.
// it always returns to the end of the block, even if that is greater than the given size. This makes it easy to use more
// data than expected on first read. The consumer is expected to cut it down, if needed
-func readMetadata(r io.ReaderAt, c Compressor, firstBlock int64, initialBlockOffset uint32, byteOffset uint16, size int) ([]byte, error) {
+func (fs *FileSystem) readMetadata(r io.ReaderAt, c Compressor, firstBlock int64, initialBlockOffset uint32, byteOffset uint16, size int) ([]byte, error) {
var (
b []byte
blockOffset = int(initialBlockOffset)
)
// we know how many blocks, so read them all in
- m, read, err := readMetaBlock(r, c, firstBlock+int64(blockOffset))
+ m, read, err := fs.readMetaBlock(r, c, firstBlock+int64(blockOffset))
if err != nil {
return nil, err
}
@@ -119,7 +122,7 @@ func readMetadata(r io.ReaderAt, c Compressor, firstBlock int64, initialBlockOff
// do we have any more to read?
for len(b) < size {
blockOffset += int(read)
- m, read, err = readMetaBlock(r, c, firstBlock+int64(blockOffset))
+ m, read, err = fs.readMetaBlock(r, c, firstBlock+int64(blockOffset))
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/squashfs.go b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/squashfs.go
index 19f88f11278..bdd8d5a7eef 100644
--- a/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/squashfs.go
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/squashfs.go
@@ -17,6 +17,7 @@ const (
metadataBlockSize = 8 * KB
minBlocksize = 4 * KB
maxBlocksize = 1 * MB
+ defaultCacheSize = 128 * MB
)
// FileSystem implements the FileSystem interface
@@ -32,6 +33,7 @@ type FileSystem struct {
uidsGids []uint32
xattrs *xAttrTable
rootDir inode
+ cache *lru
}
// Equal compare if two filesystems are equal
@@ -111,7 +113,38 @@ func Create(f util.File, size, start, blocksize int64) (*FileSystem, error) {
// which allow you to work directly with partitions, rather than having to calculate (and hopefully not make any errors)
// where a partition starts and ends.
//
-// If the provided blocksize is 0, it will use the default of 2K bytes
+// If the provided blocksize is 0, it will use the default of 2K bytes.
+//
+// This will use a cache for the decompressed blocks of 128 MB by
+// default. (You can set this with the SetCacheSize method and read
+// its size with the GetCacheSize method). A block cache is essential
+// for performance when reading. This implements a cache for the
+// fragments (tail ends of files) and the metadata (directory
+// listings) which otherwise would be read, decompressed and discarded
+// many times.
+//
+// Unpacking a 3 GB squashfs made from the tensorflow docker image like this:
+//
+// docker export $(docker create tensorflow/tensorflow:latest-gpu-jupyter) -o tensorflow.tar.gz
+// mkdir -p tensorflow && tar xf tensorflow.tar.gz -C tensorflow
+// [ -f tensorflow.sqfs ] && rm tensorflow.sqfs
+// mksquashfs tensorflow tensorflow.sqfs -comp zstd -Xcompression-level 3 -b 1M -no-xattrs -all-root
+//
+// Gives these timings with and without cache:
+//
+// - no caching: 206s
+// - 256 MB cache: 16.7s
+// - 128 MB cache: 17.5s (the default)
+// - 64 MB cache: 23.4s
+// - 32 MB cache: 54.s
+//
+// The cached versions compare favourably to the C program unsquashfs
+// which takes 12.0s to unpack the same archive.
+//
+// These tests were done using rclone and the archive backend which
+// uses this library like this:
+//
+// rclone -P --transfers 16 --checkers 16 copy :archive:/path/to/tensorflow.sqfs /tmp/tensorflow
func Read(file util.File, size, start, blocksize int64) (*FileSystem, error) {
var (
read int
@@ -147,7 +180,7 @@ func Read(file util.File, size, start, blocksize int64) (*FileSystem, error) {
// create the compressor function we will use
compress, err := newCompressor(s.compression)
if err != nil {
- return nil, fmt.Errorf("unable to create compressor")
+ return nil, fmt.Errorf("unable to create compressor: %v", err)
}
// load fragments
@@ -160,7 +193,7 @@ func Read(file util.File, size, start, blocksize int64) (*FileSystem, error) {
var (
xattrs *xAttrTable
)
- if !s.noXattrs {
+ if !s.noXattrs && s.xattrTableStart != 0xffff_ffff_ffff_ffff {
// xattr is right to the end of the disk
xattrs, err = readXattrsTable(s, file, compress)
if err != nil {
@@ -180,11 +213,12 @@ func Read(file util.File, size, start, blocksize int64) (*FileSystem, error) {
size: size,
file: file,
superblock: s,
- blocksize: blocksize,
+ blocksize: int64(s.blocksize), // use the blocksize in the superblock
xattrs: xattrs,
compressor: compress,
fragments: fragments,
uidsGids: uidsgids,
+ cache: newLRU(int(defaultCacheSize) / int(s.blocksize)),
}
// for efficiency, read in the root inode right now
rootInode, err := fs.getInode(s.rootInode.block, s.rootInode.offset, inodeBasicDirectory)
@@ -200,6 +234,30 @@ func (fs *FileSystem) Type() filesystem.Type {
return filesystem.TypeSquashfs
}
+// SetCacheSize set the maximum memory used by the block cache to cacheSize bytes.
+//
+// The default is 128 MB.
+//
+// If this is <= 0 then the cache will be disabled.
+func (fs *FileSystem) SetCacheSize(cacheSize int) {
+ if fs.cache == nil {
+ return
+ }
+ blocks := cacheSize / int(fs.blocksize)
+ if blocks <= 0 {
+ blocks = 0
+ }
+ fs.cache.setMaxBlocks(blocks)
+}
+
+// GetCacheSize get the maximum memory used by the block cache in bytes.
+func (fs *FileSystem) GetCacheSize() int {
+ if fs.cache == nil {
+ return 0
+ }
+ return fs.cache.maxBlocks * int(fs.blocksize)
+}
+
// Mkdir make a directory at the given path. It is equivalent to `mkdir -p`, i.e. idempotent, in that:
//
// * It will make the entire tree path if it does not exist
@@ -307,30 +365,9 @@ func (fs *FileSystem) OpenFile(p string, flag int) (filesystem.File, error) {
if targetEntry == nil {
return nil, fmt.Errorf("target file %s does not exist", p)
}
- // get the inode data for this file
- // now open the file
- // get the inode for the file
- var eFile *extendedFile
- in := targetEntry.inode
- iType := in.inodeType()
- body := in.getBody()
- //nolint:exhaustive // all other cases fall under default
- switch iType {
- case inodeBasicFile:
- extFile := body.(*basicFile).toExtended()
- eFile = &extFile
- case inodeExtendedFile:
- eFile, _ = body.(*extendedFile)
- default:
- return nil, fmt.Errorf("inode is of type %d, neither basic nor extended directory", iType)
- }
-
- f = &File{
- extendedFile: eFile,
- isReadWrite: false,
- isAppend: false,
- offset: 0,
- filesystem: fs,
+ f, err = targetEntry.Open()
+ if err != nil {
+ return nil, err
}
} else {
f, err = os.OpenFile(path.Join(fs.workspace, p), flag, 0o644)
@@ -440,17 +477,16 @@ func (fs *FileSystem) hydrateDirectoryEntries(entries []*directoryEntryRaw) ([]*
}
}
fullEntries = append(fullEntries, &directoryEntry{
+ fs: fs,
isSubdirectory: e.isSubdirectory,
name: e.name,
size: body.size(),
modTime: header.modTime,
mode: header.mode,
inode: in,
- sys: FileStat{
- uid: fs.uidsGids[header.uidIdx],
- gid: fs.uidsGids[header.gidIdx],
- xattrs: xattrs,
- },
+ uid: fs.uidsGids[header.uidIdx],
+ gid: fs.uidsGids[header.gidIdx],
+ xattrs: xattrs,
})
}
return fullEntries, nil
@@ -464,7 +500,7 @@ func (fs *FileSystem) getInode(blockOffset uint32, byteOffset uint16, iType inod
// get the block
// start by getting the minimum for the proposed type. It very well might be wrong.
size := inodeTypeToSize(iType)
- uncompressed, err := readMetadata(fs.file, fs.compressor, int64(fs.superblock.inodeTableStart), blockOffset, byteOffset, size)
+ uncompressed, err := fs.readMetadata(fs.file, fs.compressor, int64(fs.superblock.inodeTableStart), blockOffset, byteOffset, size)
if err != nil {
return nil, fmt.Errorf("error reading block at position %d: %v", blockOffset, err)
}
@@ -475,6 +511,14 @@ func (fs *FileSystem) getInode(blockOffset uint32, byteOffset uint16, iType inod
}
if header.inodeType != iType {
iType = header.inodeType
+ size = inodeTypeToSize(iType)
+ // Read more data if necessary (quite rare)
+ if size > len(uncompressed) {
+ uncompressed, err = fs.readMetadata(fs.file, fs.compressor, int64(fs.superblock.inodeTableStart), blockOffset, byteOffset, size)
+ if err != nil {
+ return nil, fmt.Errorf("error reading block at position %d: %v", blockOffset, err)
+ }
+ }
}
// now read the body, which may have a variable size
body, extra, err := parseInodeBody(uncompressed[inodeHeaderSize:], int(fs.blocksize), iType)
@@ -484,7 +528,7 @@ func (fs *FileSystem) getInode(blockOffset uint32, byteOffset uint16, iType inod
// if it returns extra > 0, then it needs that many more bytes to be read, and to be reparsed
if extra > 0 {
size += extra
- uncompressed, err = readMetadata(fs.file, fs.compressor, int64(fs.superblock.inodeTableStart), blockOffset, byteOffset, size)
+ uncompressed, err = fs.readMetadata(fs.file, fs.compressor, int64(fs.superblock.inodeTableStart), blockOffset, byteOffset, size)
if err != nil {
return nil, fmt.Errorf("error reading block at position %d: %v", blockOffset, err)
}
@@ -504,7 +548,7 @@ func (fs *FileSystem) getInode(blockOffset uint32, byteOffset uint16, iType inod
// block when uncompressed.
func (fs *FileSystem) getDirectory(blockOffset uint32, byteOffset uint16, size int) (*directory, error) {
// get the block
- uncompressed, err := readMetadata(fs.file, fs.compressor, int64(fs.superblock.directoryTableStart), blockOffset, byteOffset, size)
+ uncompressed, err := fs.readMetadata(fs.file, fs.compressor, int64(fs.superblock.directoryTableStart), blockOffset, byteOffset, size)
if err != nil {
return nil, fmt.Errorf("error reading block at position %d: %v", blockOffset, err)
}
@@ -517,6 +561,10 @@ func (fs *FileSystem) getDirectory(blockOffset uint32, byteOffset uint16, size i
}
func (fs *FileSystem) readBlock(location int64, compressed bool, size uint32) ([]byte, error) {
+ // Zero size is a sparse block of blocksize
+ if size == 0 {
+ return make([]byte, fs.superblock.blocksize), nil
+ }
b := make([]byte, size)
read, err := fs.file.ReadAt(b, location)
if err != nil && err != io.EOF {
@@ -543,25 +591,32 @@ func (fs *FileSystem) readFragment(index, offset uint32, fragmentSize int64) ([]
return nil, fmt.Errorf("cannot find fragment block with index %d", index)
}
fragmentInfo := fs.fragments[index]
- // figure out the size of the compressed block and if it is compressed
- b := make([]byte, fragmentInfo.size)
- read, err := fs.file.ReadAt(b, int64(fragmentInfo.start))
- if err != nil && err != io.EOF {
- return nil, fmt.Errorf("unable to read fragment block %d: %v", index, err)
- }
- if read != len(b) {
- return nil, fmt.Errorf("read %d instead of expected %d bytes for fragment block %d", read, len(b), index)
- }
-
- data := b
- if fragmentInfo.compressed {
- if fs.compressor == nil {
- return nil, fmt.Errorf("fragment compressed but do not have valid compressor")
+ pos := int64(fragmentInfo.start)
+ data, _, err := fs.cache.get(pos, func() (data []byte, size uint16, err error) {
+ // figure out the size of the compressed block and if it is compressed
+ b := make([]byte, fragmentInfo.size)
+ read, err := fs.file.ReadAt(b, pos)
+ if err != nil && err != io.EOF {
+ return nil, 0, fmt.Errorf("unable to read fragment block %d: %v", index, err)
}
- data, err = fs.compressor.decompress(b)
- if err != nil {
- return nil, fmt.Errorf("decompress error: %v", err)
+ if read != len(b) {
+ return nil, 0, fmt.Errorf("read %d instead of expected %d bytes for fragment block %d", read, len(b), index)
}
+
+ data = b
+ if fragmentInfo.compressed {
+ if fs.compressor == nil {
+ return nil, 0, fmt.Errorf("fragment compressed but do not have valid compressor")
+ }
+ data, err = fs.compressor.decompress(b)
+ if err != nil {
+ return nil, 0, fmt.Errorf("decompress error: %v", err)
+ }
+ }
+ return data, 0, nil
+ })
+ if err != nil {
+ return nil, err
}
// now get the data from the offset
return data[offset : int64(offset)+fragmentSize], nil
@@ -604,8 +659,9 @@ func readFragmentTable(s *superblock, file util.File, c Compressor) ([]*fragment
// load in the actual fragment entries
// read each block and uncompress it
var fragmentTable []*fragmentEntry
+ var fs = &FileSystem{}
for i, offset := range offsets {
- uncompressed, _, err := readMetaBlock(file, c, offset)
+ uncompressed, _, err := fs.readMetaBlock(file, c, offset)
if err != nil {
return nil, fmt.Errorf("error reading meta block %d at position %d: %v", i, offset, err)
}
@@ -674,13 +730,14 @@ func readXattrsTable(s *superblock, file util.File, c Compressor) (*xAttrTable,
var (
uncompressed []byte
size uint16
+ fs = &FileSystem{}
)
bIndex := make([]byte, 0)
// convert those into indexes
for i := 0; i+8-1 < len(b); i += 8 {
locn := binary.LittleEndian.Uint64(b[i : i+8])
- uncompressed, _, err = readMetaBlock(file, c, int64(locn))
+ uncompressed, _, err = fs.readMetaBlock(file, c, int64(locn))
if err != nil {
return nil, fmt.Errorf("error reading xattr index meta block %d at position %d: %v", i, locn, err)
}
@@ -691,7 +748,7 @@ func readXattrsTable(s *superblock, file util.File, c Compressor) (*xAttrTable,
xAttrEnd := binary.LittleEndian.Uint64(b[:8])
xAttrData := make([]byte, 0)
for i := xAttrStart; i < xAttrEnd; {
- uncompressed, size, err = readMetaBlock(file, c, int64(i))
+ uncompressed, size, err = fs.readMetaBlock(file, c, int64(i))
if err != nil {
return nil, fmt.Errorf("error reading xattr data meta block at position %d: %v", i, err)
}
@@ -704,7 +761,7 @@ func readXattrsTable(s *superblock, file util.File, c Compressor) (*xAttrTable,
return parseXattrsTable(xAttrData, bIndex, s.idTableStart, c)
}
-//nolint:unparam // this does not use offset or compressor yet, but only because we have not yet added support
+//nolint:unparam,unused,revive // this does not use offset or compressor yet, but only because we have not yet added support
func parseXattrsTable(bUIDXattr, bIndex []byte, offset uint64, c Compressor) (*xAttrTable, error) {
// create the ID list
var (
@@ -765,13 +822,14 @@ func readUidsGids(s *superblock, file util.File, c Compressor) ([]uint32, error)
var (
uncompressed []byte
+ fs = &FileSystem{}
)
data := make([]byte, 0)
// convert those into indexes
for i := 0; i+8-1 < len(b); i += 8 {
locn := binary.LittleEndian.Uint64(b[i : i+8])
- uncompressed, _, err = readMetaBlock(file, c, int64(locn))
+ uncompressed, _, err = fs.readMetaBlock(file, c, int64(locn))
if err != nil {
return nil, fmt.Errorf("error reading uidgid index meta block %d at position %d: %v", i, locn, err)
}
diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/xattr.go b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/xattr.go
index f63d4029a6c..e1894217d3d 100644
--- a/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/xattr.go
+++ b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/xattr.go
@@ -45,7 +45,7 @@ func (x *xAttrTable) find(pos int) (map[string]string, error) {
xattrs := map[string]string{}
for i := 0; i < int(count); i++ {
// must be 4 bytes for header
- if len(b[pos:]) < 4 {
+ if len(b[ptr:]) < 4 {
return nil, fmt.Errorf("insufficient bytes %d to read the xattr at position %d", len(b[ptr:]), ptr)
}
// get the type and size
@@ -56,7 +56,7 @@ func (x *xAttrTable) find(pos int) (map[string]string, error) {
valStart := valHeaderStart + 4
// make sure we have enough bytes
if len(b[nameStart:]) < xSize {
- return nil, fmt.Errorf("xattr header has size %d, but only %d bytes available to read at position %d", xSize, len(b[pos+4:]), ptr)
+ return nil, fmt.Errorf("xattr header has size %d, but only %d bytes available to read at position %d", xSize, len(b[ptr+4:]), ptr)
}
if xSize < 1 {
return nil, fmt.Errorf("no name given for xattr at position %d", ptr)
diff --git a/vendor/github.com/diskfs/go-diskfs/partition/gpt/partition.go b/vendor/github.com/diskfs/go-diskfs/partition/gpt/partition.go
index a454ec23e94..bfa0e66270a 100644
--- a/vendor/github.com/diskfs/go-diskfs/partition/gpt/partition.go
+++ b/vendor/github.com/diskfs/go-diskfs/partition/gpt/partition.go
@@ -291,3 +291,14 @@ func (p *Partition) sectorSizes() (physical, logical int) {
func (p *Partition) Equal(o *Partition) bool {
return p != nil && o != nil && *p == *o
}
+
+// UUID returns the partitions UUID
+func (p *Partition) UUID() string {
+ return p.GUID
+}
+
+// Expand increases the size of the partition by a number of sectors
+func (p *Partition) Expand(sectors uint64) {
+ p.End += sectors
+ p.Size += sectors * uint64(p.logicalSectorSize)
+}
diff --git a/vendor/github.com/diskfs/go-diskfs/partition/gpt/table.go b/vendor/github.com/diskfs/go-diskfs/partition/gpt/table.go
index bbea768dc17..132b3578e01 100644
--- a/vendor/github.com/diskfs/go-diskfs/partition/gpt/table.go
+++ b/vendor/github.com/diskfs/go-diskfs/partition/gpt/table.go
@@ -20,6 +20,7 @@ const (
// just defaults
physicalSectorSize = 512
logicalSectorSize = 512
+ gptHeaderSector = 1
)
// Table represents a partition table to be applied to a disk or read from a disk
@@ -106,7 +107,7 @@ func (t *Table) initTable(size int64) {
t.secondaryHeader = diskSectors - 1
}
if t.lastDataSector == 0 {
- t.lastDataSector = diskSectors - 1 - partSectors
+ t.lastDataSector = t.secondaryHeader - partSectors - 1
}
t.initialized = true
@@ -347,15 +348,9 @@ func readPartitionArrayBytes(b []byte, entrySize, logicalSectorSize, physicalSec
return parts, nil
}
-// tableFromBytes read a partition table from a byte slice
-func tableFromBytes(b []byte, logicalBlockSize, physicalBlockSize int) (*Table, error) {
- // minimum size - gpt entries + header + LBA0 for (protective) MBR
- if len(b) < logicalBlockSize*2 {
- return nil, fmt.Errorf("data for partition was %d bytes instead of expected minimum %d", len(b), logicalBlockSize*2)
- }
-
- // GPT starts at LBA1
- gpt := b[logicalBlockSize:]
+// readGPTHeader reads the GPT header from the given byte slice
+func readGPTHeader(b []byte) (*Table, error) {
+ gpt := b
// start with fixed headers
efiSignature := gpt[0:8]
efiRevision := gpt[8:12]
@@ -396,12 +391,7 @@ func tableFromBytes(b []byte, logicalBlockSize, physicalBlockSize int) (*Table,
return nil, fmt.Errorf("invalid EFI Header Checksum, expected %v, got %v", checksum, efiHeaderCrc)
}
- // potential protective MBR is at LBA0
- hasProtectiveMBR := readProtectiveMBR(b[:logicalBlockSize], uint32(secondaryHeader))
-
table := Table{
- LogicalSectorSize: logicalBlockSize,
- PhysicalSectorSize: physicalBlockSize,
partitionEntrySize: partitionEntrySize,
primaryHeader: primaryHeader,
secondaryHeader: secondaryHeader,
@@ -409,15 +399,63 @@ func tableFromBytes(b []byte, logicalBlockSize, physicalBlockSize int) (*Table,
lastDataSector: lastDataSector,
partitionArraySize: int(partitionEntryCount),
partitionFirstLBA: partitionEntryFirstLBA,
- ProtectiveMBR: hasProtectiveMBR,
GUID: strings.ToUpper(diskGUID.String()),
partitionEntryChecksum: partitionEntryChecksum,
- initialized: true,
}
return &table, nil
}
+// tableHeaderFromBytes read a partition table from a byte slice, mainly used to validate the secondary header
+func tableHeaderFromBytes(b []byte, logicalBlockSize, physicalBlockSize int, skipMBR bool) (*Table, error) {
+ // minimum size - gpt entries + header + LBA0 for (protective) MBR
+ minSize := logicalBlockSize
+ if len(b) < minSize {
+ return nil, fmt.Errorf("data for partition was %d bytes instead of expected minimum %d", len(b), minSize)
+ }
+ gpt := b
+ if skipMBR {
+ gpt = b[logicalBlockSize:]
+ }
+
+ table, err := readGPTHeader(gpt)
+ if err != nil {
+ return nil, err
+ }
+
+ // potential protective MBR is at LBA0
+ table.ProtectiveMBR = readProtectiveMBR(b[:logicalBlockSize], uint32(table.secondaryHeader))
+ table.LogicalSectorSize = logicalBlockSize
+ table.PhysicalSectorSize = physicalBlockSize
+ table.initialized = true
+
+ return table, nil
+}
+
+// tableFromBytes read a partition table from a byte slice
+func tableFromBytes(b []byte, logicalBlockSize, physicalBlockSize int) (*Table, error) {
+ // minimum size - gpt entries + header + LBA0 for (protective) MBR
+ if len(b) < logicalBlockSize*2 {
+ return nil, fmt.Errorf("data for partition was %d bytes instead of expected minimum %d", len(b), logicalBlockSize*2)
+ }
+
+ // GPT starts at LBA1
+ gpt := b[logicalBlockSize:]
+
+ table, err := readGPTHeader(gpt)
+ if err != nil {
+ return nil, err
+ }
+
+ // potential protective MBR is at LBA0
+ table.ProtectiveMBR = readProtectiveMBR(b[:logicalBlockSize], uint32(table.secondaryHeader))
+ table.LogicalSectorSize = logicalBlockSize
+ table.PhysicalSectorSize = physicalBlockSize
+ table.initialized = true
+
+ return table, nil
+}
+
// Type report the type of table, always "gpt"
func (t *Table) Type() string {
return "gpt"
@@ -550,3 +588,84 @@ func (t *Table) GetPartitions() []part.Partition {
}
return parts
}
+
+// UUID returns the partition table UUID (disk UUID)
+func (t *Table) UUID() string {
+ return t.GUID
+}
+
+// Verify will attempt to evaluate the headers
+func (t *Table) Verify(f util.File, diskSize uint64) error {
+ if t.LogicalSectorSize == 0 {
+ // Avoid divide by zero panic.
+ return fmt.Errorf("table is not initialized")
+ }
+
+ // Determine the size of disk that GPT expects
+ expectedDiskSize := (t.secondaryHeader + 1) * uint64(t.LogicalSectorSize)
+ if diskSize != expectedDiskSize {
+ return fmt.Errorf("secondary Header is not at end of the disk, expected => %d / actual => %d", expectedDiskSize, diskSize)
+ }
+ b := make([]byte, t.LogicalSectorSize)
+ seekAddress := int64(t.secondaryHeader) * int64(t.LogicalSectorSize)
+ _, err := f.ReadAt(b, seekAddress)
+ if err != nil {
+ return fmt.Errorf("error reading GPT from file at %d / disksize %d : %v", seekAddress, diskSize, err)
+ }
+ secondaryTable, err := tableHeaderFromBytes(b, t.LogicalSectorSize, t.PhysicalSectorSize, false)
+ if err != nil {
+ return fmt.Errorf("error reading GPT from file at %d / disksize %d : %v", seekAddress, diskSize, err)
+ }
+ if t.firstDataSector != secondaryTable.firstDataSector {
+ return fmt.Errorf("error comparing GPT headers expected => %d / actual => %d", t.firstDataSector, secondaryTable.firstDataSector)
+ }
+ partSectors := uint64(t.partitionArraySize) * uint64(t.partitionEntrySize) / uint64(t.LogicalSectorSize)
+ lastDataSector := t.secondaryHeader - partSectors - 1
+ if t.lastDataSector != lastDataSector {
+ return fmt.Errorf("error comparing GPT secondary headers expected => %d / actual => %d", t.lastDataSector, lastDataSector)
+ }
+ return nil
+}
+
+// Repair will attempt to evaluate the headers fix the header location and re-write the primary and secondary header
+func (t *Table) Repair(diskSize uint64) error {
+ if t.LogicalSectorSize == 0 {
+ // Avoid divide by zero panic.
+ return fmt.Errorf("table is not initialized")
+ }
+
+ partSectors := uint64(t.partitionArraySize) * uint64(t.partitionEntrySize) / uint64(t.LogicalSectorSize)
+
+ t.secondaryHeader = (diskSize / uint64(t.LogicalSectorSize)) - 1
+ t.lastDataSector = t.secondaryHeader - partSectors - 1
+
+ return nil
+}
+
+// TotalSize returns the total size of the GPT in bytes.
+//
+// This is counted from the start of the MBR to the end of the secondary
+// header.
+func (t *Table) TotalSize() uint64 {
+ return (t.secondaryHeader + gptHeaderSector) * uint64(t.LogicalSectorSize)
+}
+
+func (t *Table) LastDataSector() uint64 {
+ return t.lastDataSector
+}
+
+// Resize changes the size of the GPT.
+//
+// The size argument is in bytes and must be a multiple of the logical sector
+// size.
+// Use this function in case a storage device is not the same as the total
+// size of its GPT.
+func (t *Table) Resize(size uint64) {
+ // how many sectors on the disk?
+ diskSectors := size / uint64(t.LogicalSectorSize)
+ // how many sectors used for partition entries?
+ partSectors := uint64(t.partitionArraySize) * uint64(t.partitionEntrySize) / uint64(t.LogicalSectorSize)
+
+ t.secondaryHeader = diskSectors - 1
+ t.lastDataSector = t.secondaryHeader - 1 - partSectors
+}
diff --git a/vendor/github.com/diskfs/go-diskfs/partition/mbr/partition.go b/vendor/github.com/diskfs/go-diskfs/partition/mbr/partition.go
index 4afab694780..a63428da9b0 100644
--- a/vendor/github.com/diskfs/go-diskfs/partition/mbr/partition.go
+++ b/vendor/github.com/diskfs/go-diskfs/partition/mbr/partition.go
@@ -27,6 +27,8 @@ type Partition struct {
// we need this for calculations
logicalSectorSize int
physicalSectorSize int
+ // partitionUUID is set when retrieving partitions from a Table
+ partitionUUID string
}
// PartitionEqualBytes compares if the bytes for 2 partitions are equal, ignoring CHS start and end
@@ -46,7 +48,7 @@ func PartitionEqualBytes(b1, b2 []byte) bool {
bytes.Equal(b1[12:16], b2[12:16])
}
-// Equal compares if another partition is equal to this one, ignoring CHS start and end
+// Equal compares if another partition is equal to this one, ignoring the UUID and CHS start and end
func (p *Partition) Equal(p2 *Partition) bool {
if p2 == nil {
return false
@@ -204,3 +206,9 @@ func (p *Partition) sectorSizes() (physical, logical int) {
}
return physical, logical
}
+
+// UUID returns the partitions UUID. For MBR based partition tables this is the
+// partition table UUID with the partition number as a suffix.
+func (p *Partition) UUID() string {
+ return p.partitionUUID
+}
diff --git a/vendor/github.com/diskfs/go-diskfs/partition/mbr/table.go b/vendor/github.com/diskfs/go-diskfs/partition/mbr/table.go
index 631018a3b00..c64dab5f0f9 100644
--- a/vendor/github.com/diskfs/go-diskfs/partition/mbr/table.go
+++ b/vendor/github.com/diskfs/go-diskfs/partition/mbr/table.go
@@ -2,6 +2,7 @@ package mbr
import (
"bytes"
+ "encoding/binary"
"fmt"
"github.com/diskfs/go-diskfs/partition/part"
@@ -13,7 +14,7 @@ type Table struct {
Partitions []*Partition
LogicalSectorSize int // logical size of a sector
PhysicalSectorSize int // physical size of the sector
- initialized bool
+ partitionTableUUID string
}
const (
@@ -23,6 +24,9 @@ const (
partitionEntriesStart = 446
partitionEntriesCount = 4
signatureStart = 510
+ // the partition table UUID is stored in 4 bytes in the MBR
+ partitionTableUUIDStart = 440
+ partitionTableUUIDEnd = 444
)
// partitionEntrySize standard size of an MBR partition
@@ -54,20 +58,7 @@ func comparePartitionArray(p1, p2 []*Partition) bool {
return matches
}
-// ensure that a blank table is initialized
-func (t *Table) initTable() {
- // default settings
- if t.LogicalSectorSize == 0 {
- t.LogicalSectorSize = 512
- }
- if t.PhysicalSectorSize == 0 {
- t.PhysicalSectorSize = 512
- }
-
- t.initialized = true
-}
-
-// Equal check if another table is equal to this one, ignoring CHS start and end for the partitions
+// Equal check if another table is equal to this one, ignoring the partition table UUID and CHS start and end for the partitions
func (t *Table) Equal(t2 *Table) bool {
if t2 == nil {
return false
@@ -85,13 +76,14 @@ func tableFromBytes(b []byte) (*Table, error) {
if len(b) != mbrSize {
return nil, fmt.Errorf("data for partition was %d bytes instead of expected %d", len(b), mbrSize)
}
- mbrSignature := b[signatureStart:]
// validate signature
+ mbrSignature := b[signatureStart:]
if !bytes.Equal(mbrSignature, getMbrSignature()) {
return nil, fmt.Errorf("invalid MBR Signature %v", mbrSignature)
}
+ ptUUID := readPartitionTableUUID(b)
parts := make([]*Partition, 0, partitionEntriesCount)
count := int(partitionEntriesCount)
for i := 0; i < count; i++ {
@@ -102,6 +94,7 @@ func tableFromBytes(b []byte) (*Table, error) {
if err != nil {
return nil, fmt.Errorf("error reading partition entry %d: %v", i, err)
}
+ p.partitionUUID = formatPartitionUUID(ptUUID, i+1)
parts = append(parts, p)
}
@@ -109,17 +102,38 @@ func tableFromBytes(b []byte) (*Table, error) {
Partitions: parts,
LogicalSectorSize: logicalSectorSize,
PhysicalSectorSize: 512,
+ partitionTableUUID: ptUUID,
}
return table, nil
}
+func readPartitionTableUUID(b []byte) string {
+ ptUUID := b[partitionTableUUIDStart:partitionTableUUIDEnd]
+ return fmt.Sprintf("%x", binary.LittleEndian.Uint32(ptUUID))
+}
+
+// UUID returns the partition table UUID used to identify disks
+func (t *Table) UUID() string {
+ return t.partitionTableUUID
+}
+
+// formatPartitionUUID creates the partition UUID which is created by using the
+// partition table UUID and the partition index.
+// Format string taken from libblkid:
+// https://github.com/util-linux/util-linux/blob/master/libblkid/src/partitions/partitions.c#L1387C42-L1387C52
+func formatPartitionUUID(ptUUID string, index int) string {
+ return fmt.Sprintf("%.33s-%02x", ptUUID, index)
+}
+
// Type report the type of table, always the string "mbr"
func (t *Table) Type() string {
return "mbr"
}
// Read read a partition table from a disk, given the logical block size and physical block size
+//
+//nolint:unused,revive // not used in MBR, but it is important to implement the interface
func Read(f util.File, logicalBlockSize, physicalBlockSize int) (*Table, error) {
// read the data off of the disk
b := make([]byte, mbrSize)
@@ -155,6 +169,8 @@ func (t *Table) toBytes() []byte {
// Write writes a given MBR Table to disk.
// Must be passed the util.File to write to and the size of the disk
+//
+//nolint:unused,revive // not used in MBR, but it is important to implement the interface
func (t *Table) Write(f util.File, size int64) error {
b := t.toBytes()
@@ -176,3 +192,17 @@ func (t *Table) GetPartitions() []part.Partition {
}
return parts
}
+
+// Verify will attempt to evaluate the headers
+//
+//nolint:unused,revive // not used in MBR, but it is important to implement the interface
+func (t *Table) Verify(f util.File, diskSize uint64) error {
+ return nil
+}
+
+// Repair will attempt to repair a broken Master Boot Record
+//
+//nolint:unused,revive // not used in MBR, but it is important to implement the interface
+func (t *Table) Repair(diskSize uint64) error {
+ return nil
+}
diff --git a/vendor/github.com/diskfs/go-diskfs/partition/part/partition.go b/vendor/github.com/diskfs/go-diskfs/partition/part/partition.go
index 2a9a3dbec60..f87ba9faa02 100644
--- a/vendor/github.com/diskfs/go-diskfs/partition/part/partition.go
+++ b/vendor/github.com/diskfs/go-diskfs/partition/part/partition.go
@@ -12,4 +12,5 @@ type Partition interface {
GetStart() int64
ReadContents(util.File, io.Writer) (int64, error)
WriteContents(util.File, io.Reader) (uint64, error)
+ UUID() string
}
diff --git a/vendor/github.com/diskfs/go-diskfs/partition/table.go b/vendor/github.com/diskfs/go-diskfs/partition/table.go
index 29cf0046593..62f56503a4d 100644
--- a/vendor/github.com/diskfs/go-diskfs/partition/table.go
+++ b/vendor/github.com/diskfs/go-diskfs/partition/table.go
@@ -10,4 +10,7 @@ type Table interface {
Type() string
Write(util.File, int64) error
GetPartitions() []part.Partition
+ Repair(diskSize uint64) error
+ Verify(f util.File, diskSize uint64) error
+ UUID() string
}
diff --git a/vendor/github.com/diskfs/go-diskfs/util/bitmap.go b/vendor/github.com/diskfs/go-diskfs/util/bitmap.go
new file mode 100644
index 00000000000..6fb85a1ba77
--- /dev/null
+++ b/vendor/github.com/diskfs/go-diskfs/util/bitmap.go
@@ -0,0 +1,171 @@
+package util
+
+import "fmt"
+
+// Bitmap is a structure holding a bitmap
+type Bitmap struct {
+ bits []byte
+}
+
+// Contiguous a position and count of contiguous bits, either free or set
+type Contiguous struct {
+ Position int
+ Count int
+}
+
+// BitmapFromBytes create a bitmap struct from bytes
+func BitmapFromBytes(b []byte) *Bitmap {
+ // just copy them over
+ bits := make([]byte, len(b))
+ copy(bits, b)
+ bm := Bitmap{
+ bits: bits,
+ }
+
+ return &bm
+}
+
+// NewBitmap creates a new bitmap of size bytes; it is not in bits to force the caller to have
+// a complete set
+func NewBitmap(bytes int) *Bitmap {
+ bm := Bitmap{
+ bits: make([]byte, bytes),
+ }
+ return &bm
+}
+
+// ToBytes returns raw bytes underlying the bitmap
+func (bm *Bitmap) ToBytes() []byte {
+ b := make([]byte, len(bm.bits))
+ copy(b, bm.bits)
+
+ return b
+}
+
+// FromBytes overwrite the existing map with the contents of the bytes.
+// It is the equivalent of BitmapFromBytes, but uses an existing Bitmap.
+func (bm *Bitmap) FromBytes(b []byte) {
+ bm.bits = make([]byte, len(b))
+ copy(bm.bits, b)
+}
+
+// IsSet check if a specific bit location is set
+func (bm *Bitmap) IsSet(location int) (bool, error) {
+ byteNumber, bitNumber := findBitForIndex(location)
+ if byteNumber > len(bm.bits) {
+ return false, fmt.Errorf("location %d is not in %d size bitmap", location, len(bm.bits)*8)
+ }
+ mask := byte(0x1) << bitNumber
+ return bm.bits[byteNumber]&mask == mask, nil
+}
+
+// Clear a specific bit location
+func (bm *Bitmap) Clear(location int) error {
+ byteNumber, bitNumber := findBitForIndex(location)
+ if byteNumber > len(bm.bits) {
+ return fmt.Errorf("location %d is not in %d size bitmap", location, len(bm.bits)*8)
+ }
+ mask := byte(0x1) << bitNumber
+ mask = ^mask
+ bm.bits[byteNumber] &= mask
+ return nil
+}
+
+// Set a specific bit location
+func (bm *Bitmap) Set(location int) error {
+ byteNumber, bitNumber := findBitForIndex(location)
+ if byteNumber > len(bm.bits) {
+ return fmt.Errorf("location %d is not in %d size bitmap", location, len(bm.bits)*8)
+ }
+ mask := byte(0x1) << bitNumber
+ bm.bits[byteNumber] |= mask
+ return nil
+}
+
+// FirstFree returns the first free bit in the bitmap
+// Begins at start, so if you want to find the first free bit, pass start=1.
+// Returns -1 if none found.
+func (bm *Bitmap) FirstFree(start int) int {
+ var location = -1
+ candidates := bm.bits[start/8:]
+ for i, b := range candidates {
+ // if all used, continue to next byte
+ if b&0xff == 0xff {
+ continue
+ }
+ // not all used, so find first bit set to 0
+ for j := uint8(0); j < 8; j++ {
+ mask := byte(0x1) << j
+ if b&mask != mask {
+ location = 8*i + int(j)
+ break
+ }
+ }
+ break
+ }
+ return location
+}
+
+// FirstSet returns location of first set bit in the bitmap
+func (bm *Bitmap) FirstSet() int {
+ var location = -1
+ for i, b := range bm.bits {
+ // if all free, continue to next
+ if b == 0x00 {
+ continue
+ }
+ // not all free, so find first bit set to 1
+ for j := uint8(0); j < 8; j++ {
+ mask := byte(0x1) << j
+ mask = ^mask
+ if b|mask != mask {
+ location = 8*i + (8 - int(j))
+ break
+ }
+ }
+ break
+ }
+ return location
+}
+
+// FreeList returns a slicelist of contiguous free locations by location.
+// It is sorted by location. If you want to sort it by size, uses sort.Slice
+// for example, if the bitmap is 10010010 00100000 10000010, it will return
+//
+// 1: 2, // 2 free bits at position 1
+// 4: 2, // 2 free bits at position 4
+// 8: 3, // 3 free bits at position 8
+// 11: 5 // 5 free bits at position 11
+// 17: 5 // 5 free bits at position 17
+// 23: 1, // 1 free bit at position 23
+//
+// if you want it in reverse order, just reverse the slice.
+func (bm *Bitmap) FreeList() []Contiguous {
+ var list []Contiguous
+ var location = -1
+ var count = 0
+ for i, b := range bm.bits {
+ for j := uint8(0); j < 8; j++ {
+ mask := byte(0x1) << j
+ switch {
+ case b&mask != mask:
+ if location == -1 {
+ location = 8*i + int(j)
+ }
+ count++
+ case location != -1:
+ list = append(list, Contiguous{location, count})
+ location = -1
+ count = 0
+ }
+ }
+ }
+ if location != -1 {
+ list = append(list, Contiguous{location, count})
+ }
+ return list
+}
+
+func findBitForIndex(index int) (byteNumber int, bitNumber uint8) {
+ return index / 8, uint8(index % 8)
+}
diff --git a/vendor/github.com/diskfs/go-diskfs/util/uniqify.go b/vendor/github.com/diskfs/go-diskfs/util/uniqify.go
new file mode 100644
index 00000000000..c091a6ecb8e
--- /dev/null
+++ b/vendor/github.com/diskfs/go-diskfs/util/uniqify.go
@@ -0,0 +1,13 @@
+package util
+
+func Uniqify[T comparable](s []T) []T {
+ m := make(map[T]bool)
+ for _, v := range s {
+ m[v] = true
+ }
+ var result = make([]T, 0, len(m))
+ for k := range m {
+ result = append(result, k)
+ }
+ return result
+}
diff --git a/vendor/gopkg.in/djherbis/times.v1/LICENSE b/vendor/github.com/djherbis/times/LICENSE
similarity index 100%
rename from vendor/gopkg.in/djherbis/times.v1/LICENSE
rename to vendor/github.com/djherbis/times/LICENSE
diff --git a/vendor/gopkg.in/djherbis/times.v1/README.md b/vendor/github.com/djherbis/times/README.md
similarity index 82%
rename from vendor/gopkg.in/djherbis/times.v1/README.md
rename to vendor/github.com/djherbis/times/README.md
index b68a132bf33..7c88890489c 100644
--- a/vendor/gopkg.in/djherbis/times.v1/README.md
+++ b/vendor/github.com/djherbis/times/README.md
@@ -4,7 +4,7 @@ times
[](https://godoc.org/github.com/djherbis/times)
[](https://github.com/djherbis/times/releases/latest)
[](LICENSE.txt)
-[](https://travis-ci.org/djherbis/times)
+[](https://github.com/djherbis/times/actions/workflows/go-test.yml)
[](https://coveralls.io/r/djherbis/times?branch=master)
[](https://goreportcard.com/report/github.com/djherbis/times)
[](https://sourcegraph.com/github.com/djherbis/times?badge)
@@ -21,7 +21,7 @@ package main
import (
"log"
- "gopkg.in/djherbis/times.v1"
+ "github.com/djherbis/times"
)
func main() {
@@ -50,8 +50,11 @@ Supported Times
| atime | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| mtime | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| ctime | ✓* | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | ✓ | ✓ |
-| btime | ✓ | | | | | ✓ | ✓| ✓ | | |
+| btime | ✓ | ✓* | | | | ✓ | ✓| ✓ | | |
+* Linux btime requires kernel 4.11 and filesystem support, so HasBirthTime = false.
+Use Timespec.HasBirthTime() to check if file has birth time.
+Get(FileInfo) never returns btime.
* Windows XP does not have ChangeTime so HasChangeTime = false,
however Vista onward does have ChangeTime so Timespec.HasChangeTime() will
only return false on those platforms when the syscall used to obtain them fails.
@@ -60,5 +63,5 @@ only return false on those platforms when the syscall used to obtain them fails.
Installation
------------
```sh
-go get gopkg.in/djherbis/times.v1
+go get -u github.com/djherbis/times
```
diff --git a/vendor/gopkg.in/djherbis/times.v1/ctime_windows.go b/vendor/github.com/djherbis/times/ctime_windows.go
similarity index 100%
rename from vendor/gopkg.in/djherbis/times.v1/ctime_windows.go
rename to vendor/github.com/djherbis/times/ctime_windows.go
diff --git a/vendor/gopkg.in/djherbis/times.v1/js.cover.dockerfile b/vendor/github.com/djherbis/times/js.cover.dockerfile
similarity index 75%
rename from vendor/gopkg.in/djherbis/times.v1/js.cover.dockerfile
rename to vendor/github.com/djherbis/times/js.cover.dockerfile
index 1f52edccd4a..f522a5e4f82 100644
--- a/vendor/gopkg.in/djherbis/times.v1/js.cover.dockerfile
+++ b/vendor/github.com/djherbis/times/js.cover.dockerfile
@@ -1,6 +1,6 @@
-FROM golang:1.16
+FROM golang:1.17
-RUN curl -sL https://deb.nodesource.com/setup_8.x | bash
+RUN curl -sL https://deb.nodesource.com/setup_17.x | bash
RUN apt-get install --yes nodejs
WORKDIR /go/src/github.com/djherbis/times
diff --git a/vendor/gopkg.in/djherbis/times.v1/js.cover.sh b/vendor/github.com/djherbis/times/js.cover.sh
similarity index 100%
rename from vendor/gopkg.in/djherbis/times.v1/js.cover.sh
rename to vendor/github.com/djherbis/times/js.cover.sh
diff --git a/vendor/github.com/djherbis/times/linux.cover.dockerfile b/vendor/github.com/djherbis/times/linux.cover.dockerfile
new file mode 100644
index 00000000000..e1a795927c9
--- /dev/null
+++ b/vendor/github.com/djherbis/times/linux.cover.dockerfile
@@ -0,0 +1,6 @@
+FROM golang:1.17
+
+WORKDIR /go/src/github.com/djherbis/times
+COPY . .
+
+RUN GO111MODULE=auto go test -covermode=count -coverprofile=profile.cov
diff --git a/vendor/github.com/djherbis/times/linux.cover.sh b/vendor/github.com/djherbis/times/linux.cover.sh
new file mode 100644
index 00000000000..83f97431b06
--- /dev/null
+++ b/vendor/github.com/djherbis/times/linux.cover.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+set -e
+
+docker build -f linux.cover.dockerfile -t linux.cover.djherbis.times .
+docker create --name linux.cover.djherbis.times linux.cover.djherbis.times
+docker cp linux.cover.djherbis.times:/go/src/github.com/djherbis/times/profile.cov .
+docker rm -v linux.cover.djherbis.times
\ No newline at end of file
diff --git a/vendor/gopkg.in/djherbis/times.v1/times.go b/vendor/github.com/djherbis/times/times.go
similarity index 100%
rename from vendor/gopkg.in/djherbis/times.v1/times.go
rename to vendor/github.com/djherbis/times/times.go
diff --git a/vendor/gopkg.in/djherbis/times.v1/times_aix.go b/vendor/github.com/djherbis/times/times_aix.go
similarity index 100%
rename from vendor/gopkg.in/djherbis/times.v1/times_aix.go
rename to vendor/github.com/djherbis/times/times_aix.go
diff --git a/vendor/gopkg.in/djherbis/times.v1/times_darwin.go b/vendor/github.com/djherbis/times/times_darwin.go
similarity index 100%
rename from vendor/gopkg.in/djherbis/times.v1/times_darwin.go
rename to vendor/github.com/djherbis/times/times_darwin.go
diff --git a/vendor/gopkg.in/djherbis/times.v1/times_dragonfly.go b/vendor/github.com/djherbis/times/times_dragonfly.go
similarity index 100%
rename from vendor/gopkg.in/djherbis/times.v1/times_dragonfly.go
rename to vendor/github.com/djherbis/times/times_dragonfly.go
diff --git a/vendor/gopkg.in/djherbis/times.v1/times_freebsd.go b/vendor/github.com/djherbis/times/times_freebsd.go
similarity index 100%
rename from vendor/gopkg.in/djherbis/times.v1/times_freebsd.go
rename to vendor/github.com/djherbis/times/times_freebsd.go
diff --git a/vendor/gopkg.in/djherbis/times.v1/times_js.go b/vendor/github.com/djherbis/times/times_js.go
similarity index 100%
rename from vendor/gopkg.in/djherbis/times.v1/times_js.go
rename to vendor/github.com/djherbis/times/times_js.go
diff --git a/vendor/github.com/djherbis/times/times_linux.go b/vendor/github.com/djherbis/times/times_linux.go
new file mode 100644
index 00000000000..85f87dba3c8
--- /dev/null
+++ b/vendor/github.com/djherbis/times/times_linux.go
@@ -0,0 +1,185 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// http://golang.org/src/os/stat_linux.go
+
+package times
+
+import (
+ "errors"
+ "os"
+ "sync/atomic"
+ "syscall"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+// HasChangeTime and HasBirthTime are true if and only if
+// the target OS supports them.
+const (
+ HasChangeTime = true
+ HasBirthTime = false
+)
+
+type timespec struct {
+ atime
+ mtime
+ ctime
+ nobtime
+}
+
+type timespecBtime struct {
+ atime
+ mtime
+ ctime
+ btime
+}
+
+var (
+ supportsStatx int32 = 1
+ statxFunc = unix.Statx
+)
+
+func isStatXSupported() bool {
+ return atomic.LoadInt32(&supportsStatx) == 1
+}
+
+func isStatXUnsupported(err error) bool {
+ // linux 4.10 and earlier does not support Statx syscall
+ if err != nil && errors.Is(err, unix.ENOSYS) {
+ atomic.StoreInt32(&supportsStatx, 0)
+ return true
+ }
+ return false
+}
+
+// Stat returns the Timespec for the given filename.
+func Stat(name string) (Timespec, error) {
+ if isStatXSupported() {
+ ts, err := statX(name)
+ if err == nil {
+ return ts, nil
+ }
+ if !isStatXUnsupported(err) {
+ return nil, err
+ }
+ // Fallback.
+ }
+ return stat(name, os.Stat)
+}
+
+func statX(name string) (Timespec, error) {
+ // https://man7.org/linux/man-pages/man2/statx.2.html
+ var statx unix.Statx_t
+ err := statxFunc(unix.AT_FDCWD, name, unix.AT_STATX_SYNC_AS_STAT, unix.STATX_ATIME|unix.STATX_MTIME|unix.STATX_CTIME|unix.STATX_BTIME, &statx)
+ if err != nil {
+ return nil, err
+ }
+ return extractTimes(&statx), nil
+}
+
+// Lstat returns the Timespec for the given filename, and does not follow Symlinks.
+func Lstat(name string) (Timespec, error) {
+ if isStatXSupported() {
+ ts, err := lstatx(name)
+ if err == nil {
+ return ts, nil
+ }
+ if !isStatXUnsupported(err) {
+ return nil, err
+ }
+ // Fallback.
+ }
+ return stat(name, os.Lstat)
+}
+
+func lstatx(name string) (Timespec, error) {
+ // https://man7.org/linux/man-pages/man2/statx.2.html
+ var statX unix.Statx_t
+ err := statxFunc(unix.AT_FDCWD, name, unix.AT_STATX_SYNC_AS_STAT|unix.AT_SYMLINK_NOFOLLOW, unix.STATX_ATIME|unix.STATX_MTIME|unix.STATX_CTIME|unix.STATX_BTIME, &statX)
+ if err != nil {
+ return nil, err
+ }
+ return extractTimes(&statX), nil
+}
+
+func statXFile(file *os.File) (Timespec, error) {
+ sc, err := file.SyscallConn()
+ if err != nil {
+ return nil, err
+ }
+
+ var statx unix.Statx_t
+ var statxErr error
+ err = sc.Control(func(fd uintptr) {
+ // https://man7.org/linux/man-pages/man2/statx.2.html
+ statxErr = statxFunc(int(fd), "", unix.AT_EMPTY_PATH|unix.AT_STATX_SYNC_AS_STAT, unix.STATX_ATIME|unix.STATX_MTIME|unix.STATX_CTIME|unix.STATX_BTIME, &statx)
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ if statxErr != nil {
+ return nil, statxErr
+ }
+
+ return extractTimes(&statx), nil
+}
+
+// StatFile returns the Timespec for the given *os.File.
+func StatFile(file *os.File) (Timespec, error) {
+ if isStatXSupported() {
+ ts, err := statXFile(file)
+ if err == nil {
+ return ts, nil
+ }
+ if !isStatXUnsupported(err) {
+ return nil, err
+ }
+ // Fallback.
+ }
+ return statFile(file)
+}
+
+func statFile(file *os.File) (Timespec, error) {
+ fi, err := file.Stat()
+ if err != nil {
+ return nil, err
+ }
+ return getTimespec(fi), nil
+}
+
+func statxTimestampToTime(ts unix.StatxTimestamp) time.Time {
+ return time.Unix(ts.Sec, int64(ts.Nsec))
+}
+
+func extractTimes(statx *unix.Statx_t) Timespec {
+ if statx.Mask&unix.STATX_BTIME == unix.STATX_BTIME {
+ var t timespecBtime
+ t.atime.v = statxTimestampToTime(statx.Atime)
+ t.mtime.v = statxTimestampToTime(statx.Mtime)
+ t.ctime.v = statxTimestampToTime(statx.Ctime)
+ t.btime.v = statxTimestampToTime(statx.Btime)
+ return t
+ }
+
+ var t timespec
+ t.atime.v = statxTimestampToTime(statx.Atime)
+ t.mtime.v = statxTimestampToTime(statx.Mtime)
+ t.ctime.v = statxTimestampToTime(statx.Ctime)
+ return t
+}
+
+func timespecToTime(ts syscall.Timespec) time.Time {
+ return time.Unix(int64(ts.Sec), int64(ts.Nsec))
+}
+
+func getTimespec(fi os.FileInfo) (t timespec) {
+ stat := fi.Sys().(*syscall.Stat_t)
+ t.atime.v = timespecToTime(stat.Atim)
+ t.mtime.v = timespecToTime(stat.Mtim)
+ t.ctime.v = timespecToTime(stat.Ctim)
+ return t
+}
diff --git a/vendor/gopkg.in/djherbis/times.v1/times_nacl.go b/vendor/github.com/djherbis/times/times_nacl.go
similarity index 100%
rename from vendor/gopkg.in/djherbis/times.v1/times_nacl.go
rename to vendor/github.com/djherbis/times/times_nacl.go
diff --git a/vendor/gopkg.in/djherbis/times.v1/times_netbsd.go b/vendor/github.com/djherbis/times/times_netbsd.go
similarity index 100%
rename from vendor/gopkg.in/djherbis/times.v1/times_netbsd.go
rename to vendor/github.com/djherbis/times/times_netbsd.go
diff --git a/vendor/gopkg.in/djherbis/times.v1/times_openbsd.go b/vendor/github.com/djherbis/times/times_openbsd.go
similarity index 100%
rename from vendor/gopkg.in/djherbis/times.v1/times_openbsd.go
rename to vendor/github.com/djherbis/times/times_openbsd.go
diff --git a/vendor/gopkg.in/djherbis/times.v1/times_plan9.go b/vendor/github.com/djherbis/times/times_plan9.go
similarity index 100%
rename from vendor/gopkg.in/djherbis/times.v1/times_plan9.go
rename to vendor/github.com/djherbis/times/times_plan9.go
diff --git a/vendor/gopkg.in/djherbis/times.v1/times_solaris.go b/vendor/github.com/djherbis/times/times_solaris.go
similarity index 100%
rename from vendor/gopkg.in/djherbis/times.v1/times_solaris.go
rename to vendor/github.com/djherbis/times/times_solaris.go
diff --git a/vendor/gopkg.in/djherbis/times.v1/times_linux.go b/vendor/github.com/djherbis/times/times_wasip1.go
similarity index 60%
rename from vendor/gopkg.in/djherbis/times.v1/times_linux.go
rename to vendor/github.com/djherbis/times/times_wasip1.go
index d9eb6976e2c..5463a8f3c65 100644
--- a/vendor/gopkg.in/djherbis/times.v1/times_linux.go
+++ b/vendor/github.com/djherbis/times/times_wasip1.go
@@ -2,7 +2,10 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// http://golang.org/src/os/stat_linux.go
+// https://github.com/golang/go/blob/master/src/os/stat_wasip1.go
+
+//go:build wasip1
+// +build wasip1
package times
@@ -26,14 +29,14 @@ type timespec struct {
nobtime
}
-func timespecToTime(ts syscall.Timespec) time.Time {
- return time.Unix(int64(ts.Sec), int64(ts.Nsec))
+func timespecToTime(sec, nsec int64) time.Time {
+ return time.Unix(sec, nsec)
}
func getTimespec(fi os.FileInfo) (t timespec) {
stat := fi.Sys().(*syscall.Stat_t)
- t.atime.v = timespecToTime(stat.Atim)
- t.mtime.v = timespecToTime(stat.Mtim)
- t.ctime.v = timespecToTime(stat.Ctim)
+ t.atime.v = timespecToTime(int64(stat.Atime), 0)
+ t.mtime.v = timespecToTime(int64(stat.Mtime), 0)
+ t.ctime.v = timespecToTime(int64(stat.Ctime), 0)
return t
}
diff --git a/vendor/gopkg.in/djherbis/times.v1/times_windows.go b/vendor/github.com/djherbis/times/times_windows.go
similarity index 100%
rename from vendor/gopkg.in/djherbis/times.v1/times_windows.go
rename to vendor/github.com/djherbis/times/times_windows.go
diff --git a/vendor/gopkg.in/djherbis/times.v1/use_generic_stat.go b/vendor/github.com/djherbis/times/use_generic_stat.go
similarity index 58%
rename from vendor/gopkg.in/djherbis/times.v1/use_generic_stat.go
rename to vendor/github.com/djherbis/times/use_generic_stat.go
index bc51560a8c4..0040aa9dc51 100644
--- a/vendor/gopkg.in/djherbis/times.v1/use_generic_stat.go
+++ b/vendor/github.com/djherbis/times/use_generic_stat.go
@@ -1,4 +1,4 @@
-// +build !windows
+// +build !windows,!linux
package times
@@ -13,3 +13,12 @@ func Stat(name string) (Timespec, error) {
func Lstat(name string) (Timespec, error) {
return stat(name, os.Lstat)
}
+
+// StatFile returns the Timespec for the given *os.File.
+func StatFile(file *os.File) (Timespec, error) {
+ fi, err := file.Stat()
+ if err != nil {
+ return nil, err
+ }
+ return getTimespec(fi), nil
+}
diff --git a/vendor/github.com/elliotwutingfeng/asciiset/README.md b/vendor/github.com/elliotwutingfeng/asciiset/README.md
index b3be0a69002..e9cac1b1414 100644
--- a/vendor/github.com/elliotwutingfeng/asciiset/README.md
+++ b/vendor/github.com/elliotwutingfeng/asciiset/README.md
@@ -2,7 +2,7 @@
[](https://pkg.go.dev/github.com/elliotwutingfeng/asciiset)
[](https://goreportcard.com/report/github.com/elliotwutingfeng/asciiset)
-[](https://codecov.io/gh/elliotwutingfeng/asciiset)
+[](https://coveralls.io/github/elliotwutingfeng/asciiset?branch=main)
[](LICENSE)
diff --git a/vendor/github.com/elliotwutingfeng/asciiset/asciiset.go b/vendor/github.com/elliotwutingfeng/asciiset/asciiset.go
index ac50bbd2eaa..ee8eb44490e 100644
--- a/vendor/github.com/elliotwutingfeng/asciiset/asciiset.go
+++ b/vendor/github.com/elliotwutingfeng/asciiset/asciiset.go
@@ -95,11 +95,15 @@ func (as *ASCIISet) Equals(as2 ASCIISet) bool {
return as[0] == as2[0] && as[1] == as2[1] && as[2] == as2[2] && as[3] == as2[3]
}
-// Visit calls the do function for each character of as in ascending numerical order.
+// Visit calls the do function for each character of the as set in ascending numerical order.
+//
// If do returns true, Visit returns immediately, skipping any remaining
-// characters, and returns true. It is safe for do to Add or Remove
-// characters. The behavior of Visit is undefined if do changes
-// the set in any other way.
+// characters, and returns true.
+//
+// It is safe for do to add or remove characters from the set
+// via the (*ASCIISet).Add or (*ASCIISet).Remove methods.
+//
+// The behavior of Visit is undefined if do changes the set in any other way.
func (as *ASCIISet) Visit(do func(n byte) (skip bool)) (aborted bool) {
var currentChar byte
for i := uint(0); i < 4; i++ {
diff --git a/vendor/github.com/elliotwutingfeng/asciiset/codecov.yml b/vendor/github.com/elliotwutingfeng/asciiset/codecov.yml
deleted file mode 100644
index d249e435836..00000000000
--- a/vendor/github.com/elliotwutingfeng/asciiset/codecov.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-codecov:
- require_ci_to_pass: yes
-
-coverage:
- precision: 2
- round: down
- range: "90...100"
- status:
- project:
- default:
- target: 90%
- threshold: 5%
- patch: off
-parsers:
- gcov:
- branch_detection:
- conditional: yes
- loop: yes
- method: no
- macro: no
-
-comment:
- layout: "reach,diff,flags,files,footer"
- behavior: default
- require_changes: no
diff --git a/vendor/github.com/elliotwutingfeng/asciiset/renovate.json b/vendor/github.com/elliotwutingfeng/asciiset/renovate.json
new file mode 100644
index 00000000000..e56999f74a4
--- /dev/null
+++ b/vendor/github.com/elliotwutingfeng/asciiset/renovate.json
@@ -0,0 +1,19 @@
+{
+ "extends": ["config:base"],
+ "ignoreTests": false,
+ "packageRules": [
+ {
+ "matchUpdateTypes": ["minor", "patch", "pin", "digest"],
+ "automerge": true
+ },
+ {
+ "description": "Opt-out of minimum Go version updates",
+ "matchManagers": ["gomod"],
+ "matchDepTypes": ["golang"],
+ "enabled": false
+ }
+ ],
+ "gomod": {
+ "postUpdateOptions": ["gomodUpdateImportPaths", "gomodTidy"]
+ }
+}
diff --git a/vendor/github.com/emirpasic/gods/LICENSE b/vendor/github.com/emirpasic/gods/LICENSE
deleted file mode 100644
index e5e449b6eca..00000000000
--- a/vendor/github.com/emirpasic/gods/LICENSE
+++ /dev/null
@@ -1,41 +0,0 @@
-Copyright (c) 2015, Emir Pasic
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this
- list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
--------------------------------------------------------------------------------
-
-AVL Tree:
-
-Copyright (c) 2017 Benjamin Scher Purcell
-
-Permission to use, copy, modify, and distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/vendor/github.com/emirpasic/gods/containers/containers.go b/vendor/github.com/emirpasic/gods/containers/containers.go
deleted file mode 100644
index a512a3cbaa7..00000000000
--- a/vendor/github.com/emirpasic/gods/containers/containers.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright (c) 2015, Emir Pasic. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package containers provides core interfaces and functions for data structures.
-//
-// Container is the base interface for all data structures to implement.
-//
-// Iterators provide stateful iterators.
-//
-// Enumerable provides Ruby inspired (each, select, map, find, any?, etc.) container functions.
-//
-// Serialization provides serializers (marshalers) and deserializers (unmarshalers).
-package containers
-
-import "github.com/emirpasic/gods/utils"
-
-// Container is base interface that all data structures implement.
-type Container interface {
- Empty() bool
- Size() int
- Clear()
- Values() []interface{}
- String() string
-}
-
-// GetSortedValues returns sorted container's elements with respect to the passed comparator.
-// Does not affect the ordering of elements within the container.
-func GetSortedValues(container Container, comparator utils.Comparator) []interface{} {
- values := container.Values()
- if len(values) < 2 {
- return values
- }
- utils.Sort(values, comparator)
- return values
-}
diff --git a/vendor/github.com/emirpasic/gods/containers/enumerable.go b/vendor/github.com/emirpasic/gods/containers/enumerable.go
deleted file mode 100644
index 70660054ae5..00000000000
--- a/vendor/github.com/emirpasic/gods/containers/enumerable.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright (c) 2015, Emir Pasic. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package containers
-
-// EnumerableWithIndex provides functions for ordered containers whose values can be fetched by an index.
-type EnumerableWithIndex interface {
- // Each calls the given function once for each element, passing that element's index and value.
- Each(func(index int, value interface{}))
-
- // Map invokes the given function once for each element and returns a
- // container containing the values returned by the given function.
- // Map(func(index int, value interface{}) interface{}) Container
-
- // Select returns a new container containing all elements for which the given function returns a true value.
- // Select(func(index int, value interface{}) bool) Container
-
- // Any passes each element of the container to the given function and
- // returns true if the function ever returns true for any element.
- Any(func(index int, value interface{}) bool) bool
-
- // All passes each element of the container to the given function and
- // returns true if the function returns true for all elements.
- All(func(index int, value interface{}) bool) bool
-
- // Find passes each element of the container to the given function and returns
- // the first (index,value) for which the function is true or -1,nil otherwise
- // if no element matches the criteria.
- Find(func(index int, value interface{}) bool) (int, interface{})
-}
-
-// EnumerableWithKey provides functions for ordered containers whose values whose elements are key/value pairs.
-type EnumerableWithKey interface {
- // Each calls the given function once for each element, passing that element's key and value.
- Each(func(key interface{}, value interface{}))
-
- // Map invokes the given function once for each element and returns a container
- // containing the values returned by the given function as key/value pairs.
- // Map(func(key interface{}, value interface{}) (interface{}, interface{})) Container
-
- // Select returns a new container containing all elements for which the given function returns a true value.
- // Select(func(key interface{}, value interface{}) bool) Container
-
- // Any passes each element of the container to the given function and
- // returns true if the function ever returns true for any element.
- Any(func(key interface{}, value interface{}) bool) bool
-
- // All passes each element of the container to the given function and
- // returns true if the function returns true for all elements.
- All(func(key interface{}, value interface{}) bool) bool
-
- // Find passes each element of the container to the given function and returns
- // the first (key,value) for which the function is true or nil,nil otherwise if no element
- // matches the criteria.
- Find(func(key interface{}, value interface{}) bool) (interface{}, interface{})
-}
diff --git a/vendor/github.com/emirpasic/gods/containers/iterator.go b/vendor/github.com/emirpasic/gods/containers/iterator.go
deleted file mode 100644
index 73994ec82a7..00000000000
--- a/vendor/github.com/emirpasic/gods/containers/iterator.go
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright (c) 2015, Emir Pasic. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package containers
-
-// IteratorWithIndex is stateful iterator for ordered containers whose values can be fetched by an index.
-type IteratorWithIndex interface {
- // Next moves the iterator to the next element and returns true if there was a next element in the container.
- // If Next() returns true, then next element's index and value can be retrieved by Index() and Value().
- // If Next() was called for the first time, then it will point the iterator to the first element if it exists.
- // Modifies the state of the iterator.
- Next() bool
-
- // Value returns the current element's value.
- // Does not modify the state of the iterator.
- Value() interface{}
-
- // Index returns the current element's index.
- // Does not modify the state of the iterator.
- Index() int
-
- // Begin resets the iterator to its initial state (one-before-first)
- // Call Next() to fetch the first element if any.
- Begin()
-
- // First moves the iterator to the first element and returns true if there was a first element in the container.
- // If First() returns true, then first element's index and value can be retrieved by Index() and Value().
- // Modifies the state of the iterator.
- First() bool
-
- // NextTo moves the iterator to the next element from current position that satisfies the condition given by the
- // passed function, and returns true if there was a next element in the container.
- // If NextTo() returns true, then next element's index and value can be retrieved by Index() and Value().
- // Modifies the state of the iterator.
- NextTo(func(index int, value interface{}) bool) bool
-}
-
-// IteratorWithKey is a stateful iterator for ordered containers whose elements are key value pairs.
-type IteratorWithKey interface {
- // Next moves the iterator to the next element and returns true if there was a next element in the container.
- // If Next() returns true, then next element's key and value can be retrieved by Key() and Value().
- // If Next() was called for the first time, then it will point the iterator to the first element if it exists.
- // Modifies the state of the iterator.
- Next() bool
-
- // Value returns the current element's value.
- // Does not modify the state of the iterator.
- Value() interface{}
-
- // Key returns the current element's key.
- // Does not modify the state of the iterator.
- Key() interface{}
-
- // Begin resets the iterator to its initial state (one-before-first)
- // Call Next() to fetch the first element if any.
- Begin()
-
- // First moves the iterator to the first element and returns true if there was a first element in the container.
- // If First() returns true, then first element's key and value can be retrieved by Key() and Value().
- // Modifies the state of the iterator.
- First() bool
-
- // NextTo moves the iterator to the next element from current position that satisfies the condition given by the
- // passed function, and returns true if there was a next element in the container.
- // If NextTo() returns true, then next element's key and value can be retrieved by Key() and Value().
- // Modifies the state of the iterator.
- NextTo(func(key interface{}, value interface{}) bool) bool
-}
-
-// ReverseIteratorWithIndex is stateful iterator for ordered containers whose values can be fetched by an index.
-//
-// Essentially it is the same as IteratorWithIndex, but provides additional:
-//
-// Prev() function to enable traversal in reverse
-//
-// Last() function to move the iterator to the last element.
-//
-// End() function to move the iterator past the last element (one-past-the-end).
-type ReverseIteratorWithIndex interface {
- // Prev moves the iterator to the previous element and returns true if there was a previous element in the container.
- // If Prev() returns true, then previous element's index and value can be retrieved by Index() and Value().
- // Modifies the state of the iterator.
- Prev() bool
-
- // End moves the iterator past the last element (one-past-the-end).
- // Call Prev() to fetch the last element if any.
- End()
-
- // Last moves the iterator to the last element and returns true if there was a last element in the container.
- // If Last() returns true, then last element's index and value can be retrieved by Index() and Value().
- // Modifies the state of the iterator.
- Last() bool
-
- // PrevTo moves the iterator to the previous element from current position that satisfies the condition given by the
- // passed function, and returns true if there was a next element in the container.
- // If PrevTo() returns true, then next element's index and value can be retrieved by Index() and Value().
- // Modifies the state of the iterator.
- PrevTo(func(index int, value interface{}) bool) bool
-
- IteratorWithIndex
-}
-
-// ReverseIteratorWithKey is a stateful iterator for ordered containers whose elements are key value pairs.
-//
-// Essentially it is the same as IteratorWithKey, but provides additional:
-//
-// Prev() function to enable traversal in reverse
-//
-// Last() function to move the iterator to the last element.
-type ReverseIteratorWithKey interface {
- // Prev moves the iterator to the previous element and returns true if there was a previous element in the container.
- // If Prev() returns true, then previous element's key and value can be retrieved by Key() and Value().
- // Modifies the state of the iterator.
- Prev() bool
-
- // End moves the iterator past the last element (one-past-the-end).
- // Call Prev() to fetch the last element if any.
- End()
-
- // Last moves the iterator to the last element and returns true if there was a last element in the container.
- // If Last() returns true, then last element's key and value can be retrieved by Key() and Value().
- // Modifies the state of the iterator.
- Last() bool
-
- // PrevTo moves the iterator to the previous element from current position that satisfies the condition given by the
- // passed function, and returns true if there was a next element in the container.
- // If PrevTo() returns true, then next element's key and value can be retrieved by Key() and Value().
- // Modifies the state of the iterator.
- PrevTo(func(key interface{}, value interface{}) bool) bool
-
- IteratorWithKey
-}
diff --git a/vendor/github.com/emirpasic/gods/containers/serialization.go b/vendor/github.com/emirpasic/gods/containers/serialization.go
deleted file mode 100644
index fd9cbe23a3f..00000000000
--- a/vendor/github.com/emirpasic/gods/containers/serialization.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright (c) 2015, Emir Pasic. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package containers
-
-// JSONSerializer provides JSON serialization
-type JSONSerializer interface {
- // ToJSON outputs the JSON representation of containers's elements.
- ToJSON() ([]byte, error)
- // MarshalJSON @implements json.Marshaler
- MarshalJSON() ([]byte, error)
-}
-
-// JSONDeserializer provides JSON deserialization
-type JSONDeserializer interface {
- // FromJSON populates containers's elements from the input JSON representation.
- FromJSON([]byte) error
- // UnmarshalJSON @implements json.Unmarshaler
- UnmarshalJSON([]byte) error
-}
diff --git a/vendor/github.com/emirpasic/gods/lists/arraylist/arraylist.go b/vendor/github.com/emirpasic/gods/lists/arraylist/arraylist.go
deleted file mode 100644
index 60ce4583203..00000000000
--- a/vendor/github.com/emirpasic/gods/lists/arraylist/arraylist.go
+++ /dev/null
@@ -1,227 +0,0 @@
-// Copyright (c) 2015, Emir Pasic. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package arraylist implements the array list.
-//
-// Structure is not thread safe.
-//
-// Reference: https://en.wikipedia.org/wiki/List_%28abstract_data_type%29
-package arraylist
-
-import (
- "fmt"
- "strings"
-
- "github.com/emirpasic/gods/lists"
- "github.com/emirpasic/gods/utils"
-)
-
-// Assert List implementation
-var _ lists.List = (*List)(nil)
-
-// List holds the elements in a slice
-type List struct {
- elements []interface{}
- size int
-}
-
-const (
- growthFactor = float32(2.0) // growth by 100%
- shrinkFactor = float32(0.25) // shrink when size is 25% of capacity (0 means never shrink)
-)
-
-// New instantiates a new list and adds the passed values, if any, to the list
-func New(values ...interface{}) *List {
- list := &List{}
- if len(values) > 0 {
- list.Add(values...)
- }
- return list
-}
-
-// Add appends a value at the end of the list
-func (list *List) Add(values ...interface{}) {
- list.growBy(len(values))
- for _, value := range values {
- list.elements[list.size] = value
- list.size++
- }
-}
-
-// Get returns the element at index.
-// Second return parameter is true if index is within bounds of the array and array is not empty, otherwise false.
-func (list *List) Get(index int) (interface{}, bool) {
-
- if !list.withinRange(index) {
- return nil, false
- }
-
- return list.elements[index], true
-}
-
-// Remove removes the element at the given index from the list.
-func (list *List) Remove(index int) {
-
- if !list.withinRange(index) {
- return
- }
-
- list.elements[index] = nil // cleanup reference
- copy(list.elements[index:], list.elements[index+1:list.size]) // shift to the left by one (slow operation, need ways to optimize this)
- list.size--
-
- list.shrink()
-}
-
-// Contains checks if elements (one or more) are present in the set.
-// All elements have to be present in the set for the method to return true.
-// Performance time complexity of n^2.
-// Returns true if no arguments are passed at all, i.e. set is always super-set of empty set.
-func (list *List) Contains(values ...interface{}) bool {
-
- for _, searchValue := range values {
- found := false
- for index := 0; index < list.size; index++ {
- if list.elements[index] == searchValue {
- found = true
- break
- }
- }
- if !found {
- return false
- }
- }
- return true
-}
-
-// Values returns all elements in the list.
-func (list *List) Values() []interface{} {
- newElements := make([]interface{}, list.size, list.size)
- copy(newElements, list.elements[:list.size])
- return newElements
-}
-
-//IndexOf returns index of provided element
-func (list *List) IndexOf(value interface{}) int {
- if list.size == 0 {
- return -1
- }
- for index, element := range list.elements {
- if element == value {
- return index
- }
- }
- return -1
-}
-
-// Empty returns true if list does not contain any elements.
-func (list *List) Empty() bool {
- return list.size == 0
-}
-
-// Size returns number of elements within the list.
-func (list *List) Size() int {
- return list.size
-}
-
-// Clear removes all elements from the list.
-func (list *List) Clear() {
- list.size = 0
- list.elements = []interface{}{}
-}
-
-// Sort sorts values (in-place) using.
-func (list *List) Sort(comparator utils.Comparator) {
- if len(list.elements) < 2 {
- return
- }
- utils.Sort(list.elements[:list.size], comparator)
-}
-
-// Swap swaps the two values at the specified positions.
-func (list *List) Swap(i, j int) {
- if list.withinRange(i) && list.withinRange(j) {
- list.elements[i], list.elements[j] = list.elements[j], list.elements[i]
- }
-}
-
-// Insert inserts values at specified index position shifting the value at that position (if any) and any subsequent elements to the right.
-// Does not do anything if position is negative or bigger than list's size
-// Note: position equal to list's size is valid, i.e. append.
-func (list *List) Insert(index int, values ...interface{}) {
-
- if !list.withinRange(index) {
- // Append
- if index == list.size {
- list.Add(values...)
- }
- return
- }
-
- l := len(values)
- list.growBy(l)
- list.size += l
- copy(list.elements[index+l:], list.elements[index:list.size-l])
- copy(list.elements[index:], values)
-}
-
-// Set the value at specified index
-// Does not do anything if position is negative or bigger than list's size
-// Note: position equal to list's size is valid, i.e. append.
-func (list *List) Set(index int, value interface{}) {
-
- if !list.withinRange(index) {
- // Append
- if index == list.size {
- list.Add(value)
- }
- return
- }
-
- list.elements[index] = value
-}
-
-// String returns a string representation of container
-func (list *List) String() string {
- str := "ArrayList\n"
- values := []string{}
- for _, value := range list.elements[:list.size] {
- values = append(values, fmt.Sprintf("%v", value))
- }
- str += strings.Join(values, ", ")
- return str
-}
-
-// Check that the index is within bounds of the list
-func (list *List) withinRange(index int) bool {
- return index >= 0 && index < list.size
-}
-
-func (list *List) resize(cap int) {
- newElements := make([]interface{}, cap, cap)
- copy(newElements, list.elements)
- list.elements = newElements
-}
-
-// Expand the array if necessary, i.e. capacity will be reached if we add n elements
-func (list *List) growBy(n int) {
- // When capacity is reached, grow by a factor of growthFactor and add number of elements
- currentCapacity := cap(list.elements)
- if list.size+n >= currentCapacity {
- newCapacity := int(growthFactor * float32(currentCapacity+n))
- list.resize(newCapacity)
- }
-}
-
-// Shrink the array if necessary, i.e. when size is shrinkFactor percent of current capacity
-func (list *List) shrink() {
- if shrinkFactor == 0.0 {
- return
- }
- // Shrink when size is at shrinkFactor * capacity
- currentCapacity := cap(list.elements)
- if list.size <= int(float32(currentCapacity)*shrinkFactor) {
- list.resize(list.size)
- }
-}
diff --git a/vendor/github.com/emirpasic/gods/lists/arraylist/enumerable.go b/vendor/github.com/emirpasic/gods/lists/arraylist/enumerable.go
deleted file mode 100644
index 8bd60b0a5cc..00000000000
--- a/vendor/github.com/emirpasic/gods/lists/arraylist/enumerable.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright (c) 2015, Emir Pasic. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package arraylist
-
-import "github.com/emirpasic/gods/containers"
-
-// Assert Enumerable implementation
-var _ containers.EnumerableWithIndex = (*List)(nil)
-
-// Each calls the given function once for each element, passing that element's index and value.
-func (list *List) Each(f func(index int, value interface{})) {
- iterator := list.Iterator()
- for iterator.Next() {
- f(iterator.Index(), iterator.Value())
- }
-}
-
-// Map invokes the given function once for each element and returns a
-// container containing the values returned by the given function.
-func (list *List) Map(f func(index int, value interface{}) interface{}) *List {
- newList := &List{}
- iterator := list.Iterator()
- for iterator.Next() {
- newList.Add(f(iterator.Index(), iterator.Value()))
- }
- return newList
-}
-
-// Select returns a new container containing all elements for which the given function returns a true value.
-func (list *List) Select(f func(index int, value interface{}) bool) *List {
- newList := &List{}
- iterator := list.Iterator()
- for iterator.Next() {
- if f(iterator.Index(), iterator.Value()) {
- newList.Add(iterator.Value())
- }
- }
- return newList
-}
-
-// Any passes each element of the collection to the given function and
-// returns true if the function ever returns true for any element.
-func (list *List) Any(f func(index int, value interface{}) bool) bool {
- iterator := list.Iterator()
- for iterator.Next() {
- if f(iterator.Index(), iterator.Value()) {
- return true
- }
- }
- return false
-}
-
-// All passes each element of the collection to the given function and
-// returns true if the function returns true for all elements.
-func (list *List) All(f func(index int, value interface{}) bool) bool {
- iterator := list.Iterator()
- for iterator.Next() {
- if !f(iterator.Index(), iterator.Value()) {
- return false
- }
- }
- return true
-}
-
-// Find passes each element of the container to the given function and returns
-// the first (index,value) for which the function is true or -1,nil otherwise
-// if no element matches the criteria.
-func (list *List) Find(f func(index int, value interface{}) bool) (int, interface{}) {
- iterator := list.Iterator()
- for iterator.Next() {
- if f(iterator.Index(), iterator.Value()) {
- return iterator.Index(), iterator.Value()
- }
- }
- return -1, nil
-}
diff --git a/vendor/github.com/emirpasic/gods/lists/arraylist/iterator.go b/vendor/github.com/emirpasic/gods/lists/arraylist/iterator.go
deleted file mode 100644
index f9efe20c541..00000000000
--- a/vendor/github.com/emirpasic/gods/lists/arraylist/iterator.go
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright (c) 2015, Emir Pasic. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package arraylist
-
-import "github.com/emirpasic/gods/containers"
-
-// Assert Iterator implementation
-var _ containers.ReverseIteratorWithIndex = (*Iterator)(nil)
-
-// Iterator holding the iterator's state
-type Iterator struct {
- list *List
- index int
-}
-
-// Iterator returns a stateful iterator whose values can be fetched by an index.
-func (list *List) Iterator() Iterator {
- return Iterator{list: list, index: -1}
-}
-
-// Next moves the iterator to the next element and returns true if there was a next element in the container.
-// If Next() returns true, then next element's index and value can be retrieved by Index() and Value().
-// If Next() was called for the first time, then it will point the iterator to the first element if it exists.
-// Modifies the state of the iterator.
-func (iterator *Iterator) Next() bool {
- if iterator.index < iterator.list.size {
- iterator.index++
- }
- return iterator.list.withinRange(iterator.index)
-}
-
-// Prev moves the iterator to the previous element and returns true if there was a previous element in the container.
-// If Prev() returns true, then previous element's index and value can be retrieved by Index() and Value().
-// Modifies the state of the iterator.
-func (iterator *Iterator) Prev() bool {
- if iterator.index >= 0 {
- iterator.index--
- }
- return iterator.list.withinRange(iterator.index)
-}
-
-// Value returns the current element's value.
-// Does not modify the state of the iterator.
-func (iterator *Iterator) Value() interface{} {
- return iterator.list.elements[iterator.index]
-}
-
-// Index returns the current element's index.
-// Does not modify the state of the iterator.
-func (iterator *Iterator) Index() int {
- return iterator.index
-}
-
-// Begin resets the iterator to its initial state (one-before-first)
-// Call Next() to fetch the first element if any.
-func (iterator *Iterator) Begin() {
- iterator.index = -1
-}
-
-// End moves the iterator past the last element (one-past-the-end).
-// Call Prev() to fetch the last element if any.
-func (iterator *Iterator) End() {
- iterator.index = iterator.list.size
-}
-
-// First moves the iterator to the first element and returns true if there was a first element in the container.
-// If First() returns true, then first element's index and value can be retrieved by Index() and Value().
-// Modifies the state of the iterator.
-func (iterator *Iterator) First() bool {
- iterator.Begin()
- return iterator.Next()
-}
-
-// Last moves the iterator to the last element and returns true if there was a last element in the container.
-// If Last() returns true, then last element's index and value can be retrieved by Index() and Value().
-// Modifies the state of the iterator.
-func (iterator *Iterator) Last() bool {
- iterator.End()
- return iterator.Prev()
-}
-
-// NextTo moves the iterator to the next element from current position that satisfies the condition given by the
-// passed function, and returns true if there was a next element in the container.
-// If NextTo() returns true, then next element's index and value can be retrieved by Index() and Value().
-// Modifies the state of the iterator.
-func (iterator *Iterator) NextTo(f func(index int, value interface{}) bool) bool {
- for iterator.Next() {
- index, value := iterator.Index(), iterator.Value()
- if f(index, value) {
- return true
- }
- }
- return false
-}
-
-// PrevTo moves the iterator to the previous element from current position that satisfies the condition given by the
-// passed function, and returns true if there was a next element in the container.
-// If PrevTo() returns true, then next element's index and value can be retrieved by Index() and Value().
-// Modifies the state of the iterator.
-func (iterator *Iterator) PrevTo(f func(index int, value interface{}) bool) bool {
- for iterator.Prev() {
- index, value := iterator.Index(), iterator.Value()
- if f(index, value) {
- return true
- }
- }
- return false
-}
diff --git a/vendor/github.com/emirpasic/gods/lists/arraylist/serialization.go b/vendor/github.com/emirpasic/gods/lists/arraylist/serialization.go
deleted file mode 100644
index 5e86fe96f33..00000000000
--- a/vendor/github.com/emirpasic/gods/lists/arraylist/serialization.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright (c) 2015, Emir Pasic. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package arraylist
-
-import (
- "encoding/json"
- "github.com/emirpasic/gods/containers"
-)
-
-// Assert Serialization implementation
-var _ containers.JSONSerializer = (*List)(nil)
-var _ containers.JSONDeserializer = (*List)(nil)
-
-// ToJSON outputs the JSON representation of list's elements.
-func (list *List) ToJSON() ([]byte, error) {
- return json.Marshal(list.elements[:list.size])
-}
-
-// FromJSON populates list's elements from the input JSON representation.
-func (list *List) FromJSON(data []byte) error {
- err := json.Unmarshal(data, &list.elements)
- if err == nil {
- list.size = len(list.elements)
- }
- return err
-}
-
-// UnmarshalJSON @implements json.Unmarshaler
-func (list *List) UnmarshalJSON(bytes []byte) error {
- return list.FromJSON(bytes)
-}
-
-// MarshalJSON @implements json.Marshaler
-func (list *List) MarshalJSON() ([]byte, error) {
- return list.ToJSON()
-}
diff --git a/vendor/github.com/emirpasic/gods/lists/lists.go b/vendor/github.com/emirpasic/gods/lists/lists.go
deleted file mode 100644
index 55bd619e235..00000000000
--- a/vendor/github.com/emirpasic/gods/lists/lists.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright (c) 2015, Emir Pasic. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package lists provides an abstract List interface.
-//
-// In computer science, a list or sequence is an abstract data type that represents an ordered sequence of values, where the same value may occur more than once. An instance of a list is a computer representation of the mathematical concept of a finite sequence; the (potentially) infinite analog of a list is a stream. Lists are a basic example of containers, as they contain other values. If the same value occurs multiple times, each occurrence is considered a distinct item.
-//
-// Reference: https://en.wikipedia.org/wiki/List_%28abstract_data_type%29
-package lists
-
-import (
- "github.com/emirpasic/gods/containers"
- "github.com/emirpasic/gods/utils"
-)
-
-// List interface that all lists implement
-type List interface {
- Get(index int) (interface{}, bool)
- Remove(index int)
- Add(values ...interface{})
- Contains(values ...interface{}) bool
- Sort(comparator utils.Comparator)
- Swap(index1, index2 int)
- Insert(index int, values ...interface{})
- Set(index int, value interface{})
-
- containers.Container
- // Empty() bool
- // Size() int
- // Clear()
- // Values() []interface{}
- // String() string
-}
diff --git a/vendor/github.com/emirpasic/gods/trees/binaryheap/binaryheap.go b/vendor/github.com/emirpasic/gods/trees/binaryheap/binaryheap.go
deleted file mode 100644
index e658f2577e3..00000000000
--- a/vendor/github.com/emirpasic/gods/trees/binaryheap/binaryheap.go
+++ /dev/null
@@ -1,166 +0,0 @@
-// Copyright (c) 2015, Emir Pasic. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package binaryheap implements a binary heap backed by array list.
-//
-// Comparator defines this heap as either min or max heap.
-//
-// Structure is not thread safe.
-//
-// References: http://en.wikipedia.org/wiki/Binary_heap
-package binaryheap
-
-import (
- "fmt"
- "github.com/emirpasic/gods/lists/arraylist"
- "github.com/emirpasic/gods/trees"
- "github.com/emirpasic/gods/utils"
- "strings"
-)
-
-// Assert Tree implementation
-var _ trees.Tree = (*Heap)(nil)
-
-// Heap holds elements in an array-list
-type Heap struct {
- list *arraylist.List
- Comparator utils.Comparator
-}
-
-// NewWith instantiates a new empty heap tree with the custom comparator.
-func NewWith(comparator utils.Comparator) *Heap {
- return &Heap{list: arraylist.New(), Comparator: comparator}
-}
-
-// NewWithIntComparator instantiates a new empty heap with the IntComparator, i.e. elements are of type int.
-func NewWithIntComparator() *Heap {
- return &Heap{list: arraylist.New(), Comparator: utils.IntComparator}
-}
-
-// NewWithStringComparator instantiates a new empty heap with the StringComparator, i.e. elements are of type string.
-func NewWithStringComparator() *Heap {
- return &Heap{list: arraylist.New(), Comparator: utils.StringComparator}
-}
-
-// Push adds a value onto the heap and bubbles it up accordingly.
-func (heap *Heap) Push(values ...interface{}) {
- if len(values) == 1 {
- heap.list.Add(values[0])
- heap.bubbleUp()
- } else {
- // Reference: https://en.wikipedia.org/wiki/Binary_heap#Building_a_heap
- for _, value := range values {
- heap.list.Add(value)
- }
- size := heap.list.Size()/2 + 1
- for i := size; i >= 0; i-- {
- heap.bubbleDownIndex(i)
- }
- }
-}
-
-// Pop removes top element on heap and returns it, or nil if heap is empty.
-// Second return parameter is true, unless the heap was empty and there was nothing to pop.
-func (heap *Heap) Pop() (value interface{}, ok bool) {
- value, ok = heap.list.Get(0)
- if !ok {
- return
- }
- lastIndex := heap.list.Size() - 1
- heap.list.Swap(0, lastIndex)
- heap.list.Remove(lastIndex)
- heap.bubbleDown()
- return
-}
-
-// Peek returns top element on the heap without removing it, or nil if heap is empty.
-// Second return parameter is true, unless the heap was empty and there was nothing to peek.
-func (heap *Heap) Peek() (value interface{}, ok bool) {
- return heap.list.Get(0)
-}
-
-// Empty returns true if heap does not contain any elements.
-func (heap *Heap) Empty() bool {
- return heap.list.Empty()
-}
-
-// Size returns number of elements within the heap.
-func (heap *Heap) Size() int {
- return heap.list.Size()
-}
-
-// Clear removes all elements from the heap.
-func (heap *Heap) Clear() {
- heap.list.Clear()
-}
-
-// Values returns all elements in the heap.
-func (heap *Heap) Values() []interface{} {
- values := make([]interface{}, heap.list.Size(), heap.list.Size())
- for it := heap.Iterator(); it.Next(); {
- values[it.Index()] = it.Value()
- }
- return values
-}
-
-// String returns a string representation of container
-func (heap *Heap) String() string {
- str := "BinaryHeap\n"
- values := []string{}
- for it := heap.Iterator(); it.Next(); {
- values = append(values, fmt.Sprintf("%v", it.Value()))
- }
- str += strings.Join(values, ", ")
- return str
-}
-
-// Performs the "bubble down" operation. This is to place the element that is at the root
-// of the heap in its correct place so that the heap maintains the min/max-heap order property.
-func (heap *Heap) bubbleDown() {
- heap.bubbleDownIndex(0)
-}
-
-// Performs the "bubble down" operation. This is to place the element that is at the index
-// of the heap in its correct place so that the heap maintains the min/max-heap order property.
-func (heap *Heap) bubbleDownIndex(index int) {
- size := heap.list.Size()
- for leftIndex := index<<1 + 1; leftIndex < size; leftIndex = index<<1 + 1 {
- rightIndex := index<<1 + 2
- smallerIndex := leftIndex
- leftValue, _ := heap.list.Get(leftIndex)
- rightValue, _ := heap.list.Get(rightIndex)
- if rightIndex < size && heap.Comparator(leftValue, rightValue) > 0 {
- smallerIndex = rightIndex
- }
- indexValue, _ := heap.list.Get(index)
- smallerValue, _ := heap.list.Get(smallerIndex)
- if heap.Comparator(indexValue, smallerValue) > 0 {
- heap.list.Swap(index, smallerIndex)
- } else {
- break
- }
- index = smallerIndex
- }
-}
-
-// Performs the "bubble up" operation. This is to place a newly inserted
-// element (i.e. last element in the list) in its correct place so that
-// the heap maintains the min/max-heap order property.
-func (heap *Heap) bubbleUp() {
- index := heap.list.Size() - 1
- for parentIndex := (index - 1) >> 1; index > 0; parentIndex = (index - 1) >> 1 {
- indexValue, _ := heap.list.Get(index)
- parentValue, _ := heap.list.Get(parentIndex)
- if heap.Comparator(parentValue, indexValue) <= 0 {
- break
- }
- heap.list.Swap(index, parentIndex)
- index = parentIndex
- }
-}
-
-// Check that the index is within bounds of the list
-func (heap *Heap) withinRange(index int) bool {
- return index >= 0 && index < heap.list.Size()
-}
diff --git a/vendor/github.com/emirpasic/gods/trees/binaryheap/iterator.go b/vendor/github.com/emirpasic/gods/trees/binaryheap/iterator.go
deleted file mode 100644
index f2179633b03..00000000000
--- a/vendor/github.com/emirpasic/gods/trees/binaryheap/iterator.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright (c) 2015, Emir Pasic. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package binaryheap
-
-import (
- "github.com/emirpasic/gods/containers"
-)
-
-// Assert Iterator implementation
-var _ containers.ReverseIteratorWithIndex = (*Iterator)(nil)
-
-// Iterator returns a stateful iterator whose values can be fetched by an index.
-type Iterator struct {
- heap *Heap
- index int
-}
-
-// Iterator returns a stateful iterator whose values can be fetched by an index.
-func (heap *Heap) Iterator() Iterator {
- return Iterator{heap: heap, index: -1}
-}
-
-// Next moves the iterator to the next element and returns true if there was a next element in the container.
-// If Next() returns true, then next element's index and value can be retrieved by Index() and Value().
-// If Next() was called for the first time, then it will point the iterator to the first element if it exists.
-// Modifies the state of the iterator.
-func (iterator *Iterator) Next() bool {
- if iterator.index < iterator.heap.Size() {
- iterator.index++
- }
- return iterator.heap.withinRange(iterator.index)
-}
-
-// Prev moves the iterator to the previous element and returns true if there was a previous element in the container.
-// If Prev() returns true, then previous element's index and value can be retrieved by Index() and Value().
-// Modifies the state of the iterator.
-func (iterator *Iterator) Prev() bool {
- if iterator.index >= 0 {
- iterator.index--
- }
- return iterator.heap.withinRange(iterator.index)
-}
-
-// Value returns the current element's value.
-// Does not modify the state of the iterator.
-func (iterator *Iterator) Value() interface{} {
- start, end := evaluateRange(iterator.index)
- if end > iterator.heap.Size() {
- end = iterator.heap.Size()
- }
- tmpHeap := NewWith(iterator.heap.Comparator)
- for n := start; n < end; n++ {
- value, _ := iterator.heap.list.Get(n)
- tmpHeap.Push(value)
- }
- for n := 0; n < iterator.index-start; n++ {
- tmpHeap.Pop()
- }
- value, _ := tmpHeap.Pop()
- return value
-}
-
-// Index returns the current element's index.
-// Does not modify the state of the iterator.
-func (iterator *Iterator) Index() int {
- return iterator.index
-}
-
-// Begin resets the iterator to its initial state (one-before-first)
-// Call Next() to fetch the first element if any.
-func (iterator *Iterator) Begin() {
- iterator.index = -1
-}
-
-// End moves the iterator past the last element (one-past-the-end).
-// Call Prev() to fetch the last element if any.
-func (iterator *Iterator) End() {
- iterator.index = iterator.heap.Size()
-}
-
-// First moves the iterator to the first element and returns true if there was a first element in the container.
-// If First() returns true, then first element's index and value can be retrieved by Index() and Value().
-// Modifies the state of the iterator.
-func (iterator *Iterator) First() bool {
- iterator.Begin()
- return iterator.Next()
-}
-
-// Last moves the iterator to the last element and returns true if there was a last element in the container.
-// If Last() returns true, then last element's index and value can be retrieved by Index() and Value().
-// Modifies the state of the iterator.
-func (iterator *Iterator) Last() bool {
- iterator.End()
- return iterator.Prev()
-}
-
-// NextTo moves the iterator to the next element from current position that satisfies the condition given by the
-// passed function, and returns true if there was a next element in the container.
-// If NextTo() returns true, then next element's index and value can be retrieved by Index() and Value().
-// Modifies the state of the iterator.
-func (iterator *Iterator) NextTo(f func(index int, value interface{}) bool) bool {
- for iterator.Next() {
- index, value := iterator.Index(), iterator.Value()
- if f(index, value) {
- return true
- }
- }
- return false
-}
-
-// PrevTo moves the iterator to the previous element from current position that satisfies the condition given by the
-// passed function, and returns true if there was a next element in the container.
-// If PrevTo() returns true, then next element's index and value can be retrieved by Index() and Value().
-// Modifies the state of the iterator.
-func (iterator *Iterator) PrevTo(f func(index int, value interface{}) bool) bool {
- for iterator.Prev() {
- index, value := iterator.Index(), iterator.Value()
- if f(index, value) {
- return true
- }
- }
- return false
-}
-
-// numOfBits counts the number of bits of an int
-func numOfBits(n int) uint {
- var count uint
- for n != 0 {
- count++
- n >>= 1
- }
- return count
-}
-
-// evaluateRange evaluates the index range [start,end) of same level nodes in the heap as the index
-func evaluateRange(index int) (start int, end int) {
- bits := numOfBits(index+1) - 1
- start = 1< b
-type Comparator func(a, b interface{}) int
-
-// StringComparator provides a fast comparison on strings
-func StringComparator(a, b interface{}) int {
- s1 := a.(string)
- s2 := b.(string)
- min := len(s2)
- if len(s1) < len(s2) {
- min = len(s1)
- }
- diff := 0
- for i := 0; i < min && diff == 0; i++ {
- diff = int(s1[i]) - int(s2[i])
- }
- if diff == 0 {
- diff = len(s1) - len(s2)
- }
- if diff < 0 {
- return -1
- }
- if diff > 0 {
- return 1
- }
- return 0
-}
-
-// IntComparator provides a basic comparison on int
-func IntComparator(a, b interface{}) int {
- aAsserted := a.(int)
- bAsserted := b.(int)
- switch {
- case aAsserted > bAsserted:
- return 1
- case aAsserted < bAsserted:
- return -1
- default:
- return 0
- }
-}
-
-// Int8Comparator provides a basic comparison on int8
-func Int8Comparator(a, b interface{}) int {
- aAsserted := a.(int8)
- bAsserted := b.(int8)
- switch {
- case aAsserted > bAsserted:
- return 1
- case aAsserted < bAsserted:
- return -1
- default:
- return 0
- }
-}
-
-// Int16Comparator provides a basic comparison on int16
-func Int16Comparator(a, b interface{}) int {
- aAsserted := a.(int16)
- bAsserted := b.(int16)
- switch {
- case aAsserted > bAsserted:
- return 1
- case aAsserted < bAsserted:
- return -1
- default:
- return 0
- }
-}
-
-// Int32Comparator provides a basic comparison on int32
-func Int32Comparator(a, b interface{}) int {
- aAsserted := a.(int32)
- bAsserted := b.(int32)
- switch {
- case aAsserted > bAsserted:
- return 1
- case aAsserted < bAsserted:
- return -1
- default:
- return 0
- }
-}
-
-// Int64Comparator provides a basic comparison on int64
-func Int64Comparator(a, b interface{}) int {
- aAsserted := a.(int64)
- bAsserted := b.(int64)
- switch {
- case aAsserted > bAsserted:
- return 1
- case aAsserted < bAsserted:
- return -1
- default:
- return 0
- }
-}
-
-// UIntComparator provides a basic comparison on uint
-func UIntComparator(a, b interface{}) int {
- aAsserted := a.(uint)
- bAsserted := b.(uint)
- switch {
- case aAsserted > bAsserted:
- return 1
- case aAsserted < bAsserted:
- return -1
- default:
- return 0
- }
-}
-
-// UInt8Comparator provides a basic comparison on uint8
-func UInt8Comparator(a, b interface{}) int {
- aAsserted := a.(uint8)
- bAsserted := b.(uint8)
- switch {
- case aAsserted > bAsserted:
- return 1
- case aAsserted < bAsserted:
- return -1
- default:
- return 0
- }
-}
-
-// UInt16Comparator provides a basic comparison on uint16
-func UInt16Comparator(a, b interface{}) int {
- aAsserted := a.(uint16)
- bAsserted := b.(uint16)
- switch {
- case aAsserted > bAsserted:
- return 1
- case aAsserted < bAsserted:
- return -1
- default:
- return 0
- }
-}
-
-// UInt32Comparator provides a basic comparison on uint32
-func UInt32Comparator(a, b interface{}) int {
- aAsserted := a.(uint32)
- bAsserted := b.(uint32)
- switch {
- case aAsserted > bAsserted:
- return 1
- case aAsserted < bAsserted:
- return -1
- default:
- return 0
- }
-}
-
-// UInt64Comparator provides a basic comparison on uint64
-func UInt64Comparator(a, b interface{}) int {
- aAsserted := a.(uint64)
- bAsserted := b.(uint64)
- switch {
- case aAsserted > bAsserted:
- return 1
- case aAsserted < bAsserted:
- return -1
- default:
- return 0
- }
-}
-
-// Float32Comparator provides a basic comparison on float32
-func Float32Comparator(a, b interface{}) int {
- aAsserted := a.(float32)
- bAsserted := b.(float32)
- switch {
- case aAsserted > bAsserted:
- return 1
- case aAsserted < bAsserted:
- return -1
- default:
- return 0
- }
-}
-
-// Float64Comparator provides a basic comparison on float64
-func Float64Comparator(a, b interface{}) int {
- aAsserted := a.(float64)
- bAsserted := b.(float64)
- switch {
- case aAsserted > bAsserted:
- return 1
- case aAsserted < bAsserted:
- return -1
- default:
- return 0
- }
-}
-
-// ByteComparator provides a basic comparison on byte
-func ByteComparator(a, b interface{}) int {
- aAsserted := a.(byte)
- bAsserted := b.(byte)
- switch {
- case aAsserted > bAsserted:
- return 1
- case aAsserted < bAsserted:
- return -1
- default:
- return 0
- }
-}
-
-// RuneComparator provides a basic comparison on rune
-func RuneComparator(a, b interface{}) int {
- aAsserted := a.(rune)
- bAsserted := b.(rune)
- switch {
- case aAsserted > bAsserted:
- return 1
- case aAsserted < bAsserted:
- return -1
- default:
- return 0
- }
-}
-
-// TimeComparator provides a basic comparison on time.Time
-func TimeComparator(a, b interface{}) int {
- aAsserted := a.(time.Time)
- bAsserted := b.(time.Time)
-
- switch {
- case aAsserted.After(bAsserted):
- return 1
- case aAsserted.Before(bAsserted):
- return -1
- default:
- return 0
- }
-}
diff --git a/vendor/github.com/emirpasic/gods/utils/sort.go b/vendor/github.com/emirpasic/gods/utils/sort.go
deleted file mode 100644
index 79ced1f5d26..00000000000
--- a/vendor/github.com/emirpasic/gods/utils/sort.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright (c) 2015, Emir Pasic. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package utils
-
-import "sort"
-
-// Sort sorts values (in-place) with respect to the given comparator.
-//
-// Uses Go's sort (hybrid of quicksort for large and then insertion sort for smaller slices).
-func Sort(values []interface{}, comparator Comparator) {
- sort.Sort(sortable{values, comparator})
-}
-
-type sortable struct {
- values []interface{}
- comparator Comparator
-}
-
-func (s sortable) Len() int {
- return len(s.values)
-}
-func (s sortable) Swap(i, j int) {
- s.values[i], s.values[j] = s.values[j], s.values[i]
-}
-func (s sortable) Less(i, j int) bool {
- return s.comparator(s.values[i], s.values[j]) < 0
-}
diff --git a/vendor/github.com/emirpasic/gods/utils/utils.go b/vendor/github.com/emirpasic/gods/utils/utils.go
deleted file mode 100644
index 262c62576ae..00000000000
--- a/vendor/github.com/emirpasic/gods/utils/utils.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright (c) 2015, Emir Pasic. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package utils provides common utility functions.
-//
-// Provided functionalities:
-// - sorting
-// - comparators
-package utils
-
-import (
- "fmt"
- "strconv"
-)
-
-// ToString converts a value to string.
-func ToString(value interface{}) string {
- switch value := value.(type) {
- case string:
- return value
- case int8:
- return strconv.FormatInt(int64(value), 10)
- case int16:
- return strconv.FormatInt(int64(value), 10)
- case int32:
- return strconv.FormatInt(int64(value), 10)
- case int64:
- return strconv.FormatInt(value, 10)
- case uint8:
- return strconv.FormatUint(uint64(value), 10)
- case uint16:
- return strconv.FormatUint(uint64(value), 10)
- case uint32:
- return strconv.FormatUint(uint64(value), 10)
- case uint64:
- return strconv.FormatUint(value, 10)
- case float32:
- return strconv.FormatFloat(float64(value), 'g', -1, 64)
- case float64:
- return strconv.FormatFloat(value, 'g', -1, 64)
- case bool:
- return strconv.FormatBool(value)
- default:
- return fmt.Sprintf("%+v", value)
- }
-}
diff --git a/vendor/github.com/go-git/gcfg/.gitignore b/vendor/github.com/go-git/gcfg/.gitignore
deleted file mode 100644
index 2d830686d42..00000000000
--- a/vendor/github.com/go-git/gcfg/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-coverage.out
diff --git a/vendor/github.com/go-git/gcfg/LICENSE b/vendor/github.com/go-git/gcfg/LICENSE
deleted file mode 100644
index 87a5cede339..00000000000
--- a/vendor/github.com/go-git/gcfg/LICENSE
+++ /dev/null
@@ -1,28 +0,0 @@
-Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go
-Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/go-git/gcfg/Makefile b/vendor/github.com/go-git/gcfg/Makefile
deleted file mode 100644
index 73604da6b61..00000000000
--- a/vendor/github.com/go-git/gcfg/Makefile
+++ /dev/null
@@ -1,17 +0,0 @@
-# General
-WORKDIR = $(PWD)
-
-# Go parameters
-GOCMD = go
-GOTEST = $(GOCMD) test
-
-# Coverage
-COVERAGE_REPORT = coverage.out
-COVERAGE_MODE = count
-
-test:
- $(GOTEST) ./...
-
-test-coverage:
- echo "" > $(COVERAGE_REPORT); \
- $(GOTEST) -coverprofile=$(COVERAGE_REPORT) -coverpkg=./... -covermode=$(COVERAGE_MODE) ./...
diff --git a/vendor/github.com/go-git/gcfg/README b/vendor/github.com/go-git/gcfg/README
deleted file mode 100644
index 1ff233a529d..00000000000
--- a/vendor/github.com/go-git/gcfg/README
+++ /dev/null
@@ -1,4 +0,0 @@
-Gcfg reads INI-style configuration files into Go structs;
-supports user-defined types and subsections.
-
-Package docs: https://godoc.org/gopkg.in/gcfg.v1
diff --git a/vendor/github.com/go-git/gcfg/doc.go b/vendor/github.com/go-git/gcfg/doc.go
deleted file mode 100644
index 7bdefbf0203..00000000000
--- a/vendor/github.com/go-git/gcfg/doc.go
+++ /dev/null
@@ -1,145 +0,0 @@
-// Package gcfg reads "INI-style" text-based configuration files with
-// "name=value" pairs grouped into sections (gcfg files).
-//
-// This package is still a work in progress; see the sections below for planned
-// changes.
-//
-// Syntax
-//
-// The syntax is based on that used by git config:
-// http://git-scm.com/docs/git-config#_syntax .
-// There are some (planned) differences compared to the git config format:
-// - improve data portability:
-// - must be encoded in UTF-8 (for now) and must not contain the 0 byte
-// - include and "path" type is not supported
-// (path type may be implementable as a user-defined type)
-// - internationalization
-// - section and variable names can contain unicode letters, unicode digits
-// (as defined in http://golang.org/ref/spec#Characters ) and hyphens
-// (U+002D), starting with a unicode letter
-// - disallow potentially ambiguous or misleading definitions:
-// - `[sec.sub]` format is not allowed (deprecated in gitconfig)
-// - `[sec ""]` is not allowed
-// - use `[sec]` for section name "sec" and empty subsection name
-// - (planned) within a single file, definitions must be contiguous for each:
-// - section: '[secA]' -> '[secB]' -> '[secA]' is an error
-// - subsection: '[sec "A"]' -> '[sec "B"]' -> '[sec "A"]' is an error
-// - multivalued variable: 'multi=a' -> 'other=x' -> 'multi=b' is an error
-//
-// Data structure
-//
-// The functions in this package read values into a user-defined struct.
-// Each section corresponds to a struct field in the config struct, and each
-// variable in a section corresponds to a data field in the section struct.
-// The mapping of each section or variable name to fields is done either based
-// on the "gcfg" struct tag or by matching the name of the section or variable,
-// ignoring case. In the latter case, hyphens '-' in section and variable names
-// correspond to underscores '_' in field names.
-// Fields must be exported; to use a section or variable name starting with a
-// letter that is neither upper- or lower-case, prefix the field name with 'X'.
-// (See https://code.google.com/p/go/issues/detail?id=5763#c4 .)
-//
-// For sections with subsections, the corresponding field in config must be a
-// map, rather than a struct, with string keys and pointer-to-struct values.
-// Values for subsection variables are stored in the map with the subsection
-// name used as the map key.
-// (Note that unlike section and variable names, subsection names are case
-// sensitive.)
-// When using a map, and there is a section with the same section name but
-// without a subsection name, its values are stored with the empty string used
-// as the key.
-// It is possible to provide default values for subsections in the section
-// "default-" (or by setting values in the corresponding struct
-// field "Default_").
-//
-// The functions in this package panic if config is not a pointer to a struct,
-// or when a field is not of a suitable type (either a struct or a map with
-// string keys and pointer-to-struct values).
-//
-// Parsing of values
-//
-// The section structs in the config struct may contain single-valued or
-// multi-valued variables. Variables of unnamed slice type (that is, a type
-// starting with `[]`) are treated as multi-value; all others (including named
-// slice types) are treated as single-valued variables.
-//
-// Single-valued variables are handled based on the type as follows.
-// Unnamed pointer types (that is, types starting with `*`) are dereferenced,
-// and if necessary, a new instance is allocated.
-//
-// For types implementing the encoding.TextUnmarshaler interface, the
-// UnmarshalText method is used to set the value. Implementing this method is
-// the recommended way for parsing user-defined types.
-//
-// For fields of string kind, the value string is assigned to the field, after
-// unquoting and unescaping as needed.
-// For fields of bool kind, the field is set to true if the value is "true",
-// "yes", "on" or "1", and set to false if the value is "false", "no", "off" or
-// "0", ignoring case. In addition, single-valued bool fields can be specified
-// with a "blank" value (variable name without equals sign and value); in such
-// case the value is set to true.
-//
-// Predefined integer types [u]int(|8|16|32|64) and big.Int are parsed as
-// decimal or hexadecimal (if having '0x' prefix). (This is to prevent
-// unintuitively handling zero-padded numbers as octal.) Other types having
-// [u]int* as the underlying type, such as os.FileMode and uintptr allow
-// decimal, hexadecimal, or octal values.
-// Parsing mode for integer types can be overridden using the struct tag option
-// ",int=mode" where mode is a combination of the 'd', 'h', and 'o' characters
-// (each standing for decimal, hexadecimal, and octal, respectively.)
-//
-// All other types are parsed using fmt.Sscanf with the "%v" verb.
-//
-// For multi-valued variables, each individual value is parsed as above and
-// appended to the slice. If the first value is specified as a "blank" value
-// (variable name without equals sign and value), a new slice is allocated;
-// that is any values previously set in the slice will be ignored.
-//
-// The types subpackage for provides helpers for parsing "enum-like" and integer
-// types.
-//
-// Error handling
-//
-// There are 3 types of errors:
-//
-// - programmer errors / panics:
-// - invalid configuration structure
-// - data errors:
-// - fatal errors:
-// - invalid configuration syntax
-// - warnings:
-// - data that doesn't belong to any part of the config structure
-//
-// Programmer errors trigger panics. These are should be fixed by the programmer
-// before releasing code that uses gcfg.
-//
-// Data errors cause gcfg to return a non-nil error value. This includes the
-// case when there are extra unknown key-value definitions in the configuration
-// data (extra data).
-// However, in some occasions it is desirable to be able to proceed in
-// situations when the only data error is that of extra data.
-// These errors are handled at a different (warning) priority and can be
-// filtered out programmatically. To ignore extra data warnings, wrap the
-// gcfg.Read*Into invocation into a call to gcfg.FatalOnly.
-//
-// TODO
-//
-// The following is a list of changes under consideration:
-// - documentation
-// - self-contained syntax documentation
-// - more practical examples
-// - move TODOs to issue tracker (eventually)
-// - syntax
-// - reconsider valid escape sequences
-// (gitconfig doesn't support \r in value, \t in subsection name, etc.)
-// - reading / parsing gcfg files
-// - define internal representation structure
-// - support multiple inputs (readers, strings, files)
-// - support declaring encoding (?)
-// - support varying fields sets for subsections (?)
-// - writing gcfg files
-// - error handling
-// - make error context accessible programmatically?
-// - limit input size?
-//
-package gcfg // import "github.com/go-git/gcfg"
diff --git a/vendor/github.com/go-git/gcfg/errors.go b/vendor/github.com/go-git/gcfg/errors.go
deleted file mode 100644
index 853c76021de..00000000000
--- a/vendor/github.com/go-git/gcfg/errors.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package gcfg
-
-import (
- "gopkg.in/warnings.v0"
-)
-
-// FatalOnly filters the results of a Read*Into invocation and returns only
-// fatal errors. That is, errors (warnings) indicating data for unknown
-// sections / variables is ignored. Example invocation:
-//
-// err := gcfg.FatalOnly(gcfg.ReadFileInto(&cfg, configFile))
-// if err != nil {
-// ...
-//
-func FatalOnly(err error) error {
- return warnings.FatalOnly(err)
-}
-
-func isFatal(err error) bool {
- _, ok := err.(extraData)
- return !ok
-}
-
-type extraData struct {
- section string
- subsection *string
- variable *string
-}
-
-func (e extraData) Error() string {
- s := "can't store data at section \"" + e.section + "\""
- if e.subsection != nil {
- s += ", subsection \"" + *e.subsection + "\""
- }
- if e.variable != nil {
- s += ", variable \"" + *e.variable + "\""
- }
- return s
-}
-
-var _ error = extraData{}
diff --git a/vendor/github.com/go-git/gcfg/read.go b/vendor/github.com/go-git/gcfg/read.go
deleted file mode 100644
index ea5d2edd060..00000000000
--- a/vendor/github.com/go-git/gcfg/read.go
+++ /dev/null
@@ -1,273 +0,0 @@
-package gcfg
-
-import (
- "fmt"
- "io"
- "os"
- "strings"
-
- "gopkg.in/warnings.v0"
-
- "github.com/go-git/gcfg/scanner"
- "github.com/go-git/gcfg/token"
-)
-
-var unescape = map[rune]rune{'\\': '\\', '"': '"', 'n': '\n', 't': '\t', 'b': '\b', '\n': '\n'}
-
-// no error: invalid literals should be caught by scanner
-func unquote(s string) string {
- u, q, esc := make([]rune, 0, len(s)), false, false
- for _, c := range s {
- if esc {
- uc, ok := unescape[c]
- switch {
- case ok:
- u = append(u, uc)
- fallthrough
- case !q && c == '\n':
- esc = false
- continue
- }
- panic("invalid escape sequence")
- }
- switch c {
- case '"':
- q = !q
- case '\\':
- esc = true
- default:
- u = append(u, c)
- }
- }
- if q {
- panic("missing end quote")
- }
- if esc {
- panic("invalid escape sequence")
- }
- return string(u)
-}
-
-func read(c *warnings.Collector, callback func(string, string, string, string, bool) error,
- fset *token.FileSet, file *token.File, src []byte) error {
- //
- var s scanner.Scanner
- var errs scanner.ErrorList
- s.Init(file, src, func(p token.Position, m string) { errs.Add(p, m) }, 0)
- sect, sectsub := "", ""
- pos, tok, lit := s.Scan()
- errfn := func(msg string) error {
- return fmt.Errorf("%s: %s", fset.Position(pos), msg)
- }
- for {
- if errs.Len() > 0 {
- if err := c.Collect(errs.Err()); err != nil {
- return err
- }
- }
- switch tok {
- case token.EOF:
- return nil
- case token.EOL, token.COMMENT:
- pos, tok, lit = s.Scan()
- case token.LBRACK:
- pos, tok, lit = s.Scan()
- if errs.Len() > 0 {
- if err := c.Collect(errs.Err()); err != nil {
- return err
- }
- }
- if tok != token.IDENT {
- if err := c.Collect(errfn("expected section name")); err != nil {
- return err
- }
- }
- sect, sectsub = lit, ""
- pos, tok, lit = s.Scan()
- if errs.Len() > 0 {
- if err := c.Collect(errs.Err()); err != nil {
- return err
- }
- }
- if tok == token.STRING {
- sectsub = unquote(lit)
- if sectsub == "" {
- if err := c.Collect(errfn("empty subsection name")); err != nil {
- return err
- }
- }
- pos, tok, lit = s.Scan()
- if errs.Len() > 0 {
- if err := c.Collect(errs.Err()); err != nil {
- return err
- }
- }
- }
- if tok != token.RBRACK {
- if sectsub == "" {
- if err := c.Collect(errfn("expected subsection name or right bracket")); err != nil {
- return err
- }
- }
- if err := c.Collect(errfn("expected right bracket")); err != nil {
- return err
- }
- }
- pos, tok, lit = s.Scan()
- if tok != token.EOL && tok != token.EOF && tok != token.COMMENT {
- if err := c.Collect(errfn("expected EOL, EOF, or comment")); err != nil {
- return err
- }
- }
- // If a section/subsection header was found, ensure a
- // container object is created, even if there are no
- // variables further down.
- err := c.Collect(callback(sect, sectsub, "", "", true))
- if err != nil {
- return err
- }
- case token.IDENT:
- if sect == "" {
- if err := c.Collect(errfn("expected section header")); err != nil {
- return err
- }
- }
- n := lit
- pos, tok, lit = s.Scan()
- if errs.Len() > 0 {
- return errs.Err()
- }
- blank, v := tok == token.EOF || tok == token.EOL || tok == token.COMMENT, ""
- if !blank {
- if tok != token.ASSIGN {
- if err := c.Collect(errfn("expected '='")); err != nil {
- return err
- }
- }
- pos, tok, lit = s.Scan()
- if errs.Len() > 0 {
- if err := c.Collect(errs.Err()); err != nil {
- return err
- }
- }
- if tok != token.STRING {
- if err := c.Collect(errfn("expected value")); err != nil {
- return err
- }
- }
- v = unquote(lit)
- pos, tok, lit = s.Scan()
- if errs.Len() > 0 {
- if err := c.Collect(errs.Err()); err != nil {
- return err
- }
- }
- if tok != token.EOL && tok != token.EOF && tok != token.COMMENT {
- if err := c.Collect(errfn("expected EOL, EOF, or comment")); err != nil {
- return err
- }
- }
- }
- err := c.Collect(callback(sect, sectsub, n, v, blank))
- if err != nil {
- return err
- }
- default:
- if sect == "" {
- if err := c.Collect(errfn("expected section header")); err != nil {
- return err
- }
- }
- if err := c.Collect(errfn("expected section header or variable declaration")); err != nil {
- return err
- }
- }
- }
- panic("never reached")
-}
-
-func readInto(config interface{}, fset *token.FileSet, file *token.File,
- src []byte) error {
- //
- c := warnings.NewCollector(isFatal)
- firstPassCallback := func(s string, ss string, k string, v string, bv bool) error {
- return set(c, config, s, ss, k, v, bv, false)
- }
- err := read(c, firstPassCallback, fset, file, src)
- if err != nil {
- return err
- }
- secondPassCallback := func(s string, ss string, k string, v string, bv bool) error {
- return set(c, config, s, ss, k, v, bv, true)
- }
- err = read(c, secondPassCallback, fset, file, src)
- if err != nil {
- return err
- }
- return c.Done()
-}
-
-// ReadWithCallback reads gcfg formatted data from reader and calls
-// callback with each section and option found.
-//
-// Callback is called with section, subsection, option key, option value
-// and blank value flag as arguments.
-//
-// When a section is found, callback is called with nil subsection, option key
-// and option value.
-//
-// When a subsection is found, callback is called with nil option key and
-// option value.
-//
-// If blank value flag is true, it means that the value was not set for an option
-// (as opposed to set to empty string).
-//
-// If callback returns an error, ReadWithCallback terminates with an error too.
-func ReadWithCallback(reader io.Reader, callback func(string, string, string, string, bool) error) error {
- src, err := io.ReadAll(reader)
- if err != nil {
- return err
- }
-
- fset := token.NewFileSet()
- file := fset.AddFile("", fset.Base(), len(src))
- c := warnings.NewCollector(isFatal)
-
- return read(c, callback, fset, file, src)
-}
-
-// ReadInto reads gcfg formatted data from reader and sets the values into the
-// corresponding fields in config.
-func ReadInto(config interface{}, reader io.Reader) error {
- src, err := io.ReadAll(reader)
- if err != nil {
- return err
- }
- fset := token.NewFileSet()
- file := fset.AddFile("", fset.Base(), len(src))
- return readInto(config, fset, file, src)
-}
-
-// ReadStringInto reads gcfg formatted data from str and sets the values into
-// the corresponding fields in config.
-func ReadStringInto(config interface{}, str string) error {
- r := strings.NewReader(str)
- return ReadInto(config, r)
-}
-
-// ReadFileInto reads gcfg formatted data from the file filename and sets the
-// values into the corresponding fields in config.
-func ReadFileInto(config interface{}, filename string) error {
- f, err := os.Open(filename)
- if err != nil {
- return err
- }
- defer f.Close()
- src, err := io.ReadAll(f)
- if err != nil {
- return err
- }
- fset := token.NewFileSet()
- file := fset.AddFile(filename, fset.Base(), len(src))
- return readInto(config, fset, file, src)
-}
diff --git a/vendor/github.com/go-git/gcfg/scanner/errors.go b/vendor/github.com/go-git/gcfg/scanner/errors.go
deleted file mode 100644
index a6e00f5c64e..00000000000
--- a/vendor/github.com/go-git/gcfg/scanner/errors.go
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package scanner
-
-import (
- "fmt"
- "io"
- "sort"
-)
-
-import (
- "github.com/go-git/gcfg/token"
-)
-
-// In an ErrorList, an error is represented by an *Error.
-// The position Pos, if valid, points to the beginning of
-// the offending token, and the error condition is described
-// by Msg.
-//
-type Error struct {
- Pos token.Position
- Msg string
-}
-
-// Error implements the error interface.
-func (e Error) Error() string {
- if e.Pos.Filename != "" || e.Pos.IsValid() {
- // don't print ""
- // TODO(gri) reconsider the semantics of Position.IsValid
- return e.Pos.String() + ": " + e.Msg
- }
- return e.Msg
-}
-
-// ErrorList is a list of *Errors.
-// The zero value for an ErrorList is an empty ErrorList ready to use.
-//
-type ErrorList []*Error
-
-// Add adds an Error with given position and error message to an ErrorList.
-func (p *ErrorList) Add(pos token.Position, msg string) {
- *p = append(*p, &Error{pos, msg})
-}
-
-// Reset resets an ErrorList to no errors.
-func (p *ErrorList) Reset() { *p = (*p)[0:0] }
-
-// ErrorList implements the sort Interface.
-func (p ErrorList) Len() int { return len(p) }
-func (p ErrorList) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
-
-func (p ErrorList) Less(i, j int) bool {
- e := &p[i].Pos
- f := &p[j].Pos
- if e.Filename < f.Filename {
- return true
- }
- if e.Filename == f.Filename {
- return e.Offset < f.Offset
- }
- return false
-}
-
-// Sort sorts an ErrorList. *Error entries are sorted by position,
-// other errors are sorted by error message, and before any *Error
-// entry.
-//
-func (p ErrorList) Sort() {
- sort.Sort(p)
-}
-
-// RemoveMultiples sorts an ErrorList and removes all but the first error per line.
-func (p *ErrorList) RemoveMultiples() {
- sort.Sort(p)
- var last token.Position // initial last.Line is != any legal error line
- i := 0
- for _, e := range *p {
- if e.Pos.Filename != last.Filename || e.Pos.Line != last.Line {
- last = e.Pos
- (*p)[i] = e
- i++
- }
- }
- (*p) = (*p)[0:i]
-}
-
-// An ErrorList implements the error interface.
-func (p ErrorList) Error() string {
- switch len(p) {
- case 0:
- return "no errors"
- case 1:
- return p[0].Error()
- }
- return fmt.Sprintf("%s (and %d more errors)", p[0], len(p)-1)
-}
-
-// Err returns an error equivalent to this error list.
-// If the list is empty, Err returns nil.
-func (p ErrorList) Err() error {
- if len(p) == 0 {
- return nil
- }
- return p
-}
-
-// PrintError is a utility function that prints a list of errors to w,
-// one error per line, if the err parameter is an ErrorList. Otherwise
-// it prints the err string.
-//
-func PrintError(w io.Writer, err error) {
- if list, ok := err.(ErrorList); ok {
- for _, e := range list {
- fmt.Fprintf(w, "%s\n", e)
- }
- } else if err != nil {
- fmt.Fprintf(w, "%s\n", err)
- }
-}
diff --git a/vendor/github.com/go-git/gcfg/scanner/scanner.go b/vendor/github.com/go-git/gcfg/scanner/scanner.go
deleted file mode 100644
index b3da03d0eb2..00000000000
--- a/vendor/github.com/go-git/gcfg/scanner/scanner.go
+++ /dev/null
@@ -1,334 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package scanner implements a scanner for gcfg configuration text.
-// It takes a []byte as source which can then be tokenized
-// through repeated calls to the Scan method.
-//
-// Note that the API for the scanner package may change to accommodate new
-// features or implementation changes in gcfg.
-package scanner
-
-import (
- "fmt"
- "path/filepath"
- "unicode"
- "unicode/utf8"
-
- "github.com/go-git/gcfg/token"
-)
-
-// An ErrorHandler may be provided to Scanner.Init. If a syntax error is
-// encountered and a handler was installed, the handler is called with a
-// position and an error message. The position points to the beginning of
-// the offending token.
-type ErrorHandler func(pos token.Position, msg string)
-
-// A Scanner holds the scanner's internal state while processing
-// a given text. It can be allocated as part of another data
-// structure but must be initialized via Init before use.
-type Scanner struct {
- // immutable state
- file *token.File // source file handle
- dir string // directory portion of file.Name()
- src []byte // source
- err ErrorHandler // error reporting; or nil
- mode Mode // scanning mode
-
- // scanning state
- ch rune // current character
- offset int // character offset
- rdOffset int // reading offset (position after current character)
- lineOffset int // current line offset
- nextVal bool // next token is expected to be a value
-
- // public state - ok to modify
- ErrorCount int // number of errors encountered
-}
-
-// Read the next Unicode char into s.ch.
-// s.ch < 0 means end-of-file.
-func (s *Scanner) next() {
- if s.rdOffset < len(s.src) {
- s.offset = s.rdOffset
- if s.ch == '\n' {
- s.lineOffset = s.offset
- s.file.AddLine(s.offset)
- }
- r, w := rune(s.src[s.rdOffset]), 1
- switch {
- case r == 0:
- s.error(s.offset, "illegal character NUL")
- case r >= 0x80:
- // not ASCII
- r, w = utf8.DecodeRune(s.src[s.rdOffset:])
- if r == utf8.RuneError && w == 1 {
- s.error(s.offset, "illegal UTF-8 encoding")
- }
- }
- s.rdOffset += w
- s.ch = r
- } else {
- s.offset = len(s.src)
- if s.ch == '\n' {
- s.lineOffset = s.offset
- s.file.AddLine(s.offset)
- }
- s.ch = -1 // eof
- }
-}
-
-// A mode value is a set of flags (or 0).
-// They control scanner behavior.
-type Mode uint
-
-const (
- ScanComments Mode = 1 << iota // return comments as COMMENT tokens
-)
-
-// Init prepares the scanner s to tokenize the text src by setting the
-// scanner at the beginning of src. The scanner uses the file set file
-// for position information and it adds line information for each line.
-// It is ok to re-use the same file when re-scanning the same file as
-// line information which is already present is ignored. Init causes a
-// panic if the file size does not match the src size.
-//
-// Calls to Scan will invoke the error handler err if they encounter a
-// syntax error and err is not nil. Also, for each error encountered,
-// the Scanner field ErrorCount is incremented by one. The mode parameter
-// determines how comments are handled.
-//
-// Note that Init may call err if there is an error in the first character
-// of the file.
-func (s *Scanner) Init(file *token.File, src []byte, err ErrorHandler, mode Mode) {
- // Explicitly initialize all fields since a scanner may be reused.
- if file.Size() != len(src) {
- panic(fmt.Sprintf("file size (%d) does not match src len (%d)", file.Size(), len(src)))
- }
- s.file = file
- s.dir, _ = filepath.Split(file.Name())
- s.src = src
- s.err = err
- s.mode = mode
-
- s.ch = ' '
- s.offset = 0
- s.rdOffset = 0
- s.lineOffset = 0
- s.ErrorCount = 0
- s.nextVal = false
-
- s.next()
-}
-
-func (s *Scanner) error(offs int, msg string) {
- if s.err != nil {
- s.err(s.file.Position(s.file.Pos(offs)), msg)
- }
- s.ErrorCount++
-}
-
-func (s *Scanner) scanComment() string {
- // initial [;#] already consumed
- offs := s.offset - 1 // position of initial [;#]
-
- for s.ch != '\n' && s.ch >= 0 {
- s.next()
- }
- return string(s.src[offs:s.offset])
-}
-
-func isLetter(ch rune) bool {
- return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch >= 0x80 && unicode.IsLetter(ch)
-}
-
-func isDigit(ch rune) bool {
- return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
-}
-
-func (s *Scanner) scanIdentifier() string {
- offs := s.offset
- for isLetter(s.ch) || isDigit(s.ch) || s.ch == '-' {
- s.next()
- }
- return string(s.src[offs:s.offset])
-}
-
-// val indicate if we are scanning a value (vs a header)
-func (s *Scanner) scanEscape(val bool) {
- offs := s.offset
- ch := s.ch
- s.next() // always make progress
- switch ch {
- case '\\', '"', '\n':
- // ok
- case 'n', 't', 'b':
- if val {
- break // ok
- }
- fallthrough
- default:
- s.error(offs, "unknown escape sequence")
- }
-}
-
-func (s *Scanner) scanString() string {
- // '"' opening already consumed
- offs := s.offset - 1
-
- for s.ch != '"' {
- ch := s.ch
- s.next()
- if ch == '\n' || ch < 0 {
- s.error(offs, "string not terminated")
- break
- }
- if ch == '\\' {
- s.scanEscape(false)
- }
- }
-
- s.next()
-
- return string(s.src[offs:s.offset])
-}
-
-func stripCR(b []byte) []byte {
- c := make([]byte, len(b))
- i := 0
- for _, ch := range b {
- if ch != '\r' {
- c[i] = ch
- i++
- }
- }
- return c[:i]
-}
-
-func (s *Scanner) scanValString() string {
- offs := s.offset
-
- hasCR := false
- end := offs
- inQuote := false
-loop:
- for inQuote || s.ch >= 0 && s.ch != '\n' && s.ch != ';' && s.ch != '#' {
- ch := s.ch
- s.next()
- switch {
- case inQuote && ch == '\\':
- s.scanEscape(true)
- case !inQuote && ch == '\\':
- if s.ch == '\r' {
- hasCR = true
- s.next()
- }
- if s.ch != '\n' {
- s.scanEscape(true)
- } else {
- s.next()
- }
- case ch == '"':
- inQuote = !inQuote
- case ch == '\r':
- hasCR = true
- case ch < 0 || inQuote && ch == '\n':
- s.error(offs, "string not terminated")
- break loop
- }
- if inQuote || !isWhiteSpace(ch) {
- end = s.offset
- }
- }
-
- lit := s.src[offs:end]
- if hasCR {
- lit = stripCR(lit)
- }
-
- return string(lit)
-}
-
-func isWhiteSpace(ch rune) bool {
- return ch == ' ' || ch == '\t' || ch == '\r'
-}
-
-func (s *Scanner) skipWhitespace() {
- for isWhiteSpace(s.ch) {
- s.next()
- }
-}
-
-// Scan scans the next token and returns the token position, the token,
-// and its literal string if applicable. The source end is indicated by
-// token.EOF.
-//
-// If the returned token is a literal (token.IDENT, token.STRING) or
-// token.COMMENT, the literal string has the corresponding value.
-//
-// If the returned token is token.ILLEGAL, the literal string is the
-// offending character.
-//
-// In all other cases, Scan returns an empty literal string.
-//
-// For more tolerant parsing, Scan will return a valid token if
-// possible even if a syntax error was encountered. Thus, even
-// if the resulting token sequence contains no illegal tokens,
-// a client may not assume that no error occurred. Instead it
-// must check the scanner's ErrorCount or the number of calls
-// of the error handler, if there was one installed.
-//
-// Scan adds line information to the file added to the file
-// set with Init. Token positions are relative to that file
-// and thus relative to the file set.
-func (s *Scanner) Scan() (pos token.Pos, tok token.Token, lit string) {
-scanAgain:
- s.skipWhitespace()
-
- // current token start
- pos = s.file.Pos(s.offset)
-
- // determine token value
- switch ch := s.ch; {
- case s.nextVal:
- lit = s.scanValString()
- tok = token.STRING
- s.nextVal = false
- case isLetter(ch):
- lit = s.scanIdentifier()
- tok = token.IDENT
- default:
- s.next() // always make progress
- switch ch {
- case -1:
- tok = token.EOF
- case '\n':
- tok = token.EOL
- case '"':
- tok = token.STRING
- lit = s.scanString()
- case '[':
- tok = token.LBRACK
- case ']':
- tok = token.RBRACK
- case ';', '#':
- // comment
- lit = s.scanComment()
- if s.mode&ScanComments == 0 {
- // skip comment
- goto scanAgain
- }
- tok = token.COMMENT
- case '=':
- tok = token.ASSIGN
- s.nextVal = true
- default:
- s.error(s.file.Offset(pos), fmt.Sprintf("illegal character %#U", ch))
- tok = token.ILLEGAL
- lit = string(ch)
- }
- }
-
- return
-}
diff --git a/vendor/github.com/go-git/gcfg/set.go b/vendor/github.com/go-git/gcfg/set.go
deleted file mode 100644
index dc9795dbdb2..00000000000
--- a/vendor/github.com/go-git/gcfg/set.go
+++ /dev/null
@@ -1,334 +0,0 @@
-package gcfg
-
-import (
- "bytes"
- "encoding"
- "encoding/gob"
- "fmt"
- "math/big"
- "reflect"
- "strings"
- "unicode"
- "unicode/utf8"
-
- "gopkg.in/warnings.v0"
-
- "github.com/go-git/gcfg/types"
-)
-
-type tag struct {
- ident string
- intMode string
-}
-
-func newTag(ts string) tag {
- t := tag{}
- s := strings.Split(ts, ",")
- t.ident = s[0]
- for _, tse := range s[1:] {
- if strings.HasPrefix(tse, "int=") {
- t.intMode = tse[len("int="):]
- }
- }
- return t
-}
-
-func fieldFold(v reflect.Value, name string) (reflect.Value, tag) {
- var n string
- r0, _ := utf8.DecodeRuneInString(name)
- if unicode.IsLetter(r0) && !unicode.IsLower(r0) && !unicode.IsUpper(r0) {
- n = "X"
- }
- n += strings.Replace(name, "-", "_", -1)
- f, ok := v.Type().FieldByNameFunc(func(fieldName string) bool {
- if !v.FieldByName(fieldName).CanSet() {
- return false
- }
- f, _ := v.Type().FieldByName(fieldName)
- t := newTag(f.Tag.Get("gcfg"))
- if t.ident != "" {
- return strings.EqualFold(t.ident, name)
- }
- return strings.EqualFold(n, fieldName)
- })
- if !ok {
- return reflect.Value{}, tag{}
- }
- return v.FieldByName(f.Name), newTag(f.Tag.Get("gcfg"))
-}
-
-type setter func(destp interface{}, blank bool, val string, t tag) error
-
-var errUnsupportedType = fmt.Errorf("unsupported type")
-var errBlankUnsupported = fmt.Errorf("blank value not supported for type")
-
-var setters = []setter{
- typeSetter, textUnmarshalerSetter, kindSetter, scanSetter,
-}
-
-func textUnmarshalerSetter(d interface{}, blank bool, val string, t tag) error {
- dtu, ok := d.(encoding.TextUnmarshaler)
- if !ok {
- return errUnsupportedType
- }
- if blank {
- return errBlankUnsupported
- }
- return dtu.UnmarshalText([]byte(val))
-}
-
-func boolSetter(d interface{}, blank bool, val string, t tag) error {
- if blank {
- reflect.ValueOf(d).Elem().Set(reflect.ValueOf(true))
- return nil
- }
- b, err := types.ParseBool(val)
- if err == nil {
- reflect.ValueOf(d).Elem().Set(reflect.ValueOf(b))
- }
- return err
-}
-
-func intMode(mode string) types.IntMode {
- var m types.IntMode
- if strings.ContainsAny(mode, "dD") {
- m |= types.Dec
- }
- if strings.ContainsAny(mode, "hH") {
- m |= types.Hex
- }
- if strings.ContainsAny(mode, "oO") {
- m |= types.Oct
- }
- return m
-}
-
-var typeModes = map[reflect.Type]types.IntMode{
- reflect.TypeOf(int(0)): types.Dec | types.Hex,
- reflect.TypeOf(int8(0)): types.Dec | types.Hex,
- reflect.TypeOf(int16(0)): types.Dec | types.Hex,
- reflect.TypeOf(int32(0)): types.Dec | types.Hex,
- reflect.TypeOf(int64(0)): types.Dec | types.Hex,
- reflect.TypeOf(uint(0)): types.Dec | types.Hex,
- reflect.TypeOf(uint8(0)): types.Dec | types.Hex,
- reflect.TypeOf(uint16(0)): types.Dec | types.Hex,
- reflect.TypeOf(uint32(0)): types.Dec | types.Hex,
- reflect.TypeOf(uint64(0)): types.Dec | types.Hex,
- // use default mode (allow dec/hex/oct) for uintptr type
- reflect.TypeOf(big.Int{}): types.Dec | types.Hex,
-}
-
-func intModeDefault(t reflect.Type) types.IntMode {
- m, ok := typeModes[t]
- if !ok {
- m = types.Dec | types.Hex | types.Oct
- }
- return m
-}
-
-func intSetter(d interface{}, blank bool, val string, t tag) error {
- if blank {
- return errBlankUnsupported
- }
- mode := intMode(t.intMode)
- if mode == 0 {
- mode = intModeDefault(reflect.TypeOf(d).Elem())
- }
- return types.ParseInt(d, val, mode)
-}
-
-func stringSetter(d interface{}, blank bool, val string, t tag) error {
- if blank {
- return errBlankUnsupported
- }
- dsp, ok := d.(*string)
- if !ok {
- return errUnsupportedType
- }
- *dsp = val
- return nil
-}
-
-var kindSetters = map[reflect.Kind]setter{
- reflect.String: stringSetter,
- reflect.Bool: boolSetter,
- reflect.Int: intSetter,
- reflect.Int8: intSetter,
- reflect.Int16: intSetter,
- reflect.Int32: intSetter,
- reflect.Int64: intSetter,
- reflect.Uint: intSetter,
- reflect.Uint8: intSetter,
- reflect.Uint16: intSetter,
- reflect.Uint32: intSetter,
- reflect.Uint64: intSetter,
- reflect.Uintptr: intSetter,
-}
-
-var typeSetters = map[reflect.Type]setter{
- reflect.TypeOf(big.Int{}): intSetter,
-}
-
-func typeSetter(d interface{}, blank bool, val string, tt tag) error {
- t := reflect.ValueOf(d).Type().Elem()
- setter, ok := typeSetters[t]
- if !ok {
- return errUnsupportedType
- }
- return setter(d, blank, val, tt)
-}
-
-func kindSetter(d interface{}, blank bool, val string, tt tag) error {
- k := reflect.ValueOf(d).Type().Elem().Kind()
- setter, ok := kindSetters[k]
- if !ok {
- return errUnsupportedType
- }
- return setter(d, blank, val, tt)
-}
-
-func scanSetter(d interface{}, blank bool, val string, tt tag) error {
- if blank {
- return errBlankUnsupported
- }
- return types.ScanFully(d, val, 'v')
-}
-
-func newValue(c *warnings.Collector, sect string, vCfg reflect.Value,
- vType reflect.Type) (reflect.Value, error) {
- //
- pv := reflect.New(vType)
- dfltName := "default-" + sect
- dfltField, _ := fieldFold(vCfg, dfltName)
- var err error
- if dfltField.IsValid() {
- b := bytes.NewBuffer(nil)
- ge := gob.NewEncoder(b)
- if err = c.Collect(ge.EncodeValue(dfltField)); err != nil {
- return pv, err
- }
- gd := gob.NewDecoder(bytes.NewReader(b.Bytes()))
- if err = c.Collect(gd.DecodeValue(pv.Elem())); err != nil {
- return pv, err
- }
- }
- return pv, nil
-}
-
-func set(c *warnings.Collector, cfg interface{}, sect, sub, name string,
- value string, blankValue bool, subsectPass bool) error {
- //
- vPCfg := reflect.ValueOf(cfg)
- if vPCfg.Kind() != reflect.Ptr || vPCfg.Elem().Kind() != reflect.Struct {
- panic(fmt.Errorf("config must be a pointer to a struct"))
- }
- vCfg := vPCfg.Elem()
- vSect, _ := fieldFold(vCfg, sect)
- if !vSect.IsValid() {
- err := extraData{section: sect}
- return c.Collect(err)
- }
- isSubsect := vSect.Kind() == reflect.Map
- if subsectPass != isSubsect {
- return nil
- }
- if isSubsect {
- vst := vSect.Type()
- if vst.Key().Kind() != reflect.String ||
- vst.Elem().Kind() != reflect.Ptr ||
- vst.Elem().Elem().Kind() != reflect.Struct {
- panic(fmt.Errorf("map field for section must have string keys and "+
- " pointer-to-struct values: section %q", sect))
- }
- if vSect.IsNil() {
- vSect.Set(reflect.MakeMap(vst))
- }
- k := reflect.ValueOf(sub)
- pv := vSect.MapIndex(k)
- if !pv.IsValid() {
- vType := vSect.Type().Elem().Elem()
- var err error
- if pv, err = newValue(c, sect, vCfg, vType); err != nil {
- return err
- }
- vSect.SetMapIndex(k, pv)
- }
- vSect = pv.Elem()
- } else if vSect.Kind() != reflect.Struct {
- panic(fmt.Errorf("field for section must be a map or a struct: "+
- "section %q", sect))
- } else if sub != "" {
- err := extraData{section: sect, subsection: &sub}
- return c.Collect(err)
- }
- // Empty name is a special value, meaning that only the
- // section/subsection object is to be created, with no values set.
- if name == "" {
- return nil
- }
- vVar, t := fieldFold(vSect, name)
- if !vVar.IsValid() {
- var err error
- if isSubsect {
- err = extraData{section: sect, subsection: &sub, variable: &name}
- } else {
- err = extraData{section: sect, variable: &name}
- }
- return c.Collect(err)
- }
- // vVal is either single-valued var, or newly allocated value within multi-valued var
- var vVal reflect.Value
- // multi-value if unnamed slice type
- isMulti := vVar.Type().Name() == "" && vVar.Kind() == reflect.Slice ||
- vVar.Type().Name() == "" && vVar.Kind() == reflect.Ptr && vVar.Type().Elem().Name() == "" && vVar.Type().Elem().Kind() == reflect.Slice
- if isMulti && vVar.Kind() == reflect.Ptr {
- if vVar.IsNil() {
- vVar.Set(reflect.New(vVar.Type().Elem()))
- }
- vVar = vVar.Elem()
- }
- if isMulti && blankValue {
- vVar.Set(reflect.Zero(vVar.Type()))
- return nil
- }
- if isMulti {
- vVal = reflect.New(vVar.Type().Elem()).Elem()
- } else {
- vVal = vVar
- }
- isDeref := vVal.Type().Name() == "" && vVal.Type().Kind() == reflect.Ptr
- isNew := isDeref && vVal.IsNil()
- // vAddr is address of value to set (dereferenced & allocated as needed)
- var vAddr reflect.Value
- switch {
- case isNew:
- vAddr = reflect.New(vVal.Type().Elem())
- case isDeref && !isNew:
- vAddr = vVal
- default:
- vAddr = vVal.Addr()
- }
- vAddrI := vAddr.Interface()
- err, ok := error(nil), false
- for _, s := range setters {
- err = s(vAddrI, blankValue, value, t)
- if err == nil {
- ok = true
- break
- }
- if err != errUnsupportedType {
- return err
- }
- }
- if !ok {
- // in case all setters returned errUnsupportedType
- return err
- }
- if isNew { // set reference if it was dereferenced and newly allocated
- vVal.Set(vAddr)
- }
- if isMulti { // append if multi-valued
- vVar.Set(reflect.Append(vVar, vVal))
- }
- return nil
-}
diff --git a/vendor/github.com/go-git/gcfg/token/position.go b/vendor/github.com/go-git/gcfg/token/position.go
deleted file mode 100644
index fc45c1e7693..00000000000
--- a/vendor/github.com/go-git/gcfg/token/position.go
+++ /dev/null
@@ -1,435 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// TODO(gri) consider making this a separate package outside the go directory.
-
-package token
-
-import (
- "fmt"
- "sort"
- "sync"
-)
-
-// -----------------------------------------------------------------------------
-// Positions
-
-// Position describes an arbitrary source position
-// including the file, line, and column location.
-// A Position is valid if the line number is > 0.
-//
-type Position struct {
- Filename string // filename, if any
- Offset int // offset, starting at 0
- Line int // line number, starting at 1
- Column int // column number, starting at 1 (character count)
-}
-
-// IsValid returns true if the position is valid.
-func (pos *Position) IsValid() bool { return pos.Line > 0 }
-
-// String returns a string in one of several forms:
-//
-// file:line:column valid position with file name
-// line:column valid position without file name
-// file invalid position with file name
-// - invalid position without file name
-//
-func (pos Position) String() string {
- s := pos.Filename
- if pos.IsValid() {
- if s != "" {
- s += ":"
- }
- s += fmt.Sprintf("%d:%d", pos.Line, pos.Column)
- }
- if s == "" {
- s = "-"
- }
- return s
-}
-
-// Pos is a compact encoding of a source position within a file set.
-// It can be converted into a Position for a more convenient, but much
-// larger, representation.
-//
-// The Pos value for a given file is a number in the range [base, base+size],
-// where base and size are specified when adding the file to the file set via
-// AddFile.
-//
-// To create the Pos value for a specific source offset, first add
-// the respective file to the current file set (via FileSet.AddFile)
-// and then call File.Pos(offset) for that file. Given a Pos value p
-// for a specific file set fset, the corresponding Position value is
-// obtained by calling fset.Position(p).
-//
-// Pos values can be compared directly with the usual comparison operators:
-// If two Pos values p and q are in the same file, comparing p and q is
-// equivalent to comparing the respective source file offsets. If p and q
-// are in different files, p < q is true if the file implied by p was added
-// to the respective file set before the file implied by q.
-//
-type Pos int
-
-// The zero value for Pos is NoPos; there is no file and line information
-// associated with it, and NoPos().IsValid() is false. NoPos is always
-// smaller than any other Pos value. The corresponding Position value
-// for NoPos is the zero value for Position.
-//
-const NoPos Pos = 0
-
-// IsValid returns true if the position is valid.
-func (p Pos) IsValid() bool {
- return p != NoPos
-}
-
-// -----------------------------------------------------------------------------
-// File
-
-// A File is a handle for a file belonging to a FileSet.
-// A File has a name, size, and line offset table.
-//
-type File struct {
- set *FileSet
- name string // file name as provided to AddFile
- base int // Pos value range for this file is [base...base+size]
- size int // file size as provided to AddFile
-
- // lines and infos are protected by set.mutex
- lines []int
- infos []lineInfo
-}
-
-// Name returns the file name of file f as registered with AddFile.
-func (f *File) Name() string {
- return f.name
-}
-
-// Base returns the base offset of file f as registered with AddFile.
-func (f *File) Base() int {
- return f.base
-}
-
-// Size returns the size of file f as registered with AddFile.
-func (f *File) Size() int {
- return f.size
-}
-
-// LineCount returns the number of lines in file f.
-func (f *File) LineCount() int {
- f.set.mutex.RLock()
- n := len(f.lines)
- f.set.mutex.RUnlock()
- return n
-}
-
-// AddLine adds the line offset for a new line.
-// The line offset must be larger than the offset for the previous line
-// and smaller than the file size; otherwise the line offset is ignored.
-//
-func (f *File) AddLine(offset int) {
- f.set.mutex.Lock()
- if i := len(f.lines); (i == 0 || f.lines[i-1] < offset) && offset < f.size {
- f.lines = append(f.lines, offset)
- }
- f.set.mutex.Unlock()
-}
-
-// SetLines sets the line offsets for a file and returns true if successful.
-// The line offsets are the offsets of the first character of each line;
-// for instance for the content "ab\nc\n" the line offsets are {0, 3}.
-// An empty file has an empty line offset table.
-// Each line offset must be larger than the offset for the previous line
-// and smaller than the file size; otherwise SetLines fails and returns
-// false.
-//
-func (f *File) SetLines(lines []int) bool {
- // verify validity of lines table
- size := f.size
- for i, offset := range lines {
- if i > 0 && offset <= lines[i-1] || size <= offset {
- return false
- }
- }
-
- // set lines table
- f.set.mutex.Lock()
- f.lines = lines
- f.set.mutex.Unlock()
- return true
-}
-
-// SetLinesForContent sets the line offsets for the given file content.
-func (f *File) SetLinesForContent(content []byte) {
- var lines []int
- line := 0
- for offset, b := range content {
- if line >= 0 {
- lines = append(lines, line)
- }
- line = -1
- if b == '\n' {
- line = offset + 1
- }
- }
-
- // set lines table
- f.set.mutex.Lock()
- f.lines = lines
- f.set.mutex.Unlock()
-}
-
-// A lineInfo object describes alternative file and line number
-// information (such as provided via a //line comment in a .go
-// file) for a given file offset.
-type lineInfo struct {
- // fields are exported to make them accessible to gob
- Offset int
- Filename string
- Line int
-}
-
-// AddLineInfo adds alternative file and line number information for
-// a given file offset. The offset must be larger than the offset for
-// the previously added alternative line info and smaller than the
-// file size; otherwise the information is ignored.
-//
-// AddLineInfo is typically used to register alternative position
-// information for //line filename:line comments in source files.
-//
-func (f *File) AddLineInfo(offset int, filename string, line int) {
- f.set.mutex.Lock()
- if i := len(f.infos); i == 0 || f.infos[i-1].Offset < offset && offset < f.size {
- f.infos = append(f.infos, lineInfo{offset, filename, line})
- }
- f.set.mutex.Unlock()
-}
-
-// Pos returns the Pos value for the given file offset;
-// the offset must be <= f.Size().
-// f.Pos(f.Offset(p)) == p.
-//
-func (f *File) Pos(offset int) Pos {
- if offset > f.size {
- panic("illegal file offset")
- }
- return Pos(f.base + offset)
-}
-
-// Offset returns the offset for the given file position p;
-// p must be a valid Pos value in that file.
-// f.Offset(f.Pos(offset)) == offset.
-//
-func (f *File) Offset(p Pos) int {
- if int(p) < f.base || int(p) > f.base+f.size {
- panic("illegal Pos value")
- }
- return int(p) - f.base
-}
-
-// Line returns the line number for the given file position p;
-// p must be a Pos value in that file or NoPos.
-//
-func (f *File) Line(p Pos) int {
- // TODO(gri) this can be implemented much more efficiently
- return f.Position(p).Line
-}
-
-func searchLineInfos(a []lineInfo, x int) int {
- return sort.Search(len(a), func(i int) bool { return a[i].Offset > x }) - 1
-}
-
-// info returns the file name, line, and column number for a file offset.
-func (f *File) info(offset int) (filename string, line, column int) {
- filename = f.name
- if i := searchInts(f.lines, offset); i >= 0 {
- line, column = i+1, offset-f.lines[i]+1
- }
- if len(f.infos) > 0 {
- // almost no files have extra line infos
- if i := searchLineInfos(f.infos, offset); i >= 0 {
- alt := &f.infos[i]
- filename = alt.Filename
- if i := searchInts(f.lines, alt.Offset); i >= 0 {
- line += alt.Line - i - 1
- }
- }
- }
- return
-}
-
-func (f *File) position(p Pos) (pos Position) {
- offset := int(p) - f.base
- pos.Offset = offset
- pos.Filename, pos.Line, pos.Column = f.info(offset)
- return
-}
-
-// Position returns the Position value for the given file position p;
-// p must be a Pos value in that file or NoPos.
-//
-func (f *File) Position(p Pos) (pos Position) {
- if p != NoPos {
- if int(p) < f.base || int(p) > f.base+f.size {
- panic("illegal Pos value")
- }
- pos = f.position(p)
- }
- return
-}
-
-// -----------------------------------------------------------------------------
-// FileSet
-
-// A FileSet represents a set of source files.
-// Methods of file sets are synchronized; multiple goroutines
-// may invoke them concurrently.
-//
-type FileSet struct {
- mutex sync.RWMutex // protects the file set
- base int // base offset for the next file
- files []*File // list of files in the order added to the set
- last *File // cache of last file looked up
-}
-
-// NewFileSet creates a new file set.
-func NewFileSet() *FileSet {
- s := new(FileSet)
- s.base = 1 // 0 == NoPos
- return s
-}
-
-// Base returns the minimum base offset that must be provided to
-// AddFile when adding the next file.
-//
-func (s *FileSet) Base() int {
- s.mutex.RLock()
- b := s.base
- s.mutex.RUnlock()
- return b
-
-}
-
-// AddFile adds a new file with a given filename, base offset, and file size
-// to the file set s and returns the file. Multiple files may have the same
-// name. The base offset must not be smaller than the FileSet's Base(), and
-// size must not be negative.
-//
-// Adding the file will set the file set's Base() value to base + size + 1
-// as the minimum base value for the next file. The following relationship
-// exists between a Pos value p for a given file offset offs:
-//
-// int(p) = base + offs
-//
-// with offs in the range [0, size] and thus p in the range [base, base+size].
-// For convenience, File.Pos may be used to create file-specific position
-// values from a file offset.
-//
-func (s *FileSet) AddFile(filename string, base, size int) *File {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- if base < s.base || size < 0 {
- panic("illegal base or size")
- }
- // base >= s.base && size >= 0
- f := &File{s, filename, base, size, []int{0}, nil}
- base += size + 1 // +1 because EOF also has a position
- if base < 0 {
- panic("token.Pos offset overflow (> 2G of source code in file set)")
- }
- // add the file to the file set
- s.base = base
- s.files = append(s.files, f)
- s.last = f
- return f
-}
-
-// Iterate calls f for the files in the file set in the order they were added
-// until f returns false.
-//
-func (s *FileSet) Iterate(f func(*File) bool) {
- for i := 0; ; i++ {
- var file *File
- s.mutex.RLock()
- if i < len(s.files) {
- file = s.files[i]
- }
- s.mutex.RUnlock()
- if file == nil || !f(file) {
- break
- }
- }
-}
-
-func searchFiles(a []*File, x int) int {
- return sort.Search(len(a), func(i int) bool { return a[i].base > x }) - 1
-}
-
-func (s *FileSet) file(p Pos) *File {
- // common case: p is in last file
- if f := s.last; f != nil && f.base <= int(p) && int(p) <= f.base+f.size {
- return f
- }
- // p is not in last file - search all files
- if i := searchFiles(s.files, int(p)); i >= 0 {
- f := s.files[i]
- // f.base <= int(p) by definition of searchFiles
- if int(p) <= f.base+f.size {
- s.last = f
- return f
- }
- }
- return nil
-}
-
-// File returns the file that contains the position p.
-// If no such file is found (for instance for p == NoPos),
-// the result is nil.
-//
-func (s *FileSet) File(p Pos) (f *File) {
- if p != NoPos {
- s.mutex.RLock()
- f = s.file(p)
- s.mutex.RUnlock()
- }
- return
-}
-
-// Position converts a Pos in the fileset into a general Position.
-func (s *FileSet) Position(p Pos) (pos Position) {
- if p != NoPos {
- s.mutex.RLock()
- if f := s.file(p); f != nil {
- pos = f.position(p)
- }
- s.mutex.RUnlock()
- }
- return
-}
-
-// -----------------------------------------------------------------------------
-// Helper functions
-
-func searchInts(a []int, x int) int {
- // This function body is a manually inlined version of:
- //
- // return sort.Search(len(a), func(i int) bool { return a[i] > x }) - 1
- //
- // With better compiler optimizations, this may not be needed in the
- // future, but at the moment this change improves the go/printer
- // benchmark performance by ~30%. This has a direct impact on the
- // speed of gofmt and thus seems worthwhile (2011-04-29).
- // TODO(gri): Remove this when compilers have caught up.
- i, j := 0, len(a)
- for i < j {
- h := i + (j-i)/2 // avoid overflow when computing h
- // i ≤ h < j
- if a[h] <= x {
- i = h + 1
- } else {
- j = h
- }
- }
- return i - 1
-}
diff --git a/vendor/github.com/go-git/gcfg/token/serialize.go b/vendor/github.com/go-git/gcfg/token/serialize.go
deleted file mode 100644
index 4adc8f9e334..00000000000
--- a/vendor/github.com/go-git/gcfg/token/serialize.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package token
-
-type serializedFile struct {
- // fields correspond 1:1 to fields with same (lower-case) name in File
- Name string
- Base int
- Size int
- Lines []int
- Infos []lineInfo
-}
-
-type serializedFileSet struct {
- Base int
- Files []serializedFile
-}
-
-// Read calls decode to deserialize a file set into s; s must not be nil.
-func (s *FileSet) Read(decode func(interface{}) error) error {
- var ss serializedFileSet
- if err := decode(&ss); err != nil {
- return err
- }
-
- s.mutex.Lock()
- s.base = ss.Base
- files := make([]*File, len(ss.Files))
- for i := 0; i < len(ss.Files); i++ {
- f := &ss.Files[i]
- files[i] = &File{s, f.Name, f.Base, f.Size, f.Lines, f.Infos}
- }
- s.files = files
- s.last = nil
- s.mutex.Unlock()
-
- return nil
-}
-
-// Write calls encode to serialize the file set s.
-func (s *FileSet) Write(encode func(interface{}) error) error {
- var ss serializedFileSet
-
- s.mutex.Lock()
- ss.Base = s.base
- files := make([]serializedFile, len(s.files))
- for i, f := range s.files {
- files[i] = serializedFile{f.name, f.base, f.size, f.lines, f.infos}
- }
- ss.Files = files
- s.mutex.Unlock()
-
- return encode(ss)
-}
diff --git a/vendor/github.com/go-git/gcfg/token/token.go b/vendor/github.com/go-git/gcfg/token/token.go
deleted file mode 100644
index b3c7c83fa9e..00000000000
--- a/vendor/github.com/go-git/gcfg/token/token.go
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package token defines constants representing the lexical tokens of the gcfg
-// configuration syntax and basic operations on tokens (printing, predicates).
-//
-// Note that the API for the token package may change to accommodate new
-// features or implementation changes in gcfg.
-//
-package token
-
-import "strconv"
-
-// Token is the set of lexical tokens of the gcfg configuration syntax.
-type Token int
-
-// The list of tokens.
-const (
- // Special tokens
- ILLEGAL Token = iota
- EOF
- COMMENT
-
- literal_beg
- // Identifiers and basic type literals
- // (these tokens stand for classes of literals)
- IDENT // section-name, variable-name
- STRING // "subsection-name", variable value
- literal_end
-
- operator_beg
- // Operators and delimiters
- ASSIGN // =
- LBRACK // [
- RBRACK // ]
- EOL // \n
- operator_end
-)
-
-var tokens = [...]string{
- ILLEGAL: "ILLEGAL",
-
- EOF: "EOF",
- COMMENT: "COMMENT",
-
- IDENT: "IDENT",
- STRING: "STRING",
-
- ASSIGN: "=",
- LBRACK: "[",
- RBRACK: "]",
- EOL: "\n",
-}
-
-// String returns the string corresponding to the token tok.
-// For operators and delimiters, the string is the actual token character
-// sequence (e.g., for the token ASSIGN, the string is "="). For all other
-// tokens the string corresponds to the token constant name (e.g. for the
-// token IDENT, the string is "IDENT").
-//
-func (tok Token) String() string {
- s := ""
- if 0 <= tok && tok < Token(len(tokens)) {
- s = tokens[tok]
- }
- if s == "" {
- s = "token(" + strconv.Itoa(int(tok)) + ")"
- }
- return s
-}
-
-// Predicates
-
-// IsLiteral returns true for tokens corresponding to identifiers
-// and basic type literals; it returns false otherwise.
-//
-func (tok Token) IsLiteral() bool { return literal_beg < tok && tok < literal_end }
-
-// IsOperator returns true for tokens corresponding to operators and
-// delimiters; it returns false otherwise.
-//
-func (tok Token) IsOperator() bool { return operator_beg < tok && tok < operator_end }
diff --git a/vendor/github.com/go-git/gcfg/types/bool.go b/vendor/github.com/go-git/gcfg/types/bool.go
deleted file mode 100644
index 8dcae0d8cfd..00000000000
--- a/vendor/github.com/go-git/gcfg/types/bool.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package types
-
-// BoolValues defines the name and value mappings for ParseBool.
-var BoolValues = map[string]interface{}{
- "true": true, "yes": true, "on": true, "1": true,
- "false": false, "no": false, "off": false, "0": false,
-}
-
-var boolParser = func() *EnumParser {
- ep := &EnumParser{}
- ep.AddVals(BoolValues)
- return ep
-}()
-
-// ParseBool parses bool values according to the definitions in BoolValues.
-// Parsing is case-insensitive.
-func ParseBool(s string) (bool, error) {
- v, err := boolParser.Parse(s)
- if err != nil {
- return false, err
- }
- return v.(bool), nil
-}
diff --git a/vendor/github.com/go-git/gcfg/types/doc.go b/vendor/github.com/go-git/gcfg/types/doc.go
deleted file mode 100644
index 9f9c345f6ea..00000000000
--- a/vendor/github.com/go-git/gcfg/types/doc.go
+++ /dev/null
@@ -1,4 +0,0 @@
-// Package types defines helpers for type conversions.
-//
-// The API for this package is not finalized yet.
-package types
diff --git a/vendor/github.com/go-git/gcfg/types/enum.go b/vendor/github.com/go-git/gcfg/types/enum.go
deleted file mode 100644
index 1a0c7ef453d..00000000000
--- a/vendor/github.com/go-git/gcfg/types/enum.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package types
-
-import (
- "fmt"
- "reflect"
- "strings"
-)
-
-// EnumParser parses "enum" values; i.e. a predefined set of strings to
-// predefined values.
-type EnumParser struct {
- Type string // type name; if not set, use type of first value added
- CaseMatch bool // if true, matching of strings is case-sensitive
- // PrefixMatch bool
- vals map[string]interface{}
-}
-
-// AddVals adds strings and values to an EnumParser.
-func (ep *EnumParser) AddVals(vals map[string]interface{}) {
- if ep.vals == nil {
- ep.vals = make(map[string]interface{})
- }
- for k, v := range vals {
- if ep.Type == "" {
- ep.Type = reflect.TypeOf(v).Name()
- }
- if !ep.CaseMatch {
- k = strings.ToLower(k)
- }
- ep.vals[k] = v
- }
-}
-
-// Parse parses the string and returns the value or an error.
-func (ep EnumParser) Parse(s string) (interface{}, error) {
- if !ep.CaseMatch {
- s = strings.ToLower(s)
- }
- v, ok := ep.vals[s]
- if !ok {
- return false, fmt.Errorf("failed to parse %s %#q", ep.Type, s)
- }
- return v, nil
-}
diff --git a/vendor/github.com/go-git/gcfg/types/int.go b/vendor/github.com/go-git/gcfg/types/int.go
deleted file mode 100644
index af7e75c1250..00000000000
--- a/vendor/github.com/go-git/gcfg/types/int.go
+++ /dev/null
@@ -1,86 +0,0 @@
-package types
-
-import (
- "fmt"
- "strings"
-)
-
-// An IntMode is a mode for parsing integer values, representing a set of
-// accepted bases.
-type IntMode uint8
-
-// IntMode values for ParseInt; can be combined using binary or.
-const (
- Dec IntMode = 1 << iota
- Hex
- Oct
-)
-
-// String returns a string representation of IntMode; e.g. `IntMode(Dec|Hex)`.
-func (m IntMode) String() string {
- var modes []string
- if m&Dec != 0 {
- modes = append(modes, "Dec")
- }
- if m&Hex != 0 {
- modes = append(modes, "Hex")
- }
- if m&Oct != 0 {
- modes = append(modes, "Oct")
- }
- return "IntMode(" + strings.Join(modes, "|") + ")"
-}
-
-var errIntAmbig = fmt.Errorf("ambiguous integer value; must include '0' prefix")
-
-func prefix0(val string) bool {
- return strings.HasPrefix(val, "0") || strings.HasPrefix(val, "-0")
-}
-
-func prefix0x(val string) bool {
- return strings.HasPrefix(val, "0x") || strings.HasPrefix(val, "-0x")
-}
-
-// ParseInt parses val using mode into intptr, which must be a pointer to an
-// integer kind type. Non-decimal value require prefix `0` or `0x` in the cases
-// when mode permits ambiguity of base; otherwise the prefix can be omitted.
-func ParseInt(intptr interface{}, val string, mode IntMode) error {
- val = strings.TrimSpace(val)
- verb := byte(0)
- switch mode {
- case Dec:
- verb = 'd'
- case Dec + Hex:
- if prefix0x(val) {
- verb = 'v'
- } else {
- verb = 'd'
- }
- case Dec + Oct:
- if prefix0(val) && !prefix0x(val) {
- verb = 'v'
- } else {
- verb = 'd'
- }
- case Dec + Hex + Oct:
- verb = 'v'
- case Hex:
- if prefix0x(val) {
- verb = 'v'
- } else {
- verb = 'x'
- }
- case Oct:
- verb = 'o'
- case Hex + Oct:
- if prefix0(val) {
- verb = 'v'
- } else {
- return errIntAmbig
- }
- }
- if verb == 0 {
- panic("unsupported mode")
- }
- return ScanFully(intptr, val, verb)
-}
diff --git a/vendor/github.com/go-git/gcfg/types/scan.go b/vendor/github.com/go-git/gcfg/types/scan.go
deleted file mode 100644
index db2f6ed3caf..00000000000
--- a/vendor/github.com/go-git/gcfg/types/scan.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package types
-
-import (
- "fmt"
- "io"
- "reflect"
-)
-
-// ScanFully uses fmt.Sscanf with verb to fully scan val into ptr.
-func ScanFully(ptr interface{}, val string, verb byte) error {
- t := reflect.ValueOf(ptr).Elem().Type()
- // attempt to read extra bytes to make sure the value is consumed
- var b []byte
- n, err := fmt.Sscanf(val, "%"+string(verb)+"%s", ptr, &b)
- switch {
- case n < 1 || n == 1 && err != io.EOF:
- return fmt.Errorf("failed to parse %q as %v: %v", val, t, err)
- case n > 1:
- return fmt.Errorf("failed to parse %q as %v: extra characters %q", val, t, string(b))
- }
- // n == 1 && err == io.EOF
- return nil
-}
diff --git a/vendor/github.com/go-git/go-billy/v5/.gitignore b/vendor/github.com/go-git/go-billy/v5/.gitignore
deleted file mode 100644
index 7aeb46699cd..00000000000
--- a/vendor/github.com/go-git/go-billy/v5/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-/coverage.txt
-/vendor
-Gopkg.lock
-Gopkg.toml
diff --git a/vendor/github.com/go-git/go-billy/v5/LICENSE b/vendor/github.com/go-git/go-billy/v5/LICENSE
deleted file mode 100644
index 9d60756894a..00000000000
--- a/vendor/github.com/go-git/go-billy/v5/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright 2017 Sourced Technologies S.L.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/go-git/go-billy/v5/Makefile b/vendor/github.com/go-git/go-billy/v5/Makefile
deleted file mode 100644
index 74dad8b4910..00000000000
--- a/vendor/github.com/go-git/go-billy/v5/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-# Go parameters
-GOCMD = go
-GOTEST = $(GOCMD) test
-
-.PHONY: test
-test:
- $(GOTEST) -race ./...
-
-test-coverage:
- echo "" > $(COVERAGE_REPORT); \
- $(GOTEST) -coverprofile=$(COVERAGE_REPORT) -coverpkg=./... -covermode=$(COVERAGE_MODE) ./...
diff --git a/vendor/github.com/go-git/go-billy/v5/README.md b/vendor/github.com/go-git/go-billy/v5/README.md
deleted file mode 100644
index da5c074782c..00000000000
--- a/vendor/github.com/go-git/go-billy/v5/README.md
+++ /dev/null
@@ -1,73 +0,0 @@
-# go-billy [](https://pkg.go.dev/github.com/go-git/go-billy/v5) [](https://github.com/go-git/go-billy/actions?query=workflow%3ATest)
-
-The missing interface filesystem abstraction for Go.
-Billy implements an interface based on the `os` standard library, allowing to develop applications without dependency on the underlying storage. Makes it virtually free to implement mocks and testing over filesystem operations.
-
-Billy was born as part of [go-git/go-git](https://github.com/go-git/go-git) project.
-
-## Installation
-
-```go
-import "github.com/go-git/go-billy/v5" // with go modules enabled (GO111MODULE=on or outside GOPATH)
-import "github.com/go-git/go-billy" // with go modules disabled
-```
-
-## Usage
-
-Billy exposes filesystems using the
-[`Filesystem` interface](https://pkg.go.dev/github.com/go-git/go-billy/v5?tab=doc#Filesystem).
-Each filesystem implementation gives you a `New` method, whose arguments depend on
-the implementation itself, that returns a new `Filesystem`.
-
-The following example caches in memory all readable files in a directory from any
-billy's filesystem implementation.
-
-```go
-func LoadToMemory(origin billy.Filesystem, path string) (*memory.Memory, error) {
- memory := memory.New()
-
- files, err := origin.ReadDir("/")
- if err != nil {
- return nil, err
- }
-
- for _, file := range files {
- if file.IsDir() {
- continue
- }
-
- src, err := origin.Open(file.Name())
- if err != nil {
- return nil, err
- }
-
- dst, err := memory.Create(file.Name())
- if err != nil {
- return nil, err
- }
-
- if _, err = io.Copy(dst, src); err != nil {
- return nil, err
- }
-
- if err := dst.Close(); err != nil {
- return nil, err
- }
-
- if err := src.Close(); err != nil {
- return nil, err
- }
- }
-
- return memory, nil
-}
-```
-
-## Why billy?
-
-The library billy deals with storage systems and Billy is the name of a well-known, IKEA
-bookcase. That's it.
-
-## License
-
-Apache License Version 2.0, see [LICENSE](LICENSE)
diff --git a/vendor/github.com/go-git/go-billy/v5/fs.go b/vendor/github.com/go-git/go-billy/v5/fs.go
deleted file mode 100644
index a9efccdeb2f..00000000000
--- a/vendor/github.com/go-git/go-billy/v5/fs.go
+++ /dev/null
@@ -1,202 +0,0 @@
-package billy
-
-import (
- "errors"
- "io"
- "os"
- "time"
-)
-
-var (
- ErrReadOnly = errors.New("read-only filesystem")
- ErrNotSupported = errors.New("feature not supported")
- ErrCrossedBoundary = errors.New("chroot boundary crossed")
-)
-
-// Capability holds the supported features of a billy filesystem. This does
-// not mean that the capability has to be supported by the underlying storage.
-// For example, a billy filesystem may support WriteCapability but the
-// storage be mounted in read only mode.
-type Capability uint64
-
-const (
- // WriteCapability means that the fs is writable.
- WriteCapability Capability = 1 << iota
- // ReadCapability means that the fs is readable.
- ReadCapability
- // ReadAndWriteCapability is the ability to open a file in read and write mode.
- ReadAndWriteCapability
- // SeekCapability means it is able to move position inside the file.
- SeekCapability
- // TruncateCapability means that a file can be truncated.
- TruncateCapability
- // LockCapability is the ability to lock a file.
- LockCapability
-
- // DefaultCapabilities lists all capable features supported by filesystems
- // without Capability interface. This list should not be changed until a
- // major version is released.
- DefaultCapabilities Capability = WriteCapability | ReadCapability |
- ReadAndWriteCapability | SeekCapability | TruncateCapability |
- LockCapability
-
- // AllCapabilities lists all capable features.
- AllCapabilities Capability = WriteCapability | ReadCapability |
- ReadAndWriteCapability | SeekCapability | TruncateCapability |
- LockCapability
-)
-
-// Filesystem abstract the operations in a storage-agnostic interface.
-// Each method implementation mimics the behavior of the equivalent functions
-// at the os package from the standard library.
-type Filesystem interface {
- Basic
- TempFile
- Dir
- Symlink
- Chroot
-}
-
-// Basic abstract the basic operations in a storage-agnostic interface as
-// an extension to the Basic interface.
-type Basic interface {
- // Create creates the named file with mode 0666 (before umask), truncating
- // it if it already exists. If successful, methods on the returned File can
- // be used for I/O; the associated file descriptor has mode O_RDWR.
- Create(filename string) (File, error)
- // Open opens the named file for reading. If successful, methods on the
- // returned file can be used for reading; the associated file descriptor has
- // mode O_RDONLY.
- Open(filename string) (File, error)
- // OpenFile is the generalized open call; most users will use Open or Create
- // instead. It opens the named file with specified flag (O_RDONLY etc.) and
- // perm, (0666 etc.) if applicable. If successful, methods on the returned
- // File can be used for I/O.
- OpenFile(filename string, flag int, perm os.FileMode) (File, error)
- // Stat returns a FileInfo describing the named file.
- Stat(filename string) (os.FileInfo, error)
- // Rename renames (moves) oldpath to newpath. If newpath already exists and
- // is not a directory, Rename replaces it. OS-specific restrictions may
- // apply when oldpath and newpath are in different directories.
- Rename(oldpath, newpath string) error
- // Remove removes the named file or directory.
- Remove(filename string) error
- // Join joins any number of path elements into a single path, adding a
- // Separator if necessary. Join calls filepath.Clean on the result; in
- // particular, all empty strings are ignored. On Windows, the result is a
- // UNC path if and only if the first path element is a UNC path.
- Join(elem ...string) string
-}
-
-type TempFile interface {
- // TempFile creates a new temporary file in the directory dir with a name
- // beginning with prefix, opens the file for reading and writing, and
- // returns the resulting *os.File. If dir is the empty string, TempFile
- // uses the default directory for temporary files (see os.TempDir).
- // Multiple programs calling TempFile simultaneously will not choose the
- // same file. The caller can use f.Name() to find the pathname of the file.
- // It is the caller's responsibility to remove the file when no longer
- // needed.
- TempFile(dir, prefix string) (File, error)
-}
-
-// Dir abstract the dir related operations in a storage-agnostic interface as
-// an extension to the Basic interface.
-type Dir interface {
- // ReadDir reads the directory named by dirname and returns a list of
- // directory entries sorted by filename.
- ReadDir(path string) ([]os.FileInfo, error)
- // MkdirAll creates a directory named path, along with any necessary
- // parents, and returns nil, or else returns an error. The permission bits
- // perm are used for all directories that MkdirAll creates. If path is/
- // already a directory, MkdirAll does nothing and returns nil.
- MkdirAll(filename string, perm os.FileMode) error
-}
-
-// Symlink abstract the symlink related operations in a storage-agnostic
-// interface as an extension to the Basic interface.
-type Symlink interface {
- // Lstat returns a FileInfo describing the named file. If the file is a
- // symbolic link, the returned FileInfo describes the symbolic link. Lstat
- // makes no attempt to follow the link.
- Lstat(filename string) (os.FileInfo, error)
- // Symlink creates a symbolic-link from link to target. target may be an
- // absolute or relative path, and need not refer to an existing node.
- // Parent directories of link are created as necessary.
- Symlink(target, link string) error
- // Readlink returns the target path of link.
- Readlink(link string) (string, error)
-}
-
-// Change abstract the FileInfo change related operations in a storage-agnostic
-// interface as an extension to the Basic interface
-type Change interface {
- // Chmod changes the mode of the named file to mode. If the file is a
- // symbolic link, it changes the mode of the link's target.
- Chmod(name string, mode os.FileMode) error
- // Lchown changes the numeric uid and gid of the named file. If the file is
- // a symbolic link, it changes the uid and gid of the link itself.
- Lchown(name string, uid, gid int) error
- // Chown changes the numeric uid and gid of the named file. If the file is a
- // symbolic link, it changes the uid and gid of the link's target.
- Chown(name string, uid, gid int) error
- // Chtimes changes the access and modification times of the named file,
- // similar to the Unix utime() or utimes() functions.
- //
- // The underlying filesystem may truncate or round the values to a less
- // precise time unit.
- Chtimes(name string, atime time.Time, mtime time.Time) error
-}
-
-// Chroot abstract the chroot related operations in a storage-agnostic interface
-// as an extension to the Basic interface.
-type Chroot interface {
- // Chroot returns a new filesystem from the same type where the new root is
- // the given path. Files outside of the designated directory tree cannot be
- // accessed.
- Chroot(path string) (Filesystem, error)
- // Root returns the root path of the filesystem.
- Root() string
-}
-
-// File represent a file, being a subset of the os.File
-type File interface {
- // Name returns the name of the file as presented to Open.
- Name() string
- io.Writer
- io.Reader
- io.ReaderAt
- io.Seeker
- io.Closer
- // Lock locks the file like e.g. flock. It protects against access from
- // other processes.
- Lock() error
- // Unlock unlocks the file.
- Unlock() error
- // Truncate the file.
- Truncate(size int64) error
-}
-
-// Capable interface can return the available features of a filesystem.
-type Capable interface {
- // Capabilities returns the capabilities of a filesystem in bit flags.
- Capabilities() Capability
-}
-
-// Capabilities returns the features supported by a filesystem. If the FS
-// does not implement Capable interface it returns all features.
-func Capabilities(fs Basic) Capability {
- capable, ok := fs.(Capable)
- if !ok {
- return DefaultCapabilities
- }
-
- return capable.Capabilities()
-}
-
-// CapabilityCheck tests the filesystem for the provided capabilities and
-// returns true in case it supports all of them.
-func CapabilityCheck(fs Basic, capabilities Capability) bool {
- fsCaps := Capabilities(fs)
- return fsCaps&capabilities == capabilities
-}
diff --git a/vendor/github.com/go-git/go-billy/v5/helper/chroot/chroot.go b/vendor/github.com/go-git/go-billy/v5/helper/chroot/chroot.go
deleted file mode 100644
index 8b44e784bd7..00000000000
--- a/vendor/github.com/go-git/go-billy/v5/helper/chroot/chroot.go
+++ /dev/null
@@ -1,242 +0,0 @@
-package chroot
-
-import (
- "os"
- "path/filepath"
- "strings"
-
- "github.com/go-git/go-billy/v5"
- "github.com/go-git/go-billy/v5/helper/polyfill"
-)
-
-// ChrootHelper is a helper to implement billy.Chroot.
-type ChrootHelper struct {
- underlying billy.Filesystem
- base string
-}
-
-// New creates a new filesystem wrapping up the given 'fs'.
-// The created filesystem has its base in the given ChrootHelperectory of the
-// underlying filesystem.
-func New(fs billy.Basic, base string) billy.Filesystem {
- return &ChrootHelper{
- underlying: polyfill.New(fs),
- base: base,
- }
-}
-
-func (fs *ChrootHelper) underlyingPath(filename string) (string, error) {
- if isCrossBoundaries(filename) {
- return "", billy.ErrCrossedBoundary
- }
-
- return fs.Join(fs.Root(), filename), nil
-}
-
-func isCrossBoundaries(path string) bool {
- path = filepath.ToSlash(path)
- path = filepath.Clean(path)
-
- return strings.HasPrefix(path, ".."+string(filepath.Separator))
-}
-
-func (fs *ChrootHelper) Create(filename string) (billy.File, error) {
- fullpath, err := fs.underlyingPath(filename)
- if err != nil {
- return nil, err
- }
-
- f, err := fs.underlying.Create(fullpath)
- if err != nil {
- return nil, err
- }
-
- return newFile(fs, f, filename), nil
-}
-
-func (fs *ChrootHelper) Open(filename string) (billy.File, error) {
- fullpath, err := fs.underlyingPath(filename)
- if err != nil {
- return nil, err
- }
-
- f, err := fs.underlying.Open(fullpath)
- if err != nil {
- return nil, err
- }
-
- return newFile(fs, f, filename), nil
-}
-
-func (fs *ChrootHelper) OpenFile(filename string, flag int, mode os.FileMode) (billy.File, error) {
- fullpath, err := fs.underlyingPath(filename)
- if err != nil {
- return nil, err
- }
-
- f, err := fs.underlying.OpenFile(fullpath, flag, mode)
- if err != nil {
- return nil, err
- }
-
- return newFile(fs, f, filename), nil
-}
-
-func (fs *ChrootHelper) Stat(filename string) (os.FileInfo, error) {
- fullpath, err := fs.underlyingPath(filename)
- if err != nil {
- return nil, err
- }
-
- return fs.underlying.Stat(fullpath)
-}
-
-func (fs *ChrootHelper) Rename(from, to string) error {
- var err error
- from, err = fs.underlyingPath(from)
- if err != nil {
- return err
- }
-
- to, err = fs.underlyingPath(to)
- if err != nil {
- return err
- }
-
- return fs.underlying.Rename(from, to)
-}
-
-func (fs *ChrootHelper) Remove(path string) error {
- fullpath, err := fs.underlyingPath(path)
- if err != nil {
- return err
- }
-
- return fs.underlying.Remove(fullpath)
-}
-
-func (fs *ChrootHelper) Join(elem ...string) string {
- return fs.underlying.Join(elem...)
-}
-
-func (fs *ChrootHelper) TempFile(dir, prefix string) (billy.File, error) {
- fullpath, err := fs.underlyingPath(dir)
- if err != nil {
- return nil, err
- }
-
- f, err := fs.underlying.(billy.TempFile).TempFile(fullpath, prefix)
- if err != nil {
- return nil, err
- }
-
- return newFile(fs, f, fs.Join(dir, filepath.Base(f.Name()))), nil
-}
-
-func (fs *ChrootHelper) ReadDir(path string) ([]os.FileInfo, error) {
- fullpath, err := fs.underlyingPath(path)
- if err != nil {
- return nil, err
- }
-
- return fs.underlying.(billy.Dir).ReadDir(fullpath)
-}
-
-func (fs *ChrootHelper) MkdirAll(filename string, perm os.FileMode) error {
- fullpath, err := fs.underlyingPath(filename)
- if err != nil {
- return err
- }
-
- return fs.underlying.(billy.Dir).MkdirAll(fullpath, perm)
-}
-
-func (fs *ChrootHelper) Lstat(filename string) (os.FileInfo, error) {
- fullpath, err := fs.underlyingPath(filename)
- if err != nil {
- return nil, err
- }
-
- return fs.underlying.(billy.Symlink).Lstat(fullpath)
-}
-
-func (fs *ChrootHelper) Symlink(target, link string) error {
- target = filepath.FromSlash(target)
-
- // only rewrite target if it's already absolute
- if filepath.IsAbs(target) || strings.HasPrefix(target, string(filepath.Separator)) {
- target = fs.Join(fs.Root(), target)
- target = filepath.Clean(filepath.FromSlash(target))
- }
-
- link, err := fs.underlyingPath(link)
- if err != nil {
- return err
- }
-
- return fs.underlying.(billy.Symlink).Symlink(target, link)
-}
-
-func (fs *ChrootHelper) Readlink(link string) (string, error) {
- fullpath, err := fs.underlyingPath(link)
- if err != nil {
- return "", err
- }
-
- target, err := fs.underlying.(billy.Symlink).Readlink(fullpath)
- if err != nil {
- return "", err
- }
-
- if !filepath.IsAbs(target) && !strings.HasPrefix(target, string(filepath.Separator)) {
- return target, nil
- }
-
- target, err = filepath.Rel(fs.base, target)
- if err != nil {
- return "", err
- }
-
- return string(os.PathSeparator) + target, nil
-}
-
-func (fs *ChrootHelper) Chroot(path string) (billy.Filesystem, error) {
- fullpath, err := fs.underlyingPath(path)
- if err != nil {
- return nil, err
- }
-
- return New(fs.underlying, fullpath), nil
-}
-
-func (fs *ChrootHelper) Root() string {
- return fs.base
-}
-
-func (fs *ChrootHelper) Underlying() billy.Basic {
- return fs.underlying
-}
-
-// Capabilities implements the Capable interface.
-func (fs *ChrootHelper) Capabilities() billy.Capability {
- return billy.Capabilities(fs.underlying)
-}
-
-type file struct {
- billy.File
- name string
-}
-
-func newFile(fs billy.Filesystem, f billy.File, filename string) billy.File {
- filename = fs.Join(fs.Root(), filename)
- filename, _ = filepath.Rel(fs.Root(), filename)
-
- return &file{
- File: f,
- name: filename,
- }
-}
-
-func (f *file) Name() string {
- return f.name
-}
diff --git a/vendor/github.com/go-git/go-billy/v5/helper/polyfill/polyfill.go b/vendor/github.com/go-git/go-billy/v5/helper/polyfill/polyfill.go
deleted file mode 100644
index 1efce0e7b8f..00000000000
--- a/vendor/github.com/go-git/go-billy/v5/helper/polyfill/polyfill.go
+++ /dev/null
@@ -1,105 +0,0 @@
-package polyfill
-
-import (
- "os"
- "path/filepath"
-
- "github.com/go-git/go-billy/v5"
-)
-
-// Polyfill is a helper that implements all missing method from billy.Filesystem.
-type Polyfill struct {
- billy.Basic
- c capabilities
-}
-
-type capabilities struct{ tempfile, dir, symlink, chroot bool }
-
-// New creates a new filesystem wrapping up 'fs' the intercepts all the calls
-// made and errors if fs doesn't implement any of the billy interfaces.
-func New(fs billy.Basic) billy.Filesystem {
- if original, ok := fs.(billy.Filesystem); ok {
- return original
- }
-
- h := &Polyfill{Basic: fs}
-
- _, h.c.tempfile = h.Basic.(billy.TempFile)
- _, h.c.dir = h.Basic.(billy.Dir)
- _, h.c.symlink = h.Basic.(billy.Symlink)
- _, h.c.chroot = h.Basic.(billy.Chroot)
- return h
-}
-
-func (h *Polyfill) TempFile(dir, prefix string) (billy.File, error) {
- if !h.c.tempfile {
- return nil, billy.ErrNotSupported
- }
-
- return h.Basic.(billy.TempFile).TempFile(dir, prefix)
-}
-
-func (h *Polyfill) ReadDir(path string) ([]os.FileInfo, error) {
- if !h.c.dir {
- return nil, billy.ErrNotSupported
- }
-
- return h.Basic.(billy.Dir).ReadDir(path)
-}
-
-func (h *Polyfill) MkdirAll(filename string, perm os.FileMode) error {
- if !h.c.dir {
- return billy.ErrNotSupported
- }
-
- return h.Basic.(billy.Dir).MkdirAll(filename, perm)
-}
-
-func (h *Polyfill) Symlink(target, link string) error {
- if !h.c.symlink {
- return billy.ErrNotSupported
- }
-
- return h.Basic.(billy.Symlink).Symlink(target, link)
-}
-
-func (h *Polyfill) Readlink(link string) (string, error) {
- if !h.c.symlink {
- return "", billy.ErrNotSupported
- }
-
- return h.Basic.(billy.Symlink).Readlink(link)
-}
-
-func (h *Polyfill) Lstat(path string) (os.FileInfo, error) {
- if !h.c.symlink {
- return nil, billy.ErrNotSupported
- }
-
- return h.Basic.(billy.Symlink).Lstat(path)
-}
-
-func (h *Polyfill) Chroot(path string) (billy.Filesystem, error) {
- if !h.c.chroot {
- return nil, billy.ErrNotSupported
- }
-
- return h.Basic.(billy.Chroot).Chroot(path)
-}
-
-func (h *Polyfill) Root() string {
- if !h.c.chroot {
- return string(filepath.Separator)
- }
-
- return h.Basic.(billy.Chroot).Root()
-}
-
-func (h *Polyfill) Underlying() billy.Basic {
- return h.Basic
-}
-
-// Capabilities implements the Capable interface.
-func (h *Polyfill) Capabilities() billy.Capability {
- return billy.Capabilities(h.Basic)
-}
diff --git a/vendor/github.com/go-git/go-billy/v5/memfs/memory.go b/vendor/github.com/go-git/go-billy/v5/memfs/memory.go
deleted file mode 100644
index dab73968b63..00000000000
--- a/vendor/github.com/go-git/go-billy/v5/memfs/memory.go
+++ /dev/null
@@ -1,410 +0,0 @@
-// Package memfs provides a billy filesystem base on memory.
-package memfs // import "github.com/go-git/go-billy/v5/memfs"
-
-import (
- "errors"
- "fmt"
- "io"
- "os"
- "path/filepath"
- "sort"
- "strings"
- "time"
-
- "github.com/go-git/go-billy/v5"
- "github.com/go-git/go-billy/v5/helper/chroot"
- "github.com/go-git/go-billy/v5/util"
-)
-
-const separator = filepath.Separator
-
-// Memory a very convenient filesystem based on memory files
-type Memory struct {
- s *storage
-
- tempCount int
-}
-
-//New returns a new Memory filesystem.
-func New() billy.Filesystem {
- fs := &Memory{s: newStorage()}
- return chroot.New(fs, string(separator))
-}
-
-func (fs *Memory) Create(filename string) (billy.File, error) {
- return fs.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
-}
-
-func (fs *Memory) Open(filename string) (billy.File, error) {
- return fs.OpenFile(filename, os.O_RDONLY, 0)
-}
-
-func (fs *Memory) OpenFile(filename string, flag int, perm os.FileMode) (billy.File, error) {
- f, has := fs.s.Get(filename)
- if !has {
- if !isCreate(flag) {
- return nil, os.ErrNotExist
- }
-
- var err error
- f, err = fs.s.New(filename, perm, flag)
- if err != nil {
- return nil, err
- }
- } else {
- if isExclusive(flag) {
- return nil, os.ErrExist
- }
-
- if target, isLink := fs.resolveLink(filename, f); isLink {
- return fs.OpenFile(target, flag, perm)
- }
- }
-
- if f.mode.IsDir() {
- return nil, fmt.Errorf("cannot open directory: %s", filename)
- }
-
- return f.Duplicate(filename, perm, flag), nil
-}
-
-var errNotLink = errors.New("not a link")
-
-func (fs *Memory) resolveLink(fullpath string, f *file) (target string, isLink bool) {
- if !isSymlink(f.mode) {
- return fullpath, false
- }
-
- target = string(f.content.bytes)
- if !isAbs(target) {
- target = fs.Join(filepath.Dir(fullpath), target)
- }
-
- return target, true
-}
-
-// On Windows OS, IsAbs validates if a path is valid based on if stars with a
-// unit (eg.: `C:\`) to assert that is absolute, but in this mem implementation
-// any path starting by `separator` is also considered absolute.
-func isAbs(path string) bool {
- return filepath.IsAbs(path) || strings.HasPrefix(path, string(separator))
-}
-
-func (fs *Memory) Stat(filename string) (os.FileInfo, error) {
- f, has := fs.s.Get(filename)
- if !has {
- return nil, os.ErrNotExist
- }
-
- fi, _ := f.Stat()
-
- var err error
- if target, isLink := fs.resolveLink(filename, f); isLink {
- fi, err = fs.Stat(target)
- if err != nil {
- return nil, err
- }
- }
-
- // the name of the file should always the name of the stated file, so we
- // overwrite the Stat returned from the storage with it, since the
- // filename may belong to a link.
- fi.(*fileInfo).name = filepath.Base(filename)
- return fi, nil
-}
-
-func (fs *Memory) Lstat(filename string) (os.FileInfo, error) {
- f, has := fs.s.Get(filename)
- if !has {
- return nil, os.ErrNotExist
- }
-
- return f.Stat()
-}
-
-type ByName []os.FileInfo
-
-func (a ByName) Len() int { return len(a) }
-func (a ByName) Less(i, j int) bool { return a[i].Name() < a[j].Name() }
-func (a ByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-
-func (fs *Memory) ReadDir(path string) ([]os.FileInfo, error) {
- if f, has := fs.s.Get(path); has {
- if target, isLink := fs.resolveLink(path, f); isLink {
- return fs.ReadDir(target)
- }
- }
-
- var entries []os.FileInfo
- for _, f := range fs.s.Children(path) {
- fi, _ := f.Stat()
- entries = append(entries, fi)
- }
-
- sort.Sort(ByName(entries))
-
- return entries, nil
-}
-
-func (fs *Memory) MkdirAll(path string, perm os.FileMode) error {
- _, err := fs.s.New(path, perm|os.ModeDir, 0)
- return err
-}
-
-func (fs *Memory) TempFile(dir, prefix string) (billy.File, error) {
- return util.TempFile(fs, dir, prefix)
-}
-
-func (fs *Memory) getTempFilename(dir, prefix string) string {
- fs.tempCount++
- filename := fmt.Sprintf("%s_%d_%d", prefix, fs.tempCount, time.Now().UnixNano())
- return fs.Join(dir, filename)
-}
-
-func (fs *Memory) Rename(from, to string) error {
- return fs.s.Rename(from, to)
-}
-
-func (fs *Memory) Remove(filename string) error {
- return fs.s.Remove(filename)
-}
-
-func (fs *Memory) Join(elem ...string) string {
- return filepath.Join(elem...)
-}
-
-func (fs *Memory) Symlink(target, link string) error {
- _, err := fs.Stat(link)
- if err == nil {
- return os.ErrExist
- }
-
- if !os.IsNotExist(err) {
- return err
- }
-
- return util.WriteFile(fs, link, []byte(target), 0777|os.ModeSymlink)
-}
-
-func (fs *Memory) Readlink(link string) (string, error) {
- f, has := fs.s.Get(link)
- if !has {
- return "", os.ErrNotExist
- }
-
- if !isSymlink(f.mode) {
- return "", &os.PathError{
- Op: "readlink",
- Path: link,
- Err: fmt.Errorf("not a symlink"),
- }
- }
-
- return string(f.content.bytes), nil
-}
-
-// Capabilities implements the Capable interface.
-func (fs *Memory) Capabilities() billy.Capability {
- return billy.WriteCapability |
- billy.ReadCapability |
- billy.ReadAndWriteCapability |
- billy.SeekCapability |
- billy.TruncateCapability
-}
-
-type file struct {
- name string
- content *content
- position int64
- flag int
- mode os.FileMode
-
- isClosed bool
-}
-
-func (f *file) Name() string {
- return f.name
-}
-
-func (f *file) Read(b []byte) (int, error) {
- n, err := f.ReadAt(b, f.position)
- f.position += int64(n)
-
- if err == io.EOF && n != 0 {
- err = nil
- }
-
- return n, err
-}
-
-func (f *file) ReadAt(b []byte, off int64) (int, error) {
- if f.isClosed {
- return 0, os.ErrClosed
- }
-
- if !isReadAndWrite(f.flag) && !isReadOnly(f.flag) {
- return 0, errors.New("read not supported")
- }
-
- n, err := f.content.ReadAt(b, off)
-
- return n, err
-}
-
-func (f *file) Seek(offset int64, whence int) (int64, error) {
- if f.isClosed {
- return 0, os.ErrClosed
- }
-
- switch whence {
- case io.SeekCurrent:
- f.position += offset
- case io.SeekStart:
- f.position = offset
- case io.SeekEnd:
- f.position = int64(f.content.Len()) + offset
- }
-
- return f.position, nil
-}
-
-func (f *file) Write(p []byte) (int, error) {
- if f.isClosed {
- return 0, os.ErrClosed
- }
-
- if !isReadAndWrite(f.flag) && !isWriteOnly(f.flag) {
- return 0, errors.New("write not supported")
- }
-
- n, err := f.content.WriteAt(p, f.position)
- f.position += int64(n)
-
- return n, err
-}
-
-func (f *file) Close() error {
- if f.isClosed {
- return os.ErrClosed
- }
-
- f.isClosed = true
- return nil
-}
-
-func (f *file) Truncate(size int64) error {
- if size < int64(len(f.content.bytes)) {
- f.content.bytes = f.content.bytes[:size]
- } else if more := int(size) - len(f.content.bytes); more > 0 {
- f.content.bytes = append(f.content.bytes, make([]byte, more)...)
- }
-
- return nil
-}
-
-func (f *file) Duplicate(filename string, mode os.FileMode, flag int) billy.File {
- new := &file{
- name: filename,
- content: f.content,
- mode: mode,
- flag: flag,
- }
-
- if isTruncate(flag) {
- new.content.Truncate()
- }
-
- if isAppend(flag) {
- new.position = int64(new.content.Len())
- }
-
- return new
-}
-
-func (f *file) Stat() (os.FileInfo, error) {
- return &fileInfo{
- name: f.Name(),
- mode: f.mode,
- size: f.content.Len(),
- }, nil
-}
-
-// Lock is a no-op in memfs.
-func (f *file) Lock() error {
- return nil
-}
-
-// Unlock is a no-op in memfs.
-func (f *file) Unlock() error {
- return nil
-}
-
-type fileInfo struct {
- name string
- size int
- mode os.FileMode
-}
-
-func (fi *fileInfo) Name() string {
- return fi.name
-}
-
-func (fi *fileInfo) Size() int64 {
- return int64(fi.size)
-}
-
-func (fi *fileInfo) Mode() os.FileMode {
- return fi.mode
-}
-
-func (*fileInfo) ModTime() time.Time {
- return time.Now()
-}
-
-func (fi *fileInfo) IsDir() bool {
- return fi.mode.IsDir()
-}
-
-func (*fileInfo) Sys() interface{} {
- return nil
-}
-
-func (c *content) Truncate() {
- c.bytes = make([]byte, 0)
-}
-
-func (c *content) Len() int {
- return len(c.bytes)
-}
-
-func isCreate(flag int) bool {
- return flag&os.O_CREATE != 0
-}
-
-func isExclusive(flag int) bool {
- return flag&os.O_EXCL != 0
-}
-
-func isAppend(flag int) bool {
- return flag&os.O_APPEND != 0
-}
-
-func isTruncate(flag int) bool {
- return flag&os.O_TRUNC != 0
-}
-
-func isReadAndWrite(flag int) bool {
- return flag&os.O_RDWR != 0
-}
-
-func isReadOnly(flag int) bool {
- return flag == os.O_RDONLY
-}
-
-func isWriteOnly(flag int) bool {
- return flag&os.O_WRONLY != 0
-}
-
-func isSymlink(m os.FileMode) bool {
- return m&os.ModeSymlink != 0
-}
diff --git a/vendor/github.com/go-git/go-billy/v5/memfs/storage.go b/vendor/github.com/go-git/go-billy/v5/memfs/storage.go
deleted file mode 100644
index e3c4e38bff2..00000000000
--- a/vendor/github.com/go-git/go-billy/v5/memfs/storage.go
+++ /dev/null
@@ -1,238 +0,0 @@
-package memfs
-
-import (
- "errors"
- "fmt"
- "io"
- "os"
- "path/filepath"
- "sync"
-)
-
-type storage struct {
- files map[string]*file
- children map[string]map[string]*file
-}
-
-func newStorage() *storage {
- return &storage{
- files: make(map[string]*file, 0),
- children: make(map[string]map[string]*file, 0),
- }
-}
-
-func (s *storage) Has(path string) bool {
- path = clean(path)
-
- _, ok := s.files[path]
- return ok
-}
-
-func (s *storage) New(path string, mode os.FileMode, flag int) (*file, error) {
- path = clean(path)
- if s.Has(path) {
- if !s.MustGet(path).mode.IsDir() {
- return nil, fmt.Errorf("file already exists %q", path)
- }
-
- return nil, nil
- }
-
- name := filepath.Base(path)
-
- f := &file{
- name: name,
- content: &content{name: name},
- mode: mode,
- flag: flag,
- }
-
- s.files[path] = f
- s.createParent(path, mode, f)
- return f, nil
-}
-
-func (s *storage) createParent(path string, mode os.FileMode, f *file) error {
- base := filepath.Dir(path)
- base = clean(base)
- if f.Name() == string(separator) {
- return nil
- }
-
- if _, err := s.New(base, mode.Perm()|os.ModeDir, 0); err != nil {
- return err
- }
-
- if _, ok := s.children[base]; !ok {
- s.children[base] = make(map[string]*file, 0)
- }
-
- s.children[base][f.Name()] = f
- return nil
-}
-
-func (s *storage) Children(path string) []*file {
- path = clean(path)
-
- l := make([]*file, 0)
- for _, f := range s.children[path] {
- l = append(l, f)
- }
-
- return l
-}
-
-func (s *storage) MustGet(path string) *file {
- f, ok := s.Get(path)
- if !ok {
- panic(fmt.Errorf("couldn't find %q", path))
- }
-
- return f
-}
-
-func (s *storage) Get(path string) (*file, bool) {
- path = clean(path)
- if !s.Has(path) {
- return nil, false
- }
-
- file, ok := s.files[path]
- return file, ok
-}
-
-func (s *storage) Rename(from, to string) error {
- from = clean(from)
- to = clean(to)
-
- if !s.Has(from) {
- return os.ErrNotExist
- }
-
- move := [][2]string{{from, to}}
-
- for pathFrom := range s.files {
- if pathFrom == from || !filepath.HasPrefix(pathFrom, from) {
- continue
- }
-
- rel, _ := filepath.Rel(from, pathFrom)
- pathTo := filepath.Join(to, rel)
-
- move = append(move, [2]string{pathFrom, pathTo})
- }
-
- for _, ops := range move {
- from := ops[0]
- to := ops[1]
-
- if err := s.move(from, to); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (s *storage) move(from, to string) error {
- s.files[to] = s.files[from]
- s.files[to].name = filepath.Base(to)
- s.children[to] = s.children[from]
-
- defer func() {
- delete(s.children, from)
- delete(s.files, from)
- delete(s.children[filepath.Dir(from)], filepath.Base(from))
- }()
-
- return s.createParent(to, 0644, s.files[to])
-}
-
-func (s *storage) Remove(path string) error {
- path = clean(path)
-
- f, has := s.Get(path)
- if !has {
- return os.ErrNotExist
- }
-
- if f.mode.IsDir() && len(s.children[path]) != 0 {
- return fmt.Errorf("dir: %s contains files", path)
- }
-
- base, file := filepath.Split(path)
- base = filepath.Clean(base)
-
- delete(s.children[base], file)
- delete(s.files, path)
- return nil
-}
-
-func clean(path string) string {
- return filepath.Clean(filepath.FromSlash(path))
-}
-
-type content struct {
- name string
- bytes []byte
-
- m sync.RWMutex
-}
-
-func (c *content) WriteAt(p []byte, off int64) (int, error) {
- if off < 0 {
- return 0, &os.PathError{
- Op: "writeat",
- Path: c.name,
- Err: errors.New("negative offset"),
- }
- }
-
- c.m.Lock()
- prev := len(c.bytes)
-
- diff := int(off) - prev
- if diff > 0 {
- c.bytes = append(c.bytes, make([]byte, diff)...)
- }
-
- c.bytes = append(c.bytes[:off], p...)
- if len(c.bytes) < prev {
- c.bytes = c.bytes[:prev]
- }
- c.m.Unlock()
-
- return len(p), nil
-}
-
-func (c *content) ReadAt(b []byte, off int64) (n int, err error) {
- if off < 0 {
- return 0, &os.PathError{
- Op: "readat",
- Path: c.name,
- Err: errors.New("negative offset"),
- }
- }
-
- c.m.RLock()
- size := int64(len(c.bytes))
- if off >= size {
- c.m.RUnlock()
- return 0, io.EOF
- }
-
- l := int64(len(b))
- if off+l > size {
- l = size - off
- }
-
- btr := c.bytes[off : off+l]
- n = copy(b, btr)
-
- if len(btr) < len(b) {
- err = io.EOF
- }
- c.m.RUnlock()
-
- return
-}
diff --git a/vendor/github.com/go-git/go-billy/v5/osfs/os.go b/vendor/github.com/go-git/go-billy/v5/osfs/os.go
deleted file mode 100644
index a7fe79f2f6a..00000000000
--- a/vendor/github.com/go-git/go-billy/v5/osfs/os.go
+++ /dev/null
@@ -1,127 +0,0 @@
-//go:build !js
-// +build !js
-
-// Package osfs provides a billy filesystem for the OS.
-package osfs
-
-import (
- "fmt"
- "io/fs"
- "os"
- "sync"
-
- "github.com/go-git/go-billy/v5"
-)
-
-const (
- defaultDirectoryMode = 0o755
- defaultCreateMode = 0o666
-)
-
-// Default Filesystem representing the root of the os filesystem.
-var Default = &ChrootOS{}
-
-// New returns a new OS filesystem.
-// By default paths are deduplicated, but still enforced
-// under baseDir. For more info refer to WithDeduplicatePath.
-func New(baseDir string, opts ...Option) billy.Filesystem {
- o := &options{
- deduplicatePath: true,
- }
- for _, opt := range opts {
- opt(o)
- }
-
- if o.Type == BoundOSFS {
- return newBoundOS(baseDir, o.deduplicatePath)
- }
-
- return newChrootOS(baseDir)
-}
-
-// WithBoundOS returns the option of using a Bound filesystem OS.
-func WithBoundOS() Option {
- return func(o *options) {
- o.Type = BoundOSFS
- }
-}
-
-// WithChrootOS returns the option of using a Chroot filesystem OS.
-func WithChrootOS() Option {
- return func(o *options) {
- o.Type = ChrootOSFS
- }
-}
-
-// WithDeduplicatePath toggles the deduplication of the base dir in the path.
-// This occurs when absolute links are being used.
-// Assuming base dir /base/dir and an absolute symlink /base/dir/target:
-//
-// With DeduplicatePath (default): /base/dir/target
-// Without DeduplicatePath: /base/dir/base/dir/target
-//
-// This option is only used by the BoundOS OS type.
-func WithDeduplicatePath(enabled bool) Option {
- return func(o *options) {
- o.deduplicatePath = enabled
- }
-}
-
-type options struct {
- Type
- deduplicatePath bool
-}
-
-type Type int
-
-const (
- ChrootOSFS Type = iota
- BoundOSFS
-)
-
-func readDir(dir string) ([]os.FileInfo, error) {
- entries, err := os.ReadDir(dir)
- if err != nil {
- return nil, err
- }
- infos := make([]fs.FileInfo, 0, len(entries))
- for _, entry := range entries {
- fi, err := entry.Info()
- if err != nil {
- return nil, err
- }
- infos = append(infos, fi)
- }
- return infos, nil
-}
-
-func tempFile(dir, prefix string) (billy.File, error) {
- f, err := os.CreateTemp(dir, prefix)
- if err != nil {
- return nil, err
- }
- return &file{File: f}, nil
-}
-
-func openFile(fn string, flag int, perm os.FileMode, createDir func(string) error) (billy.File, error) {
- if flag&os.O_CREATE != 0 {
- if createDir == nil {
- return nil, fmt.Errorf("createDir func cannot be nil if file needs to be opened in create mode")
- }
- if err := createDir(fn); err != nil {
- return nil, err
- }
- }
-
- f, err := os.OpenFile(fn, flag, perm)
- if err != nil {
- return nil, err
- }
- return &file{File: f}, err
-}
-
-// file is a wrapper for an os.File which adds support for file locking.
-type file struct {
- *os.File
- m sync.Mutex
-}
diff --git a/vendor/github.com/go-git/go-billy/v5/osfs/os_bound.go b/vendor/github.com/go-git/go-billy/v5/osfs/os_bound.go
deleted file mode 100644
index b4b6dbc07ae..00000000000
--- a/vendor/github.com/go-git/go-billy/v5/osfs/os_bound.go
+++ /dev/null
@@ -1,261 +0,0 @@
-//go:build !js
-// +build !js
-
-/*
- Copyright 2022 The Flux authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package osfs
-
-import (
- "fmt"
- "os"
- "path/filepath"
- "strings"
-
- securejoin "github.com/cyphar/filepath-securejoin"
- "github.com/go-git/go-billy/v5"
-)
-
-// BoundOS is a fs implementation based on the OS filesystem which is bound to
-// a base dir.
-// Prefer this fs implementation over ChrootOS.
-//
-// Behaviours of note:
-// 1. Read and write operations can only be directed to files which descends
-// from the base dir.
-// 2. Symlinks don't have their targets modified, and therefore can point
-// to locations outside the base dir or to non-existent paths.
-// 3. Readlink and Lstat ensures that the link file is located within the base
-// dir, evaluating any symlinks that file or base dir may contain.
-type BoundOS struct {
- baseDir string
- deduplicatePath bool
-}
-
-func newBoundOS(d string, deduplicatePath bool) billy.Filesystem {
- return &BoundOS{baseDir: d, deduplicatePath: deduplicatePath}
-}
-
-func (fs *BoundOS) Create(filename string) (billy.File, error) {
- return fs.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, defaultCreateMode)
-}
-
-func (fs *BoundOS) OpenFile(filename string, flag int, perm os.FileMode) (billy.File, error) {
- fn, err := fs.abs(filename)
- if err != nil {
- return nil, err
- }
- return openFile(fn, flag, perm, fs.createDir)
-}
-
-func (fs *BoundOS) ReadDir(path string) ([]os.FileInfo, error) {
- dir, err := fs.abs(path)
- if err != nil {
- return nil, err
- }
-
- return readDir(dir)
-}
-
-func (fs *BoundOS) Rename(from, to string) error {
- f, err := fs.abs(from)
- if err != nil {
- return err
- }
- t, err := fs.abs(to)
- if err != nil {
- return err
- }
-
- // MkdirAll for target name.
- if err := fs.createDir(t); err != nil {
- return err
- }
-
- return os.Rename(f, t)
-}
-
-func (fs *BoundOS) MkdirAll(path string, perm os.FileMode) error {
- dir, err := fs.abs(path)
- if err != nil {
- return err
- }
- return os.MkdirAll(dir, perm)
-}
-
-func (fs *BoundOS) Open(filename string) (billy.File, error) {
- return fs.OpenFile(filename, os.O_RDONLY, 0)
-}
-
-func (fs *BoundOS) Stat(filename string) (os.FileInfo, error) {
- filename, err := fs.abs(filename)
- if err != nil {
- return nil, err
- }
- return os.Stat(filename)
-}
-
-func (fs *BoundOS) Remove(filename string) error {
- fn, err := fs.abs(filename)
- if err != nil {
- return err
- }
- return os.Remove(fn)
-}
-
-// TempFile creates a temporary file. If dir is empty, the file
-// will be created within the OS Temporary dir. If dir is provided
-// it must descend from the current base dir.
-func (fs *BoundOS) TempFile(dir, prefix string) (billy.File, error) {
- if dir != "" {
- var err error
- dir, err = fs.abs(dir)
- if err != nil {
- return nil, err
- }
- }
-
- return tempFile(dir, prefix)
-}
-
-func (fs *BoundOS) Join(elem ...string) string {
- return filepath.Join(elem...)
-}
-
-func (fs *BoundOS) RemoveAll(path string) error {
- dir, err := fs.abs(path)
- if err != nil {
- return err
- }
- return os.RemoveAll(dir)
-}
-
-func (fs *BoundOS) Symlink(target, link string) error {
- ln, err := fs.abs(link)
- if err != nil {
- return err
- }
- // MkdirAll for containing dir.
- if err := fs.createDir(ln); err != nil {
- return err
- }
- return os.Symlink(target, ln)
-}
-
-func (fs *BoundOS) Lstat(filename string) (os.FileInfo, error) {
- filename = filepath.Clean(filename)
- if !filepath.IsAbs(filename) {
- filename = filepath.Join(fs.baseDir, filename)
- }
- if ok, err := fs.insideBaseDirEval(filename); !ok {
- return nil, err
- }
- return os.Lstat(filename)
-}
-
-func (fs *BoundOS) Readlink(link string) (string, error) {
- if !filepath.IsAbs(link) {
- link = filepath.Clean(filepath.Join(fs.baseDir, link))
- }
- if ok, err := fs.insideBaseDirEval(link); !ok {
- return "", err
- }
- return os.Readlink(link)
-}
-
-// Chroot returns a new OS filesystem, with the base dir set to the
-// result of joining the provided path with the underlying base dir.
-func (fs *BoundOS) Chroot(path string) (billy.Filesystem, error) {
- joined, err := securejoin.SecureJoin(fs.baseDir, path)
- if err != nil {
- return nil, err
- }
- return New(joined), nil
-}
-
-// Root returns the current base dir of the billy.Filesystem.
-// This is required in order for this implementation to be a drop-in
-// replacement for other upstream implementations (e.g. memory and osfs).
-func (fs *BoundOS) Root() string {
- return fs.baseDir
-}
-
-func (fs *BoundOS) createDir(fullpath string) error {
- dir := filepath.Dir(fullpath)
- if dir != "." {
- if err := os.MkdirAll(dir, defaultDirectoryMode); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// abs transforms filename to an absolute path, taking into account the base dir.
-// Relative paths won't be allowed to ascend the base dir, so `../file` will become
-// `/working-dir/file`.
-//
-// Note that if filename is a symlink, the returned address will be the target of the
-// symlink.
-func (fs *BoundOS) abs(filename string) (string, error) {
- if filename == fs.baseDir {
- filename = string(filepath.Separator)
- }
-
- path, err := securejoin.SecureJoin(fs.baseDir, filename)
- if err != nil {
- return "", nil
- }
-
- if fs.deduplicatePath {
- vol := filepath.VolumeName(fs.baseDir)
- dup := filepath.Join(fs.baseDir, fs.baseDir[len(vol):])
- if strings.HasPrefix(path, dup+string(filepath.Separator)) {
- return fs.abs(path[len(dup):])
- }
- }
- return path, nil
-}
-
-// insideBaseDir checks whether filename is located within
-// the fs.baseDir.
-func (fs *BoundOS) insideBaseDir(filename string) (bool, error) {
- if filename == fs.baseDir {
- return true, nil
- }
- if !strings.HasPrefix(filename, fs.baseDir+string(filepath.Separator)) {
- return false, fmt.Errorf("path outside base dir")
- }
- return true, nil
-}
-
-// insideBaseDirEval checks whether filename is contained within
-// a dir that is within the fs.baseDir, by first evaluating any symlinks
-// that either filename or fs.baseDir may contain.
-func (fs *BoundOS) insideBaseDirEval(filename string) (bool, error) {
- dir, err := filepath.EvalSymlinks(filepath.Dir(filename))
- if dir == "" || os.IsNotExist(err) {
- dir = filepath.Dir(filename)
- }
- wd, err := filepath.EvalSymlinks(fs.baseDir)
- if wd == "" || os.IsNotExist(err) {
- wd = fs.baseDir
- }
- if filename != wd && dir != wd && !strings.HasPrefix(dir, wd+string(filepath.Separator)) {
- return false, fmt.Errorf("path outside base dir")
- }
- return true, nil
-}
diff --git a/vendor/github.com/go-git/go-billy/v5/osfs/os_chroot.go b/vendor/github.com/go-git/go-billy/v5/osfs/os_chroot.go
deleted file mode 100644
index fd65e773c4b..00000000000
--- a/vendor/github.com/go-git/go-billy/v5/osfs/os_chroot.go
+++ /dev/null
@@ -1,112 +0,0 @@
-//go:build !js
-// +build !js
-
-package osfs
-
-import (
- "os"
- "path/filepath"
-
- "github.com/go-git/go-billy/v5"
- "github.com/go-git/go-billy/v5/helper/chroot"
-)
-
-// ChrootOS is a legacy filesystem based on a "soft chroot" of the os filesystem.
-// Although this is still the default os filesystem, consider using BoundOS instead.
-//
-// Behaviours of note:
-// 1. A "soft chroot" translates the base dir to "/" for the purposes of the
-// fs abstraction.
-// 2. Symlinks targets may be modified to be kept within the chroot bounds.
-// 3. Some file modes does not pass-through the fs abstraction.
-// 4. The combination of 1 and 2 may cause go-git to think that a Git repository
-// is dirty, when in fact it isn't.
-type ChrootOS struct{}
-
-func newChrootOS(baseDir string) billy.Filesystem {
- return chroot.New(&ChrootOS{}, baseDir)
-}
-
-func (fs *ChrootOS) Create(filename string) (billy.File, error) {
- return fs.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, defaultCreateMode)
-}
-
-func (fs *ChrootOS) OpenFile(filename string, flag int, perm os.FileMode) (billy.File, error) {
- return openFile(filename, flag, perm, fs.createDir)
-}
-
-func (fs *ChrootOS) createDir(fullpath string) error {
- dir := filepath.Dir(fullpath)
- if dir != "." {
- if err := os.MkdirAll(dir, defaultDirectoryMode); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (fs *ChrootOS) ReadDir(dir string) ([]os.FileInfo, error) {
- return readDir(dir)
-}
-
-func (fs *ChrootOS) Rename(from, to string) error {
- if err := fs.createDir(to); err != nil {
- return err
- }
-
- return rename(from, to)
-}
-
-func (fs *ChrootOS) MkdirAll(path string, perm os.FileMode) error {
- return os.MkdirAll(path, defaultDirectoryMode)
-}
-
-func (fs *ChrootOS) Open(filename string) (billy.File, error) {
- return fs.OpenFile(filename, os.O_RDONLY, 0)
-}
-
-func (fs *ChrootOS) Stat(filename string) (os.FileInfo, error) {
- return os.Stat(filename)
-}
-
-func (fs *ChrootOS) Remove(filename string) error {
- return os.Remove(filename)
-}
-
-func (fs *ChrootOS) TempFile(dir, prefix string) (billy.File, error) {
- if err := fs.createDir(dir + string(os.PathSeparator)); err != nil {
- return nil, err
- }
-
- return tempFile(dir, prefix)
-}
-
-func (fs *ChrootOS) Join(elem ...string) string {
- return filepath.Join(elem...)
-}
-
-func (fs *ChrootOS) RemoveAll(path string) error {
- return os.RemoveAll(filepath.Clean(path))
-}
-
-func (fs *ChrootOS) Lstat(filename string) (os.FileInfo, error) {
- return os.Lstat(filepath.Clean(filename))
-}
-
-func (fs *ChrootOS) Symlink(target, link string) error {
- if err := fs.createDir(link); err != nil {
- return err
- }
-
- return os.Symlink(target, link)
-}
-
-func (fs *ChrootOS) Readlink(link string) (string, error) {
- return os.Readlink(link)
-}
-
-// Capabilities implements the Capable interface.
-func (fs *ChrootOS) Capabilities() billy.Capability {
- return billy.DefaultCapabilities
-}
diff --git a/vendor/github.com/go-git/go-billy/v5/osfs/os_js.go b/vendor/github.com/go-git/go-billy/v5/osfs/os_js.go
deleted file mode 100644
index 2e58aa5c610..00000000000
--- a/vendor/github.com/go-git/go-billy/v5/osfs/os_js.go
+++ /dev/null
@@ -1,25 +0,0 @@
-//go:build js
-// +build js
-
-package osfs
-
-import (
- "github.com/go-git/go-billy/v5"
- "github.com/go-git/go-billy/v5/helper/chroot"
- "github.com/go-git/go-billy/v5/memfs"
-)
-
-// globalMemFs is the global memory fs
-var globalMemFs = memfs.New()
-
-// Default Filesystem representing the root of in-memory filesystem for a
-// js/wasm environment.
-var Default = memfs.New()
-
-// New returns a new OS filesystem.
-func New(baseDir string, _ ...Option) billy.Filesystem {
- return chroot.New(Default, Default.Join("/", baseDir))
-}
-
-type options struct {
-}
diff --git a/vendor/github.com/go-git/go-billy/v5/osfs/os_options.go b/vendor/github.com/go-git/go-billy/v5/osfs/os_options.go
deleted file mode 100644
index 2f235c6ddcd..00000000000
--- a/vendor/github.com/go-git/go-billy/v5/osfs/os_options.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package osfs
-
-type Option func(*options)
diff --git a/vendor/github.com/go-git/go-billy/v5/osfs/os_plan9.go b/vendor/github.com/go-git/go-billy/v5/osfs/os_plan9.go
deleted file mode 100644
index 84020b52f11..00000000000
--- a/vendor/github.com/go-git/go-billy/v5/osfs/os_plan9.go
+++ /dev/null
@@ -1,91 +0,0 @@
-//go:build plan9
-// +build plan9
-
-package osfs
-
-import (
- "io"
- "os"
- "path/filepath"
- "syscall"
-)
-
-func (f *file) Lock() error {
- // Plan 9 uses a mode bit instead of explicit lock/unlock syscalls.
- //
- // Per http://man.cat-v.org/plan_9/5/stat: “Exclusive use files may be open
- // for I/O by only one fid at a time across all clients of the server. If a
- // second open is attempted, it draws an error.”
- //
- // There is no obvious way to implement this function using the exclusive use bit.
- // See https://golang.org/src/cmd/go/internal/lockedfile/lockedfile_plan9.go
- // for how file locking is done by the go tool on Plan 9.
- return nil
-}
-
-func (f *file) Unlock() error {
- return nil
-}
-
-func rename(from, to string) error {
- // If from and to are in different directories, copy the file
- // since Plan 9 does not support cross-directory rename.
- if filepath.Dir(from) != filepath.Dir(to) {
- fi, err := os.Stat(from)
- if err != nil {
- return &os.LinkError{"rename", from, to, err}
- }
- if fi.Mode().IsDir() {
- return &os.LinkError{"rename", from, to, syscall.EISDIR}
- }
- fromFile, err := os.Open(from)
- if err != nil {
- return &os.LinkError{"rename", from, to, err}
- }
- toFile, err := os.OpenFile(to, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, fi.Mode())
- if err != nil {
- return &os.LinkError{"rename", from, to, err}
- }
- _, err = io.Copy(toFile, fromFile)
- if err != nil {
- return &os.LinkError{"rename", from, to, err}
- }
-
- // Copy mtime and mode from original file.
- // We need only one syscall if we avoid os.Chmod and os.Chtimes.
- dir := fi.Sys().(*syscall.Dir)
- var d syscall.Dir
- d.Null()
- d.Mtime = dir.Mtime
- d.Mode = dir.Mode
- if err = dirwstat(to, &d); err != nil {
- return &os.LinkError{"rename", from, to, err}
- }
-
- // Remove original file.
- err = os.Remove(from)
- if err != nil {
- return &os.LinkError{"rename", from, to, err}
- }
- return nil
- }
- return os.Rename(from, to)
-}
-
-func dirwstat(name string, d *syscall.Dir) error {
- var buf [syscall.STATFIXLEN]byte
-
- n, err := d.Marshal(buf[:])
- if err != nil {
- return &os.PathError{"dirwstat", name, err}
- }
- if err = syscall.Wstat(name, buf[:n]); err != nil {
- return &os.PathError{"dirwstat", name, err}
- }
- return nil
-}
-
-func umask(new int) func() {
- return func() {
- }
-}
diff --git a/vendor/github.com/go-git/go-billy/v5/osfs/os_posix.go b/vendor/github.com/go-git/go-billy/v5/osfs/os_posix.go
deleted file mode 100644
index d834a1145a3..00000000000
--- a/vendor/github.com/go-git/go-billy/v5/osfs/os_posix.go
+++ /dev/null
@@ -1,38 +0,0 @@
-//go:build !plan9 && !windows && !js
-// +build !plan9,!windows,!js
-
-package osfs
-
-import (
- "os"
- "syscall"
-
- "golang.org/x/sys/unix"
-)
-
-func (f *file) Lock() error {
- f.m.Lock()
- defer f.m.Unlock()
-
- return unix.Flock(int(f.File.Fd()), unix.LOCK_EX)
-}
-
-func (f *file) Unlock() error {
- f.m.Lock()
- defer f.m.Unlock()
-
- return unix.Flock(int(f.File.Fd()), unix.LOCK_UN)
-}
-
-func rename(from, to string) error {
- return os.Rename(from, to)
-}
-
-// umask sets umask to a new value, and returns a func which allows the
-// caller to reset it back to what it was originally.
-func umask(new int) func() {
- old := syscall.Umask(new)
- return func() {
- syscall.Umask(old)
- }
-}
diff --git a/vendor/github.com/go-git/go-billy/v5/osfs/os_windows.go b/vendor/github.com/go-git/go-billy/v5/osfs/os_windows.go
deleted file mode 100644
index e54df748e5f..00000000000
--- a/vendor/github.com/go-git/go-billy/v5/osfs/os_windows.go
+++ /dev/null
@@ -1,58 +0,0 @@
-//go:build windows
-// +build windows
-
-package osfs
-
-import (
- "os"
- "runtime"
- "unsafe"
-
- "golang.org/x/sys/windows"
-)
-
-var (
- kernel32DLL = windows.NewLazySystemDLL("kernel32.dll")
- lockFileExProc = kernel32DLL.NewProc("LockFileEx")
- unlockFileProc = kernel32DLL.NewProc("UnlockFile")
-)
-
-const (
- lockfileExclusiveLock = 0x2
-)
-
-func (f *file) Lock() error {
- f.m.Lock()
- defer f.m.Unlock()
-
- var overlapped windows.Overlapped
- // err is always non-nil as per sys/windows semantics.
- ret, _, err := lockFileExProc.Call(f.File.Fd(), lockfileExclusiveLock, 0, 0xFFFFFFFF, 0,
- uintptr(unsafe.Pointer(&overlapped)))
- runtime.KeepAlive(&overlapped)
- if ret == 0 {
- return err
- }
- return nil
-}
-
-func (f *file) Unlock() error {
- f.m.Lock()
- defer f.m.Unlock()
-
- // err is always non-nil as per sys/windows semantics.
- ret, _, err := unlockFileProc.Call(f.File.Fd(), 0, 0, 0xFFFFFFFF, 0)
- if ret == 0 {
- return err
- }
- return nil
-}
-
-func rename(from, to string) error {
- return os.Rename(from, to)
-}
-
-func umask(new int) func() {
- return func() {
- }
-}
diff --git a/vendor/github.com/go-git/go-billy/v5/util/glob.go b/vendor/github.com/go-git/go-billy/v5/util/glob.go
deleted file mode 100644
index f7cb1de8966..00000000000
--- a/vendor/github.com/go-git/go-billy/v5/util/glob.go
+++ /dev/null
@@ -1,111 +0,0 @@
-package util
-
-import (
- "path/filepath"
- "sort"
- "strings"
-
- "github.com/go-git/go-billy/v5"
-)
-
-// Glob returns the names of all files matching pattern or nil
-// if there is no matching file. The syntax of patterns is the same
-// as in Match. The pattern may describe hierarchical names such as
-// /usr/*/bin/ed (assuming the Separator is '/').
-//
-// Glob ignores file system errors such as I/O errors reading directories.
-// The only possible returned error is ErrBadPattern, when pattern
-// is malformed.
-//
-// Function originally from https://golang.org/src/path/filepath/match_test.go
-func Glob(fs billy.Filesystem, pattern string) (matches []string, err error) {
- if !hasMeta(pattern) {
- if _, err = fs.Lstat(pattern); err != nil {
- return nil, nil
- }
- return []string{pattern}, nil
- }
-
- dir, file := filepath.Split(pattern)
- // Prevent infinite recursion. See issue 15879.
- if dir == pattern {
- return nil, filepath.ErrBadPattern
- }
-
- var m []string
- m, err = Glob(fs, cleanGlobPath(dir))
- if err != nil {
- return
- }
- for _, d := range m {
- matches, err = glob(fs, d, file, matches)
- if err != nil {
- return
- }
- }
- return
-}
-
-// cleanGlobPath prepares path for glob matching.
-func cleanGlobPath(path string) string {
- switch path {
- case "":
- return "."
- case string(filepath.Separator):
- // do nothing to the path
- return path
- default:
- return path[0 : len(path)-1] // chop off trailing separator
- }
-}
-
-// glob searches for files matching pattern in the directory dir
-// and appends them to matches. If the directory cannot be
-// opened, it returns the existing matches. New matches are
-// added in lexicographical order.
-func glob(fs billy.Filesystem, dir, pattern string, matches []string) (m []string, e error) {
- m = matches
- fi, err := fs.Stat(dir)
- if err != nil {
- return
- }
-
- if !fi.IsDir() {
- return
- }
-
- names, _ := readdirnames(fs, dir)
- sort.Strings(names)
-
- for _, n := range names {
- matched, err := filepath.Match(pattern, n)
- if err != nil {
- return m, err
- }
- if matched {
- m = append(m, filepath.Join(dir, n))
- }
- }
- return
-}
-
-// hasMeta reports whether path contains any of the magic characters
-// recognized by Match.
-func hasMeta(path string) bool {
- // TODO(niemeyer): Should other magic characters be added here?
- return strings.ContainsAny(path, "*?[")
-}
-
-func readdirnames(fs billy.Filesystem, dir string) ([]string, error) {
- files, err := fs.ReadDir(dir)
- if err != nil {
- return nil, err
- }
-
- var names []string
- for _, file := range files {
- names = append(names, file.Name())
- }
-
- return names, nil
-}
diff --git a/vendor/github.com/go-git/go-billy/v5/util/util.go b/vendor/github.com/go-git/go-billy/v5/util/util.go
deleted file mode 100644
index 5c77128c3ca..00000000000
--- a/vendor/github.com/go-git/go-billy/v5/util/util.go
+++ /dev/null
@@ -1,282 +0,0 @@
-package util
-
-import (
- "io"
- "os"
- "path/filepath"
- "strconv"
- "sync"
- "time"
-
- "github.com/go-git/go-billy/v5"
-)
-
-// RemoveAll removes path and any children it contains. It removes everything it
-// can but returns the first error it encounters. If the path does not exist,
-// RemoveAll returns nil (no error).
-func RemoveAll(fs billy.Basic, path string) error {
- fs, path = getUnderlyingAndPath(fs, path)
-
- if r, ok := fs.(removerAll); ok {
- return r.RemoveAll(path)
- }
-
- return removeAll(fs, path)
-}
-
-type removerAll interface {
- RemoveAll(string) error
-}
-
-func removeAll(fs billy.Basic, path string) error {
- // This implementation is adapted from os.RemoveAll.
-
- // Simple case: if Remove works, we're done.
- err := fs.Remove(path)
- if err == nil || os.IsNotExist(err) {
- return nil
- }
-
- // Otherwise, is this a directory we need to recurse into?
- dir, serr := fs.Stat(path)
- if serr != nil {
- if os.IsNotExist(serr) {
- return nil
- }
-
- return serr
- }
-
- if !dir.IsDir() {
- // Not a directory; return the error from Remove.
- return err
- }
-
- dirfs, ok := fs.(billy.Dir)
- if !ok {
- return billy.ErrNotSupported
- }
-
- // Directory.
- fis, err := dirfs.ReadDir(path)
- if err != nil {
- if os.IsNotExist(err) {
- // Race. It was deleted between the Lstat and Open.
- // Return nil per RemoveAll's docs.
- return nil
- }
-
- return err
- }
-
- // Remove contents & return first error.
- err = nil
- for _, fi := range fis {
- cpath := fs.Join(path, fi.Name())
- err1 := removeAll(fs, cpath)
- if err == nil {
- err = err1
- }
- }
-
- // Remove directory.
- err1 := fs.Remove(path)
- if err1 == nil || os.IsNotExist(err1) {
- return nil
- }
-
- if err == nil {
- err = err1
- }
-
- return err
-
-}
-
-// WriteFile writes data to a file named by filename in the given filesystem.
-// If the file does not exist, WriteFile creates it with permissions perm;
-// otherwise WriteFile truncates it before writing.
-func WriteFile(fs billy.Basic, filename string, data []byte, perm os.FileMode) error {
- f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
- if err != nil {
- return err
- }
-
- n, err := f.Write(data)
- if err == nil && n < len(data) {
- err = io.ErrShortWrite
- }
-
- if err1 := f.Close(); err == nil {
- err = err1
- }
-
- return err
-}
-
-// Random number state.
-// We generate random temporary file names so that there's a good
-// chance the file doesn't exist yet - keeps the number of tries in
-// TempFile to a minimum.
-var rand uint32
-var randmu sync.Mutex
-
-func reseed() uint32 {
- return uint32(time.Now().UnixNano() + int64(os.Getpid()))
-}
-
-func nextSuffix() string {
- randmu.Lock()
- r := rand
- if r == 0 {
- r = reseed()
- }
- r = r*1664525 + 1013904223 // constants from Numerical Recipes
- rand = r
- randmu.Unlock()
- return strconv.Itoa(int(1e9 + r%1e9))[1:]
-}
-
-// TempFile creates a new temporary file in the directory dir with a name
-// beginning with prefix, opens the file for reading and writing, and returns
-// the resulting *os.File. If dir is the empty string, TempFile uses the default
-// directory for temporary files (see os.TempDir). Multiple programs calling
-// TempFile simultaneously will not choose the same file. The caller can use
-// f.Name() to find the pathname of the file. It is the caller's responsibility
-// to remove the file when no longer needed.
-func TempFile(fs billy.Basic, dir, prefix string) (f billy.File, err error) {
- // This implementation is based on stdlib ioutil.TempFile.
- if dir == "" {
- dir = getTempDir(fs)
- }
-
- nconflict := 0
- for i := 0; i < 10000; i++ {
- name := filepath.Join(dir, prefix+nextSuffix())
- f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
- if os.IsExist(err) {
- if nconflict++; nconflict > 10 {
- randmu.Lock()
- rand = reseed()
- randmu.Unlock()
- }
- continue
- }
- break
- }
- return
-}
-
-// TempDir creates a new temporary directory in the directory dir
-// with a name beginning with prefix and returns the path of the
-// new directory. If dir is the empty string, TempDir uses the
-// default directory for temporary files (see os.TempDir).
-// Multiple programs calling TempDir simultaneously
-// will not choose the same directory. It is the caller's responsibility
-// to remove the directory when no longer needed.
-func TempDir(fs billy.Dir, dir, prefix string) (name string, err error) {
- // This implementation is based on stdlib ioutil.TempDir
-
- if dir == "" {
- dir = getTempDir(fs.(billy.Basic))
- }
-
- nconflict := 0
- for i := 0; i < 10000; i++ {
- try := filepath.Join(dir, prefix+nextSuffix())
- err = fs.MkdirAll(try, 0700)
- if os.IsExist(err) {
- if nconflict++; nconflict > 10 {
- randmu.Lock()
- rand = reseed()
- randmu.Unlock()
- }
- continue
- }
- if os.IsNotExist(err) {
- if _, err := os.Stat(dir); os.IsNotExist(err) {
- return "", err
- }
- }
- if err == nil {
- name = try
- }
- break
- }
- return
-}
-
-func getTempDir(fs billy.Basic) string {
- ch, ok := fs.(billy.Chroot)
- if !ok || ch.Root() == "" || ch.Root() == "/" || ch.Root() == string(filepath.Separator) {
- return os.TempDir()
- }
-
- return ".tmp"
-}
-
-type underlying interface {
- Underlying() billy.Basic
-}
-
-func getUnderlyingAndPath(fs billy.Basic, path string) (billy.Basic, string) {
- u, ok := fs.(underlying)
- if !ok {
- return fs, path
- }
- if ch, ok := fs.(billy.Chroot); ok {
- path = fs.Join(ch.Root(), path)
- }
-
- return u.Underlying(), path
-}
-
-// ReadFile reads the named file and returns the contents from the given filesystem.
-// A successful call returns err == nil, not err == EOF.
-// Because ReadFile reads the whole file, it does not treat an EOF from Read
-// as an error to be reported.
-func ReadFile(fs billy.Basic, name string) ([]byte, error) {
- f, err := fs.Open(name)
- if err != nil {
- return nil, err
- }
-
- defer f.Close()
-
- var size int
- if info, err := fs.Stat(name); err == nil {
- size64 := info.Size()
- if int64(int(size64)) == size64 {
- size = int(size64)
- }
- }
-
- size++ // one byte for final read at EOF
- // If a file claims a small size, read at least 512 bytes.
- // In particular, files in Linux's /proc claim size 0 but
- // then do not work right if read in small pieces,
- // so an initial read of 1 byte would not work correctly.
-
- if size < 512 {
- size = 512
- }
-
- data := make([]byte, 0, size)
- for {
- if len(data) >= cap(data) {
- d := append(data[:cap(data)], 0)
- data = d[:len(data)]
- }
-
- n, err := f.Read(data[len(data):cap(data)])
- data = data[:len(data)+n]
-
- if err != nil {
- if err == io.EOF {
- err = nil
- }
-
- return data, err
- }
- }
-}
diff --git a/vendor/github.com/go-git/go-billy/v5/util/walk.go b/vendor/github.com/go-git/go-billy/v5/util/walk.go
deleted file mode 100644
index 1531bcaaaeb..00000000000
--- a/vendor/github.com/go-git/go-billy/v5/util/walk.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package util
-
-import (
- "os"
- "path/filepath"
-
- "github.com/go-git/go-billy/v5"
-)
-
-// walk recursively descends path, calling walkFn
-// adapted from https://golang.org/src/path/filepath/path.go
-func walk(fs billy.Filesystem, path string, info os.FileInfo, walkFn filepath.WalkFunc) error {
- if !info.IsDir() {
- return walkFn(path, info, nil)
- }
-
- names, err := readdirnames(fs, path)
- err1 := walkFn(path, info, err)
- // If err != nil, walk can't walk into this directory.
- // err1 != nil means walkFn want walk to skip this directory or stop walking.
- // Therefore, if one of err and err1 isn't nil, walk will return.
- if err != nil || err1 != nil {
- // The caller's behavior is controlled by the return value, which is decided
- // by walkFn. walkFn may ignore err and return nil.
- // If walkFn returns SkipDir, it will be handled by the caller.
- // So walk should return whatever walkFn returns.
- return err1
- }
-
- for _, name := range names {
- filename := filepath.Join(path, name)
- fileInfo, err := fs.Lstat(filename)
- if err != nil {
- if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {
- return err
- }
- } else {
- err = walk(fs, filename, fileInfo, walkFn)
- if err != nil {
- if !fileInfo.IsDir() || err != filepath.SkipDir {
- return err
- }
- }
- }
- }
- return nil
-}
-
-// Walk walks the file tree rooted at root, calling fn for each file or
-// directory in the tree, including root. All errors that arise visiting files
-// and directories are filtered by fn: see the WalkFunc documentation for
-// details.
-//
-// The files are walked in lexical order, which makes the output deterministic
-// but requires Walk to read an entire directory into memory before proceeding
-// to walk that directory. Walk does not follow symbolic links.
-//
-// Function adapted from https://github.com/golang/go/blob/3b770f2ccb1fa6fecc22ea822a19447b10b70c5c/src/path/filepath/path.go#L500
-func Walk(fs billy.Filesystem, root string, walkFn filepath.WalkFunc) error {
- info, err := fs.Lstat(root)
- if err != nil {
- err = walkFn(root, nil, err)
- } else {
- err = walk(fs, root, info, walkFn)
- }
-
- if err == filepath.SkipDir {
- return nil
- }
-
- return err
-}
diff --git a/vendor/github.com/go-git/go-git/v5/.gitignore b/vendor/github.com/go-git/go-git/v5/.gitignore
deleted file mode 100644
index b7f2c5807ca..00000000000
--- a/vendor/github.com/go-git/go-git/v5/.gitignore
+++ /dev/null
@@ -1,7 +0,0 @@
-coverage.out
-*~
-coverage.txt
-profile.out
-.tmp/
-.git-dist/
-.vscode
diff --git a/vendor/github.com/go-git/go-git/v5/CODE_OF_CONDUCT.md b/vendor/github.com/go-git/go-git/v5/CODE_OF_CONDUCT.md
deleted file mode 100644
index a689fa3c34a..00000000000
--- a/vendor/github.com/go-git/go-git/v5/CODE_OF_CONDUCT.md
+++ /dev/null
@@ -1,74 +0,0 @@
-# Contributor Covenant Code of Conduct
-
-## Our Pledge
-
-In the interest of fostering an open and welcoming environment, we as
-contributors and maintainers pledge to making participation in our project and
-our community a harassment-free experience for everyone, regardless of age, body
-size, disability, ethnicity, gender identity and expression, level of experience,
-education, socio-economic status, nationality, personal appearance, race,
-religion, or sexual identity and orientation.
-
-## Our Standards
-
-Examples of behavior that contributes to creating a positive environment
-include:
-
-* Using welcoming and inclusive language
-* Being respectful of differing viewpoints and experiences
-* Gracefully accepting constructive criticism
-* Focusing on what is best for the community
-* Showing empathy towards other community members
-
-Examples of unacceptable behavior by participants include:
-
-* The use of sexualized language or imagery and unwelcome sexual attention or
- advances
-* Trolling, insulting/derogatory comments, and personal or political attacks
-* Public or private harassment
-* Publishing others' private information, such as a physical or electronic
- address, without explicit permission
-* Other conduct which could reasonably be considered inappropriate in a
- professional setting
-
-## Our Responsibilities
-
-Project maintainers are responsible for clarifying the standards of acceptable
-behavior and are expected to take appropriate and fair corrective action in
-response to any instances of unacceptable behavior.
-
-Project maintainers have the right and responsibility to remove, edit, or
-reject comments, commits, code, wiki edits, issues, and other contributions
-that are not aligned to this Code of Conduct, or to ban temporarily or
-permanently any contributor for other behaviors that they deem inappropriate,
-threatening, offensive, or harmful.
-
-## Scope
-
-This Code of Conduct applies both within project spaces and in public spaces
-when an individual is representing the project or its community. Examples of
-representing a project or community include using an official project e-mail
-address, posting via an official social media account, or acting as an appointed
-representative at an online or offline event. Representation of a project may be
-further defined and clarified by project maintainers.
-
-## Enforcement
-
-Instances of abusive, harassing, or otherwise unacceptable behavior may be
-reported by contacting the project team at conduct@sourced.tech. All
-complaints will be reviewed and investigated and will result in a response that
-is deemed necessary and appropriate to the circumstances. The project team is
-obligated to maintain confidentiality with regard to the reporter of an incident.
-Further details of specific enforcement policies may be posted separately.
-
-Project maintainers who do not follow or enforce the Code of Conduct in good
-faith may face temporary or permanent repercussions as determined by other
-members of the project's leadership.
-
-## Attribution
-
-This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
-available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
-
-[homepage]: https://www.contributor-covenant.org
-
diff --git a/vendor/github.com/go-git/go-git/v5/COMPATIBILITY.md b/vendor/github.com/go-git/go-git/v5/COMPATIBILITY.md
deleted file mode 100644
index ff0c22c8969..00000000000
--- a/vendor/github.com/go-git/go-git/v5/COMPATIBILITY.md
+++ /dev/null
@@ -1,233 +0,0 @@
-# Supported Features
-
-Here is a non-comprehensive table of git commands and features and their
-compatibility status with go-git.
-
-## Getting and creating repositories
-
-| Feature | Sub-feature | Status | Notes | Examples |
-| ------- | ------------------------------------------------------------------------------------------------------------------ | ------ | ----- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `init` | | ✅ | | |
-| `init` | `--bare` | ✅ | | |
-| `init` | `--template`
`--separate-git-dir`
`--shared` | ❌ | | |
-| `clone` | | ✅ | | - [PlainClone](_examples/clone/main.go) |
-| `clone` | Authentication:
- none
- access token
- username + password
- ssh | ✅ | | - [clone ssh](_examples/clone/auth/ssh/main.go)
- [clone access token](_examples/clone/auth/basic/access_token/main.go)
- [clone user + password](_examples/clone/auth/basic/username_password/main.go) |
-| `clone` | `--progress`
`--single-branch`
`--depth`
`--origin`
`--recurse-submodules`
`--shared` | ✅ | | - [recurse submodules](_examples/clone/main.go)
- [progress](_examples/progress/main.go) |
-
-## Basic snapshotting
-
-| Feature | Sub-feature | Status | Notes | Examples |
-| -------- | ----------- | ------ | -------------------------------------------------------- | ------------------------------------ |
-| `add` | | ✅ | Plain add is supported. Any other flags aren't supported | |
-| `status` | | ✅ | | |
-| `commit` | | ✅ | | - [commit](_examples/commit/main.go) |
-| `reset` | | ✅ | | |
-| `rm` | | ✅ | | |
-| `mv` | | ✅ | | |
-
-## Branching and merging
-
-| Feature | Sub-feature | Status | Notes | Examples |
-| ----------- | ----------- | ------------ | --------------------------------------- | ----------------------------------------------------------------------------------------------- |
-| `branch` | | ✅ | | - [branch](_examples/branch/main.go) |
-| `checkout` | | ✅ | Basic usages of checkout are supported. | - [checkout](_examples/checkout/main.go) |
-| `merge` | | ⚠️ (partial) | Fast-forward only | |
-| `mergetool` | | ❌ | | |
-| `stash` | | ❌ | | |
-| `tag` | | ✅ | | - [tag](_examples/tag/main.go)
- [tag create and push](_examples/tag-create-push/main.go) |
-
-## Sharing and updating projects
-
-| Feature | Sub-feature | Status | Notes | Examples |
-| ----------- | ----------- | ------ | ----------------------------------------------------------------------- | ------------------------------------------ |
-| `fetch` | | ✅ | | |
-| `pull` | | ✅ | Only supports merges where the merge can be resolved as a fast-forward. | - [pull](_examples/pull/main.go) |
-| `push` | | ✅ | | - [push](_examples/push/main.go) |
-| `remote` | | ✅ | | - [remotes](_examples/remotes/main.go) |
-| `submodule` | | ✅ | | - [submodule](_examples/submodule/main.go) |
-| `submodule` | deinit | ❌ | | |
-
-## Inspection and comparison
-
-| Feature | Sub-feature | Status | Notes | Examples |
-| ---------- | ----------- | --------- | ----- | ------------------------------ |
-| `show` | | ✅ | | |
-| `log` | | ✅ | | - [log](_examples/log/main.go) |
-| `shortlog` | | (see log) | | |
-| `describe` | | ❌ | | |
-
-## Patching
-
-| Feature | Sub-feature | Status | Notes | Examples |
-| ------------- | ----------- | ------ | ---------------------------------------------------- | -------- |
-| `apply` | | ❌ | | |
-| `cherry-pick` | | ❌ | | |
-| `diff` | | ✅ | Patch object with UnifiedDiff output representation. | |
-| `rebase` | | ❌ | | |
-| `revert` | | ❌ | | |
-
-## Debugging
-
-| Feature | Sub-feature | Status | Notes | Examples |
-| -------- | ----------- | ------ | ----- | ---------------------------------- |
-| `bisect` | | ❌ | | |
-| `blame` | | ✅ | | - [blame](_examples/blame/main.go) |
-| `grep` | | ✅ | | |
-
-## Email
-
-| Feature | Sub-feature | Status | Notes | Examples |
-| -------------- | ----------- | ------ | ----- | -------- |
-| `am` | | ❌ | | |
-| `apply` | | ❌ | | |
-| `format-patch` | | ❌ | | |
-| `send-email` | | ❌ | | |
-| `request-pull` | | ❌ | | |
-
-## External systems
-
-| Feature | Sub-feature | Status | Notes | Examples |
-| ------------- | ----------- | ------ | ----- | -------- |
-| `svn` | | ❌ | | |
-| `fast-import` | | ❌ | | |
-| `lfs` | | ❌ | | |
-
-## Administration
-
-| Feature | Sub-feature | Status | Notes | Examples |
-| --------------- | ----------- | ------ | ----- | -------- |
-| `clean` | | ✅ | | |
-| `gc` | | ❌ | | |
-| `fsck` | | ❌ | | |
-| `reflog` | | ❌ | | |
-| `filter-branch` | | ❌ | | |
-| `instaweb` | | ❌ | | |
-| `archive` | | ❌ | | |
-| `bundle` | | ❌ | | |
-| `prune` | | ❌ | | |
-| `repack` | | ❌ | | |
-
-## Server admin
-
-| Feature | Sub-feature | Status | Notes | Examples |
-| -------------------- | ----------- | ------ | ----- | ----------------------------------------- |
-| `daemon` | | ❌ | | |
-| `update-server-info` | | ✅ | | [cli](./cli/go-git/update_server_info.go) |
-
-## Advanced
-
-| Feature | Sub-feature | Status | Notes | Examples |
-| ---------- | ----------- | ----------- | ----- | -------- |
-| `notes` | | ❌ | | |
-| `replace` | | ❌ | | |
-| `worktree` | | ❌ | | |
-| `annotate` | | (see blame) | | |
-
-## GPG
-
-| Feature | Sub-feature | Status | Notes | Examples |
-| ------------------- | ----------- | ------ | ----- | -------- |
-| `git-verify-commit` | | ✅ | | |
-| `git-verify-tag` | | ✅ | | |
-
-## Plumbing commands
-
-| Feature | Sub-feature | Status | Notes | Examples |
-| --------------- | ------------------------------------- | ------------ | --------------------------------------------------- | -------------------------------------------- |
-| `cat-file` | | ✅ | | |
-| `check-ignore` | | ❌ | | |
-| `commit-tree` | | ❌ | | |
-| `count-objects` | | ❌ | | |
-| `diff-index` | | ❌ | | |
-| `for-each-ref` | | ✅ | | |
-| `hash-object` | | ✅ | | |
-| `ls-files` | | ✅ | | |
-| `ls-remote` | | ✅ | | - [ls-remote](_examples/ls-remote/main.go) |
-| `merge-base` | `--independent`
`--is-ancestor` | ⚠️ (partial) | Calculates the merge-base only between two commits. | - [merge-base](_examples/merge_base/main.go) |
-| `merge-base` | `--fork-point`
`--octopus` | ❌ | | |
-| `read-tree` | | ❌ | | |
-| `rev-list` | | ✅ | | |
-| `rev-parse` | | ❌ | | |
-| `show-ref` | | ✅ | | |
-| `symbolic-ref` | | ✅ | | |
-| `update-index` | | ❌ | | |
-| `update-ref` | | ❌ | | |
-| `verify-pack` | | ❌ | | |
-| `write-tree` | | ❌ | | |
-
-## Indexes and Git Protocols
-
-| Feature | Version | Status | Notes |
-| -------------------- | ------------------------------------------------------------------------------- | ------ | ----- |
-| index | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-index.txt) | ❌ | |
-| index | [v2](https://github.com/git/git/blob/master/Documentation/gitformat-index.txt) | ✅ | |
-| index | [v3](https://github.com/git/git/blob/master/Documentation/gitformat-index.txt) | ❌ | |
-| pack-protocol | [v1](https://github.com/git/git/blob/master/Documentation/gitprotocol-pack.txt) | ✅ | |
-| pack-protocol | [v2](https://github.com/git/git/blob/master/Documentation/gitprotocol-v2.txt) | ❌ | |
-| multi-pack-index | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-pack.txt) | ❌ | |
-| pack-\*.rev files | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-pack.txt) | ❌ | |
-| pack-\*.mtimes files | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-pack.txt) | ❌ | |
-| cruft packs | | ❌ | |
-
-## Capabilities
-
-| Feature | Status | Notes |
-| ------------------------------ | ------------ | ----- |
-| `multi_ack` | ❌ | |
-| `multi_ack_detailed` | ❌ | |
-| `no-done` | ❌ | |
-| `thin-pack` | ❌ | |
-| `side-band` | ⚠️ (partial) | |
-| `side-band-64k` | ⚠️ (partial) | |
-| `ofs-delta` | ✅ | |
-| `agent` | ✅ | |
-| `object-format` | ❌ | |
-| `symref` | ✅ | |
-| `shallow` | ✅ | |
-| `deepen-since` | ✅ | |
-| `deepen-not` | ❌ | |
-| `deepen-relative` | ❌ | |
-| `no-progress` | ✅ | |
-| `include-tag` | ✅ | |
-| `report-status` | ✅ | |
-| `report-status-v2` | ❌ | |
-| `delete-refs` | ✅ | |
-| `quiet` | ❌ | |
-| `atomic` | ✅ | |
-| `push-options` | ✅ | |
-| `allow-tip-sha1-in-want` | ✅ | |
-| `allow-reachable-sha1-in-want` | ❌ | |
-| `push-cert=` | ❌ | |
-| `filter` | ❌ | |
-| `session-id=` | ❌ | |
-
-## Transport Schemes
-
-| Scheme | Status | Notes | Examples |
-| -------------------- | ------------ | ---------------------------------------------------------------------- | ---------------------------------------------- |
-| `http(s)://` (dumb) | ❌ | | |
-| `http(s)://` (smart) | ✅ | | |
-| `git://` | ✅ | | |
-| `ssh://` | ✅ | | |
-| `file://` | ⚠️ (partial) | Warning: this is not pure Golang. This shells out to the `git` binary. | |
-| Custom | ✅ | All existing schemes can be replaced by custom implementations. | - [custom_http](_examples/custom_http/main.go) |
-
-## SHA256
-
-| Feature | Sub-feature | Status | Notes | Examples |
-| -------- | ----------- | ------ | ---------------------------------- | ------------------------------------ |
-| `init` | | ✅ | Requires building with tag sha256. | - [init](_examples/sha256/main.go) |
-| `commit` | | ✅ | Requires building with tag sha256. | - [commit](_examples/sha256/main.go) |
-| `pull` | | ❌ | | |
-| `fetch` | | ❌ | | |
-| `push` | | ❌ | | |
-
-## Other features
-
-| Feature | Sub-feature | Status | Notes | Examples |
-| --------------- | --------------------------- | ------ | ---------------------------------------------- | -------- |
-| `config` | `--local` | ✅ | Read and write per-repository (`.git/config`). | |
-| `config` | `--global`
`--system` | ✅ | Read-only. | |
-| `gitignore` | | ✅ | | |
-| `gitattributes` | | ✅ | | |
-| `git-worktree` | | ❌ | Multiple worktrees are not supported. | |
diff --git a/vendor/github.com/go-git/go-git/v5/CONTRIBUTING.md b/vendor/github.com/go-git/go-git/v5/CONTRIBUTING.md
deleted file mode 100644
index fce25328a7f..00000000000
--- a/vendor/github.com/go-git/go-git/v5/CONTRIBUTING.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# Contributing Guidelines
-
-source{d} go-git project is [Apache 2.0 licensed](LICENSE) and accepts
-contributions via GitHub pull requests. This document outlines some of the
-conventions on development workflow, commit message formatting, contact points,
-and other resources to make it easier to get your contribution accepted.
-
-## Support Channels
-
-The official support channels, for both users and contributors, are:
-
-- [StackOverflow go-git tag](https://stackoverflow.com/questions/tagged/go-git) for user questions.
-- GitHub [Issues](https://github.com/src-d/go-git/issues)* for bug reports and feature requests.
-
-*Before opening a new issue or submitting a new pull request, it's helpful to
-search the project - it's likely that another user has already reported the
-issue you're facing, or it's a known issue that we're already aware of.
-
-
-## How to Contribute
-
-Pull Requests (PRs) are the main and exclusive way to contribute to the official go-git project.
-In order for a PR to be accepted it needs to pass a list of requirements:
-
-- You should be able to run the same query using `git`. We don't accept features that are not implemented in the official git implementation.
-- The expected behavior must match the [official git implementation](https://github.com/git/git).
-- The actual behavior must be correctly explained with natural language and providing a minimum working example in Go that reproduces it.
-- All PRs must be written in idiomatic Go, formatted according to [gofmt](https://golang.org/cmd/gofmt/), and without any warnings from [go lint](https://github.com/golang/lint) nor [go vet](https://golang.org/cmd/vet/).
-- They should in general include tests, and those shall pass.
-- If the PR is a bug fix, it has to include a suite of unit tests for the new functionality.
-- If the PR is a new feature, it has to come with a suite of unit tests, that tests the new functionality.
-- In any case, all the PRs have to pass the personal evaluation of at least one of the maintainers of go-git.
-
-### Format of the commit message
-
-Every commit message should describe what was changed, under which context and, if applicable, the GitHub issue it relates to:
-
-```
-plumbing: packp, Skip argument validations for unknown capabilities. Fixes #623
-```
-
-The format can be described more formally as follows:
-
-```
-: , . [Fixes #]
-```
diff --git a/vendor/github.com/go-git/go-git/v5/EXTENDING.md b/vendor/github.com/go-git/go-git/v5/EXTENDING.md
deleted file mode 100644
index a2778e34abf..00000000000
--- a/vendor/github.com/go-git/go-git/v5/EXTENDING.md
+++ /dev/null
@@ -1,78 +0,0 @@
-# Extending go-git
-
-`go-git` was built in a highly extensible manner, which enables some of its functionalities to be changed or extended without the need of changing its codebase. Here are the key extensibility features:
-
-## Dot Git Storers
-
-Dot git storers are the components responsible for storing the Git internal files, including objects and references.
-
-The built-in storer implementations include [memory](storage/memory) and [filesystem](storage/filesystem). The `memory` storer stores all the data in memory, and its use look like this:
-
-```go
- r, err := git.Init(memory.NewStorage(), nil)
-```
-
-The `filesystem` storer stores the data in the OS filesystem, and can be used as follows:
-
-```go
- r, err := git.Init(filesystem.NewStorage(osfs.New("/tmp/foo")), nil)
-```
-
-New implementations can be created by implementing the [storage.Storer interface](storage/storer.go#L16).
-
-## Filesystem
-
-Git repository worktrees are managed using a filesystem abstraction based on [go-billy](https://github.com/go-git/go-billy). The Git operations will take place against the specific filesystem implementation. Initialising a repository in Memory can be done as follows:
-
-```go
- fs := memfs.New()
- r, err := git.Init(memory.NewStorage(), fs)
-```
-
-The same operation can be done against the OS filesystem:
-
-```go
- fs := osfs.New("/tmp/foo")
- r, err := git.Init(memory.NewStorage(), fs)
-```
-
-New filesystems (e.g. cloud based storage) could be created by implementing `go-billy`'s [Filesystem interface](https://github.com/go-git/go-billy/blob/326c59f064021b821a55371d57794fbfb86d4cb3/fs.go#L52).
-
-## Transport Schemes
-
-Git supports various transport schemes, including `http`, `https`, `ssh`, `git`, `file`. `go-git` defines the [transport.Transport interface](plumbing/transport/common.go#L48) to represent them.
-
-The built-in implementations can be replaced by calling `client.InstallProtocol`.
-
-An example of changing the built-in `https` implementation to skip TLS could look like this:
-
-```go
- customClient := &http.Client{
- Transport: &http.Transport{
- TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
- },
- }
-
- client.InstallProtocol("https", githttp.NewClient(customClient))
-```
-
-Some internal implementations enables code reuse amongst the different transport implementations. Some of these may be made public in the future (e.g. `plumbing/transport/internal/common`).
-
-## Cache
-
-Several different operations across `go-git` lean on caching of objects in order to achieve optimal performance. The caching functionality is defined by the [cache.Object interface](plumbing/cache/common.go#L17).
-
-Two built-in implementations are `cache.ObjectLRU` and `cache.BufferLRU`. However, the caching functionality can be customized by implementing the interface `cache.Object` interface.
-
-## Hash
-
-`go-git` uses the `crypto.Hash` interface to represent hash functions. The built-in implementations are `github.com/pjbgf/sha1cd` for SHA1 and Go's `crypto/SHA256`.
-
-The default hash functions can be changed by calling `hash.RegisterHash`.
-```go
- func init() {
- hash.RegisterHash(crypto.SHA1, sha1.New)
- }
-```
-
-New `SHA1` or `SHA256` hash functions that implement the `hash.RegisterHash` interface can be registered by calling `RegisterHash`.
diff --git a/vendor/github.com/go-git/go-git/v5/LICENSE b/vendor/github.com/go-git/go-git/v5/LICENSE
deleted file mode 100644
index 8aa3d854cf7..00000000000
--- a/vendor/github.com/go-git/go-git/v5/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright 2018 Sourced Technologies, S.L.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/go-git/go-git/v5/Makefile b/vendor/github.com/go-git/go-git/v5/Makefile
deleted file mode 100644
index 3d5b54f7e65..00000000000
--- a/vendor/github.com/go-git/go-git/v5/Makefile
+++ /dev/null
@@ -1,54 +0,0 @@
-# General
-WORKDIR = $(PWD)
-
-# Go parameters
-GOCMD = go
-GOTEST = $(GOCMD) test
-
-# Git config
-GIT_VERSION ?=
-GIT_DIST_PATH ?= $(PWD)/.git-dist
-GIT_REPOSITORY = http://github.com/git/git.git
-
-# Coverage
-COVERAGE_REPORT = coverage.out
-COVERAGE_MODE = count
-
-build-git:
- @if [ -f $(GIT_DIST_PATH)/git ]; then \
- echo "nothing to do, using cache $(GIT_DIST_PATH)"; \
- else \
- git clone $(GIT_REPOSITORY) -b $(GIT_VERSION) --depth 1 --single-branch $(GIT_DIST_PATH); \
- cd $(GIT_DIST_PATH); \
- make configure; \
- ./configure; \
- make all; \
- fi
-
-test:
- @echo "running against `git version`"; \
- $(GOTEST) -race ./...
- $(GOTEST) -v _examples/common_test.go _examples/common.go --examples
-
-TEMP_REPO := $(shell mktemp)
-test-sha256:
- $(GOCMD) run -tags sha256 _examples/sha256/main.go $(TEMP_REPO)
- cd $(TEMP_REPO) && git fsck
- rm -rf $(TEMP_REPO)
-
-test-coverage:
- @echo "running against `git version`"; \
- echo "" > $(COVERAGE_REPORT); \
- $(GOTEST) -coverprofile=$(COVERAGE_REPORT) -coverpkg=./... -covermode=$(COVERAGE_MODE) ./...
-
-clean:
- rm -rf $(GIT_DIST_PATH)
-
-fuzz:
- @go test -fuzz=FuzzParser $(PWD)/internal/revision
- @go test -fuzz=FuzzDecoder $(PWD)/plumbing/format/config
- @go test -fuzz=FuzzPatchDelta $(PWD)/plumbing/format/packfile
- @go test -fuzz=FuzzParseSignedBytes $(PWD)/plumbing/object
- @go test -fuzz=FuzzDecode $(PWD)/plumbing/object
- @go test -fuzz=FuzzDecoder $(PWD)/plumbing/protocol/packp
- @go test -fuzz=FuzzNewEndpoint $(PWD)/plumbing/transport
diff --git a/vendor/github.com/go-git/go-git/v5/README.md b/vendor/github.com/go-git/go-git/v5/README.md
deleted file mode 100644
index ff0c9b72bae..00000000000
--- a/vendor/github.com/go-git/go-git/v5/README.md
+++ /dev/null
@@ -1,131 +0,0 @@
-
-[](https://pkg.go.dev/github.com/go-git/go-git/v5) [](https://github.com/go-git/go-git/actions) [](https://goreportcard.com/report/github.com/go-git/go-git)
-
-*go-git* is a highly extensible git implementation library written in **pure Go**.
-
-It can be used to manipulate git repositories at low level *(plumbing)* or high level *(porcelain)*, through an idiomatic Go API. It also supports several types of storage, such as in-memory filesystems, or custom implementations, thanks to the [`Storer`](https://pkg.go.dev/github.com/go-git/go-git/v5/plumbing/storer) interface.
-
-It's being actively developed since 2015 and is being used extensively by [Keybase](https://keybase.io/blog/encrypted-git-for-everyone), [Gitea](https://gitea.io/en-us/) or [Pulumi](https://github.com/search?q=org%3Apulumi+go-git&type=Code), and by many other libraries and tools.
-
-Project Status
---------------
-
-After the legal issues with the [`src-d`](https://github.com/src-d) organization, the lack of update for four months and the requirement to make a hard fork, the project is **now back to normality**.
-
-The project is currently actively maintained by individual contributors, including several of the original authors, but also backed by a new company, [gitsight](https://github.com/gitsight), where `go-git` is a critical component used at scale.
-
-
-Comparison with git
--------------------
-
-*go-git* aims to be fully compatible with [git](https://github.com/git/git), all the *porcelain* operations are implemented to work exactly as *git* does.
-
-*git* is a humongous project with years of development by thousands of contributors, making it challenging for *go-git* to implement all the features. You can find a comparison of *go-git* vs *git* in the [compatibility documentation](COMPATIBILITY.md).
-
-
-Installation
-------------
-
-The recommended way to install *go-git* is:
-
-```go
-import "github.com/go-git/go-git/v5" // with go modules enabled (GO111MODULE=on or outside GOPATH)
-import "github.com/go-git/go-git" // with go modules disabled
-```
-
-
-Examples
---------
-
-> Please note that the `CheckIfError` and `Info` functions used in the examples are from the [examples package](https://github.com/go-git/go-git/blob/master/_examples/common.go#L19) just to be used in the examples.
-
-
-### Basic example
-
-A basic example that mimics the standard `git clone` command
-
-```go
-// Clone the given repository to the given directory
-Info("git clone https://github.com/go-git/go-git")
-
-_, err := git.PlainClone("/tmp/foo", false, &git.CloneOptions{
- URL: "https://github.com/go-git/go-git",
- Progress: os.Stdout,
-})
-
-CheckIfError(err)
-```
-
-Outputs:
-```
-Counting objects: 4924, done.
-Compressing objects: 100% (1333/1333), done.
-Total 4924 (delta 530), reused 6 (delta 6), pack-reused 3533
-```
-
-### In-memory example
-
-Cloning a repository into memory and printing the history of HEAD, just like `git log` does
-
-
-```go
-// Clones the given repository in memory, creating the remote, the local
-// branches and fetching the objects, exactly as:
-Info("git clone https://github.com/go-git/go-billy")
-
-r, err := git.Clone(memory.NewStorage(), nil, &git.CloneOptions{
- URL: "https://github.com/go-git/go-billy",
-})
-
-CheckIfError(err)
-
-// Gets the HEAD history from HEAD, just like this command:
-Info("git log")
-
-// ... retrieves the branch pointed by HEAD
-ref, err := r.Head()
-CheckIfError(err)
-
-
-// ... retrieves the commit history
-cIter, err := r.Log(&git.LogOptions{From: ref.Hash()})
-CheckIfError(err)
-
-// ... just iterates over the commits, printing it
-err = cIter.ForEach(func(c *object.Commit) error {
- fmt.Println(c)
- return nil
-})
-CheckIfError(err)
-```
-
-Outputs:
-```
-commit ded8054fd0c3994453e9c8aacaf48d118d42991e
-Author: Santiago M. Mola
-Date: Sat Nov 12 21:18:41 2016 +0100
-
- index: ReadFrom/WriteTo returns IndexReadError/IndexWriteError. (#9)
-
-commit df707095626f384ce2dc1a83b30f9a21d69b9dfc
-Author: Santiago M. Mola
-Date: Fri Nov 11 13:23:22 2016 +0100
-
- readwriter: fix bug when writing index. (#10)
-
- When using ReadWriter on an existing siva file, absolute offset for
- index entries was not being calculated correctly.
-...
-```
-
-You can find this [example](_examples/log/main.go) and many others in the [examples](_examples) folder.
-
-Contribute
-----------
-
-[Contributions](https://github.com/go-git/go-git/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) are more than welcome, if you are interested please take a look to
-our [Contributing Guidelines](CONTRIBUTING.md).
-
-License
--------
-Apache License Version 2.0, see [LICENSE](LICENSE)
diff --git a/vendor/github.com/go-git/go-git/v5/SECURITY.md b/vendor/github.com/go-git/go-git/v5/SECURITY.md
deleted file mode 100644
index 0d2f8d038f3..00000000000
--- a/vendor/github.com/go-git/go-git/v5/SECURITY.md
+++ /dev/null
@@ -1,38 +0,0 @@
-# go-git Security Policy
-
-The purpose of this security policy is to outline `go-git`'s process
-for reporting, handling and disclosing security sensitive information.
-
-## Supported Versions
-
-The project follows a version support policy where only the latest minor
-release is actively supported. Therefore, only issues that impact the latest
-minor release will be fixed. Users are encouraged to upgrade to the latest
-minor/patch release to benefit from the most up-to-date features, bug fixes,
-and security enhancements.
-
-The supported versions policy applies to both the `go-git` library and its
-associated repositories within the `go-git` org.
-
-## Reporting Security Issues
-
-Please report any security vulnerabilities or potential weaknesses in `go-git`
-privately via go-git-security@googlegroups.com. Do not publicly disclose the
-details of the vulnerability until a fix has been implemented and released.
-
-During the process the project maintainers will investigate the report, so please
-provide detailed information, including steps to reproduce, affected versions, and any mitigations if known.
-
-The project maintainers will acknowledge the receipt of the report and work with
-the reporter to validate and address the issue.
-
-Please note that `go-git` does not have any bounty programs, and therefore do
-not provide financial compensation for disclosures.
-
-## Security Disclosure Process
-
-The project maintainers will make every effort to promptly address security issues.
-
-Once a security vulnerability is fixed, a security advisory will be published to notify users and provide appropriate mitigation measures.
-
-All `go-git` advisories can be found at https://github.com/go-git/go-git/security/advisories.
diff --git a/vendor/github.com/go-git/go-git/v5/blame.go b/vendor/github.com/go-git/go-git/v5/blame.go
deleted file mode 100644
index 2a877dcdf96..00000000000
--- a/vendor/github.com/go-git/go-git/v5/blame.go
+++ /dev/null
@@ -1,590 +0,0 @@
-package git
-
-import (
- "bytes"
- "container/heap"
- "errors"
- "fmt"
- "io"
- "strconv"
- "time"
- "unicode/utf8"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/object"
- "github.com/go-git/go-git/v5/utils/diff"
- "github.com/sergi/go-diff/diffmatchpatch"
-)
-
-// BlameResult represents the result of a Blame operation.
-type BlameResult struct {
- // Path is the path of the File that we're blaming.
- Path string
- // Rev (Revision) is the hash of the specified Commit used to generate this result.
- Rev plumbing.Hash
- // Lines contains every line with its authorship.
- Lines []*Line
-}
-
-// Blame returns a BlameResult with the information about the last author of
-// each line from file `path` at commit `c`.
-func Blame(c *object.Commit, path string) (*BlameResult, error) {
- // The file to blame is identified by the input arguments:
- // commit and path. commit is a Commit object obtained from a Repository. Path
- // represents a path to a specific file contained in the repository.
- //
- // Blaming a file is done by walking the tree in reverse order trying to find where each line was last modified.
- //
- // When a diff is found it cannot immediately assume it came from that commit, as it may have come from 1 of its
- // parents, so it will first try to resolve those diffs from its parents, if it couldn't find the change in its
- // parents then it will assign the change to itself.
- //
- // When encountering 2 parents that have made the same change to a file it will choose the parent that was merged
- // into the current branch first (this is determined by the order of the parents inside the commit).
- //
- // This currently works on a line by line basis, if performance becomes an issue it could be changed to work with
- // hunks rather than lines. Then when encountering diff hunks it would need to split them where necessary.
-
- b := new(blame)
- b.fRev = c
- b.path = path
- b.q = new(priorityQueue)
-
- file, err := b.fRev.File(path)
- if err != nil {
- return nil, err
- }
- finalLines, err := file.Lines()
- if err != nil {
- return nil, err
- }
- finalLength := len(finalLines)
-
- needsMap := make([]lineMap, finalLength)
- for i := range needsMap {
- needsMap[i] = lineMap{i, i, nil, -1}
- }
- contents, err := file.Contents()
- if err != nil {
- return nil, err
- }
- b.q.Push(&queueItem{
- nil,
- nil,
- c,
- path,
- contents,
- needsMap,
- 0,
- false,
- 0,
- })
- items := make([]*queueItem, 0)
- for {
- items = items[:0]
- for {
- if b.q.Len() == 0 {
- return nil, errors.New("invalid state: no items left on the blame queue")
- }
- item := b.q.Pop()
- items = append(items, item)
- next := b.q.Peek()
- if next == nil || next.Hash != item.Commit.Hash {
- break
- }
- }
- finished, err := b.addBlames(items)
- if err != nil {
- return nil, err
- }
- if finished == true {
- break
- }
- }
- if err != nil {
- return nil, err
- }
-
- b.lineToCommit = make([]*object.Commit, finalLength)
- for i := range needsMap {
- b.lineToCommit[i] = needsMap[i].Commit
- }
-
- lines, err := newLines(finalLines, b.lineToCommit)
- if err != nil {
- return nil, err
- }
-
- return &BlameResult{
- Path: path,
- Rev: c.Hash,
- Lines: lines,
- }, nil
-}
-
-// Line values represent the contents and author of a line in BlamedResult values.
-type Line struct {
- // Author is the email address of the last author that modified the line.
- Author string
- // AuthorName is the name of the last author that modified the line.
- AuthorName string
- // Text is the original text of the line.
- Text string
- // Date is when the original text of the line was introduced
- Date time.Time
- // Hash is the commit hash that introduced the original line
- Hash plumbing.Hash
-}
-
-func newLine(author, authorName, text string, date time.Time, hash plumbing.Hash) *Line {
- return &Line{
- Author: author,
- AuthorName: authorName,
- Text: text,
- Hash: hash,
- Date: date,
- }
-}
-
-func newLines(contents []string, commits []*object.Commit) ([]*Line, error) {
- result := make([]*Line, 0, len(contents))
- for i := range contents {
- result = append(result, newLine(
- commits[i].Author.Email, commits[i].Author.Name, contents[i],
- commits[i].Author.When, commits[i].Hash,
- ))
- }
-
- return result, nil
-}
-
-// this struct is internally used by the blame function to hold its
-// inputs, outputs and state.
-type blame struct {
- // the path of the file to blame
- path string
- // the commit of the final revision of the file to blame
- fRev *object.Commit
- // resolved lines
- lineToCommit []*object.Commit
- // queue of commits that need resolving
- q *priorityQueue
-}
-
-type lineMap struct {
- Orig, Cur int
- Commit *object.Commit
- FromParentNo int
-}
-
-func (b *blame) addBlames(curItems []*queueItem) (bool, error) {
- curItem := curItems[0]
-
- // Simple optimisation to merge paths, there is potential to go a bit further here and check for any duplicates
- // not only if they are all the same.
- if len(curItems) == 1 {
- curItems = nil
- } else if curItem.IdenticalToChild {
- allSame := true
- lenCurItems := len(curItems)
- lowestParentNo := curItem.ParentNo
- for i := 1; i < lenCurItems; i++ {
- if !curItems[i].IdenticalToChild || curItem.Child != curItems[i].Child {
- allSame = false
- break
- }
- lowestParentNo = min(lowestParentNo, curItems[i].ParentNo)
- }
- if allSame {
- curItem.Child.numParentsNeedResolving = curItem.Child.numParentsNeedResolving - lenCurItems + 1
- curItems = nil // free the memory
- curItem.ParentNo = lowestParentNo
-
- // Now check if we can remove the parent completely
- for curItem.Child.IdenticalToChild && curItem.Child.MergedChildren == nil && curItem.Child.numParentsNeedResolving == 1 {
- oldChild := curItem.Child
- curItem.Child = oldChild.Child
- curItem.ParentNo = oldChild.ParentNo
- }
- }
- }
-
- // if we have more than 1 item for this commit, create a single needsMap
- if len(curItems) > 1 {
- curItem.MergedChildren = make([]childToNeedsMap, len(curItems))
- for i, c := range curItems {
- curItem.MergedChildren[i] = childToNeedsMap{c.Child, c.NeedsMap, c.IdenticalToChild, c.ParentNo}
- }
- newNeedsMap := make([]lineMap, 0, len(curItem.NeedsMap))
- newNeedsMap = append(newNeedsMap, curItems[0].NeedsMap...)
-
- for i := 1; i < len(curItems); i++ {
- cur := curItems[i].NeedsMap
- n := 0 // position in newNeedsMap
- c := 0 // position in current list
- for c < len(cur) {
- if n == len(newNeedsMap) {
- newNeedsMap = append(newNeedsMap, cur[c:]...)
- break
- } else if newNeedsMap[n].Cur == cur[c].Cur {
- n++
- c++
- } else if newNeedsMap[n].Cur < cur[c].Cur {
- n++
- } else {
- newNeedsMap = append(newNeedsMap, cur[c])
- newPos := len(newNeedsMap) - 1
- for newPos > n {
- newNeedsMap[newPos-1], newNeedsMap[newPos] = newNeedsMap[newPos], newNeedsMap[newPos-1]
- newPos--
- }
- }
- }
- }
- curItem.NeedsMap = newNeedsMap
- curItem.IdenticalToChild = false
- curItem.Child = nil
- curItems = nil // free the memory
- }
-
- parents, err := parentsContainingPath(curItem.path, curItem.Commit)
- if err != nil {
- return false, err
- }
-
- anyPushed := false
- for parnetNo, prev := range parents {
- currentHash, err := blobHash(curItem.path, curItem.Commit)
- if err != nil {
- return false, err
- }
- prevHash, err := blobHash(prev.Path, prev.Commit)
- if err != nil {
- return false, err
- }
- if currentHash == prevHash {
- if len(parents) == 1 && curItem.MergedChildren == nil && curItem.IdenticalToChild {
- // commit that has 1 parent and 1 child and is the same as both, bypass it completely
- b.q.Push(&queueItem{
- Child: curItem.Child,
- Commit: prev.Commit,
- path: prev.Path,
- Contents: curItem.Contents,
- NeedsMap: curItem.NeedsMap, // reuse the NeedsMap as we are throwing away this item
- IdenticalToChild: true,
- ParentNo: curItem.ParentNo,
- })
- } else {
- b.q.Push(&queueItem{
- Child: curItem,
- Commit: prev.Commit,
- path: prev.Path,
- Contents: curItem.Contents,
- NeedsMap: append([]lineMap(nil), curItem.NeedsMap...), // create new slice and copy
- IdenticalToChild: true,
- ParentNo: parnetNo,
- })
- curItem.numParentsNeedResolving++
- }
- anyPushed = true
- continue
- }
-
- // get the contents of the file
- file, err := prev.Commit.File(prev.Path)
- if err != nil {
- return false, err
- }
- prevContents, err := file.Contents()
- if err != nil {
- return false, err
- }
-
- hunks := diff.Do(prevContents, curItem.Contents)
- prevl := -1
- curl := -1
- need := 0
- getFromParent := make([]lineMap, 0)
- out:
- for h := range hunks {
- hLines := countLines(hunks[h].Text)
- for hl := 0; hl < hLines; hl++ {
- switch {
- case hunks[h].Type == diffmatchpatch.DiffEqual:
- prevl++
- curl++
- if curl == curItem.NeedsMap[need].Cur {
- // add to needs
- getFromParent = append(getFromParent, lineMap{curl, prevl, nil, -1})
- // move to next need
- need++
- if need >= len(curItem.NeedsMap) {
- break out
- }
- }
- case hunks[h].Type == diffmatchpatch.DiffInsert:
- curl++
- if curl == curItem.NeedsMap[need].Cur {
- // the line we want is added, it may have been added here (or by another parent), skip it for now
- need++
- if need >= len(curItem.NeedsMap) {
- break out
- }
- }
- case hunks[h].Type == diffmatchpatch.DiffDelete:
- prevl += hLines
- continue out
- default:
- return false, errors.New("invalid state: invalid hunk Type")
- }
- }
- }
-
- if len(getFromParent) > 0 {
- b.q.Push(&queueItem{
- curItem,
- nil,
- prev.Commit,
- prev.Path,
- prevContents,
- getFromParent,
- 0,
- false,
- parnetNo,
- })
- curItem.numParentsNeedResolving++
- anyPushed = true
- }
- }
-
- curItem.Contents = "" // no longer need, free the memory
-
- if !anyPushed {
- return finishNeeds(curItem)
- }
-
- return false, nil
-}
-
-func finishNeeds(curItem *queueItem) (bool, error) {
- // any needs left in the needsMap must have come from this revision
- for i := range curItem.NeedsMap {
- if curItem.NeedsMap[i].Commit == nil {
- curItem.NeedsMap[i].Commit = curItem.Commit
- curItem.NeedsMap[i].FromParentNo = -1
- }
- }
-
- if curItem.Child == nil && curItem.MergedChildren == nil {
- return true, nil
- }
-
- if curItem.MergedChildren == nil {
- return applyNeeds(curItem.Child, curItem.NeedsMap, curItem.IdenticalToChild, curItem.ParentNo)
- }
-
- for _, ctn := range curItem.MergedChildren {
- m := 0 // position in merged needs map
- p := 0 // position in parent needs map
- for p < len(ctn.NeedsMap) {
- if ctn.NeedsMap[p].Cur == curItem.NeedsMap[m].Cur {
- ctn.NeedsMap[p].Commit = curItem.NeedsMap[m].Commit
- m++
- p++
- } else if ctn.NeedsMap[p].Cur < curItem.NeedsMap[m].Cur {
- p++
- } else {
- m++
- }
- }
- finished, err := applyNeeds(ctn.Child, ctn.NeedsMap, ctn.IdenticalToChild, ctn.ParentNo)
- if finished || err != nil {
- return finished, err
- }
- }
-
- return false, nil
-}
-
-func applyNeeds(child *queueItem, needsMap []lineMap, identicalToChild bool, parentNo int) (bool, error) {
- if identicalToChild {
- for i := range child.NeedsMap {
- l := &child.NeedsMap[i]
- if l.Cur != needsMap[i].Cur || l.Orig != needsMap[i].Orig {
- return false, errors.New("needsMap isn't the same? Why not??")
- }
- if l.Commit == nil || parentNo < l.FromParentNo {
- l.Commit = needsMap[i].Commit
- l.FromParentNo = parentNo
- }
- }
- } else {
- i := 0
- out:
- for j := range child.NeedsMap {
- l := &child.NeedsMap[j]
- for needsMap[i].Orig < l.Cur {
- i++
- if i == len(needsMap) {
- break out
- }
- }
- if l.Cur == needsMap[i].Orig {
- if l.Commit == nil || parentNo < l.FromParentNo {
- l.Commit = needsMap[i].Commit
- l.FromParentNo = parentNo
- }
- }
- }
- }
- child.numParentsNeedResolving--
- if child.numParentsNeedResolving == 0 {
- finished, err := finishNeeds(child)
- if finished || err != nil {
- return finished, err
- }
- }
-
- return false, nil
-}
-
-// String prints the results of a Blame using git-blame's style.
-func (b BlameResult) String() string {
- var buf bytes.Buffer
-
- // max line number length
- mlnl := len(strconv.Itoa(len(b.Lines)))
- // max author length
- mal := b.maxAuthorLength()
- format := fmt.Sprintf("%%s (%%-%ds %%s %%%dd) %%s\n", mal, mlnl)
-
- for ln := range b.Lines {
- _, _ = fmt.Fprintf(&buf, format, b.Lines[ln].Hash.String()[:8],
- b.Lines[ln].AuthorName, b.Lines[ln].Date.Format("2006-01-02 15:04:05 -0700"), ln+1, b.Lines[ln].Text)
- }
- return buf.String()
-}
-
-// utility function to calculate the number of runes needed
-// to print the longest author name in the blame of a file.
-func (b BlameResult) maxAuthorLength() int {
- m := 0
- for ln := range b.Lines {
- m = max(m, utf8.RuneCountInString(b.Lines[ln].AuthorName))
- }
- return m
-}
-
-func min(a, b int) int {
- if a < b {
- return a
- }
- return b
-}
-
-func max(a, b int) int {
- if a > b {
- return a
- }
- return b
-}
-
-type childToNeedsMap struct {
- Child *queueItem
- NeedsMap []lineMap
- IdenticalToChild bool
- ParentNo int
-}
-
-type queueItem struct {
- Child *queueItem
- MergedChildren []childToNeedsMap
- Commit *object.Commit
- path string
- Contents string
- NeedsMap []lineMap
- numParentsNeedResolving int
- IdenticalToChild bool
- ParentNo int
-}
-
-type priorityQueueImp []*queueItem
-
-func (pq *priorityQueueImp) Len() int { return len(*pq) }
-func (pq *priorityQueueImp) Less(i, j int) bool {
- return !(*pq)[i].Commit.Less((*pq)[j].Commit)
-}
-func (pq *priorityQueueImp) Swap(i, j int) { (*pq)[i], (*pq)[j] = (*pq)[j], (*pq)[i] }
-func (pq *priorityQueueImp) Push(x any) { *pq = append(*pq, x.(*queueItem)) }
-func (pq *priorityQueueImp) Pop() any {
- n := len(*pq)
- ret := (*pq)[n-1]
- (*pq)[n-1] = nil // ovoid memory leak
- *pq = (*pq)[0 : n-1]
-
- return ret
-}
-func (pq *priorityQueueImp) Peek() *object.Commit {
- if len(*pq) == 0 {
- return nil
- }
- return (*pq)[0].Commit
-}
-
-type priorityQueue priorityQueueImp
-
-func (pq *priorityQueue) Init() { heap.Init((*priorityQueueImp)(pq)) }
-func (pq *priorityQueue) Len() int { return (*priorityQueueImp)(pq).Len() }
-func (pq *priorityQueue) Push(c *queueItem) {
- heap.Push((*priorityQueueImp)(pq), c)
-}
-func (pq *priorityQueue) Pop() *queueItem {
- return heap.Pop((*priorityQueueImp)(pq)).(*queueItem)
-}
-func (pq *priorityQueue) Peek() *object.Commit { return (*priorityQueueImp)(pq).Peek() }
-
-type parentCommit struct {
- Commit *object.Commit
- Path string
-}
-
-func parentsContainingPath(path string, c *object.Commit) ([]parentCommit, error) {
- // TODO: benchmark this method making git.object.Commit.parent public instead of using
- // an iterator
- var result []parentCommit
- iter := c.Parents()
- for {
- parent, err := iter.Next()
- if err == io.EOF {
- return result, nil
- }
- if err != nil {
- return nil, err
- }
- if _, err := parent.File(path); err == nil {
- result = append(result, parentCommit{parent, path})
- } else {
- // look for renames
- patch, err := parent.Patch(c)
- if err != nil {
- return nil, err
- } else if patch != nil {
- for _, fp := range patch.FilePatches() {
- from, to := fp.Files()
- if from != nil && to != nil && to.Path() == path {
- result = append(result, parentCommit{parent, from.Path()})
- break
- }
- }
- }
- }
- }
-}
-
-func blobHash(path string, commit *object.Commit) (plumbing.Hash, error) {
- file, err := commit.File(path)
- if err != nil {
- return plumbing.ZeroHash, err
- }
- return file.Hash, nil
-}
diff --git a/vendor/github.com/go-git/go-git/v5/common.go b/vendor/github.com/go-git/go-git/v5/common.go
deleted file mode 100644
index 6174339a815..00000000000
--- a/vendor/github.com/go-git/go-git/v5/common.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package git
-
-import "strings"
-
-// countLines returns the number of lines in a string à la git, this is
-// The newline character is assumed to be '\n'. The empty string
-// contains 0 lines. If the last line of the string doesn't end with a
-// newline, it will still be considered a line.
-func countLines(s string) int {
- if s == "" {
- return 0
- }
-
- nEOL := strings.Count(s, "\n")
- if strings.HasSuffix(s, "\n") {
- return nEOL
- }
-
- return nEOL + 1
-}
diff --git a/vendor/github.com/go-git/go-git/v5/config/branch.go b/vendor/github.com/go-git/go-git/v5/config/branch.go
deleted file mode 100644
index db2cb499acc..00000000000
--- a/vendor/github.com/go-git/go-git/v5/config/branch.go
+++ /dev/null
@@ -1,123 +0,0 @@
-package config
-
-import (
- "errors"
- "strings"
-
- "github.com/go-git/go-git/v5/plumbing"
- format "github.com/go-git/go-git/v5/plumbing/format/config"
-)
-
-var (
- errBranchEmptyName = errors.New("branch config: empty name")
- errBranchInvalidMerge = errors.New("branch config: invalid merge")
- errBranchInvalidRebase = errors.New("branch config: rebase must be one of 'true' or 'interactive'")
-)
-
-// Branch contains information on the
-// local branches and which remote to track
-type Branch struct {
- // Name of branch
- Name string
- // Remote name of remote to track
- Remote string
- // Merge is the local refspec for the branch
- Merge plumbing.ReferenceName
- // Rebase instead of merge when pulling. Valid values are
- // "true" and "interactive". "false" is undocumented and
- // typically represented by the non-existence of this field
- Rebase string
- // Description explains what the branch is for.
- // Multi-line explanations may be used.
- //
- // Original git command to edit:
- // git branch --edit-description
- Description string
-
- raw *format.Subsection
-}
-
-// Validate validates fields of branch
-func (b *Branch) Validate() error {
- if b.Name == "" {
- return errBranchEmptyName
- }
-
- if b.Merge != "" && !b.Merge.IsBranch() {
- return errBranchInvalidMerge
- }
-
- if b.Rebase != "" &&
- b.Rebase != "true" &&
- b.Rebase != "interactive" &&
- b.Rebase != "false" {
- return errBranchInvalidRebase
- }
-
- return plumbing.NewBranchReferenceName(b.Name).Validate()
-}
-
-func (b *Branch) marshal() *format.Subsection {
- if b.raw == nil {
- b.raw = &format.Subsection{}
- }
-
- b.raw.Name = b.Name
-
- if b.Remote == "" {
- b.raw.RemoveOption(remoteSection)
- } else {
- b.raw.SetOption(remoteSection, b.Remote)
- }
-
- if b.Merge == "" {
- b.raw.RemoveOption(mergeKey)
- } else {
- b.raw.SetOption(mergeKey, string(b.Merge))
- }
-
- if b.Rebase == "" {
- b.raw.RemoveOption(rebaseKey)
- } else {
- b.raw.SetOption(rebaseKey, b.Rebase)
- }
-
- if b.Description == "" {
- b.raw.RemoveOption(descriptionKey)
- } else {
- desc := quoteDescription(b.Description)
- b.raw.SetOption(descriptionKey, desc)
- }
-
- return b.raw
-}
-
-// hack to trigger conditional quoting in the
-// plumbing/format/config/Encoder.encodeOptions
-//
-// Current Encoder implementation uses Go %q format if value contains a backslash character,
-// which is not consistent with reference git implementation.
-// git just replaces newline characters with \n, while Encoder prints them directly.
-// Until value quoting fix, we should escape description value by replacing newline characters with \n.
-func quoteDescription(desc string) string {
- return strings.ReplaceAll(desc, "\n", `\n`)
-}
-
-func (b *Branch) unmarshal(s *format.Subsection) error {
- b.raw = s
-
- b.Name = b.raw.Name
- b.Remote = b.raw.Options.Get(remoteSection)
- b.Merge = plumbing.ReferenceName(b.raw.Options.Get(mergeKey))
- b.Rebase = b.raw.Options.Get(rebaseKey)
- b.Description = unquoteDescription(b.raw.Options.Get(descriptionKey))
-
- return b.Validate()
-}
-
-// hack to enable conditional quoting in the
-// plumbing/format/config/Encoder.encodeOptions
-// goto quoteDescription for details.
-func unquoteDescription(desc string) string {
- return strings.ReplaceAll(desc, `\n`, "\n")
-}
diff --git a/vendor/github.com/go-git/go-git/v5/config/config.go b/vendor/github.com/go-git/go-git/v5/config/config.go
deleted file mode 100644
index 6d41c15dcdc..00000000000
--- a/vendor/github.com/go-git/go-git/v5/config/config.go
+++ /dev/null
@@ -1,696 +0,0 @@
-// Package config contains the abstraction of multiple config files
-package config
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "os"
- "path/filepath"
- "sort"
- "strconv"
-
- "github.com/go-git/go-billy/v5/osfs"
- "github.com/go-git/go-git/v5/internal/url"
- "github.com/go-git/go-git/v5/plumbing"
- format "github.com/go-git/go-git/v5/plumbing/format/config"
-)
-
-const (
- // DefaultFetchRefSpec is the default refspec used for fetch.
- DefaultFetchRefSpec = "+refs/heads/*:refs/remotes/%s/*"
- // DefaultPushRefSpec is the default refspec used for push.
- DefaultPushRefSpec = "refs/heads/*:refs/heads/*"
-)
-
-// ConfigStorer generic storage of Config object
-type ConfigStorer interface {
- Config() (*Config, error)
- SetConfig(*Config) error
-}
-
-var (
- ErrInvalid = errors.New("config invalid key in remote or branch")
- ErrRemoteConfigNotFound = errors.New("remote config not found")
- ErrRemoteConfigEmptyURL = errors.New("remote config: empty URL")
- ErrRemoteConfigEmptyName = errors.New("remote config: empty name")
-)
-
-// Scope defines the scope of a config file, such as local, global or system.
-type Scope int
-
-// Available ConfigScope's
-const (
- LocalScope Scope = iota
- GlobalScope
- SystemScope
-)
-
-// Config contains the repository configuration
-// https://www.kernel.org/pub/software/scm/git/docs/git-config.html#FILES
-type Config struct {
- Core struct {
- // IsBare if true this repository is assumed to be bare and has no
- // working directory associated with it.
- IsBare bool
- // Worktree is the path to the root of the working tree.
- Worktree string
- // CommentChar is the character indicating the start of a
- // comment for commands like commit and tag
- CommentChar string
- // RepositoryFormatVersion identifies the repository format and layout version.
- RepositoryFormatVersion format.RepositoryFormatVersion
- }
-
- User struct {
- // Name is the personal name of the author and the committer of a commit.
- Name string
- // Email is the email of the author and the committer of a commit.
- Email string
- }
-
- Author struct {
- // Name is the personal name of the author of a commit.
- Name string
- // Email is the email of the author of a commit.
- Email string
- }
-
- Committer struct {
- // Name is the personal name of the committer of a commit.
- Name string
- // Email is the email of the committer of a commit.
- Email string
- }
-
- Pack struct {
- // Window controls the size of the sliding window for delta
- // compression. The default is 10. A value of 0 turns off
- // delta compression entirely.
- Window uint
- }
-
- Init struct {
- // DefaultBranch Allows overriding the default branch name
- // e.g. when initializing a new repository or when cloning
- // an empty repository.
- DefaultBranch string
- }
-
- Extensions struct {
- // ObjectFormat specifies the hash algorithm to use. The
- // acceptable values are sha1 and sha256. If not specified,
- // sha1 is assumed. It is an error to specify this key unless
- // core.repositoryFormatVersion is 1.
- //
- // This setting must not be changed after repository initialization
- // (e.g. clone or init).
- ObjectFormat format.ObjectFormat
- }
-
- // Remotes list of repository remotes, the key of the map is the name
- // of the remote, should equal to RemoteConfig.Name.
- Remotes map[string]*RemoteConfig
- // Submodules list of repository submodules, the key of the map is the name
- // of the submodule, should equal to Submodule.Name.
- Submodules map[string]*Submodule
- // Branches list of branches, the key is the branch name and should
- // equal Branch.Name
- Branches map[string]*Branch
- // URLs list of url rewrite rules, if repo url starts with URL.InsteadOf value, it will be replaced with the
- // key instead.
- URLs map[string]*URL
- // Raw contains the raw information of a config file. The main goal is
- // preserve the parsed information from the original format, to avoid
- // dropping unsupported fields.
- Raw *format.Config
-}
-
-// NewConfig returns a new empty Config.
-func NewConfig() *Config {
- config := &Config{
- Remotes: make(map[string]*RemoteConfig),
- Submodules: make(map[string]*Submodule),
- Branches: make(map[string]*Branch),
- URLs: make(map[string]*URL),
- Raw: format.New(),
- }
-
- config.Pack.Window = DefaultPackWindow
-
- return config
-}
-
-// ReadConfig reads a config file from a io.Reader.
-func ReadConfig(r io.Reader) (*Config, error) {
- b, err := io.ReadAll(r)
- if err != nil {
- return nil, err
- }
-
- cfg := NewConfig()
- if err = cfg.Unmarshal(b); err != nil {
- return nil, err
- }
-
- return cfg, nil
-}
-
-// LoadConfig loads a config file from a given scope. The returned Config,
-// contains exclusively information from the given scope. If it couldn't find a
-// config file to the given scope, an empty one is returned.
-func LoadConfig(scope Scope) (*Config, error) {
- if scope == LocalScope {
- return nil, fmt.Errorf("LocalScope should be read from the a ConfigStorer")
- }
-
- files, err := Paths(scope)
- if err != nil {
- return nil, err
- }
-
- for _, file := range files {
- f, err := osfs.Default.Open(file)
- if err != nil {
- if os.IsNotExist(err) {
- continue
- }
-
- return nil, err
- }
-
- defer f.Close()
- return ReadConfig(f)
- }
-
- return NewConfig(), nil
-}
-
-// Paths returns the config file location for a given scope.
-func Paths(scope Scope) ([]string, error) {
- var files []string
- switch scope {
- case GlobalScope:
- xdg := os.Getenv("XDG_CONFIG_HOME")
- if xdg != "" {
- files = append(files, filepath.Join(xdg, "git/config"))
- }
-
- home, err := os.UserHomeDir()
- if err != nil {
- return nil, err
- }
-
- files = append(files,
- filepath.Join(home, ".gitconfig"),
- filepath.Join(home, ".config/git/config"),
- )
- case SystemScope:
- files = append(files, "/etc/gitconfig")
- }
-
- return files, nil
-}
-
-// Validate validates the fields and sets the default values.
-func (c *Config) Validate() error {
- for name, r := range c.Remotes {
- if r.Name != name {
- return ErrInvalid
- }
-
- if err := r.Validate(); err != nil {
- return err
- }
- }
-
- for name, b := range c.Branches {
- if b.Name != name {
- return ErrInvalid
- }
-
- if err := b.Validate(); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-const (
- remoteSection = "remote"
- submoduleSection = "submodule"
- branchSection = "branch"
- coreSection = "core"
- packSection = "pack"
- userSection = "user"
- authorSection = "author"
- committerSection = "committer"
- initSection = "init"
- urlSection = "url"
- extensionsSection = "extensions"
- fetchKey = "fetch"
- urlKey = "url"
- bareKey = "bare"
- worktreeKey = "worktree"
- commentCharKey = "commentChar"
- windowKey = "window"
- mergeKey = "merge"
- rebaseKey = "rebase"
- nameKey = "name"
- emailKey = "email"
- descriptionKey = "description"
- defaultBranchKey = "defaultBranch"
- repositoryFormatVersionKey = "repositoryformatversion"
- objectFormat = "objectformat"
- mirrorKey = "mirror"
-
- // DefaultPackWindow holds the number of previous objects used to
- // generate deltas. The value 10 is the same used by git command.
- DefaultPackWindow = uint(10)
-)
-
-// Unmarshal parses a git-config file and stores it.
-func (c *Config) Unmarshal(b []byte) error {
- r := bytes.NewBuffer(b)
- d := format.NewDecoder(r)
-
- c.Raw = format.New()
- if err := d.Decode(c.Raw); err != nil {
- return err
- }
-
- c.unmarshalCore()
- c.unmarshalUser()
- c.unmarshalInit()
- if err := c.unmarshalPack(); err != nil {
- return err
- }
- unmarshalSubmodules(c.Raw, c.Submodules)
-
- if err := c.unmarshalBranches(); err != nil {
- return err
- }
-
- if err := c.unmarshalURLs(); err != nil {
- return err
- }
-
- return c.unmarshalRemotes()
-}
-
-func (c *Config) unmarshalCore() {
- s := c.Raw.Section(coreSection)
- if s.Options.Get(bareKey) == "true" {
- c.Core.IsBare = true
- }
-
- c.Core.Worktree = s.Options.Get(worktreeKey)
- c.Core.CommentChar = s.Options.Get(commentCharKey)
-}
-
-func (c *Config) unmarshalUser() {
- s := c.Raw.Section(userSection)
- c.User.Name = s.Options.Get(nameKey)
- c.User.Email = s.Options.Get(emailKey)
-
- s = c.Raw.Section(authorSection)
- c.Author.Name = s.Options.Get(nameKey)
- c.Author.Email = s.Options.Get(emailKey)
-
- s = c.Raw.Section(committerSection)
- c.Committer.Name = s.Options.Get(nameKey)
- c.Committer.Email = s.Options.Get(emailKey)
-}
-
-func (c *Config) unmarshalPack() error {
- s := c.Raw.Section(packSection)
- window := s.Options.Get(windowKey)
- if window == "" {
- c.Pack.Window = DefaultPackWindow
- } else {
- winUint, err := strconv.ParseUint(window, 10, 32)
- if err != nil {
- return err
- }
- c.Pack.Window = uint(winUint)
- }
- return nil
-}
-
-func (c *Config) unmarshalRemotes() error {
- s := c.Raw.Section(remoteSection)
- for _, sub := range s.Subsections {
- r := &RemoteConfig{}
- if err := r.unmarshal(sub); err != nil {
- return err
- }
-
- c.Remotes[r.Name] = r
- }
-
- // Apply insteadOf url rules
- for _, r := range c.Remotes {
- r.applyURLRules(c.URLs)
- }
-
- return nil
-}
-
-func (c *Config) unmarshalURLs() error {
- s := c.Raw.Section(urlSection)
- for _, sub := range s.Subsections {
- r := &URL{}
- if err := r.unmarshal(sub); err != nil {
- return err
- }
-
- c.URLs[r.Name] = r
- }
-
- return nil
-}
-
-func unmarshalSubmodules(fc *format.Config, submodules map[string]*Submodule) {
- s := fc.Section(submoduleSection)
- for _, sub := range s.Subsections {
- m := &Submodule{}
- m.unmarshal(sub)
-
- if m.Validate() == ErrModuleBadPath {
- continue
- }
-
- submodules[m.Name] = m
- }
-}
-
-func (c *Config) unmarshalBranches() error {
- bs := c.Raw.Section(branchSection)
- for _, sub := range bs.Subsections {
- b := &Branch{}
-
- if err := b.unmarshal(sub); err != nil {
- return err
- }
-
- c.Branches[b.Name] = b
- }
- return nil
-}
-
-func (c *Config) unmarshalInit() {
- s := c.Raw.Section(initSection)
- c.Init.DefaultBranch = s.Options.Get(defaultBranchKey)
-}
-
-// Marshal returns Config encoded as a git-config file.
-func (c *Config) Marshal() ([]byte, error) {
- c.marshalCore()
- c.marshalExtensions()
- c.marshalUser()
- c.marshalPack()
- c.marshalRemotes()
- c.marshalSubmodules()
- c.marshalBranches()
- c.marshalURLs()
- c.marshalInit()
-
- buf := bytes.NewBuffer(nil)
- if err := format.NewEncoder(buf).Encode(c.Raw); err != nil {
- return nil, err
- }
-
- return buf.Bytes(), nil
-}
-
-func (c *Config) marshalCore() {
- s := c.Raw.Section(coreSection)
- s.SetOption(bareKey, fmt.Sprintf("%t", c.Core.IsBare))
- if string(c.Core.RepositoryFormatVersion) != "" {
- s.SetOption(repositoryFormatVersionKey, string(c.Core.RepositoryFormatVersion))
- }
-
- if c.Core.Worktree != "" {
- s.SetOption(worktreeKey, c.Core.Worktree)
- }
-}
-
-func (c *Config) marshalExtensions() {
- // Extensions are only supported on Version 1, therefore
- // ignore them otherwise.
- if c.Core.RepositoryFormatVersion == format.Version_1 {
- s := c.Raw.Section(extensionsSection)
- s.SetOption(objectFormat, string(c.Extensions.ObjectFormat))
- }
-}
-
-func (c *Config) marshalUser() {
- s := c.Raw.Section(userSection)
- if c.User.Name != "" {
- s.SetOption(nameKey, c.User.Name)
- }
-
- if c.User.Email != "" {
- s.SetOption(emailKey, c.User.Email)
- }
-
- s = c.Raw.Section(authorSection)
- if c.Author.Name != "" {
- s.SetOption(nameKey, c.Author.Name)
- }
-
- if c.Author.Email != "" {
- s.SetOption(emailKey, c.Author.Email)
- }
-
- s = c.Raw.Section(committerSection)
- if c.Committer.Name != "" {
- s.SetOption(nameKey, c.Committer.Name)
- }
-
- if c.Committer.Email != "" {
- s.SetOption(emailKey, c.Committer.Email)
- }
-}
-
-func (c *Config) marshalPack() {
- s := c.Raw.Section(packSection)
- if c.Pack.Window != DefaultPackWindow {
- s.SetOption(windowKey, fmt.Sprintf("%d", c.Pack.Window))
- }
-}
-
-func (c *Config) marshalRemotes() {
- s := c.Raw.Section(remoteSection)
- newSubsections := make(format.Subsections, 0, len(c.Remotes))
- added := make(map[string]bool)
- for _, subsection := range s.Subsections {
- if remote, ok := c.Remotes[subsection.Name]; ok {
- newSubsections = append(newSubsections, remote.marshal())
- added[subsection.Name] = true
- }
- }
-
- remoteNames := make([]string, 0, len(c.Remotes))
- for name := range c.Remotes {
- remoteNames = append(remoteNames, name)
- }
-
- sort.Strings(remoteNames)
-
- for _, name := range remoteNames {
- if !added[name] {
- newSubsections = append(newSubsections, c.Remotes[name].marshal())
- }
- }
-
- s.Subsections = newSubsections
-}
-
-func (c *Config) marshalSubmodules() {
- s := c.Raw.Section(submoduleSection)
- s.Subsections = make(format.Subsections, len(c.Submodules))
-
- var i int
- for _, r := range c.Submodules {
- section := r.marshal()
- // the submodule section at config is a subset of the .gitmodule file
- // we should remove the non-valid options for the config file.
- section.RemoveOption(pathKey)
- s.Subsections[i] = section
- i++
- }
-}
-
-func (c *Config) marshalBranches() {
- s := c.Raw.Section(branchSection)
- newSubsections := make(format.Subsections, 0, len(c.Branches))
- added := make(map[string]bool)
- for _, subsection := range s.Subsections {
- if branch, ok := c.Branches[subsection.Name]; ok {
- newSubsections = append(newSubsections, branch.marshal())
- added[subsection.Name] = true
- }
- }
-
- branchNames := make([]string, 0, len(c.Branches))
- for name := range c.Branches {
- branchNames = append(branchNames, name)
- }
-
- sort.Strings(branchNames)
-
- for _, name := range branchNames {
- if !added[name] {
- newSubsections = append(newSubsections, c.Branches[name].marshal())
- }
- }
-
- s.Subsections = newSubsections
-}
-
-func (c *Config) marshalURLs() {
- s := c.Raw.Section(urlSection)
- s.Subsections = make(format.Subsections, len(c.URLs))
-
- var i int
- for _, r := range c.URLs {
- section := r.marshal()
- // the submodule section at config is a subset of the .gitmodule file
- // we should remove the non-valid options for the config file.
- s.Subsections[i] = section
- i++
- }
-}
-
-func (c *Config) marshalInit() {
- s := c.Raw.Section(initSection)
- if c.Init.DefaultBranch != "" {
- s.SetOption(defaultBranchKey, c.Init.DefaultBranch)
- }
-}
-
-// RemoteConfig contains the configuration for a given remote repository.
-type RemoteConfig struct {
- // Name of the remote
- Name string
- // URLs the URLs of a remote repository. It must be non-empty. Fetch will
- // always use the first URL, while push will use all of them.
- URLs []string
- // Mirror indicates that the repository is a mirror of remote.
- Mirror bool
-
- // insteadOfRulesApplied have urls been modified
- insteadOfRulesApplied bool
- // originalURLs are the urls before applying insteadOf rules
- originalURLs []string
-
- // Fetch the default set of "refspec" for fetch operation
- Fetch []RefSpec
-
- // raw representation of the subsection, filled by marshal or unmarshal are
- // called
- raw *format.Subsection
-}
-
-// Validate validates the fields and sets the default values.
-func (c *RemoteConfig) Validate() error {
- if c.Name == "" {
- return ErrRemoteConfigEmptyName
- }
-
- if len(c.URLs) == 0 {
- return ErrRemoteConfigEmptyURL
- }
-
- for _, r := range c.Fetch {
- if err := r.Validate(); err != nil {
- return err
- }
- }
-
- if len(c.Fetch) == 0 {
- c.Fetch = []RefSpec{RefSpec(fmt.Sprintf(DefaultFetchRefSpec, c.Name))}
- }
-
- return plumbing.NewRemoteHEADReferenceName(c.Name).Validate()
-}
-
-func (c *RemoteConfig) unmarshal(s *format.Subsection) error {
- c.raw = s
-
- fetch := []RefSpec{}
- for _, f := range c.raw.Options.GetAll(fetchKey) {
- rs := RefSpec(f)
- if err := rs.Validate(); err != nil {
- return err
- }
-
- fetch = append(fetch, rs)
- }
-
- c.Name = c.raw.Name
- c.URLs = append([]string(nil), c.raw.Options.GetAll(urlKey)...)
- c.Fetch = fetch
- c.Mirror = c.raw.Options.Get(mirrorKey) == "true"
-
- return nil
-}
-
-func (c *RemoteConfig) marshal() *format.Subsection {
- if c.raw == nil {
- c.raw = &format.Subsection{}
- }
-
- c.raw.Name = c.Name
- if len(c.URLs) == 0 {
- c.raw.RemoveOption(urlKey)
- } else {
- urls := c.URLs
- if c.insteadOfRulesApplied {
- urls = c.originalURLs
- }
-
- c.raw.SetOption(urlKey, urls...)
- }
-
- if len(c.Fetch) == 0 {
- c.raw.RemoveOption(fetchKey)
- } else {
- var values []string
- for _, rs := range c.Fetch {
- values = append(values, rs.String())
- }
-
- c.raw.SetOption(fetchKey, values...)
- }
-
- if c.Mirror {
- c.raw.SetOption(mirrorKey, strconv.FormatBool(c.Mirror))
- }
-
- return c.raw
-}
-
-func (c *RemoteConfig) IsFirstURLLocal() bool {
- return url.IsLocalEndpoint(c.URLs[0])
-}
-
-func (c *RemoteConfig) applyURLRules(urlRules map[string]*URL) {
- // save original urls
- originalURLs := make([]string, len(c.URLs))
- copy(originalURLs, c.URLs)
-
- for i, url := range c.URLs {
- if matchingURLRule := findLongestInsteadOfMatch(url, urlRules); matchingURLRule != nil {
- c.URLs[i] = matchingURLRule.ApplyInsteadOf(c.URLs[i])
- c.insteadOfRulesApplied = true
- }
- }
-
- if c.insteadOfRulesApplied {
- c.originalURLs = originalURLs
- }
-}
diff --git a/vendor/github.com/go-git/go-git/v5/config/modules.go b/vendor/github.com/go-git/go-git/v5/config/modules.go
deleted file mode 100644
index 1c10aa354eb..00000000000
--- a/vendor/github.com/go-git/go-git/v5/config/modules.go
+++ /dev/null
@@ -1,139 +0,0 @@
-package config
-
-import (
- "bytes"
- "errors"
- "regexp"
-
- format "github.com/go-git/go-git/v5/plumbing/format/config"
-)
-
-var (
- ErrModuleEmptyURL = errors.New("module config: empty URL")
- ErrModuleEmptyPath = errors.New("module config: empty path")
- ErrModuleBadPath = errors.New("submodule has an invalid path")
-)
-
-var (
- // Matches module paths with dotdot ".." components.
- dotdotPath = regexp.MustCompile(`(^|[/\\])\.\.([/\\]|$)`)
-)
-
-// Modules defines the submodules properties, represents a .gitmodules file
-// https://www.kernel.org/pub/software/scm/git/docs/gitmodules.html
-type Modules struct {
- // Submodules is a map of submodules being the key the name of the submodule.
- Submodules map[string]*Submodule
-
- raw *format.Config
-}
-
-// NewModules returns a new empty Modules
-func NewModules() *Modules {
- return &Modules{
- Submodules: make(map[string]*Submodule),
- raw: format.New(),
- }
-}
-
-const (
- pathKey = "path"
- branchKey = "branch"
-)
-
-// Unmarshal parses a git-config file and stores it.
-func (m *Modules) Unmarshal(b []byte) error {
- r := bytes.NewBuffer(b)
- d := format.NewDecoder(r)
-
- m.raw = format.New()
- if err := d.Decode(m.raw); err != nil {
- return err
- }
-
- unmarshalSubmodules(m.raw, m.Submodules)
- return nil
-}
-
-// Marshal returns Modules encoded as a git-config file.
-func (m *Modules) Marshal() ([]byte, error) {
- s := m.raw.Section(submoduleSection)
- s.Subsections = make(format.Subsections, len(m.Submodules))
-
- var i int
- for _, r := range m.Submodules {
- s.Subsections[i] = r.marshal()
- i++
- }
-
- buf := bytes.NewBuffer(nil)
- if err := format.NewEncoder(buf).Encode(m.raw); err != nil {
- return nil, err
- }
-
- return buf.Bytes(), nil
-}
-
-// Submodule defines a submodule.
-type Submodule struct {
- // Name module name
- Name string
- // Path defines the path, relative to the top-level directory of the Git
- // working tree.
- Path string
- // URL defines a URL from which the submodule repository can be cloned.
- URL string
- // Branch is a remote branch name for tracking updates in the upstream
- // submodule. Optional value.
- Branch string
-
- // raw representation of the subsection, filled by marshal or unmarshal are
- // called.
- raw *format.Subsection
-}
-
-// Validate validates the fields and sets the default values.
-func (m *Submodule) Validate() error {
- if m.Path == "" {
- return ErrModuleEmptyPath
- }
-
- if m.URL == "" {
- return ErrModuleEmptyURL
- }
-
- if dotdotPath.MatchString(m.Path) {
- return ErrModuleBadPath
- }
-
- return nil
-}
-
-func (m *Submodule) unmarshal(s *format.Subsection) {
- m.raw = s
-
- m.Name = m.raw.Name
- m.Path = m.raw.Option(pathKey)
- m.URL = m.raw.Option(urlKey)
- m.Branch = m.raw.Option(branchKey)
-}
-
-func (m *Submodule) marshal() *format.Subsection {
- if m.raw == nil {
- m.raw = &format.Subsection{}
- }
-
- m.raw.Name = m.Name
- if m.raw.Name == "" {
- m.raw.Name = m.Path
- }
-
- m.raw.SetOption(pathKey, m.Path)
- m.raw.SetOption(urlKey, m.URL)
-
- if m.Branch != "" {
- m.raw.SetOption(branchKey, m.Branch)
- }
-
- return m.raw
-}
diff --git a/vendor/github.com/go-git/go-git/v5/config/refspec.go b/vendor/github.com/go-git/go-git/v5/config/refspec.go
deleted file mode 100644
index e2cf8c97b13..00000000000
--- a/vendor/github.com/go-git/go-git/v5/config/refspec.go
+++ /dev/null
@@ -1,155 +0,0 @@
-package config
-
-import (
- "errors"
- "strings"
-
- "github.com/go-git/go-git/v5/plumbing"
-)
-
-const (
- refSpecWildcard = "*"
- refSpecForce = "+"
- refSpecSeparator = ":"
-)
-
-var (
- ErrRefSpecMalformedSeparator = errors.New("malformed refspec, separators are wrong")
- ErrRefSpecMalformedWildcard = errors.New("malformed refspec, mismatched number of wildcards")
-)
-
-// RefSpec is a mapping from local branches to remote references.
-// The format of the refspec is an optional +, followed by :, where
-// is the pattern for references on the remote side and is where
-// those references will be written locally. The + tells Git to update the
-// reference even if it isn’t a fast-forward.
-// eg.: "+refs/heads/*:refs/remotes/origin/*"
-//
-// https://git-scm.com/book/en/v2/Git-Internals-The-Refspec
-type RefSpec string
-
-// Validate validates the RefSpec
-func (s RefSpec) Validate() error {
- spec := string(s)
- if strings.Count(spec, refSpecSeparator) != 1 {
- return ErrRefSpecMalformedSeparator
- }
-
- sep := strings.Index(spec, refSpecSeparator)
- if sep == len(spec)-1 {
- return ErrRefSpecMalformedSeparator
- }
-
- ws := strings.Count(spec[0:sep], refSpecWildcard)
- wd := strings.Count(spec[sep+1:], refSpecWildcard)
- if ws == wd && ws < 2 && wd < 2 {
- return nil
- }
-
- return ErrRefSpecMalformedWildcard
-}
-
-// IsForceUpdate returns if update is allowed in non fast-forward merges.
-func (s RefSpec) IsForceUpdate() bool {
- return s[0] == refSpecForce[0]
-}
-
-// IsDelete returns true if the refspec indicates a delete (empty src).
-func (s RefSpec) IsDelete() bool {
- return s[0] == refSpecSeparator[0]
-}
-
-// IsExactSHA1 returns true if the source is a SHA1 hash.
-func (s RefSpec) IsExactSHA1() bool {
- return plumbing.IsHash(s.Src())
-}
-
-// Src returns the src side.
-func (s RefSpec) Src() string {
- spec := string(s)
-
- var start int
- if s.IsForceUpdate() {
- start = 1
- } else {
- start = 0
- }
-
- end := strings.Index(spec, refSpecSeparator)
- return spec[start:end]
-}
-
-// Match match the given plumbing.ReferenceName against the source.
-func (s RefSpec) Match(n plumbing.ReferenceName) bool {
- if !s.IsWildcard() {
- return s.matchExact(n)
- }
-
- return s.matchGlob(n)
-}
-
-// IsWildcard returns true if the RefSpec contains a wildcard.
-func (s RefSpec) IsWildcard() bool {
- return strings.Contains(string(s), refSpecWildcard)
-}
-
-func (s RefSpec) matchExact(n plumbing.ReferenceName) bool {
- return s.Src() == n.String()
-}
-
-func (s RefSpec) matchGlob(n plumbing.ReferenceName) bool {
- src := s.Src()
- name := n.String()
- wildcard := strings.Index(src, refSpecWildcard)
-
- var prefix, suffix string
- prefix = src[0:wildcard]
- if len(src) > wildcard+1 {
- suffix = src[wildcard+1:]
- }
-
- return len(name) >= len(prefix)+len(suffix) &&
- strings.HasPrefix(name, prefix) &&
- strings.HasSuffix(name, suffix)
-}
-
-// Dst returns the destination for the given remote reference.
-func (s RefSpec) Dst(n plumbing.ReferenceName) plumbing.ReferenceName {
- spec := string(s)
- start := strings.Index(spec, refSpecSeparator) + 1
- dst := spec[start:]
- src := s.Src()
-
- if !s.IsWildcard() {
- return plumbing.ReferenceName(dst)
- }
-
- name := n.String()
- ws := strings.Index(src, refSpecWildcard)
- wd := strings.Index(dst, refSpecWildcard)
- match := name[ws : len(name)-(len(src)-(ws+1))]
-
- return plumbing.ReferenceName(dst[0:wd] + match + dst[wd+1:])
-}
-
-func (s RefSpec) Reverse() RefSpec {
- spec := string(s)
- separator := strings.Index(spec, refSpecSeparator)
-
- return RefSpec(spec[separator+1:] + refSpecSeparator + spec[:separator])
-}
-
-func (s RefSpec) String() string {
- return string(s)
-}
-
-// MatchAny returns true if any of the RefSpec match with the given ReferenceName.
-func MatchAny(l []RefSpec, n plumbing.ReferenceName) bool {
- for _, r := range l {
- if r.Match(n) {
- return true
- }
- }
-
- return false
-}
diff --git a/vendor/github.com/go-git/go-git/v5/config/url.go b/vendor/github.com/go-git/go-git/v5/config/url.go
deleted file mode 100644
index 114d6b2662a..00000000000
--- a/vendor/github.com/go-git/go-git/v5/config/url.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package config
-
-import (
- "errors"
- "strings"
-
- format "github.com/go-git/go-git/v5/plumbing/format/config"
-)
-
-var (
- errURLEmptyInsteadOf = errors.New("url config: empty insteadOf")
-)
-
-// Url defines Url rewrite rules
-type URL struct {
- // Name new base url
- Name string
- // Any URL that starts with this value will be rewritten to start, instead, with .
- // When more than one insteadOf strings match a given URL, the longest match is used.
- InsteadOf string
-
- // raw representation of the subsection, filled by marshal or unmarshal are
- // called.
- raw *format.Subsection
-}
-
-// Validate validates fields of branch
-func (b *URL) Validate() error {
- if b.InsteadOf == "" {
- return errURLEmptyInsteadOf
- }
-
- return nil
-}
-
-const (
- insteadOfKey = "insteadOf"
-)
-
-func (u *URL) unmarshal(s *format.Subsection) error {
- u.raw = s
-
- u.Name = s.Name
- u.InsteadOf = u.raw.Option(insteadOfKey)
- return nil
-}
-
-func (u *URL) marshal() *format.Subsection {
- if u.raw == nil {
- u.raw = &format.Subsection{}
- }
-
- u.raw.Name = u.Name
- u.raw.SetOption(insteadOfKey, u.InsteadOf)
-
- return u.raw
-}
-
-func findLongestInsteadOfMatch(remoteURL string, urls map[string]*URL) *URL {
- var longestMatch *URL
- for _, u := range urls {
- if !strings.HasPrefix(remoteURL, u.InsteadOf) {
- continue
- }
-
- // according to spec if there is more than one match, take the logest
- if longestMatch == nil || len(longestMatch.InsteadOf) < len(u.InsteadOf) {
- longestMatch = u
- }
- }
-
- return longestMatch
-}
-
-func (u *URL) ApplyInsteadOf(url string) string {
- if !strings.HasPrefix(url, u.InsteadOf) {
- return url
- }
-
- return u.Name + url[len(u.InsteadOf):]
-}
diff --git a/vendor/github.com/go-git/go-git/v5/doc.go b/vendor/github.com/go-git/go-git/v5/doc.go
deleted file mode 100644
index 3d817fe9c8c..00000000000
--- a/vendor/github.com/go-git/go-git/v5/doc.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// A highly extensible git implementation in pure Go.
-//
-// go-git aims to reach the completeness of libgit2 or jgit, nowadays covers the
-// majority of the plumbing read operations and some of the main write
-// operations, but lacks the main porcelain operations such as merges.
-//
-// It is highly extensible, we have been following the open/close principle in
-// its design to facilitate extensions, mainly focusing the efforts on the
-// persistence of the objects.
-package git
diff --git a/vendor/github.com/go-git/go-git/v5/internal/path_util/path_util.go b/vendor/github.com/go-git/go-git/v5/internal/path_util/path_util.go
deleted file mode 100644
index 48e4a3d0ece..00000000000
--- a/vendor/github.com/go-git/go-git/v5/internal/path_util/path_util.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package path_util
-
-import (
- "os"
- "os/user"
- "strings"
-)
-
-func ReplaceTildeWithHome(path string) (string, error) {
- if strings.HasPrefix(path, "~") {
- firstSlash := strings.Index(path, "/")
- if firstSlash == 1 {
- home, err := os.UserHomeDir()
- if err != nil {
- return path, err
- }
- return strings.Replace(path, "~", home, 1), nil
- } else if firstSlash > 1 {
- username := path[1:firstSlash]
- userAccount, err := user.Lookup(username)
- if err != nil {
- return path, err
- }
- return strings.Replace(path, path[:firstSlash], userAccount.HomeDir, 1), nil
- }
- }
-
- return path, nil
-}
diff --git a/vendor/github.com/go-git/go-git/v5/internal/revision/parser.go b/vendor/github.com/go-git/go-git/v5/internal/revision/parser.go
deleted file mode 100644
index 8a2a7190e5c..00000000000
--- a/vendor/github.com/go-git/go-git/v5/internal/revision/parser.go
+++ /dev/null
@@ -1,626 +0,0 @@
-// Package revision extracts git revision from string
-// More information about revision : https://www.kernel.org/pub/software/scm/git/docs/gitrevisions.html
-package revision
-
-import (
- "bytes"
- "fmt"
- "io"
- "regexp"
- "strconv"
- "time"
-)
-
-// ErrInvalidRevision is emitted if string doesn't match valid revision
-type ErrInvalidRevision struct {
- s string
-}
-
-func (e *ErrInvalidRevision) Error() string {
- return "Revision invalid : " + e.s
-}
-
-// Revisioner represents a revision component.
-// A revision is made of multiple revision components
-// obtained after parsing a revision string,
-// for instance revision "master~" will be converted in
-// two revision components Ref and TildePath
-type Revisioner interface {
-}
-
-// Ref represents a reference name : HEAD, master,
-type Ref string
-
-// TildePath represents ~, ~{n}
-type TildePath struct {
- Depth int
-}
-
-// CaretPath represents ^, ^{n}
-type CaretPath struct {
- Depth int
-}
-
-// CaretReg represents ^{/foo bar}
-type CaretReg struct {
- Regexp *regexp.Regexp
- Negate bool
-}
-
-// CaretType represents ^{commit}
-type CaretType struct {
- ObjectType string
-}
-
-// AtReflog represents @{n}
-type AtReflog struct {
- Depth int
-}
-
-// AtCheckout represents @{-n}
-type AtCheckout struct {
- Depth int
-}
-
-// AtUpstream represents @{upstream}, @{u}
-type AtUpstream struct {
- BranchName string
-}
-
-// AtPush represents @{push}
-type AtPush struct {
- BranchName string
-}
-
-// AtDate represents @{"2006-01-02T15:04:05Z"}
-type AtDate struct {
- Date time.Time
-}
-
-// ColonReg represents :/foo bar
-type ColonReg struct {
- Regexp *regexp.Regexp
- Negate bool
-}
-
-// ColonPath represents :./ :
-type ColonPath struct {
- Path string
-}
-
-// ColonStagePath represents ::/
-type ColonStagePath struct {
- Path string
- Stage int
-}
-
-// Parser represents a parser
-// use to tokenize and transform to revisioner chunks
-// a given string
-type Parser struct {
- s *scanner
- currentParsedChar struct {
- tok token
- lit string
- }
- unreadLastChar bool
-}
-
-// NewParserFromString returns a new instance of parser from a string.
-func NewParserFromString(s string) *Parser {
- return NewParser(bytes.NewBufferString(s))
-}
-
-// NewParser returns a new instance of parser.
-func NewParser(r io.Reader) *Parser {
- return &Parser{s: newScanner(r)}
-}
-
-// scan returns the next token from the underlying scanner
-// or the last scanned token if an unscan was requested
-func (p *Parser) scan() (token, string, error) {
- if p.unreadLastChar {
- p.unreadLastChar = false
- return p.currentParsedChar.tok, p.currentParsedChar.lit, nil
- }
-
- tok, lit, err := p.s.scan()
-
- p.currentParsedChar.tok, p.currentParsedChar.lit = tok, lit
-
- return tok, lit, err
-}
-
-// unscan pushes the previously read token back onto the buffer.
-func (p *Parser) unscan() { p.unreadLastChar = true }
-
-// Parse explode a revision string into revisioner chunks
-func (p *Parser) Parse() ([]Revisioner, error) {
- var rev Revisioner
- var revs []Revisioner
- var tok token
- var err error
-
- for {
- tok, _, err = p.scan()
-
- if err != nil {
- return nil, err
- }
-
- switch tok {
- case at:
- rev, err = p.parseAt()
- case tilde:
- rev, err = p.parseTilde()
- case caret:
- rev, err = p.parseCaret()
- case colon:
- rev, err = p.parseColon()
- case eof:
- err = p.validateFullRevision(&revs)
-
- if err != nil {
- return []Revisioner{}, err
- }
-
- return revs, nil
- default:
- p.unscan()
- rev, err = p.parseRef()
- }
-
- if err != nil {
- return []Revisioner{}, err
- }
-
- revs = append(revs, rev)
- }
-}
-
-// validateFullRevision ensures all revisioner chunks make a valid revision
-func (p *Parser) validateFullRevision(chunks *[]Revisioner) error {
- var hasReference bool
-
- for i, chunk := range *chunks {
- switch chunk.(type) {
- case Ref:
- if i == 0 {
- hasReference = true
- } else {
- return &ErrInvalidRevision{`reference must be defined once at the beginning`}
- }
- case AtDate:
- if len(*chunks) == 1 || hasReference && len(*chunks) == 2 {
- return nil
- }
-
- return &ErrInvalidRevision{`"@" statement is not valid, could be : @{}, @{}`}
- case AtReflog:
- if len(*chunks) == 1 || hasReference && len(*chunks) == 2 {
- return nil
- }
-
- return &ErrInvalidRevision{`"@" statement is not valid, could be : @{}, @{}`}
- case AtCheckout:
- if len(*chunks) == 1 {
- return nil
- }
-
- return &ErrInvalidRevision{`"@" statement is not valid, could be : @{-}`}
- case AtUpstream:
- if len(*chunks) == 1 || hasReference && len(*chunks) == 2 {
- return nil
- }
-
- return &ErrInvalidRevision{`"@" statement is not valid, could be : @{upstream}, @{upstream}, @{u}, @{u}`}
- case AtPush:
- if len(*chunks) == 1 || hasReference && len(*chunks) == 2 {
- return nil
- }
-
- return &ErrInvalidRevision{`"@" statement is not valid, could be : @{push}, @{push}`}
- case TildePath, CaretPath, CaretReg:
- if !hasReference {
- return &ErrInvalidRevision{`"~" or "^" statement must have a reference defined at the beginning`}
- }
- case ColonReg:
- if len(*chunks) == 1 {
- return nil
- }
-
- return &ErrInvalidRevision{`":" statement is not valid, could be : :/`}
- case ColonPath:
- if i == len(*chunks)-1 && hasReference || len(*chunks) == 1 {
- return nil
- }
-
- return &ErrInvalidRevision{`":" statement is not valid, could be : :`}
- case ColonStagePath:
- if len(*chunks) == 1 {
- return nil
- }
-
- return &ErrInvalidRevision{`":" statement is not valid, could be : ::`}
- }
- }
-
- return nil
-}
-
-// parseAt extract @ statements
-func (p *Parser) parseAt() (Revisioner, error) {
- var tok, nextTok token
- var lit, nextLit string
- var err error
-
- tok, _, err = p.scan()
-
- if err != nil {
- return nil, err
- }
-
- if tok != obrace {
- p.unscan()
-
- return Ref("HEAD"), nil
- }
-
- tok, lit, err = p.scan()
-
- if err != nil {
- return nil, err
- }
-
- nextTok, nextLit, err = p.scan()
-
- if err != nil {
- return nil, err
- }
-
- switch {
- case tok == word && (lit == "u" || lit == "upstream") && nextTok == cbrace:
- return AtUpstream{}, nil
- case tok == word && lit == "push" && nextTok == cbrace:
- return AtPush{}, nil
- case tok == number && nextTok == cbrace:
- n, _ := strconv.Atoi(lit)
-
- return AtReflog{n}, nil
- case tok == minus && nextTok == number:
- n, _ := strconv.Atoi(nextLit)
-
- t, _, err := p.scan()
-
- if err != nil {
- return nil, err
- }
-
- if t != cbrace {
- return nil, &ErrInvalidRevision{s: `missing "}" in @{-n} structure`}
- }
-
- return AtCheckout{n}, nil
- default:
- p.unscan()
-
- date := lit
-
- for {
- tok, lit, err = p.scan()
-
- if err != nil {
- return nil, err
- }
-
- switch {
- case tok == cbrace:
- t, err := time.Parse("2006-01-02T15:04:05Z", date)
-
- if err != nil {
- return nil, &ErrInvalidRevision{fmt.Sprintf(`wrong date "%s" must fit ISO-8601 format : 2006-01-02T15:04:05Z`, date)}
- }
-
- return AtDate{t}, nil
- case tok == eof:
- return nil, &ErrInvalidRevision{s: `missing "}" in @{} structure`}
- default:
- date += lit
- }
- }
- }
-}
-
-// parseTilde extract ~ statements
-func (p *Parser) parseTilde() (Revisioner, error) {
- var tok token
- var lit string
- var err error
-
- tok, lit, err = p.scan()
-
- if err != nil {
- return nil, err
- }
-
- switch {
- case tok == number:
- n, _ := strconv.Atoi(lit)
-
- return TildePath{n}, nil
- default:
- p.unscan()
- return TildePath{1}, nil
- }
-}
-
-// parseCaret extract ^ statements
-func (p *Parser) parseCaret() (Revisioner, error) {
- var tok token
- var lit string
- var err error
-
- tok, lit, err = p.scan()
-
- if err != nil {
- return nil, err
- }
-
- switch {
- case tok == obrace:
- r, err := p.parseCaretBraces()
-
- if err != nil {
- return nil, err
- }
-
- return r, nil
- case tok == number:
- n, _ := strconv.Atoi(lit)
-
- if n > 2 {
- return nil, &ErrInvalidRevision{fmt.Sprintf(`"%s" found must be 0, 1 or 2 after "^"`, lit)}
- }
-
- return CaretPath{n}, nil
- default:
- p.unscan()
- return CaretPath{1}, nil
- }
-}
-
-// parseCaretBraces extract ^{} statements
-func (p *Parser) parseCaretBraces() (Revisioner, error) {
- var tok, nextTok token
- var lit, _ string
- start := true
- var re string
- var negate bool
- var err error
-
- for {
- tok, lit, err = p.scan()
-
- if err != nil {
- return nil, err
- }
-
- nextTok, _, err = p.scan()
-
- if err != nil {
- return nil, err
- }
-
- switch {
- case tok == word && nextTok == cbrace && (lit == "commit" || lit == "tree" || lit == "blob" || lit == "tag" || lit == "object"):
- return CaretType{lit}, nil
- case re == "" && tok == cbrace:
- return CaretType{"tag"}, nil
- case re == "" && tok == emark && nextTok == emark:
- re += lit
- case re == "" && tok == emark && nextTok == minus:
- negate = true
- case re == "" && tok == emark:
- return nil, &ErrInvalidRevision{s: `revision suffix brace component sequences starting with "/!" others than those defined are reserved`}
- case re == "" && tok == slash:
- p.unscan()
- case tok != slash && start:
- return nil, &ErrInvalidRevision{fmt.Sprintf(`"%s" is not a valid revision suffix brace component`, lit)}
- case tok == eof:
- return nil, &ErrInvalidRevision{s: `missing "}" in ^{} structure`}
- case tok != cbrace:
- p.unscan()
- re += lit
- case tok == cbrace:
- p.unscan()
-
- reg, err := regexp.Compile(re)
-
- if err != nil {
- return CaretReg{}, &ErrInvalidRevision{fmt.Sprintf(`revision suffix brace component, %s`, err.Error())}
- }
-
- return CaretReg{reg, negate}, nil
- }
-
- start = false
- }
-}
-
-// parseColon extract : statements
-func (p *Parser) parseColon() (Revisioner, error) {
- var tok token
- var err error
-
- tok, _, err = p.scan()
-
- if err != nil {
- return nil, err
- }
-
- switch tok {
- case slash:
- return p.parseColonSlash()
- default:
- p.unscan()
- return p.parseColonDefault()
- }
-}
-
-// parseColonSlash extract :/ statements
-func (p *Parser) parseColonSlash() (Revisioner, error) {
- var tok, nextTok token
- var lit string
- var re string
- var negate bool
- var err error
-
- for {
- tok, lit, err = p.scan()
-
- if err != nil {
- return nil, err
- }
-
- nextTok, _, err = p.scan()
-
- if err != nil {
- return nil, err
- }
-
- switch {
- case tok == emark && nextTok == emark:
- re += lit
- case re == "" && tok == emark && nextTok == minus:
- negate = true
- case re == "" && tok == emark:
- return nil, &ErrInvalidRevision{s: `revision suffix brace component sequences starting with "/!" others than those defined are reserved`}
- case tok == eof:
- p.unscan()
- reg, err := regexp.Compile(re)
-
- if err != nil {
- return ColonReg{}, &ErrInvalidRevision{fmt.Sprintf(`revision suffix brace component, %s`, err.Error())}
- }
-
- return ColonReg{reg, negate}, nil
- default:
- p.unscan()
- re += lit
- }
- }
-}
-
-// parseColonDefault extract : statements
-func (p *Parser) parseColonDefault() (Revisioner, error) {
- var tok token
- var lit string
- var path string
- var stage int
- var err error
- var n = -1
-
- tok, lit, err = p.scan()
-
- if err != nil {
- return nil, err
- }
-
- nextTok, _, err := p.scan()
-
- if err != nil {
- return nil, err
- }
-
- if tok == number && nextTok == colon {
- n, _ = strconv.Atoi(lit)
- }
-
- switch n {
- case 0, 1, 2, 3:
- stage = n
- default:
- path += lit
- p.unscan()
- }
-
- for {
- tok, lit, err = p.scan()
-
- if err != nil {
- return nil, err
- }
-
- switch {
- case tok == eof && n == -1:
- return ColonPath{path}, nil
- case tok == eof:
- return ColonStagePath{path, stage}, nil
- default:
- path += lit
- }
- }
-}
-
-// parseRef extract reference name
-func (p *Parser) parseRef() (Revisioner, error) {
- var tok, prevTok token
- var lit, buf string
- var endOfRef bool
- var err error
-
- for {
- tok, lit, err = p.scan()
-
- if err != nil {
- return nil, err
- }
-
- switch tok {
- case eof, at, colon, tilde, caret:
- endOfRef = true
- }
-
- err := p.checkRefFormat(tok, lit, prevTok, buf, endOfRef)
-
- if err != nil {
- return "", err
- }
-
- if endOfRef {
- p.unscan()
- return Ref(buf), nil
- }
-
- buf += lit
- prevTok = tok
- }
-}
-
-// checkRefFormat ensure reference name follow rules defined here :
-// https://git-scm.com/docs/git-check-ref-format
-func (p *Parser) checkRefFormat(token token, literal string, previousToken token, buffer string, endOfRef bool) error {
- switch token {
- case aslash, space, control, qmark, asterisk, obracket:
- return &ErrInvalidRevision{fmt.Sprintf(`must not contains "%s"`, literal)}
- }
-
- switch {
- case (token == dot || token == slash) && buffer == "":
- return &ErrInvalidRevision{fmt.Sprintf(`must not start with "%s"`, literal)}
- case previousToken == slash && endOfRef:
- return &ErrInvalidRevision{`must not end with "/"`}
- case previousToken == dot && endOfRef:
- return &ErrInvalidRevision{`must not end with "."`}
- case token == dot && previousToken == slash:
- return &ErrInvalidRevision{`must not contains "/."`}
- case previousToken == dot && token == dot:
- return &ErrInvalidRevision{`must not contains ".."`}
- case previousToken == slash && token == slash:
- return &ErrInvalidRevision{`must not contains consecutively "/"`}
- case (token == slash || endOfRef) && len(buffer) > 4 && buffer[len(buffer)-5:] == ".lock":
- return &ErrInvalidRevision{"cannot end with .lock"}
- }
-
- return nil
-}
diff --git a/vendor/github.com/go-git/go-git/v5/internal/revision/scanner.go b/vendor/github.com/go-git/go-git/v5/internal/revision/scanner.go
deleted file mode 100644
index c46c21b7959..00000000000
--- a/vendor/github.com/go-git/go-git/v5/internal/revision/scanner.go
+++ /dev/null
@@ -1,117 +0,0 @@
-package revision
-
-import (
- "bufio"
- "io"
- "unicode"
-)
-
-// runeCategoryValidator takes a rune as input and
-// validates it belongs to a rune category
-type runeCategoryValidator func(r rune) bool
-
-// tokenizeExpression aggregates a series of runes matching check predicate into a single
-// string and provides given tokenType as token type
-func tokenizeExpression(ch rune, tokenType token, check runeCategoryValidator, r *bufio.Reader) (token, string, error) {
- var data []rune
- data = append(data, ch)
-
- for {
- c, _, err := r.ReadRune()
-
- if c == zeroRune {
- break
- }
-
- if err != nil {
- return tokenError, "", err
- }
-
- if check(c) {
- data = append(data, c)
- } else {
- err := r.UnreadRune()
-
- if err != nil {
- return tokenError, "", err
- }
-
- return tokenType, string(data), nil
- }
- }
-
- return tokenType, string(data), nil
-}
-
-var zeroRune = rune(0)
-
-// scanner represents a lexical scanner.
-type scanner struct {
- r *bufio.Reader
-}
-
-// newScanner returns a new instance of scanner.
-func newScanner(r io.Reader) *scanner {
- return &scanner{r: bufio.NewReader(r)}
-}
-
-// Scan extracts tokens and their strings counterpart
-// from the reader
-func (s *scanner) scan() (token, string, error) {
- ch, _, err := s.r.ReadRune()
-
- if err != nil && err != io.EOF {
- return tokenError, "", err
- }
-
- switch ch {
- case zeroRune:
- return eof, "", nil
- case ':':
- return colon, string(ch), nil
- case '~':
- return tilde, string(ch), nil
- case '^':
- return caret, string(ch), nil
- case '.':
- return dot, string(ch), nil
- case '/':
- return slash, string(ch), nil
- case '{':
- return obrace, string(ch), nil
- case '}':
- return cbrace, string(ch), nil
- case '-':
- return minus, string(ch), nil
- case '@':
- return at, string(ch), nil
- case '\\':
- return aslash, string(ch), nil
- case '?':
- return qmark, string(ch), nil
- case '*':
- return asterisk, string(ch), nil
- case '[':
- return obracket, string(ch), nil
- case '!':
- return emark, string(ch), nil
- }
-
- if unicode.IsSpace(ch) {
- return space, string(ch), nil
- }
-
- if unicode.IsControl(ch) {
- return control, string(ch), nil
- }
-
- if unicode.IsLetter(ch) {
- return tokenizeExpression(ch, word, unicode.IsLetter, s.r)
- }
-
- if unicode.IsNumber(ch) {
- return tokenizeExpression(ch, number, unicode.IsNumber, s.r)
- }
-
- return tokenError, string(ch), nil
-}
diff --git a/vendor/github.com/go-git/go-git/v5/internal/revision/token.go b/vendor/github.com/go-git/go-git/v5/internal/revision/token.go
deleted file mode 100644
index abc40488693..00000000000
--- a/vendor/github.com/go-git/go-git/v5/internal/revision/token.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package revision
-
-// token represents a entity extracted from string parsing
-type token int
-
-const (
- eof token = iota
-
- aslash
- asterisk
- at
- caret
- cbrace
- colon
- control
- dot
- emark
- minus
- number
- obrace
- obracket
- qmark
- slash
- space
- tilde
- tokenError
- word
-)
diff --git a/vendor/github.com/go-git/go-git/v5/internal/url/url.go b/vendor/github.com/go-git/go-git/v5/internal/url/url.go
deleted file mode 100644
index 26624486937..00000000000
--- a/vendor/github.com/go-git/go-git/v5/internal/url/url.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package url
-
-import (
- "regexp"
-)
-
-var (
- isSchemeRegExp = regexp.MustCompile(`^[^:]+://`)
-
- // Ref: https://github.com/git/git/blob/master/Documentation/urls.txt#L37
- scpLikeUrlRegExp = regexp.MustCompile(`^(?:(?P[^@]+)@)?(?P[^:\s]+):(?:(?P[0-9]{1,5}):)?(?P[^\\].*)$`)
-)
-
-// MatchesScheme returns true if the given string matches a URL-like
-// format scheme.
-func MatchesScheme(url string) bool {
- return isSchemeRegExp.MatchString(url)
-}
-
-// MatchesScpLike returns true if the given string matches an SCP-like
-// format scheme.
-func MatchesScpLike(url string) bool {
- return scpLikeUrlRegExp.MatchString(url)
-}
-
-// FindScpLikeComponents returns the user, host, port and path of the
-// given SCP-like URL.
-func FindScpLikeComponents(url string) (user, host, port, path string) {
- m := scpLikeUrlRegExp.FindStringSubmatch(url)
- return m[1], m[2], m[3], m[4]
-}
-
-// IsLocalEndpoint returns true if the given URL string specifies a
-// local file endpoint. For example, on a Linux machine,
-// `/home/user/src/go-git` would match as a local endpoint, but
-// `https://github.com/src-d/go-git` would not.
-func IsLocalEndpoint(url string) bool {
- return !MatchesScheme(url) && !MatchesScpLike(url)
-}
diff --git a/vendor/github.com/go-git/go-git/v5/object_walker.go b/vendor/github.com/go-git/go-git/v5/object_walker.go
deleted file mode 100644
index 3a537bd8023..00000000000
--- a/vendor/github.com/go-git/go-git/v5/object_walker.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package git
-
-import (
- "fmt"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/filemode"
- "github.com/go-git/go-git/v5/plumbing/object"
- "github.com/go-git/go-git/v5/storage"
-)
-
-type objectWalker struct {
- Storer storage.Storer
- // seen is the set of objects seen in the repo.
- // seen map can become huge if walking over large
- // repos. Thus using struct{} as the value type.
- seen map[plumbing.Hash]struct{}
-}
-
-func newObjectWalker(s storage.Storer) *objectWalker {
- return &objectWalker{s, map[plumbing.Hash]struct{}{}}
-}
-
-// walkAllRefs walks all (hash) references from the repo.
-func (p *objectWalker) walkAllRefs() error {
- // Walk over all the references in the repo.
- it, err := p.Storer.IterReferences()
- if err != nil {
- return err
- }
- defer it.Close()
- err = it.ForEach(func(ref *plumbing.Reference) error {
- // Exit this iteration early for non-hash references.
- if ref.Type() != plumbing.HashReference {
- return nil
- }
- return p.walkObjectTree(ref.Hash())
- })
- return err
-}
-
-func (p *objectWalker) isSeen(hash plumbing.Hash) bool {
- _, seen := p.seen[hash]
- return seen
-}
-
-func (p *objectWalker) add(hash plumbing.Hash) {
- p.seen[hash] = struct{}{}
-}
-
-// walkObjectTree walks over all objects and remembers references
-// to them in the objectWalker. This is used instead of the revlist
-// walks because memory usage is tight with huge repos.
-func (p *objectWalker) walkObjectTree(hash plumbing.Hash) error {
- // Check if we have already seen, and mark this object
- if p.isSeen(hash) {
- return nil
- }
- p.add(hash)
- // Fetch the object.
- obj, err := object.GetObject(p.Storer, hash)
- if err != nil {
- return fmt.Errorf("getting object %s failed: %v", hash, err)
- }
- // Walk all children depending on object type.
- switch obj := obj.(type) {
- case *object.Commit:
- err = p.walkObjectTree(obj.TreeHash)
- if err != nil {
- return err
- }
- for _, h := range obj.ParentHashes {
- err = p.walkObjectTree(h)
- if err != nil {
- return err
- }
- }
- case *object.Tree:
- for i := range obj.Entries {
- // Shortcut for blob objects:
- // 'or' the lower bits of a mode and check that it
- // it matches a filemode.Executable. The type information
- // is in the higher bits, but this is the cleanest way
- // to handle plain files with different modes.
- // Other non-tree objects are somewhat rare, so they
- // are not special-cased.
- if obj.Entries[i].Mode|0755 == filemode.Executable {
- p.add(obj.Entries[i].Hash)
- continue
- }
- // Normal walk for sub-trees (and symlinks etc).
- err = p.walkObjectTree(obj.Entries[i].Hash)
- if err != nil {
- return err
- }
- }
- case *object.Tag:
- return p.walkObjectTree(obj.Target)
- default:
- // Error out on unhandled object types.
- return fmt.Errorf("unknown object %X %s %T", obj.ID(), obj.Type(), obj)
- }
- return nil
-}
diff --git a/vendor/github.com/go-git/go-git/v5/options.go b/vendor/github.com/go-git/go-git/v5/options.go
deleted file mode 100644
index d7776dad5e3..00000000000
--- a/vendor/github.com/go-git/go-git/v5/options.go
+++ /dev/null
@@ -1,792 +0,0 @@
-package git
-
-import (
- "errors"
- "fmt"
- "regexp"
- "strings"
- "time"
-
- "github.com/ProtonMail/go-crypto/openpgp"
- "github.com/go-git/go-git/v5/config"
- "github.com/go-git/go-git/v5/plumbing"
- formatcfg "github.com/go-git/go-git/v5/plumbing/format/config"
- "github.com/go-git/go-git/v5/plumbing/object"
- "github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband"
- "github.com/go-git/go-git/v5/plumbing/transport"
-)
-
-// SubmoduleRescursivity defines how depth will affect any submodule recursive
-// operation.
-type SubmoduleRescursivity uint
-
-const (
- // DefaultRemoteName name of the default Remote, just like git command.
- DefaultRemoteName = "origin"
-
- // NoRecurseSubmodules disables the recursion for a submodule operation.
- NoRecurseSubmodules SubmoduleRescursivity = 0
- // DefaultSubmoduleRecursionDepth allow recursion in a submodule operation.
- DefaultSubmoduleRecursionDepth SubmoduleRescursivity = 10
-)
-
-var (
- ErrMissingURL = errors.New("URL field is required")
-)
-
-// CloneOptions describes how a clone should be performed.
-type CloneOptions struct {
- // The (possibly remote) repository URL to clone from.
- URL string
- // Auth credentials, if required, to use with the remote repository.
- Auth transport.AuthMethod
- // Name of the remote to be added, by default `origin`.
- RemoteName string
- // Remote branch to clone.
- ReferenceName plumbing.ReferenceName
- // Fetch only ReferenceName if true.
- SingleBranch bool
- // Mirror clones the repository as a mirror.
- //
- // Compared to a bare clone, mirror not only maps local branches of the
- // source to local branches of the target, it maps all refs (including
- // remote-tracking branches, notes etc.) and sets up a refspec configuration
- // such that all these refs are overwritten by a git remote update in the
- // target repository.
- Mirror bool
- // No checkout of HEAD after clone if true.
- NoCheckout bool
- // Limit fetching to the specified number of commits.
- Depth int
- // RecurseSubmodules after the clone is created, initialize all submodules
- // within, using their default settings. This option is ignored if the
- // cloned repository does not have a worktree.
- RecurseSubmodules SubmoduleRescursivity
- // ShallowSubmodules limit cloning submodules to the 1 level of depth.
- // It matches the git command --shallow-submodules.
- ShallowSubmodules bool
- // Progress is where the human readable information sent by the server is
- // stored, if nil nothing is stored and the capability (if supported)
- // no-progress, is sent to the server to avoid send this information.
- Progress sideband.Progress
- // Tags describe how the tags will be fetched from the remote repository,
- // by default is AllTags.
- Tags TagMode
- // InsecureSkipTLS skips ssl verify if protocol is https
- InsecureSkipTLS bool
- // CABundle specify additional ca bundle with system cert pool
- CABundle []byte
- // ProxyOptions provides info required for connecting to a proxy.
- ProxyOptions transport.ProxyOptions
- // When the repository to clone is on the local machine, instead of
- // using hard links, automatically setup .git/objects/info/alternates
- // to share the objects with the source repository.
- // The resulting repository starts out without any object of its own.
- // NOTE: this is a possibly dangerous operation; do not use it unless
- // you understand what it does.
- //
- // [Reference]: https://git-scm.com/docs/git-clone#Documentation/git-clone.txt---shared
- Shared bool
-}
-
-// MergeOptions describes how a merge should be performed.
-type MergeOptions struct {
- // Strategy defines the merge strategy to be used.
- Strategy MergeStrategy
-}
-
-// MergeStrategy represents the different types of merge strategies.
-type MergeStrategy int8
-
-const (
- // FastForwardMerge represents a Git merge strategy where the current
- // branch can be simply updated to point to the HEAD of the branch being
- // merged. This is only possible if the history of the branch being merged
- // is a linear descendant of the current branch, with no conflicting commits.
- //
- // This is the default option.
- FastForwardMerge MergeStrategy = iota
-)
-
-// Validate validates the fields and sets the default values.
-func (o *CloneOptions) Validate() error {
- if o.URL == "" {
- return ErrMissingURL
- }
-
- if o.RemoteName == "" {
- o.RemoteName = DefaultRemoteName
- }
-
- if o.ReferenceName == "" {
- o.ReferenceName = plumbing.HEAD
- }
-
- if o.Tags == InvalidTagMode {
- o.Tags = AllTags
- }
-
- return nil
-}
-
-// PullOptions describes how a pull should be performed.
-type PullOptions struct {
- // Name of the remote to be pulled. If empty, uses the default.
- RemoteName string
- // RemoteURL overrides the remote repo address with a custom URL
- RemoteURL string
- // Remote branch to clone. If empty, uses HEAD.
- ReferenceName plumbing.ReferenceName
- // Fetch only ReferenceName if true.
- SingleBranch bool
- // Limit fetching to the specified number of commits.
- Depth int
- // Auth credentials, if required, to use with the remote repository.
- Auth transport.AuthMethod
- // RecurseSubmodules controls if new commits of all populated submodules
- // should be fetched too.
- RecurseSubmodules SubmoduleRescursivity
- // Progress is where the human readable information sent by the server is
- // stored, if nil nothing is stored and the capability (if supported)
- // no-progress, is sent to the server to avoid send this information.
- Progress sideband.Progress
- // Force allows the pull to update a local branch even when the remote
- // branch does not descend from it.
- Force bool
- // InsecureSkipTLS skips ssl verify if protocol is https
- InsecureSkipTLS bool
- // CABundle specify additional ca bundle with system cert pool
- CABundle []byte
- // ProxyOptions provides info required for connecting to a proxy.
- ProxyOptions transport.ProxyOptions
-}
-
-// Validate validates the fields and sets the default values.
-func (o *PullOptions) Validate() error {
- if o.RemoteName == "" {
- o.RemoteName = DefaultRemoteName
- }
-
- if o.ReferenceName == "" {
- o.ReferenceName = plumbing.HEAD
- }
-
- return nil
-}
-
-type TagMode int
-
-const (
- InvalidTagMode TagMode = iota
- // TagFollowing any tag that points into the histories being fetched is also
- // fetched. TagFollowing requires a server with `include-tag` capability
- // in order to fetch the annotated tags objects.
- TagFollowing
- // AllTags fetch all tags from the remote (i.e., fetch remote tags
- // refs/tags/* into local tags with the same name)
- AllTags
- // NoTags fetch no tags from the remote at all
- NoTags
-)
-
-// FetchOptions describes how a fetch should be performed
-type FetchOptions struct {
- // Name of the remote to fetch from. Defaults to origin.
- RemoteName string
- // RemoteURL overrides the remote repo address with a custom URL
- RemoteURL string
- RefSpecs []config.RefSpec
- // Depth limit fetching to the specified number of commits from the tip of
- // each remote branch history.
- Depth int
- // Auth credentials, if required, to use with the remote repository.
- Auth transport.AuthMethod
- // Progress is where the human readable information sent by the server is
- // stored, if nil nothing is stored and the capability (if supported)
- // no-progress, is sent to the server to avoid send this information.
- Progress sideband.Progress
- // Tags describe how the tags will be fetched from the remote repository,
- // by default is TagFollowing.
- Tags TagMode
- // Force allows the fetch to update a local branch even when the remote
- // branch does not descend from it.
- Force bool
- // InsecureSkipTLS skips ssl verify if protocol is https
- InsecureSkipTLS bool
- // CABundle specify additional ca bundle with system cert pool
- CABundle []byte
- // ProxyOptions provides info required for connecting to a proxy.
- ProxyOptions transport.ProxyOptions
- // Prune specify that local refs that match given RefSpecs and that do
- // not exist remotely will be removed.
- Prune bool
-}
-
-// Validate validates the fields and sets the default values.
-func (o *FetchOptions) Validate() error {
- if o.RemoteName == "" {
- o.RemoteName = DefaultRemoteName
- }
-
- if o.Tags == InvalidTagMode {
- o.Tags = TagFollowing
- }
-
- for _, r := range o.RefSpecs {
- if err := r.Validate(); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// PushOptions describes how a push should be performed.
-type PushOptions struct {
- // RemoteName is the name of the remote to be pushed to.
- RemoteName string
- // RemoteURL overrides the remote repo address with a custom URL
- RemoteURL string
- // RefSpecs specify what destination ref to update with what source object.
- //
- // The format of a parameter is an optional plus +, followed by
- // the source object , followed by a colon :, followed by the destination ref .
- // The is often the name of the branch you would want to push, but it can be a SHA-1.
- // The tells which ref on the remote side is updated with this push.
- //
- // A refspec with empty src can be used to delete a reference.
- RefSpecs []config.RefSpec
- // Auth credentials, if required, to use with the remote repository.
- Auth transport.AuthMethod
- // Progress is where the human readable information sent by the server is
- // stored, if nil nothing is stored.
- Progress sideband.Progress
- // Prune specify that remote refs that match given RefSpecs and that do
- // not exist locally will be removed.
- Prune bool
- // Force allows the push to update a remote branch even when the local
- // branch does not descend from it.
- Force bool
- // InsecureSkipTLS skips ssl verify if protocol is https
- InsecureSkipTLS bool
- // CABundle specify additional ca bundle with system cert pool
- CABundle []byte
- // RequireRemoteRefs only allows a remote ref to be updated if its current
- // value is the one specified here.
- RequireRemoteRefs []config.RefSpec
- // FollowTags will send any annotated tags with a commit target reachable from
- // the refs already being pushed
- FollowTags bool
- // ForceWithLease allows a force push as long as the remote ref adheres to a "lease"
- ForceWithLease *ForceWithLease
- // PushOptions sets options to be transferred to the server during push.
- Options map[string]string
- // Atomic sets option to be an atomic push
- Atomic bool
- // ProxyOptions provides info required for connecting to a proxy.
- ProxyOptions transport.ProxyOptions
-}
-
-// ForceWithLease sets fields on the lease
-// If neither RefName nor Hash are set, ForceWithLease protects
-// all refs in the refspec by ensuring the ref of the remote in the local repsitory
-// matches the one in the ref advertisement.
-type ForceWithLease struct {
- // RefName, when set will protect the ref by ensuring it matches the
- // hash in the ref advertisement.
- RefName plumbing.ReferenceName
- // Hash is the expected object id of RefName. The push will be rejected unless this
- // matches the corresponding object id of RefName in the refs advertisement.
- Hash plumbing.Hash
-}
-
-// Validate validates the fields and sets the default values.
-func (o *PushOptions) Validate() error {
- if o.RemoteName == "" {
- o.RemoteName = DefaultRemoteName
- }
-
- if len(o.RefSpecs) == 0 {
- o.RefSpecs = []config.RefSpec{
- config.RefSpec(config.DefaultPushRefSpec),
- }
- }
-
- for _, r := range o.RefSpecs {
- if err := r.Validate(); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// SubmoduleUpdateOptions describes how a submodule update should be performed.
-type SubmoduleUpdateOptions struct {
- // Init, if true initializes the submodules recorded in the index.
- Init bool
- // NoFetch tell to the update command to not fetch new objects from the
- // remote site.
- NoFetch bool
- // RecurseSubmodules the update is performed not only in the submodules of
- // the current repository but also in any nested submodules inside those
- // submodules (and so on). Until the SubmoduleRescursivity is reached.
- RecurseSubmodules SubmoduleRescursivity
- // Auth credentials, if required, to use with the remote repository.
- Auth transport.AuthMethod
- // Depth limit fetching to the specified number of commits from the tip of
- // each remote branch history.
- Depth int
-}
-
-var (
- ErrBranchHashExclusive = errors.New("Branch and Hash are mutually exclusive")
- ErrCreateRequiresBranch = errors.New("Branch is mandatory when Create is used")
-)
-
-// CheckoutOptions describes how a checkout operation should be performed.
-type CheckoutOptions struct {
- // Hash is the hash of a commit or tag to be checked out. If used, HEAD
- // will be in detached mode. If Create is not used, Branch and Hash are
- // mutually exclusive.
- Hash plumbing.Hash
- // Branch to be checked out, if Branch and Hash are empty is set to `master`.
- Branch plumbing.ReferenceName
- // Create a new branch named Branch and start it at Hash.
- Create bool
- // Force, if true when switching branches, proceed even if the index or the
- // working tree differs from HEAD. This is used to throw away local changes
- Force bool
- // Keep, if true when switching branches, local changes (the index or the
- // working tree changes) will be kept so that they can be committed to the
- // target branch. Force and Keep are mutually exclusive, should not be both
- // set to true.
- Keep bool
- // SparseCheckoutDirectories
- SparseCheckoutDirectories []string
-}
-
-// Validate validates the fields and sets the default values.
-func (o *CheckoutOptions) Validate() error {
- if !o.Create && !o.Hash.IsZero() && o.Branch != "" {
- return ErrBranchHashExclusive
- }
-
- if o.Create && o.Branch == "" {
- return ErrCreateRequiresBranch
- }
-
- if o.Branch == "" {
- o.Branch = plumbing.Master
- }
-
- return nil
-}
-
-// ResetMode defines the mode of a reset operation.
-type ResetMode int8
-
-const (
- // MixedReset resets the index but not the working tree (i.e., the changed
- // files are preserved but not marked for commit) and reports what has not
- // been updated. This is the default action.
- MixedReset ResetMode = iota
- // HardReset resets the index and working tree. Any changes to tracked files
- // in the working tree are discarded.
- HardReset
- // MergeReset resets the index and updates the files in the working tree
- // that are different between Commit and HEAD, but keeps those which are
- // different between the index and working tree (i.e. which have changes
- // which have not been added).
- //
- // If a file that is different between Commit and the index has unstaged
- // changes, reset is aborted.
- MergeReset
- // SoftReset does not touch the index file or the working tree at all (but
- // resets the head to , just like all modes do). This leaves all
- // your changed files "Changes to be committed", as git status would put it.
- SoftReset
-)
-
-// ResetOptions describes how a reset operation should be performed.
-type ResetOptions struct {
- // Commit, if commit is present set the current branch head (HEAD) to it.
- Commit plumbing.Hash
- // Mode, form resets the current branch head to Commit and possibly updates
- // the index (resetting it to the tree of Commit) and the working tree
- // depending on Mode. If empty MixedReset is used.
- Mode ResetMode
-}
-
-// Validate validates the fields and sets the default values.
-func (o *ResetOptions) Validate(r *Repository) error {
- if o.Commit == plumbing.ZeroHash {
- ref, err := r.Head()
- if err != nil {
- return err
- }
-
- o.Commit = ref.Hash()
- } else {
- _, err := r.CommitObject(o.Commit)
- if err != nil {
- return fmt.Errorf("invalid reset option: %w", err)
- }
- }
-
- return nil
-}
-
-type LogOrder int8
-
-const (
- LogOrderDefault LogOrder = iota
- LogOrderDFS
- LogOrderDFSPost
- LogOrderBSF
- LogOrderCommitterTime
-)
-
-// LogOptions describes how a log action should be performed.
-type LogOptions struct {
- // When the From option is set the log will only contain commits
- // reachable from it. If this option is not set, HEAD will be used as
- // the default From.
- From plumbing.Hash
-
- // The default traversal algorithm is Depth-first search
- // set Order=LogOrderCommitterTime for ordering by committer time (more compatible with `git log`)
- // set Order=LogOrderBSF for Breadth-first search
- Order LogOrder
-
- // Show only those commits in which the specified file was inserted/updated.
- // It is equivalent to running `git log -- `.
- // this field is kept for compatibility, it can be replaced with PathFilter
- FileName *string
-
- // Filter commits based on the path of files that are updated
- // takes file path as argument and should return true if the file is desired
- // It can be used to implement `git log -- `
- // either is a file path, or directory path, or a regexp of file/directory path
- PathFilter func(string) bool
-
- // Pretend as if all the refs in refs/, along with HEAD, are listed on the command line as .
- // It is equivalent to running `git log --all`.
- // If set on true, the From option will be ignored.
- All bool
-
- // Show commits more recent than a specific date.
- // It is equivalent to running `git log --since ` or `git log --after `.
- Since *time.Time
-
- // Show commits older than a specific date.
- // It is equivalent to running `git log --until ` or `git log --before `.
- Until *time.Time
-}
-
-var (
- ErrMissingAuthor = errors.New("author field is required")
-)
-
-// AddOptions describes how an `add` operation should be performed
-type AddOptions struct {
- // All equivalent to `git add -A`, update the index not only where the
- // working tree has a file matching `Path` but also where the index already
- // has an entry. This adds, modifies, and removes index entries to match the
- // working tree. If no `Path` nor `Glob` is given when `All` option is
- // used, all files in the entire working tree are updated.
- All bool
- // Path is the exact filepath to the file or directory to be added.
- Path string
- // Glob adds all paths, matching pattern, to the index. If pattern matches a
- // directory path, all directory contents are added to the index recursively.
- Glob string
- // SkipStatus adds the path with no status check. This option is relevant only
- // when the `Path` option is specified and does not apply when the `All` option is used.
- // Notice that when passing an ignored path it will be added anyway.
- // When true it can speed up adding files to the worktree in very large repositories.
- SkipStatus bool
-}
-
-// Validate validates the fields and sets the default values.
-func (o *AddOptions) Validate(r *Repository) error {
- if o.Path != "" && o.Glob != "" {
- return fmt.Errorf("fields Path and Glob are mutual exclusive")
- }
-
- return nil
-}
-
-// CommitOptions describes how a commit operation should be performed.
-type CommitOptions struct {
- // All automatically stage files that have been modified and deleted, but
- // new files you have not told Git about are not affected.
- All bool
- // AllowEmptyCommits enable empty commits to be created. An empty commit
- // is when no changes to the tree were made, but a new commit message is
- // provided. The default behavior is false, which results in ErrEmptyCommit.
- AllowEmptyCommits bool
- // Author is the author's signature of the commit. If Author is empty the
- // Name and Email is read from the config, and time.Now it's used as When.
- Author *object.Signature
- // Committer is the committer's signature of the commit. If Committer is
- // nil the Author signature is used.
- Committer *object.Signature
- // Parents are the parents commits for the new commit, by default when
- // len(Parents) is zero, the hash of HEAD reference is used.
- Parents []plumbing.Hash
- // SignKey denotes a key to sign the commit with. A nil value here means the
- // commit will not be signed. The private key must be present and already
- // decrypted.
- SignKey *openpgp.Entity
- // Signer denotes a cryptographic signer to sign the commit with.
- // A nil value here means the commit will not be signed.
- // Takes precedence over SignKey.
- Signer Signer
- // Amend will create a new commit object and replace the commit that HEAD currently
- // points to. Cannot be used with All nor Parents.
- Amend bool
-}
-
-// Validate validates the fields and sets the default values.
-func (o *CommitOptions) Validate(r *Repository) error {
- if o.All && o.Amend {
- return errors.New("all and amend cannot be used together")
- }
-
- if o.Amend && len(o.Parents) > 0 {
- return errors.New("parents cannot be used with amend")
- }
-
- if o.Author == nil {
- if err := o.loadConfigAuthorAndCommitter(r); err != nil {
- return err
- }
- }
-
- if o.Committer == nil {
- o.Committer = o.Author
- }
-
- if len(o.Parents) == 0 {
- head, err := r.Head()
- if err != nil && err != plumbing.ErrReferenceNotFound {
- return err
- }
-
- if head != nil {
- o.Parents = []plumbing.Hash{head.Hash()}
- }
- }
-
- return nil
-}
-
-func (o *CommitOptions) loadConfigAuthorAndCommitter(r *Repository) error {
- cfg, err := r.ConfigScoped(config.SystemScope)
- if err != nil {
- return err
- }
-
- if o.Author == nil && cfg.Author.Email != "" && cfg.Author.Name != "" {
- o.Author = &object.Signature{
- Name: cfg.Author.Name,
- Email: cfg.Author.Email,
- When: time.Now(),
- }
- }
-
- if o.Committer == nil && cfg.Committer.Email != "" && cfg.Committer.Name != "" {
- o.Committer = &object.Signature{
- Name: cfg.Committer.Name,
- Email: cfg.Committer.Email,
- When: time.Now(),
- }
- }
-
- if o.Author == nil && cfg.User.Email != "" && cfg.User.Name != "" {
- o.Author = &object.Signature{
- Name: cfg.User.Name,
- Email: cfg.User.Email,
- When: time.Now(),
- }
- }
-
- if o.Author == nil {
- return ErrMissingAuthor
- }
-
- return nil
-}
-
-var (
- ErrMissingName = errors.New("name field is required")
- ErrMissingTagger = errors.New("tagger field is required")
- ErrMissingMessage = errors.New("message field is required")
-)
-
-// CreateTagOptions describes how a tag object should be created.
-type CreateTagOptions struct {
- // Tagger defines the signature of the tag creator. If Tagger is empty the
- // Name and Email is read from the config, and time.Now it's used as When.
- Tagger *object.Signature
- // Message defines the annotation of the tag. It is canonicalized during
- // validation into the format expected by git - no leading whitespace and
- // ending in a newline.
- Message string
- // SignKey denotes a key to sign the tag with. A nil value here means the tag
- // will not be signed. The private key must be present and already decrypted.
- SignKey *openpgp.Entity
-}
-
-// Validate validates the fields and sets the default values.
-func (o *CreateTagOptions) Validate(r *Repository, hash plumbing.Hash) error {
- if o.Tagger == nil {
- if err := o.loadConfigTagger(r); err != nil {
- return err
- }
- }
-
- if o.Message == "" {
- return ErrMissingMessage
- }
-
- // Canonicalize the message into the expected message format.
- o.Message = strings.TrimSpace(o.Message) + "\n"
-
- return nil
-}
-
-func (o *CreateTagOptions) loadConfigTagger(r *Repository) error {
- cfg, err := r.ConfigScoped(config.SystemScope)
- if err != nil {
- return err
- }
-
- if o.Tagger == nil && cfg.Author.Email != "" && cfg.Author.Name != "" {
- o.Tagger = &object.Signature{
- Name: cfg.Author.Name,
- Email: cfg.Author.Email,
- When: time.Now(),
- }
- }
-
- if o.Tagger == nil && cfg.User.Email != "" && cfg.User.Name != "" {
- o.Tagger = &object.Signature{
- Name: cfg.User.Name,
- Email: cfg.User.Email,
- When: time.Now(),
- }
- }
-
- if o.Tagger == nil {
- return ErrMissingTagger
- }
-
- return nil
-}
-
-// ListOptions describes how a remote list should be performed.
-type ListOptions struct {
- // Auth credentials, if required, to use with the remote repository.
- Auth transport.AuthMethod
- // InsecureSkipTLS skips ssl verify if protocol is https
- InsecureSkipTLS bool
- // CABundle specify additional ca bundle with system cert pool
- CABundle []byte
- // PeelingOption defines how peeled objects are handled during a
- // remote list.
- PeelingOption PeelingOption
- // ProxyOptions provides info required for connecting to a proxy.
- ProxyOptions transport.ProxyOptions
- // Timeout specifies the timeout in seconds for list operations
- Timeout int
-}
-
-// PeelingOption represents the different ways to handle peeled references.
-//
-// Peeled references represent the underlying object of an annotated
-// (or signed) tag. Refer to upstream documentation for more info:
-// https://github.com/git/git/blob/master/Documentation/technical/reftable.txt
-type PeelingOption uint8
-
-const (
- // IgnorePeeled ignores all peeled reference names. This is the default behavior.
- IgnorePeeled PeelingOption = 0
- // OnlyPeeled returns only peeled reference names.
- OnlyPeeled PeelingOption = 1
- // AppendPeeled appends peeled reference names to the reference list.
- AppendPeeled PeelingOption = 2
-)
-
-// CleanOptions describes how a clean should be performed.
-type CleanOptions struct {
- Dir bool
-}
-
-// GrepOptions describes how a grep should be performed.
-type GrepOptions struct {
- // Patterns are compiled Regexp objects to be matched.
- Patterns []*regexp.Regexp
- // InvertMatch selects non-matching lines.
- InvertMatch bool
- // CommitHash is the hash of the commit from which worktree should be derived.
- CommitHash plumbing.Hash
- // ReferenceName is the branch or tag name from which worktree should be derived.
- ReferenceName plumbing.ReferenceName
- // PathSpecs are compiled Regexp objects of pathspec to use in the matching.
- PathSpecs []*regexp.Regexp
-}
-
-var (
- ErrHashOrReference = errors.New("ambiguous options, only one of CommitHash or ReferenceName can be passed")
-)
-
-// Validate validates the fields and sets the default values.
-//
-// TODO: deprecate in favor of Validate(r *Repository) in v6.
-func (o *GrepOptions) Validate(w *Worktree) error {
- return o.validate(w.r)
-}
-
-func (o *GrepOptions) validate(r *Repository) error {
- if !o.CommitHash.IsZero() && o.ReferenceName != "" {
- return ErrHashOrReference
- }
-
- // If none of CommitHash and ReferenceName are provided, set commit hash of
- // the repository's head.
- if o.CommitHash.IsZero() && o.ReferenceName == "" {
- ref, err := r.Head()
- if err != nil {
- return err
- }
- o.CommitHash = ref.Hash()
- }
-
- return nil
-}
-
-// PlainOpenOptions describes how opening a plain repository should be
-// performed.
-type PlainOpenOptions struct {
- // DetectDotGit defines whether parent directories should be
- // walked until a .git directory or file is found.
- DetectDotGit bool
- // Enable .git/commondir support (see https://git-scm.com/docs/gitrepository-layout#Documentation/gitrepository-layout.txt).
- // NOTE: This option will only work with the filesystem storage.
- EnableDotGitCommonDir bool
-}
-
-// Validate validates the fields and sets the default values.
-func (o *PlainOpenOptions) Validate() error { return nil }
-
-type PlainInitOptions struct {
- InitOptions
- // Determines if the repository will have a worktree (non-bare) or not (bare).
- Bare bool
- ObjectFormat formatcfg.ObjectFormat
-}
-
-// Validate validates the fields and sets the default values.
-func (o *PlainInitOptions) Validate() error { return nil }
diff --git a/vendor/github.com/go-git/go-git/v5/oss-fuzz.sh b/vendor/github.com/go-git/go-git/v5/oss-fuzz.sh
deleted file mode 100644
index 885548f401b..00000000000
--- a/vendor/github.com/go-git/go-git/v5/oss-fuzz.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/bin/bash -eu
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-################################################################################
-
-
-go mod download
-go get github.com/AdamKorcz/go-118-fuzz-build/testing
-
-if [ "$SANITIZER" != "coverage" ]; then
- sed -i '/func (s \*DecoderSuite) TestDecode(/,/^}/ s/^/\/\//' plumbing/format/config/decoder_test.go
- sed -n '35,$p' plumbing/format/packfile/common_test.go >> plumbing/format/packfile/delta_test.go
- sed -n '20,53p' plumbing/object/object_test.go >> plumbing/object/tree_test.go
- sed -i 's|func Test|// func Test|' plumbing/transport/common_test.go
-fi
-
-compile_native_go_fuzzer $(pwd)/internal/revision FuzzParser fuzz_parser
-compile_native_go_fuzzer $(pwd)/plumbing/format/config FuzzDecoder fuzz_decoder_config
-compile_native_go_fuzzer $(pwd)/plumbing/format/packfile FuzzPatchDelta fuzz_patch_delta
-compile_native_go_fuzzer $(pwd)/plumbing/object FuzzParseSignedBytes fuzz_parse_signed_bytes
-compile_native_go_fuzzer $(pwd)/plumbing/object FuzzDecode fuzz_decode
-compile_native_go_fuzzer $(pwd)/plumbing/protocol/packp FuzzDecoder fuzz_decoder_packp
-compile_native_go_fuzzer $(pwd)/plumbing/transport FuzzNewEndpoint fuzz_new_endpoint
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/cache/buffer_lru.go b/vendor/github.com/go-git/go-git/v5/plumbing/cache/buffer_lru.go
deleted file mode 100644
index acaf1952033..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/cache/buffer_lru.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package cache
-
-import (
- "container/list"
- "sync"
-)
-
-// BufferLRU implements an object cache with an LRU eviction policy and a
-// maximum size (measured in object size).
-type BufferLRU struct {
- MaxSize FileSize
-
- actualSize FileSize
- ll *list.List
- cache map[int64]*list.Element
- mut sync.Mutex
-}
-
-// NewBufferLRU creates a new BufferLRU with the given maximum size. The maximum
-// size will never be exceeded.
-func NewBufferLRU(maxSize FileSize) *BufferLRU {
- return &BufferLRU{MaxSize: maxSize}
-}
-
-// NewBufferLRUDefault creates a new BufferLRU with the default cache size.
-func NewBufferLRUDefault() *BufferLRU {
- return &BufferLRU{MaxSize: DefaultMaxSize}
-}
-
-type buffer struct {
- Key int64
- Slice []byte
-}
-
-// Put puts a buffer into the cache. If the buffer is already in the cache, it
-// will be marked as used. Otherwise, it will be inserted. A buffers might
-// be evicted to make room for the new one.
-func (c *BufferLRU) Put(key int64, slice []byte) {
- c.mut.Lock()
- defer c.mut.Unlock()
-
- if c.cache == nil {
- c.actualSize = 0
- c.cache = make(map[int64]*list.Element, 1000)
- c.ll = list.New()
- }
-
- bufSize := FileSize(len(slice))
- if ee, ok := c.cache[key]; ok {
- oldBuf := ee.Value.(buffer)
- // in this case bufSize is a delta: new size - old size
- bufSize -= FileSize(len(oldBuf.Slice))
- c.ll.MoveToFront(ee)
- ee.Value = buffer{key, slice}
- } else {
- if bufSize > c.MaxSize {
- return
- }
- ee := c.ll.PushFront(buffer{key, slice})
- c.cache[key] = ee
- }
-
- c.actualSize += bufSize
- for c.actualSize > c.MaxSize {
- last := c.ll.Back()
- lastObj := last.Value.(buffer)
- lastSize := FileSize(len(lastObj.Slice))
-
- c.ll.Remove(last)
- delete(c.cache, lastObj.Key)
- c.actualSize -= lastSize
- }
-}
-
-// Get returns a buffer by its key. It marks the buffer as used. If the buffer
-// is not in the cache, (nil, false) will be returned.
-func (c *BufferLRU) Get(key int64) ([]byte, bool) {
- c.mut.Lock()
- defer c.mut.Unlock()
-
- ee, ok := c.cache[key]
- if !ok {
- return nil, false
- }
-
- c.ll.MoveToFront(ee)
- return ee.Value.(buffer).Slice, true
-}
-
-// Clear the content of this buffer cache.
-func (c *BufferLRU) Clear() {
- c.mut.Lock()
- defer c.mut.Unlock()
-
- c.ll = nil
- c.cache = nil
- c.actualSize = 0
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/cache/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/cache/common.go
deleted file mode 100644
index 7b0d0c76bb3..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/cache/common.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package cache
-
-import "github.com/go-git/go-git/v5/plumbing"
-
-const (
- Byte FileSize = 1 << (iota * 10)
- KiByte
- MiByte
- GiByte
-)
-
-type FileSize int64
-
-const DefaultMaxSize FileSize = 96 * MiByte
-
-// Object is an interface to a object cache.
-type Object interface {
- // Put puts the given object into the cache. Whether this object will
- // actually be put into the cache or not is implementation specific.
- Put(o plumbing.EncodedObject)
- // Get gets an object from the cache given its hash. The second return value
- // is true if the object was returned, and false otherwise.
- Get(k plumbing.Hash) (plumbing.EncodedObject, bool)
- // Clear clears every object from the cache.
- Clear()
-}
-
-// Buffer is an interface to a buffer cache.
-type Buffer interface {
- // Put puts a buffer into the cache. If the buffer is already in the cache,
- // it will be marked as used. Otherwise, it will be inserted. Buffer might
- // be evicted to make room for the new one.
- Put(key int64, slice []byte)
- // Get returns a buffer by its key. It marks the buffer as used. If the
- // buffer is not in the cache, (nil, false) will be returned.
- Get(key int64) ([]byte, bool)
- // Clear clears every object from the cache.
- Clear()
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/cache/object_lru.go b/vendor/github.com/go-git/go-git/v5/plumbing/cache/object_lru.go
deleted file mode 100644
index c50d0d1e6c5..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/cache/object_lru.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package cache
-
-import (
- "container/list"
- "sync"
-
- "github.com/go-git/go-git/v5/plumbing"
-)
-
-// ObjectLRU implements an object cache with an LRU eviction policy and a
-// maximum size (measured in object size).
-type ObjectLRU struct {
- MaxSize FileSize
-
- actualSize FileSize
- ll *list.List
- cache map[interface{}]*list.Element
- mut sync.Mutex
-}
-
-// NewObjectLRU creates a new ObjectLRU with the given maximum size. The maximum
-// size will never be exceeded.
-func NewObjectLRU(maxSize FileSize) *ObjectLRU {
- return &ObjectLRU{MaxSize: maxSize}
-}
-
-// NewObjectLRUDefault creates a new ObjectLRU with the default cache size.
-func NewObjectLRUDefault() *ObjectLRU {
- return &ObjectLRU{MaxSize: DefaultMaxSize}
-}
-
-// Put puts an object into the cache. If the object is already in the cache, it
-// will be marked as used. Otherwise, it will be inserted. A single object might
-// be evicted to make room for the new object.
-func (c *ObjectLRU) Put(obj plumbing.EncodedObject) {
- c.mut.Lock()
- defer c.mut.Unlock()
-
- if c.cache == nil {
- c.actualSize = 0
- c.cache = make(map[interface{}]*list.Element, 1000)
- c.ll = list.New()
- }
-
- objSize := FileSize(obj.Size())
- key := obj.Hash()
- if ee, ok := c.cache[key]; ok {
- oldObj := ee.Value.(plumbing.EncodedObject)
- // in this case objSize is a delta: new size - old size
- objSize -= FileSize(oldObj.Size())
- c.ll.MoveToFront(ee)
- ee.Value = obj
- } else {
- if objSize > c.MaxSize {
- return
- }
- ee := c.ll.PushFront(obj)
- c.cache[key] = ee
- }
-
- c.actualSize += objSize
- for c.actualSize > c.MaxSize {
- last := c.ll.Back()
- if last == nil {
- c.actualSize = 0
- break
- }
-
- lastObj := last.Value.(plumbing.EncodedObject)
- lastSize := FileSize(lastObj.Size())
-
- c.ll.Remove(last)
- delete(c.cache, lastObj.Hash())
- c.actualSize -= lastSize
- }
-}
-
-// Get returns an object by its hash. It marks the object as used. If the object
-// is not in the cache, (nil, false) will be returned.
-func (c *ObjectLRU) Get(k plumbing.Hash) (plumbing.EncodedObject, bool) {
- c.mut.Lock()
- defer c.mut.Unlock()
-
- ee, ok := c.cache[k]
- if !ok {
- return nil, false
- }
-
- c.ll.MoveToFront(ee)
- return ee.Value.(plumbing.EncodedObject), true
-}
-
-// Clear the content of this object cache.
-func (c *ObjectLRU) Clear() {
- c.mut.Lock()
- defer c.mut.Unlock()
-
- c.ll = nil
- c.cache = nil
- c.actualSize = 0
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/color/color.go b/vendor/github.com/go-git/go-git/v5/plumbing/color/color.go
deleted file mode 100644
index 2cd74bdc1a8..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/color/color.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package color
-
-// TODO read colors from a github.com/go-git/go-git/plumbing/format/config.Config struct
-// TODO implement color parsing, see https://github.com/git/git/blob/v2.26.2/color.c
-
-// Colors. See https://github.com/git/git/blob/v2.26.2/color.h#L24-L53.
-const (
- Normal = ""
- Reset = "\033[m"
- Bold = "\033[1m"
- Red = "\033[31m"
- Green = "\033[32m"
- Yellow = "\033[33m"
- Blue = "\033[34m"
- Magenta = "\033[35m"
- Cyan = "\033[36m"
- BoldRed = "\033[1;31m"
- BoldGreen = "\033[1;32m"
- BoldYellow = "\033[1;33m"
- BoldBlue = "\033[1;34m"
- BoldMagenta = "\033[1;35m"
- BoldCyan = "\033[1;36m"
- FaintRed = "\033[2;31m"
- FaintGreen = "\033[2;32m"
- FaintYellow = "\033[2;33m"
- FaintBlue = "\033[2;34m"
- FaintMagenta = "\033[2;35m"
- FaintCyan = "\033[2;36m"
- BgRed = "\033[41m"
- BgGreen = "\033[42m"
- BgYellow = "\033[43m"
- BgBlue = "\033[44m"
- BgMagenta = "\033[45m"
- BgCyan = "\033[46m"
- Faint = "\033[2m"
- FaintItalic = "\033[2;3m"
- Reverse = "\033[7m"
-)
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/error.go b/vendor/github.com/go-git/go-git/v5/plumbing/error.go
deleted file mode 100644
index a3ebed3f6c2..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/error.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package plumbing
-
-import "fmt"
-
-type PermanentError struct {
- Err error
-}
-
-func NewPermanentError(err error) *PermanentError {
- if err == nil {
- return nil
- }
-
- return &PermanentError{Err: err}
-}
-
-func (e *PermanentError) Error() string {
- return fmt.Sprintf("permanent client error: %s", e.Err.Error())
-}
-
-type UnexpectedError struct {
- Err error
-}
-
-func NewUnexpectedError(err error) *UnexpectedError {
- if err == nil {
- return nil
- }
-
- return &UnexpectedError{Err: err}
-}
-
-func (e *UnexpectedError) Error() string {
- return fmt.Sprintf("unexpected client error: %s", e.Err.Error())
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/filemode/filemode.go b/vendor/github.com/go-git/go-git/v5/plumbing/filemode/filemode.go
deleted file mode 100644
index ea1a457558e..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/filemode/filemode.go
+++ /dev/null
@@ -1,188 +0,0 @@
-package filemode
-
-import (
- "encoding/binary"
- "fmt"
- "os"
- "strconv"
-)
-
-// A FileMode represents the kind of tree entries used by git. It
-// resembles regular file systems modes, although FileModes are
-// considerably simpler (there are not so many), and there are some,
-// like Submodule that has no file system equivalent.
-type FileMode uint32
-
-const (
- // Empty is used as the FileMode of tree elements when comparing
- // trees in the following situations:
- //
- // - the mode of tree elements before their creation. - the mode of
- // tree elements after their deletion. - the mode of unmerged
- // elements when checking the index.
- //
- // Empty has no file system equivalent. As Empty is the zero value
- // of FileMode, it is also returned by New and
- // NewFromOsNewFromOSFileMode along with an error, when they fail.
- Empty FileMode = 0
- // Dir represent a Directory.
- Dir FileMode = 0040000
- // Regular represent non-executable files. Please note this is not
- // the same as golang regular files, which include executable files.
- Regular FileMode = 0100644
- // Deprecated represent non-executable files with the group writable
- // bit set. This mode was supported by the first versions of git,
- // but it has been deprecated nowadays. This library uses them
- // internally, so you can read old packfiles, but will treat them as
- // Regulars when interfacing with the outside world. This is the
- // standard git behaviour.
- Deprecated FileMode = 0100664
- // Executable represents executable files.
- Executable FileMode = 0100755
- // Symlink represents symbolic links to files.
- Symlink FileMode = 0120000
- // Submodule represents git submodules. This mode has no file system
- // equivalent.
- Submodule FileMode = 0160000
-)
-
-// New takes the octal string representation of a FileMode and returns
-// the FileMode and a nil error. If the string can not be parsed to a
-// 32 bit unsigned octal number, it returns Empty and the parsing error.
-//
-// Example: "40000" means Dir, "100644" means Regular.
-//
-// Please note this function does not check if the returned FileMode
-// is valid in git or if it is malformed. For instance, "1" will
-// return the malformed FileMode(1) and a nil error.
-func New(s string) (FileMode, error) {
- n, err := strconv.ParseUint(s, 8, 32)
- if err != nil {
- return Empty, err
- }
-
- return FileMode(n), nil
-}
-
-// NewFromOSFileMode returns the FileMode used by git to represent
-// the provided file system modes and a nil error on success. If the
-// file system mode cannot be mapped to any valid git mode (as with
-// sockets or named pipes), it will return Empty and an error.
-//
-// Note that some git modes cannot be generated from os.FileModes, like
-// Deprecated and Submodule; while Empty will be returned, along with an
-// error, only when the method fails.
-func NewFromOSFileMode(m os.FileMode) (FileMode, error) {
- if m.IsRegular() {
- if isSetTemporary(m) {
- return Empty, fmt.Errorf("no equivalent git mode for %s", m)
- }
- if isSetCharDevice(m) {
- return Empty, fmt.Errorf("no equivalent git mode for %s", m)
- }
- if isSetUserExecutable(m) {
- return Executable, nil
- }
- return Regular, nil
- }
-
- if m.IsDir() {
- return Dir, nil
- }
-
- if isSetSymLink(m) {
- return Symlink, nil
- }
-
- return Empty, fmt.Errorf("no equivalent git mode for %s", m)
-}
-
-func isSetCharDevice(m os.FileMode) bool {
- return m&os.ModeCharDevice != 0
-}
-
-func isSetTemporary(m os.FileMode) bool {
- return m&os.ModeTemporary != 0
-}
-
-func isSetUserExecutable(m os.FileMode) bool {
- return m&0100 != 0
-}
-
-func isSetSymLink(m os.FileMode) bool {
- return m&os.ModeSymlink != 0
-}
-
-// Bytes return a slice of 4 bytes with the mode in little endian
-// encoding.
-func (m FileMode) Bytes() []byte {
- ret := make([]byte, 4)
- binary.LittleEndian.PutUint32(ret, uint32(m))
- return ret
-}
-
-// IsMalformed returns if the FileMode should not appear in a git packfile,
-// this is: Empty and any other mode not mentioned as a constant in this
-// package.
-func (m FileMode) IsMalformed() bool {
- return m != Dir &&
- m != Regular &&
- m != Deprecated &&
- m != Executable &&
- m != Symlink &&
- m != Submodule
-}
-
-// String returns the FileMode as a string in the standard git format,
-// this is, an octal number padded with ceros to 7 digits. Malformed
-// modes are printed in that same format, for easier debugging.
-//
-// Example: Regular is "0100644", Empty is "0000000".
-func (m FileMode) String() string {
- return fmt.Sprintf("%07o", uint32(m))
-}
-
-// IsRegular returns if the FileMode represents that of a regular file,
-// this is, either Regular or Deprecated. Please note that Executable
-// are not regular even though in the UNIX tradition, they usually are:
-// See the IsFile method.
-func (m FileMode) IsRegular() bool {
- return m == Regular ||
- m == Deprecated
-}
-
-// IsFile returns if the FileMode represents that of a file, this is,
-// Regular, Deprecated, Executable or Link.
-func (m FileMode) IsFile() bool {
- return m == Regular ||
- m == Deprecated ||
- m == Executable ||
- m == Symlink
-}
-
-// ToOSFileMode returns the os.FileMode to be used when creating file
-// system elements with the given git mode and a nil error on success.
-//
-// When the provided mode cannot be mapped to a valid file system mode
-// (e.g. Submodule) it returns os.FileMode(0) and an error.
-//
-// The returned file mode does not take into account the umask.
-func (m FileMode) ToOSFileMode() (os.FileMode, error) {
- switch m {
- case Dir:
- return os.ModePerm | os.ModeDir, nil
- case Submodule:
- return os.ModePerm | os.ModeDir, nil
- case Regular:
- return os.FileMode(0644), nil
- // Deprecated is no longer allowed: treated as a Regular instead
- case Deprecated:
- return os.FileMode(0644), nil
- case Executable:
- return os.FileMode(0755), nil
- case Symlink:
- return os.ModePerm | os.ModeSymlink, nil
- }
-
- return os.FileMode(0), fmt.Errorf("malformed mode (%s)", m)
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/config/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/config/common.go
deleted file mode 100644
index 6d689ea1e01..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/config/common.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package config
-
-// New creates a new config instance.
-func New() *Config {
- return &Config{}
-}
-
-// Config contains all the sections, comments and includes from a config file.
-type Config struct {
- Comment *Comment
- Sections Sections
- Includes Includes
-}
-
-// Includes is a list of Includes in a config file.
-type Includes []*Include
-
-// Include is a reference to an included config file.
-type Include struct {
- Path string
- Config *Config
-}
-
-// Comment string without the prefix '#' or ';'.
-type Comment string
-
-const (
- // NoSubsection token is passed to Config.Section and Config.SetSection to
- // represent the absence of a section.
- NoSubsection = ""
-)
-
-// Section returns a existing section with the given name or creates a new one.
-func (c *Config) Section(name string) *Section {
- for i := len(c.Sections) - 1; i >= 0; i-- {
- s := c.Sections[i]
- if s.IsName(name) {
- return s
- }
- }
-
- s := &Section{Name: name}
- c.Sections = append(c.Sections, s)
- return s
-}
-
-// HasSection checks if the Config has a section with the specified name.
-func (c *Config) HasSection(name string) bool {
- for _, s := range c.Sections {
- if s.IsName(name) {
- return true
- }
- }
- return false
-}
-
-// RemoveSection removes a section from a config file.
-func (c *Config) RemoveSection(name string) *Config {
- result := Sections{}
- for _, s := range c.Sections {
- if !s.IsName(name) {
- result = append(result, s)
- }
- }
-
- c.Sections = result
- return c
-}
-
-// RemoveSubsection remove a subsection from a config file.
-func (c *Config) RemoveSubsection(section string, subsection string) *Config {
- for _, s := range c.Sections {
- if s.IsName(section) {
- result := Subsections{}
- for _, ss := range s.Subsections {
- if !ss.IsName(subsection) {
- result = append(result, ss)
- }
- }
- s.Subsections = result
- }
- }
-
- return c
-}
-
-// AddOption adds an option to a given section and subsection. Use the
-// NoSubsection constant for the subsection argument if no subsection is wanted.
-func (c *Config) AddOption(section string, subsection string, key string, value string) *Config {
- if subsection == "" {
- c.Section(section).AddOption(key, value)
- } else {
- c.Section(section).Subsection(subsection).AddOption(key, value)
- }
-
- return c
-}
-
-// SetOption sets an option to a given section and subsection. Use the
-// NoSubsection constant for the subsection argument if no subsection is wanted.
-func (c *Config) SetOption(section string, subsection string, key string, value string) *Config {
- if subsection == "" {
- c.Section(section).SetOption(key, value)
- } else {
- c.Section(section).Subsection(subsection).SetOption(key, value)
- }
-
- return c
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/config/decoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/config/decoder.go
deleted file mode 100644
index 8e52d57f302..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/config/decoder.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package config
-
-import (
- "io"
-
- "github.com/go-git/gcfg"
-)
-
-// A Decoder reads and decodes config files from an input stream.
-type Decoder struct {
- io.Reader
-}
-
-// NewDecoder returns a new decoder that reads from r.
-func NewDecoder(r io.Reader) *Decoder {
- return &Decoder{r}
-}
-
-// Decode reads the whole config from its input and stores it in the
-// value pointed to by config.
-func (d *Decoder) Decode(config *Config) error {
- cb := func(s string, ss string, k string, v string, bv bool) error {
- if ss == "" && k == "" {
- config.Section(s)
- return nil
- }
-
- if ss != "" && k == "" {
- config.Section(s).Subsection(ss)
- return nil
- }
-
- config.AddOption(s, ss, k, v)
- return nil
- }
- return gcfg.ReadWithCallback(d, cb)
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/config/doc.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/config/doc.go
deleted file mode 100644
index 3986c836581..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/config/doc.go
+++ /dev/null
@@ -1,122 +0,0 @@
-// Package config implements encoding and decoding of git config files.
-//
-// Configuration File
-// ------------------
-//
-// The Git configuration file contains a number of variables that affect
-// the Git commands' behavior. The `.git/config` file in each repository
-// is used to store the configuration for that repository, and
-// `$HOME/.gitconfig` is used to store a per-user configuration as
-// fallback values for the `.git/config` file. The file `/etc/gitconfig`
-// can be used to store a system-wide default configuration.
-//
-// The configuration variables are used by both the Git plumbing
-// and the porcelains. The variables are divided into sections, wherein
-// the fully qualified variable name of the variable itself is the last
-// dot-separated segment and the section name is everything before the last
-// dot. The variable names are case-insensitive, allow only alphanumeric
-// characters and `-`, and must start with an alphabetic character. Some
-// variables may appear multiple times; we say then that the variable is
-// multivalued.
-//
-// Syntax
-// ~~~~~~
-//
-// The syntax is fairly flexible and permissive; whitespaces are mostly
-// ignored. The '#' and ';' characters begin comments to the end of line,
-// blank lines are ignored.
-//
-// The file consists of sections and variables. A section begins with
-// the name of the section in square brackets and continues until the next
-// section begins. Section names are case-insensitive. Only alphanumeric
-// characters, `-` and `.` are allowed in section names. Each variable
-// must belong to some section, which means that there must be a section
-// header before the first setting of a variable.
-//
-// Sections can be further divided into subsections. To begin a subsection
-// put its name in double quotes, separated by space from the section name,
-// in the section header, like in the example below:
-//
-// --------
-// [section "subsection"]
-//
-// --------
-//
-// Subsection names are case sensitive and can contain any characters except
-// newline (doublequote `"` and backslash can be included by escaping them
-// as `\"` and `\\`, respectively). Section headers cannot span multiple
-// lines. Variables may belong directly to a section or to a given subsection.
-// You can have `[section]` if you have `[section "subsection"]`, but you
-// don't need to.
-//
-// There is also a deprecated `[section.subsection]` syntax. With this
-// syntax, the subsection name is converted to lower-case and is also
-// compared case sensitively. These subsection names follow the same
-// restrictions as section names.
-//
-// All the other lines (and the remainder of the line after the section
-// header) are recognized as setting variables, in the form
-// 'name = value' (or just 'name', which is a short-hand to say that
-// the variable is the boolean "true").
-// The variable names are case-insensitive, allow only alphanumeric characters
-// and `-`, and must start with an alphabetic character.
-//
-// A line that defines a value can be continued to the next line by
-// ending it with a `\`; the backquote and the end-of-line are
-// stripped. Leading whitespaces after 'name =', the remainder of the
-// line after the first comment character '#' or ';', and trailing
-// whitespaces of the line are discarded unless they are enclosed in
-// double quotes. Internal whitespaces within the value are retained
-// verbatim.
-//
-// Inside double quotes, double quote `"` and backslash `\` characters
-// must be escaped: use `\"` for `"` and `\\` for `\`.
-//
-// The following escape sequences (beside `\"` and `\\`) are recognized:
-// `\n` for newline character (NL), `\t` for horizontal tabulation (HT, TAB)
-// and `\b` for backspace (BS). Other char escape sequences (including octal
-// escape sequences) are invalid.
-//
-// Includes
-// ~~~~~~~~
-//
-// You can include one config file from another by setting the special
-// `include.path` variable to the name of the file to be included. The
-// variable takes a pathname as its value, and is subject to tilde
-// expansion.
-//
-// The included file is expanded immediately, as if its contents had been
-// found at the location of the include directive. If the value of the
-// `include.path` variable is a relative path, the path is considered to be
-// relative to the configuration file in which the include directive was
-// found. See below for examples.
-//
-//
-// Example
-// ~~~~~~~
-//
-// # Core variables
-// [core]
-// ; Don't trust file modes
-// filemode = false
-//
-// # Our diff algorithm
-// [diff]
-// external = /usr/local/bin/diff-wrapper
-// renames = true
-//
-// [branch "devel"]
-// remote = origin
-// merge = refs/heads/devel
-//
-// # Proxy settings
-// [core]
-// gitProxy="ssh" for "kernel.org"
-// gitProxy=default-proxy ; for the rest
-//
-// [include]
-// path = /path/to/foo.inc ; include by absolute path
-// path = foo ; expand "foo" relative to the current file
-// path = ~/foo ; expand "foo" in your `$HOME` directory
-//
-package config
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/config/encoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/config/encoder.go
deleted file mode 100644
index de069aed5e7..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/config/encoder.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package config
-
-import (
- "fmt"
- "io"
- "strings"
-)
-
-// An Encoder writes config files to an output stream.
-type Encoder struct {
- w io.Writer
-}
-
-var (
- subsectionReplacer = strings.NewReplacer(`"`, `\"`, `\`, `\\`)
- valueReplacer = strings.NewReplacer(`"`, `\"`, `\`, `\\`, "\n", `\n`, "\t", `\t`, "\b", `\b`)
-)
-// NewEncoder returns a new encoder that writes to w.
-func NewEncoder(w io.Writer) *Encoder {
- return &Encoder{w}
-}
-
-// Encode writes the config in git config format to the stream of the encoder.
-func (e *Encoder) Encode(cfg *Config) error {
- for _, s := range cfg.Sections {
- if err := e.encodeSection(s); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (e *Encoder) encodeSection(s *Section) error {
- if len(s.Options) > 0 {
- if err := e.printf("[%s]\n", s.Name); err != nil {
- return err
- }
-
- if err := e.encodeOptions(s.Options); err != nil {
- return err
- }
- }
-
- for _, ss := range s.Subsections {
- if err := e.encodeSubsection(s.Name, ss); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (e *Encoder) encodeSubsection(sectionName string, s *Subsection) error {
- if err := e.printf("[%s \"%s\"]\n", sectionName, subsectionReplacer.Replace(s.Name)); err != nil {
- return err
- }
-
- return e.encodeOptions(s.Options)
-}
-
-func (e *Encoder) encodeOptions(opts Options) error {
- for _, o := range opts {
- var value string
- if strings.ContainsAny(o.Value, "#;\"\t\n\\") || strings.HasPrefix(o.Value, " ") || strings.HasSuffix(o.Value, " ") {
- value = `"`+valueReplacer.Replace(o.Value)+`"`
- } else {
- value = o.Value
- }
-
- if err := e.printf("\t%s = %s\n", o.Key, value); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (e *Encoder) printf(msg string, args ...interface{}) error {
- _, err := fmt.Fprintf(e.w, msg, args...)
- return err
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/config/format.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/config/format.go
deleted file mode 100644
index 4873ea9258c..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/config/format.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package config
-
-// RepositoryFormatVersion represents the repository format version,
-// as per defined at:
-//
-// https://git-scm.com/docs/repository-version
-type RepositoryFormatVersion string
-
-const (
- // Version_0 is the format defined by the initial version of git,
- // including but not limited to the format of the repository
- // directory, the repository configuration file, and the object
- // and ref storage.
- //
- // Specifying the complete behavior of git is beyond the scope
- // of this document.
- Version_0 = "0"
-
- // Version_1 is identical to version 0, with the following exceptions:
- //
- // 1. When reading the core.repositoryformatversion variable, a git
- // implementation which supports version 1 MUST also read any
- // configuration keys found in the extensions section of the
- // configuration file.
- //
- // 2. If a version-1 repository specifies any extensions.* keys that
- // the running git has not implemented, the operation MUST NOT proceed.
- // Similarly, if the value of any known key is not understood by the
- // implementation, the operation MUST NOT proceed.
- //
- // Note that if no extensions are specified in the config file, then
- // core.repositoryformatversion SHOULD be set to 0 (setting it to 1 provides
- // no benefit, and makes the repository incompatible with older
- // implementations of git).
- Version_1 = "1"
-
- // DefaultRepositoryFormatVersion holds the default repository format version.
- DefaultRepositoryFormatVersion = Version_0
-)
-
-// ObjectFormat defines the object format.
-type ObjectFormat string
-
-const (
- // SHA1 represents the object format used for SHA1.
- SHA1 ObjectFormat = "sha1"
-
- // SHA256 represents the object format used for SHA256.
- SHA256 ObjectFormat = "sha256"
-
- // DefaultObjectFormat holds the default object format.
- DefaultObjectFormat = SHA1
-)
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/config/option.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/config/option.go
deleted file mode 100644
index cad394810a1..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/config/option.go
+++ /dev/null
@@ -1,127 +0,0 @@
-package config
-
-import (
- "fmt"
- "strings"
-)
-
-// Option defines a key/value entity in a config file.
-type Option struct {
- // Key preserving original caseness.
- // Use IsKey instead to compare key regardless of caseness.
- Key string
- // Original value as string, could be not normalized.
- Value string
-}
-
-type Options []*Option
-
-// IsKey returns true if the given key matches
-// this option's key in a case-insensitive comparison.
-func (o *Option) IsKey(key string) bool {
- return strings.EqualFold(o.Key, key)
-}
-
-func (opts Options) GoString() string {
- var strs []string
- for _, opt := range opts {
- strs = append(strs, fmt.Sprintf("%#v", opt))
- }
-
- return strings.Join(strs, ", ")
-}
-
-// Get gets the value for the given key if set,
-// otherwise it returns the empty string.
-//
-// Note that there is no difference
-//
-// This matches git behaviour since git v1.8.1-rc1,
-// if there are multiple definitions of a key, the
-// last one wins.
-//
-// See: http://article.gmane.org/gmane.linux.kernel/1407184
-//
-// In order to get all possible values for the same key,
-// use GetAll.
-func (opts Options) Get(key string) string {
- for i := len(opts) - 1; i >= 0; i-- {
- o := opts[i]
- if o.IsKey(key) {
- return o.Value
- }
- }
- return ""
-}
-
-// Has checks if an Option exist with the given key.
-func (opts Options) Has(key string) bool {
- for _, o := range opts {
- if o.IsKey(key) {
- return true
- }
- }
- return false
-}
-
-// GetAll returns all possible values for the same key.
-func (opts Options) GetAll(key string) []string {
- result := []string{}
- for _, o := range opts {
- if o.IsKey(key) {
- result = append(result, o.Value)
- }
- }
- return result
-}
-
-func (opts Options) withoutOption(key string) Options {
- result := Options{}
- for _, o := range opts {
- if !o.IsKey(key) {
- result = append(result, o)
- }
- }
- return result
-}
-
-func (opts Options) withAddedOption(key string, value string) Options {
- return append(opts, &Option{key, value})
-}
-
-func (opts Options) withSettedOption(key string, values ...string) Options {
- var result Options
- var added []string
- for _, o := range opts {
- if !o.IsKey(key) {
- result = append(result, o)
- continue
- }
-
- if contains(values, o.Value) {
- added = append(added, o.Value)
- result = append(result, o)
- continue
- }
- }
-
- for _, value := range values {
- if contains(added, value) {
- continue
- }
-
- result = result.withAddedOption(key, value)
- }
-
- return result
-}
-
-func contains(haystack []string, needle string) bool {
- for _, s := range haystack {
- if s == needle {
- return true
- }
- }
-
- return false
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/config/section.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/config/section.go
deleted file mode 100644
index 4625ac5837e..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/config/section.go
+++ /dev/null
@@ -1,181 +0,0 @@
-package config
-
-import (
- "fmt"
- "strings"
-)
-
-// Section is the representation of a section inside git configuration files.
-// Each Section contains Options that are used by both the Git plumbing
-// and the porcelains.
-// Sections can be further divided into subsections. To begin a subsection
-// put its name in double quotes, separated by space from the section name,
-// in the section header, like in the example below:
-//
-// [section "subsection"]
-//
-// All the other lines (and the remainder of the line after the section header)
-// are recognized as option variables, in the form "name = value" (or just name,
-// which is a short-hand to say that the variable is the boolean "true").
-// The variable names are case-insensitive, allow only alphanumeric characters
-// and -, and must start with an alphabetic character:
-//
-// [section "subsection1"]
-// option1 = value1
-// option2
-// [section "subsection2"]
-// option3 = value2
-//
-type Section struct {
- Name string
- Options Options
- Subsections Subsections
-}
-
-type Subsection struct {
- Name string
- Options Options
-}
-
-type Sections []*Section
-
-func (s Sections) GoString() string {
- var strs []string
- for _, ss := range s {
- strs = append(strs, fmt.Sprintf("%#v", ss))
- }
-
- return strings.Join(strs, ", ")
-}
-
-type Subsections []*Subsection
-
-func (s Subsections) GoString() string {
- var strs []string
- for _, ss := range s {
- strs = append(strs, fmt.Sprintf("%#v", ss))
- }
-
- return strings.Join(strs, ", ")
-}
-
-// IsName checks if the name provided is equals to the Section name, case insensitive.
-func (s *Section) IsName(name string) bool {
- return strings.EqualFold(s.Name, name)
-}
-
-// Subsection returns a Subsection from the specified Section. If the
-// Subsection does not exists, new one is created and added to Section.
-func (s *Section) Subsection(name string) *Subsection {
- for i := len(s.Subsections) - 1; i >= 0; i-- {
- ss := s.Subsections[i]
- if ss.IsName(name) {
- return ss
- }
- }
-
- ss := &Subsection{Name: name}
- s.Subsections = append(s.Subsections, ss)
- return ss
-}
-
-// HasSubsection checks if the Section has a Subsection with the specified name.
-func (s *Section) HasSubsection(name string) bool {
- for _, ss := range s.Subsections {
- if ss.IsName(name) {
- return true
- }
- }
-
- return false
-}
-
-// RemoveSubsection removes a subsection from a Section.
-func (s *Section) RemoveSubsection(name string) *Section {
- result := Subsections{}
- for _, s := range s.Subsections {
- if !s.IsName(name) {
- result = append(result, s)
- }
- }
-
- s.Subsections = result
- return s
-}
-
-// Option returns the value for the specified key. Empty string is returned if
-// key does not exists.
-func (s *Section) Option(key string) string {
- return s.Options.Get(key)
-}
-
-// OptionAll returns all possible values for an option with the specified key.
-// If the option does not exists, an empty slice will be returned.
-func (s *Section) OptionAll(key string) []string {
- return s.Options.GetAll(key)
-}
-
-// HasOption checks if the Section has an Option with the given key.
-func (s *Section) HasOption(key string) bool {
- return s.Options.Has(key)
-}
-
-// AddOption adds a new Option to the Section. The updated Section is returned.
-func (s *Section) AddOption(key string, value string) *Section {
- s.Options = s.Options.withAddedOption(key, value)
- return s
-}
-
-// SetOption adds a new Option to the Section. If the option already exists, is replaced.
-// The updated Section is returned.
-func (s *Section) SetOption(key string, value string) *Section {
- s.Options = s.Options.withSettedOption(key, value)
- return s
-}
-
-// Remove an option with the specified key. The updated Section is returned.
-func (s *Section) RemoveOption(key string) *Section {
- s.Options = s.Options.withoutOption(key)
- return s
-}
-
-// IsName checks if the name of the subsection is exactly the specified name.
-func (s *Subsection) IsName(name string) bool {
- return s.Name == name
-}
-
-// Option returns an option with the specified key. If the option does not exists,
-// empty spring will be returned.
-func (s *Subsection) Option(key string) string {
- return s.Options.Get(key)
-}
-
-// OptionAll returns all possible values for an option with the specified key.
-// If the option does not exists, an empty slice will be returned.
-func (s *Subsection) OptionAll(key string) []string {
- return s.Options.GetAll(key)
-}
-
-// HasOption checks if the Subsection has an Option with the given key.
-func (s *Subsection) HasOption(key string) bool {
- return s.Options.Has(key)
-}
-
-// AddOption adds a new Option to the Subsection. The updated Subsection is returned.
-func (s *Subsection) AddOption(key string, value string) *Subsection {
- s.Options = s.Options.withAddedOption(key, value)
- return s
-}
-
-// SetOption adds a new Option to the Subsection. If the option already exists, is replaced.
-// The updated Subsection is returned.
-func (s *Subsection) SetOption(key string, value ...string) *Subsection {
- s.Options = s.Options.withSettedOption(key, value...)
- return s
-}
-
-// RemoveOption removes the option with the specified key. The updated Subsection is returned.
-func (s *Subsection) RemoveOption(key string) *Subsection {
- s.Options = s.Options.withoutOption(key)
- return s
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/colorconfig.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/colorconfig.go
deleted file mode 100644
index 6fd4158462d..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/colorconfig.go
+++ /dev/null
@@ -1,97 +0,0 @@
-package diff
-
-import "github.com/go-git/go-git/v5/plumbing/color"
-
-// A ColorKey is a key into a ColorConfig map and also equal to the key in the
-// diff.color subsection of the config. See
-// https://github.com/git/git/blob/v2.26.2/diff.c#L83-L106.
-type ColorKey string
-
-// ColorKeys.
-const (
- Context ColorKey = "context"
- Meta ColorKey = "meta"
- Frag ColorKey = "frag"
- Old ColorKey = "old"
- New ColorKey = "new"
- Commit ColorKey = "commit"
- Whitespace ColorKey = "whitespace"
- Func ColorKey = "func"
- OldMoved ColorKey = "oldMoved"
- OldMovedAlternative ColorKey = "oldMovedAlternative"
- OldMovedDimmed ColorKey = "oldMovedDimmed"
- OldMovedAlternativeDimmed ColorKey = "oldMovedAlternativeDimmed"
- NewMoved ColorKey = "newMoved"
- NewMovedAlternative ColorKey = "newMovedAlternative"
- NewMovedDimmed ColorKey = "newMovedDimmed"
- NewMovedAlternativeDimmed ColorKey = "newMovedAlternativeDimmed"
- ContextDimmed ColorKey = "contextDimmed"
- OldDimmed ColorKey = "oldDimmed"
- NewDimmed ColorKey = "newDimmed"
- ContextBold ColorKey = "contextBold"
- OldBold ColorKey = "oldBold"
- NewBold ColorKey = "newBold"
-)
-
-// A ColorConfig is a color configuration. A nil or empty ColorConfig
-// corresponds to no color.
-type ColorConfig map[ColorKey]string
-
-// A ColorConfigOption sets an option on a ColorConfig.
-type ColorConfigOption func(ColorConfig)
-
-// WithColor sets the color for key.
-func WithColor(key ColorKey, color string) ColorConfigOption {
- return func(cc ColorConfig) {
- cc[key] = color
- }
-}
-
-// defaultColorConfig is the default color configuration. See
-// https://github.com/git/git/blob/v2.26.2/diff.c#L57-L81.
-var defaultColorConfig = ColorConfig{
- Context: color.Normal,
- Meta: color.Bold,
- Frag: color.Cyan,
- Old: color.Red,
- New: color.Green,
- Commit: color.Yellow,
- Whitespace: color.BgRed,
- Func: color.Normal,
- OldMoved: color.BoldMagenta,
- OldMovedAlternative: color.BoldBlue,
- OldMovedDimmed: color.Faint,
- OldMovedAlternativeDimmed: color.FaintItalic,
- NewMoved: color.BoldCyan,
- NewMovedAlternative: color.BoldYellow,
- NewMovedDimmed: color.Faint,
- NewMovedAlternativeDimmed: color.FaintItalic,
- ContextDimmed: color.Faint,
- OldDimmed: color.FaintRed,
- NewDimmed: color.FaintGreen,
- ContextBold: color.Bold,
- OldBold: color.BoldRed,
- NewBold: color.BoldGreen,
-}
-
-// NewColorConfig returns a new ColorConfig.
-func NewColorConfig(options ...ColorConfigOption) ColorConfig {
- cc := make(ColorConfig)
- for key, value := range defaultColorConfig {
- cc[key] = value
- }
- for _, option := range options {
- option(cc)
- }
- return cc
-}
-
-// Reset returns the ANSI escape sequence to reset the color with key set from
-// cc. If no color was set then no reset is needed so it returns the empty
-// string.
-func (cc ColorConfig) Reset(key ColorKey) string {
- if cc[key] == "" {
- return ""
- }
- return color.Reset
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/patch.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/patch.go
deleted file mode 100644
index c7678b01a4d..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/patch.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package diff
-
-import (
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/filemode"
-)
-
-// Operation defines the operation of a diff item.
-type Operation int
-
-const (
- // Equal item represents an equals diff.
- Equal Operation = iota
- // Add item represents an insert diff.
- Add
- // Delete item represents a delete diff.
- Delete
-)
-
-// Patch represents a collection of steps to transform several files.
-type Patch interface {
- // FilePatches returns a slice of patches per file.
- FilePatches() []FilePatch
- // Message returns an optional message that can be at the top of the
- // Patch representation.
- Message() string
-}
-
-// FilePatch represents the necessary steps to transform one file into another.
-type FilePatch interface {
- // IsBinary returns true if this patch is representing a binary file.
- IsBinary() bool
- // Files returns the from and to Files, with all the necessary metadata
- // about them. If the patch creates a new file, "from" will be nil.
- // If the patch deletes a file, "to" will be nil.
- Files() (from, to File)
- // Chunks returns a slice of ordered changes to transform "from" File into
- // "to" File. If the file is a binary one, Chunks will be empty.
- Chunks() []Chunk
-}
-
-// File contains all the file metadata necessary to print some patch formats.
-type File interface {
- // Hash returns the File Hash.
- Hash() plumbing.Hash
- // Mode returns the FileMode.
- Mode() filemode.FileMode
- // Path returns the complete Path to the file, including the filename.
- Path() string
-}
-
-// Chunk represents a portion of a file transformation into another.
-type Chunk interface {
- // Content contains the portion of the file.
- Content() string
- // Type contains the Operation to do with this Chunk.
- Type() Operation
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/unified_encoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/unified_encoder.go
deleted file mode 100644
index fa605b1985e..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/unified_encoder.go
+++ /dev/null
@@ -1,395 +0,0 @@
-package diff
-
-import (
- "fmt"
- "io"
- "regexp"
- "strconv"
- "strings"
-
- "github.com/go-git/go-git/v5/plumbing"
-)
-
-// DefaultContextLines is the default number of context lines.
-const DefaultContextLines = 3
-
-var (
- splitLinesRegexp = regexp.MustCompile(`[^\n]*(\n|$)`)
-
- operationChar = map[Operation]byte{
- Add: '+',
- Delete: '-',
- Equal: ' ',
- }
-
- operationColorKey = map[Operation]ColorKey{
- Add: New,
- Delete: Old,
- Equal: Context,
- }
-)
-
-// UnifiedEncoder encodes an unified diff into the provided Writer. It does not
-// support similarity index for renames or sorting hash representations.
-type UnifiedEncoder struct {
- io.Writer
-
- // contextLines is the count of unchanged lines that will appear surrounding
- // a change.
- contextLines int
-
- // srcPrefix and dstPrefix are prepended to file paths when encoding a diff.
- srcPrefix string
- dstPrefix string
-
- // colorConfig is the color configuration. The default is no color.
- color ColorConfig
-}
-
-// NewUnifiedEncoder returns a new UnifiedEncoder that writes to w.
-func NewUnifiedEncoder(w io.Writer, contextLines int) *UnifiedEncoder {
- return &UnifiedEncoder{
- Writer: w,
- srcPrefix: "a/",
- dstPrefix: "b/",
- contextLines: contextLines,
- }
-}
-
-// SetColor sets e's color configuration and returns e.
-func (e *UnifiedEncoder) SetColor(colorConfig ColorConfig) *UnifiedEncoder {
- e.color = colorConfig
- return e
-}
-
-// SetSrcPrefix sets e's srcPrefix and returns e.
-func (e *UnifiedEncoder) SetSrcPrefix(prefix string) *UnifiedEncoder {
- e.srcPrefix = prefix
- return e
-}
-
-// SetDstPrefix sets e's dstPrefix and returns e.
-func (e *UnifiedEncoder) SetDstPrefix(prefix string) *UnifiedEncoder {
- e.dstPrefix = prefix
- return e
-}
-
-// Encode encodes patch.
-func (e *UnifiedEncoder) Encode(patch Patch) error {
- sb := &strings.Builder{}
-
- if message := patch.Message(); message != "" {
- sb.WriteString(message)
- if !strings.HasSuffix(message, "\n") {
- sb.WriteByte('\n')
- }
- }
-
- for _, filePatch := range patch.FilePatches() {
- e.writeFilePatchHeader(sb, filePatch)
- g := newHunksGenerator(filePatch.Chunks(), e.contextLines)
- for _, hunk := range g.Generate() {
- hunk.writeTo(sb, e.color)
- }
- }
-
- _, err := e.Write([]byte(sb.String()))
- return err
-}
-
-func (e *UnifiedEncoder) writeFilePatchHeader(sb *strings.Builder, filePatch FilePatch) {
- from, to := filePatch.Files()
- if from == nil && to == nil {
- return
- }
- isBinary := filePatch.IsBinary()
-
- var lines []string
- switch {
- case from != nil && to != nil:
- hashEquals := from.Hash() == to.Hash()
- lines = append(lines,
- fmt.Sprintf("diff --git %s%s %s%s",
- e.srcPrefix, from.Path(), e.dstPrefix, to.Path()),
- )
- if from.Mode() != to.Mode() {
- lines = append(lines,
- fmt.Sprintf("old mode %o", from.Mode()),
- fmt.Sprintf("new mode %o", to.Mode()),
- )
- }
- if from.Path() != to.Path() {
- lines = append(lines,
- fmt.Sprintf("rename from %s", from.Path()),
- fmt.Sprintf("rename to %s", to.Path()),
- )
- }
- if from.Mode() != to.Mode() && !hashEquals {
- lines = append(lines,
- fmt.Sprintf("index %s..%s", from.Hash(), to.Hash()),
- )
- } else if !hashEquals {
- lines = append(lines,
- fmt.Sprintf("index %s..%s %o", from.Hash(), to.Hash(), from.Mode()),
- )
- }
- if !hashEquals {
- lines = e.appendPathLines(lines, e.srcPrefix+from.Path(), e.dstPrefix+to.Path(), isBinary)
- }
- case from == nil:
- lines = append(lines,
- fmt.Sprintf("diff --git %s %s", e.srcPrefix+to.Path(), e.dstPrefix+to.Path()),
- fmt.Sprintf("new file mode %o", to.Mode()),
- fmt.Sprintf("index %s..%s", plumbing.ZeroHash, to.Hash()),
- )
- lines = e.appendPathLines(lines, "/dev/null", e.dstPrefix+to.Path(), isBinary)
- case to == nil:
- lines = append(lines,
- fmt.Sprintf("diff --git %s %s", e.srcPrefix+from.Path(), e.dstPrefix+from.Path()),
- fmt.Sprintf("deleted file mode %o", from.Mode()),
- fmt.Sprintf("index %s..%s", from.Hash(), plumbing.ZeroHash),
- )
- lines = e.appendPathLines(lines, e.srcPrefix+from.Path(), "/dev/null", isBinary)
- }
-
- sb.WriteString(e.color[Meta])
- sb.WriteString(lines[0])
- for _, line := range lines[1:] {
- sb.WriteByte('\n')
- sb.WriteString(line)
- }
- sb.WriteString(e.color.Reset(Meta))
- sb.WriteByte('\n')
-}
-
-func (e *UnifiedEncoder) appendPathLines(lines []string, fromPath, toPath string, isBinary bool) []string {
- if isBinary {
- return append(lines,
- fmt.Sprintf("Binary files %s and %s differ", fromPath, toPath),
- )
- }
- return append(lines,
- fmt.Sprintf("--- %s", fromPath),
- fmt.Sprintf("+++ %s", toPath),
- )
-}
-
-type hunksGenerator struct {
- fromLine, toLine int
- ctxLines int
- chunks []Chunk
- current *hunk
- hunks []*hunk
- beforeContext, afterContext []string
-}
-
-func newHunksGenerator(chunks []Chunk, ctxLines int) *hunksGenerator {
- return &hunksGenerator{
- chunks: chunks,
- ctxLines: ctxLines,
- }
-}
-
-func (g *hunksGenerator) Generate() []*hunk {
- for i, chunk := range g.chunks {
- lines := splitLines(chunk.Content())
- nLines := len(lines)
-
- switch chunk.Type() {
- case Equal:
- g.fromLine += nLines
- g.toLine += nLines
- g.processEqualsLines(lines, i)
- case Delete:
- if nLines != 0 {
- g.fromLine++
- }
-
- g.processHunk(i, chunk.Type())
- g.fromLine += nLines - 1
- g.current.AddOp(chunk.Type(), lines...)
- case Add:
- if nLines != 0 {
- g.toLine++
- }
- g.processHunk(i, chunk.Type())
- g.toLine += nLines - 1
- g.current.AddOp(chunk.Type(), lines...)
- }
-
- if i == len(g.chunks)-1 && g.current != nil {
- g.hunks = append(g.hunks, g.current)
- }
- }
-
- return g.hunks
-}
-
-func (g *hunksGenerator) processHunk(i int, op Operation) {
- if g.current != nil {
- return
- }
-
- var ctxPrefix string
- linesBefore := len(g.beforeContext)
- if linesBefore > g.ctxLines {
- ctxPrefix = g.beforeContext[linesBefore-g.ctxLines-1]
- g.beforeContext = g.beforeContext[linesBefore-g.ctxLines:]
- linesBefore = g.ctxLines
- }
-
- g.current = &hunk{ctxPrefix: strings.TrimSuffix(ctxPrefix, "\n")}
- g.current.AddOp(Equal, g.beforeContext...)
-
- switch op {
- case Delete:
- g.current.fromLine, g.current.toLine =
- g.addLineNumbers(g.fromLine, g.toLine, linesBefore, i, Add)
- case Add:
- g.current.toLine, g.current.fromLine =
- g.addLineNumbers(g.toLine, g.fromLine, linesBefore, i, Delete)
- }
-
- g.beforeContext = nil
-}
-
-// addLineNumbers obtains the line numbers in a new chunk.
-func (g *hunksGenerator) addLineNumbers(la, lb int, linesBefore int, i int, op Operation) (cla, clb int) {
- cla = la - linesBefore
- // we need to search for a reference for the next diff
- switch {
- case linesBefore != 0 && g.ctxLines != 0:
- if lb > g.ctxLines {
- clb = lb - g.ctxLines + 1
- } else {
- clb = 1
- }
- case g.ctxLines == 0:
- clb = lb
- case i != len(g.chunks)-1:
- next := g.chunks[i+1]
- if next.Type() == op || next.Type() == Equal {
- // this diff will be into this chunk
- clb = lb + 1
- }
- }
-
- return
-}
-
-func (g *hunksGenerator) processEqualsLines(ls []string, i int) {
- if g.current == nil {
- g.beforeContext = append(g.beforeContext, ls...)
- return
- }
-
- g.afterContext = append(g.afterContext, ls...)
- if len(g.afterContext) <= g.ctxLines*2 && i != len(g.chunks)-1 {
- g.current.AddOp(Equal, g.afterContext...)
- g.afterContext = nil
- } else {
- ctxLines := g.ctxLines
- if ctxLines > len(g.afterContext) {
- ctxLines = len(g.afterContext)
- }
- g.current.AddOp(Equal, g.afterContext[:ctxLines]...)
- g.hunks = append(g.hunks, g.current)
-
- g.current = nil
- g.beforeContext = g.afterContext[ctxLines:]
- g.afterContext = nil
- }
-}
-
-func splitLines(s string) []string {
- out := splitLinesRegexp.FindAllString(s, -1)
- if out[len(out)-1] == "" {
- out = out[:len(out)-1]
- }
- return out
-}
-
-type hunk struct {
- fromLine int
- toLine int
-
- fromCount int
- toCount int
-
- ctxPrefix string
- ops []*op
-}
-
-func (h *hunk) writeTo(sb *strings.Builder, color ColorConfig) {
- sb.WriteString(color[Frag])
- sb.WriteString("@@ -")
-
- if h.fromCount == 1 {
- sb.WriteString(strconv.Itoa(h.fromLine))
- } else {
- sb.WriteString(strconv.Itoa(h.fromLine))
- sb.WriteByte(',')
- sb.WriteString(strconv.Itoa(h.fromCount))
- }
-
- sb.WriteString(" +")
-
- if h.toCount == 1 {
- sb.WriteString(strconv.Itoa(h.toLine))
- } else {
- sb.WriteString(strconv.Itoa(h.toLine))
- sb.WriteByte(',')
- sb.WriteString(strconv.Itoa(h.toCount))
- }
-
- sb.WriteString(" @@")
- sb.WriteString(color.Reset(Frag))
-
- if h.ctxPrefix != "" {
- sb.WriteByte(' ')
- sb.WriteString(color[Func])
- sb.WriteString(h.ctxPrefix)
- sb.WriteString(color.Reset(Func))
- }
-
- sb.WriteByte('\n')
-
- for _, op := range h.ops {
- op.writeTo(sb, color)
- }
-}
-
-func (h *hunk) AddOp(t Operation, ss ...string) {
- n := len(ss)
- switch t {
- case Add:
- h.toCount += n
- case Delete:
- h.fromCount += n
- case Equal:
- h.toCount += n
- h.fromCount += n
- }
-
- for _, s := range ss {
- h.ops = append(h.ops, &op{s, t})
- }
-}
-
-type op struct {
- text string
- t Operation
-}
-
-func (o *op) writeTo(sb *strings.Builder, color ColorConfig) {
- colorKey := operationColorKey[o.t]
- sb.WriteString(color[colorKey])
- sb.WriteByte(operationChar[o.t])
- if strings.HasSuffix(o.text, "\n") {
- sb.WriteString(strings.TrimSuffix(o.text, "\n"))
- } else {
- sb.WriteString(o.text + "\n\\ No newline at end of file")
- }
- sb.WriteString(color.Reset(colorKey))
- sb.WriteByte('\n')
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/dir.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/dir.go
deleted file mode 100644
index aca5d0dbd23..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/dir.go
+++ /dev/null
@@ -1,144 +0,0 @@
-package gitignore
-
-import (
- "bufio"
- "bytes"
- "io"
- "os"
- "strings"
-
- "github.com/go-git/go-billy/v5"
- "github.com/go-git/go-git/v5/internal/path_util"
- "github.com/go-git/go-git/v5/plumbing/format/config"
- gioutil "github.com/go-git/go-git/v5/utils/ioutil"
-)
-
-const (
- commentPrefix = "#"
- coreSection = "core"
- excludesfile = "excludesfile"
- gitDir = ".git"
- gitignoreFile = ".gitignore"
- gitconfigFile = ".gitconfig"
- systemFile = "/etc/gitconfig"
- infoExcludeFile = gitDir + "/info/exclude"
-)
-
-// readIgnoreFile reads a specific git ignore file.
-func readIgnoreFile(fs billy.Filesystem, path []string, ignoreFile string) (ps []Pattern, err error) {
-
- ignoreFile, _ = path_util.ReplaceTildeWithHome(ignoreFile)
-
- f, err := fs.Open(fs.Join(append(path, ignoreFile)...))
- if err == nil {
- defer f.Close()
-
- scanner := bufio.NewScanner(f)
- for scanner.Scan() {
- s := scanner.Text()
- if !strings.HasPrefix(s, commentPrefix) && len(strings.TrimSpace(s)) > 0 {
- ps = append(ps, ParsePattern(s, path))
- }
- }
- } else if !os.IsNotExist(err) {
- return nil, err
- }
-
- return
-}
-
-// ReadPatterns reads the .git/info/exclude and then the gitignore patterns
-// recursively traversing through the directory structure. The result is in
-// the ascending order of priority (last higher).
-func ReadPatterns(fs billy.Filesystem, path []string) (ps []Pattern, err error) {
- ps, _ = readIgnoreFile(fs, path, infoExcludeFile)
-
- subps, _ := readIgnoreFile(fs, path, gitignoreFile)
- ps = append(ps, subps...)
-
- var fis []os.FileInfo
- fis, err = fs.ReadDir(fs.Join(path...))
- if err != nil {
- return
- }
-
- for _, fi := range fis {
- if fi.IsDir() && fi.Name() != gitDir {
- var subps []Pattern
- subps, err = ReadPatterns(fs, append(path, fi.Name()))
- if err != nil {
- return
- }
-
- if len(subps) > 0 {
- ps = append(ps, subps...)
- }
- }
- }
-
- return
-}
-
-func loadPatterns(fs billy.Filesystem, path string) (ps []Pattern, err error) {
- f, err := fs.Open(path)
- if err != nil {
- if os.IsNotExist(err) {
- return nil, nil
- }
- return nil, err
- }
-
- defer gioutil.CheckClose(f, &err)
-
- b, err := io.ReadAll(f)
- if err != nil {
- return
- }
-
- d := config.NewDecoder(bytes.NewBuffer(b))
-
- raw := config.New()
- if err = d.Decode(raw); err != nil {
- return
- }
-
- s := raw.Section(coreSection)
- efo := s.Options.Get(excludesfile)
- if efo == "" {
- return nil, nil
- }
-
- ps, err = readIgnoreFile(fs, nil, efo)
- if os.IsNotExist(err) {
- return nil, nil
- }
-
- return
-}
-
-// LoadGlobalPatterns loads gitignore patterns from the gitignore file
-// declared in a user's ~/.gitconfig file. If the ~/.gitconfig file does not
-// exist the function will return nil. If the core.excludesfile property
-// is not declared, the function will return nil. If the file pointed to by
-// the core.excludesfile property does not exist, the function will return nil.
-//
-// The function assumes fs is rooted at the root filesystem.
-func LoadGlobalPatterns(fs billy.Filesystem) (ps []Pattern, err error) {
- home, err := os.UserHomeDir()
- if err != nil {
- return
- }
-
- return loadPatterns(fs, fs.Join(home, gitconfigFile))
-}
-
-// LoadSystemPatterns loads gitignore patterns from the gitignore file
-// declared in a system's /etc/gitconfig file. If the /etc/gitconfig file does
-// not exist the function will return nil. If the core.excludesfile property
-// is not declared, the function will return nil. If the file pointed to by
-// the core.excludesfile property does not exist, the function will return nil.
-//
-// The function assumes fs is rooted at the root filesystem.
-func LoadSystemPatterns(fs billy.Filesystem) (ps []Pattern, err error) {
- return loadPatterns(fs, systemFile)
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/doc.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/doc.go
deleted file mode 100644
index eecd4baccb2..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/doc.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Package gitignore implements matching file system paths to gitignore patterns that
-// can be automatically read from a git repository tree in the order of definition
-// priorities. It support all pattern formats as specified in the original gitignore
-// documentation, copied below:
-//
-// Pattern format
-// ==============
-//
-// - A blank line matches no files, so it can serve as a separator for readability.
-//
-// - A line starting with # serves as a comment. Put a backslash ("\") in front of
-// the first hash for patterns that begin with a hash.
-//
-// - Trailing spaces are ignored unless they are quoted with backslash ("\").
-//
-// - An optional prefix "!" which negates the pattern; any matching file excluded
-// by a previous pattern will become included again. It is not possible to
-// re-include a file if a parent directory of that file is excluded.
-// Git doesn’t list excluded directories for performance reasons, so
-// any patterns on contained files have no effect, no matter where they are
-// defined. Put a backslash ("\") in front of the first "!" for patterns
-// that begin with a literal "!", for example, "\!important!.txt".
-//
-// - If the pattern ends with a slash, it is removed for the purpose of the
-// following description, but it would only find a match with a directory.
-// In other words, foo/ will match a directory foo and paths underneath it,
-// but will not match a regular file or a symbolic link foo (this is consistent
-// with the way how pathspec works in general in Git).
-//
-// - If the pattern does not contain a slash /, Git treats it as a shell glob
-// pattern and checks for a match against the pathname relative to the location
-// of the .gitignore file (relative to the toplevel of the work tree if not
-// from a .gitignore file).
-//
-// - Otherwise, Git treats the pattern as a shell glob suitable for consumption
-// by fnmatch(3) with the FNM_PATHNAME flag: wildcards in the pattern will
-// not match a / in the pathname. For example, "Documentation/*.html" matches
-// "Documentation/git.html" but not "Documentation/ppc/ppc.html" or
-// "tools/perf/Documentation/perf.html".
-//
-// - A leading slash matches the beginning of the pathname. For example,
-// "/*.c" matches "cat-file.c" but not "mozilla-sha1/sha1.c".
-//
-// Two consecutive asterisks ("**") in patterns matched against full pathname
-// may have special meaning:
-//
-// - A leading "**" followed by a slash means match in all directories.
-// For example, "**/foo" matches file or directory "foo" anywhere, the same as
-// pattern "foo". "**/foo/bar" matches file or directory "bar"
-// anywhere that is directly under directory "foo".
-//
-// - A trailing "/**" matches everything inside. For example, "abc/**" matches
-// all files inside directory "abc", relative to the location of the
-// .gitignore file, with infinite depth.
-//
-// - A slash followed by two consecutive asterisks then a slash matches
-// zero or more directories. For example, "a/**/b" matches "a/b", "a/x/b",
-// "a/x/y/b" and so on.
-//
-// - Other consecutive asterisks are considered invalid.
-//
-// Copyright and license
-// =====================
-//
-// Copyright (c) Oleg Sklyar, Silvertern and source{d}
-//
-// The package code was donated to source{d} to include, modify and develop
-// further as a part of the `go-git` project, release it on the license of
-// the whole project or delete it from the project.
-package gitignore
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/matcher.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/matcher.go
deleted file mode 100644
index bd1e9e2d4cf..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/matcher.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package gitignore
-
-// Matcher defines a global multi-pattern matcher for gitignore patterns
-type Matcher interface {
- // Match matches patterns in the order of priorities. As soon as an inclusion or
- // exclusion is found, not further matching is performed.
- Match(path []string, isDir bool) bool
-}
-
-// NewMatcher constructs a new global matcher. Patterns must be given in the order of
-// increasing priority. That is most generic settings files first, then the content of
-// the repo .gitignore, then content of .gitignore down the path or the repo and then
-// the content command line arguments.
-func NewMatcher(ps []Pattern) Matcher {
- return &matcher{ps}
-}
-
-type matcher struct {
- patterns []Pattern
-}
-
-func (m *matcher) Match(path []string, isDir bool) bool {
- n := len(m.patterns)
- for i := n - 1; i >= 0; i-- {
- if match := m.patterns[i].Match(path, isDir); match > NoMatch {
- return match == Exclude
- }
- }
- return false
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/pattern.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/pattern.go
deleted file mode 100644
index 450b3cdf72b..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/pattern.go
+++ /dev/null
@@ -1,155 +0,0 @@
-package gitignore
-
-import (
- "path/filepath"
- "strings"
-)
-
-// MatchResult defines outcomes of a match, no match, exclusion or inclusion.
-type MatchResult int
-
-const (
- // NoMatch defines the no match outcome of a match check
- NoMatch MatchResult = iota
- // Exclude defines an exclusion of a file as a result of a match check
- Exclude
- // Include defines an explicit inclusion of a file as a result of a match check
- Include
-)
-
-const (
- inclusionPrefix = "!"
- zeroToManyDirs = "**"
- patternDirSep = "/"
-)
-
-// Pattern defines a single gitignore pattern.
-type Pattern interface {
- // Match matches the given path to the pattern.
- Match(path []string, isDir bool) MatchResult
-}
-
-type pattern struct {
- domain []string
- pattern []string
- inclusion bool
- dirOnly bool
- isGlob bool
-}
-
-// ParsePattern parses a gitignore pattern string into the Pattern structure.
-func ParsePattern(p string, domain []string) Pattern {
- // storing domain, copy it to ensure it isn't changed externally
- domain = append([]string(nil), domain...)
- res := pattern{domain: domain}
-
- if strings.HasPrefix(p, inclusionPrefix) {
- res.inclusion = true
- p = p[1:]
- }
-
- if !strings.HasSuffix(p, "\\ ") {
- p = strings.TrimRight(p, " ")
- }
-
- if strings.HasSuffix(p, patternDirSep) {
- res.dirOnly = true
- p = p[:len(p)-1]
- }
-
- if strings.Contains(p, patternDirSep) {
- res.isGlob = true
- }
-
- res.pattern = strings.Split(p, patternDirSep)
- return &res
-}
-
-func (p *pattern) Match(path []string, isDir bool) MatchResult {
- if len(path) <= len(p.domain) {
- return NoMatch
- }
- for i, e := range p.domain {
- if path[i] != e {
- return NoMatch
- }
- }
-
- path = path[len(p.domain):]
- if p.isGlob && !p.globMatch(path, isDir) {
- return NoMatch
- } else if !p.isGlob && !p.simpleNameMatch(path, isDir) {
- return NoMatch
- }
-
- if p.inclusion {
- return Include
- } else {
- return Exclude
- }
-}
-
-func (p *pattern) simpleNameMatch(path []string, isDir bool) bool {
- for i, name := range path {
- if match, err := filepath.Match(p.pattern[0], name); err != nil {
- return false
- } else if !match {
- continue
- }
- if p.dirOnly && !isDir && i == len(path)-1 {
- return false
- }
- return true
- }
- return false
-}
-
-func (p *pattern) globMatch(path []string, isDir bool) bool {
- matched := false
- canTraverse := false
- for i, pattern := range p.pattern {
- if pattern == "" {
- canTraverse = false
- continue
- }
- if pattern == zeroToManyDirs {
- if i == len(p.pattern)-1 {
- break
- }
- canTraverse = true
- continue
- }
- if strings.Contains(pattern, zeroToManyDirs) {
- return false
- }
- if len(path) == 0 {
- return false
- }
- if canTraverse {
- canTraverse = false
- for len(path) > 0 {
- e := path[0]
- path = path[1:]
- if match, err := filepath.Match(pattern, e); err != nil {
- return false
- } else if match {
- matched = true
- break
- } else if len(path) == 0 {
- // if nothing left then fail
- matched = false
- }
- }
- } else {
- if match, err := filepath.Match(pattern, path[0]); err != nil || !match {
- return false
- }
- matched = true
- path = path[1:]
- }
- }
- if matched && p.dirOnly && !isDir && len(path) == 0 {
- matched = false
- }
- return matched
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/decoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/decoder.go
deleted file mode 100644
index 9afdce30111..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/decoder.go
+++ /dev/null
@@ -1,178 +0,0 @@
-package idxfile
-
-import (
- "bufio"
- "bytes"
- "errors"
- "io"
-
- "github.com/go-git/go-git/v5/plumbing/hash"
- "github.com/go-git/go-git/v5/utils/binary"
-)
-
-var (
- // ErrUnsupportedVersion is returned by Decode when the idx file version
- // is not supported.
- ErrUnsupportedVersion = errors.New("unsupported version")
- // ErrMalformedIdxFile is returned by Decode when the idx file is corrupted.
- ErrMalformedIdxFile = errors.New("malformed IDX file")
-)
-
-const (
- fanout = 256
- objectIDLength = hash.Size
-)
-
-// Decoder reads and decodes idx files from an input stream.
-type Decoder struct {
- *bufio.Reader
-}
-
-// NewDecoder builds a new idx stream decoder, that reads from r.
-func NewDecoder(r io.Reader) *Decoder {
- return &Decoder{bufio.NewReader(r)}
-}
-
-// Decode reads from the stream and decode the content into the MemoryIndex struct.
-func (d *Decoder) Decode(idx *MemoryIndex) error {
- if err := validateHeader(d); err != nil {
- return err
- }
-
- flow := []func(*MemoryIndex, io.Reader) error{
- readVersion,
- readFanout,
- readObjectNames,
- readCRC32,
- readOffsets,
- readChecksums,
- }
-
- for _, f := range flow {
- if err := f(idx, d); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func validateHeader(r io.Reader) error {
- var h = make([]byte, 4)
- if _, err := io.ReadFull(r, h); err != nil {
- return err
- }
-
- if !bytes.Equal(h, idxHeader) {
- return ErrMalformedIdxFile
- }
-
- return nil
-}
-
-func readVersion(idx *MemoryIndex, r io.Reader) error {
- v, err := binary.ReadUint32(r)
- if err != nil {
- return err
- }
-
- if v > VersionSupported {
- return ErrUnsupportedVersion
- }
-
- idx.Version = v
- return nil
-}
-
-func readFanout(idx *MemoryIndex, r io.Reader) error {
- for k := 0; k < fanout; k++ {
- n, err := binary.ReadUint32(r)
- if err != nil {
- return err
- }
-
- idx.Fanout[k] = n
- idx.FanoutMapping[k] = noMapping
- }
-
- return nil
-}
-
-func readObjectNames(idx *MemoryIndex, r io.Reader) error {
- for k := 0; k < fanout; k++ {
- var buckets uint32
- if k == 0 {
- buckets = idx.Fanout[k]
- } else {
- buckets = idx.Fanout[k] - idx.Fanout[k-1]
- }
-
- if buckets == 0 {
- continue
- }
-
- idx.FanoutMapping[k] = len(idx.Names)
-
- nameLen := int(buckets * objectIDLength)
- bin := make([]byte, nameLen)
- if _, err := io.ReadFull(r, bin); err != nil {
- return err
- }
-
- idx.Names = append(idx.Names, bin)
- idx.Offset32 = append(idx.Offset32, make([]byte, buckets*4))
- idx.CRC32 = append(idx.CRC32, make([]byte, buckets*4))
- }
-
- return nil
-}
-
-func readCRC32(idx *MemoryIndex, r io.Reader) error {
- for k := 0; k < fanout; k++ {
- if pos := idx.FanoutMapping[k]; pos != noMapping {
- if _, err := io.ReadFull(r, idx.CRC32[pos]); err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-func readOffsets(idx *MemoryIndex, r io.Reader) error {
- var o64cnt int
- for k := 0; k < fanout; k++ {
- if pos := idx.FanoutMapping[k]; pos != noMapping {
- if _, err := io.ReadFull(r, idx.Offset32[pos]); err != nil {
- return err
- }
-
- for p := 0; p < len(idx.Offset32[pos]); p += 4 {
- if idx.Offset32[pos][p]&(byte(1)<<7) > 0 {
- o64cnt++
- }
- }
- }
- }
-
- if o64cnt > 0 {
- idx.Offset64 = make([]byte, o64cnt*8)
- if _, err := io.ReadFull(r, idx.Offset64); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func readChecksums(idx *MemoryIndex, r io.Reader) error {
- if _, err := io.ReadFull(r, idx.PackfileChecksum[:]); err != nil {
- return err
- }
-
- if _, err := io.ReadFull(r, idx.IdxChecksum[:]); err != nil {
- return err
- }
-
- return nil
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/doc.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/doc.go
deleted file mode 100644
index 1e628ab4a5e..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/doc.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Package idxfile implements encoding and decoding of packfile idx files.
-//
-// == Original (version 1) pack-*.idx files have the following format:
-//
-// - The header consists of 256 4-byte network byte order
-// integers. N-th entry of this table records the number of
-// objects in the corresponding pack, the first byte of whose
-// object name is less than or equal to N. This is called the
-// 'first-level fan-out' table.
-//
-// - The header is followed by sorted 24-byte entries, one entry
-// per object in the pack. Each entry is:
-//
-// 4-byte network byte order integer, recording where the
-// object is stored in the packfile as the offset from the
-// beginning.
-//
-// 20-byte object name.
-//
-// - The file is concluded with a trailer:
-//
-// A copy of the 20-byte SHA1 checksum at the end of
-// corresponding packfile.
-//
-// 20-byte SHA1-checksum of all of the above.
-//
-// Pack Idx file:
-//
-// -- +--------------------------------+
-// fanout | fanout[0] = 2 (for example) |-.
-// table +--------------------------------+ |
-// | fanout[1] | |
-// +--------------------------------+ |
-// | fanout[2] | |
-// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
-// | fanout[255] = total objects |---.
-// -- +--------------------------------+ | |
-// main | offset | | |
-// index | object name 00XXXXXXXXXXXXXXXX | | |
-// tab +--------------------------------+ | |
-// | offset | | |
-// | object name 00XXXXXXXXXXXXXXXX | | |
-// +--------------------------------+<+ |
-// .-| offset | |
-// | | object name 01XXXXXXXXXXXXXXXX | |
-// | +--------------------------------+ |
-// | | offset | |
-// | | object name 01XXXXXXXXXXXXXXXX | |
-// | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
-// | | offset | |
-// | | object name FFXXXXXXXXXXXXXXXX | |
-// --| +--------------------------------+<--+
-// trailer | | packfile checksum |
-// | +--------------------------------+
-// | | idxfile checksum |
-// | +--------------------------------+
-// .---------.
-// |
-// Pack file entry: <+
-//
-// packed object header:
-// 1-byte size extension bit (MSB)
-// type (next 3 bit)
-// size0 (lower 4-bit)
-// n-byte sizeN (as long as MSB is set, each 7-bit)
-// size0..sizeN form 4+7+7+..+7 bit integer, size0
-// is the least significant part, and sizeN is the
-// most significant part.
-// packed object data:
-// If it is not DELTA, then deflated bytes (the size above
-// is the size before compression).
-// If it is REF_DELTA, then
-// 20-byte base object name SHA1 (the size above is the
-// size of the delta data that follows).
-// delta data, deflated.
-// If it is OFS_DELTA, then
-// n-byte offset (see below) interpreted as a negative
-// offset from the type-byte of the header of the
-// ofs-delta entry (the size above is the size of
-// the delta data that follows).
-// delta data, deflated.
-//
-// offset encoding:
-// n bytes with MSB set in all but the last one.
-// The offset is then the number constructed by
-// concatenating the lower 7 bit of each byte, and
-// for n >= 2 adding 2^7 + 2^14 + ... + 2^(7*(n-1))
-// to the result.
-//
-// == Version 2 pack-*.idx files support packs larger than 4 GiB, and
-// have some other reorganizations. They have the format:
-//
-// - A 4-byte magic number '\377tOc' which is an unreasonable
-// fanout[0] value.
-//
-// - A 4-byte version number (= 2)
-//
-// - A 256-entry fan-out table just like v1.
-//
-// - A table of sorted 20-byte SHA1 object names. These are
-// packed together without offset values to reduce the cache
-// footprint of the binary search for a specific object name.
-//
-// - A table of 4-byte CRC32 values of the packed object data.
-// This is new in v2 so compressed data can be copied directly
-// from pack to pack during repacking without undetected
-// data corruption.
-//
-// - A table of 4-byte offset values (in network byte order).
-// These are usually 31-bit pack file offsets, but large
-// offsets are encoded as an index into the next table with
-// the msbit set.
-//
-// - A table of 8-byte offset entries (empty for pack files less
-// than 2 GiB). Pack files are organized with heavily used
-// objects toward the front, so most object references should
-// not need to refer to this table.
-//
-// - The same trailer as a v1 pack file:
-//
-// A copy of the 20-byte SHA1 checksum at the end of
-// corresponding packfile.
-//
-// 20-byte SHA1-checksum of all of the above.
-//
-// Source:
-// https://www.kernel.org/pub/software/scm/git/docs/v1.7.5/technical/pack-format.txt
-package idxfile
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/encoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/encoder.go
deleted file mode 100644
index 75147376b66..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/encoder.go
+++ /dev/null
@@ -1,141 +0,0 @@
-package idxfile
-
-import (
- "io"
-
- "github.com/go-git/go-git/v5/plumbing/hash"
- "github.com/go-git/go-git/v5/utils/binary"
-)
-
-// Encoder writes MemoryIndex structs to an output stream.
-type Encoder struct {
- io.Writer
- hash hash.Hash
-}
-
-// NewEncoder returns a new stream encoder that writes to w.
-func NewEncoder(w io.Writer) *Encoder {
- h := hash.New(hash.CryptoType)
- mw := io.MultiWriter(w, h)
- return &Encoder{mw, h}
-}
-
-// Encode encodes an MemoryIndex to the encoder writer.
-func (e *Encoder) Encode(idx *MemoryIndex) (int, error) {
- flow := []func(*MemoryIndex) (int, error){
- e.encodeHeader,
- e.encodeFanout,
- e.encodeHashes,
- e.encodeCRC32,
- e.encodeOffsets,
- e.encodeChecksums,
- }
-
- sz := 0
- for _, f := range flow {
- i, err := f(idx)
- sz += i
-
- if err != nil {
- return sz, err
- }
- }
-
- return sz, nil
-}
-
-func (e *Encoder) encodeHeader(idx *MemoryIndex) (int, error) {
- c, err := e.Write(idxHeader)
- if err != nil {
- return c, err
- }
-
- return c + 4, binary.WriteUint32(e, idx.Version)
-}
-
-func (e *Encoder) encodeFanout(idx *MemoryIndex) (int, error) {
- for _, c := range idx.Fanout {
- if err := binary.WriteUint32(e, c); err != nil {
- return 0, err
- }
- }
-
- return fanout * 4, nil
-}
-
-func (e *Encoder) encodeHashes(idx *MemoryIndex) (int, error) {
- var size int
- for k := 0; k < fanout; k++ {
- pos := idx.FanoutMapping[k]
- if pos == noMapping {
- continue
- }
-
- n, err := e.Write(idx.Names[pos])
- if err != nil {
- return size, err
- }
- size += n
- }
- return size, nil
-}
-
-func (e *Encoder) encodeCRC32(idx *MemoryIndex) (int, error) {
- var size int
- for k := 0; k < fanout; k++ {
- pos := idx.FanoutMapping[k]
- if pos == noMapping {
- continue
- }
-
- n, err := e.Write(idx.CRC32[pos])
- if err != nil {
- return size, err
- }
-
- size += n
- }
-
- return size, nil
-}
-
-func (e *Encoder) encodeOffsets(idx *MemoryIndex) (int, error) {
- var size int
- for k := 0; k < fanout; k++ {
- pos := idx.FanoutMapping[k]
- if pos == noMapping {
- continue
- }
-
- n, err := e.Write(idx.Offset32[pos])
- if err != nil {
- return size, err
- }
-
- size += n
- }
-
- if len(idx.Offset64) > 0 {
- n, err := e.Write(idx.Offset64)
- if err != nil {
- return size, err
- }
-
- size += n
- }
-
- return size, nil
-}
-
-func (e *Encoder) encodeChecksums(idx *MemoryIndex) (int, error) {
- if _, err := e.Write(idx.PackfileChecksum[:]); err != nil {
- return 0, err
- }
-
- copy(idx.IdxChecksum[:], e.hash.Sum(nil)[:hash.Size])
- if _, err := e.Write(idx.IdxChecksum[:]); err != nil {
- return 0, err
- }
-
- return hash.HexSize, nil
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/idxfile.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/idxfile.go
deleted file mode 100644
index 9237a743425..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/idxfile.go
+++ /dev/null
@@ -1,347 +0,0 @@
-package idxfile
-
-import (
- "bytes"
- "io"
- "sort"
-
- encbin "encoding/binary"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/hash"
-)
-
-const (
- // VersionSupported is the only idx version supported.
- VersionSupported = 2
-
- noMapping = -1
-)
-
-var (
- idxHeader = []byte{255, 't', 'O', 'c'}
-)
-
-// Index represents an index of a packfile.
-type Index interface {
- // Contains checks whether the given hash is in the index.
- Contains(h plumbing.Hash) (bool, error)
- // FindOffset finds the offset in the packfile for the object with
- // the given hash.
- FindOffset(h plumbing.Hash) (int64, error)
- // FindCRC32 finds the CRC32 of the object with the given hash.
- FindCRC32(h plumbing.Hash) (uint32, error)
- // FindHash finds the hash for the object with the given offset.
- FindHash(o int64) (plumbing.Hash, error)
- // Count returns the number of entries in the index.
- Count() (int64, error)
- // Entries returns an iterator to retrieve all index entries.
- Entries() (EntryIter, error)
- // EntriesByOffset returns an iterator to retrieve all index entries ordered
- // by offset.
- EntriesByOffset() (EntryIter, error)
-}
-
-// MemoryIndex is the in memory representation of an idx file.
-type MemoryIndex struct {
- Version uint32
- Fanout [256]uint32
- // FanoutMapping maps the position in the fanout table to the position
- // in the Names, Offset32 and CRC32 slices. This improves the memory
- // usage by not needing an array with unnecessary empty slots.
- FanoutMapping [256]int
- Names [][]byte
- Offset32 [][]byte
- CRC32 [][]byte
- Offset64 []byte
- PackfileChecksum [hash.Size]byte
- IdxChecksum [hash.Size]byte
-
- offsetHash map[int64]plumbing.Hash
- offsetHashIsFull bool
-}
-
-var _ Index = (*MemoryIndex)(nil)
-
-// NewMemoryIndex returns an instance of a new MemoryIndex.
-func NewMemoryIndex() *MemoryIndex {
- return &MemoryIndex{}
-}
-
-func (idx *MemoryIndex) findHashIndex(h plumbing.Hash) (int, bool) {
- k := idx.FanoutMapping[h[0]]
- if k == noMapping {
- return 0, false
- }
-
- if len(idx.Names) <= k {
- return 0, false
- }
-
- data := idx.Names[k]
- high := uint64(len(idx.Offset32[k])) >> 2
- if high == 0 {
- return 0, false
- }
-
- low := uint64(0)
- for {
- mid := (low + high) >> 1
- offset := mid * objectIDLength
-
- cmp := bytes.Compare(h[:], data[offset:offset+objectIDLength])
- if cmp < 0 {
- high = mid
- } else if cmp == 0 {
- return int(mid), true
- } else {
- low = mid + 1
- }
-
- if low >= high {
- break
- }
- }
-
- return 0, false
-}
-
-// Contains implements the Index interface.
-func (idx *MemoryIndex) Contains(h plumbing.Hash) (bool, error) {
- _, ok := idx.findHashIndex(h)
- return ok, nil
-}
-
-// FindOffset implements the Index interface.
-func (idx *MemoryIndex) FindOffset(h plumbing.Hash) (int64, error) {
- if len(idx.FanoutMapping) <= int(h[0]) {
- return 0, plumbing.ErrObjectNotFound
- }
-
- k := idx.FanoutMapping[h[0]]
- i, ok := idx.findHashIndex(h)
- if !ok {
- return 0, plumbing.ErrObjectNotFound
- }
-
- offset := idx.getOffset(k, i)
-
- if !idx.offsetHashIsFull {
- // Save the offset for reverse lookup
- if idx.offsetHash == nil {
- idx.offsetHash = make(map[int64]plumbing.Hash)
- }
- idx.offsetHash[int64(offset)] = h
- }
-
- return int64(offset), nil
-}
-
-const isO64Mask = uint64(1) << 31
-
-func (idx *MemoryIndex) getOffset(firstLevel, secondLevel int) uint64 {
- offset := secondLevel << 2
- ofs := encbin.BigEndian.Uint32(idx.Offset32[firstLevel][offset : offset+4])
-
- if (uint64(ofs) & isO64Mask) != 0 {
- offset := 8 * (uint64(ofs) & ^isO64Mask)
- n := encbin.BigEndian.Uint64(idx.Offset64[offset : offset+8])
- return n
- }
-
- return uint64(ofs)
-}
-
-// FindCRC32 implements the Index interface.
-func (idx *MemoryIndex) FindCRC32(h plumbing.Hash) (uint32, error) {
- k := idx.FanoutMapping[h[0]]
- i, ok := idx.findHashIndex(h)
- if !ok {
- return 0, plumbing.ErrObjectNotFound
- }
-
- return idx.getCRC32(k, i), nil
-}
-
-func (idx *MemoryIndex) getCRC32(firstLevel, secondLevel int) uint32 {
- offset := secondLevel << 2
- return encbin.BigEndian.Uint32(idx.CRC32[firstLevel][offset : offset+4])
-}
-
-// FindHash implements the Index interface.
-func (idx *MemoryIndex) FindHash(o int64) (plumbing.Hash, error) {
- var hash plumbing.Hash
- var ok bool
-
- if idx.offsetHash != nil {
- if hash, ok = idx.offsetHash[o]; ok {
- return hash, nil
- }
- }
-
- // Lazily generate the reverse offset/hash map if required.
- if !idx.offsetHashIsFull || idx.offsetHash == nil {
- if err := idx.genOffsetHash(); err != nil {
- return plumbing.ZeroHash, err
- }
-
- hash, ok = idx.offsetHash[o]
- }
-
- if !ok {
- return plumbing.ZeroHash, plumbing.ErrObjectNotFound
- }
-
- return hash, nil
-}
-
-// genOffsetHash generates the offset/hash mapping for reverse search.
-func (idx *MemoryIndex) genOffsetHash() error {
- count, err := idx.Count()
- if err != nil {
- return err
- }
-
- idx.offsetHash = make(map[int64]plumbing.Hash, count)
- idx.offsetHashIsFull = true
-
- var hash plumbing.Hash
- i := uint32(0)
- for firstLevel, fanoutValue := range idx.Fanout {
- mappedFirstLevel := idx.FanoutMapping[firstLevel]
- for secondLevel := uint32(0); i < fanoutValue; i++ {
- copy(hash[:], idx.Names[mappedFirstLevel][secondLevel*objectIDLength:])
- offset := int64(idx.getOffset(mappedFirstLevel, int(secondLevel)))
- idx.offsetHash[offset] = hash
- secondLevel++
- }
- }
-
- return nil
-}
-
-// Count implements the Index interface.
-func (idx *MemoryIndex) Count() (int64, error) {
- return int64(idx.Fanout[fanout-1]), nil
-}
-
-// Entries implements the Index interface.
-func (idx *MemoryIndex) Entries() (EntryIter, error) {
- return &idxfileEntryIter{idx, 0, 0, 0}, nil
-}
-
-// EntriesByOffset implements the Index interface.
-func (idx *MemoryIndex) EntriesByOffset() (EntryIter, error) {
- count, err := idx.Count()
- if err != nil {
- return nil, err
- }
-
- iter := &idxfileEntryOffsetIter{
- entries: make(entriesByOffset, count),
- }
-
- entries, err := idx.Entries()
- if err != nil {
- return nil, err
- }
-
- for pos := 0; int64(pos) < count; pos++ {
- entry, err := entries.Next()
- if err != nil {
- return nil, err
- }
-
- iter.entries[pos] = entry
- }
-
- sort.Sort(iter.entries)
-
- return iter, nil
-}
-
-// EntryIter is an iterator that will return the entries in a packfile index.
-type EntryIter interface {
- // Next returns the next entry in the packfile index.
- Next() (*Entry, error)
- // Close closes the iterator.
- Close() error
-}
-
-type idxfileEntryIter struct {
- idx *MemoryIndex
- total int
- firstLevel, secondLevel int
-}
-
-func (i *idxfileEntryIter) Next() (*Entry, error) {
- for {
- if i.firstLevel >= fanout {
- return nil, io.EOF
- }
-
- if i.total >= int(i.idx.Fanout[i.firstLevel]) {
- i.firstLevel++
- i.secondLevel = 0
- continue
- }
-
- mappedFirstLevel := i.idx.FanoutMapping[i.firstLevel]
- entry := new(Entry)
- copy(entry.Hash[:], i.idx.Names[mappedFirstLevel][i.secondLevel*objectIDLength:])
- entry.Offset = i.idx.getOffset(mappedFirstLevel, i.secondLevel)
- entry.CRC32 = i.idx.getCRC32(mappedFirstLevel, i.secondLevel)
-
- i.secondLevel++
- i.total++
-
- return entry, nil
- }
-}
-
-func (i *idxfileEntryIter) Close() error {
- i.firstLevel = fanout
- return nil
-}
-
-// Entry is the in memory representation of an object entry in the idx file.
-type Entry struct {
- Hash plumbing.Hash
- CRC32 uint32
- Offset uint64
-}
-
-type idxfileEntryOffsetIter struct {
- entries entriesByOffset
- pos int
-}
-
-func (i *idxfileEntryOffsetIter) Next() (*Entry, error) {
- if i.pos >= len(i.entries) {
- return nil, io.EOF
- }
-
- entry := i.entries[i.pos]
- i.pos++
-
- return entry, nil
-}
-
-func (i *idxfileEntryOffsetIter) Close() error {
- i.pos = len(i.entries) + 1
- return nil
-}
-
-type entriesByOffset []*Entry
-
-func (o entriesByOffset) Len() int {
- return len(o)
-}
-
-func (o entriesByOffset) Less(i int, j int) bool {
- return o[i].Offset < o[j].Offset
-}
-
-func (o entriesByOffset) Swap(i int, j int) {
- o[i], o[j] = o[j], o[i]
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/writer.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/writer.go
deleted file mode 100644
index c4c21e1676d..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/writer.go
+++ /dev/null
@@ -1,193 +0,0 @@
-package idxfile
-
-import (
- "bytes"
- "fmt"
- "math"
- "sort"
- "sync"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/utils/binary"
-)
-
-// objects implements sort.Interface and uses hash as sorting key.
-type objects []Entry
-
-// Writer implements a packfile Observer interface and is used to generate
-// indexes.
-type Writer struct {
- m sync.Mutex
-
- count uint32
- checksum plumbing.Hash
- objects objects
- offset64 uint32
- finished bool
- index *MemoryIndex
- added map[plumbing.Hash]struct{}
-}
-
-// Index returns a previously created MemoryIndex or creates a new one if
-// needed.
-func (w *Writer) Index() (*MemoryIndex, error) {
- w.m.Lock()
- defer w.m.Unlock()
-
- if w.index == nil {
- return w.createIndex()
- }
-
- return w.index, nil
-}
-
-// Add appends new object data.
-func (w *Writer) Add(h plumbing.Hash, pos uint64, crc uint32) {
- w.m.Lock()
- defer w.m.Unlock()
-
- if w.added == nil {
- w.added = make(map[plumbing.Hash]struct{})
- }
-
- if _, ok := w.added[h]; !ok {
- w.added[h] = struct{}{}
- w.objects = append(w.objects, Entry{h, crc, pos})
- }
-
-}
-
-func (w *Writer) Finished() bool {
- return w.finished
-}
-
-// OnHeader implements packfile.Observer interface.
-func (w *Writer) OnHeader(count uint32) error {
- w.count = count
- w.objects = make(objects, 0, count)
- return nil
-}
-
-// OnInflatedObjectHeader implements packfile.Observer interface.
-func (w *Writer) OnInflatedObjectHeader(t plumbing.ObjectType, objSize int64, pos int64) error {
- return nil
-}
-
-// OnInflatedObjectContent implements packfile.Observer interface.
-func (w *Writer) OnInflatedObjectContent(h plumbing.Hash, pos int64, crc uint32, _ []byte) error {
- w.Add(h, uint64(pos), crc)
- return nil
-}
-
-// OnFooter implements packfile.Observer interface.
-func (w *Writer) OnFooter(h plumbing.Hash) error {
- w.checksum = h
- w.finished = true
- _, err := w.createIndex()
-
- return err
-}
-
-// creatIndex returns a filled MemoryIndex with the information filled by
-// the observer callbacks.
-func (w *Writer) createIndex() (*MemoryIndex, error) {
- if !w.finished {
- return nil, fmt.Errorf("the index still hasn't finished building")
- }
-
- idx := new(MemoryIndex)
- w.index = idx
-
- sort.Sort(w.objects)
-
- // unmap all fans by default
- for i := range idx.FanoutMapping {
- idx.FanoutMapping[i] = noMapping
- }
-
- buf := new(bytes.Buffer)
-
- last := -1
- bucket := -1
- for i, o := range w.objects {
- fan := o.Hash[0]
-
- // fill the gaps between fans
- for j := last + 1; j < int(fan); j++ {
- idx.Fanout[j] = uint32(i)
- }
-
- // update the number of objects for this position
- idx.Fanout[fan] = uint32(i + 1)
-
- // we move from one bucket to another, update counters and allocate
- // memory
- if last != int(fan) {
- bucket++
- idx.FanoutMapping[fan] = bucket
- last = int(fan)
-
- idx.Names = append(idx.Names, make([]byte, 0))
- idx.Offset32 = append(idx.Offset32, make([]byte, 0))
- idx.CRC32 = append(idx.CRC32, make([]byte, 0))
- }
-
- idx.Names[bucket] = append(idx.Names[bucket], o.Hash[:]...)
-
- offset := o.Offset
- if offset > math.MaxInt32 {
- var err error
- offset, err = w.addOffset64(offset)
- if err != nil {
- return nil, err
- }
- }
-
- buf.Truncate(0)
- if err := binary.WriteUint32(buf, uint32(offset)); err != nil {
- return nil, err
- }
- idx.Offset32[bucket] = append(idx.Offset32[bucket], buf.Bytes()...)
-
- buf.Truncate(0)
- if err := binary.WriteUint32(buf, o.CRC32); err != nil {
- return nil, err
- }
- idx.CRC32[bucket] = append(idx.CRC32[bucket], buf.Bytes()...)
- }
-
- for j := last + 1; j < 256; j++ {
- idx.Fanout[j] = uint32(len(w.objects))
- }
-
- idx.Version = VersionSupported
- idx.PackfileChecksum = w.checksum
-
- return idx, nil
-}
-
-func (w *Writer) addOffset64(pos uint64) (uint64, error) {
- buf := new(bytes.Buffer)
- if err := binary.WriteUint64(buf, pos); err != nil {
- return 0, err
- }
-
- w.index.Offset64 = append(w.index.Offset64, buf.Bytes()...)
- index := uint64(w.offset64 | (1 << 31))
- w.offset64++
-
- return index, nil
-}
-
-func (o objects) Len() int {
- return len(o)
-}
-
-func (o objects) Less(i int, j int) bool {
- cmp := bytes.Compare(o[i].Hash[:], o[j].Hash[:])
- return cmp < 0
-}
-
-func (o objects) Swap(i int, j int) {
- o[i], o[j] = o[j], o[i]
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/index/decoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/index/decoder.go
deleted file mode 100644
index 6778cf74ec8..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/index/decoder.go
+++ /dev/null
@@ -1,478 +0,0 @@
-package index
-
-import (
- "bufio"
- "bytes"
- "errors"
- "io"
-
- "strconv"
- "time"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/hash"
- "github.com/go-git/go-git/v5/utils/binary"
-)
-
-var (
- // DecodeVersionSupported is the range of supported index versions
- DecodeVersionSupported = struct{ Min, Max uint32 }{Min: 2, Max: 4}
-
- // ErrMalformedSignature is returned by Decode when the index header file is
- // malformed
- ErrMalformedSignature = errors.New("malformed index signature file")
- // ErrInvalidChecksum is returned by Decode if the SHA1 hash mismatch with
- // the read content
- ErrInvalidChecksum = errors.New("invalid checksum")
-
- errUnknownExtension = errors.New("unknown extension")
-)
-
-const (
- entryHeaderLength = 62
- entryExtended = 0x4000
- entryValid = 0x8000
- nameMask = 0xfff
- intentToAddMask = 1 << 13
- skipWorkTreeMask = 1 << 14
-)
-
-// A Decoder reads and decodes index files from an input stream.
-type Decoder struct {
- r io.Reader
- hash hash.Hash
- lastEntry *Entry
-
- extReader *bufio.Reader
-}
-
-// NewDecoder returns a new decoder that reads from r.
-func NewDecoder(r io.Reader) *Decoder {
- h := hash.New(hash.CryptoType)
- return &Decoder{
- r: io.TeeReader(r, h),
- hash: h,
- extReader: bufio.NewReader(nil),
- }
-}
-
-// Decode reads the whole index object from its input and stores it in the
-// value pointed to by idx.
-func (d *Decoder) Decode(idx *Index) error {
- var err error
- idx.Version, err = validateHeader(d.r)
- if err != nil {
- return err
- }
-
- entryCount, err := binary.ReadUint32(d.r)
- if err != nil {
- return err
- }
-
- if err := d.readEntries(idx, int(entryCount)); err != nil {
- return err
- }
-
- return d.readExtensions(idx)
-}
-
-func (d *Decoder) readEntries(idx *Index, count int) error {
- for i := 0; i < count; i++ {
- e, err := d.readEntry(idx)
- if err != nil {
- return err
- }
-
- d.lastEntry = e
- idx.Entries = append(idx.Entries, e)
- }
-
- return nil
-}
-
-func (d *Decoder) readEntry(idx *Index) (*Entry, error) {
- e := &Entry{}
-
- var msec, mnsec, sec, nsec uint32
- var flags uint16
-
- flow := []interface{}{
- &sec, &nsec,
- &msec, &mnsec,
- &e.Dev,
- &e.Inode,
- &e.Mode,
- &e.UID,
- &e.GID,
- &e.Size,
- &e.Hash,
- &flags,
- }
-
- if err := binary.Read(d.r, flow...); err != nil {
- return nil, err
- }
-
- read := entryHeaderLength
-
- if sec != 0 || nsec != 0 {
- e.CreatedAt = time.Unix(int64(sec), int64(nsec))
- }
-
- if msec != 0 || mnsec != 0 {
- e.ModifiedAt = time.Unix(int64(msec), int64(mnsec))
- }
-
- e.Stage = Stage(flags>>12) & 0x3
-
- if flags&entryExtended != 0 {
- extended, err := binary.ReadUint16(d.r)
- if err != nil {
- return nil, err
- }
-
- read += 2
- e.IntentToAdd = extended&intentToAddMask != 0
- e.SkipWorktree = extended&skipWorkTreeMask != 0
- }
-
- if err := d.readEntryName(idx, e, flags); err != nil {
- return nil, err
- }
-
- return e, d.padEntry(idx, e, read)
-}
-
-func (d *Decoder) readEntryName(idx *Index, e *Entry, flags uint16) error {
- var name string
- var err error
-
- switch idx.Version {
- case 2, 3:
- len := flags & nameMask
- name, err = d.doReadEntryName(len)
- case 4:
- name, err = d.doReadEntryNameV4()
- default:
- return ErrUnsupportedVersion
- }
-
- if err != nil {
- return err
- }
-
- e.Name = name
- return nil
-}
-
-func (d *Decoder) doReadEntryNameV4() (string, error) {
- l, err := binary.ReadVariableWidthInt(d.r)
- if err != nil {
- return "", err
- }
-
- var base string
- if d.lastEntry != nil {
- base = d.lastEntry.Name[:len(d.lastEntry.Name)-int(l)]
- }
-
- name, err := binary.ReadUntil(d.r, '\x00')
- if err != nil {
- return "", err
- }
-
- return base + string(name), nil
-}
-
-func (d *Decoder) doReadEntryName(len uint16) (string, error) {
- name := make([]byte, len)
- _, err := io.ReadFull(d.r, name)
-
- return string(name), err
-}
-
-// Index entries are padded out to the next 8 byte alignment
-// for historical reasons related to how C Git read the files.
-func (d *Decoder) padEntry(idx *Index, e *Entry, read int) error {
- if idx.Version == 4 {
- return nil
- }
-
- entrySize := read + len(e.Name)
- padLen := 8 - entrySize%8
- _, err := io.CopyN(io.Discard, d.r, int64(padLen))
- return err
-}
-
-func (d *Decoder) readExtensions(idx *Index) error {
- // TODO: support 'Split index' and 'Untracked cache' extensions, take in
- // count that they are not supported by jgit or libgit
-
- var expected []byte
- var err error
-
- var header [4]byte
- for {
- expected = d.hash.Sum(nil)
-
- var n int
- if n, err = io.ReadFull(d.r, header[:]); err != nil {
- if n == 0 {
- err = io.EOF
- }
-
- break
- }
-
- err = d.readExtension(idx, header[:])
- if err != nil {
- break
- }
- }
-
- if err != errUnknownExtension {
- return err
- }
-
- return d.readChecksum(expected, header)
-}
-
-func (d *Decoder) readExtension(idx *Index, header []byte) error {
- switch {
- case bytes.Equal(header, treeExtSignature):
- r, err := d.getExtensionReader()
- if err != nil {
- return err
- }
-
- idx.Cache = &Tree{}
- d := &treeExtensionDecoder{r}
- if err := d.Decode(idx.Cache); err != nil {
- return err
- }
- case bytes.Equal(header, resolveUndoExtSignature):
- r, err := d.getExtensionReader()
- if err != nil {
- return err
- }
-
- idx.ResolveUndo = &ResolveUndo{}
- d := &resolveUndoDecoder{r}
- if err := d.Decode(idx.ResolveUndo); err != nil {
- return err
- }
- case bytes.Equal(header, endOfIndexEntryExtSignature):
- r, err := d.getExtensionReader()
- if err != nil {
- return err
- }
-
- idx.EndOfIndexEntry = &EndOfIndexEntry{}
- d := &endOfIndexEntryDecoder{r}
- if err := d.Decode(idx.EndOfIndexEntry); err != nil {
- return err
- }
- default:
- return errUnknownExtension
- }
-
- return nil
-}
-
-func (d *Decoder) getExtensionReader() (*bufio.Reader, error) {
- len, err := binary.ReadUint32(d.r)
- if err != nil {
- return nil, err
- }
-
- d.extReader.Reset(&io.LimitedReader{R: d.r, N: int64(len)})
- return d.extReader, nil
-}
-
-func (d *Decoder) readChecksum(expected []byte, alreadyRead [4]byte) error {
- var h plumbing.Hash
- copy(h[:4], alreadyRead[:])
-
- if _, err := io.ReadFull(d.r, h[4:]); err != nil {
- return err
- }
-
- if !bytes.Equal(h[:], expected) {
- return ErrInvalidChecksum
- }
-
- return nil
-}
-
-func validateHeader(r io.Reader) (version uint32, err error) {
- var s = make([]byte, 4)
- if _, err := io.ReadFull(r, s); err != nil {
- return 0, err
- }
-
- if !bytes.Equal(s, indexSignature) {
- return 0, ErrMalformedSignature
- }
-
- version, err = binary.ReadUint32(r)
- if err != nil {
- return 0, err
- }
-
- if version < DecodeVersionSupported.Min || version > DecodeVersionSupported.Max {
- return 0, ErrUnsupportedVersion
- }
-
- return
-}
-
-type treeExtensionDecoder struct {
- r *bufio.Reader
-}
-
-func (d *treeExtensionDecoder) Decode(t *Tree) error {
- for {
- e, err := d.readEntry()
- if err != nil {
- if err == io.EOF {
- return nil
- }
-
- return err
- }
-
- if e == nil {
- continue
- }
-
- t.Entries = append(t.Entries, *e)
- }
-}
-
-func (d *treeExtensionDecoder) readEntry() (*TreeEntry, error) {
- e := &TreeEntry{}
-
- path, err := binary.ReadUntil(d.r, '\x00')
- if err != nil {
- return nil, err
- }
-
- e.Path = string(path)
-
- count, err := binary.ReadUntil(d.r, ' ')
- if err != nil {
- return nil, err
- }
-
- i, err := strconv.Atoi(string(count))
- if err != nil {
- return nil, err
- }
-
- // An entry can be in an invalidated state and is represented by having a
- // negative number in the entry_count field.
- if i == -1 {
- return nil, nil
- }
-
- e.Entries = i
- trees, err := binary.ReadUntil(d.r, '\n')
- if err != nil {
- return nil, err
- }
-
- i, err = strconv.Atoi(string(trees))
- if err != nil {
- return nil, err
- }
-
- e.Trees = i
- _, err = io.ReadFull(d.r, e.Hash[:])
- if err != nil {
- return nil, err
- }
- return e, nil
-}
-
-type resolveUndoDecoder struct {
- r *bufio.Reader
-}
-
-func (d *resolveUndoDecoder) Decode(ru *ResolveUndo) error {
- for {
- e, err := d.readEntry()
- if err != nil {
- if err == io.EOF {
- return nil
- }
-
- return err
- }
-
- ru.Entries = append(ru.Entries, *e)
- }
-}
-
-func (d *resolveUndoDecoder) readEntry() (*ResolveUndoEntry, error) {
- e := &ResolveUndoEntry{
- Stages: make(map[Stage]plumbing.Hash),
- }
-
- path, err := binary.ReadUntil(d.r, '\x00')
- if err != nil {
- return nil, err
- }
-
- e.Path = string(path)
-
- for i := 0; i < 3; i++ {
- if err := d.readStage(e, Stage(i+1)); err != nil {
- return nil, err
- }
- }
-
- for s := range e.Stages {
- var hash plumbing.Hash
- if _, err := io.ReadFull(d.r, hash[:]); err != nil {
- return nil, err
- }
-
- e.Stages[s] = hash
- }
-
- return e, nil
-}
-
-func (d *resolveUndoDecoder) readStage(e *ResolveUndoEntry, s Stage) error {
- ascii, err := binary.ReadUntil(d.r, '\x00')
- if err != nil {
- return err
- }
-
- stage, err := strconv.ParseInt(string(ascii), 8, 64)
- if err != nil {
- return err
- }
-
- if stage != 0 {
- e.Stages[s] = plumbing.ZeroHash
- }
-
- return nil
-}
-
-type endOfIndexEntryDecoder struct {
- r *bufio.Reader
-}
-
-func (d *endOfIndexEntryDecoder) Decode(e *EndOfIndexEntry) error {
- var err error
- e.Offset, err = binary.ReadUint32(d.r)
- if err != nil {
- return err
- }
-
- _, err = io.ReadFull(d.r, e.Hash[:])
- return err
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/index/doc.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/index/doc.go
deleted file mode 100644
index 39ae6ad5f91..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/index/doc.go
+++ /dev/null
@@ -1,360 +0,0 @@
-// Package index implements encoding and decoding of index format files.
-//
-// Git index format
-// ================
-//
-// == The Git index file has the following format
-//
-// All binary numbers are in network byte order. Version 2 is described
-// here unless stated otherwise.
-//
-// - A 12-byte header consisting of
-//
-// 4-byte signature:
-// The signature is { 'D', 'I', 'R', 'C' } (stands for "dircache")
-//
-// 4-byte version number:
-// The current supported versions are 2, 3 and 4.
-//
-// 32-bit number of index entries.
-//
-// - A number of sorted index entries (see below).
-//
-// - Extensions
-//
-// Extensions are identified by signature. Optional extensions can
-// be ignored if Git does not understand them.
-//
-// Git currently supports cached tree and resolve undo extensions.
-//
-// 4-byte extension signature. If the first byte is 'A'..'Z' the
-// extension is optional and can be ignored.
-//
-// 32-bit size of the extension
-//
-// Extension data
-//
-// - 160-bit SHA-1 over the content of the index file before this
-// checksum.
-//
-// == Index entry
-//
-// Index entries are sorted in ascending order on the name field,
-// interpreted as a string of unsigned bytes (i.e. memcmp() order, no
-// localization, no special casing of directory separator '/'). Entries
-// with the same name are sorted by their stage field.
-//
-// 32-bit ctime seconds, the last time a file's metadata changed
-// this is stat(2) data
-//
-// 32-bit ctime nanosecond fractions
-// this is stat(2) data
-//
-// 32-bit mtime seconds, the last time a file's data changed
-// this is stat(2) data
-//
-// 32-bit mtime nanosecond fractions
-// this is stat(2) data
-//
-// 32-bit dev
-// this is stat(2) data
-//
-// 32-bit ino
-// this is stat(2) data
-//
-// 32-bit mode, split into (high to low bits)
-//
-// 4-bit object type
-// valid values in binary are 1000 (regular file), 1010 (symbolic link)
-// and 1110 (gitlink)
-//
-// 3-bit unused
-//
-// 9-bit unix permission. Only 0755 and 0644 are valid for regular files.
-// Symbolic links and gitlinks have value 0 in this field.
-//
-// 32-bit uid
-// this is stat(2) data
-//
-// 32-bit gid
-// this is stat(2) data
-//
-// 32-bit file size
-// This is the on-disk size from stat(2), truncated to 32-bit.
-//
-// 160-bit SHA-1 for the represented object
-//
-// A 16-bit 'flags' field split into (high to low bits)
-//
-// 1-bit assume-valid flag
-//
-// 1-bit extended flag (must be zero in version 2)
-//
-// 2-bit stage (during merge)
-//
-// 12-bit name length if the length is less than 0xFFF; otherwise 0xFFF
-// is stored in this field.
-//
-// (Version 3 or later) A 16-bit field, only applicable if the
-// "extended flag" above is 1, split into (high to low bits).
-//
-// 1-bit reserved for future
-//
-// 1-bit skip-worktree flag (used by sparse checkout)
-//
-// 1-bit intent-to-add flag (used by "git add -N")
-//
-// 13-bit unused, must be zero
-//
-// Entry path name (variable length) relative to top level directory
-// (without leading slash). '/' is used as path separator. The special
-// path components ".", ".." and ".git" (without quotes) are disallowed.
-// Trailing slash is also disallowed.
-//
-// The exact encoding is undefined, but the '.' and '/' characters
-// are encoded in 7-bit ASCII and the encoding cannot contain a NUL
-// byte (iow, this is a UNIX pathname).
-//
-// (Version 4) In version 4, the entry path name is prefix-compressed
-// relative to the path name for the previous entry (the very first
-// entry is encoded as if the path name for the previous entry is an
-// empty string). At the beginning of an entry, an integer N in the
-// variable width encoding (the same encoding as the offset is encoded
-// for OFS_DELTA pack entries; see pack-format.txt) is stored, followed
-// by a NUL-terminated string S. Removing N bytes from the end of the
-// path name for the previous entry, and replacing it with the string S
-// yields the path name for this entry.
-//
-// 1-8 nul bytes as necessary to pad the entry to a multiple of eight bytes
-// while keeping the name NUL-terminated.
-//
-// (Version 4) In version 4, the padding after the pathname does not
-// exist.
-//
-// Interpretation of index entries in split index mode is completely
-// different. See below for details.
-//
-// == Extensions
-//
-// === Cached tree
-//
-// Cached tree extension contains pre-computed hashes for trees that can
-// be derived from the index. It helps speed up tree object generation
-// from index for a new commit.
-//
-// When a path is updated in index, the path must be invalidated and
-// removed from tree cache.
-//
-// The signature for this extension is { 'T', 'R', 'E', 'E' }.
-//
-// A series of entries fill the entire extension; each of which
-// consists of:
-//
-// - NUL-terminated path component (relative to its parent directory);
-//
-// - ASCII decimal number of entries in the index that is covered by the
-// tree this entry represents (entry_count);
-//
-// - A space (ASCII 32);
-//
-// - ASCII decimal number that represents the number of subtrees this
-// tree has;
-//
-// - A newline (ASCII 10); and
-//
-// - 160-bit object name for the object that would result from writing
-// this span of index as a tree.
-//
-// An entry can be in an invalidated state and is represented by having
-// a negative number in the entry_count field. In this case, there is no
-// object name and the next entry starts immediately after the newline.
-// When writing an invalid entry, -1 should always be used as entry_count.
-//
-// The entries are written out in the top-down, depth-first order. The
-// first entry represents the root level of the repository, followed by the
-// first subtree--let's call this A--of the root level (with its name
-// relative to the root level), followed by the first subtree of A (with
-// its name relative to A), ...
-//
-// === Resolve undo
-//
-// A conflict is represented in the index as a set of higher stage entries.
-// When a conflict is resolved (e.g. with "git add path"), these higher
-// stage entries will be removed and a stage-0 entry with proper resolution
-// is added.
-//
-// When these higher stage entries are removed, they are saved in the
-// resolve undo extension, so that conflicts can be recreated (e.g. with
-// "git checkout -m"), in case users want to redo a conflict resolution
-// from scratch.
-//
-// The signature for this extension is { 'R', 'E', 'U', 'C' }.
-//
-// A series of entries fill the entire extension; each of which
-// consists of:
-//
-// - NUL-terminated pathname the entry describes (relative to the root of
-// the repository, i.e. full pathname);
-//
-// - Three NUL-terminated ASCII octal numbers, entry mode of entries in
-// stage 1 to 3 (a missing stage is represented by "0" in this field);
-// and
-//
-// - At most three 160-bit object names of the entry in stages from 1 to 3
-// (nothing is written for a missing stage).
-//
-// === Split index
-//
-// In split index mode, the majority of index entries could be stored
-// in a separate file. This extension records the changes to be made on
-// top of that to produce the final index.
-//
-// The signature for this extension is { 'l', 'i', 'n', 'k' }.
-//
-// The extension consists of:
-//
-// - 160-bit SHA-1 of the shared index file. The shared index file path
-// is $GIT_DIR/sharedindex.. If all 160 bits are zero, the
-// index does not require a shared index file.
-//
-// - An ewah-encoded delete bitmap, each bit represents an entry in the
-// shared index. If a bit is set, its corresponding entry in the
-// shared index will be removed from the final index. Note, because
-// a delete operation changes index entry positions, but we do need
-// original positions in replace phase, it's best to just mark
-// entries for removal, then do a mass deletion after replacement.
-//
-// - An ewah-encoded replace bitmap, each bit represents an entry in
-// the shared index. If a bit is set, its corresponding entry in the
-// shared index will be replaced with an entry in this index
-// file. All replaced entries are stored in sorted order in this
-// index. The first "1" bit in the replace bitmap corresponds to the
-// first index entry, the second "1" bit to the second entry and so
-// on. Replaced entries may have empty path names to save space.
-//
-// The remaining index entries after replaced ones will be added to the
-// final index. These added entries are also sorted by entry name then
-// stage.
-//
-// == Untracked cache
-//
-// Untracked cache saves the untracked file list and necessary data to
-// verify the cache. The signature for this extension is { 'U', 'N',
-// 'T', 'R' }.
-//
-// The extension starts with
-//
-// - A sequence of NUL-terminated strings, preceded by the size of the
-// sequence in variable width encoding. Each string describes the
-// environment where the cache can be used.
-//
-// - Stat data of $GIT_DIR/info/exclude. See "Index entry" section from
-// ctime field until "file size".
-//
-// - Stat data of plumbing.excludesfile
-//
-// - 32-bit dir_flags (see struct dir_struct)
-//
-// - 160-bit SHA-1 of $GIT_DIR/info/exclude. Null SHA-1 means the file
-// does not exist.
-//
-// - 160-bit SHA-1 of plumbing.excludesfile. Null SHA-1 means the file does
-// not exist.
-//
-// - NUL-terminated string of per-dir exclude file name. This usually
-// is ".gitignore".
-//
-// - The number of following directory blocks, variable width
-// encoding. If this number is zero, the extension ends here with a
-// following NUL.
-//
-// - A number of directory blocks in depth-first-search order, each
-// consists of
-//
-// - The number of untracked entries, variable width encoding.
-//
-// - The number of sub-directory blocks, variable width encoding.
-//
-// - The directory name terminated by NUL.
-//
-// - A number of untracked file/dir names terminated by NUL.
-//
-// The remaining data of each directory block is grouped by type:
-//
-// - An ewah bitmap, the n-th bit marks whether the n-th directory has
-// valid untracked cache entries.
-//
-// - An ewah bitmap, the n-th bit records "check-only" bit of
-// read_directory_recursive() for the n-th directory.
-//
-// - An ewah bitmap, the n-th bit indicates whether SHA-1 and stat data
-// is valid for the n-th directory and exists in the next data.
-//
-// - An array of stat data. The n-th data corresponds with the n-th
-// "one" bit in the previous ewah bitmap.
-//
-// - An array of SHA-1. The n-th SHA-1 corresponds with the n-th "one" bit
-// in the previous ewah bitmap.
-//
-// - One NUL.
-//
-// == File System Monitor cache
-//
-// The file system monitor cache tracks files for which the core.fsmonitor
-// hook has told us about changes. The signature for this extension is
-// { 'F', 'S', 'M', 'N' }.
-//
-// The extension starts with
-//
-// - 32-bit version number: the current supported version is 1.
-//
-// - 64-bit time: the extension data reflects all changes through the given
-// time which is stored as the nanoseconds elapsed since midnight,
-// January 1, 1970.
-//
-// - 32-bit bitmap size: the size of the CE_FSMONITOR_VALID bitmap.
-//
-// - An ewah bitmap, the n-th bit indicates whether the n-th index entry
-// is not CE_FSMONITOR_VALID.
-//
-// == End of Index Entry
-//
-// The End of Index Entry (EOIE) is used to locate the end of the variable
-// length index entries and the beginning of the extensions. Code can take
-// advantage of this to quickly locate the index extensions without having
-// to parse through all of the index entries.
-//
-// Because it must be able to be loaded before the variable length cache
-// entries and other index extensions, this extension must be written last.
-// The signature for this extension is { 'E', 'O', 'I', 'E' }.
-//
-// The extension consists of:
-//
-// - 32-bit offset to the end of the index entries
-//
-// - 160-bit SHA-1 over the extension types and their sizes (but not
-// their contents). E.g. if we have "TREE" extension that is N-bytes
-// long, "REUC" extension that is M-bytes long, followed by "EOIE",
-// then the hash would be:
-//
-// SHA-1("TREE" + +
-// "REUC" + )
-//
-// == Index Entry Offset Table
-//
-// The Index Entry Offset Table (IEOT) is used to help address the CPU
-// cost of loading the index by enabling multi-threading the process of
-// converting cache entries from the on-disk format to the in-memory format.
-// The signature for this extension is { 'I', 'E', 'O', 'T' }.
-//
-// The extension consists of:
-//
-// - 32-bit version (currently 1)
-//
-// - A number of index offset entries each consisting of:
-//
-// - 32-bit offset from the beginning of the file to the first cache entry
-// in this block of entries.
-//
-// - 32-bit count of cache entries in this blockpackage index
-package index
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/index/encoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/index/encoder.go
deleted file mode 100644
index fa2d814454d..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/index/encoder.go
+++ /dev/null
@@ -1,165 +0,0 @@
-package index
-
-import (
- "bytes"
- "errors"
- "io"
- "sort"
- "time"
-
- "github.com/go-git/go-git/v5/plumbing/hash"
- "github.com/go-git/go-git/v5/utils/binary"
-)
-
-var (
- // EncodeVersionSupported is the range of supported index versions
- EncodeVersionSupported uint32 = 3
-
- // ErrInvalidTimestamp is returned by Encode if a Index with a Entry with
- // negative timestamp values
- ErrInvalidTimestamp = errors.New("negative timestamps are not allowed")
-)
-
-// An Encoder writes an Index to an output stream.
-type Encoder struct {
- w io.Writer
- hash hash.Hash
-}
-
-// NewEncoder returns a new encoder that writes to w.
-func NewEncoder(w io.Writer) *Encoder {
- h := hash.New(hash.CryptoType)
- mw := io.MultiWriter(w, h)
- return &Encoder{mw, h}
-}
-
-// Encode writes the Index to the stream of the encoder.
-func (e *Encoder) Encode(idx *Index) error {
- // TODO: support v4
- // TODO: support extensions
- if idx.Version > EncodeVersionSupported {
- return ErrUnsupportedVersion
- }
-
- if err := e.encodeHeader(idx); err != nil {
- return err
- }
-
- if err := e.encodeEntries(idx); err != nil {
- return err
- }
-
- return e.encodeFooter()
-}
-
-func (e *Encoder) encodeHeader(idx *Index) error {
- return binary.Write(e.w,
- indexSignature,
- idx.Version,
- uint32(len(idx.Entries)),
- )
-}
-
-func (e *Encoder) encodeEntries(idx *Index) error {
- sort.Sort(byName(idx.Entries))
-
- for _, entry := range idx.Entries {
- if err := e.encodeEntry(entry); err != nil {
- return err
- }
- entryLength := entryHeaderLength
- if entry.IntentToAdd || entry.SkipWorktree {
- entryLength += 2
- }
-
- wrote := entryLength + len(entry.Name)
- if err := e.padEntry(wrote); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (e *Encoder) encodeEntry(entry *Entry) error {
- sec, nsec, err := e.timeToUint32(&entry.CreatedAt)
- if err != nil {
- return err
- }
-
- msec, mnsec, err := e.timeToUint32(&entry.ModifiedAt)
- if err != nil {
- return err
- }
-
- flags := uint16(entry.Stage&0x3) << 12
- if l := len(entry.Name); l < nameMask {
- flags |= uint16(l)
- } else {
- flags |= nameMask
- }
-
- flow := []interface{}{
- sec, nsec,
- msec, mnsec,
- entry.Dev,
- entry.Inode,
- entry.Mode,
- entry.UID,
- entry.GID,
- entry.Size,
- entry.Hash[:],
- }
-
- flagsFlow := []interface{}{flags}
-
- if entry.IntentToAdd || entry.SkipWorktree {
- var extendedFlags uint16
-
- if entry.IntentToAdd {
- extendedFlags |= intentToAddMask
- }
- if entry.SkipWorktree {
- extendedFlags |= skipWorkTreeMask
- }
-
- flagsFlow = []interface{}{flags | entryExtended, extendedFlags}
- }
-
- flow = append(flow, flagsFlow...)
-
- if err := binary.Write(e.w, flow...); err != nil {
- return err
- }
-
- return binary.Write(e.w, []byte(entry.Name))
-}
-
-func (e *Encoder) timeToUint32(t *time.Time) (uint32, uint32, error) {
- if t.IsZero() {
- return 0, 0, nil
- }
-
- if t.Unix() < 0 || t.UnixNano() < 0 {
- return 0, 0, ErrInvalidTimestamp
- }
-
- return uint32(t.Unix()), uint32(t.Nanosecond()), nil
-}
-
-func (e *Encoder) padEntry(wrote int) error {
- padLen := 8 - wrote%8
-
- _, err := e.w.Write(bytes.Repeat([]byte{'\x00'}, padLen))
- return err
-}
-
-func (e *Encoder) encodeFooter() error {
- return binary.Write(e.w, e.hash.Sum(nil))
-}
-
-type byName []*Entry
-
-func (l byName) Len() int { return len(l) }
-func (l byName) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
-func (l byName) Less(i, j int) bool { return l[i].Name < l[j].Name }
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/index/index.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/index/index.go
deleted file mode 100644
index f4c7647d343..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/index/index.go
+++ /dev/null
@@ -1,231 +0,0 @@
-package index
-
-import (
- "bytes"
- "errors"
- "fmt"
- "path/filepath"
- "strings"
- "time"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/filemode"
-)
-
-var (
- // ErrUnsupportedVersion is returned by Decode when the index file version
- // is not supported.
- ErrUnsupportedVersion = errors.New("unsupported version")
- // ErrEntryNotFound is returned by Index.Entry, if an entry is not found.
- ErrEntryNotFound = errors.New("entry not found")
-
- indexSignature = []byte{'D', 'I', 'R', 'C'}
- treeExtSignature = []byte{'T', 'R', 'E', 'E'}
- resolveUndoExtSignature = []byte{'R', 'E', 'U', 'C'}
- endOfIndexEntryExtSignature = []byte{'E', 'O', 'I', 'E'}
-)
-
-// Stage during merge
-type Stage int
-
-const (
- // Merged is the default stage, fully merged
- Merged Stage = 1
- // AncestorMode is the base revision
- AncestorMode Stage = 1
- // OurMode is the first tree revision, ours
- OurMode Stage = 2
- // TheirMode is the second tree revision, theirs
- TheirMode Stage = 3
-)
-
-// Index contains the information about which objects are currently checked out
-// in the worktree, having information about the working files. Changes in
-// worktree are detected using this Index. The Index is also used during merges
-type Index struct {
- // Version is index version
- Version uint32
- // Entries collection of entries represented by this Index. The order of
- // this collection is not guaranteed
- Entries []*Entry
- // Cache represents the 'Cached tree' extension
- Cache *Tree
- // ResolveUndo represents the 'Resolve undo' extension
- ResolveUndo *ResolveUndo
- // EndOfIndexEntry represents the 'End of Index Entry' extension
- EndOfIndexEntry *EndOfIndexEntry
-}
-
-// Add creates a new Entry and returns it. The caller should first check that
-// another entry with the same path does not exist.
-func (i *Index) Add(path string) *Entry {
- e := &Entry{
- Name: filepath.ToSlash(path),
- }
-
- i.Entries = append(i.Entries, e)
- return e
-}
-
-// Entry returns the entry that match the given path, if any.
-func (i *Index) Entry(path string) (*Entry, error) {
- path = filepath.ToSlash(path)
- for _, e := range i.Entries {
- if e.Name == path {
- return e, nil
- }
- }
-
- return nil, ErrEntryNotFound
-}
-
-// Remove remove the entry that match the give path and returns deleted entry.
-func (i *Index) Remove(path string) (*Entry, error) {
- path = filepath.ToSlash(path)
- for index, e := range i.Entries {
- if e.Name == path {
- i.Entries = append(i.Entries[:index], i.Entries[index+1:]...)
- return e, nil
- }
- }
-
- return nil, ErrEntryNotFound
-}
-
-// Glob returns the all entries matching pattern or nil if there is no matching
-// entry. The syntax of patterns is the same as in filepath.Glob.
-func (i *Index) Glob(pattern string) (matches []*Entry, err error) {
- pattern = filepath.ToSlash(pattern)
- for _, e := range i.Entries {
- m, err := match(pattern, e.Name)
- if err != nil {
- return nil, err
- }
-
- if m {
- matches = append(matches, e)
- }
- }
-
- return
-}
-
-// String is equivalent to `git ls-files --stage --debug`
-func (i *Index) String() string {
- buf := bytes.NewBuffer(nil)
- for _, e := range i.Entries {
- buf.WriteString(e.String())
- }
-
- return buf.String()
-}
-
-// Entry represents a single file (or stage of a file) in the cache. An entry
-// represents exactly one stage of a file. If a file path is unmerged then
-// multiple Entry instances may appear for the same path name.
-type Entry struct {
- // Hash is the SHA1 of the represented file
- Hash plumbing.Hash
- // Name is the Entry path name relative to top level directory
- Name string
- // CreatedAt time when the tracked path was created
- CreatedAt time.Time
- // ModifiedAt time when the tracked path was changed
- ModifiedAt time.Time
- // Dev and Inode of the tracked path
- Dev, Inode uint32
- // Mode of the path
- Mode filemode.FileMode
- // UID and GID, userid and group id of the owner
- UID, GID uint32
- // Size is the length in bytes for regular files
- Size uint32
- // Stage on a merge is defines what stage is representing this entry
- // https://git-scm.com/book/en/v2/Git-Tools-Advanced-Merging
- Stage Stage
- // SkipWorktree used in sparse checkouts
- // https://git-scm.com/docs/git-read-tree#_sparse_checkout
- SkipWorktree bool
- // IntentToAdd record only the fact that the path will be added later
- // https://git-scm.com/docs/git-add ("git add -N")
- IntentToAdd bool
-}
-
-func (e Entry) String() string {
- buf := bytes.NewBuffer(nil)
-
- fmt.Fprintf(buf, "%06o %s %d\t%s\n", e.Mode, e.Hash, e.Stage, e.Name)
- fmt.Fprintf(buf, " ctime: %d:%d\n", e.CreatedAt.Unix(), e.CreatedAt.Nanosecond())
- fmt.Fprintf(buf, " mtime: %d:%d\n", e.ModifiedAt.Unix(), e.ModifiedAt.Nanosecond())
- fmt.Fprintf(buf, " dev: %d\tino: %d\n", e.Dev, e.Inode)
- fmt.Fprintf(buf, " uid: %d\tgid: %d\n", e.UID, e.GID)
- fmt.Fprintf(buf, " size: %d\tflags: %x\n", e.Size, 0)
-
- return buf.String()
-}
-
-// Tree contains pre-computed hashes for trees that can be derived from the
-// index. It helps speed up tree object generation from index for a new commit.
-type Tree struct {
- Entries []TreeEntry
-}
-
-// TreeEntry entry of a cached Tree
-type TreeEntry struct {
- // Path component (relative to its parent directory)
- Path string
- // Entries is the number of entries in the index that is covered by the tree
- // this entry represents.
- Entries int
- // Trees is the number that represents the number of subtrees this tree has
- Trees int
- // Hash object name for the object that would result from writing this span
- // of index as a tree.
- Hash plumbing.Hash
-}
-
-// ResolveUndo is used when a conflict is resolved (e.g. with "git add path"),
-// these higher stage entries are removed and a stage-0 entry with proper
-// resolution is added. When these higher stage entries are removed, they are
-// saved in the resolve undo extension.
-type ResolveUndo struct {
- Entries []ResolveUndoEntry
-}
-
-// ResolveUndoEntry contains the information about a conflict when is resolved
-type ResolveUndoEntry struct {
- Path string
- Stages map[Stage]plumbing.Hash
-}
-
-// EndOfIndexEntry is the End of Index Entry (EOIE) is used to locate the end of
-// the variable length index entries and the beginning of the extensions. Code
-// can take advantage of this to quickly locate the index extensions without
-// having to parse through all of the index entries.
-//
-// Because it must be able to be loaded before the variable length cache
-// entries and other index extensions, this extension must be written last.
-type EndOfIndexEntry struct {
- // Offset to the end of the index entries
- Offset uint32
- // Hash is a SHA-1 over the extension types and their sizes (but not
- // their contents).
- Hash plumbing.Hash
-}
-
-// SkipUnless applies patterns in the form of A, A/B, A/B/C
-// to the index to prevent the files from being checked out
-func (i *Index) SkipUnless(patterns []string) {
- for _, e := range i.Entries {
- var include bool
- for _, pattern := range patterns {
- if strings.HasPrefix(e.Name, pattern) {
- include = true
- break
- }
- }
- if !include {
- e.SkipWorktree = true
- }
- }
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/index/match.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/index/match.go
deleted file mode 100644
index 2891d7d34cc..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/index/match.go
+++ /dev/null
@@ -1,186 +0,0 @@
-package index
-
-import (
- "path/filepath"
- "runtime"
- "unicode/utf8"
-)
-
-// match is filepath.Match with support to match fullpath and not only filenames
-// code from:
-// https://github.com/golang/go/blob/39852bf4cce6927e01d0136c7843f65a801738cb/src/path/filepath/match.go#L44-L224
-func match(pattern, name string) (matched bool, err error) {
-Pattern:
- for len(pattern) > 0 {
- var star bool
- var chunk string
- star, chunk, pattern = scanChunk(pattern)
-
- // Look for match at current position.
- t, ok, err := matchChunk(chunk, name)
- // if we're the last chunk, make sure we've exhausted the name
- // otherwise we'll give a false result even if we could still match
- // using the star
- if ok && (len(t) == 0 || len(pattern) > 0) {
- name = t
- continue
- }
- if err != nil {
- return false, err
- }
- if star {
- // Look for match skipping i+1 bytes.
- // Cannot skip /.
- for i := 0; i < len(name); i++ {
- t, ok, err := matchChunk(chunk, name[i+1:])
- if ok {
- // if we're the last chunk, make sure we exhausted the name
- if len(pattern) == 0 && len(t) > 0 {
- continue
- }
- name = t
- continue Pattern
- }
- if err != nil {
- return false, err
- }
- }
- }
- return false, nil
- }
- return len(name) == 0, nil
-}
-
-// scanChunk gets the next segment of pattern, which is a non-star string
-// possibly preceded by a star.
-func scanChunk(pattern string) (star bool, chunk, rest string) {
- for len(pattern) > 0 && pattern[0] == '*' {
- pattern = pattern[1:]
- star = true
- }
- inrange := false
- var i int
-Scan:
- for i = 0; i < len(pattern); i++ {
- switch pattern[i] {
- case '\\':
- if runtime.GOOS != "windows" {
- // error check handled in matchChunk: bad pattern.
- if i+1 < len(pattern) {
- i++
- }
- }
- case '[':
- inrange = true
- case ']':
- inrange = false
- case '*':
- if !inrange {
- break Scan
- }
- }
- }
- return star, pattern[0:i], pattern[i:]
-}
-
-// matchChunk checks whether chunk matches the beginning of s.
-// If so, it returns the remainder of s (after the match).
-// Chunk is all single-character operators: literals, char classes, and ?.
-func matchChunk(chunk, s string) (rest string, ok bool, err error) {
- for len(chunk) > 0 {
- if len(s) == 0 {
- return
- }
- switch chunk[0] {
- case '[':
- // character class
- r, n := utf8.DecodeRuneInString(s)
- s = s[n:]
- chunk = chunk[1:]
- // We can't end right after '[', we're expecting at least
- // a closing bracket and possibly a caret.
- if len(chunk) == 0 {
- err = filepath.ErrBadPattern
- return
- }
- // possibly negated
- negated := chunk[0] == '^'
- if negated {
- chunk = chunk[1:]
- }
- // parse all ranges
- match := false
- nrange := 0
- for {
- if len(chunk) > 0 && chunk[0] == ']' && nrange > 0 {
- chunk = chunk[1:]
- break
- }
- var lo, hi rune
- if lo, chunk, err = getEsc(chunk); err != nil {
- return
- }
- hi = lo
- if chunk[0] == '-' {
- if hi, chunk, err = getEsc(chunk[1:]); err != nil {
- return
- }
- }
- if lo <= r && r <= hi {
- match = true
- }
- nrange++
- }
- if match == negated {
- return
- }
-
- case '?':
- _, n := utf8.DecodeRuneInString(s)
- s = s[n:]
- chunk = chunk[1:]
-
- case '\\':
- if runtime.GOOS != "windows" {
- chunk = chunk[1:]
- if len(chunk) == 0 {
- err = filepath.ErrBadPattern
- return
- }
- }
- fallthrough
-
- default:
- if chunk[0] != s[0] {
- return
- }
- s = s[1:]
- chunk = chunk[1:]
- }
- }
- return s, true, nil
-}
-
-// getEsc gets a possibly-escaped character from chunk, for a character class.
-func getEsc(chunk string) (r rune, nchunk string, err error) {
- if len(chunk) == 0 || chunk[0] == '-' || chunk[0] == ']' {
- err = filepath.ErrBadPattern
- return
- }
- if chunk[0] == '\\' && runtime.GOOS != "windows" {
- chunk = chunk[1:]
- if len(chunk) == 0 {
- err = filepath.ErrBadPattern
- return
- }
- }
- r, n := utf8.DecodeRuneInString(chunk)
- if r == utf8.RuneError && n == 1 {
- err = filepath.ErrBadPattern
- }
- nchunk = chunk[n:]
- if len(nchunk) == 0 {
- err = filepath.ErrBadPattern
- }
- return
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/doc.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/doc.go
deleted file mode 100644
index a7145160ae0..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/doc.go
+++ /dev/null
@@ -1,2 +0,0 @@
-// Package objfile implements encoding and decoding of object files.
-package objfile
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/reader.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/reader.go
deleted file mode 100644
index d7932f4ea88..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/reader.go
+++ /dev/null
@@ -1,117 +0,0 @@
-package objfile
-
-import (
- "errors"
- "io"
- "strconv"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/format/packfile"
- "github.com/go-git/go-git/v5/utils/sync"
-)
-
-var (
- ErrClosed = errors.New("objfile: already closed")
- ErrHeader = errors.New("objfile: invalid header")
- ErrNegativeSize = errors.New("objfile: negative object size")
-)
-
-// Reader reads and decodes compressed objfile data from a provided io.Reader.
-// Reader implements io.ReadCloser. Close should be called when finished with
-// the Reader. Close will not close the underlying io.Reader.
-type Reader struct {
- multi io.Reader
- zlib io.Reader
- zlibref sync.ZLibReader
- hasher plumbing.Hasher
-}
-
-// NewReader returns a new Reader reading from r.
-func NewReader(r io.Reader) (*Reader, error) {
- zlib, err := sync.GetZlibReader(r)
- if err != nil {
- return nil, packfile.ErrZLib.AddDetails(err.Error())
- }
-
- return &Reader{
- zlib: zlib.Reader,
- zlibref: zlib,
- }, nil
-}
-
-// Header reads the type and the size of object, and prepares the reader for read
-func (r *Reader) Header() (t plumbing.ObjectType, size int64, err error) {
- var raw []byte
- raw, err = r.readUntil(' ')
- if err != nil {
- return
- }
-
- t, err = plumbing.ParseObjectType(string(raw))
- if err != nil {
- return
- }
-
- raw, err = r.readUntil(0)
- if err != nil {
- return
- }
-
- size, err = strconv.ParseInt(string(raw), 10, 64)
- if err != nil {
- err = ErrHeader
- return
- }
-
- defer r.prepareForRead(t, size)
- return
-}
-
-// readSlice reads one byte at a time from r until it encounters delim or an
-// error.
-func (r *Reader) readUntil(delim byte) ([]byte, error) {
- var buf [1]byte
- value := make([]byte, 0, 16)
- for {
- if n, err := r.zlib.Read(buf[:]); err != nil && (err != io.EOF || n == 0) {
- if err == io.EOF {
- return nil, ErrHeader
- }
- return nil, err
- }
-
- if buf[0] == delim {
- return value, nil
- }
-
- value = append(value, buf[0])
- }
-}
-
-func (r *Reader) prepareForRead(t plumbing.ObjectType, size int64) {
- r.hasher = plumbing.NewHasher(t, size)
- r.multi = io.TeeReader(r.zlib, r.hasher)
-}
-
-// Read reads len(p) bytes into p from the object data stream. It returns
-// the number of bytes read (0 <= n <= len(p)) and any error encountered. Even
-// if Read returns n < len(p), it may use all of p as scratch space during the
-// call.
-//
-// If Read encounters the end of the data stream it will return err == io.EOF,
-// either in the current call if n > 0 or in a subsequent call.
-func (r *Reader) Read(p []byte) (n int, err error) {
- return r.multi.Read(p)
-}
-
-// Hash returns the hash of the object data stream that has been read so far.
-func (r *Reader) Hash() plumbing.Hash {
- return r.hasher.Sum()
-}
-
-// Close releases any resources consumed by the Reader. Calling Close does not
-// close the wrapped io.Reader originally passed to NewReader.
-func (r *Reader) Close() error {
- sync.PutZlibReader(r.zlibref)
- return nil
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/writer.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/writer.go
deleted file mode 100644
index 0d0f1549286..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/writer.go
+++ /dev/null
@@ -1,112 +0,0 @@
-package objfile
-
-import (
- "compress/zlib"
- "errors"
- "io"
- "strconv"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/utils/sync"
-)
-
-var (
- ErrOverflow = errors.New("objfile: declared data length exceeded (overflow)")
-)
-
-// Writer writes and encodes data in compressed objfile format to a provided
-// io.Writer. Close should be called when finished with the Writer. Close will
-// not close the underlying io.Writer.
-type Writer struct {
- raw io.Writer
- hasher plumbing.Hasher
- multi io.Writer
- zlib *zlib.Writer
-
- closed bool
- pending int64 // number of unwritten bytes
-}
-
-// NewWriter returns a new Writer writing to w.
-//
-// The returned Writer implements io.WriteCloser. Close should be called when
-// finished with the Writer. Close will not close the underlying io.Writer.
-func NewWriter(w io.Writer) *Writer {
- zlib := sync.GetZlibWriter(w)
- return &Writer{
- raw: w,
- zlib: zlib,
- }
-}
-
-// WriteHeader writes the type and the size and prepares to accept the object's
-// contents. If an invalid t is provided, plumbing.ErrInvalidType is returned. If a
-// negative size is provided, ErrNegativeSize is returned.
-func (w *Writer) WriteHeader(t plumbing.ObjectType, size int64) error {
- if !t.Valid() {
- return plumbing.ErrInvalidType
- }
- if size < 0 {
- return ErrNegativeSize
- }
-
- b := t.Bytes()
- b = append(b, ' ')
- b = append(b, []byte(strconv.FormatInt(size, 10))...)
- b = append(b, 0)
-
- defer w.prepareForWrite(t, size)
- _, err := w.zlib.Write(b)
-
- return err
-}
-
-func (w *Writer) prepareForWrite(t plumbing.ObjectType, size int64) {
- w.pending = size
-
- w.hasher = plumbing.NewHasher(t, size)
- w.multi = io.MultiWriter(w.zlib, w.hasher)
-}
-
-// Write writes the object's contents. Write returns the error ErrOverflow if
-// more than size bytes are written after WriteHeader.
-func (w *Writer) Write(p []byte) (n int, err error) {
- if w.closed {
- return 0, ErrClosed
- }
-
- overwrite := false
- if int64(len(p)) > w.pending {
- p = p[0:w.pending]
- overwrite = true
- }
-
- n, err = w.multi.Write(p)
- w.pending -= int64(n)
- if err == nil && overwrite {
- err = ErrOverflow
- return
- }
-
- return
-}
-
-// Hash returns the hash of the object data stream that has been written so far.
-// It can be called before or after Close.
-func (w *Writer) Hash() plumbing.Hash {
- return w.hasher.Sum() // Not yet closed, return hash of data written so far
-}
-
-// Close releases any resources consumed by the Writer.
-//
-// Calling Close does not close the wrapped io.Writer originally passed to
-// NewWriter.
-func (w *Writer) Close() error {
- defer sync.PutZlibWriter(w.zlib)
- if err := w.zlib.Close(); err != nil {
- return err
- }
-
- w.closed = true
- return nil
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/common.go
deleted file mode 100644
index 36c5ef5b88c..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/common.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package packfile
-
-import (
- "io"
-
- "github.com/go-git/go-git/v5/plumbing/storer"
- "github.com/go-git/go-git/v5/utils/ioutil"
-)
-
-var signature = []byte{'P', 'A', 'C', 'K'}
-
-const (
- // VersionSupported is the packfile version supported by this package
- VersionSupported uint32 = 2
-
- firstLengthBits = uint8(4) // the first byte into object header has 4 bits to store the length
- lengthBits = uint8(7) // subsequent bytes has 7 bits to store the length
- maskFirstLength = 15 // 0000 1111
- maskContinue = 0x80 // 1000 0000
- maskLength = uint8(127) // 0111 1111
- maskType = uint8(112) // 0111 0000
-)
-
-// UpdateObjectStorage updates the storer with the objects in the given
-// packfile.
-func UpdateObjectStorage(s storer.Storer, packfile io.Reader) error {
- if pw, ok := s.(storer.PackfileWriter); ok {
- return WritePackfileToObjectStorage(pw, packfile)
- }
-
- p, err := NewParserWithStorage(NewScanner(packfile), s)
- if err != nil {
- return err
- }
-
- _, err = p.Parse()
- return err
-}
-
-// WritePackfileToObjectStorage writes all the packfile objects into the given
-// object storage.
-func WritePackfileToObjectStorage(
- sw storer.PackfileWriter,
- packfile io.Reader,
-) (err error) {
- w, err := sw.PackfileWriter()
- if err != nil {
- return err
- }
-
- defer ioutil.CheckClose(w, &err)
-
- var n int64
- n, err = io.Copy(w, packfile)
- if err == nil && n == 0 {
- return ErrEmptyPackfile
- }
-
- return err
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_index.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_index.go
deleted file mode 100644
index 07a61120e5a..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_index.go
+++ /dev/null
@@ -1,297 +0,0 @@
-package packfile
-
-const blksz = 16
-const maxChainLength = 64
-
-// deltaIndex is a modified version of JGit's DeltaIndex adapted to our current
-// design.
-type deltaIndex struct {
- table []int
- entries []int
- mask int
-}
-
-func (idx *deltaIndex) init(buf []byte) {
- scanner := newDeltaIndexScanner(buf, len(buf))
- idx.mask = scanner.mask
- idx.table = scanner.table
- idx.entries = make([]int, countEntries(scanner)+1)
- idx.copyEntries(scanner)
-}
-
-// findMatch returns the offset of src where the block starting at tgtOffset
-// is and the length of the match. A length of 0 means there was no match. A
-// length of -1 means the src length is lower than the blksz and whatever
-// other positive length is the length of the match in bytes.
-func (idx *deltaIndex) findMatch(src, tgt []byte, tgtOffset int) (srcOffset, l int) {
- if len(tgt) < tgtOffset+s {
- return 0, len(tgt) - tgtOffset
- }
-
- if len(src) < blksz {
- return 0, -1
- }
-
- if len(tgt) >= tgtOffset+s && len(src) >= blksz {
- h := hashBlock(tgt, tgtOffset)
- tIdx := h & idx.mask
- eIdx := idx.table[tIdx]
- if eIdx != 0 {
- srcOffset = idx.entries[eIdx]
- } else {
- return
- }
-
- l = matchLength(src, tgt, tgtOffset, srcOffset)
- }
-
- return
-}
-
-func matchLength(src, tgt []byte, otgt, osrc int) (l int) {
- lensrc := len(src)
- lentgt := len(tgt)
- for (osrc < lensrc && otgt < lentgt) && src[osrc] == tgt[otgt] {
- l++
- osrc++
- otgt++
- }
- return
-}
-
-func countEntries(scan *deltaIndexScanner) (cnt int) {
- // Figure out exactly how many entries we need. As we do the
- // enumeration truncate any delta chains longer than what we
- // are willing to scan during encode. This keeps the encode
- // logic linear in the size of the input rather than quadratic.
- for i := 0; i < len(scan.table); i++ {
- h := scan.table[i]
- if h == 0 {
- continue
- }
-
- size := 0
- for {
- size++
- if size == maxChainLength {
- scan.next[h] = 0
- break
- }
- h = scan.next[h]
-
- if h == 0 {
- break
- }
- }
- cnt += size
- }
-
- return
-}
-
-func (idx *deltaIndex) copyEntries(scanner *deltaIndexScanner) {
- // Rebuild the entries list from the scanner, positioning all
- // blocks in the same hash chain next to each other. We can
- // then later discard the next list, along with the scanner.
- //
- next := 1
- for i := 0; i < len(idx.table); i++ {
- h := idx.table[i]
- if h == 0 {
- continue
- }
-
- idx.table[i] = next
- for {
- idx.entries[next] = scanner.entries[h]
- next++
- h = scanner.next[h]
-
- if h == 0 {
- break
- }
- }
- }
-}
-
-type deltaIndexScanner struct {
- table []int
- entries []int
- next []int
- mask int
- count int
-}
-
-func newDeltaIndexScanner(buf []byte, size int) *deltaIndexScanner {
- size -= size % blksz
- worstCaseBlockCnt := size / blksz
- if worstCaseBlockCnt < 1 {
- return new(deltaIndexScanner)
- }
-
- tableSize := tableSize(worstCaseBlockCnt)
- scanner := &deltaIndexScanner{
- table: make([]int, tableSize),
- mask: tableSize - 1,
- entries: make([]int, worstCaseBlockCnt+1),
- next: make([]int, worstCaseBlockCnt+1),
- }
-
- scanner.scan(buf, size)
- return scanner
-}
-
-// slightly modified version of JGit's DeltaIndexScanner. We store the offset on the entries
-// instead of the entries and the key, so we avoid operations to retrieve the offset later, as
-// we don't use the key.
-// See: https://github.com/eclipse/jgit/blob/005e5feb4ecd08c4e4d141a38b9e7942accb3212/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/pack/DeltaIndexScanner.java
-func (s *deltaIndexScanner) scan(buf []byte, end int) {
- lastHash := 0
- ptr := end - blksz
-
- for {
- key := hashBlock(buf, ptr)
- tIdx := key & s.mask
- head := s.table[tIdx]
- if head != 0 && lastHash == key {
- s.entries[head] = ptr
- } else {
- s.count++
- eIdx := s.count
- s.entries[eIdx] = ptr
- s.next[eIdx] = head
- s.table[tIdx] = eIdx
- }
-
- lastHash = key
- ptr -= blksz
-
- if 0 > ptr {
- break
- }
- }
-}
-
-func tableSize(worstCaseBlockCnt int) int {
- shift := 32 - leadingZeros(uint32(worstCaseBlockCnt))
- sz := 1 << uint(shift-1)
- if sz < worstCaseBlockCnt {
- sz <<= 1
- }
- return sz
-}
-
-// use https://golang.org/pkg/math/bits/#LeadingZeros32 in the future
-func leadingZeros(x uint32) (n int) {
- if x >= 1<<16 {
- x >>= 16
- n = 16
- }
- if x >= 1<<8 {
- x >>= 8
- n += 8
- }
- n += int(len8tab[x])
- return 32 - n
-}
-
-var len8tab = [256]uint8{
- 0x00, 0x01, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
- 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
- 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
- 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
- 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
- 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
- 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
- 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
- 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
- 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
- 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
- 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
- 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
- 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
- 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
- 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
-}
-
-func hashBlock(raw []byte, ptr int) int {
- // The first 4 steps collapse out into a 4 byte big-endian decode,
- // with a larger right shift as we combined shift lefts together.
- //
- hash := ((uint32(raw[ptr]) & 0xff) << 24) |
- ((uint32(raw[ptr+1]) & 0xff) << 16) |
- ((uint32(raw[ptr+2]) & 0xff) << 8) |
- (uint32(raw[ptr+3]) & 0xff)
- hash ^= T[hash>>31]
-
- hash = ((hash << 8) | (uint32(raw[ptr+4]) & 0xff)) ^ T[hash>>23]
- hash = ((hash << 8) | (uint32(raw[ptr+5]) & 0xff)) ^ T[hash>>23]
- hash = ((hash << 8) | (uint32(raw[ptr+6]) & 0xff)) ^ T[hash>>23]
- hash = ((hash << 8) | (uint32(raw[ptr+7]) & 0xff)) ^ T[hash>>23]
-
- hash = ((hash << 8) | (uint32(raw[ptr+8]) & 0xff)) ^ T[hash>>23]
- hash = ((hash << 8) | (uint32(raw[ptr+9]) & 0xff)) ^ T[hash>>23]
- hash = ((hash << 8) | (uint32(raw[ptr+10]) & 0xff)) ^ T[hash>>23]
- hash = ((hash << 8) | (uint32(raw[ptr+11]) & 0xff)) ^ T[hash>>23]
-
- hash = ((hash << 8) | (uint32(raw[ptr+12]) & 0xff)) ^ T[hash>>23]
- hash = ((hash << 8) | (uint32(raw[ptr+13]) & 0xff)) ^ T[hash>>23]
- hash = ((hash << 8) | (uint32(raw[ptr+14]) & 0xff)) ^ T[hash>>23]
- hash = ((hash << 8) | (uint32(raw[ptr+15]) & 0xff)) ^ T[hash>>23]
-
- return int(hash)
-}
-
-var T = []uint32{0x00000000, 0xd4c6b32d, 0x7d4bd577,
- 0xa98d665a, 0x2e5119c3, 0xfa97aaee, 0x531accb4, 0x87dc7f99,
- 0x5ca23386, 0x886480ab, 0x21e9e6f1, 0xf52f55dc, 0x72f32a45,
- 0xa6359968, 0x0fb8ff32, 0xdb7e4c1f, 0x6d82d421, 0xb944670c,
- 0x10c90156, 0xc40fb27b, 0x43d3cde2, 0x97157ecf, 0x3e981895,
- 0xea5eabb8, 0x3120e7a7, 0xe5e6548a, 0x4c6b32d0, 0x98ad81fd,
- 0x1f71fe64, 0xcbb74d49, 0x623a2b13, 0xb6fc983e, 0x0fc31b6f,
- 0xdb05a842, 0x7288ce18, 0xa64e7d35, 0x219202ac, 0xf554b181,
- 0x5cd9d7db, 0x881f64f6, 0x536128e9, 0x87a79bc4, 0x2e2afd9e,
- 0xfaec4eb3, 0x7d30312a, 0xa9f68207, 0x007be45d, 0xd4bd5770,
- 0x6241cf4e, 0xb6877c63, 0x1f0a1a39, 0xcbcca914, 0x4c10d68d,
- 0x98d665a0, 0x315b03fa, 0xe59db0d7, 0x3ee3fcc8, 0xea254fe5,
- 0x43a829bf, 0x976e9a92, 0x10b2e50b, 0xc4745626, 0x6df9307c,
- 0xb93f8351, 0x1f8636de, 0xcb4085f3, 0x62cde3a9, 0xb60b5084,
- 0x31d72f1d, 0xe5119c30, 0x4c9cfa6a, 0x985a4947, 0x43240558,
- 0x97e2b675, 0x3e6fd02f, 0xeaa96302, 0x6d751c9b, 0xb9b3afb6,
- 0x103ec9ec, 0xc4f87ac1, 0x7204e2ff, 0xa6c251d2, 0x0f4f3788,
- 0xdb8984a5, 0x5c55fb3c, 0x88934811, 0x211e2e4b, 0xf5d89d66,
- 0x2ea6d179, 0xfa606254, 0x53ed040e, 0x872bb723, 0x00f7c8ba,
- 0xd4317b97, 0x7dbc1dcd, 0xa97aaee0, 0x10452db1, 0xc4839e9c,
- 0x6d0ef8c6, 0xb9c84beb, 0x3e143472, 0xead2875f, 0x435fe105,
- 0x97995228, 0x4ce71e37, 0x9821ad1a, 0x31accb40, 0xe56a786d,
- 0x62b607f4, 0xb670b4d9, 0x1ffdd283, 0xcb3b61ae, 0x7dc7f990,
- 0xa9014abd, 0x008c2ce7, 0xd44a9fca, 0x5396e053, 0x8750537e,
- 0x2edd3524, 0xfa1b8609, 0x2165ca16, 0xf5a3793b, 0x5c2e1f61,
- 0x88e8ac4c, 0x0f34d3d5, 0xdbf260f8, 0x727f06a2, 0xa6b9b58f,
- 0x3f0c6dbc, 0xebcade91, 0x4247b8cb, 0x96810be6, 0x115d747f,
- 0xc59bc752, 0x6c16a108, 0xb8d01225, 0x63ae5e3a, 0xb768ed17,
- 0x1ee58b4d, 0xca233860, 0x4dff47f9, 0x9939f4d4, 0x30b4928e,
- 0xe47221a3, 0x528eb99d, 0x86480ab0, 0x2fc56cea, 0xfb03dfc7,
- 0x7cdfa05e, 0xa8191373, 0x01947529, 0xd552c604, 0x0e2c8a1b,
- 0xdaea3936, 0x73675f6c, 0xa7a1ec41, 0x207d93d8, 0xf4bb20f5,
- 0x5d3646af, 0x89f0f582, 0x30cf76d3, 0xe409c5fe, 0x4d84a3a4,
- 0x99421089, 0x1e9e6f10, 0xca58dc3d, 0x63d5ba67, 0xb713094a,
- 0x6c6d4555, 0xb8abf678, 0x11269022, 0xc5e0230f, 0x423c5c96,
- 0x96faefbb, 0x3f7789e1, 0xebb13acc, 0x5d4da2f2, 0x898b11df,
- 0x20067785, 0xf4c0c4a8, 0x731cbb31, 0xa7da081c, 0x0e576e46,
- 0xda91dd6b, 0x01ef9174, 0xd5292259, 0x7ca44403, 0xa862f72e,
- 0x2fbe88b7, 0xfb783b9a, 0x52f55dc0, 0x8633eeed, 0x208a5b62,
- 0xf44ce84f, 0x5dc18e15, 0x89073d38, 0x0edb42a1, 0xda1df18c,
- 0x739097d6, 0xa75624fb, 0x7c2868e4, 0xa8eedbc9, 0x0163bd93,
- 0xd5a50ebe, 0x52797127, 0x86bfc20a, 0x2f32a450, 0xfbf4177d,
- 0x4d088f43, 0x99ce3c6e, 0x30435a34, 0xe485e919, 0x63599680,
- 0xb79f25ad, 0x1e1243f7, 0xcad4f0da, 0x11aabcc5, 0xc56c0fe8,
- 0x6ce169b2, 0xb827da9f, 0x3ffba506, 0xeb3d162b, 0x42b07071,
- 0x9676c35c, 0x2f49400d, 0xfb8ff320, 0x5202957a, 0x86c42657,
- 0x011859ce, 0xd5deeae3, 0x7c538cb9, 0xa8953f94, 0x73eb738b,
- 0xa72dc0a6, 0x0ea0a6fc, 0xda6615d1, 0x5dba6a48, 0x897cd965,
- 0x20f1bf3f, 0xf4370c12, 0x42cb942c, 0x960d2701, 0x3f80415b,
- 0xeb46f276, 0x6c9a8def, 0xb85c3ec2, 0x11d15898, 0xc517ebb5,
- 0x1e69a7aa, 0xcaaf1487, 0x632272dd, 0xb7e4c1f0, 0x3038be69,
- 0xe4fe0d44, 0x4d736b1e, 0x99b5d833,
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_selector.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_selector.go
deleted file mode 100644
index 4b60ff39470..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_selector.go
+++ /dev/null
@@ -1,369 +0,0 @@
-package packfile
-
-import (
- "sort"
- "sync"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/storer"
-)
-
-const (
- // deltas based on deltas, how many steps we can do.
- // 50 is the default value used in JGit
- maxDepth = int64(50)
-)
-
-// applyDelta is the set of object types that we should apply deltas
-var applyDelta = map[plumbing.ObjectType]bool{
- plumbing.BlobObject: true,
- plumbing.TreeObject: true,
-}
-
-type deltaSelector struct {
- storer storer.EncodedObjectStorer
-}
-
-func newDeltaSelector(s storer.EncodedObjectStorer) *deltaSelector {
- return &deltaSelector{s}
-}
-
-// ObjectsToPack creates a list of ObjectToPack from the hashes
-// provided, creating deltas if it's suitable, using an specific
-// internal logic. `packWindow` specifies the size of the sliding
-// window used to compare objects for delta compression; 0 turns off
-// delta compression entirely.
-func (dw *deltaSelector) ObjectsToPack(
- hashes []plumbing.Hash,
- packWindow uint,
-) ([]*ObjectToPack, error) {
- otp, err := dw.objectsToPack(hashes, packWindow)
- if err != nil {
- return nil, err
- }
-
- if packWindow == 0 {
- return otp, nil
- }
-
- dw.sort(otp)
-
- var objectGroups [][]*ObjectToPack
- var prev *ObjectToPack
- i := -1
- for _, obj := range otp {
- if prev == nil || prev.Type() != obj.Type() {
- objectGroups = append(objectGroups, []*ObjectToPack{obj})
- i++
- prev = obj
- } else {
- objectGroups[i] = append(objectGroups[i], obj)
- }
- }
-
- var wg sync.WaitGroup
- var once sync.Once
- for _, objs := range objectGroups {
- objs := objs
- wg.Add(1)
- go func() {
- if walkErr := dw.walk(objs, packWindow); walkErr != nil {
- once.Do(func() {
- err = walkErr
- })
- }
- wg.Done()
- }()
- }
- wg.Wait()
-
- if err != nil {
- return nil, err
- }
-
- return otp, nil
-}
-
-func (dw *deltaSelector) objectsToPack(
- hashes []plumbing.Hash,
- packWindow uint,
-) ([]*ObjectToPack, error) {
- var objectsToPack []*ObjectToPack
- for _, h := range hashes {
- var o plumbing.EncodedObject
- var err error
- if packWindow == 0 {
- o, err = dw.encodedObject(h)
- } else {
- o, err = dw.encodedDeltaObject(h)
- }
- if err != nil {
- return nil, err
- }
-
- otp := newObjectToPack(o)
- if _, ok := o.(plumbing.DeltaObject); ok {
- otp.CleanOriginal()
- }
-
- objectsToPack = append(objectsToPack, otp)
- }
-
- if packWindow == 0 {
- return objectsToPack, nil
- }
-
- if err := dw.fixAndBreakChains(objectsToPack); err != nil {
- return nil, err
- }
-
- return objectsToPack, nil
-}
-
-func (dw *deltaSelector) encodedDeltaObject(h plumbing.Hash) (plumbing.EncodedObject, error) {
- edos, ok := dw.storer.(storer.DeltaObjectStorer)
- if !ok {
- return dw.encodedObject(h)
- }
-
- return edos.DeltaObject(plumbing.AnyObject, h)
-}
-
-func (dw *deltaSelector) encodedObject(h plumbing.Hash) (plumbing.EncodedObject, error) {
- return dw.storer.EncodedObject(plumbing.AnyObject, h)
-}
-
-func (dw *deltaSelector) fixAndBreakChains(objectsToPack []*ObjectToPack) error {
- m := make(map[plumbing.Hash]*ObjectToPack, len(objectsToPack))
- for _, otp := range objectsToPack {
- m[otp.Hash()] = otp
- }
-
- for _, otp := range objectsToPack {
- if err := dw.fixAndBreakChainsOne(m, otp); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (dw *deltaSelector) fixAndBreakChainsOne(objectsToPack map[plumbing.Hash]*ObjectToPack, otp *ObjectToPack) error {
- if !otp.Object.Type().IsDelta() {
- return nil
- }
-
- // Initial ObjectToPack instances might have a delta assigned to Object
- // but no actual base initially. Once Base is assigned to a delta, it means
- // we already fixed it.
- if otp.Base != nil {
- return nil
- }
-
- do, ok := otp.Object.(plumbing.DeltaObject)
- if !ok {
- // if this is not a DeltaObject, then we cannot retrieve its base,
- // so we have to break the delta chain here.
- return dw.undeltify(otp)
- }
-
- base, ok := objectsToPack[do.BaseHash()]
- if !ok {
- // The base of the delta is not in our list of objects to pack, so
- // we break the chain.
- return dw.undeltify(otp)
- }
-
- if err := dw.fixAndBreakChainsOne(objectsToPack, base); err != nil {
- return err
- }
-
- otp.SetDelta(base, otp.Object)
- return nil
-}
-
-func (dw *deltaSelector) restoreOriginal(otp *ObjectToPack) error {
- if otp.Original != nil {
- return nil
- }
-
- if !otp.Object.Type().IsDelta() {
- return nil
- }
-
- obj, err := dw.encodedObject(otp.Hash())
- if err != nil {
- return err
- }
-
- otp.SetOriginal(obj)
-
- return nil
-}
-
-// undeltify undeltifies an *ObjectToPack by retrieving the original object from
-// the storer and resetting it.
-func (dw *deltaSelector) undeltify(otp *ObjectToPack) error {
- if err := dw.restoreOriginal(otp); err != nil {
- return err
- }
-
- otp.Object = otp.Original
- otp.Depth = 0
- return nil
-}
-
-func (dw *deltaSelector) sort(objectsToPack []*ObjectToPack) {
- sort.Sort(byTypeAndSize(objectsToPack))
-}
-
-func (dw *deltaSelector) walk(
- objectsToPack []*ObjectToPack,
- packWindow uint,
-) error {
- indexMap := make(map[plumbing.Hash]*deltaIndex)
- for i := 0; i < len(objectsToPack); i++ {
- // Clean up the index map and reconstructed delta objects for anything
- // outside our pack window, to save memory.
- if i > int(packWindow) {
- obj := objectsToPack[i-int(packWindow)]
-
- delete(indexMap, obj.Hash())
-
- if obj.IsDelta() {
- obj.SaveOriginalMetadata()
- obj.CleanOriginal()
- }
- }
-
- target := objectsToPack[i]
-
- // If we already have a delta, we don't try to find a new one for this
- // object. This happens when a delta is set to be reused from an existing
- // packfile.
- if target.IsDelta() {
- continue
- }
-
- // We only want to create deltas from specific types.
- if !applyDelta[target.Type()] {
- continue
- }
-
- for j := i - 1; j >= 0 && i-j < int(packWindow); j-- {
- base := objectsToPack[j]
- // Objects must use only the same type as their delta base.
- // Since objectsToPack is sorted by type and size, once we find
- // a different type, we know we won't find more of them.
- if base.Type() != target.Type() {
- break
- }
-
- if err := dw.tryToDeltify(indexMap, base, target); err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-func (dw *deltaSelector) tryToDeltify(indexMap map[plumbing.Hash]*deltaIndex, base, target *ObjectToPack) error {
- // Original object might not be present if we're reusing a delta, so we
- // ensure it is restored.
- if err := dw.restoreOriginal(target); err != nil {
- return err
- }
-
- if err := dw.restoreOriginal(base); err != nil {
- return err
- }
-
- // If the sizes are radically different, this is a bad pairing.
- if target.Size() < base.Size()>>4 {
- return nil
- }
-
- msz := dw.deltaSizeLimit(
- target.Object.Size(),
- base.Depth,
- target.Depth,
- target.IsDelta(),
- )
-
- // Nearly impossible to fit useful delta.
- if msz <= 8 {
- return nil
- }
-
- // If we have to insert a lot to make this work, find another.
- if base.Size()-target.Size() > msz {
- return nil
- }
-
- if _, ok := indexMap[base.Hash()]; !ok {
- indexMap[base.Hash()] = new(deltaIndex)
- }
-
- // Now we can generate the delta using originals
- delta, err := getDelta(indexMap[base.Hash()], base.Original, target.Original)
- if err != nil {
- return err
- }
-
- // if delta better than target
- if delta.Size() < msz {
- target.SetDelta(base, delta)
- }
-
- return nil
-}
-
-func (dw *deltaSelector) deltaSizeLimit(targetSize int64, baseDepth int,
- targetDepth int, targetDelta bool) int64 {
- if !targetDelta {
- // Any delta should be no more than 50% of the original size
- // (for text files deflate of whole form should shrink 50%).
- n := targetSize >> 1
-
- // Evenly distribute delta size limits over allowed depth.
- // If src is non-delta (depth = 0), delta <= 50% of original.
- // If src is almost at limit (9/10), delta <= 10% of original.
- return n * (maxDepth - int64(baseDepth)) / maxDepth
- }
-
- // With a delta base chosen any new delta must be "better".
- // Retain the distribution described above.
- d := int64(targetDepth)
- n := targetSize
-
- // If target depth is bigger than maxDepth, this delta is not suitable to be used.
- if d >= maxDepth {
- return 0
- }
-
- // If src is whole (depth=0) and base is near limit (depth=9/10)
- // any delta using src can be 10x larger and still be better.
- //
- // If src is near limit (depth=9/10) and base is whole (depth=0)
- // a new delta dependent on src must be 1/10th the size.
- return n * (maxDepth - int64(baseDepth)) / (maxDepth - d)
-}
-
-type byTypeAndSize []*ObjectToPack
-
-func (a byTypeAndSize) Len() int { return len(a) }
-
-func (a byTypeAndSize) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-
-func (a byTypeAndSize) Less(i, j int) bool {
- if a[i].Type() < a[j].Type() {
- return false
- }
-
- if a[i].Type() > a[j].Type() {
- return true
- }
-
- return a[i].Size() > a[j].Size()
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/diff_delta.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/diff_delta.go
deleted file mode 100644
index 8898e5830e4..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/diff_delta.go
+++ /dev/null
@@ -1,204 +0,0 @@
-package packfile
-
-import (
- "bytes"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/utils/ioutil"
- "github.com/go-git/go-git/v5/utils/sync"
-)
-
-// See https://github.com/jelmer/dulwich/blob/master/dulwich/pack.py and
-// https://github.com/tarruda/node-git-core/blob/master/src/js/delta.js
-// for more info
-
-const (
- // Standard chunk size used to generate fingerprints
- s = 16
-
- // https://github.com/git/git/blob/f7466e94375b3be27f229c78873f0acf8301c0a5/diff-delta.c#L428
- // Max size of a copy operation (64KB).
- maxCopySize = 64 * 1024
-
- // Min size of a copy operation.
- minCopySize = 4
-)
-
-// GetDelta returns an EncodedObject of type OFSDeltaObject. Base and Target object,
-// will be loaded into memory to be able to create the delta object.
-// To generate target again, you will need the obtained object and "base" one.
-// Error will be returned if base or target object cannot be read.
-func GetDelta(base, target plumbing.EncodedObject) (plumbing.EncodedObject, error) {
- return getDelta(new(deltaIndex), base, target)
-}
-
-func getDelta(index *deltaIndex, base, target plumbing.EncodedObject) (o plumbing.EncodedObject, err error) {
- br, err := base.Reader()
- if err != nil {
- return nil, err
- }
-
- defer ioutil.CheckClose(br, &err)
-
- tr, err := target.Reader()
- if err != nil {
- return nil, err
- }
-
- defer ioutil.CheckClose(tr, &err)
-
- bb := sync.GetBytesBuffer()
- defer sync.PutBytesBuffer(bb)
-
- _, err = bb.ReadFrom(br)
- if err != nil {
- return nil, err
- }
-
- tb := sync.GetBytesBuffer()
- defer sync.PutBytesBuffer(tb)
-
- _, err = tb.ReadFrom(tr)
- if err != nil {
- return nil, err
- }
-
- db := diffDelta(index, bb.Bytes(), tb.Bytes())
- delta := &plumbing.MemoryObject{}
- _, err = delta.Write(db)
- if err != nil {
- return nil, err
- }
-
- delta.SetSize(int64(len(db)))
- delta.SetType(plumbing.OFSDeltaObject)
-
- return delta, nil
-}
-
-// DiffDelta returns the delta that transforms src into tgt.
-func DiffDelta(src, tgt []byte) []byte {
- return diffDelta(new(deltaIndex), src, tgt)
-}
-
-func diffDelta(index *deltaIndex, src []byte, tgt []byte) []byte {
- buf := sync.GetBytesBuffer()
- defer sync.PutBytesBuffer(buf)
- buf.Write(deltaEncodeSize(len(src)))
- buf.Write(deltaEncodeSize(len(tgt)))
-
- if len(index.entries) == 0 {
- index.init(src)
- }
-
- ibuf := sync.GetBytesBuffer()
- defer sync.PutBytesBuffer(ibuf)
- for i := 0; i < len(tgt); i++ {
- offset, l := index.findMatch(src, tgt, i)
-
- if l == 0 {
- // couldn't find a match, just write the current byte and continue
- ibuf.WriteByte(tgt[i])
- } else if l < 0 {
- // src is less than blksz, copy the rest of the target to avoid
- // calls to findMatch
- for ; i < len(tgt); i++ {
- ibuf.WriteByte(tgt[i])
- }
- } else if l < s {
- // remaining target is less than blksz, copy what's left of it
- // and avoid calls to findMatch
- for j := i; j < i+l; j++ {
- ibuf.WriteByte(tgt[j])
- }
- i += l - 1
- } else {
- encodeInsertOperation(ibuf, buf)
-
- rl := l
- aOffset := offset
- for rl > 0 {
- if rl < maxCopySize {
- buf.Write(encodeCopyOperation(aOffset, rl))
- break
- }
-
- buf.Write(encodeCopyOperation(aOffset, maxCopySize))
- rl -= maxCopySize
- aOffset += maxCopySize
- }
-
- i += l - 1
- }
- }
-
- encodeInsertOperation(ibuf, buf)
-
- // buf.Bytes() is only valid until the next modifying operation on the buffer. Copy it.
- return append([]byte{}, buf.Bytes()...)
-}
-
-func encodeInsertOperation(ibuf, buf *bytes.Buffer) {
- if ibuf.Len() == 0 {
- return
- }
-
- b := ibuf.Bytes()
- s := ibuf.Len()
- o := 0
- for {
- if s <= 127 {
- break
- }
- buf.WriteByte(byte(127))
- buf.Write(b[o : o+127])
- s -= 127
- o += 127
- }
- buf.WriteByte(byte(s))
- buf.Write(b[o : o+s])
-
- ibuf.Reset()
-}
-
-func deltaEncodeSize(size int) []byte {
- var ret []byte
- c := size & 0x7f
- size >>= 7
- for {
- if size == 0 {
- break
- }
-
- ret = append(ret, byte(c|0x80))
- c = size & 0x7f
- size >>= 7
- }
- ret = append(ret, byte(c))
-
- return ret
-}
-
-func encodeCopyOperation(offset, length int) []byte {
- code := 0x80
- var opcodes []byte
-
- var i uint
- for i = 0; i < 4; i++ {
- f := 0xff << (i * 8)
- if offset&f != 0 {
- opcodes = append(opcodes, byte(offset&f>>(i*8)))
- code |= 0x01 << i
- }
- }
-
- for i = 0; i < 3; i++ {
- f := 0xff << (i * 8)
- if length&f != 0 {
- opcodes = append(opcodes, byte(length&f>>(i*8)))
- code |= 0x10 << i
- }
- }
-
- return append([]byte{byte(code)}, opcodes...)
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/doc.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/doc.go
deleted file mode 100644
index 2882a7f3782..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/doc.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Package packfile implements encoding and decoding of packfile format.
-//
-// == pack-*.pack files have the following format:
-//
-// - A header appears at the beginning and consists of the following:
-//
-// 4-byte signature:
-// The signature is: {'P', 'A', 'C', 'K'}
-//
-// 4-byte version number (network byte order):
-// GIT currently accepts version number 2 or 3 but
-// generates version 2 only.
-//
-// 4-byte number of objects contained in the pack (network byte order)
-//
-// Observation: we cannot have more than 4G versions ;-) and
-// more than 4G objects in a pack.
-//
-// - The header is followed by number of object entries, each of
-// which looks like this:
-//
-// (undeltified representation)
-// n-byte type and length (3-bit type, (n-1)*7+4-bit length)
-// compressed data
-//
-// (deltified representation)
-// n-byte type and length (3-bit type, (n-1)*7+4-bit length)
-// 20-byte base object name
-// compressed delta data
-//
-// Observation: length of each object is encoded in a variable
-// length format and is not constrained to 32-bit or anything.
-//
-// - The trailer records 20-byte SHA1 checksum of all of the above.
-//
-//
-// Source:
-// https://www.kernel.org/pub/software/scm/git/docs/v1.7.5/technical/pack-protocol.txt
-package packfile
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/encoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/encoder.go
deleted file mode 100644
index 804f5a876b6..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/encoder.go
+++ /dev/null
@@ -1,221 +0,0 @@
-package packfile
-
-import (
- "compress/zlib"
- "fmt"
- "io"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/hash"
- "github.com/go-git/go-git/v5/plumbing/storer"
- "github.com/go-git/go-git/v5/utils/binary"
- "github.com/go-git/go-git/v5/utils/ioutil"
-)
-
-// Encoder gets the data from the storage and write it into the writer in PACK
-// format
-type Encoder struct {
- selector *deltaSelector
- w *offsetWriter
- zw *zlib.Writer
- hasher plumbing.Hasher
-
- useRefDeltas bool
-}
-
-// NewEncoder creates a new packfile encoder using a specific Writer and
-// EncodedObjectStorer. By default deltas used to generate the packfile will be
-// OFSDeltaObject. To use Reference deltas, set useRefDeltas to true.
-func NewEncoder(w io.Writer, s storer.EncodedObjectStorer, useRefDeltas bool) *Encoder {
- h := plumbing.Hasher{
- Hash: hash.New(hash.CryptoType),
- }
- mw := io.MultiWriter(w, h)
- ow := newOffsetWriter(mw)
- zw := zlib.NewWriter(mw)
- return &Encoder{
- selector: newDeltaSelector(s),
- w: ow,
- zw: zw,
- hasher: h,
- useRefDeltas: useRefDeltas,
- }
-}
-
-// Encode creates a packfile containing all the objects referenced in
-// hashes and writes it to the writer in the Encoder. `packWindow`
-// specifies the size of the sliding window used to compare objects
-// for delta compression; 0 turns off delta compression entirely.
-func (e *Encoder) Encode(
- hashes []plumbing.Hash,
- packWindow uint,
-) (plumbing.Hash, error) {
- objects, err := e.selector.ObjectsToPack(hashes, packWindow)
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- return e.encode(objects)
-}
-
-func (e *Encoder) encode(objects []*ObjectToPack) (plumbing.Hash, error) {
- if err := e.head(len(objects)); err != nil {
- return plumbing.ZeroHash, err
- }
-
- for _, o := range objects {
- if err := e.entry(o); err != nil {
- return plumbing.ZeroHash, err
- }
- }
-
- return e.footer()
-}
-
-func (e *Encoder) head(numEntries int) error {
- return binary.Write(
- e.w,
- signature,
- int32(VersionSupported),
- int32(numEntries),
- )
-}
-
-func (e *Encoder) entry(o *ObjectToPack) (err error) {
- if o.WantWrite() {
- // A cycle exists in this delta chain. This should only occur if a
- // selected object representation disappeared during writing
- // (for example due to a concurrent repack) and a different base
- // was chosen, forcing a cycle. Select something other than a
- // delta, and write this object.
- e.selector.restoreOriginal(o)
- o.BackToOriginal()
- }
-
- if o.IsWritten() {
- return nil
- }
-
- o.MarkWantWrite()
-
- if err := e.writeBaseIfDelta(o); err != nil {
- return err
- }
-
- // We need to check if we already write that object due a cyclic delta chain
- if o.IsWritten() {
- return nil
- }
-
- o.Offset = e.w.Offset()
-
- if o.IsDelta() {
- if err := e.writeDeltaHeader(o); err != nil {
- return err
- }
- } else {
- if err := e.entryHead(o.Type(), o.Size()); err != nil {
- return err
- }
- }
-
- e.zw.Reset(e.w)
-
- defer ioutil.CheckClose(e.zw, &err)
-
- or, err := o.Object.Reader()
- if err != nil {
- return err
- }
-
- defer ioutil.CheckClose(or, &err)
-
- _, err = io.Copy(e.zw, or)
- return err
-}
-
-func (e *Encoder) writeBaseIfDelta(o *ObjectToPack) error {
- if o.IsDelta() && !o.Base.IsWritten() {
- // We must write base first
- return e.entry(o.Base)
- }
-
- return nil
-}
-
-func (e *Encoder) writeDeltaHeader(o *ObjectToPack) error {
- // Write offset deltas by default
- t := plumbing.OFSDeltaObject
- if e.useRefDeltas {
- t = plumbing.REFDeltaObject
- }
-
- if err := e.entryHead(t, o.Object.Size()); err != nil {
- return err
- }
-
- if e.useRefDeltas {
- return e.writeRefDeltaHeader(o.Base.Hash())
- } else {
- return e.writeOfsDeltaHeader(o)
- }
-}
-
-func (e *Encoder) writeRefDeltaHeader(base plumbing.Hash) error {
- return binary.Write(e.w, base)
-}
-
-func (e *Encoder) writeOfsDeltaHeader(o *ObjectToPack) error {
- // for OFS_DELTA, offset of the base is interpreted as negative offset
- // relative to the type-byte of the header of the ofs-delta entry.
- relativeOffset := o.Offset - o.Base.Offset
- if relativeOffset <= 0 {
- return fmt.Errorf("bad offset for OFS_DELTA entry: %d", relativeOffset)
- }
-
- return binary.WriteVariableWidthInt(e.w, relativeOffset)
-}
-
-func (e *Encoder) entryHead(typeNum plumbing.ObjectType, size int64) error {
- t := int64(typeNum)
- header := []byte{}
- c := (t << firstLengthBits) | (size & maskFirstLength)
- size >>= firstLengthBits
- for {
- if size == 0 {
- break
- }
- header = append(header, byte(c|maskContinue))
- c = size & int64(maskLength)
- size >>= lengthBits
- }
-
- header = append(header, byte(c))
- _, err := e.w.Write(header)
-
- return err
-}
-
-func (e *Encoder) footer() (plumbing.Hash, error) {
- h := e.hasher.Sum()
- return h, binary.Write(e.w, h)
-}
-
-type offsetWriter struct {
- w io.Writer
- offset int64
-}
-
-func newOffsetWriter(w io.Writer) *offsetWriter {
- return &offsetWriter{w: w}
-}
-
-func (ow *offsetWriter) Write(p []byte) (n int, err error) {
- n, err = ow.w.Write(p)
- ow.offset += int64(n)
- return n, err
-}
-
-func (ow *offsetWriter) Offset() int64 {
- return ow.offset
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/error.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/error.go
deleted file mode 100644
index c0b91633131..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/error.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package packfile
-
-import "fmt"
-
-// Error specifies errors returned during packfile parsing.
-type Error struct {
- reason, details string
-}
-
-// NewError returns a new error.
-func NewError(reason string) *Error {
- return &Error{reason: reason}
-}
-
-// Error returns a text representation of the error.
-func (e *Error) Error() string {
- if e.details == "" {
- return e.reason
- }
-
- return fmt.Sprintf("%s: %s", e.reason, e.details)
-}
-
-// AddDetails adds details to an error, with additional text.
-func (e *Error) AddDetails(format string, args ...interface{}) *Error {
- return &Error{
- reason: e.reason,
- details: fmt.Sprintf(format, args...),
- }
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/fsobject.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/fsobject.go
deleted file mode 100644
index 238339daf89..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/fsobject.go
+++ /dev/null
@@ -1,119 +0,0 @@
-package packfile
-
-import (
- "io"
-
- billy "github.com/go-git/go-billy/v5"
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/cache"
- "github.com/go-git/go-git/v5/plumbing/format/idxfile"
- "github.com/go-git/go-git/v5/utils/ioutil"
-)
-
-// FSObject is an object from the packfile on the filesystem.
-type FSObject struct {
- hash plumbing.Hash
- offset int64
- size int64
- typ plumbing.ObjectType
- index idxfile.Index
- fs billy.Filesystem
- path string
- cache cache.Object
- largeObjectThreshold int64
-}
-
-// NewFSObject creates a new filesystem object.
-func NewFSObject(
- hash plumbing.Hash,
- finalType plumbing.ObjectType,
- offset int64,
- contentSize int64,
- index idxfile.Index,
- fs billy.Filesystem,
- path string,
- cache cache.Object,
- largeObjectThreshold int64,
-) *FSObject {
- return &FSObject{
- hash: hash,
- offset: offset,
- size: contentSize,
- typ: finalType,
- index: index,
- fs: fs,
- path: path,
- cache: cache,
- largeObjectThreshold: largeObjectThreshold,
- }
-}
-
-// Reader implements the plumbing.EncodedObject interface.
-func (o *FSObject) Reader() (io.ReadCloser, error) {
- obj, ok := o.cache.Get(o.hash)
- if ok && obj != o {
- reader, err := obj.Reader()
- if err != nil {
- return nil, err
- }
-
- return reader, nil
- }
-
- f, err := o.fs.Open(o.path)
- if err != nil {
- return nil, err
- }
-
- p := NewPackfileWithCache(o.index, nil, f, o.cache, o.largeObjectThreshold)
- if o.largeObjectThreshold > 0 && o.size > o.largeObjectThreshold {
- // We have a big object
- h, err := p.objectHeaderAtOffset(o.offset)
- if err != nil {
- return nil, err
- }
-
- r, err := p.getReaderDirect(h)
- if err != nil {
- _ = f.Close()
- return nil, err
- }
- return ioutil.NewReadCloserWithCloser(r, f.Close), nil
- }
- r, err := p.getObjectContent(o.offset)
- if err != nil {
- _ = f.Close()
- return nil, err
- }
-
- if err := f.Close(); err != nil {
- return nil, err
- }
-
- return r, nil
-}
-
-// SetSize implements the plumbing.EncodedObject interface. This method
-// is a noop.
-func (o *FSObject) SetSize(int64) {}
-
-// SetType implements the plumbing.EncodedObject interface. This method is
-// a noop.
-func (o *FSObject) SetType(plumbing.ObjectType) {}
-
-// Hash implements the plumbing.EncodedObject interface.
-func (o *FSObject) Hash() plumbing.Hash { return o.hash }
-
-// Size implements the plumbing.EncodedObject interface.
-func (o *FSObject) Size() int64 { return o.size }
-
-// Type implements the plumbing.EncodedObject interface.
-func (o *FSObject) Type() plumbing.ObjectType {
- return o.typ
-}
-
-// Writer implements the plumbing.EncodedObject interface. This method always
-// returns a nil writer.
-func (o *FSObject) Writer() (io.WriteCloser, error) {
- return nil, nil
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/object_pack.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/object_pack.go
deleted file mode 100644
index 8ce29ef8ba0..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/object_pack.go
+++ /dev/null
@@ -1,164 +0,0 @@
-package packfile
-
-import (
- "github.com/go-git/go-git/v5/plumbing"
-)
-
-// ObjectToPack is a representation of an object that is going to be into a
-// pack file.
-type ObjectToPack struct {
- // The main object to pack, it could be any object, including deltas
- Object plumbing.EncodedObject
- // Base is the object that a delta is based on (it could be also another delta).
- // If the main object is not a delta, Base will be null
- Base *ObjectToPack
- // Original is the object that we can generate applying the delta to
- // Base, or the same object as Object in the case of a non-delta
- // object.
- Original plumbing.EncodedObject
- // Depth is the amount of deltas needed to resolve to obtain Original
- // (delta based on delta based on ...)
- Depth int
-
- // offset in pack when object has been already written, or 0 if it
- // has not been written yet
- Offset int64
-
- // Information from the original object
- resolvedOriginal bool
- originalType plumbing.ObjectType
- originalSize int64
- originalHash plumbing.Hash
-}
-
-// newObjectToPack creates a correct ObjectToPack based on a non-delta object
-func newObjectToPack(o plumbing.EncodedObject) *ObjectToPack {
- return &ObjectToPack{
- Object: o,
- Original: o,
- }
-}
-
-// newDeltaObjectToPack creates a correct ObjectToPack for a delta object, based on
-// his base (could be another delta), the delta target (in this case called original),
-// and the delta Object itself
-func newDeltaObjectToPack(base *ObjectToPack, original, delta plumbing.EncodedObject) *ObjectToPack {
- return &ObjectToPack{
- Object: delta,
- Base: base,
- Original: original,
- Depth: base.Depth + 1,
- }
-}
-
-// BackToOriginal converts that ObjectToPack to a non-deltified object if it was one
-func (o *ObjectToPack) BackToOriginal() {
- if o.IsDelta() && o.Original != nil {
- o.Object = o.Original
- o.Base = nil
- o.Depth = 0
- }
-}
-
-// IsWritten returns if that ObjectToPack was
-// already written into the packfile or not
-func (o *ObjectToPack) IsWritten() bool {
- return o.Offset > 1
-}
-
-// MarkWantWrite marks this ObjectToPack as WantWrite
-// to avoid delta chain loops
-func (o *ObjectToPack) MarkWantWrite() {
- o.Offset = 1
-}
-
-// WantWrite checks if this ObjectToPack was marked as WantWrite before
-func (o *ObjectToPack) WantWrite() bool {
- return o.Offset == 1
-}
-
-// SetOriginal sets both Original and saves size, type and hash. If object
-// is nil Original is set but previous resolved values are kept
-func (o *ObjectToPack) SetOriginal(obj plumbing.EncodedObject) {
- o.Original = obj
- o.SaveOriginalMetadata()
-}
-
-// SaveOriginalMetadata saves size, type and hash of Original object
-func (o *ObjectToPack) SaveOriginalMetadata() {
- if o.Original != nil {
- o.originalSize = o.Original.Size()
- o.originalType = o.Original.Type()
- o.originalHash = o.Original.Hash()
- o.resolvedOriginal = true
- }
-}
-
-// CleanOriginal sets Original to nil
-func (o *ObjectToPack) CleanOriginal() {
- o.Original = nil
-}
-
-func (o *ObjectToPack) Type() plumbing.ObjectType {
- if o.Original != nil {
- return o.Original.Type()
- }
-
- if o.resolvedOriginal {
- return o.originalType
- }
-
- if o.Base != nil {
- return o.Base.Type()
- }
-
- if o.Object != nil {
- return o.Object.Type()
- }
-
- panic("cannot get type")
-}
-
-func (o *ObjectToPack) Hash() plumbing.Hash {
- if o.Original != nil {
- return o.Original.Hash()
- }
-
- if o.resolvedOriginal {
- return o.originalHash
- }
-
- do, ok := o.Object.(plumbing.DeltaObject)
- if ok {
- return do.ActualHash()
- }
-
- panic("cannot get hash")
-}
-
-func (o *ObjectToPack) Size() int64 {
- if o.Original != nil {
- return o.Original.Size()
- }
-
- if o.resolvedOriginal {
- return o.originalSize
- }
-
- do, ok := o.Object.(plumbing.DeltaObject)
- if ok {
- return do.ActualSize()
- }
-
- panic("cannot get ObjectToPack size")
-}
-
-func (o *ObjectToPack) IsDelta() bool {
- return o.Base != nil
-}
-
-func (o *ObjectToPack) SetDelta(base *ObjectToPack, delta plumbing.EncodedObject) {
- o.Object = delta
- o.Base = base
- o.Depth = base.Depth + 1
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/packfile.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/packfile.go
deleted file mode 100644
index 68527022578..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/packfile.go
+++ /dev/null
@@ -1,641 +0,0 @@
-package packfile
-
-import (
- "bytes"
- "fmt"
- "io"
- "os"
-
- billy "github.com/go-git/go-billy/v5"
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/cache"
- "github.com/go-git/go-git/v5/plumbing/format/idxfile"
- "github.com/go-git/go-git/v5/plumbing/storer"
- "github.com/go-git/go-git/v5/utils/ioutil"
- "github.com/go-git/go-git/v5/utils/sync"
-)
-
-var (
- // ErrInvalidObject is returned by Decode when an invalid object is
- // found in the packfile.
- ErrInvalidObject = NewError("invalid git object")
- // ErrZLib is returned by Decode when there was an error unzipping
- // the packfile contents.
- ErrZLib = NewError("zlib reading error")
-)
-
-// When reading small objects from packfile it is beneficial to do so at
-// once to exploit the buffered I/O. In many cases the objects are so small
-// that they were already loaded to memory when the object header was
-// loaded from the packfile. Wrapping in FSObject would cause this buffered
-// data to be thrown away and then re-read later, with the additional
-// seeking causing reloads from disk. Objects smaller than this threshold
-// are now always read into memory and stored in cache instead of being
-// wrapped in FSObject.
-const smallObjectThreshold = 16 * 1024
-
-// Packfile allows retrieving information from inside a packfile.
-type Packfile struct {
- idxfile.Index
- fs billy.Filesystem
- file billy.File
- s *Scanner
- deltaBaseCache cache.Object
- offsetToType map[int64]plumbing.ObjectType
- largeObjectThreshold int64
-}
-
-// NewPackfileWithCache creates a new Packfile with the given object cache.
-// If the filesystem is provided, the packfile will return FSObjects, otherwise
-// it will return MemoryObjects.
-func NewPackfileWithCache(
- index idxfile.Index,
- fs billy.Filesystem,
- file billy.File,
- cache cache.Object,
- largeObjectThreshold int64,
-) *Packfile {
- s := NewScanner(file)
- return &Packfile{
- index,
- fs,
- file,
- s,
- cache,
- make(map[int64]plumbing.ObjectType),
- largeObjectThreshold,
- }
-}
-
-// NewPackfile returns a packfile representation for the given packfile file
-// and packfile idx.
-// If the filesystem is provided, the packfile will return FSObjects, otherwise
-// it will return MemoryObjects.
-func NewPackfile(index idxfile.Index, fs billy.Filesystem, file billy.File, largeObjectThreshold int64) *Packfile {
- return NewPackfileWithCache(index, fs, file, cache.NewObjectLRUDefault(), largeObjectThreshold)
-}
-
-// Get retrieves the encoded object in the packfile with the given hash.
-func (p *Packfile) Get(h plumbing.Hash) (plumbing.EncodedObject, error) {
- offset, err := p.FindOffset(h)
- if err != nil {
- return nil, err
- }
-
- return p.objectAtOffset(offset, h)
-}
-
-// GetByOffset retrieves the encoded object from the packfile at the given
-// offset.
-func (p *Packfile) GetByOffset(o int64) (plumbing.EncodedObject, error) {
- hash, err := p.FindHash(o)
- if err != nil {
- return nil, err
- }
-
- return p.objectAtOffset(o, hash)
-}
-
-// GetSizeByOffset retrieves the size of the encoded object from the
-// packfile with the given offset.
-func (p *Packfile) GetSizeByOffset(o int64) (size int64, err error) {
- if _, err := p.s.SeekFromStart(o); err != nil {
- if err == io.EOF || isInvalid(err) {
- return 0, plumbing.ErrObjectNotFound
- }
-
- return 0, err
- }
-
- h, err := p.nextObjectHeader()
- if err != nil {
- return 0, err
- }
- return p.getObjectSize(h)
-}
-
-func (p *Packfile) objectHeaderAtOffset(offset int64) (*ObjectHeader, error) {
- h, err := p.s.SeekObjectHeader(offset)
- p.s.pendingObject = nil
- return h, err
-}
-
-func (p *Packfile) nextObjectHeader() (*ObjectHeader, error) {
- h, err := p.s.NextObjectHeader()
- p.s.pendingObject = nil
- return h, err
-}
-
-func (p *Packfile) getDeltaObjectSize(buf *bytes.Buffer) int64 {
- delta := buf.Bytes()
- _, delta = decodeLEB128(delta) // skip src size
- sz, _ := decodeLEB128(delta)
- return int64(sz)
-}
-
-func (p *Packfile) getObjectSize(h *ObjectHeader) (int64, error) {
- switch h.Type {
- case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
- return h.Length, nil
- case plumbing.REFDeltaObject, plumbing.OFSDeltaObject:
- buf := sync.GetBytesBuffer()
- defer sync.PutBytesBuffer(buf)
-
- if _, _, err := p.s.NextObject(buf); err != nil {
- return 0, err
- }
-
- return p.getDeltaObjectSize(buf), nil
- default:
- return 0, ErrInvalidObject.AddDetails("type %q", h.Type)
- }
-}
-
-func (p *Packfile) getObjectType(h *ObjectHeader) (typ plumbing.ObjectType, err error) {
- switch h.Type {
- case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
- return h.Type, nil
- case plumbing.REFDeltaObject, plumbing.OFSDeltaObject:
- var offset int64
- if h.Type == plumbing.REFDeltaObject {
- offset, err = p.FindOffset(h.Reference)
- if err != nil {
- return
- }
- } else {
- offset = h.OffsetReference
- }
-
- if baseType, ok := p.offsetToType[offset]; ok {
- typ = baseType
- } else {
- h, err = p.objectHeaderAtOffset(offset)
- if err != nil {
- return
- }
-
- typ, err = p.getObjectType(h)
- if err != nil {
- return
- }
- }
- default:
- err = ErrInvalidObject.AddDetails("type %q", h.Type)
- }
-
- p.offsetToType[h.Offset] = typ
-
- return
-}
-
-func (p *Packfile) objectAtOffset(offset int64, hash plumbing.Hash) (plumbing.EncodedObject, error) {
- if obj, ok := p.cacheGet(hash); ok {
- return obj, nil
- }
-
- h, err := p.objectHeaderAtOffset(offset)
- if err != nil {
- if err == io.EOF || isInvalid(err) {
- return nil, plumbing.ErrObjectNotFound
- }
- return nil, err
- }
-
- return p.getNextObject(h, hash)
-}
-
-func (p *Packfile) getNextObject(h *ObjectHeader, hash plumbing.Hash) (plumbing.EncodedObject, error) {
- var err error
-
- // If we have no filesystem, we will return a MemoryObject instead
- // of an FSObject.
- if p.fs == nil {
- return p.getNextMemoryObject(h)
- }
-
- // If the object is small enough then read it completely into memory now since
- // it is already read from disk into buffer anyway. For delta objects we want
- // to perform the optimization too, but we have to be careful about applying
- // small deltas on big objects.
- var size int64
- if h.Length <= smallObjectThreshold {
- if h.Type != plumbing.OFSDeltaObject && h.Type != plumbing.REFDeltaObject {
- return p.getNextMemoryObject(h)
- }
-
- // For delta objects we read the delta data and apply the small object
- // optimization only if the expanded version of the object still meets
- // the small object threshold condition.
- buf := sync.GetBytesBuffer()
- defer sync.PutBytesBuffer(buf)
-
- if _, _, err := p.s.NextObject(buf); err != nil {
- return nil, err
- }
-
- size = p.getDeltaObjectSize(buf)
- if size <= smallObjectThreshold {
- var obj = new(plumbing.MemoryObject)
- obj.SetSize(size)
- if h.Type == plumbing.REFDeltaObject {
- err = p.fillREFDeltaObjectContentWithBuffer(obj, h.Reference, buf)
- } else {
- err = p.fillOFSDeltaObjectContentWithBuffer(obj, h.OffsetReference, buf)
- }
- return obj, err
- }
- } else {
- size, err = p.getObjectSize(h)
- if err != nil {
- return nil, err
- }
- }
-
- typ, err := p.getObjectType(h)
- if err != nil {
- return nil, err
- }
-
- p.offsetToType[h.Offset] = typ
-
- return NewFSObject(
- hash,
- typ,
- h.Offset,
- size,
- p.Index,
- p.fs,
- p.file.Name(),
- p.deltaBaseCache,
- p.largeObjectThreshold,
- ), nil
-}
-
-func (p *Packfile) getObjectContent(offset int64) (io.ReadCloser, error) {
- h, err := p.objectHeaderAtOffset(offset)
- if err != nil {
- return nil, err
- }
-
- // getObjectContent is called from FSObject, so we have to explicitly
- // get memory object here to avoid recursive cycle
- obj, err := p.getNextMemoryObject(h)
- if err != nil {
- return nil, err
- }
-
- return obj.Reader()
-}
-
-func asyncReader(p *Packfile) (io.ReadCloser, error) {
- reader := ioutil.NewReaderUsingReaderAt(p.file, p.s.r.offset)
- zr, err := sync.GetZlibReader(reader)
- if err != nil {
- return nil, fmt.Errorf("zlib reset error: %s", err)
- }
-
- return ioutil.NewReadCloserWithCloser(zr.Reader, func() error {
- sync.PutZlibReader(zr)
- return nil
- }), nil
-
-}
-
-func (p *Packfile) getReaderDirect(h *ObjectHeader) (io.ReadCloser, error) {
- switch h.Type {
- case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
- return asyncReader(p)
- case plumbing.REFDeltaObject:
- deltaRc, err := asyncReader(p)
- if err != nil {
- return nil, err
- }
- r, err := p.readREFDeltaObjectContent(h, deltaRc)
- if err != nil {
- return nil, err
- }
- return r, nil
- case plumbing.OFSDeltaObject:
- deltaRc, err := asyncReader(p)
- if err != nil {
- return nil, err
- }
- r, err := p.readOFSDeltaObjectContent(h, deltaRc)
- if err != nil {
- return nil, err
- }
- return r, nil
- default:
- return nil, ErrInvalidObject.AddDetails("type %q", h.Type)
- }
-}
-
-func (p *Packfile) getNextMemoryObject(h *ObjectHeader) (plumbing.EncodedObject, error) {
- var obj = new(plumbing.MemoryObject)
- obj.SetSize(h.Length)
- obj.SetType(h.Type)
-
- var err error
- switch h.Type {
- case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
- err = p.fillRegularObjectContent(obj)
- case plumbing.REFDeltaObject:
- err = p.fillREFDeltaObjectContent(obj, h.Reference)
- case plumbing.OFSDeltaObject:
- err = p.fillOFSDeltaObjectContent(obj, h.OffsetReference)
- default:
- err = ErrInvalidObject.AddDetails("type %q", h.Type)
- }
-
- if err != nil {
- return nil, err
- }
-
- p.offsetToType[h.Offset] = obj.Type()
-
- return obj, nil
-}
-
-func (p *Packfile) fillRegularObjectContent(obj plumbing.EncodedObject) (err error) {
- w, err := obj.Writer()
- if err != nil {
- return err
- }
-
- defer ioutil.CheckClose(w, &err)
-
- _, _, err = p.s.NextObject(w)
- p.cachePut(obj)
-
- return err
-}
-
-func (p *Packfile) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plumbing.Hash) error {
- buf := sync.GetBytesBuffer()
- defer sync.PutBytesBuffer(buf)
-
- _, _, err := p.s.NextObject(buf)
- if err != nil {
- return err
- }
-
- return p.fillREFDeltaObjectContentWithBuffer(obj, ref, buf)
-}
-
-func (p *Packfile) readREFDeltaObjectContent(h *ObjectHeader, deltaRC io.Reader) (io.ReadCloser, error) {
- var err error
-
- base, ok := p.cacheGet(h.Reference)
- if !ok {
- base, err = p.Get(h.Reference)
- if err != nil {
- return nil, err
- }
- }
-
- return ReaderFromDelta(base, deltaRC)
-}
-
-func (p *Packfile) fillREFDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, ref plumbing.Hash, buf *bytes.Buffer) error {
- var err error
-
- base, ok := p.cacheGet(ref)
- if !ok {
- base, err = p.Get(ref)
- if err != nil {
- return err
- }
- }
-
- obj.SetType(base.Type())
- err = ApplyDelta(obj, base, buf.Bytes())
- p.cachePut(obj)
-
- return err
-}
-
-func (p *Packfile) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset int64) error {
- buf := sync.GetBytesBuffer()
- defer sync.PutBytesBuffer(buf)
-
- _, _, err := p.s.NextObject(buf)
- if err != nil {
- return err
- }
-
- return p.fillOFSDeltaObjectContentWithBuffer(obj, offset, buf)
-}
-
-func (p *Packfile) readOFSDeltaObjectContent(h *ObjectHeader, deltaRC io.Reader) (io.ReadCloser, error) {
- hash, err := p.FindHash(h.OffsetReference)
- if err != nil {
- return nil, err
- }
-
- base, err := p.objectAtOffset(h.OffsetReference, hash)
- if err != nil {
- return nil, err
- }
-
- return ReaderFromDelta(base, deltaRC)
-}
-
-func (p *Packfile) fillOFSDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, offset int64, buf *bytes.Buffer) error {
- hash, err := p.FindHash(offset)
- if err != nil {
- return err
- }
-
- base, err := p.objectAtOffset(offset, hash)
- if err != nil {
- return err
- }
-
- obj.SetType(base.Type())
- err = ApplyDelta(obj, base, buf.Bytes())
- p.cachePut(obj)
-
- return err
-}
-
-func (p *Packfile) cacheGet(h plumbing.Hash) (plumbing.EncodedObject, bool) {
- if p.deltaBaseCache == nil {
- return nil, false
- }
-
- return p.deltaBaseCache.Get(h)
-}
-
-func (p *Packfile) cachePut(obj plumbing.EncodedObject) {
- if p.deltaBaseCache == nil {
- return
- }
-
- p.deltaBaseCache.Put(obj)
-}
-
-// GetAll returns an iterator with all encoded objects in the packfile.
-// The iterator returned is not thread-safe, it should be used in the same
-// thread as the Packfile instance.
-func (p *Packfile) GetAll() (storer.EncodedObjectIter, error) {
- return p.GetByType(plumbing.AnyObject)
-}
-
-// GetByType returns all the objects of the given type.
-func (p *Packfile) GetByType(typ plumbing.ObjectType) (storer.EncodedObjectIter, error) {
- switch typ {
- case plumbing.AnyObject,
- plumbing.BlobObject,
- plumbing.TreeObject,
- plumbing.CommitObject,
- plumbing.TagObject:
- entries, err := p.EntriesByOffset()
- if err != nil {
- return nil, err
- }
-
- return &objectIter{
- // Easiest way to provide an object decoder is just to pass a Packfile
- // instance. To not mess with the seeks, it's a new instance with a
- // different scanner but the same cache and offset to hash map for
- // reusing as much cache as possible.
- p: p,
- iter: entries,
- typ: typ,
- }, nil
- default:
- return nil, plumbing.ErrInvalidType
- }
-}
-
-// ID returns the ID of the packfile, which is the checksum at the end of it.
-func (p *Packfile) ID() (plumbing.Hash, error) {
- prev, err := p.file.Seek(-20, io.SeekEnd)
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- var hash plumbing.Hash
- if _, err := io.ReadFull(p.file, hash[:]); err != nil {
- return plumbing.ZeroHash, err
- }
-
- if _, err := p.file.Seek(prev, io.SeekStart); err != nil {
- return plumbing.ZeroHash, err
- }
-
- return hash, nil
-}
-
-// Scanner returns the packfile's Scanner
-func (p *Packfile) Scanner() *Scanner {
- return p.s
-}
-
-// Close the packfile and its resources.
-func (p *Packfile) Close() error {
- closer, ok := p.file.(io.Closer)
- if !ok {
- return nil
- }
-
- return closer.Close()
-}
-
-type objectIter struct {
- p *Packfile
- typ plumbing.ObjectType
- iter idxfile.EntryIter
-}
-
-func (i *objectIter) Next() (plumbing.EncodedObject, error) {
- for {
- e, err := i.iter.Next()
- if err != nil {
- return nil, err
- }
-
- if i.typ != plumbing.AnyObject {
- if typ, ok := i.p.offsetToType[int64(e.Offset)]; ok {
- if typ != i.typ {
- continue
- }
- } else if obj, ok := i.p.cacheGet(e.Hash); ok {
- if obj.Type() != i.typ {
- i.p.offsetToType[int64(e.Offset)] = obj.Type()
- continue
- }
- return obj, nil
- } else {
- h, err := i.p.objectHeaderAtOffset(int64(e.Offset))
- if err != nil {
- return nil, err
- }
-
- if h.Type == plumbing.REFDeltaObject || h.Type == plumbing.OFSDeltaObject {
- typ, err := i.p.getObjectType(h)
- if err != nil {
- return nil, err
- }
- if typ != i.typ {
- i.p.offsetToType[int64(e.Offset)] = typ
- continue
- }
- // getObjectType will seek in the file so we cannot use getNextObject safely
- return i.p.objectAtOffset(int64(e.Offset), e.Hash)
- } else {
- if h.Type != i.typ {
- i.p.offsetToType[int64(e.Offset)] = h.Type
- continue
- }
- return i.p.getNextObject(h, e.Hash)
- }
- }
- }
-
- obj, err := i.p.objectAtOffset(int64(e.Offset), e.Hash)
- if err != nil {
- return nil, err
- }
-
- return obj, nil
- }
-}
-
-func (i *objectIter) ForEach(f func(plumbing.EncodedObject) error) error {
- for {
- o, err := i.Next()
- if err != nil {
- if err == io.EOF {
- return nil
- }
- return err
- }
-
- if err := f(o); err != nil {
- return err
- }
- }
-}
-
-func (i *objectIter) Close() {
- i.iter.Close()
-}
-
-// isInvalid checks whether an error is an os.PathError with an os.ErrInvalid
-// error inside. It also checks for the windows error, which is different from
-// os.ErrInvalid.
-func isInvalid(err error) bool {
- pe, ok := err.(*os.PathError)
- if !ok {
- return false
- }
-
- errstr := pe.Err.Error()
- return errstr == errInvalidUnix || errstr == errInvalidWindows
-}
-
-// errInvalidWindows is the Windows equivalent to os.ErrInvalid
-const errInvalidWindows = "The parameter is incorrect."
-
-var errInvalidUnix = os.ErrInvalid.Error()
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/parser.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/parser.go
deleted file mode 100644
index 62f1d13cb8e..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/parser.go
+++ /dev/null
@@ -1,611 +0,0 @@
-package packfile
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/cache"
- "github.com/go-git/go-git/v5/plumbing/storer"
- "github.com/go-git/go-git/v5/utils/ioutil"
- "github.com/go-git/go-git/v5/utils/sync"
-)
-
-var (
- // ErrReferenceDeltaNotFound is returned when the reference delta is not
- // found.
- ErrReferenceDeltaNotFound = errors.New("reference delta not found")
-
- // ErrNotSeekableSource is returned when the source for the parser is not
- // seekable and a storage was not provided, so it can't be parsed.
- ErrNotSeekableSource = errors.New("parser source is not seekable and storage was not provided")
-
- // ErrDeltaNotCached is returned when the delta could not be found in cache.
- ErrDeltaNotCached = errors.New("delta could not be found in cache")
-)
-
-// Observer interface is implemented by index encoders.
-type Observer interface {
- // OnHeader is called when a new packfile is opened.
- OnHeader(count uint32) error
- // OnInflatedObjectHeader is called for each object header read.
- OnInflatedObjectHeader(t plumbing.ObjectType, objSize int64, pos int64) error
- // OnInflatedObjectContent is called for each decoded object.
- OnInflatedObjectContent(h plumbing.Hash, pos int64, crc uint32, content []byte) error
- // OnFooter is called when decoding is done.
- OnFooter(h plumbing.Hash) error
-}
-
-// Parser decodes a packfile and calls any observer associated to it. Is used
-// to generate indexes.
-type Parser struct {
- storage storer.EncodedObjectStorer
- scanner *Scanner
- count uint32
- oi []*objectInfo
- oiByHash map[plumbing.Hash]*objectInfo
- oiByOffset map[int64]*objectInfo
- checksum plumbing.Hash
-
- cache *cache.BufferLRU
- // delta content by offset, only used if source is not seekable
- deltas map[int64][]byte
-
- ob []Observer
-}
-
-// NewParser creates a new Parser. The Scanner source must be seekable.
-// If it's not, NewParserWithStorage should be used instead.
-func NewParser(scanner *Scanner, ob ...Observer) (*Parser, error) {
- return NewParserWithStorage(scanner, nil, ob...)
-}
-
-// NewParserWithStorage creates a new Parser. The scanner source must either
-// be seekable or a storage must be provided.
-func NewParserWithStorage(
- scanner *Scanner,
- storage storer.EncodedObjectStorer,
- ob ...Observer,
-) (*Parser, error) {
- if !scanner.IsSeekable && storage == nil {
- return nil, ErrNotSeekableSource
- }
-
- var deltas map[int64][]byte
- if !scanner.IsSeekable {
- deltas = make(map[int64][]byte)
- }
-
- return &Parser{
- storage: storage,
- scanner: scanner,
- ob: ob,
- count: 0,
- cache: cache.NewBufferLRUDefault(),
- deltas: deltas,
- }, nil
-}
-
-func (p *Parser) forEachObserver(f func(o Observer) error) error {
- for _, o := range p.ob {
- if err := f(o); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (p *Parser) onHeader(count uint32) error {
- return p.forEachObserver(func(o Observer) error {
- return o.OnHeader(count)
- })
-}
-
-func (p *Parser) onInflatedObjectHeader(
- t plumbing.ObjectType,
- objSize int64,
- pos int64,
-) error {
- return p.forEachObserver(func(o Observer) error {
- return o.OnInflatedObjectHeader(t, objSize, pos)
- })
-}
-
-func (p *Parser) onInflatedObjectContent(
- h plumbing.Hash,
- pos int64,
- crc uint32,
- content []byte,
-) error {
- return p.forEachObserver(func(o Observer) error {
- return o.OnInflatedObjectContent(h, pos, crc, content)
- })
-}
-
-func (p *Parser) onFooter(h plumbing.Hash) error {
- return p.forEachObserver(func(o Observer) error {
- return o.OnFooter(h)
- })
-}
-
-// Parse start decoding phase of the packfile.
-func (p *Parser) Parse() (plumbing.Hash, error) {
- if err := p.init(); err != nil {
- return plumbing.ZeroHash, err
- }
-
- if err := p.indexObjects(); err != nil {
- return plumbing.ZeroHash, err
- }
-
- var err error
- p.checksum, err = p.scanner.Checksum()
- if err != nil && err != io.EOF {
- return plumbing.ZeroHash, err
- }
-
- if err := p.resolveDeltas(); err != nil {
- return plumbing.ZeroHash, err
- }
-
- if err := p.onFooter(p.checksum); err != nil {
- return plumbing.ZeroHash, err
- }
-
- return p.checksum, nil
-}
-
-func (p *Parser) init() error {
- _, c, err := p.scanner.Header()
- if err != nil {
- return err
- }
-
- if err := p.onHeader(c); err != nil {
- return err
- }
-
- p.count = c
- p.oiByHash = make(map[plumbing.Hash]*objectInfo, p.count)
- p.oiByOffset = make(map[int64]*objectInfo, p.count)
- p.oi = make([]*objectInfo, p.count)
-
- return nil
-}
-
-type objectHeaderWriter func(typ plumbing.ObjectType, sz int64) error
-
-type lazyObjectWriter interface {
- // LazyWriter enables an object to be lazily written.
- // It returns:
- // - w: a writer to receive the object's content.
- // - lwh: a func to write the object header.
- // - err: any error from the initial writer creation process.
- //
- // Note that if the object header is not written BEFORE the writer
- // is used, this will result in an invalid object.
- LazyWriter() (w io.WriteCloser, lwh objectHeaderWriter, err error)
-}
-
-func (p *Parser) indexObjects() error {
- buf := sync.GetBytesBuffer()
- defer sync.PutBytesBuffer(buf)
-
- for i := uint32(0); i < p.count; i++ {
- oh, err := p.scanner.NextObjectHeader()
- if err != nil {
- return err
- }
-
- delta := false
- var ota *objectInfo
- switch t := oh.Type; t {
- case plumbing.OFSDeltaObject:
- delta = true
-
- parent, ok := p.oiByOffset[oh.OffsetReference]
- if !ok {
- return plumbing.ErrObjectNotFound
- }
-
- ota = newDeltaObject(oh.Offset, oh.Length, t, parent)
- parent.Children = append(parent.Children, ota)
- case plumbing.REFDeltaObject:
- delta = true
- parent, ok := p.oiByHash[oh.Reference]
- if !ok {
- // can't find referenced object in this pack file
- // this must be a "thin" pack.
- parent = &objectInfo{ //Placeholder parent
- SHA1: oh.Reference,
- ExternalRef: true, // mark as an external reference that must be resolved
- Type: plumbing.AnyObject,
- DiskType: plumbing.AnyObject,
- }
- p.oiByHash[oh.Reference] = parent
- }
- ota = newDeltaObject(oh.Offset, oh.Length, t, parent)
- parent.Children = append(parent.Children, ota)
-
- default:
- ota = newBaseObject(oh.Offset, oh.Length, t)
- }
-
- hasher := plumbing.NewHasher(oh.Type, oh.Length)
- writers := []io.Writer{hasher}
- var obj *plumbing.MemoryObject
-
- // Lazy writing is only available for non-delta objects.
- if p.storage != nil && !delta {
- // When a storage is set and supports lazy writing,
- // use that instead of creating a memory object.
- if low, ok := p.storage.(lazyObjectWriter); ok {
- ow, lwh, err := low.LazyWriter()
- if err != nil {
- return err
- }
-
- if err = lwh(oh.Type, oh.Length); err != nil {
- return err
- }
-
- defer ow.Close()
- writers = append(writers, ow)
- } else {
- obj = new(plumbing.MemoryObject)
- obj.SetSize(oh.Length)
- obj.SetType(oh.Type)
-
- writers = append(writers, obj)
- }
- }
- if delta && !p.scanner.IsSeekable {
- buf.Reset()
- buf.Grow(int(oh.Length))
- writers = append(writers, buf)
- }
-
- mw := io.MultiWriter(writers...)
-
- _, crc, err := p.scanner.NextObject(mw)
- if err != nil {
- return err
- }
-
- // Non delta objects needs to be added into the storage. This
- // is only required when lazy writing is not supported.
- if obj != nil {
- if _, err := p.storage.SetEncodedObject(obj); err != nil {
- return err
- }
- }
-
- ota.Crc32 = crc
- ota.Length = oh.Length
-
- if !delta {
- sha1 := hasher.Sum()
-
- // Move children of placeholder parent into actual parent, in case this
- // was a non-external delta reference.
- if placeholder, ok := p.oiByHash[sha1]; ok {
- ota.Children = placeholder.Children
- for _, c := range ota.Children {
- c.Parent = ota
- }
- }
-
- ota.SHA1 = sha1
- p.oiByHash[ota.SHA1] = ota
- }
-
- if delta && !p.scanner.IsSeekable {
- data := buf.Bytes()
- p.deltas[oh.Offset] = make([]byte, len(data))
- copy(p.deltas[oh.Offset], data)
- }
-
- p.oiByOffset[oh.Offset] = ota
- p.oi[i] = ota
- }
-
- return nil
-}
-
-func (p *Parser) resolveDeltas() error {
- buf := sync.GetBytesBuffer()
- defer sync.PutBytesBuffer(buf)
-
- for _, obj := range p.oi {
- buf.Reset()
- buf.Grow(int(obj.Length))
- err := p.get(obj, buf)
- if err != nil {
- return err
- }
-
- if err := p.onInflatedObjectHeader(obj.Type, obj.Length, obj.Offset); err != nil {
- return err
- }
-
- if err := p.onInflatedObjectContent(obj.SHA1, obj.Offset, obj.Crc32, nil); err != nil {
- return err
- }
-
- if !obj.IsDelta() && len(obj.Children) > 0 {
- // Dealing with an io.ReaderAt object, means we can
- // create it once and reuse across all children.
- r := bytes.NewReader(buf.Bytes())
- for _, child := range obj.Children {
- // Even though we are discarding the output, we still need to read it to
- // so that the scanner can advance to the next object, and the SHA1 can be
- // calculated.
- if err := p.resolveObject(io.Discard, child, r); err != nil {
- return err
- }
- p.resolveExternalRef(child)
- }
-
- // Remove the delta from the cache.
- if obj.DiskType.IsDelta() && !p.scanner.IsSeekable {
- delete(p.deltas, obj.Offset)
- }
- }
- }
-
- return nil
-}
-
-func (p *Parser) resolveExternalRef(o *objectInfo) {
- if ref, ok := p.oiByHash[o.SHA1]; ok && ref.ExternalRef {
- p.oiByHash[o.SHA1] = o
- o.Children = ref.Children
- for _, c := range o.Children {
- c.Parent = o
- }
- }
-}
-
-func (p *Parser) get(o *objectInfo, buf *bytes.Buffer) (err error) {
- if !o.ExternalRef { // skip cache check for placeholder parents
- b, ok := p.cache.Get(o.Offset)
- if ok {
- _, err := buf.Write(b)
- return err
- }
- }
-
- // If it's not on the cache and is not a delta we can try to find it in the
- // storage, if there's one. External refs must enter here.
- if p.storage != nil && !o.Type.IsDelta() {
- var e plumbing.EncodedObject
- e, err = p.storage.EncodedObject(plumbing.AnyObject, o.SHA1)
- if err != nil {
- return err
- }
- o.Type = e.Type()
-
- var r io.ReadCloser
- r, err = e.Reader()
- if err != nil {
- return err
- }
-
- defer ioutil.CheckClose(r, &err)
-
- _, err = buf.ReadFrom(io.LimitReader(r, e.Size()))
- return err
- }
-
- if o.ExternalRef {
- // we were not able to resolve a ref in a thin pack
- return ErrReferenceDeltaNotFound
- }
-
- if o.DiskType.IsDelta() {
- b := sync.GetBytesBuffer()
- defer sync.PutBytesBuffer(b)
- buf.Grow(int(o.Length))
- err := p.get(o.Parent, b)
- if err != nil {
- return err
- }
-
- err = p.resolveObject(buf, o, bytes.NewReader(b.Bytes()))
- if err != nil {
- return err
- }
- } else {
- err := p.readData(buf, o)
- if err != nil {
- return err
- }
- }
-
- // If the scanner is seekable, caching this data into
- // memory by offset seems wasteful.
- // There is a trade-off to be considered here in terms
- // of execution time vs memory consumption.
- //
- // TODO: improve seekable execution time, so that we can
- // skip this cache.
- if len(o.Children) > 0 {
- data := make([]byte, buf.Len())
- copy(data, buf.Bytes())
- p.cache.Put(o.Offset, data)
- }
- return nil
-}
-
-// resolveObject resolves an object from base, using information
-// provided by o.
-//
-// This call has the side-effect of changing field values
-// from the object info o:
-// - Type: OFSDeltaObject may become the target type (e.g. Blob).
-// - Size: The size may be update with the target size.
-// - Hash: Zero hashes will be calculated as part of the object
-// resolution. Hence why this process can't be avoided even when w
-// is an io.Discard.
-//
-// base must be an io.ReaderAt, which is a requirement from
-// patchDeltaStream. The main reason being that reversing an
-// delta object may lead to going backs and forths within base,
-// which is not supported by io.Reader.
-func (p *Parser) resolveObject(
- w io.Writer,
- o *objectInfo,
- base io.ReaderAt,
-) error {
- if !o.DiskType.IsDelta() {
- return nil
- }
- buf := sync.GetBytesBuffer()
- defer sync.PutBytesBuffer(buf)
- err := p.readData(buf, o)
- if err != nil {
- return err
- }
-
- writers := []io.Writer{w}
- var obj *plumbing.MemoryObject
- var lwh objectHeaderWriter
-
- if p.storage != nil {
- if low, ok := p.storage.(lazyObjectWriter); ok {
- ow, wh, err := low.LazyWriter()
- if err != nil {
- return err
- }
- lwh = wh
-
- defer ow.Close()
- writers = append(writers, ow)
- } else {
- obj = new(plumbing.MemoryObject)
- ow, err := obj.Writer()
- if err != nil {
- return err
- }
-
- writers = append(writers, ow)
- }
- }
-
- mw := io.MultiWriter(writers...)
-
- err = applyPatchBase(o, base, buf, mw, lwh)
- if err != nil {
- return err
- }
-
- if obj != nil {
- obj.SetType(o.Type)
- obj.SetSize(o.Size()) // Size here is correct as it was populated by applyPatchBase.
- if _, err := p.storage.SetEncodedObject(obj); err != nil {
- return err
- }
- }
- return err
-}
-
-func (p *Parser) readData(w io.Writer, o *objectInfo) error {
- if !p.scanner.IsSeekable && o.DiskType.IsDelta() {
- data, ok := p.deltas[o.Offset]
- if !ok {
- return ErrDeltaNotCached
- }
- _, err := w.Write(data)
- return err
- }
-
- if _, err := p.scanner.SeekObjectHeader(o.Offset); err != nil {
- return err
- }
-
- if _, _, err := p.scanner.NextObject(w); err != nil {
- return err
- }
- return nil
-}
-
-// applyPatchBase applies the patch to target.
-//
-// Note that ota will be updated based on the description in resolveObject.
-func applyPatchBase(ota *objectInfo, base io.ReaderAt, delta io.Reader, target io.Writer, wh objectHeaderWriter) error {
- if target == nil {
- return fmt.Errorf("cannot apply patch against nil target")
- }
-
- typ := ota.Type
- if ota.SHA1 == plumbing.ZeroHash {
- typ = ota.Parent.Type
- }
-
- sz, h, err := patchDeltaWriter(target, base, delta, typ, wh)
- if err != nil {
- return err
- }
-
- if ota.SHA1 == plumbing.ZeroHash {
- ota.Type = typ
- ota.Length = int64(sz)
- ota.SHA1 = h
- }
-
- return nil
-}
-
-func getSHA1(t plumbing.ObjectType, data []byte) (plumbing.Hash, error) {
- hasher := plumbing.NewHasher(t, int64(len(data)))
- if _, err := hasher.Write(data); err != nil {
- return plumbing.ZeroHash, err
- }
-
- return hasher.Sum(), nil
-}
-
-type objectInfo struct {
- Offset int64
- Length int64
- Type plumbing.ObjectType
- DiskType plumbing.ObjectType
- ExternalRef bool // indicates this is an external reference in a thin pack file
-
- Crc32 uint32
-
- Parent *objectInfo
- Children []*objectInfo
- SHA1 plumbing.Hash
-}
-
-func newBaseObject(offset, length int64, t plumbing.ObjectType) *objectInfo {
- return newDeltaObject(offset, length, t, nil)
-}
-
-func newDeltaObject(
- offset, length int64,
- t plumbing.ObjectType,
- parent *objectInfo,
-) *objectInfo {
- obj := &objectInfo{
- Offset: offset,
- Length: length,
- Type: t,
- DiskType: t,
- Crc32: 0,
- Parent: parent,
- }
-
- return obj
-}
-
-func (o *objectInfo) IsDelta() bool {
- return o.Type.IsDelta()
-}
-
-func (o *objectInfo) Size() int64 {
- return o.Length
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/patch_delta.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/patch_delta.go
deleted file mode 100644
index 960769c7c81..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/patch_delta.go
+++ /dev/null
@@ -1,526 +0,0 @@
-package packfile
-
-import (
- "bufio"
- "bytes"
- "errors"
- "fmt"
- "io"
- "math"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/utils/ioutil"
- "github.com/go-git/go-git/v5/utils/sync"
-)
-
-// See https://github.com/git/git/blob/49fa3dc76179e04b0833542fa52d0f287a4955ac/delta.h
-// https://github.com/git/git/blob/c2c5f6b1e479f2c38e0e01345350620944e3527f/patch-delta.c,
-// and https://github.com/tarruda/node-git-core/blob/master/src/js/delta.js
-// for details about the delta format.
-
-var (
- ErrInvalidDelta = errors.New("invalid delta")
- ErrDeltaCmd = errors.New("wrong delta command")
-)
-
-const (
- payload = 0x7f // 0111 1111
- continuation = 0x80 // 1000 0000
-)
-
-type offset struct {
- mask byte
- shift uint
-}
-
-var offsets = []offset{
- {mask: 0x01, shift: 0},
- {mask: 0x02, shift: 8},
- {mask: 0x04, shift: 16},
- {mask: 0x08, shift: 24},
-}
-
-var sizes = []offset{
- {mask: 0x10, shift: 0},
- {mask: 0x20, shift: 8},
- {mask: 0x40, shift: 16},
-}
-
-// ApplyDelta writes to target the result of applying the modification deltas in delta to base.
-func ApplyDelta(target, base plumbing.EncodedObject, delta []byte) (err error) {
- r, err := base.Reader()
- if err != nil {
- return err
- }
-
- defer ioutil.CheckClose(r, &err)
-
- w, err := target.Writer()
- if err != nil {
- return err
- }
-
- defer ioutil.CheckClose(w, &err)
-
- buf := sync.GetBytesBuffer()
- defer sync.PutBytesBuffer(buf)
- _, err = buf.ReadFrom(r)
- if err != nil {
- return err
- }
- src := buf.Bytes()
-
- dst := sync.GetBytesBuffer()
- defer sync.PutBytesBuffer(dst)
- err = patchDelta(dst, src, delta)
- if err != nil {
- return err
- }
-
- target.SetSize(int64(dst.Len()))
-
- b := sync.GetByteSlice()
- _, err = io.CopyBuffer(w, dst, *b)
- sync.PutByteSlice(b)
- return err
-}
-
-// PatchDelta returns the result of applying the modification deltas in delta to src.
-// An error will be returned if delta is corrupted (ErrDeltaLen) or an action command
-// is not copy from source or copy from delta (ErrDeltaCmd).
-func PatchDelta(src, delta []byte) ([]byte, error) {
- b := &bytes.Buffer{}
- if err := patchDelta(b, src, delta); err != nil {
- return nil, err
- }
- return b.Bytes(), nil
-}
-
-func ReaderFromDelta(base plumbing.EncodedObject, deltaRC io.Reader) (io.ReadCloser, error) {
- deltaBuf := bufio.NewReaderSize(deltaRC, 1024)
- srcSz, err := decodeLEB128ByteReader(deltaBuf)
- if err != nil {
- if err == io.EOF {
- return nil, ErrInvalidDelta
- }
- return nil, err
- }
- if srcSz != uint(base.Size()) {
- return nil, ErrInvalidDelta
- }
-
- targetSz, err := decodeLEB128ByteReader(deltaBuf)
- if err != nil {
- if err == io.EOF {
- return nil, ErrInvalidDelta
- }
- return nil, err
- }
- remainingTargetSz := targetSz
-
- dstRd, dstWr := io.Pipe()
-
- go func() {
- baseRd, err := base.Reader()
- if err != nil {
- _ = dstWr.CloseWithError(ErrInvalidDelta)
- return
- }
- defer baseRd.Close()
-
- baseBuf := bufio.NewReader(baseRd)
- basePos := uint(0)
-
- for {
- cmd, err := deltaBuf.ReadByte()
- if err == io.EOF {
- _ = dstWr.CloseWithError(ErrInvalidDelta)
- return
- }
- if err != nil {
- _ = dstWr.CloseWithError(err)
- return
- }
-
- switch {
- case isCopyFromSrc(cmd):
- offset, err := decodeOffsetByteReader(cmd, deltaBuf)
- if err != nil {
- _ = dstWr.CloseWithError(err)
- return
- }
- sz, err := decodeSizeByteReader(cmd, deltaBuf)
- if err != nil {
- _ = dstWr.CloseWithError(err)
- return
- }
-
- if invalidSize(sz, targetSz) ||
- invalidOffsetSize(offset, sz, srcSz) {
- _ = dstWr.Close()
- return
- }
-
- discard := offset - basePos
- if basePos > offset {
- _ = baseRd.Close()
- baseRd, err = base.Reader()
- if err != nil {
- _ = dstWr.CloseWithError(ErrInvalidDelta)
- return
- }
- baseBuf.Reset(baseRd)
- discard = offset
- }
- for discard > math.MaxInt32 {
- n, err := baseBuf.Discard(math.MaxInt32)
- if err != nil {
- _ = dstWr.CloseWithError(err)
- return
- }
- basePos += uint(n)
- discard -= uint(n)
- }
- for discard > 0 {
- n, err := baseBuf.Discard(int(discard))
- if err != nil {
- _ = dstWr.CloseWithError(err)
- return
- }
- basePos += uint(n)
- discard -= uint(n)
- }
- if _, err := io.Copy(dstWr, io.LimitReader(baseBuf, int64(sz))); err != nil {
- _ = dstWr.CloseWithError(err)
- return
- }
- remainingTargetSz -= sz
- basePos += sz
-
- case isCopyFromDelta(cmd):
- sz := uint(cmd) // cmd is the size itself
- if invalidSize(sz, targetSz) {
- _ = dstWr.CloseWithError(ErrInvalidDelta)
- return
- }
- if _, err := io.Copy(dstWr, io.LimitReader(deltaBuf, int64(sz))); err != nil {
- _ = dstWr.CloseWithError(err)
- return
- }
-
- remainingTargetSz -= sz
-
- default:
- _ = dstWr.CloseWithError(ErrDeltaCmd)
- return
- }
-
- if remainingTargetSz <= 0 {
- _ = dstWr.Close()
- return
- }
- }
- }()
-
- return dstRd, nil
-}
-
-func patchDelta(dst *bytes.Buffer, src, delta []byte) error {
- if len(delta) < minCopySize {
- return ErrInvalidDelta
- }
-
- srcSz, delta := decodeLEB128(delta)
- if srcSz != uint(len(src)) {
- return ErrInvalidDelta
- }
-
- targetSz, delta := decodeLEB128(delta)
- remainingTargetSz := targetSz
-
- var cmd byte
- dst.Grow(int(targetSz))
- for {
- if len(delta) == 0 {
- return ErrInvalidDelta
- }
-
- cmd = delta[0]
- delta = delta[1:]
-
- switch {
- case isCopyFromSrc(cmd):
- var offset, sz uint
- var err error
- offset, delta, err = decodeOffset(cmd, delta)
- if err != nil {
- return err
- }
-
- sz, delta, err = decodeSize(cmd, delta)
- if err != nil {
- return err
- }
-
- if invalidSize(sz, targetSz) ||
- invalidOffsetSize(offset, sz, srcSz) {
- break
- }
- dst.Write(src[offset : offset+sz])
- remainingTargetSz -= sz
-
- case isCopyFromDelta(cmd):
- sz := uint(cmd) // cmd is the size itself
- if invalidSize(sz, targetSz) {
- return ErrInvalidDelta
- }
-
- if uint(len(delta)) < sz {
- return ErrInvalidDelta
- }
-
- dst.Write(delta[0:sz])
- remainingTargetSz -= sz
- delta = delta[sz:]
-
- default:
- return ErrDeltaCmd
- }
-
- if remainingTargetSz <= 0 {
- break
- }
- }
-
- return nil
-}
-
-func patchDeltaWriter(dst io.Writer, base io.ReaderAt, delta io.Reader,
- typ plumbing.ObjectType, writeHeader objectHeaderWriter) (uint, plumbing.Hash, error) {
- deltaBuf := bufio.NewReaderSize(delta, 1024)
- srcSz, err := decodeLEB128ByteReader(deltaBuf)
- if err != nil {
- if err == io.EOF {
- return 0, plumbing.ZeroHash, ErrInvalidDelta
- }
- return 0, plumbing.ZeroHash, err
- }
-
- if r, ok := base.(*bytes.Reader); ok && srcSz != uint(r.Size()) {
- return 0, plumbing.ZeroHash, ErrInvalidDelta
- }
-
- targetSz, err := decodeLEB128ByteReader(deltaBuf)
- if err != nil {
- if err == io.EOF {
- return 0, plumbing.ZeroHash, ErrInvalidDelta
- }
- return 0, plumbing.ZeroHash, err
- }
-
- // If header still needs to be written, caller will provide
- // a LazyObjectWriterHeader. This seems to be the case when
- // dealing with thin-packs.
- if writeHeader != nil {
- err = writeHeader(typ, int64(targetSz))
- if err != nil {
- return 0, plumbing.ZeroHash, fmt.Errorf("could not lazy write header: %w", err)
- }
- }
-
- remainingTargetSz := targetSz
-
- hasher := plumbing.NewHasher(typ, int64(targetSz))
- mw := io.MultiWriter(dst, hasher)
-
- bufp := sync.GetByteSlice()
- defer sync.PutByteSlice(bufp)
-
- sr := io.NewSectionReader(base, int64(0), int64(srcSz))
- // Keep both the io.LimitedReader types, so we can reset N.
- baselr := io.LimitReader(sr, 0).(*io.LimitedReader)
- deltalr := io.LimitReader(deltaBuf, 0).(*io.LimitedReader)
-
- for {
- buf := *bufp
- cmd, err := deltaBuf.ReadByte()
- if err == io.EOF {
- return 0, plumbing.ZeroHash, ErrInvalidDelta
- }
- if err != nil {
- return 0, plumbing.ZeroHash, err
- }
-
- if isCopyFromSrc(cmd) {
- offset, err := decodeOffsetByteReader(cmd, deltaBuf)
- if err != nil {
- return 0, plumbing.ZeroHash, err
- }
- sz, err := decodeSizeByteReader(cmd, deltaBuf)
- if err != nil {
- return 0, plumbing.ZeroHash, err
- }
-
- if invalidSize(sz, targetSz) ||
- invalidOffsetSize(offset, sz, srcSz) {
- return 0, plumbing.ZeroHash, err
- }
-
- if _, err := sr.Seek(int64(offset), io.SeekStart); err != nil {
- return 0, plumbing.ZeroHash, err
- }
- baselr.N = int64(sz)
- if _, err := io.CopyBuffer(mw, baselr, buf); err != nil {
- return 0, plumbing.ZeroHash, err
- }
- remainingTargetSz -= sz
- } else if isCopyFromDelta(cmd) {
- sz := uint(cmd) // cmd is the size itself
- if invalidSize(sz, targetSz) {
- return 0, plumbing.ZeroHash, ErrInvalidDelta
- }
- deltalr.N = int64(sz)
- if _, err := io.CopyBuffer(mw, deltalr, buf); err != nil {
- return 0, plumbing.ZeroHash, err
- }
-
- remainingTargetSz -= sz
- } else {
- return 0, plumbing.ZeroHash, err
- }
- if remainingTargetSz <= 0 {
- break
- }
- }
-
- return targetSz, hasher.Sum(), nil
-}
-
-// Decodes a number encoded as an unsigned LEB128 at the start of some
-// binary data and returns the decoded number and the rest of the
-// stream.
-//
-// This must be called twice on the delta data buffer, first to get the
-// expected source buffer size, and again to get the target buffer size.
-func decodeLEB128(input []byte) (uint, []byte) {
- var num, sz uint
- var b byte
- for {
- b = input[sz]
- num |= (uint(b) & payload) << (sz * 7) // concats 7 bits chunks
- sz++
-
- if uint(b)&continuation == 0 || sz == uint(len(input)) {
- break
- }
- }
-
- return num, input[sz:]
-}
-
-func decodeLEB128ByteReader(input io.ByteReader) (uint, error) {
- var num, sz uint
- for {
- b, err := input.ReadByte()
- if err != nil {
- return 0, err
- }
-
- num |= (uint(b) & payload) << (sz * 7) // concats 7 bits chunks
- sz++
-
- if uint(b)&continuation == 0 {
- break
- }
- }
-
- return num, nil
-}
-
-func isCopyFromSrc(cmd byte) bool {
- return (cmd & continuation) != 0
-}
-
-func isCopyFromDelta(cmd byte) bool {
- return (cmd&continuation) == 0 && cmd != 0
-}
-
-func decodeOffsetByteReader(cmd byte, delta io.ByteReader) (uint, error) {
- var offset uint
- for _, o := range offsets {
- if (cmd & o.mask) != 0 {
- next, err := delta.ReadByte()
- if err != nil {
- return 0, err
- }
- offset |= uint(next) << o.shift
- }
- }
-
- return offset, nil
-}
-
-func decodeOffset(cmd byte, delta []byte) (uint, []byte, error) {
- var offset uint
- for _, o := range offsets {
- if (cmd & o.mask) != 0 {
- if len(delta) == 0 {
- return 0, nil, ErrInvalidDelta
- }
- offset |= uint(delta[0]) << o.shift
- delta = delta[1:]
- }
- }
-
- return offset, delta, nil
-}
-
-func decodeSizeByteReader(cmd byte, delta io.ByteReader) (uint, error) {
- var sz uint
- for _, s := range sizes {
- if (cmd & s.mask) != 0 {
- next, err := delta.ReadByte()
- if err != nil {
- return 0, err
- }
- sz |= uint(next) << s.shift
- }
- }
-
- if sz == 0 {
- sz = maxCopySize
- }
-
- return sz, nil
-}
-
-func decodeSize(cmd byte, delta []byte) (uint, []byte, error) {
- var sz uint
- for _, s := range sizes {
- if (cmd & s.mask) != 0 {
- if len(delta) == 0 {
- return 0, nil, ErrInvalidDelta
- }
- sz |= uint(delta[0]) << s.shift
- delta = delta[1:]
- }
- }
- if sz == 0 {
- sz = maxCopySize
- }
-
- return sz, delta, nil
-}
-
-func invalidSize(sz, targetSz uint) bool {
- return sz > targetSz
-}
-
-func invalidOffsetSize(offset, sz, srcSz uint) bool {
- return sumOverflows(offset, sz) ||
- offset+sz > srcSz
-}
-
-func sumOverflows(a, b uint) bool {
- return a+b < a
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/scanner.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/scanner.go
deleted file mode 100644
index 730343ee39d..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/scanner.go
+++ /dev/null
@@ -1,474 +0,0 @@
-package packfile
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "hash"
- "hash/crc32"
- "io"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/utils/binary"
- "github.com/go-git/go-git/v5/utils/ioutil"
- "github.com/go-git/go-git/v5/utils/sync"
-)
-
-var (
- // ErrEmptyPackfile is returned by ReadHeader when no data is found in the packfile
- ErrEmptyPackfile = NewError("empty packfile")
- // ErrBadSignature is returned by ReadHeader when the signature in the packfile is incorrect.
- ErrBadSignature = NewError("malformed pack file signature")
- // ErrUnsupportedVersion is returned by ReadHeader when the packfile version is
- // different than VersionSupported.
- ErrUnsupportedVersion = NewError("unsupported packfile version")
- // ErrSeekNotSupported returned if seek is not support
- ErrSeekNotSupported = NewError("not seek support")
-)
-
-// ObjectHeader contains the information related to the object, this information
-// is collected from the previous bytes to the content of the object.
-type ObjectHeader struct {
- Type plumbing.ObjectType
- Offset int64
- Length int64
- Reference plumbing.Hash
- OffsetReference int64
-}
-
-type Scanner struct {
- r *scannerReader
- crc hash.Hash32
-
- // pendingObject is used to detect if an object has been read, or still
- // is waiting to be read
- pendingObject *ObjectHeader
- version, objects uint32
-
- // lsSeekable says if this scanner can do Seek or not, to have a Scanner
- // seekable a r implementing io.Seeker is required
- IsSeekable bool
-}
-
-// NewScanner returns a new Scanner based on a reader, if the given reader
-// implements io.ReadSeeker the Scanner will be also Seekable
-func NewScanner(r io.Reader) *Scanner {
- _, ok := r.(io.ReadSeeker)
-
- crc := crc32.NewIEEE()
- return &Scanner{
- r: newScannerReader(r, crc),
- crc: crc,
- IsSeekable: ok,
- }
-}
-
-func (s *Scanner) Reset(r io.Reader) {
- _, ok := r.(io.ReadSeeker)
-
- s.r.Reset(r)
- s.crc.Reset()
- s.IsSeekable = ok
- s.pendingObject = nil
- s.version = 0
- s.objects = 0
-}
-
-// Header reads the whole packfile header (signature, version and object count).
-// It returns the version and the object count and performs checks on the
-// validity of the signature and the version fields.
-func (s *Scanner) Header() (version, objects uint32, err error) {
- if s.version != 0 {
- return s.version, s.objects, nil
- }
-
- sig, err := s.readSignature()
- if err != nil {
- if err == io.EOF {
- err = ErrEmptyPackfile
- }
-
- return
- }
-
- if !s.isValidSignature(sig) {
- err = ErrBadSignature
- return
- }
-
- version, err = s.readVersion()
- s.version = version
- if err != nil {
- return
- }
-
- if !s.isSupportedVersion(version) {
- err = ErrUnsupportedVersion.AddDetails("%d", version)
- return
- }
-
- objects, err = s.readCount()
- s.objects = objects
- return
-}
-
-// readSignature reads a returns the signature field in the packfile.
-func (s *Scanner) readSignature() ([]byte, error) {
- var sig = make([]byte, 4)
- if _, err := io.ReadFull(s.r, sig); err != nil {
- return []byte{}, err
- }
-
- return sig, nil
-}
-
-// isValidSignature returns if sig is a valid packfile signature.
-func (s *Scanner) isValidSignature(sig []byte) bool {
- return bytes.Equal(sig, signature)
-}
-
-// readVersion reads and returns the version field of a packfile.
-func (s *Scanner) readVersion() (uint32, error) {
- return binary.ReadUint32(s.r)
-}
-
-// isSupportedVersion returns whether version v is supported by the parser.
-// The current supported version is VersionSupported, defined above.
-func (s *Scanner) isSupportedVersion(v uint32) bool {
- return v == VersionSupported
-}
-
-// readCount reads and returns the count of objects field of a packfile.
-func (s *Scanner) readCount() (uint32, error) {
- return binary.ReadUint32(s.r)
-}
-
-// SeekObjectHeader seeks to specified offset and returns the ObjectHeader
-// for the next object in the reader
-func (s *Scanner) SeekObjectHeader(offset int64) (*ObjectHeader, error) {
- // if seeking we assume that you are not interested in the header
- if s.version == 0 {
- s.version = VersionSupported
- }
-
- if _, err := s.r.Seek(offset, io.SeekStart); err != nil {
- return nil, err
- }
-
- h, err := s.nextObjectHeader()
- if err != nil {
- return nil, err
- }
-
- h.Offset = offset
- return h, nil
-}
-
-// NextObjectHeader returns the ObjectHeader for the next object in the reader
-func (s *Scanner) NextObjectHeader() (*ObjectHeader, error) {
- if err := s.doPending(); err != nil {
- return nil, err
- }
-
- offset, err := s.r.Seek(0, io.SeekCurrent)
- if err != nil {
- return nil, err
- }
-
- h, err := s.nextObjectHeader()
- if err != nil {
- return nil, err
- }
-
- h.Offset = offset
- return h, nil
-}
-
-// nextObjectHeader returns the ObjectHeader for the next object in the reader
-// without the Offset field
-func (s *Scanner) nextObjectHeader() (*ObjectHeader, error) {
- s.r.Flush()
- s.crc.Reset()
-
- h := &ObjectHeader{}
- s.pendingObject = h
-
- var err error
- h.Offset, err = s.r.Seek(0, io.SeekCurrent)
- if err != nil {
- return nil, err
- }
-
- h.Type, h.Length, err = s.readObjectTypeAndLength()
- if err != nil {
- return nil, err
- }
-
- switch h.Type {
- case plumbing.OFSDeltaObject:
- no, err := binary.ReadVariableWidthInt(s.r)
- if err != nil {
- return nil, err
- }
-
- h.OffsetReference = h.Offset - no
- case plumbing.REFDeltaObject:
- var err error
- h.Reference, err = binary.ReadHash(s.r)
- if err != nil {
- return nil, err
- }
- }
-
- return h, nil
-}
-
-func (s *Scanner) doPending() error {
- if s.version == 0 {
- var err error
- s.version, s.objects, err = s.Header()
- if err != nil {
- return err
- }
- }
-
- return s.discardObjectIfNeeded()
-}
-
-func (s *Scanner) discardObjectIfNeeded() error {
- if s.pendingObject == nil {
- return nil
- }
-
- h := s.pendingObject
- n, _, err := s.NextObject(io.Discard)
- if err != nil {
- return err
- }
-
- if n != h.Length {
- return fmt.Errorf(
- "error discarding object, discarded %d, expected %d",
- n, h.Length,
- )
- }
-
- return nil
-}
-
-// ReadObjectTypeAndLength reads and returns the object type and the
-// length field from an object entry in a packfile.
-func (s *Scanner) readObjectTypeAndLength() (plumbing.ObjectType, int64, error) {
- t, c, err := s.readType()
- if err != nil {
- return t, 0, err
- }
-
- l, err := s.readLength(c)
-
- return t, l, err
-}
-
-func (s *Scanner) readType() (plumbing.ObjectType, byte, error) {
- var c byte
- var err error
- if c, err = s.r.ReadByte(); err != nil {
- return plumbing.ObjectType(0), 0, err
- }
-
- typ := parseType(c)
-
- return typ, c, nil
-}
-
-func parseType(b byte) plumbing.ObjectType {
- return plumbing.ObjectType((b & maskType) >> firstLengthBits)
-}
-
-// the length is codified in the last 4 bits of the first byte and in
-// the last 7 bits of subsequent bytes. Last byte has a 0 MSB.
-func (s *Scanner) readLength(first byte) (int64, error) {
- length := int64(first & maskFirstLength)
-
- c := first
- shift := firstLengthBits
- var err error
- for c&maskContinue > 0 {
- if c, err = s.r.ReadByte(); err != nil {
- return 0, err
- }
-
- length += int64(c&maskLength) << shift
- shift += lengthBits
- }
-
- return length, nil
-}
-
-// NextObject writes the content of the next object into the reader, returns
-// the number of bytes written, the CRC32 of the content and an error, if any
-func (s *Scanner) NextObject(w io.Writer) (written int64, crc32 uint32, err error) {
- s.pendingObject = nil
- written, err = s.copyObject(w)
-
- s.r.Flush()
- crc32 = s.crc.Sum32()
- s.crc.Reset()
-
- return
-}
-
-// ReadObject returns a reader for the object content and an error
-func (s *Scanner) ReadObject() (io.ReadCloser, error) {
- s.pendingObject = nil
- zr, err := sync.GetZlibReader(s.r)
-
- if err != nil {
- return nil, fmt.Errorf("zlib reset error: %s", err)
- }
-
- return ioutil.NewReadCloserWithCloser(zr.Reader, func() error {
- sync.PutZlibReader(zr)
- return nil
- }), nil
-}
-
-// ReadRegularObject reads and write a non-deltified object
-// from it zlib stream in an object entry in the packfile.
-func (s *Scanner) copyObject(w io.Writer) (n int64, err error) {
- zr, err := sync.GetZlibReader(s.r)
- defer sync.PutZlibReader(zr)
-
- if err != nil {
- return 0, fmt.Errorf("zlib reset error: %s", err)
- }
-
- defer ioutil.CheckClose(zr.Reader, &err)
- buf := sync.GetByteSlice()
- n, err = io.CopyBuffer(w, zr.Reader, *buf)
- sync.PutByteSlice(buf)
- return
-}
-
-// SeekFromStart sets a new offset from start, returns the old position before
-// the change.
-func (s *Scanner) SeekFromStart(offset int64) (previous int64, err error) {
- // if seeking we assume that you are not interested in the header
- if s.version == 0 {
- s.version = VersionSupported
- }
-
- previous, err = s.r.Seek(0, io.SeekCurrent)
- if err != nil {
- return -1, err
- }
-
- _, err = s.r.Seek(offset, io.SeekStart)
- return previous, err
-}
-
-// Checksum returns the checksum of the packfile
-func (s *Scanner) Checksum() (plumbing.Hash, error) {
- err := s.discardObjectIfNeeded()
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- return binary.ReadHash(s.r)
-}
-
-// Close reads the reader until io.EOF
-func (s *Scanner) Close() error {
- buf := sync.GetByteSlice()
- _, err := io.CopyBuffer(io.Discard, s.r, *buf)
- sync.PutByteSlice(buf)
-
- return err
-}
-
-// Flush is a no-op (deprecated)
-func (s *Scanner) Flush() error {
- return nil
-}
-
-// scannerReader has the following characteristics:
-// - Provides an io.SeekReader impl for bufio.Reader, when the underlying
-// reader supports it.
-// - Keeps track of the current read position, for when the underlying reader
-// isn't an io.SeekReader, but we still want to know the current offset.
-// - Writes to the hash writer what it reads, with the aid of a smaller buffer.
-// The buffer helps avoid a performance penalty for performing small writes
-// to the crc32 hash writer.
-type scannerReader struct {
- reader io.Reader
- crc io.Writer
- rbuf *bufio.Reader
- wbuf *bufio.Writer
- offset int64
-}
-
-func newScannerReader(r io.Reader, h io.Writer) *scannerReader {
- sr := &scannerReader{
- rbuf: bufio.NewReader(nil),
- wbuf: bufio.NewWriterSize(nil, 64),
- crc: h,
- }
- sr.Reset(r)
-
- return sr
-}
-
-func (r *scannerReader) Reset(reader io.Reader) {
- r.reader = reader
- r.rbuf.Reset(r.reader)
- r.wbuf.Reset(r.crc)
-
- r.offset = 0
- if seeker, ok := r.reader.(io.ReadSeeker); ok {
- r.offset, _ = seeker.Seek(0, io.SeekCurrent)
- }
-}
-
-func (r *scannerReader) Read(p []byte) (n int, err error) {
- n, err = r.rbuf.Read(p)
-
- r.offset += int64(n)
- if _, err := r.wbuf.Write(p[:n]); err != nil {
- return n, err
- }
- return
-}
-
-func (r *scannerReader) ReadByte() (b byte, err error) {
- b, err = r.rbuf.ReadByte()
- if err == nil {
- r.offset++
- return b, r.wbuf.WriteByte(b)
- }
- return
-}
-
-func (r *scannerReader) Flush() error {
- return r.wbuf.Flush()
-}
-
-// Seek seeks to a location. If the underlying reader is not an io.ReadSeeker,
-// then only whence=io.SeekCurrent is supported, any other operation fails.
-func (r *scannerReader) Seek(offset int64, whence int) (int64, error) {
- var err error
-
- if seeker, ok := r.reader.(io.ReadSeeker); !ok {
- if whence != io.SeekCurrent || offset != 0 {
- return -1, ErrSeekNotSupported
- }
- } else {
- if whence == io.SeekCurrent && offset == 0 {
- return r.offset, nil
- }
-
- r.offset, err = seeker.Seek(offset, whence)
- r.rbuf.Reset(r.reader)
- }
-
- return r.offset, err
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/encoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/encoder.go
deleted file mode 100644
index b6144faf584..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/encoder.go
+++ /dev/null
@@ -1,126 +0,0 @@
-// Package pktline implements reading payloads form pkt-lines and encoding
-// pkt-lines from payloads.
-package pktline
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
-
- "github.com/go-git/go-git/v5/utils/trace"
-)
-
-// An Encoder writes pkt-lines to an output stream.
-type Encoder struct {
- w io.Writer
-}
-
-const (
- // MaxPayloadSize is the maximum payload size of a pkt-line in bytes.
- MaxPayloadSize = 65516
-
- // For compatibility with canonical Git implementation, accept longer pkt-lines
- OversizePayloadMax = 65520
-)
-
-var (
- // FlushPkt are the contents of a flush-pkt pkt-line.
- FlushPkt = []byte{'0', '0', '0', '0'}
- // Flush is the payload to use with the Encode method to encode a flush-pkt.
- Flush = []byte{}
- // FlushString is the payload to use with the EncodeString method to encode a flush-pkt.
- FlushString = ""
- // ErrPayloadTooLong is returned by the Encode methods when any of the
- // provided payloads is bigger than MaxPayloadSize.
- ErrPayloadTooLong = errors.New("payload is too long")
-)
-
-// NewEncoder returns a new encoder that writes to w.
-func NewEncoder(w io.Writer) *Encoder {
- return &Encoder{
- w: w,
- }
-}
-
-// Flush encodes a flush-pkt to the output stream.
-func (e *Encoder) Flush() error {
- defer trace.Packet.Print("packet: > 0000")
- _, err := e.w.Write(FlushPkt)
- return err
-}
-
-// Encode encodes a pkt-line with the payload specified and write it to
-// the output stream. If several payloads are specified, each of them
-// will get streamed in their own pkt-lines.
-func (e *Encoder) Encode(payloads ...[]byte) error {
- for _, p := range payloads {
- if err := e.encodeLine(p); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (e *Encoder) encodeLine(p []byte) error {
- if len(p) > MaxPayloadSize {
- return ErrPayloadTooLong
- }
-
- if bytes.Equal(p, Flush) {
- return e.Flush()
- }
-
- n := len(p) + 4
- defer trace.Packet.Printf("packet: > %04x %s", n, p)
- if _, err := e.w.Write(asciiHex16(n)); err != nil {
- return err
- }
- _, err := e.w.Write(p)
- return err
-}
-
-// Returns the hexadecimal ascii representation of the 16 less
-// significant bits of n. The length of the returned slice will always
-// be 4. Example: if n is 1234 (0x4d2), the return value will be
-// []byte{'0', '4', 'd', '2'}.
-func asciiHex16(n int) []byte {
- var ret [4]byte
- ret[0] = byteToASCIIHex(byte(n & 0xf000 >> 12))
- ret[1] = byteToASCIIHex(byte(n & 0x0f00 >> 8))
- ret[2] = byteToASCIIHex(byte(n & 0x00f0 >> 4))
- ret[3] = byteToASCIIHex(byte(n & 0x000f))
-
- return ret[:]
-}
-
-// turns a byte into its hexadecimal ascii representation. Example:
-// from 11 (0xb) to 'b'.
-func byteToASCIIHex(n byte) byte {
- if n < 10 {
- return '0' + n
- }
-
- return 'a' - 10 + n
-}
-
-// EncodeString works similarly as Encode but payloads are specified as strings.
-func (e *Encoder) EncodeString(payloads ...string) error {
- for _, p := range payloads {
- if err := e.Encode([]byte(p)); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// Encodef encodes a single pkt-line with the payload formatted as
-// the format specifier. The rest of the arguments will be used in
-// the format string.
-func (e *Encoder) Encodef(format string, a ...interface{}) error {
- return e.EncodeString(
- fmt.Sprintf(format, a...),
- )
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/error.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/error.go
deleted file mode 100644
index 2c0e5a72a9b..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/error.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package pktline
-
-import (
- "bytes"
- "errors"
- "io"
- "strings"
-)
-
-var (
- // ErrInvalidErrorLine is returned by Decode when the packet line is not an
- // error line.
- ErrInvalidErrorLine = errors.New("expected an error-line")
-
- errPrefix = []byte("ERR ")
-)
-
-// ErrorLine is a packet line that contains an error message.
-// Once this packet is sent by client or server, the data transfer process is
-// terminated.
-// See https://git-scm.com/docs/pack-protocol#_pkt_line_format
-type ErrorLine struct {
- Text string
-}
-
-// Error implements the error interface.
-func (e *ErrorLine) Error() string {
- return e.Text
-}
-
-// Encode encodes the ErrorLine into a packet line.
-func (e *ErrorLine) Encode(w io.Writer) error {
- p := NewEncoder(w)
- return p.Encodef("%s%s\n", string(errPrefix), e.Text)
-}
-
-// Decode decodes a packet line into an ErrorLine.
-func (e *ErrorLine) Decode(r io.Reader) error {
- s := NewScanner(r)
- if !s.Scan() {
- return s.Err()
- }
-
- line := s.Bytes()
- if !bytes.HasPrefix(line, errPrefix) {
- return ErrInvalidErrorLine
- }
-
- e.Text = strings.TrimSpace(string(line[4:]))
- return nil
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/scanner.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/scanner.go
deleted file mode 100644
index fbb137de06b..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/scanner.go
+++ /dev/null
@@ -1,146 +0,0 @@
-package pktline
-
-import (
- "bytes"
- "errors"
- "io"
- "strings"
-
- "github.com/go-git/go-git/v5/utils/trace"
-)
-
-const (
- lenSize = 4
-)
-
-// ErrInvalidPktLen is returned by Err() when an invalid pkt-len is found.
-var ErrInvalidPktLen = errors.New("invalid pkt-len found")
-
-// Scanner provides a convenient interface for reading the payloads of a
-// series of pkt-lines. It takes an io.Reader providing the source,
-// which then can be tokenized through repeated calls to the Scan
-// method.
-//
-// After each Scan call, the Bytes method will return the payload of the
-// corresponding pkt-line on a shared buffer, which will be 65516 bytes
-// or smaller. Flush pkt-lines are represented by empty byte slices.
-//
-// Scanning stops at EOF or the first I/O error.
-type Scanner struct {
- r io.Reader // The reader provided by the client
- err error // Sticky error
- payload []byte // Last pkt-payload
- len [lenSize]byte // Last pkt-len
-}
-
-// NewScanner returns a new Scanner to read from r.
-func NewScanner(r io.Reader) *Scanner {
- return &Scanner{
- r: r,
- }
-}
-
-// Err returns the first error encountered by the Scanner.
-func (s *Scanner) Err() error {
- return s.err
-}
-
-// Scan advances the Scanner to the next pkt-line, whose payload will
-// then be available through the Bytes method. Scanning stops at EOF
-// or the first I/O error. After Scan returns false, the Err method
-// will return any error that occurred during scanning, except that if
-// it was io.EOF, Err will return nil.
-func (s *Scanner) Scan() bool {
- var l int
- l, s.err = s.readPayloadLen()
- if s.err == io.EOF {
- s.err = nil
- return false
- }
- if s.err != nil {
- return false
- }
-
- if cap(s.payload) < l {
- s.payload = make([]byte, 0, l)
- }
-
- if _, s.err = io.ReadFull(s.r, s.payload[:l]); s.err != nil {
- return false
- }
- s.payload = s.payload[:l]
- trace.Packet.Printf("packet: < %04x %s", l, s.payload)
-
- if bytes.HasPrefix(s.payload, errPrefix) {
- s.err = &ErrorLine{
- Text: strings.TrimSpace(string(s.payload[4:])),
- }
- return false
- }
-
- return true
-}
-
-// Bytes returns the most recent payload generated by a call to Scan.
-// The underlying array may point to data that will be overwritten by a
-// subsequent call to Scan. It does no allocation.
-func (s *Scanner) Bytes() []byte {
- return s.payload
-}
-
-// Method readPayloadLen returns the payload length by reading the
-// pkt-len and subtracting the pkt-len size.
-func (s *Scanner) readPayloadLen() (int, error) {
- if _, err := io.ReadFull(s.r, s.len[:]); err != nil {
- if err == io.ErrUnexpectedEOF {
- return 0, ErrInvalidPktLen
- }
-
- return 0, err
- }
-
- n, err := hexDecode(s.len)
- if err != nil {
- return 0, err
- }
-
- switch {
- case n == 0:
- return 0, nil
- case n <= lenSize:
- return 0, ErrInvalidPktLen
- case n > OversizePayloadMax+lenSize:
- return 0, ErrInvalidPktLen
- default:
- return n - lenSize, nil
- }
-}
-
-// Turns the hexadecimal representation of a number in a byte slice into
-// a number. This function substitute strconv.ParseUint(string(buf), 16,
-// 16) and/or hex.Decode, to avoid generating new strings, thus helping the
-// GC.
-func hexDecode(buf [lenSize]byte) (int, error) {
- var ret int
- for i := 0; i < lenSize; i++ {
- n, err := asciiHexToByte(buf[i])
- if err != nil {
- return 0, ErrInvalidPktLen
- }
- ret = 16*ret + int(n)
- }
- return ret, nil
-}
-
-// turns the hexadecimal ascii representation of a byte into its
-// numerical value. Example: from 'b' to 11 (0xb).
-func asciiHexToByte(b byte) (byte, error) {
- switch {
- case b >= '0' && b <= '9':
- return b - '0', nil
- case b >= 'a' && b <= 'f':
- return b - 'a' + 10, nil
- default:
- return 0, ErrInvalidPktLen
- }
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/hash.go b/vendor/github.com/go-git/go-git/v5/plumbing/hash.go
deleted file mode 100644
index 39bb73fbb45..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/hash.go
+++ /dev/null
@@ -1,84 +0,0 @@
-package plumbing
-
-import (
- "bytes"
- "encoding/hex"
- "sort"
- "strconv"
-
- "github.com/go-git/go-git/v5/plumbing/hash"
-)
-
-// Hash SHA1 hashed content
-type Hash [hash.Size]byte
-
-// ZeroHash is Hash with value zero
-var ZeroHash Hash
-
-// ComputeHash compute the hash for a given ObjectType and content
-func ComputeHash(t ObjectType, content []byte) Hash {
- h := NewHasher(t, int64(len(content)))
- h.Write(content)
- return h.Sum()
-}
-
-// NewHash return a new Hash from a hexadecimal hash representation
-func NewHash(s string) Hash {
- b, _ := hex.DecodeString(s)
-
- var h Hash
- copy(h[:], b)
-
- return h
-}
-
-func (h Hash) IsZero() bool {
- var empty Hash
- return h == empty
-}
-
-func (h Hash) String() string {
- return hex.EncodeToString(h[:])
-}
-
-type Hasher struct {
- hash.Hash
-}
-
-func NewHasher(t ObjectType, size int64) Hasher {
- h := Hasher{hash.New(hash.CryptoType)}
- h.Write(t.Bytes())
- h.Write([]byte(" "))
- h.Write([]byte(strconv.FormatInt(size, 10)))
- h.Write([]byte{0})
- return h
-}
-
-func (h Hasher) Sum() (hash Hash) {
- copy(hash[:], h.Hash.Sum(nil))
- return
-}
-
-// HashesSort sorts a slice of Hashes in increasing order.
-func HashesSort(a []Hash) {
- sort.Sort(HashSlice(a))
-}
-
-// HashSlice attaches the methods of sort.Interface to []Hash, sorting in
-// increasing order.
-type HashSlice []Hash
-
-func (p HashSlice) Len() int { return len(p) }
-func (p HashSlice) Less(i, j int) bool { return bytes.Compare(p[i][:], p[j][:]) < 0 }
-func (p HashSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
-
-// IsHash returns true if the given string is a valid hash.
-func IsHash(s string) bool {
- switch len(s) {
- case hash.HexSize:
- _, err := hex.DecodeString(s)
- return err == nil
- default:
- return false
- }
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/hash/hash.go b/vendor/github.com/go-git/go-git/v5/plumbing/hash/hash.go
deleted file mode 100644
index 8609848f679..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/hash/hash.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// package hash provides a way for managing the
-// underlying hash implementations used across go-git.
-package hash
-
-import (
- "crypto"
- "fmt"
- "hash"
-
- "github.com/pjbgf/sha1cd"
-)
-
-// algos is a map of hash algorithms.
-var algos = map[crypto.Hash]func() hash.Hash{}
-
-func init() {
- reset()
-}
-
-// reset resets the default algos value. Can be used after running tests
-// that registers new algorithms to avoid side effects.
-func reset() {
- algos[crypto.SHA1] = sha1cd.New
- algos[crypto.SHA256] = crypto.SHA256.New
-}
-
-// RegisterHash allows for the hash algorithm used to be overridden.
-// This ensures the hash selection for go-git must be explicit, when
-// overriding the default value.
-func RegisterHash(h crypto.Hash, f func() hash.Hash) error {
- if f == nil {
- return fmt.Errorf("cannot register hash: f is nil")
- }
-
- switch h {
- case crypto.SHA1:
- algos[h] = f
- case crypto.SHA256:
- algos[h] = f
- default:
- return fmt.Errorf("unsupported hash function: %v", h)
- }
- return nil
-}
-
-// Hash is the same as hash.Hash. This allows consumers
-// to not having to import this package alongside "hash".
-type Hash interface {
- hash.Hash
-}
-
-// New returns a new Hash for the given hash function.
-// It panics if the hash function is not registered.
-func New(h crypto.Hash) Hash {
- hh, ok := algos[h]
- if !ok {
- panic(fmt.Sprintf("hash algorithm not registered: %v", h))
- }
- return hh()
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/hash/hash_sha1.go b/vendor/github.com/go-git/go-git/v5/plumbing/hash/hash_sha1.go
deleted file mode 100644
index e3cb60fec9e..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/hash/hash_sha1.go
+++ /dev/null
@@ -1,15 +0,0 @@
-//go:build !sha256
-// +build !sha256
-
-package hash
-
-import "crypto"
-
-const (
- // CryptoType defines what hash algorithm is being used.
- CryptoType = crypto.SHA1
- // Size defines the amount of bytes the hash yields.
- Size = 20
- // HexSize defines the strings size of the hash when represented in hexadecimal.
- HexSize = 40
-)
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/hash/hash_sha256.go b/vendor/github.com/go-git/go-git/v5/plumbing/hash/hash_sha256.go
deleted file mode 100644
index 1c52b897539..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/hash/hash_sha256.go
+++ /dev/null
@@ -1,15 +0,0 @@
-//go:build sha256
-// +build sha256
-
-package hash
-
-import "crypto"
-
-const (
- // CryptoType defines what hash algorithm is being used.
- CryptoType = crypto.SHA256
- // Size defines the amount of bytes the hash yields.
- Size = 32
- // HexSize defines the strings size of the hash when represented in hexadecimal.
- HexSize = 64
-)
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/memory.go b/vendor/github.com/go-git/go-git/v5/plumbing/memory.go
deleted file mode 100644
index 6d11271dd67..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/memory.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package plumbing
-
-import (
- "bytes"
- "io"
-)
-
-// MemoryObject on memory Object implementation
-type MemoryObject struct {
- t ObjectType
- h Hash
- cont []byte
- sz int64
-}
-
-// Hash returns the object Hash, the hash is calculated on-the-fly the first
-// time it's called, in all subsequent calls the same Hash is returned even
-// if the type or the content have changed. The Hash is only generated if the
-// size of the content is exactly the object size.
-func (o *MemoryObject) Hash() Hash {
- if o.h == ZeroHash && int64(len(o.cont)) == o.sz {
- o.h = ComputeHash(o.t, o.cont)
- }
-
- return o.h
-}
-
-// Type returns the ObjectType
-func (o *MemoryObject) Type() ObjectType { return o.t }
-
-// SetType sets the ObjectType
-func (o *MemoryObject) SetType(t ObjectType) { o.t = t }
-
-// Size returns the size of the object
-func (o *MemoryObject) Size() int64 { return o.sz }
-
-// SetSize set the object size, a content of the given size should be written
-// afterwards
-func (o *MemoryObject) SetSize(s int64) { o.sz = s }
-
-// Reader returns an io.ReadCloser used to read the object's content.
-//
-// For a MemoryObject, this reader is seekable.
-func (o *MemoryObject) Reader() (io.ReadCloser, error) {
- return nopCloser{bytes.NewReader(o.cont)}, nil
-}
-
-// Writer returns a ObjectWriter used to write the object's content.
-func (o *MemoryObject) Writer() (io.WriteCloser, error) {
- return o, nil
-}
-
-func (o *MemoryObject) Write(p []byte) (n int, err error) {
- o.cont = append(o.cont, p...)
- o.sz = int64(len(o.cont))
-
- return len(p), nil
-}
-
-// Close releases any resources consumed by the object when it is acting as a
-// ObjectWriter.
-func (o *MemoryObject) Close() error { return nil }
-
-// nopCloser exposes the extra methods of bytes.Reader while nopping Close().
-//
-// This allows clients to attempt seeking in a cached Blob's Reader.
-type nopCloser struct {
- *bytes.Reader
-}
-
-// Close does nothing.
-func (nc nopCloser) Close() error { return nil }
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object.go b/vendor/github.com/go-git/go-git/v5/plumbing/object.go
deleted file mode 100644
index 3ee9de9f3ec..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/object.go
+++ /dev/null
@@ -1,111 +0,0 @@
-// package plumbing implement the core interfaces and structs used by go-git
-package plumbing
-
-import (
- "errors"
- "io"
-)
-
-var (
- ErrObjectNotFound = errors.New("object not found")
- // ErrInvalidType is returned when an invalid object type is provided.
- ErrInvalidType = errors.New("invalid object type")
-)
-
-// Object is a generic representation of any git object
-type EncodedObject interface {
- Hash() Hash
- Type() ObjectType
- SetType(ObjectType)
- Size() int64
- SetSize(int64)
- Reader() (io.ReadCloser, error)
- Writer() (io.WriteCloser, error)
-}
-
-// DeltaObject is an EncodedObject representing a delta.
-type DeltaObject interface {
- EncodedObject
- // BaseHash returns the hash of the object used as base for this delta.
- BaseHash() Hash
- // ActualHash returns the hash of the object after applying the delta.
- ActualHash() Hash
- // Size returns the size of the object after applying the delta.
- ActualSize() int64
-}
-
-// ObjectType internal object type
-// Integer values from 0 to 7 map to those exposed by git.
-// AnyObject is used to represent any from 0 to 7.
-type ObjectType int8
-
-const (
- InvalidObject ObjectType = 0
- CommitObject ObjectType = 1
- TreeObject ObjectType = 2
- BlobObject ObjectType = 3
- TagObject ObjectType = 4
- // 5 reserved for future expansion
- OFSDeltaObject ObjectType = 6
- REFDeltaObject ObjectType = 7
-
- AnyObject ObjectType = -127
-)
-
-func (t ObjectType) String() string {
- switch t {
- case CommitObject:
- return "commit"
- case TreeObject:
- return "tree"
- case BlobObject:
- return "blob"
- case TagObject:
- return "tag"
- case OFSDeltaObject:
- return "ofs-delta"
- case REFDeltaObject:
- return "ref-delta"
- case AnyObject:
- return "any"
- default:
- return "unknown"
- }
-}
-
-func (t ObjectType) Bytes() []byte {
- return []byte(t.String())
-}
-
-// Valid returns true if t is a valid ObjectType.
-func (t ObjectType) Valid() bool {
- return t >= CommitObject && t <= REFDeltaObject
-}
-
-// IsDelta returns true for any ObjectType that represents a delta (i.e.
-// REFDeltaObject or OFSDeltaObject).
-func (t ObjectType) IsDelta() bool {
- return t == REFDeltaObject || t == OFSDeltaObject
-}
-
-// ParseObjectType parses a string representation of ObjectType. It returns an
-// error on parse failure.
-func ParseObjectType(value string) (typ ObjectType, err error) {
- switch value {
- case "commit":
- typ = CommitObject
- case "tree":
- typ = TreeObject
- case "blob":
- typ = BlobObject
- case "tag":
- typ = TagObject
- case "ofs-delta":
- typ = OFSDeltaObject
- case "ref-delta":
- typ = REFDeltaObject
- default:
- err = ErrInvalidType
- }
- return
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/blob.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/blob.go
deleted file mode 100644
index 8fb7576fa3f..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/object/blob.go
+++ /dev/null
@@ -1,144 +0,0 @@
-package object
-
-import (
- "io"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/storer"
- "github.com/go-git/go-git/v5/utils/ioutil"
-)
-
-// Blob is used to store arbitrary data - it is generally a file.
-type Blob struct {
- // Hash of the blob.
- Hash plumbing.Hash
- // Size of the (uncompressed) blob.
- Size int64
-
- obj plumbing.EncodedObject
-}
-
-// GetBlob gets a blob from an object storer and decodes it.
-func GetBlob(s storer.EncodedObjectStorer, h plumbing.Hash) (*Blob, error) {
- o, err := s.EncodedObject(plumbing.BlobObject, h)
- if err != nil {
- return nil, err
- }
-
- return DecodeBlob(o)
-}
-
-// DecodeObject decodes an encoded object into a *Blob.
-func DecodeBlob(o plumbing.EncodedObject) (*Blob, error) {
- b := &Blob{}
- if err := b.Decode(o); err != nil {
- return nil, err
- }
-
- return b, nil
-}
-
-// ID returns the object ID of the blob. The returned value will always match
-// the current value of Blob.Hash.
-//
-// ID is present to fulfill the Object interface.
-func (b *Blob) ID() plumbing.Hash {
- return b.Hash
-}
-
-// Type returns the type of object. It always returns plumbing.BlobObject.
-//
-// Type is present to fulfill the Object interface.
-func (b *Blob) Type() plumbing.ObjectType {
- return plumbing.BlobObject
-}
-
-// Decode transforms a plumbing.EncodedObject into a Blob struct.
-func (b *Blob) Decode(o plumbing.EncodedObject) error {
- if o.Type() != plumbing.BlobObject {
- return ErrUnsupportedObject
- }
-
- b.Hash = o.Hash()
- b.Size = o.Size()
- b.obj = o
-
- return nil
-}
-
-// Encode transforms a Blob into a plumbing.EncodedObject.
-func (b *Blob) Encode(o plumbing.EncodedObject) (err error) {
- o.SetType(plumbing.BlobObject)
-
- w, err := o.Writer()
- if err != nil {
- return err
- }
-
- defer ioutil.CheckClose(w, &err)
-
- r, err := b.Reader()
- if err != nil {
- return err
- }
-
- defer ioutil.CheckClose(r, &err)
-
- _, err = io.Copy(w, r)
- return err
-}
-
-// Reader returns a reader allow the access to the content of the blob
-func (b *Blob) Reader() (io.ReadCloser, error) {
- return b.obj.Reader()
-}
-
-// BlobIter provides an iterator for a set of blobs.
-type BlobIter struct {
- storer.EncodedObjectIter
- s storer.EncodedObjectStorer
-}
-
-// NewBlobIter takes a storer.EncodedObjectStorer and a
-// storer.EncodedObjectIter and returns a *BlobIter that iterates over all
-// blobs contained in the storer.EncodedObjectIter.
-//
-// Any non-blob object returned by the storer.EncodedObjectIter is skipped.
-func NewBlobIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *BlobIter {
- return &BlobIter{iter, s}
-}
-
-// Next moves the iterator to the next blob and returns a pointer to it. If
-// there are no more blobs, it returns io.EOF.
-func (iter *BlobIter) Next() (*Blob, error) {
- for {
- obj, err := iter.EncodedObjectIter.Next()
- if err != nil {
- return nil, err
- }
-
- if obj.Type() != plumbing.BlobObject {
- continue
- }
-
- return DecodeBlob(obj)
- }
-}
-
-// ForEach call the cb function for each blob contained on this iter until
-// an error happens or the end of the iter is reached. If ErrStop is sent
-// the iteration is stop but no error is returned. The iterator is closed.
-func (iter *BlobIter) ForEach(cb func(*Blob) error) error {
- return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error {
- if obj.Type() != plumbing.BlobObject {
- return nil
- }
-
- b, err := DecodeBlob(obj)
- if err != nil {
- return err
- }
-
- return cb(b)
- })
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/change.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/change.go
deleted file mode 100644
index 3c619df868f..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/object/change.go
+++ /dev/null
@@ -1,159 +0,0 @@
-package object
-
-import (
- "bytes"
- "context"
- "fmt"
- "strings"
-
- "github.com/go-git/go-git/v5/utils/merkletrie"
-)
-
-// Change values represent a detected change between two git trees. For
-// modifications, From is the original status of the node and To is its
-// final status. For insertions, From is the zero value and for
-// deletions To is the zero value.
-type Change struct {
- From ChangeEntry
- To ChangeEntry
-}
-
-var empty ChangeEntry
-
-// Action returns the kind of action represented by the change, an
-// insertion, a deletion or a modification.
-func (c *Change) Action() (merkletrie.Action, error) {
- if c.From == empty && c.To == empty {
- return merkletrie.Action(0),
- fmt.Errorf("malformed change: empty from and to")
- }
-
- if c.From == empty {
- return merkletrie.Insert, nil
- }
-
- if c.To == empty {
- return merkletrie.Delete, nil
- }
-
- return merkletrie.Modify, nil
-}
-
-// Files returns the files before and after a change.
-// For insertions from will be nil. For deletions to will be nil.
-func (c *Change) Files() (from, to *File, err error) {
- action, err := c.Action()
- if err != nil {
- return
- }
-
- if action == merkletrie.Insert || action == merkletrie.Modify {
- to, err = c.To.Tree.TreeEntryFile(&c.To.TreeEntry)
- if !c.To.TreeEntry.Mode.IsFile() {
- return nil, nil, nil
- }
-
- if err != nil {
- return
- }
- }
-
- if action == merkletrie.Delete || action == merkletrie.Modify {
- from, err = c.From.Tree.TreeEntryFile(&c.From.TreeEntry)
- if !c.From.TreeEntry.Mode.IsFile() {
- return nil, nil, nil
- }
-
- if err != nil {
- return
- }
- }
-
- return
-}
-
-func (c *Change) String() string {
- action, err := c.Action()
- if err != nil {
- return "malformed change"
- }
-
- return fmt.Sprintf("", action, c.name())
-}
-
-// Patch returns a Patch with all the file changes in chunks. This
-// representation can be used to create several diff outputs.
-func (c *Change) Patch() (*Patch, error) {
- return c.PatchContext(context.Background())
-}
-
-// Patch returns a Patch with all the file changes in chunks. This
-// representation can be used to create several diff outputs.
-// If context expires, an non-nil error will be returned
-// Provided context must be non-nil
-func (c *Change) PatchContext(ctx context.Context) (*Patch, error) {
- return getPatchContext(ctx, "", c)
-}
-
-func (c *Change) name() string {
- if c.From != empty {
- return c.From.Name
- }
-
- return c.To.Name
-}
-
-// ChangeEntry values represent a node that has suffered a change.
-type ChangeEntry struct {
- // Full path of the node using "/" as separator.
- Name string
- // Parent tree of the node that has changed.
- Tree *Tree
- // The entry of the node.
- TreeEntry TreeEntry
-}
-
-// Changes represents a collection of changes between two git trees.
-// Implements sort.Interface lexicographically over the path of the
-// changed files.
-type Changes []*Change
-
-func (c Changes) Len() int {
- return len(c)
-}
-
-func (c Changes) Swap(i, j int) {
- c[i], c[j] = c[j], c[i]
-}
-
-func (c Changes) Less(i, j int) bool {
- return strings.Compare(c[i].name(), c[j].name()) < 0
-}
-
-func (c Changes) String() string {
- var buffer bytes.Buffer
- buffer.WriteString("[")
- comma := ""
- for _, v := range c {
- buffer.WriteString(comma)
- buffer.WriteString(v.String())
- comma = ", "
- }
- buffer.WriteString("]")
-
- return buffer.String()
-}
-
-// Patch returns a Patch with all the changes in chunks. This
-// representation can be used to create several diff outputs.
-func (c Changes) Patch() (*Patch, error) {
- return c.PatchContext(context.Background())
-}
-
-// Patch returns a Patch with all the changes in chunks. This
-// representation can be used to create several diff outputs.
-// If context expires, an non-nil error will be returned
-// Provided context must be non-nil
-func (c Changes) PatchContext(ctx context.Context) (*Patch, error) {
- return getPatchContext(ctx, "", c...)
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/change_adaptor.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/change_adaptor.go
deleted file mode 100644
index b96ee84d90e..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/object/change_adaptor.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package object
-
-import (
- "errors"
- "fmt"
-
- "github.com/go-git/go-git/v5/utils/merkletrie"
- "github.com/go-git/go-git/v5/utils/merkletrie/noder"
-)
-
-// The following functions transform changes types form the merkletrie
-// package to changes types from this package.
-
-func newChange(c merkletrie.Change) (*Change, error) {
- ret := &Change{}
-
- var err error
- if ret.From, err = newChangeEntry(c.From); err != nil {
- return nil, fmt.Errorf("from field: %s", err)
- }
-
- if ret.To, err = newChangeEntry(c.To); err != nil {
- return nil, fmt.Errorf("to field: %s", err)
- }
-
- return ret, nil
-}
-
-func newChangeEntry(p noder.Path) (ChangeEntry, error) {
- if p == nil {
- return empty, nil
- }
-
- asTreeNoder, ok := p.Last().(*treeNoder)
- if !ok {
- return ChangeEntry{}, errors.New("cannot transform non-TreeNoders")
- }
-
- return ChangeEntry{
- Name: p.String(),
- Tree: asTreeNoder.parent,
- TreeEntry: TreeEntry{
- Name: asTreeNoder.name,
- Mode: asTreeNoder.mode,
- Hash: asTreeNoder.hash,
- },
- }, nil
-}
-
-func newChanges(src merkletrie.Changes) (Changes, error) {
- ret := make(Changes, len(src))
- var err error
- for i, e := range src {
- ret[i], err = newChange(e)
- if err != nil {
- return nil, fmt.Errorf("change #%d: %s", i, err)
- }
- }
-
- return ret, nil
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go
deleted file mode 100644
index 3d096e18b80..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go
+++ /dev/null
@@ -1,507 +0,0 @@
-package object
-
-import (
- "bytes"
- "context"
- "errors"
- "fmt"
- "io"
- "strings"
-
- "github.com/ProtonMail/go-crypto/openpgp"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/storer"
- "github.com/go-git/go-git/v5/utils/ioutil"
- "github.com/go-git/go-git/v5/utils/sync"
-)
-
-const (
- beginpgp string = "-----BEGIN PGP SIGNATURE-----"
- endpgp string = "-----END PGP SIGNATURE-----"
- headerpgp string = "gpgsig"
- headerencoding string = "encoding"
-
- // https://github.com/git/git/blob/bcb6cae2966cc407ca1afc77413b3ef11103c175/Documentation/gitformat-signature.txt#L153
- // When a merge commit is created from a signed tag, the tag is embedded in
- // the commit with the "mergetag" header.
- headermergetag string = "mergetag"
-
- defaultUtf8CommitMessageEncoding MessageEncoding = "UTF-8"
-)
-
-// Hash represents the hash of an object
-type Hash plumbing.Hash
-
-// MessageEncoding represents the encoding of a commit
-type MessageEncoding string
-
-// Commit points to a single tree, marking it as what the project looked like
-// at a certain point in time. It contains meta-information about that point
-// in time, such as a timestamp, the author of the changes since the last
-// commit, a pointer to the previous commit(s), etc.
-// http://shafiulazam.com/gitbook/1_the_git_object_model.html
-type Commit struct {
- // Hash of the commit object.
- Hash plumbing.Hash
- // Author is the original author of the commit.
- Author Signature
- // Committer is the one performing the commit, might be different from
- // Author.
- Committer Signature
- // MergeTag is the embedded tag object when a merge commit is created by
- // merging a signed tag.
- MergeTag string
- // PGPSignature is the PGP signature of the commit.
- PGPSignature string
- // Message is the commit message, contains arbitrary text.
- Message string
- // TreeHash is the hash of the root tree of the commit.
- TreeHash plumbing.Hash
- // ParentHashes are the hashes of the parent commits of the commit.
- ParentHashes []plumbing.Hash
- // Encoding is the encoding of the commit.
- Encoding MessageEncoding
-
- s storer.EncodedObjectStorer
-}
-
-// GetCommit gets a commit from an object storer and decodes it.
-func GetCommit(s storer.EncodedObjectStorer, h plumbing.Hash) (*Commit, error) {
- o, err := s.EncodedObject(plumbing.CommitObject, h)
- if err != nil {
- return nil, err
- }
-
- return DecodeCommit(s, o)
-}
-
-// DecodeCommit decodes an encoded object into a *Commit and associates it to
-// the given object storer.
-func DecodeCommit(s storer.EncodedObjectStorer, o plumbing.EncodedObject) (*Commit, error) {
- c := &Commit{s: s}
- if err := c.Decode(o); err != nil {
- return nil, err
- }
-
- return c, nil
-}
-
-// Tree returns the Tree from the commit.
-func (c *Commit) Tree() (*Tree, error) {
- return GetTree(c.s, c.TreeHash)
-}
-
-// PatchContext returns the Patch between the actual commit and the provided one.
-// Error will be return if context expires. Provided context must be non-nil.
-//
-// NOTE: Since version 5.1.0 the renames are correctly handled, the settings
-// used are the recommended options DefaultDiffTreeOptions.
-func (c *Commit) PatchContext(ctx context.Context, to *Commit) (*Patch, error) {
- fromTree, err := c.Tree()
- if err != nil {
- return nil, err
- }
-
- var toTree *Tree
- if to != nil {
- toTree, err = to.Tree()
- if err != nil {
- return nil, err
- }
- }
-
- return fromTree.PatchContext(ctx, toTree)
-}
-
-// Patch returns the Patch between the actual commit and the provided one.
-//
-// NOTE: Since version 5.1.0 the renames are correctly handled, the settings
-// used are the recommended options DefaultDiffTreeOptions.
-func (c *Commit) Patch(to *Commit) (*Patch, error) {
- return c.PatchContext(context.Background(), to)
-}
-
-// Parents return a CommitIter to the parent Commits.
-func (c *Commit) Parents() CommitIter {
- return NewCommitIter(c.s,
- storer.NewEncodedObjectLookupIter(c.s, plumbing.CommitObject, c.ParentHashes),
- )
-}
-
-// NumParents returns the number of parents in a commit.
-func (c *Commit) NumParents() int {
- return len(c.ParentHashes)
-}
-
-var ErrParentNotFound = errors.New("commit parent not found")
-
-// Parent returns the ith parent of a commit.
-func (c *Commit) Parent(i int) (*Commit, error) {
- if len(c.ParentHashes) == 0 || i > len(c.ParentHashes)-1 {
- return nil, ErrParentNotFound
- }
-
- return GetCommit(c.s, c.ParentHashes[i])
-}
-
-// File returns the file with the specified "path" in the commit and a
-// nil error if the file exists. If the file does not exist, it returns
-// a nil file and the ErrFileNotFound error.
-func (c *Commit) File(path string) (*File, error) {
- tree, err := c.Tree()
- if err != nil {
- return nil, err
- }
-
- return tree.File(path)
-}
-
-// Files returns a FileIter allowing to iterate over the Tree
-func (c *Commit) Files() (*FileIter, error) {
- tree, err := c.Tree()
- if err != nil {
- return nil, err
- }
-
- return tree.Files(), nil
-}
-
-// ID returns the object ID of the commit. The returned value will always match
-// the current value of Commit.Hash.
-//
-// ID is present to fulfill the Object interface.
-func (c *Commit) ID() plumbing.Hash {
- return c.Hash
-}
-
-// Type returns the type of object. It always returns plumbing.CommitObject.
-//
-// Type is present to fulfill the Object interface.
-func (c *Commit) Type() plumbing.ObjectType {
- return plumbing.CommitObject
-}
-
-// Decode transforms a plumbing.EncodedObject into a Commit struct.
-func (c *Commit) Decode(o plumbing.EncodedObject) (err error) {
- if o.Type() != plumbing.CommitObject {
- return ErrUnsupportedObject
- }
-
- c.Hash = o.Hash()
- c.Encoding = defaultUtf8CommitMessageEncoding
-
- reader, err := o.Reader()
- if err != nil {
- return err
- }
- defer ioutil.CheckClose(reader, &err)
-
- r := sync.GetBufioReader(reader)
- defer sync.PutBufioReader(r)
-
- var message bool
- var mergetag bool
- var pgpsig bool
- var msgbuf bytes.Buffer
- for {
- line, err := r.ReadBytes('\n')
- if err != nil && err != io.EOF {
- return err
- }
-
- if mergetag {
- if len(line) > 0 && line[0] == ' ' {
- line = bytes.TrimLeft(line, " ")
- c.MergeTag += string(line)
- continue
- } else {
- mergetag = false
- }
- }
-
- if pgpsig {
- if len(line) > 0 && line[0] == ' ' {
- line = bytes.TrimLeft(line, " ")
- c.PGPSignature += string(line)
- continue
- } else {
- pgpsig = false
- }
- }
-
- if !message {
- line = bytes.TrimSpace(line)
- if len(line) == 0 {
- message = true
- continue
- }
-
- split := bytes.SplitN(line, []byte{' '}, 2)
-
- var data []byte
- if len(split) == 2 {
- data = split[1]
- }
-
- switch string(split[0]) {
- case "tree":
- c.TreeHash = plumbing.NewHash(string(data))
- case "parent":
- c.ParentHashes = append(c.ParentHashes, plumbing.NewHash(string(data)))
- case "author":
- c.Author.Decode(data)
- case "committer":
- c.Committer.Decode(data)
- case headermergetag:
- c.MergeTag += string(data) + "\n"
- mergetag = true
- case headerencoding:
- c.Encoding = MessageEncoding(data)
- case headerpgp:
- c.PGPSignature += string(data) + "\n"
- pgpsig = true
- }
- } else {
- msgbuf.Write(line)
- }
-
- if err == io.EOF {
- break
- }
- }
- c.Message = msgbuf.String()
- return nil
-}
-
-// Encode transforms a Commit into a plumbing.EncodedObject.
-func (c *Commit) Encode(o plumbing.EncodedObject) error {
- return c.encode(o, true)
-}
-
-// EncodeWithoutSignature export a Commit into a plumbing.EncodedObject without the signature (correspond to the payload of the PGP signature).
-func (c *Commit) EncodeWithoutSignature(o plumbing.EncodedObject) error {
- return c.encode(o, false)
-}
-
-func (c *Commit) encode(o plumbing.EncodedObject, includeSig bool) (err error) {
- o.SetType(plumbing.CommitObject)
- w, err := o.Writer()
- if err != nil {
- return err
- }
-
- defer ioutil.CheckClose(w, &err)
-
- if _, err = fmt.Fprintf(w, "tree %s\n", c.TreeHash.String()); err != nil {
- return err
- }
-
- for _, parent := range c.ParentHashes {
- if _, err = fmt.Fprintf(w, "parent %s\n", parent.String()); err != nil {
- return err
- }
- }
-
- if _, err = fmt.Fprint(w, "author "); err != nil {
- return err
- }
-
- if err = c.Author.Encode(w); err != nil {
- return err
- }
-
- if _, err = fmt.Fprint(w, "\ncommitter "); err != nil {
- return err
- }
-
- if err = c.Committer.Encode(w); err != nil {
- return err
- }
-
- if c.MergeTag != "" {
- if _, err = fmt.Fprint(w, "\n"+headermergetag+" "); err != nil {
- return err
- }
-
- // Split tag information lines and re-write with a left padding and
- // newline. Use join for this so it's clear that a newline should not be
- // added after this section. The newline will be added either as part of
- // the PGP signature or the commit message.
- mergetag := strings.TrimSuffix(c.MergeTag, "\n")
- lines := strings.Split(mergetag, "\n")
- if _, err = fmt.Fprint(w, strings.Join(lines, "\n ")); err != nil {
- return err
- }
- }
-
- if string(c.Encoding) != "" && c.Encoding != defaultUtf8CommitMessageEncoding {
- if _, err = fmt.Fprintf(w, "\n%s %s", headerencoding, c.Encoding); err != nil {
- return err
- }
- }
-
- if c.PGPSignature != "" && includeSig {
- if _, err = fmt.Fprint(w, "\n"+headerpgp+" "); err != nil {
- return err
- }
-
- // Split all the signature lines and re-write with a left padding and
- // newline. Use join for this so it's clear that a newline should not be
- // added after this section, as it will be added when the message is
- // printed.
- signature := strings.TrimSuffix(c.PGPSignature, "\n")
- lines := strings.Split(signature, "\n")
- if _, err = fmt.Fprint(w, strings.Join(lines, "\n ")); err != nil {
- return err
- }
- }
-
- if _, err = fmt.Fprintf(w, "\n\n%s", c.Message); err != nil {
- return err
- }
-
- return err
-}
-
-// Stats returns the stats of a commit.
-func (c *Commit) Stats() (FileStats, error) {
- return c.StatsContext(context.Background())
-}
-
-// StatsContext returns the stats of a commit. Error will be return if context
-// expires. Provided context must be non-nil.
-func (c *Commit) StatsContext(ctx context.Context) (FileStats, error) {
- fromTree, err := c.Tree()
- if err != nil {
- return nil, err
- }
-
- toTree := &Tree{}
- if c.NumParents() != 0 {
- firstParent, err := c.Parents().Next()
- if err != nil {
- return nil, err
- }
-
- toTree, err = firstParent.Tree()
- if err != nil {
- return nil, err
- }
- }
-
- patch, err := toTree.PatchContext(ctx, fromTree)
- if err != nil {
- return nil, err
- }
-
- return getFileStatsFromFilePatches(patch.FilePatches()), nil
-}
-
-func (c *Commit) String() string {
- return fmt.Sprintf(
- "%s %s\nAuthor: %s\nDate: %s\n\n%s\n",
- plumbing.CommitObject, c.Hash, c.Author.String(),
- c.Author.When.Format(DateFormat), indent(c.Message),
- )
-}
-
-// Verify performs PGP verification of the commit with a provided armored
-// keyring and returns openpgp.Entity associated with verifying key on success.
-func (c *Commit) Verify(armoredKeyRing string) (*openpgp.Entity, error) {
- keyRingReader := strings.NewReader(armoredKeyRing)
- keyring, err := openpgp.ReadArmoredKeyRing(keyRingReader)
- if err != nil {
- return nil, err
- }
-
- // Extract signature.
- signature := strings.NewReader(c.PGPSignature)
-
- encoded := &plumbing.MemoryObject{}
- // Encode commit components, excluding signature and get a reader object.
- if err := c.EncodeWithoutSignature(encoded); err != nil {
- return nil, err
- }
- er, err := encoded.Reader()
- if err != nil {
- return nil, err
- }
-
- return openpgp.CheckArmoredDetachedSignature(keyring, er, signature, nil)
-}
-
-// Less defines a compare function to determine which commit is 'earlier' by:
-// - First use Committer.When
-// - If Committer.When are equal then use Author.When
-// - If Author.When also equal then compare the string value of the hash
-func (c *Commit) Less(rhs *Commit) bool {
- return c.Committer.When.Before(rhs.Committer.When) ||
- (c.Committer.When.Equal(rhs.Committer.When) &&
- (c.Author.When.Before(rhs.Author.When) ||
- (c.Author.When.Equal(rhs.Author.When) && bytes.Compare(c.Hash[:], rhs.Hash[:]) < 0)))
-}
-
-func indent(t string) string {
- var output []string
- for _, line := range strings.Split(t, "\n") {
- if len(line) != 0 {
- line = " " + line
- }
-
- output = append(output, line)
- }
-
- return strings.Join(output, "\n")
-}
-
-// CommitIter is a generic closable interface for iterating over commits.
-type CommitIter interface {
- Next() (*Commit, error)
- ForEach(func(*Commit) error) error
- Close()
-}
-
-// storerCommitIter provides an iterator from commits in an EncodedObjectStorer.
-type storerCommitIter struct {
- storer.EncodedObjectIter
- s storer.EncodedObjectStorer
-}
-
-// NewCommitIter takes a storer.EncodedObjectStorer and a
-// storer.EncodedObjectIter and returns a CommitIter that iterates over all
-// commits contained in the storer.EncodedObjectIter.
-//
-// Any non-commit object returned by the storer.EncodedObjectIter is skipped.
-func NewCommitIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) CommitIter {
- return &storerCommitIter{iter, s}
-}
-
-// Next moves the iterator to the next commit and returns a pointer to it. If
-// there are no more commits, it returns io.EOF.
-func (iter *storerCommitIter) Next() (*Commit, error) {
- obj, err := iter.EncodedObjectIter.Next()
- if err != nil {
- return nil, err
- }
-
- return DecodeCommit(iter.s, obj)
-}
-
-// ForEach call the cb function for each commit contained on this iter until
-// an error appends or the end of the iter is reached. If ErrStop is sent
-// the iteration is stopped but no error is returned. The iterator is closed.
-func (iter *storerCommitIter) ForEach(cb func(*Commit) error) error {
- return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error {
- c, err := DecodeCommit(iter.s, obj)
- if err != nil {
- return err
- }
-
- return cb(c)
- })
-}
-
-func (iter *storerCommitIter) Close() {
- iter.EncodedObjectIter.Close()
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker.go
deleted file mode 100644
index a96b6a4cf0f..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker.go
+++ /dev/null
@@ -1,327 +0,0 @@
-package object
-
-import (
- "container/list"
- "io"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/storer"
- "github.com/go-git/go-git/v5/storage"
-)
-
-type commitPreIterator struct {
- seenExternal map[plumbing.Hash]bool
- seen map[plumbing.Hash]bool
- stack []CommitIter
- start *Commit
-}
-
-// NewCommitPreorderIter returns a CommitIter that walks the commit history,
-// starting at the given commit and visiting its parents in pre-order.
-// The given callback will be called for each visited commit. Each commit will
-// be visited only once. If the callback returns an error, walking will stop
-// and will return the error. Other errors might be returned if the history
-// cannot be traversed (e.g. missing objects). Ignore allows to skip some
-// commits from being iterated.
-func NewCommitPreorderIter(
- c *Commit,
- seenExternal map[plumbing.Hash]bool,
- ignore []plumbing.Hash,
-) CommitIter {
- seen := make(map[plumbing.Hash]bool)
- for _, h := range ignore {
- seen[h] = true
- }
-
- return &commitPreIterator{
- seenExternal: seenExternal,
- seen: seen,
- stack: make([]CommitIter, 0),
- start: c,
- }
-}
-
-func (w *commitPreIterator) Next() (*Commit, error) {
- var c *Commit
- for {
- if w.start != nil {
- c = w.start
- w.start = nil
- } else {
- current := len(w.stack) - 1
- if current < 0 {
- return nil, io.EOF
- }
-
- var err error
- c, err = w.stack[current].Next()
- if err == io.EOF {
- w.stack = w.stack[:current]
- continue
- }
-
- if err != nil {
- return nil, err
- }
- }
-
- if w.seen[c.Hash] || w.seenExternal[c.Hash] {
- continue
- }
-
- w.seen[c.Hash] = true
-
- if c.NumParents() > 0 {
- w.stack = append(w.stack, filteredParentIter(c, w.seen))
- }
-
- return c, nil
- }
-}
-
-func filteredParentIter(c *Commit, seen map[plumbing.Hash]bool) CommitIter {
- var hashes []plumbing.Hash
- for _, h := range c.ParentHashes {
- if !seen[h] {
- hashes = append(hashes, h)
- }
- }
-
- return NewCommitIter(c.s,
- storer.NewEncodedObjectLookupIter(c.s, plumbing.CommitObject, hashes),
- )
-}
-
-func (w *commitPreIterator) ForEach(cb func(*Commit) error) error {
- for {
- c, err := w.Next()
- if err == io.EOF {
- break
- }
- if err != nil {
- return err
- }
-
- err = cb(c)
- if err == storer.ErrStop {
- break
- }
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (w *commitPreIterator) Close() {}
-
-type commitPostIterator struct {
- stack []*Commit
- seen map[plumbing.Hash]bool
-}
-
-// NewCommitPostorderIter returns a CommitIter that walks the commit
-// history like WalkCommitHistory but in post-order. This means that after
-// walking a merge commit, the merged commit will be walked before the base
-// it was merged on. This can be useful if you wish to see the history in
-// chronological order. Ignore allows to skip some commits from being iterated.
-func NewCommitPostorderIter(c *Commit, ignore []plumbing.Hash) CommitIter {
- seen := make(map[plumbing.Hash]bool)
- for _, h := range ignore {
- seen[h] = true
- }
-
- return &commitPostIterator{
- stack: []*Commit{c},
- seen: seen,
- }
-}
-
-func (w *commitPostIterator) Next() (*Commit, error) {
- for {
- if len(w.stack) == 0 {
- return nil, io.EOF
- }
-
- c := w.stack[len(w.stack)-1]
- w.stack = w.stack[:len(w.stack)-1]
-
- if w.seen[c.Hash] {
- continue
- }
-
- w.seen[c.Hash] = true
-
- return c, c.Parents().ForEach(func(p *Commit) error {
- w.stack = append(w.stack, p)
- return nil
- })
- }
-}
-
-func (w *commitPostIterator) ForEach(cb func(*Commit) error) error {
- for {
- c, err := w.Next()
- if err == io.EOF {
- break
- }
- if err != nil {
- return err
- }
-
- err = cb(c)
- if err == storer.ErrStop {
- break
- }
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (w *commitPostIterator) Close() {}
-
-// commitAllIterator stands for commit iterator for all refs.
-type commitAllIterator struct {
- // currCommit points to the current commit.
- currCommit *list.Element
-}
-
-// NewCommitAllIter returns a new commit iterator for all refs.
-// repoStorer is a repo Storer used to get commits and references.
-// commitIterFunc is a commit iterator function, used to iterate through ref commits in chosen order
-func NewCommitAllIter(repoStorer storage.Storer, commitIterFunc func(*Commit) CommitIter) (CommitIter, error) {
- commitsPath := list.New()
- commitsLookup := make(map[plumbing.Hash]*list.Element)
- head, err := storer.ResolveReference(repoStorer, plumbing.HEAD)
- if err == nil {
- err = addReference(repoStorer, commitIterFunc, head, commitsPath, commitsLookup)
- }
-
- if err != nil && err != plumbing.ErrReferenceNotFound {
- return nil, err
- }
-
- // add all references along with the HEAD
- refIter, err := repoStorer.IterReferences()
- if err != nil {
- return nil, err
- }
- defer refIter.Close()
-
- for {
- ref, err := refIter.Next()
- if err == io.EOF {
- break
- }
-
- if err == plumbing.ErrReferenceNotFound {
- continue
- }
-
- if err != nil {
- return nil, err
- }
-
- if err = addReference(repoStorer, commitIterFunc, ref, commitsPath, commitsLookup); err != nil {
- return nil, err
- }
- }
-
- return &commitAllIterator{commitsPath.Front()}, nil
-}
-
-func addReference(
- repoStorer storage.Storer,
- commitIterFunc func(*Commit) CommitIter,
- ref *plumbing.Reference,
- commitsPath *list.List,
- commitsLookup map[plumbing.Hash]*list.Element) error {
-
- _, exists := commitsLookup[ref.Hash()]
- if exists {
- // we already have it - skip the reference.
- return nil
- }
-
- refCommit, _ := GetCommit(repoStorer, ref.Hash())
- if refCommit == nil {
- // if it's not a commit - skip it.
- return nil
- }
-
- var (
- refCommits []*Commit
- parent *list.Element
- )
- // collect all ref commits to add
- commitIter := commitIterFunc(refCommit)
- for c, e := commitIter.Next(); e == nil; {
- parent, exists = commitsLookup[c.Hash]
- if exists {
- break
- }
- refCommits = append(refCommits, c)
- c, e = commitIter.Next()
- }
- commitIter.Close()
-
- if parent == nil {
- // common parent - not found
- // add all commits to the path from this ref (maybe it's a HEAD and we don't have anything, yet)
- for _, c := range refCommits {
- parent = commitsPath.PushBack(c)
- commitsLookup[c.Hash] = parent
- }
- } else {
- // add ref's commits to the path in reverse order (from the latest)
- for i := len(refCommits) - 1; i >= 0; i-- {
- c := refCommits[i]
- // insert before found common parent
- parent = commitsPath.InsertBefore(c, parent)
- commitsLookup[c.Hash] = parent
- }
- }
-
- return nil
-}
-
-func (it *commitAllIterator) Next() (*Commit, error) {
- if it.currCommit == nil {
- return nil, io.EOF
- }
-
- c := it.currCommit.Value.(*Commit)
- it.currCommit = it.currCommit.Next()
-
- return c, nil
-}
-
-func (it *commitAllIterator) ForEach(cb func(*Commit) error) error {
- for {
- c, err := it.Next()
- if err == io.EOF {
- break
- }
- if err != nil {
- return err
- }
-
- err = cb(c)
- if err == storer.ErrStop {
- break
- }
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (it *commitAllIterator) Close() {
- it.currCommit = nil
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_bfs.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_bfs.go
deleted file mode 100644
index 8047fa9bc0e..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_bfs.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package object
-
-import (
- "io"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/storer"
-)
-
-type bfsCommitIterator struct {
- seenExternal map[plumbing.Hash]bool
- seen map[plumbing.Hash]bool
- queue []*Commit
-}
-
-// NewCommitIterBSF returns a CommitIter that walks the commit history,
-// starting at the given commit and visiting its parents in pre-order.
-// The given callback will be called for each visited commit. Each commit will
-// be visited only once. If the callback returns an error, walking will stop
-// and will return the error. Other errors might be returned if the history
-// cannot be traversed (e.g. missing objects). Ignore allows to skip some
-// commits from being iterated.
-func NewCommitIterBSF(
- c *Commit,
- seenExternal map[plumbing.Hash]bool,
- ignore []plumbing.Hash,
-) CommitIter {
- seen := make(map[plumbing.Hash]bool)
- for _, h := range ignore {
- seen[h] = true
- }
-
- return &bfsCommitIterator{
- seenExternal: seenExternal,
- seen: seen,
- queue: []*Commit{c},
- }
-}
-
-func (w *bfsCommitIterator) appendHash(store storer.EncodedObjectStorer, h plumbing.Hash) error {
- if w.seen[h] || w.seenExternal[h] {
- return nil
- }
- c, err := GetCommit(store, h)
- if err != nil {
- return err
- }
- w.queue = append(w.queue, c)
- return nil
-}
-
-func (w *bfsCommitIterator) Next() (*Commit, error) {
- var c *Commit
- for {
- if len(w.queue) == 0 {
- return nil, io.EOF
- }
- c = w.queue[0]
- w.queue = w.queue[1:]
-
- if w.seen[c.Hash] || w.seenExternal[c.Hash] {
- continue
- }
-
- w.seen[c.Hash] = true
-
- for _, h := range c.ParentHashes {
- err := w.appendHash(c.s, h)
- if err != nil {
- return nil, err
- }
- }
-
- return c, nil
- }
-}
-
-func (w *bfsCommitIterator) ForEach(cb func(*Commit) error) error {
- for {
- c, err := w.Next()
- if err == io.EOF {
- break
- }
- if err != nil {
- return err
- }
-
- err = cb(c)
- if err == storer.ErrStop {
- break
- }
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (w *bfsCommitIterator) Close() {}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_bfs_filtered.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_bfs_filtered.go
deleted file mode 100644
index 9d518133e21..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_bfs_filtered.go
+++ /dev/null
@@ -1,175 +0,0 @@
-package object
-
-import (
- "io"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/storer"
-)
-
-// NewFilterCommitIter returns a CommitIter that walks the commit history,
-// starting at the passed commit and visiting its parents in Breadth-first order.
-// The commits returned by the CommitIter will validate the passed CommitFilter.
-// The history won't be transversed beyond a commit if isLimit is true for it.
-// Each commit will be visited only once.
-// If the commit history can not be traversed, or the Close() method is called,
-// the CommitIter won't return more commits.
-// If no isValid is passed, all ancestors of from commit will be valid.
-// If no isLimit is limit, all ancestors of all commits will be visited.
-func NewFilterCommitIter(
- from *Commit,
- isValid *CommitFilter,
- isLimit *CommitFilter,
-) CommitIter {
- var validFilter CommitFilter
- if isValid == nil {
- validFilter = func(_ *Commit) bool {
- return true
- }
- } else {
- validFilter = *isValid
- }
-
- var limitFilter CommitFilter
- if isLimit == nil {
- limitFilter = func(_ *Commit) bool {
- return false
- }
- } else {
- limitFilter = *isLimit
- }
-
- return &filterCommitIter{
- isValid: validFilter,
- isLimit: limitFilter,
- visited: map[plumbing.Hash]struct{}{},
- queue: []*Commit{from},
- }
-}
-
-// CommitFilter returns a boolean for the passed Commit
-type CommitFilter func(*Commit) bool
-
-// filterCommitIter implements CommitIter
-type filterCommitIter struct {
- isValid CommitFilter
- isLimit CommitFilter
- visited map[plumbing.Hash]struct{}
- queue []*Commit
- lastErr error
-}
-
-// Next returns the next commit of the CommitIter.
-// It will return io.EOF if there are no more commits to visit,
-// or an error if the history could not be traversed.
-func (w *filterCommitIter) Next() (*Commit, error) {
- var commit *Commit
- var err error
- for {
- commit, err = w.popNewFromQueue()
- if err != nil {
- return nil, w.close(err)
- }
-
- w.visited[commit.Hash] = struct{}{}
-
- if !w.isLimit(commit) {
- err = w.addToQueue(commit.s, commit.ParentHashes...)
- if err != nil {
- return nil, w.close(err)
- }
- }
-
- if w.isValid(commit) {
- return commit, nil
- }
- }
-}
-
-// ForEach runs the passed callback over each Commit returned by the CommitIter
-// until the callback returns an error or there is no more commits to traverse.
-func (w *filterCommitIter) ForEach(cb func(*Commit) error) error {
- for {
- commit, err := w.Next()
- if err == io.EOF {
- break
- }
-
- if err != nil {
- return err
- }
-
- if err := cb(commit); err == storer.ErrStop {
- break
- } else if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// Error returns the error that caused that the CommitIter is no longer returning commits
-func (w *filterCommitIter) Error() error {
- return w.lastErr
-}
-
-// Close closes the CommitIter
-func (w *filterCommitIter) Close() {
- w.visited = map[plumbing.Hash]struct{}{}
- w.queue = []*Commit{}
- w.isLimit = nil
- w.isValid = nil
-}
-
-// close closes the CommitIter with an error
-func (w *filterCommitIter) close(err error) error {
- w.Close()
- w.lastErr = err
- return err
-}
-
-// popNewFromQueue returns the first new commit from the internal fifo queue,
-// or an io.EOF error if the queue is empty
-func (w *filterCommitIter) popNewFromQueue() (*Commit, error) {
- var first *Commit
- for {
- if len(w.queue) == 0 {
- if w.lastErr != nil {
- return nil, w.lastErr
- }
-
- return nil, io.EOF
- }
-
- first = w.queue[0]
- w.queue = w.queue[1:]
- if _, ok := w.visited[first.Hash]; ok {
- continue
- }
-
- return first, nil
- }
-}
-
-// addToQueue adds the passed commits to the internal fifo queue if they weren't seen
-// or returns an error if the passed hashes could not be used to get valid commits
-func (w *filterCommitIter) addToQueue(
- store storer.EncodedObjectStorer,
- hashes ...plumbing.Hash,
-) error {
- for _, hash := range hashes {
- if _, ok := w.visited[hash]; ok {
- continue
- }
-
- commit, err := GetCommit(store, hash)
- if err != nil {
- return err
- }
-
- w.queue = append(w.queue, commit)
- }
-
- return nil
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_ctime.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_ctime.go
deleted file mode 100644
index fbddf1d238f..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_ctime.go
+++ /dev/null
@@ -1,103 +0,0 @@
-package object
-
-import (
- "io"
-
- "github.com/emirpasic/gods/trees/binaryheap"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/storer"
-)
-
-type commitIteratorByCTime struct {
- seenExternal map[plumbing.Hash]bool
- seen map[plumbing.Hash]bool
- heap *binaryheap.Heap
-}
-
-// NewCommitIterCTime returns a CommitIter that walks the commit history,
-// starting at the given commit and visiting its parents while preserving Committer Time order.
-// this appears to be the closest order to `git log`
-// The given callback will be called for each visited commit. Each commit will
-// be visited only once. If the callback returns an error, walking will stop
-// and will return the error. Other errors might be returned if the history
-// cannot be traversed (e.g. missing objects). Ignore allows to skip some
-// commits from being iterated.
-func NewCommitIterCTime(
- c *Commit,
- seenExternal map[plumbing.Hash]bool,
- ignore []plumbing.Hash,
-) CommitIter {
- seen := make(map[plumbing.Hash]bool)
- for _, h := range ignore {
- seen[h] = true
- }
-
- heap := binaryheap.NewWith(func(a, b interface{}) int {
- if a.(*Commit).Committer.When.Before(b.(*Commit).Committer.When) {
- return 1
- }
- return -1
- })
- heap.Push(c)
-
- return &commitIteratorByCTime{
- seenExternal: seenExternal,
- seen: seen,
- heap: heap,
- }
-}
-
-func (w *commitIteratorByCTime) Next() (*Commit, error) {
- var c *Commit
- for {
- cIn, ok := w.heap.Pop()
- if !ok {
- return nil, io.EOF
- }
- c = cIn.(*Commit)
-
- if w.seen[c.Hash] || w.seenExternal[c.Hash] {
- continue
- }
-
- w.seen[c.Hash] = true
-
- for _, h := range c.ParentHashes {
- if w.seen[h] || w.seenExternal[h] {
- continue
- }
- pc, err := GetCommit(c.s, h)
- if err != nil {
- return nil, err
- }
- w.heap.Push(pc)
- }
-
- return c, nil
- }
-}
-
-func (w *commitIteratorByCTime) ForEach(cb func(*Commit) error) error {
- for {
- c, err := w.Next()
- if err == io.EOF {
- break
- }
- if err != nil {
- return err
- }
-
- err = cb(c)
- if err == storer.ErrStop {
- break
- }
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (w *commitIteratorByCTime) Close() {}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_limit.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_limit.go
deleted file mode 100644
index ac56a71c41a..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_limit.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package object
-
-import (
- "io"
- "time"
-
- "github.com/go-git/go-git/v5/plumbing/storer"
-)
-
-type commitLimitIter struct {
- sourceIter CommitIter
- limitOptions LogLimitOptions
-}
-
-type LogLimitOptions struct {
- Since *time.Time
- Until *time.Time
-}
-
-func NewCommitLimitIterFromIter(commitIter CommitIter, limitOptions LogLimitOptions) CommitIter {
- iterator := new(commitLimitIter)
- iterator.sourceIter = commitIter
- iterator.limitOptions = limitOptions
- return iterator
-}
-
-func (c *commitLimitIter) Next() (*Commit, error) {
- for {
- commit, err := c.sourceIter.Next()
- if err != nil {
- return nil, err
- }
-
- if c.limitOptions.Since != nil && commit.Committer.When.Before(*c.limitOptions.Since) {
- continue
- }
- if c.limitOptions.Until != nil && commit.Committer.When.After(*c.limitOptions.Until) {
- continue
- }
- return commit, nil
- }
-}
-
-func (c *commitLimitIter) ForEach(cb func(*Commit) error) error {
- for {
- commit, nextErr := c.Next()
- if nextErr == io.EOF {
- break
- }
- if nextErr != nil {
- return nextErr
- }
- err := cb(commit)
- if err == storer.ErrStop {
- return nil
- } else if err != nil {
- return err
- }
- }
- return nil
-}
-
-func (c *commitLimitIter) Close() {
- c.sourceIter.Close()
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_path.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_path.go
deleted file mode 100644
index c1ec8ba7ae1..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_path.go
+++ /dev/null
@@ -1,167 +0,0 @@
-package object
-
-import (
- "io"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/storer"
-)
-
-type commitPathIter struct {
- pathFilter func(string) bool
- sourceIter CommitIter
- currentCommit *Commit
- checkParent bool
-}
-
-// NewCommitPathIterFromIter returns a commit iterator which performs diffTree between
-// successive trees returned from the commit iterator from the argument. The purpose of this is
-// to find the commits that explain how the files that match the path came to be.
-// If checkParent is true then the function double checks if potential parent (next commit in a path)
-// is one of the parents in the tree (it's used by `git log --all`).
-// pathFilter is a function that takes path of file as argument and returns true if we want it
-func NewCommitPathIterFromIter(pathFilter func(string) bool, commitIter CommitIter, checkParent bool) CommitIter {
- iterator := new(commitPathIter)
- iterator.sourceIter = commitIter
- iterator.pathFilter = pathFilter
- iterator.checkParent = checkParent
- return iterator
-}
-
-// NewCommitFileIterFromIter is kept for compatibility, can be replaced with NewCommitPathIterFromIter
-func NewCommitFileIterFromIter(fileName string, commitIter CommitIter, checkParent bool) CommitIter {
- return NewCommitPathIterFromIter(
- func(path string) bool {
- return path == fileName
- },
- commitIter,
- checkParent,
- )
-}
-
-func (c *commitPathIter) Next() (*Commit, error) {
- if c.currentCommit == nil {
- var err error
- c.currentCommit, err = c.sourceIter.Next()
- if err != nil {
- return nil, err
- }
- }
- commit, commitErr := c.getNextFileCommit()
-
- // Setting current-commit to nil to prevent unwanted states when errors are raised
- if commitErr != nil {
- c.currentCommit = nil
- }
- return commit, commitErr
-}
-
-func (c *commitPathIter) getNextFileCommit() (*Commit, error) {
- var parentTree, currentTree *Tree
-
- for {
- // Parent-commit can be nil if the current-commit is the initial commit
- parentCommit, parentCommitErr := c.sourceIter.Next()
- if parentCommitErr != nil {
- // If the parent-commit is beyond the initial commit, keep it nil
- if parentCommitErr != io.EOF {
- return nil, parentCommitErr
- }
- parentCommit = nil
- }
-
- if parentTree == nil {
- var currTreeErr error
- currentTree, currTreeErr = c.currentCommit.Tree()
- if currTreeErr != nil {
- return nil, currTreeErr
- }
- } else {
- currentTree = parentTree
- parentTree = nil
- }
-
- if parentCommit != nil {
- var parentTreeErr error
- parentTree, parentTreeErr = parentCommit.Tree()
- if parentTreeErr != nil {
- return nil, parentTreeErr
- }
- }
-
- // Find diff between current and parent trees
- changes, diffErr := DiffTree(currentTree, parentTree)
- if diffErr != nil {
- return nil, diffErr
- }
-
- found := c.hasFileChange(changes, parentCommit)
-
- // Storing the current-commit in-case a change is found, and
- // Updating the current-commit for the next-iteration
- prevCommit := c.currentCommit
- c.currentCommit = parentCommit
-
- if found {
- return prevCommit, nil
- }
-
- // If not matches found and if parent-commit is beyond the initial commit, then return with EOF
- if parentCommit == nil {
- return nil, io.EOF
- }
- }
-}
-
-func (c *commitPathIter) hasFileChange(changes Changes, parent *Commit) bool {
- for _, change := range changes {
- if !c.pathFilter(change.name()) {
- continue
- }
-
- // filename matches, now check if source iterator contains all commits (from all refs)
- if c.checkParent {
- // Check if parent is beyond the initial commit
- if parent == nil || isParentHash(parent.Hash, c.currentCommit) {
- return true
- }
- continue
- }
-
- return true
- }
-
- return false
-}
-
-func isParentHash(hash plumbing.Hash, commit *Commit) bool {
- for _, h := range commit.ParentHashes {
- if h == hash {
- return true
- }
- }
- return false
-}
-
-func (c *commitPathIter) ForEach(cb func(*Commit) error) error {
- for {
- commit, nextErr := c.Next()
- if nextErr == io.EOF {
- break
- }
- if nextErr != nil {
- return nextErr
- }
- err := cb(commit)
- if err == storer.ErrStop {
- return nil
- } else if err != nil {
- return err
- }
- }
- return nil
-}
-
-func (c *commitPathIter) Close() {
- c.sourceIter.Close()
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/difftree.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/difftree.go
deleted file mode 100644
index 7c2222702ce..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/object/difftree.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package object
-
-import (
- "bytes"
- "context"
-
- "github.com/go-git/go-git/v5/utils/merkletrie"
- "github.com/go-git/go-git/v5/utils/merkletrie/noder"
-)
-
-// DiffTree compares the content and mode of the blobs found via two
-// tree objects.
-// DiffTree does not perform rename detection, use DiffTreeWithOptions
-// instead to detect renames.
-func DiffTree(a, b *Tree) (Changes, error) {
- return DiffTreeContext(context.Background(), a, b)
-}
-
-// DiffTreeContext compares the content and mode of the blobs found via two
-// tree objects. Provided context must be non-nil.
-// An error will be returned if context expires.
-func DiffTreeContext(ctx context.Context, a, b *Tree) (Changes, error) {
- return DiffTreeWithOptions(ctx, a, b, nil)
-}
-
-// DiffTreeOptions are the configurable options when performing a diff tree.
-type DiffTreeOptions struct {
- // DetectRenames is whether the diff tree will use rename detection.
- DetectRenames bool
- // RenameScore is the threshold to of similarity between files to consider
- // that a pair of delete and insert are a rename. The number must be
- // exactly between 0 and 100.
- RenameScore uint
- // RenameLimit is the maximum amount of files that can be compared when
- // detecting renames. The number of comparisons that have to be performed
- // is equal to the number of deleted files * the number of added files.
- // That means, that if 100 files were deleted and 50 files were added, 5000
- // file comparisons may be needed. So, if the rename limit is 50, the number
- // of both deleted and added needs to be equal or less than 50.
- // A value of 0 means no limit.
- RenameLimit uint
- // OnlyExactRenames performs only detection of exact renames and will not perform
- // any detection of renames based on file similarity.
- OnlyExactRenames bool
-}
-
-// DefaultDiffTreeOptions are the default and recommended options for the
-// diff tree.
-var DefaultDiffTreeOptions = &DiffTreeOptions{
- DetectRenames: true,
- RenameScore: 60,
- RenameLimit: 0,
- OnlyExactRenames: false,
-}
-
-// DiffTreeWithOptions compares the content and mode of the blobs found
-// via two tree objects with the given options. The provided context
-// must be non-nil.
-// If no options are passed, no rename detection will be performed. The
-// recommended options are DefaultDiffTreeOptions.
-// An error will be returned if the context expires.
-// This function will be deprecated and removed in v6 so the default
-// behaviour of DiffTree is to detect renames.
-func DiffTreeWithOptions(
- ctx context.Context,
- a, b *Tree,
- opts *DiffTreeOptions,
-) (Changes, error) {
- from := NewTreeRootNode(a)
- to := NewTreeRootNode(b)
-
- hashEqual := func(a, b noder.Hasher) bool {
- return bytes.Equal(a.Hash(), b.Hash())
- }
-
- merkletrieChanges, err := merkletrie.DiffTreeContext(ctx, from, to, hashEqual)
- if err != nil {
- if err == merkletrie.ErrCanceled {
- return nil, ErrCanceled
- }
- return nil, err
- }
-
- changes, err := newChanges(merkletrieChanges)
- if err != nil {
- return nil, err
- }
-
- if opts == nil {
- opts = new(DiffTreeOptions)
- }
-
- if opts.DetectRenames {
- return DetectRenames(changes, opts)
- }
-
- return changes, nil
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/file.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/file.go
deleted file mode 100644
index 6cc5367d8d6..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/object/file.go
+++ /dev/null
@@ -1,137 +0,0 @@
-package object
-
-import (
- "bytes"
- "io"
- "strings"
-
- "github.com/go-git/go-git/v5/plumbing/filemode"
- "github.com/go-git/go-git/v5/plumbing/storer"
- "github.com/go-git/go-git/v5/utils/binary"
- "github.com/go-git/go-git/v5/utils/ioutil"
-)
-
-// File represents git file objects.
-type File struct {
- // Name is the path of the file. It might be relative to a tree,
- // depending of the function that generates it.
- Name string
- // Mode is the file mode.
- Mode filemode.FileMode
- // Blob with the contents of the file.
- Blob
-}
-
-// NewFile returns a File based on the given blob object
-func NewFile(name string, m filemode.FileMode, b *Blob) *File {
- return &File{Name: name, Mode: m, Blob: *b}
-}
-
-// Contents returns the contents of a file as a string.
-func (f *File) Contents() (content string, err error) {
- reader, err := f.Reader()
- if err != nil {
- return "", err
- }
- defer ioutil.CheckClose(reader, &err)
-
- buf := new(bytes.Buffer)
- if _, err := buf.ReadFrom(reader); err != nil {
- return "", err
- }
-
- return buf.String(), nil
-}
-
-// IsBinary returns if the file is binary or not
-func (f *File) IsBinary() (bin bool, err error) {
- reader, err := f.Reader()
- if err != nil {
- return false, err
- }
- defer ioutil.CheckClose(reader, &err)
-
- return binary.IsBinary(reader)
-}
-
-// Lines returns a slice of lines from the contents of a file, stripping
-// all end of line characters. If the last line is empty (does not end
-// in an end of line), it is also stripped.
-func (f *File) Lines() ([]string, error) {
- content, err := f.Contents()
- if err != nil {
- return nil, err
- }
-
- splits := strings.Split(content, "\n")
- // remove the last line if it is empty
- if splits[len(splits)-1] == "" {
- return splits[:len(splits)-1], nil
- }
-
- return splits, nil
-}
-
-// FileIter provides an iterator for the files in a tree.
-type FileIter struct {
- s storer.EncodedObjectStorer
- w TreeWalker
-}
-
-// NewFileIter takes a storer.EncodedObjectStorer and a Tree and returns a
-// *FileIter that iterates over all files contained in the tree, recursively.
-func NewFileIter(s storer.EncodedObjectStorer, t *Tree) *FileIter {
- return &FileIter{s: s, w: *NewTreeWalker(t, true, nil)}
-}
-
-// Next moves the iterator to the next file and returns a pointer to it. If
-// there are no more files, it returns io.EOF.
-func (iter *FileIter) Next() (*File, error) {
- for {
- name, entry, err := iter.w.Next()
- if err != nil {
- return nil, err
- }
-
- if entry.Mode == filemode.Dir || entry.Mode == filemode.Submodule {
- continue
- }
-
- blob, err := GetBlob(iter.s, entry.Hash)
- if err != nil {
- return nil, err
- }
-
- return NewFile(name, entry.Mode, blob), nil
- }
-}
-
-// ForEach call the cb function for each file contained in this iter until
-// an error happens or the end of the iter is reached. If plumbing.ErrStop is sent
-// the iteration is stop but no error is returned. The iterator is closed.
-func (iter *FileIter) ForEach(cb func(*File) error) error {
- defer iter.Close()
-
- for {
- f, err := iter.Next()
- if err != nil {
- if err == io.EOF {
- return nil
- }
-
- return err
- }
-
- if err := cb(f); err != nil {
- if err == storer.ErrStop {
- return nil
- }
-
- return err
- }
- }
-}
-
-func (iter *FileIter) Close() {
- iter.w.Close()
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/merge_base.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/merge_base.go
deleted file mode 100644
index b412361d029..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/object/merge_base.go
+++ /dev/null
@@ -1,210 +0,0 @@
-package object
-
-import (
- "fmt"
- "sort"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/storer"
-)
-
-// errIsReachable is thrown when first commit is an ancestor of the second
-var errIsReachable = fmt.Errorf("first is reachable from second")
-
-// MergeBase mimics the behavior of `git merge-base actual other`, returning the
-// best common ancestor between the actual and the passed one.
-// The best common ancestors can not be reached from other common ancestors.
-func (c *Commit) MergeBase(other *Commit) ([]*Commit, error) {
- // use sortedByCommitDateDesc strategy
- sorted := sortByCommitDateDesc(c, other)
- newer := sorted[0]
- older := sorted[1]
-
- newerHistory, err := ancestorsIndex(older, newer)
- if err == errIsReachable {
- return []*Commit{older}, nil
- }
-
- if err != nil {
- return nil, err
- }
-
- var res []*Commit
- inNewerHistory := isInIndexCommitFilter(newerHistory)
- resIter := NewFilterCommitIter(older, &inNewerHistory, &inNewerHistory)
- _ = resIter.ForEach(func(commit *Commit) error {
- res = append(res, commit)
- return nil
- })
-
- return Independents(res)
-}
-
-// IsAncestor returns true if the actual commit is ancestor of the passed one.
-// It returns an error if the history is not transversable
-// It mimics the behavior of `git merge --is-ancestor actual other`
-func (c *Commit) IsAncestor(other *Commit) (bool, error) {
- found := false
- iter := NewCommitPreorderIter(other, nil, nil)
- err := iter.ForEach(func(comm *Commit) error {
- if comm.Hash != c.Hash {
- return nil
- }
-
- found = true
- return storer.ErrStop
- })
-
- return found, err
-}
-
-// ancestorsIndex returns a map with the ancestors of the starting commit if the
-// excluded one is not one of them. It returns errIsReachable if the excluded commit
-// is ancestor of the starting, or another error if the history is not traversable.
-func ancestorsIndex(excluded, starting *Commit) (map[plumbing.Hash]struct{}, error) {
- if excluded.Hash.String() == starting.Hash.String() {
- return nil, errIsReachable
- }
-
- startingHistory := map[plumbing.Hash]struct{}{}
- startingIter := NewCommitIterBSF(starting, nil, nil)
- err := startingIter.ForEach(func(commit *Commit) error {
- if commit.Hash == excluded.Hash {
- return errIsReachable
- }
-
- startingHistory[commit.Hash] = struct{}{}
- return nil
- })
-
- if err != nil {
- return nil, err
- }
-
- return startingHistory, nil
-}
-
-// Independents returns a subset of the passed commits, that are not reachable the others
-// It mimics the behavior of `git merge-base --independent commit...`.
-func Independents(commits []*Commit) ([]*Commit, error) {
- // use sortedByCommitDateDesc strategy
- candidates := sortByCommitDateDesc(commits...)
- candidates = removeDuplicated(candidates)
-
- seen := map[plumbing.Hash]struct{}{}
- var isLimit CommitFilter = func(commit *Commit) bool {
- _, ok := seen[commit.Hash]
- return ok
- }
-
- if len(candidates) < 2 {
- return candidates, nil
- }
-
- pos := 0
- for {
- from := candidates[pos]
- others := remove(candidates, from)
- fromHistoryIter := NewFilterCommitIter(from, nil, &isLimit)
- err := fromHistoryIter.ForEach(func(fromAncestor *Commit) error {
- for _, other := range others {
- if fromAncestor.Hash == other.Hash {
- candidates = remove(candidates, other)
- others = remove(others, other)
- }
- }
-
- if len(candidates) == 1 {
- return storer.ErrStop
- }
-
- seen[fromAncestor.Hash] = struct{}{}
- return nil
- })
-
- if err != nil {
- return nil, err
- }
-
- nextPos := indexOf(candidates, from) + 1
- if nextPos >= len(candidates) {
- break
- }
-
- pos = nextPos
- }
-
- return candidates, nil
-}
-
-// sortByCommitDateDesc returns the passed commits, sorted by `committer.When desc`
-//
-// Following this strategy, it is tried to reduce the time needed when walking
-// the history from one commit to reach the others. It is assumed that ancestors
-// use to be committed before its descendant;
-// That way `Independents(A^, A)` will be processed as being `Independents(A, A^)`;
-// so starting by `A` it will be reached `A^` way sooner than walking from `A^`
-// to the initial commit, and then from `A` to `A^`.
-func sortByCommitDateDesc(commits ...*Commit) []*Commit {
- sorted := make([]*Commit, len(commits))
- copy(sorted, commits)
- sort.Slice(sorted, func(i, j int) bool {
- return sorted[i].Committer.When.After(sorted[j].Committer.When)
- })
-
- return sorted
-}
-
-// indexOf returns the first position where target was found in the passed commits
-func indexOf(commits []*Commit, target *Commit) int {
- for i, commit := range commits {
- if target.Hash == commit.Hash {
- return i
- }
- }
-
- return -1
-}
-
-// remove returns the passed commits excluding the commit toDelete
-func remove(commits []*Commit, toDelete *Commit) []*Commit {
- res := make([]*Commit, len(commits))
- j := 0
- for _, commit := range commits {
- if commit.Hash == toDelete.Hash {
- continue
- }
-
- res[j] = commit
- j++
- }
-
- return res[:j]
-}
-
-// removeDuplicated removes duplicated commits from the passed slice of commits
-func removeDuplicated(commits []*Commit) []*Commit {
- seen := make(map[plumbing.Hash]struct{}, len(commits))
- res := make([]*Commit, len(commits))
- j := 0
- for _, commit := range commits {
- if _, ok := seen[commit.Hash]; ok {
- continue
- }
-
- seen[commit.Hash] = struct{}{}
- res[j] = commit
- j++
- }
-
- return res[:j]
-}
-
-// isInIndexCommitFilter returns a commitFilter that returns true
-// if the commit is in the passed index.
-func isInIndexCommitFilter(index map[plumbing.Hash]struct{}) CommitFilter {
- return func(c *Commit) bool {
- _, ok := index[c.Hash]
- return ok
- }
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/object.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/object.go
deleted file mode 100644
index 13b1e91c9c6..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/object/object.go
+++ /dev/null
@@ -1,239 +0,0 @@
-// Package object contains implementations of all Git objects and utility
-// functions to work with them.
-package object
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "strconv"
- "time"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/storer"
-)
-
-// ErrUnsupportedObject trigger when a non-supported object is being decoded.
-var ErrUnsupportedObject = errors.New("unsupported object type")
-
-// Object is a generic representation of any git object. It is implemented by
-// Commit, Tree, Blob, and Tag, and includes the functions that are common to
-// them.
-//
-// Object is returned when an object can be of any type. It is frequently used
-// with a type cast to acquire the specific type of object:
-//
-// func process(obj Object) {
-// switch o := obj.(type) {
-// case *Commit:
-// // o is a Commit
-// case *Tree:
-// // o is a Tree
-// case *Blob:
-// // o is a Blob
-// case *Tag:
-// // o is a Tag
-// }
-// }
-//
-// This interface is intentionally different from plumbing.EncodedObject, which
-// is a lower level interface used by storage implementations to read and write
-// objects in its encoded form.
-type Object interface {
- ID() plumbing.Hash
- Type() plumbing.ObjectType
- Decode(plumbing.EncodedObject) error
- Encode(plumbing.EncodedObject) error
-}
-
-// GetObject gets an object from an object storer and decodes it.
-func GetObject(s storer.EncodedObjectStorer, h plumbing.Hash) (Object, error) {
- o, err := s.EncodedObject(plumbing.AnyObject, h)
- if err != nil {
- return nil, err
- }
-
- return DecodeObject(s, o)
-}
-
-// DecodeObject decodes an encoded object into an Object and associates it to
-// the given object storer.
-func DecodeObject(s storer.EncodedObjectStorer, o plumbing.EncodedObject) (Object, error) {
- switch o.Type() {
- case plumbing.CommitObject:
- return DecodeCommit(s, o)
- case plumbing.TreeObject:
- return DecodeTree(s, o)
- case plumbing.BlobObject:
- return DecodeBlob(o)
- case plumbing.TagObject:
- return DecodeTag(s, o)
- default:
- return nil, plumbing.ErrInvalidType
- }
-}
-
-// DateFormat is the format being used in the original git implementation
-const DateFormat = "Mon Jan 02 15:04:05 2006 -0700"
-
-// Signature is used to identify who and when created a commit or tag.
-type Signature struct {
- // Name represents a person name. It is an arbitrary string.
- Name string
- // Email is an email, but it cannot be assumed to be well-formed.
- Email string
- // When is the timestamp of the signature.
- When time.Time
-}
-
-// Decode decodes a byte slice into a signature
-func (s *Signature) Decode(b []byte) {
- open := bytes.LastIndexByte(b, '<')
- close := bytes.LastIndexByte(b, '>')
- if open == -1 || close == -1 {
- return
- }
-
- if close < open {
- return
- }
-
- s.Name = string(bytes.Trim(b[:open], " "))
- s.Email = string(b[open+1 : close])
-
- hasTime := close+2 < len(b)
- if hasTime {
- s.decodeTimeAndTimeZone(b[close+2:])
- }
-}
-
-// Encode encodes a Signature into a writer.
-func (s *Signature) Encode(w io.Writer) error {
- if _, err := fmt.Fprintf(w, "%s <%s> ", s.Name, s.Email); err != nil {
- return err
- }
- if err := s.encodeTimeAndTimeZone(w); err != nil {
- return err
- }
- return nil
-}
-
-var timeZoneLength = 5
-
-func (s *Signature) decodeTimeAndTimeZone(b []byte) {
- space := bytes.IndexByte(b, ' ')
- if space == -1 {
- space = len(b)
- }
-
- ts, err := strconv.ParseInt(string(b[:space]), 10, 64)
- if err != nil {
- return
- }
-
- s.When = time.Unix(ts, 0).In(time.UTC)
- var tzStart = space + 1
- if tzStart >= len(b) || tzStart+timeZoneLength > len(b) {
- return
- }
-
- timezone := string(b[tzStart : tzStart+timeZoneLength])
- tzhours, err1 := strconv.ParseInt(timezone[0:3], 10, 64)
- tzmins, err2 := strconv.ParseInt(timezone[3:], 10, 64)
- if err1 != nil || err2 != nil {
- return
- }
- if tzhours < 0 {
- tzmins *= -1
- }
-
- tz := time.FixedZone("", int(tzhours*60*60+tzmins*60))
-
- s.When = s.When.In(tz)
-}
-
-func (s *Signature) encodeTimeAndTimeZone(w io.Writer) error {
- u := s.When.Unix()
- if u < 0 {
- u = 0
- }
- _, err := fmt.Fprintf(w, "%d %s", u, s.When.Format("-0700"))
- return err
-}
-
-func (s *Signature) String() string {
- return fmt.Sprintf("%s <%s>", s.Name, s.Email)
-}
-
-// ObjectIter provides an iterator for a set of objects.
-type ObjectIter struct {
- storer.EncodedObjectIter
- s storer.EncodedObjectStorer
-}
-
-// NewObjectIter takes a storer.EncodedObjectStorer and a
-// storer.EncodedObjectIter and returns an *ObjectIter that iterates over all
-// objects contained in the storer.EncodedObjectIter.
-func NewObjectIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *ObjectIter {
- return &ObjectIter{iter, s}
-}
-
-// Next moves the iterator to the next object and returns a pointer to it. If
-// there are no more objects, it returns io.EOF.
-func (iter *ObjectIter) Next() (Object, error) {
- for {
- obj, err := iter.EncodedObjectIter.Next()
- if err != nil {
- return nil, err
- }
-
- o, err := iter.toObject(obj)
- if err == plumbing.ErrInvalidType {
- continue
- }
-
- if err != nil {
- return nil, err
- }
-
- return o, nil
- }
-}
-
-// ForEach call the cb function for each object contained on this iter until
-// an error happens or the end of the iter is reached. If ErrStop is sent
-// the iteration is stop but no error is returned. The iterator is closed.
-func (iter *ObjectIter) ForEach(cb func(Object) error) error {
- return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error {
- o, err := iter.toObject(obj)
- if err == plumbing.ErrInvalidType {
- return nil
- }
-
- if err != nil {
- return err
- }
-
- return cb(o)
- })
-}
-
-func (iter *ObjectIter) toObject(obj plumbing.EncodedObject) (Object, error) {
- switch obj.Type() {
- case plumbing.BlobObject:
- blob := &Blob{}
- return blob, blob.Decode(obj)
- case plumbing.TreeObject:
- tree := &Tree{s: iter.s}
- return tree, tree.Decode(obj)
- case plumbing.CommitObject:
- commit := &Commit{}
- return commit, commit.Decode(obj)
- case plumbing.TagObject:
- tag := &Tag{}
- return tag, tag.Decode(obj)
- default:
- return nil, plumbing.ErrInvalidType
- }
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/patch.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/patch.go
deleted file mode 100644
index 3c61f626abb..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/object/patch.go
+++ /dev/null
@@ -1,337 +0,0 @@
-package object
-
-import (
- "bytes"
- "context"
- "errors"
- "fmt"
- "io"
- "strconv"
- "strings"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/filemode"
- fdiff "github.com/go-git/go-git/v5/plumbing/format/diff"
- "github.com/go-git/go-git/v5/utils/diff"
-
- dmp "github.com/sergi/go-diff/diffmatchpatch"
-)
-
-var (
- ErrCanceled = errors.New("operation canceled")
-)
-
-func getPatch(message string, changes ...*Change) (*Patch, error) {
- ctx := context.Background()
- return getPatchContext(ctx, message, changes...)
-}
-
-func getPatchContext(ctx context.Context, message string, changes ...*Change) (*Patch, error) {
- var filePatches []fdiff.FilePatch
- for _, c := range changes {
- select {
- case <-ctx.Done():
- return nil, ErrCanceled
- default:
- }
-
- fp, err := filePatchWithContext(ctx, c)
- if err != nil {
- return nil, err
- }
-
- filePatches = append(filePatches, fp)
- }
-
- return &Patch{message, filePatches}, nil
-}
-
-func filePatchWithContext(ctx context.Context, c *Change) (fdiff.FilePatch, error) {
- from, to, err := c.Files()
- if err != nil {
- return nil, err
- }
- fromContent, fIsBinary, err := fileContent(from)
- if err != nil {
- return nil, err
- }
-
- toContent, tIsBinary, err := fileContent(to)
- if err != nil {
- return nil, err
- }
-
- if fIsBinary || tIsBinary {
- return &textFilePatch{from: c.From, to: c.To}, nil
- }
-
- diffs := diff.Do(fromContent, toContent)
-
- var chunks []fdiff.Chunk
- for _, d := range diffs {
- select {
- case <-ctx.Done():
- return nil, ErrCanceled
- default:
- }
-
- var op fdiff.Operation
- switch d.Type {
- case dmp.DiffEqual:
- op = fdiff.Equal
- case dmp.DiffDelete:
- op = fdiff.Delete
- case dmp.DiffInsert:
- op = fdiff.Add
- }
-
- chunks = append(chunks, &textChunk{d.Text, op})
- }
-
- return &textFilePatch{
- chunks: chunks,
- from: c.From,
- to: c.To,
- }, nil
-
-}
-
-func fileContent(f *File) (content string, isBinary bool, err error) {
- if f == nil {
- return
- }
-
- isBinary, err = f.IsBinary()
- if err != nil || isBinary {
- return
- }
-
- content, err = f.Contents()
-
- return
-}
-
-// Patch is an implementation of fdiff.Patch interface
-type Patch struct {
- message string
- filePatches []fdiff.FilePatch
-}
-
-func (p *Patch) FilePatches() []fdiff.FilePatch {
- return p.filePatches
-}
-
-func (p *Patch) Message() string {
- return p.message
-}
-
-func (p *Patch) Encode(w io.Writer) error {
- ue := fdiff.NewUnifiedEncoder(w, fdiff.DefaultContextLines)
-
- return ue.Encode(p)
-}
-
-func (p *Patch) Stats() FileStats {
- return getFileStatsFromFilePatches(p.FilePatches())
-}
-
-func (p *Patch) String() string {
- buf := bytes.NewBuffer(nil)
- err := p.Encode(buf)
- if err != nil {
- return fmt.Sprintf("malformed patch: %s", err.Error())
- }
-
- return buf.String()
-}
-
-// changeEntryWrapper is an implementation of fdiff.File interface
-type changeEntryWrapper struct {
- ce ChangeEntry
-}
-
-func (f *changeEntryWrapper) Hash() plumbing.Hash {
- if !f.ce.TreeEntry.Mode.IsFile() {
- return plumbing.ZeroHash
- }
-
- return f.ce.TreeEntry.Hash
-}
-
-func (f *changeEntryWrapper) Mode() filemode.FileMode {
- return f.ce.TreeEntry.Mode
-}
-func (f *changeEntryWrapper) Path() string {
- if !f.ce.TreeEntry.Mode.IsFile() {
- return ""
- }
-
- return f.ce.Name
-}
-
-func (f *changeEntryWrapper) Empty() bool {
- return !f.ce.TreeEntry.Mode.IsFile()
-}
-
-// textFilePatch is an implementation of fdiff.FilePatch interface
-type textFilePatch struct {
- chunks []fdiff.Chunk
- from, to ChangeEntry
-}
-
-func (tf *textFilePatch) Files() (from fdiff.File, to fdiff.File) {
- f := &changeEntryWrapper{tf.from}
- t := &changeEntryWrapper{tf.to}
-
- if !f.Empty() {
- from = f
- }
-
- if !t.Empty() {
- to = t
- }
-
- return
-}
-
-func (tf *textFilePatch) IsBinary() bool {
- return len(tf.chunks) == 0
-}
-
-func (tf *textFilePatch) Chunks() []fdiff.Chunk {
- return tf.chunks
-}
-
-// textChunk is an implementation of fdiff.Chunk interface
-type textChunk struct {
- content string
- op fdiff.Operation
-}
-
-func (t *textChunk) Content() string {
- return t.content
-}
-
-func (t *textChunk) Type() fdiff.Operation {
- return t.op
-}
-
-// FileStat stores the status of changes in content of a file.
-type FileStat struct {
- Name string
- Addition int
- Deletion int
-}
-
-func (fs FileStat) String() string {
- return printStat([]FileStat{fs})
-}
-
-// FileStats is a collection of FileStat.
-type FileStats []FileStat
-
-func (fileStats FileStats) String() string {
- return printStat(fileStats)
-}
-
-// printStat prints the stats of changes in content of files.
-// Original implementation: https://github.com/git/git/blob/1a87c842ece327d03d08096395969aca5e0a6996/diff.c#L2615
-// Parts of the output:
-// |<+++/--->
-// example: " main.go | 10 +++++++--- "
-func printStat(fileStats []FileStat) string {
- maxGraphWidth := uint(53)
- maxNameLen := 0
- maxChangeLen := 0
-
- scaleLinear := func(it, width, max uint) uint {
- if it == 0 || max == 0 {
- return 0
- }
-
- return 1 + (it * (width - 1) / max)
- }
-
- for _, fs := range fileStats {
- if len(fs.Name) > maxNameLen {
- maxNameLen = len(fs.Name)
- }
-
- changes := strconv.Itoa(fs.Addition + fs.Deletion)
- if len(changes) > maxChangeLen {
- maxChangeLen = len(changes)
- }
- }
-
- result := ""
- for _, fs := range fileStats {
- add := uint(fs.Addition)
- del := uint(fs.Deletion)
- np := maxNameLen - len(fs.Name)
- cp := maxChangeLen - len(strconv.Itoa(fs.Addition+fs.Deletion))
-
- total := add + del
- if total > maxGraphWidth {
- add = scaleLinear(add, maxGraphWidth, total)
- del = scaleLinear(del, maxGraphWidth, total)
- }
-
- adds := strings.Repeat("+", int(add))
- dels := strings.Repeat("-", int(del))
- namePad := strings.Repeat(" ", np)
- changePad := strings.Repeat(" ", cp)
-
- result += fmt.Sprintf(" %s%s | %s%d %s%s\n", fs.Name, namePad, changePad, total, adds, dels)
- }
- return result
-}
-
-func getFileStatsFromFilePatches(filePatches []fdiff.FilePatch) FileStats {
- var fileStats FileStats
-
- for _, fp := range filePatches {
- // ignore empty patches (binary files, submodule refs updates)
- if len(fp.Chunks()) == 0 {
- continue
- }
-
- cs := FileStat{}
- from, to := fp.Files()
- if from == nil {
- // New File is created.
- cs.Name = to.Path()
- } else if to == nil {
- // File is deleted.
- cs.Name = from.Path()
- } else if from.Path() != to.Path() {
- // File is renamed.
- cs.Name = fmt.Sprintf("%s => %s", from.Path(), to.Path())
- } else {
- cs.Name = from.Path()
- }
-
- for _, chunk := range fp.Chunks() {
- s := chunk.Content()
- if len(s) == 0 {
- continue
- }
-
- switch chunk.Type() {
- case fdiff.Add:
- cs.Addition += strings.Count(s, "\n")
- if s[len(s)-1] != '\n' {
- cs.Addition++
- }
- case fdiff.Delete:
- cs.Deletion += strings.Count(s, "\n")
- if s[len(s)-1] != '\n' {
- cs.Deletion++
- }
- }
- }
-
- fileStats = append(fileStats, cs)
- }
-
- return fileStats
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/rename.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/rename.go
deleted file mode 100644
index ad2b902c25a..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/object/rename.go
+++ /dev/null
@@ -1,816 +0,0 @@
-package object
-
-import (
- "errors"
- "io"
- "sort"
- "strings"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/filemode"
- "github.com/go-git/go-git/v5/utils/ioutil"
- "github.com/go-git/go-git/v5/utils/merkletrie"
-)
-
-// DetectRenames detects the renames in the given changes on two trees with
-// the given options. It will return the given changes grouping additions and
-// deletions into modifications when possible.
-// If options is nil, the default diff tree options will be used.
-func DetectRenames(
- changes Changes,
- opts *DiffTreeOptions,
-) (Changes, error) {
- if opts == nil {
- opts = DefaultDiffTreeOptions
- }
-
- detector := &renameDetector{
- renameScore: int(opts.RenameScore),
- renameLimit: int(opts.RenameLimit),
- onlyExact: opts.OnlyExactRenames,
- }
-
- for _, c := range changes {
- action, err := c.Action()
- if err != nil {
- return nil, err
- }
-
- switch action {
- case merkletrie.Insert:
- detector.added = append(detector.added, c)
- case merkletrie.Delete:
- detector.deleted = append(detector.deleted, c)
- default:
- detector.modified = append(detector.modified, c)
- }
- }
-
- return detector.detect()
-}
-
-// renameDetector will detect and resolve renames in a set of changes.
-// see: https://github.com/eclipse/jgit/blob/master/org.eclipse.jgit/src/org/eclipse/jgit/diff/RenameDetector.java
-type renameDetector struct {
- added []*Change
- deleted []*Change
- modified []*Change
-
- renameScore int
- renameLimit int
- onlyExact bool
-}
-
-// detectExactRenames detects matches files that were deleted with files that
-// were added where the hash is the same on both. If there are multiple targets
-// the one with the most similar path will be chosen as the rename and the
-// rest as either deletions or additions.
-func (d *renameDetector) detectExactRenames() {
- added := groupChangesByHash(d.added)
- deletes := groupChangesByHash(d.deleted)
- var uniqueAdds []*Change
- var nonUniqueAdds [][]*Change
- var addedLeft []*Change
-
- for _, cs := range added {
- if len(cs) == 1 {
- uniqueAdds = append(uniqueAdds, cs[0])
- } else {
- nonUniqueAdds = append(nonUniqueAdds, cs)
- }
- }
-
- for _, c := range uniqueAdds {
- hash := changeHash(c)
- deleted := deletes[hash]
-
- if len(deleted) == 1 {
- if sameMode(c, deleted[0]) {
- d.modified = append(d.modified, &Change{From: deleted[0].From, To: c.To})
- delete(deletes, hash)
- } else {
- addedLeft = append(addedLeft, c)
- }
- } else if len(deleted) > 1 {
- bestMatch := bestNameMatch(c, deleted)
- if bestMatch != nil && sameMode(c, bestMatch) {
- d.modified = append(d.modified, &Change{From: bestMatch.From, To: c.To})
- delete(deletes, hash)
-
- var newDeletes = make([]*Change, 0, len(deleted)-1)
- for _, d := range deleted {
- if d != bestMatch {
- newDeletes = append(newDeletes, d)
- }
- }
- deletes[hash] = newDeletes
- }
- } else {
- addedLeft = append(addedLeft, c)
- }
- }
-
- for _, added := range nonUniqueAdds {
- hash := changeHash(added[0])
- deleted := deletes[hash]
-
- if len(deleted) == 1 {
- deleted := deleted[0]
- bestMatch := bestNameMatch(deleted, added)
- if bestMatch != nil && sameMode(deleted, bestMatch) {
- d.modified = append(d.modified, &Change{From: deleted.From, To: bestMatch.To})
- delete(deletes, hash)
-
- for _, c := range added {
- if c != bestMatch {
- addedLeft = append(addedLeft, c)
- }
- }
- } else {
- addedLeft = append(addedLeft, added...)
- }
- } else if len(deleted) > 1 {
- maxSize := len(deleted) * len(added)
- if d.renameLimit > 0 && d.renameLimit < maxSize {
- maxSize = d.renameLimit
- }
-
- matrix := make(similarityMatrix, 0, maxSize)
-
- for delIdx, del := range deleted {
- deletedName := changeName(del)
-
- for addIdx, add := range added {
- addedName := changeName(add)
-
- score := nameSimilarityScore(addedName, deletedName)
- matrix = append(matrix, similarityPair{added: addIdx, deleted: delIdx, score: score})
-
- if len(matrix) >= maxSize {
- break
- }
- }
-
- if len(matrix) >= maxSize {
- break
- }
- }
-
- sort.Stable(matrix)
-
- usedAdds := make(map[*Change]struct{})
- usedDeletes := make(map[*Change]struct{})
- for i := len(matrix) - 1; i >= 0; i-- {
- del := deleted[matrix[i].deleted]
- add := added[matrix[i].added]
-
- if add == nil || del == nil {
- // it was already matched
- continue
- }
-
- usedAdds[add] = struct{}{}
- usedDeletes[del] = struct{}{}
- d.modified = append(d.modified, &Change{From: del.From, To: add.To})
- added[matrix[i].added] = nil
- deleted[matrix[i].deleted] = nil
- }
-
- for _, c := range added {
- if _, ok := usedAdds[c]; !ok && c != nil {
- addedLeft = append(addedLeft, c)
- }
- }
-
- var newDeletes = make([]*Change, 0, len(deleted)-len(usedDeletes))
- for _, c := range deleted {
- if _, ok := usedDeletes[c]; !ok && c != nil {
- newDeletes = append(newDeletes, c)
- }
- }
- deletes[hash] = newDeletes
- } else {
- addedLeft = append(addedLeft, added...)
- }
- }
-
- d.added = addedLeft
- d.deleted = nil
- for _, dels := range deletes {
- d.deleted = append(d.deleted, dels...)
- }
-}
-
-// detectContentRenames detects renames based on the similarity of the content
-// in the files by building a matrix of pairs between sources and destinations
-// and matching by the highest score.
-// see: https://github.com/eclipse/jgit/blob/master/org.eclipse.jgit/src/org/eclipse/jgit/diff/SimilarityRenameDetector.java
-func (d *renameDetector) detectContentRenames() error {
- cnt := max(len(d.added), len(d.deleted))
- if d.renameLimit > 0 && cnt > d.renameLimit {
- return nil
- }
-
- srcs, dsts := d.deleted, d.added
- matrix, err := buildSimilarityMatrix(srcs, dsts, d.renameScore)
- if err != nil {
- return err
- }
- renames := make([]*Change, 0, min(len(matrix), len(dsts)))
-
- // Match rename pairs on a first come, first serve basis until
- // we have looked at everything that is above the minimum score.
- for i := len(matrix) - 1; i >= 0; i-- {
- pair := matrix[i]
- src := srcs[pair.deleted]
- dst := dsts[pair.added]
-
- if dst == nil || src == nil {
- // It was already matched before
- continue
- }
-
- renames = append(renames, &Change{From: src.From, To: dst.To})
-
- // Claim destination and source as matched
- dsts[pair.added] = nil
- srcs[pair.deleted] = nil
- }
-
- d.modified = append(d.modified, renames...)
- d.added = compactChanges(dsts)
- d.deleted = compactChanges(srcs)
-
- return nil
-}
-
-func (d *renameDetector) detect() (Changes, error) {
- if len(d.added) > 0 && len(d.deleted) > 0 {
- d.detectExactRenames()
-
- if !d.onlyExact {
- if err := d.detectContentRenames(); err != nil {
- return nil, err
- }
- }
- }
-
- result := make(Changes, 0, len(d.added)+len(d.deleted)+len(d.modified))
- result = append(result, d.added...)
- result = append(result, d.deleted...)
- result = append(result, d.modified...)
-
- sort.Stable(result)
-
- return result, nil
-}
-
-func bestNameMatch(change *Change, changes []*Change) *Change {
- var best *Change
- var bestScore int
-
- cname := changeName(change)
-
- for _, c := range changes {
- score := nameSimilarityScore(cname, changeName(c))
- if score > bestScore {
- bestScore = score
- best = c
- }
- }
-
- return best
-}
-
-func nameSimilarityScore(a, b string) int {
- aDirLen := strings.LastIndexByte(a, '/') + 1
- bDirLen := strings.LastIndexByte(b, '/') + 1
-
- dirMin := min(aDirLen, bDirLen)
- dirMax := max(aDirLen, bDirLen)
-
- var dirScoreLtr, dirScoreRtl int
- if dirMax == 0 {
- dirScoreLtr = 100
- dirScoreRtl = 100
- } else {
- var dirSim int
-
- for ; dirSim < dirMin; dirSim++ {
- if a[dirSim] != b[dirSim] {
- break
- }
- }
-
- dirScoreLtr = dirSim * 100 / dirMax
-
- if dirScoreLtr == 100 {
- dirScoreRtl = 100
- } else {
- for dirSim = 0; dirSim < dirMin; dirSim++ {
- if a[aDirLen-1-dirSim] != b[bDirLen-1-dirSim] {
- break
- }
- }
- dirScoreRtl = dirSim * 100 / dirMax
- }
- }
-
- fileMin := min(len(a)-aDirLen, len(b)-bDirLen)
- fileMax := max(len(a)-aDirLen, len(b)-bDirLen)
-
- fileSim := 0
- for ; fileSim < fileMin; fileSim++ {
- if a[len(a)-1-fileSim] != b[len(b)-1-fileSim] {
- break
- }
- }
- fileScore := fileSim * 100 / fileMax
-
- return (((dirScoreLtr + dirScoreRtl) * 25) + (fileScore * 50)) / 100
-}
-
-func changeName(c *Change) string {
- if c.To != empty {
- return c.To.Name
- }
- return c.From.Name
-}
-
-func changeHash(c *Change) plumbing.Hash {
- if c.To != empty {
- return c.To.TreeEntry.Hash
- }
-
- return c.From.TreeEntry.Hash
-}
-
-func changeMode(c *Change) filemode.FileMode {
- if c.To != empty {
- return c.To.TreeEntry.Mode
- }
-
- return c.From.TreeEntry.Mode
-}
-
-func sameMode(a, b *Change) bool {
- return changeMode(a) == changeMode(b)
-}
-
-func groupChangesByHash(changes []*Change) map[plumbing.Hash][]*Change {
- var result = make(map[plumbing.Hash][]*Change)
- for _, c := range changes {
- hash := changeHash(c)
- result[hash] = append(result[hash], c)
- }
- return result
-}
-
-type similarityMatrix []similarityPair
-
-func (m similarityMatrix) Len() int { return len(m) }
-func (m similarityMatrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
-func (m similarityMatrix) Less(i, j int) bool {
- if m[i].score == m[j].score {
- if m[i].added == m[j].added {
- return m[i].deleted < m[j].deleted
- }
- return m[i].added < m[j].added
- }
- return m[i].score < m[j].score
-}
-
-type similarityPair struct {
- // index of the added file
- added int
- // index of the deleted file
- deleted int
- // similarity score
- score int
-}
-
-func max(a, b int) int {
- if a > b {
- return a
- }
- return b
-}
-
-func min(a, b int) int {
- if a < b {
- return a
- }
- return b
-}
-
-const maxMatrixSize = 10000
-
-func buildSimilarityMatrix(srcs, dsts []*Change, renameScore int) (similarityMatrix, error) {
- // Allocate for the worst-case scenario where every pair has a score
- // that we need to consider. We might not need that many.
- matrixSize := len(srcs) * len(dsts)
- if matrixSize > maxMatrixSize {
- matrixSize = maxMatrixSize
- }
- matrix := make(similarityMatrix, 0, matrixSize)
- srcSizes := make([]int64, len(srcs))
- dstSizes := make([]int64, len(dsts))
- dstTooLarge := make(map[int]bool)
-
- // Consider each pair of files, if the score is above the minimum
- // threshold we need to record that scoring in the matrix so we can
- // later find the best matches.
-outerLoop:
- for srcIdx, src := range srcs {
- if changeMode(src) != filemode.Regular {
- continue
- }
-
- // Declare the from file and the similarity index here to be able to
- // reuse it inside the inner loop. The reason to not initialize them
- // here is so we can skip the initialization in case they happen to
- // not be needed later. They will be initialized inside the inner
- // loop if and only if they're needed and reused in subsequent passes.
- var from *File
- var s *similarityIndex
- var err error
- for dstIdx, dst := range dsts {
- if changeMode(dst) != filemode.Regular {
- continue
- }
-
- if dstTooLarge[dstIdx] {
- continue
- }
-
- var to *File
- srcSize := srcSizes[srcIdx]
- if srcSize == 0 {
- from, _, err = src.Files()
- if err != nil {
- return nil, err
- }
- srcSize = from.Size + 1
- srcSizes[srcIdx] = srcSize
- }
-
- dstSize := dstSizes[dstIdx]
- if dstSize == 0 {
- _, to, err = dst.Files()
- if err != nil {
- return nil, err
- }
- dstSize = to.Size + 1
- dstSizes[dstIdx] = dstSize
- }
-
- min, max := srcSize, dstSize
- if dstSize < srcSize {
- min = dstSize
- max = srcSize
- }
-
- if int(min*100/max) < renameScore {
- // File sizes are too different to be a match
- continue
- }
-
- if s == nil {
- s, err = fileSimilarityIndex(from)
- if err != nil {
- if err == errIndexFull {
- continue outerLoop
- }
- return nil, err
- }
- }
-
- if to == nil {
- _, to, err = dst.Files()
- if err != nil {
- return nil, err
- }
- }
-
- di, err := fileSimilarityIndex(to)
- if err != nil {
- if err == errIndexFull {
- dstTooLarge[dstIdx] = true
- }
-
- return nil, err
- }
-
- contentScore := s.score(di, 10000)
- // The name score returns a value between 0 and 100, so we need to
- // convert it to the same range as the content score.
- nameScore := nameSimilarityScore(src.From.Name, dst.To.Name) * 100
- score := (contentScore*99 + nameScore*1) / 10000
-
- if score < renameScore {
- continue
- }
-
- matrix = append(matrix, similarityPair{added: dstIdx, deleted: srcIdx, score: score})
- }
- }
-
- sort.Stable(matrix)
-
- return matrix, nil
-}
-
-func compactChanges(changes []*Change) []*Change {
- var result []*Change
- for _, c := range changes {
- if c != nil {
- result = append(result, c)
- }
- }
- return result
-}
-
-const (
- keyShift = 32
- maxCountValue = (1 << keyShift) - 1
-)
-
-var errIndexFull = errors.New("index is full")
-
-// similarityIndex is an index structure of lines/blocks in one file.
-// This structure can be used to compute an approximation of the similarity
-// between two files.
-// To save space in memory, this index uses a space efficient encoding which
-// will not exceed 1MiB per instance. The index starts out at a smaller size
-// (closer to 2KiB), but may grow as more distinct blocks within the scanned
-// file are discovered.
-// see: https://github.com/eclipse/jgit/blob/master/org.eclipse.jgit/src/org/eclipse/jgit/diff/SimilarityIndex.java
-type similarityIndex struct {
- hashed uint64
- // number of non-zero entries in hashes
- numHashes int
- growAt int
- hashes []keyCountPair
- hashBits int
-}
-
-func fileSimilarityIndex(f *File) (*similarityIndex, error) {
- idx := newSimilarityIndex()
- if err := idx.hash(f); err != nil {
- return nil, err
- }
-
- sort.Stable(keyCountPairs(idx.hashes))
-
- return idx, nil
-}
-
-func newSimilarityIndex() *similarityIndex {
- return &similarityIndex{
- hashBits: 8,
- hashes: make([]keyCountPair, 1<<8),
- growAt: shouldGrowAt(8),
- }
-}
-
-func (i *similarityIndex) hash(f *File) error {
- isBin, err := f.IsBinary()
- if err != nil {
- return err
- }
-
- r, err := f.Reader()
- if err != nil {
- return err
- }
-
- defer ioutil.CheckClose(r, &err)
-
- return i.hashContent(r, f.Size, isBin)
-}
-
-func (i *similarityIndex) hashContent(r io.Reader, size int64, isBin bool) error {
- var buf = make([]byte, 4096)
- var ptr, cnt int
- remaining := size
-
- for 0 < remaining {
- hash := 5381
- var blockHashedCnt uint64
-
- // Hash one line or block, whatever happens first
- n := int64(0)
- for {
- if ptr == cnt {
- ptr = 0
- var err error
- cnt, err = io.ReadFull(r, buf)
- if err != nil && err != io.ErrUnexpectedEOF {
- return err
- }
-
- if cnt == 0 {
- return io.EOF
- }
- }
- n++
- c := buf[ptr] & 0xff
- ptr++
-
- // Ignore CR in CRLF sequence if it's text
- if !isBin && c == '\r' && ptr < cnt && buf[ptr] == '\n' {
- continue
- }
- blockHashedCnt++
-
- if c == '\n' {
- break
- }
-
- hash = (hash << 5) + hash + int(c)
-
- if n >= 64 || n >= remaining {
- break
- }
- }
- i.hashed += blockHashedCnt
- if err := i.add(hash, blockHashedCnt); err != nil {
- return err
- }
- remaining -= n
- }
-
- return nil
-}
-
-// score computes the similarity score between this index and another one.
-// A region of a file is defined as a line in a text file or a fixed-size
-// block in a binary file. To prepare an index, each region in the file is
-// hashed; the values and counts of hashes are retained in a sorted table.
-// Define the similarity fraction F as the count of matching regions between
-// the two files divided between the maximum count of regions in either file.
-// The similarity score is F multiplied by the maxScore constant, yielding a
-// range [0, maxScore]. It is defined as maxScore for the degenerate case of
-// two empty files.
-// The similarity score is symmetrical; i.e. a.score(b) == b.score(a).
-func (i *similarityIndex) score(other *similarityIndex, maxScore int) int {
- var maxHashed = i.hashed
- if maxHashed < other.hashed {
- maxHashed = other.hashed
- }
- if maxHashed == 0 {
- return maxScore
- }
-
- return int(i.common(other) * uint64(maxScore) / maxHashed)
-}
-
-func (i *similarityIndex) common(dst *similarityIndex) uint64 {
- srcIdx, dstIdx := 0, 0
- if i.numHashes == 0 || dst.numHashes == 0 {
- return 0
- }
-
- var common uint64
- srcKey, dstKey := i.hashes[srcIdx].key(), dst.hashes[dstIdx].key()
-
- for {
- if srcKey == dstKey {
- srcCnt, dstCnt := i.hashes[srcIdx].count(), dst.hashes[dstIdx].count()
- if srcCnt < dstCnt {
- common += srcCnt
- } else {
- common += dstCnt
- }
-
- srcIdx++
- if srcIdx == len(i.hashes) {
- break
- }
- srcKey = i.hashes[srcIdx].key()
-
- dstIdx++
- if dstIdx == len(dst.hashes) {
- break
- }
- dstKey = dst.hashes[dstIdx].key()
- } else if srcKey < dstKey {
- // Region of src that is not in dst
- srcIdx++
- if srcIdx == len(i.hashes) {
- break
- }
- srcKey = i.hashes[srcIdx].key()
- } else {
- // Region of dst that is not in src
- dstIdx++
- if dstIdx == len(dst.hashes) {
- break
- }
- dstKey = dst.hashes[dstIdx].key()
- }
- }
-
- return common
-}
-
-func (i *similarityIndex) add(key int, cnt uint64) error {
- key = int(uint32(key) * 0x9e370001 >> 1)
-
- j := i.slot(key)
- for {
- v := i.hashes[j]
- if v == 0 {
- // It's an empty slot, so we can store it here.
- if i.growAt <= i.numHashes {
- if err := i.grow(); err != nil {
- return err
- }
- j = i.slot(key)
- continue
- }
-
- var err error
- i.hashes[j], err = newKeyCountPair(key, cnt)
- if err != nil {
- return err
- }
- i.numHashes++
- return nil
- } else if v.key() == key {
- // It's the same key, so increment the counter.
- var err error
- i.hashes[j], err = newKeyCountPair(key, v.count()+cnt)
- return err
- } else if j+1 >= len(i.hashes) {
- j = 0
- } else {
- j++
- }
- }
-}
-
-type keyCountPair uint64
-
-func newKeyCountPair(key int, cnt uint64) (keyCountPair, error) {
- if cnt > maxCountValue {
- return 0, errIndexFull
- }
-
- return keyCountPair((uint64(key) << keyShift) | cnt), nil
-}
-
-func (p keyCountPair) key() int {
- return int(p >> keyShift)
-}
-
-func (p keyCountPair) count() uint64 {
- return uint64(p) & maxCountValue
-}
-
-func (i *similarityIndex) slot(key int) int {
- // We use 31 - hashBits because the upper bit was already forced
- // to be 0 and we want the remaining high bits to be used as the
- // table slot.
- return int(uint32(key) >> uint(31-i.hashBits))
-}
-
-func shouldGrowAt(hashBits int) int {
- return (1 << uint(hashBits)) * (hashBits - 3) / hashBits
-}
-
-func (i *similarityIndex) grow() error {
- if i.hashBits == 30 {
- return errIndexFull
- }
-
- old := i.hashes
-
- i.hashBits++
- i.growAt = shouldGrowAt(i.hashBits)
-
- // TODO(erizocosmico): find a way to check if it will OOM and return
- // errIndexFull instead.
- i.hashes = make([]keyCountPair, 1<= len(i.hashes) {
- j = 0
- }
- }
- i.hashes[j] = v
- }
- }
-
- return nil
-}
-
-type keyCountPairs []keyCountPair
-
-func (p keyCountPairs) Len() int { return len(p) }
-func (p keyCountPairs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
-func (p keyCountPairs) Less(i, j int) bool { return p[i] < p[j] }
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/signature.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/signature.go
deleted file mode 100644
index 91cf371f0c6..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/object/signature.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package object
-
-import "bytes"
-
-const (
- signatureTypeUnknown signatureType = iota
- signatureTypeOpenPGP
- signatureTypeX509
- signatureTypeSSH
-)
-
-var (
- // openPGPSignatureFormat is the format of an OpenPGP signature.
- openPGPSignatureFormat = signatureFormat{
- []byte("-----BEGIN PGP SIGNATURE-----"),
- []byte("-----BEGIN PGP MESSAGE-----"),
- }
- // x509SignatureFormat is the format of an X509 signature, which is
- // a PKCS#7 (S/MIME) signature.
- x509SignatureFormat = signatureFormat{
- []byte("-----BEGIN CERTIFICATE-----"),
- }
-
- // sshSignatureFormat is the format of an SSH signature.
- sshSignatureFormat = signatureFormat{
- []byte("-----BEGIN SSH SIGNATURE-----"),
- }
-)
-
-var (
- // knownSignatureFormats is a map of known signature formats, indexed by
- // their signatureType.
- knownSignatureFormats = map[signatureType]signatureFormat{
- signatureTypeOpenPGP: openPGPSignatureFormat,
- signatureTypeX509: x509SignatureFormat,
- signatureTypeSSH: sshSignatureFormat,
- }
-)
-
-// signatureType represents the type of the signature.
-type signatureType int8
-
-// signatureFormat represents the beginning of a signature.
-type signatureFormat [][]byte
-
-// typeForSignature returns the type of the signature based on its format.
-func typeForSignature(b []byte) signatureType {
- for t, i := range knownSignatureFormats {
- for _, begin := range i {
- if bytes.HasPrefix(b, begin) {
- return t
- }
- }
- }
- return signatureTypeUnknown
-}
-
-// parseSignedBytes returns the position of the last signature block found in
-// the given bytes. If no signature block is found, it returns -1.
-//
-// When multiple signature blocks are found, the position of the last one is
-// returned. Any tailing bytes after this signature block start should be
-// considered part of the signature.
-//
-// Given this, it would be safe to use the returned position to split the bytes
-// into two parts: the first part containing the message, the second part
-// containing the signature.
-//
-// Example:
-//
-// message := []byte(`Message with signature
-//
-// -----BEGIN SSH SIGNATURE-----
-// ...`)
-//
-// var signature string
-// if pos, _ := parseSignedBytes(message); pos != -1 {
-// signature = string(message[pos:])
-// message = message[:pos]
-// }
-//
-// This logic is on par with git's gpg-interface.c:parse_signed_buffer().
-// https://github.com/git/git/blob/7c2ef319c52c4997256f5807564523dfd4acdfc7/gpg-interface.c#L668
-func parseSignedBytes(b []byte) (int, signatureType) {
- var n, match = 0, -1
- var t signatureType
- for n < len(b) {
- var i = b[n:]
- if st := typeForSignature(i); st != signatureTypeUnknown {
- match = n
- t = st
- }
- if eol := bytes.IndexByte(i, '\n'); eol >= 0 {
- n += eol + 1
- continue
- }
- // If we reach this point, we've reached the end.
- break
- }
- return match, t
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/tag.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/tag.go
deleted file mode 100644
index cf46c08e181..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/object/tag.go
+++ /dev/null
@@ -1,330 +0,0 @@
-package object
-
-import (
- "bytes"
- "fmt"
- "io"
- "strings"
-
- "github.com/ProtonMail/go-crypto/openpgp"
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/storer"
- "github.com/go-git/go-git/v5/utils/ioutil"
- "github.com/go-git/go-git/v5/utils/sync"
-)
-
-// Tag represents an annotated tag object. It points to a single git object of
-// any type, but tags typically are applied to commit or blob objects. It
-// provides a reference that associates the target with a tag name. It also
-// contains meta-information about the tag, including the tagger, tag date and
-// message.
-//
-// Note that this is not used for lightweight tags.
-//
-// https://git-scm.com/book/en/v2/Git-Internals-Git-References#Tags
-type Tag struct {
- // Hash of the tag.
- Hash plumbing.Hash
- // Name of the tag.
- Name string
- // Tagger is the one who created the tag.
- Tagger Signature
- // Message is an arbitrary text message.
- Message string
- // PGPSignature is the PGP signature of the tag.
- PGPSignature string
- // TargetType is the object type of the target.
- TargetType plumbing.ObjectType
- // Target is the hash of the target object.
- Target plumbing.Hash
-
- s storer.EncodedObjectStorer
-}
-
-// GetTag gets a tag from an object storer and decodes it.
-func GetTag(s storer.EncodedObjectStorer, h plumbing.Hash) (*Tag, error) {
- o, err := s.EncodedObject(plumbing.TagObject, h)
- if err != nil {
- return nil, err
- }
-
- return DecodeTag(s, o)
-}
-
-// DecodeTag decodes an encoded object into a *Commit and associates it to the
-// given object storer.
-func DecodeTag(s storer.EncodedObjectStorer, o plumbing.EncodedObject) (*Tag, error) {
- t := &Tag{s: s}
- if err := t.Decode(o); err != nil {
- return nil, err
- }
-
- return t, nil
-}
-
-// ID returns the object ID of the tag, not the object that the tag references.
-// The returned value will always match the current value of Tag.Hash.
-//
-// ID is present to fulfill the Object interface.
-func (t *Tag) ID() plumbing.Hash {
- return t.Hash
-}
-
-// Type returns the type of object. It always returns plumbing.TagObject.
-//
-// Type is present to fulfill the Object interface.
-func (t *Tag) Type() plumbing.ObjectType {
- return plumbing.TagObject
-}
-
-// Decode transforms a plumbing.EncodedObject into a Tag struct.
-func (t *Tag) Decode(o plumbing.EncodedObject) (err error) {
- if o.Type() != plumbing.TagObject {
- return ErrUnsupportedObject
- }
-
- t.Hash = o.Hash()
-
- reader, err := o.Reader()
- if err != nil {
- return err
- }
- defer ioutil.CheckClose(reader, &err)
-
- r := sync.GetBufioReader(reader)
- defer sync.PutBufioReader(r)
-
- for {
- var line []byte
- line, err = r.ReadBytes('\n')
- if err != nil && err != io.EOF {
- return err
- }
-
- line = bytes.TrimSpace(line)
- if len(line) == 0 {
- break // Start of message
- }
-
- split := bytes.SplitN(line, []byte{' '}, 2)
- switch string(split[0]) {
- case "object":
- t.Target = plumbing.NewHash(string(split[1]))
- case "type":
- t.TargetType, err = plumbing.ParseObjectType(string(split[1]))
- if err != nil {
- return err
- }
- case "tag":
- t.Name = string(split[1])
- case "tagger":
- t.Tagger.Decode(split[1])
- }
-
- if err == io.EOF {
- return nil
- }
- }
-
- data, err := io.ReadAll(r)
- if err != nil {
- return err
- }
- if sm, _ := parseSignedBytes(data); sm >= 0 {
- t.PGPSignature = string(data[sm:])
- data = data[:sm]
- }
- t.Message = string(data)
-
- return nil
-}
-
-// Encode transforms a Tag into a plumbing.EncodedObject.
-func (t *Tag) Encode(o plumbing.EncodedObject) error {
- return t.encode(o, true)
-}
-
-// EncodeWithoutSignature export a Tag into a plumbing.EncodedObject without the signature (correspond to the payload of the PGP signature).
-func (t *Tag) EncodeWithoutSignature(o plumbing.EncodedObject) error {
- return t.encode(o, false)
-}
-
-func (t *Tag) encode(o plumbing.EncodedObject, includeSig bool) (err error) {
- o.SetType(plumbing.TagObject)
- w, err := o.Writer()
- if err != nil {
- return err
- }
- defer ioutil.CheckClose(w, &err)
-
- if _, err = fmt.Fprintf(w,
- "object %s\ntype %s\ntag %s\ntagger ",
- t.Target.String(), t.TargetType.Bytes(), t.Name); err != nil {
- return err
- }
-
- if err = t.Tagger.Encode(w); err != nil {
- return err
- }
-
- if _, err = fmt.Fprint(w, "\n\n"); err != nil {
- return err
- }
-
- if _, err = fmt.Fprint(w, t.Message); err != nil {
- return err
- }
-
- // Note that this is highly sensitive to what it sent along in the message.
- // Message *always* needs to end with a newline, or else the message and the
- // signature will be concatenated into a corrupt object. Since this is a
- // lower-level method, we assume you know what you are doing and have already
- // done the needful on the message in the caller.
- if includeSig {
- if _, err = fmt.Fprint(w, t.PGPSignature); err != nil {
- return err
- }
- }
-
- return err
-}
-
-// Commit returns the commit pointed to by the tag. If the tag points to a
-// different type of object ErrUnsupportedObject will be returned.
-func (t *Tag) Commit() (*Commit, error) {
- if t.TargetType != plumbing.CommitObject {
- return nil, ErrUnsupportedObject
- }
-
- o, err := t.s.EncodedObject(plumbing.CommitObject, t.Target)
- if err != nil {
- return nil, err
- }
-
- return DecodeCommit(t.s, o)
-}
-
-// Tree returns the tree pointed to by the tag. If the tag points to a commit
-// object the tree of that commit will be returned. If the tag does not point
-// to a commit or tree object ErrUnsupportedObject will be returned.
-func (t *Tag) Tree() (*Tree, error) {
- switch t.TargetType {
- case plumbing.CommitObject:
- c, err := t.Commit()
- if err != nil {
- return nil, err
- }
-
- return c.Tree()
- case plumbing.TreeObject:
- return GetTree(t.s, t.Target)
- default:
- return nil, ErrUnsupportedObject
- }
-}
-
-// Blob returns the blob pointed to by the tag. If the tag points to a
-// different type of object ErrUnsupportedObject will be returned.
-func (t *Tag) Blob() (*Blob, error) {
- if t.TargetType != plumbing.BlobObject {
- return nil, ErrUnsupportedObject
- }
-
- return GetBlob(t.s, t.Target)
-}
-
-// Object returns the object pointed to by the tag.
-func (t *Tag) Object() (Object, error) {
- o, err := t.s.EncodedObject(t.TargetType, t.Target)
- if err != nil {
- return nil, err
- }
-
- return DecodeObject(t.s, o)
-}
-
-// String returns the meta information contained in the tag as a formatted
-// string.
-func (t *Tag) String() string {
- obj, _ := t.Object()
-
- return fmt.Sprintf(
- "%s %s\nTagger: %s\nDate: %s\n\n%s\n%s",
- plumbing.TagObject, t.Name, t.Tagger.String(), t.Tagger.When.Format(DateFormat),
- t.Message, objectAsString(obj),
- )
-}
-
-// Verify performs PGP verification of the tag with a provided armored
-// keyring and returns openpgp.Entity associated with verifying key on success.
-func (t *Tag) Verify(armoredKeyRing string) (*openpgp.Entity, error) {
- keyRingReader := strings.NewReader(armoredKeyRing)
- keyring, err := openpgp.ReadArmoredKeyRing(keyRingReader)
- if err != nil {
- return nil, err
- }
-
- // Extract signature.
- signature := strings.NewReader(t.PGPSignature)
-
- encoded := &plumbing.MemoryObject{}
- // Encode tag components, excluding signature and get a reader object.
- if err := t.EncodeWithoutSignature(encoded); err != nil {
- return nil, err
- }
- er, err := encoded.Reader()
- if err != nil {
- return nil, err
- }
-
- return openpgp.CheckArmoredDetachedSignature(keyring, er, signature, nil)
-}
-
-// TagIter provides an iterator for a set of tags.
-type TagIter struct {
- storer.EncodedObjectIter
- s storer.EncodedObjectStorer
-}
-
-// NewTagIter takes a storer.EncodedObjectStorer and a
-// storer.EncodedObjectIter and returns a *TagIter that iterates over all
-// tags contained in the storer.EncodedObjectIter.
-//
-// Any non-tag object returned by the storer.EncodedObjectIter is skipped.
-func NewTagIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *TagIter {
- return &TagIter{iter, s}
-}
-
-// Next moves the iterator to the next tag and returns a pointer to it. If
-// there are no more tags, it returns io.EOF.
-func (iter *TagIter) Next() (*Tag, error) {
- obj, err := iter.EncodedObjectIter.Next()
- if err != nil {
- return nil, err
- }
-
- return DecodeTag(iter.s, obj)
-}
-
-// ForEach call the cb function for each tag contained on this iter until
-// an error happens or the end of the iter is reached. If ErrStop is sent
-// the iteration is stop but no error is returned. The iterator is closed.
-func (iter *TagIter) ForEach(cb func(*Tag) error) error {
- return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error {
- t, err := DecodeTag(iter.s, obj)
- if err != nil {
- return err
- }
-
- return cb(t)
- })
-}
-
-func objectAsString(obj Object) string {
- switch o := obj.(type) {
- case *Commit:
- return o.String()
- default:
- return ""
- }
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/tree.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/tree.go
deleted file mode 100644
index 0fd0e51398f..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/object/tree.go
+++ /dev/null
@@ -1,557 +0,0 @@
-package object
-
-import (
- "context"
- "errors"
- "fmt"
- "io"
- "path"
- "path/filepath"
- "sort"
- "strings"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/filemode"
- "github.com/go-git/go-git/v5/plumbing/storer"
- "github.com/go-git/go-git/v5/utils/ioutil"
- "github.com/go-git/go-git/v5/utils/sync"
-)
-
-const (
- maxTreeDepth = 1024
- startingStackSize = 8
-)
-
-// New errors defined by this package.
-var (
- ErrMaxTreeDepth = errors.New("maximum tree depth exceeded")
- ErrFileNotFound = errors.New("file not found")
- ErrDirectoryNotFound = errors.New("directory not found")
- ErrEntryNotFound = errors.New("entry not found")
- ErrEntriesNotSorted = errors.New("entries in tree are not sorted")
-)
-
-// Tree is basically like a directory - it references a bunch of other trees
-// and/or blobs (i.e. files and sub-directories)
-type Tree struct {
- Entries []TreeEntry
- Hash plumbing.Hash
-
- s storer.EncodedObjectStorer
- m map[string]*TreeEntry
- t map[string]*Tree // tree path cache
-}
-
-// GetTree gets a tree from an object storer and decodes it.
-func GetTree(s storer.EncodedObjectStorer, h plumbing.Hash) (*Tree, error) {
- o, err := s.EncodedObject(plumbing.TreeObject, h)
- if err != nil {
- return nil, err
- }
-
- return DecodeTree(s, o)
-}
-
-// DecodeTree decodes an encoded object into a *Tree and associates it to the
-// given object storer.
-func DecodeTree(s storer.EncodedObjectStorer, o plumbing.EncodedObject) (*Tree, error) {
- t := &Tree{s: s}
- if err := t.Decode(o); err != nil {
- return nil, err
- }
-
- return t, nil
-}
-
-// TreeEntry represents a file
-type TreeEntry struct {
- Name string
- Mode filemode.FileMode
- Hash plumbing.Hash
-}
-
-// File returns the hash of the file identified by the `path` argument.
-// The path is interpreted as relative to the tree receiver.
-func (t *Tree) File(path string) (*File, error) {
- e, err := t.FindEntry(path)
- if err != nil {
- return nil, ErrFileNotFound
- }
-
- blob, err := GetBlob(t.s, e.Hash)
- if err != nil {
- if err == plumbing.ErrObjectNotFound {
- return nil, ErrFileNotFound
- }
- return nil, err
- }
-
- return NewFile(path, e.Mode, blob), nil
-}
-
-// Size returns the plaintext size of an object, without reading it
-// into memory.
-func (t *Tree) Size(path string) (int64, error) {
- e, err := t.FindEntry(path)
- if err != nil {
- return 0, ErrEntryNotFound
- }
-
- return t.s.EncodedObjectSize(e.Hash)
-}
-
-// Tree returns the tree identified by the `path` argument.
-// The path is interpreted as relative to the tree receiver.
-func (t *Tree) Tree(path string) (*Tree, error) {
- e, err := t.FindEntry(path)
- if err != nil {
- return nil, ErrDirectoryNotFound
- }
-
- tree, err := GetTree(t.s, e.Hash)
- if err == plumbing.ErrObjectNotFound {
- return nil, ErrDirectoryNotFound
- }
-
- return tree, err
-}
-
-// TreeEntryFile returns the *File for a given *TreeEntry.
-func (t *Tree) TreeEntryFile(e *TreeEntry) (*File, error) {
- blob, err := GetBlob(t.s, e.Hash)
- if err != nil {
- return nil, err
- }
-
- return NewFile(e.Name, e.Mode, blob), nil
-}
-
-// FindEntry search a TreeEntry in this tree or any subtree.
-func (t *Tree) FindEntry(path string) (*TreeEntry, error) {
- if t.t == nil {
- t.t = make(map[string]*Tree)
- }
-
- pathParts := strings.Split(path, "/")
- startingTree := t
- pathCurrent := ""
-
- // search for the longest path in the tree path cache
- for i := len(pathParts) - 1; i > 1; i-- {
- path := filepath.Join(pathParts[:i]...)
-
- tree, ok := t.t[path]
- if ok {
- startingTree = tree
- pathParts = pathParts[i:]
- pathCurrent = path
-
- break
- }
- }
-
- var tree *Tree
- var err error
- for tree = startingTree; len(pathParts) > 1; pathParts = pathParts[1:] {
- if tree, err = tree.dir(pathParts[0]); err != nil {
- return nil, err
- }
-
- pathCurrent = filepath.Join(pathCurrent, pathParts[0])
- t.t[pathCurrent] = tree
- }
-
- return tree.entry(pathParts[0])
-}
-
-func (t *Tree) dir(baseName string) (*Tree, error) {
- entry, err := t.entry(baseName)
- if err != nil {
- return nil, ErrDirectoryNotFound
- }
-
- obj, err := t.s.EncodedObject(plumbing.TreeObject, entry.Hash)
- if err != nil {
- return nil, err
- }
-
- tree := &Tree{s: t.s}
- err = tree.Decode(obj)
-
- return tree, err
-}
-
-func (t *Tree) entry(baseName string) (*TreeEntry, error) {
- if t.m == nil {
- t.buildMap()
- }
-
- entry, ok := t.m[baseName]
- if !ok {
- return nil, ErrEntryNotFound
- }
-
- return entry, nil
-}
-
-// Files returns a FileIter allowing to iterate over the Tree
-func (t *Tree) Files() *FileIter {
- return NewFileIter(t.s, t)
-}
-
-// ID returns the object ID of the tree. The returned value will always match
-// the current value of Tree.Hash.
-//
-// ID is present to fulfill the Object interface.
-func (t *Tree) ID() plumbing.Hash {
- return t.Hash
-}
-
-// Type returns the type of object. It always returns plumbing.TreeObject.
-func (t *Tree) Type() plumbing.ObjectType {
- return plumbing.TreeObject
-}
-
-// Decode transform an plumbing.EncodedObject into a Tree struct
-func (t *Tree) Decode(o plumbing.EncodedObject) (err error) {
- if o.Type() != plumbing.TreeObject {
- return ErrUnsupportedObject
- }
-
- t.Hash = o.Hash()
- if o.Size() == 0 {
- return nil
- }
-
- t.Entries = nil
- t.m = nil
-
- reader, err := o.Reader()
- if err != nil {
- return err
- }
- defer ioutil.CheckClose(reader, &err)
-
- r := sync.GetBufioReader(reader)
- defer sync.PutBufioReader(r)
-
- for {
- str, err := r.ReadString(' ')
- if err != nil {
- if err == io.EOF {
- break
- }
-
- return err
- }
- str = str[:len(str)-1] // strip last byte (' ')
-
- mode, err := filemode.New(str)
- if err != nil {
- return err
- }
-
- name, err := r.ReadString(0)
- if err != nil && err != io.EOF {
- return err
- }
-
- var hash plumbing.Hash
- if _, err = io.ReadFull(r, hash[:]); err != nil {
- return err
- }
-
- baseName := name[:len(name)-1]
- t.Entries = append(t.Entries, TreeEntry{
- Hash: hash,
- Mode: mode,
- Name: baseName,
- })
- }
-
- return nil
-}
-
-type TreeEntrySorter []TreeEntry
-
-func (s TreeEntrySorter) Len() int {
- return len(s)
-}
-
-func (s TreeEntrySorter) Less(i, j int) bool {
- name1 := s[i].Name
- name2 := s[j].Name
- if s[i].Mode == filemode.Dir {
- name1 += "/"
- }
- if s[j].Mode == filemode.Dir {
- name2 += "/"
- }
- return name1 < name2
-}
-
-func (s TreeEntrySorter) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-
-// Encode transforms a Tree into a plumbing.EncodedObject.
-func (t *Tree) Encode(o plumbing.EncodedObject) (err error) {
- o.SetType(plumbing.TreeObject)
- w, err := o.Writer()
- if err != nil {
- return err
- }
-
- defer ioutil.CheckClose(w, &err)
-
- if !sort.IsSorted(TreeEntrySorter(t.Entries)) {
- return ErrEntriesNotSorted
- }
-
- for _, entry := range t.Entries {
- if strings.IndexByte(entry.Name, 0) != -1 {
- return fmt.Errorf("malformed filename %q", entry.Name)
- }
- if _, err = fmt.Fprintf(w, "%o %s", entry.Mode, entry.Name); err != nil {
- return err
- }
-
- if _, err = w.Write([]byte{0x00}); err != nil {
- return err
- }
-
- if _, err = w.Write(entry.Hash[:]); err != nil {
- return err
- }
- }
-
- return err
-}
-
-func (t *Tree) buildMap() {
- t.m = make(map[string]*TreeEntry)
- for i := 0; i < len(t.Entries); i++ {
- t.m[t.Entries[i].Name] = &t.Entries[i]
- }
-}
-
-// Diff returns a list of changes between this tree and the provided one
-func (t *Tree) Diff(to *Tree) (Changes, error) {
- return t.DiffContext(context.Background(), to)
-}
-
-// DiffContext returns a list of changes between this tree and the provided one
-// Error will be returned if context expires. Provided context must be non nil.
-//
-// NOTE: Since version 5.1.0 the renames are correctly handled, the settings
-// used are the recommended options DefaultDiffTreeOptions.
-func (t *Tree) DiffContext(ctx context.Context, to *Tree) (Changes, error) {
- return DiffTreeWithOptions(ctx, t, to, DefaultDiffTreeOptions)
-}
-
-// Patch returns a slice of Patch objects with all the changes between trees
-// in chunks. This representation can be used to create several diff outputs.
-func (t *Tree) Patch(to *Tree) (*Patch, error) {
- return t.PatchContext(context.Background(), to)
-}
-
-// PatchContext returns a slice of Patch objects with all the changes between
-// trees in chunks. This representation can be used to create several diff
-// outputs. If context expires, an error will be returned. Provided context must
-// be non-nil.
-//
-// NOTE: Since version 5.1.0 the renames are correctly handled, the settings
-// used are the recommended options DefaultDiffTreeOptions.
-func (t *Tree) PatchContext(ctx context.Context, to *Tree) (*Patch, error) {
- changes, err := t.DiffContext(ctx, to)
- if err != nil {
- return nil, err
- }
-
- return changes.PatchContext(ctx)
-}
-
-// treeEntryIter facilitates iterating through the TreeEntry objects in a Tree.
-type treeEntryIter struct {
- t *Tree
- pos int
-}
-
-func (iter *treeEntryIter) Next() (TreeEntry, error) {
- if iter.pos >= len(iter.t.Entries) {
- return TreeEntry{}, io.EOF
- }
- iter.pos++
- return iter.t.Entries[iter.pos-1], nil
-}
-
-// TreeWalker provides a means of walking through all of the entries in a Tree.
-type TreeWalker struct {
- stack []*treeEntryIter
- base string
- recursive bool
- seen map[plumbing.Hash]bool
-
- s storer.EncodedObjectStorer
- t *Tree
-}
-
-// NewTreeWalker returns a new TreeWalker for the given tree.
-//
-// It is the caller's responsibility to call Close() when finished with the
-// tree walker.
-func NewTreeWalker(t *Tree, recursive bool, seen map[plumbing.Hash]bool) *TreeWalker {
- stack := make([]*treeEntryIter, 0, startingStackSize)
- stack = append(stack, &treeEntryIter{t, 0})
-
- return &TreeWalker{
- stack: stack,
- recursive: recursive,
- seen: seen,
-
- s: t.s,
- t: t,
- }
-}
-
-// Next returns the next object from the tree. Objects are returned in order
-// and subtrees are included. After the last object has been returned further
-// calls to Next() will return io.EOF.
-//
-// In the current implementation any objects which cannot be found in the
-// underlying repository will be skipped automatically. It is possible that this
-// may change in future versions.
-func (w *TreeWalker) Next() (name string, entry TreeEntry, err error) {
- var obj *Tree
- for {
- current := len(w.stack) - 1
- if current < 0 {
- // Nothing left on the stack so we're finished
- err = io.EOF
- return
- }
-
- if current > maxTreeDepth {
- // We're probably following bad data or some self-referencing tree
- err = ErrMaxTreeDepth
- return
- }
-
- entry, err = w.stack[current].Next()
- if err == io.EOF {
- // Finished with the current tree, move back up to the parent
- w.stack = w.stack[:current]
- w.base, _ = path.Split(w.base)
- w.base = strings.TrimSuffix(w.base, "/")
- continue
- }
-
- if err != nil {
- return
- }
-
- if w.seen[entry.Hash] {
- continue
- }
-
- if entry.Mode == filemode.Dir {
- obj, err = GetTree(w.s, entry.Hash)
- }
-
- name = simpleJoin(w.base, entry.Name)
-
- if err != nil {
- err = io.EOF
- return
- }
-
- break
- }
-
- if !w.recursive {
- return
- }
-
- if obj != nil {
- w.stack = append(w.stack, &treeEntryIter{obj, 0})
- w.base = simpleJoin(w.base, entry.Name)
- }
-
- return
-}
-
-// Tree returns the tree that the tree walker most recently operated on.
-func (w *TreeWalker) Tree() *Tree {
- current := len(w.stack) - 1
- if w.stack[current].pos == 0 {
- current--
- }
-
- if current < 0 {
- return nil
- }
-
- return w.stack[current].t
-}
-
-// Close releases any resources used by the TreeWalker.
-func (w *TreeWalker) Close() {
- w.stack = nil
-}
-
-// TreeIter provides an iterator for a set of trees.
-type TreeIter struct {
- storer.EncodedObjectIter
- s storer.EncodedObjectStorer
-}
-
-// NewTreeIter takes a storer.EncodedObjectStorer and a
-// storer.EncodedObjectIter and returns a *TreeIter that iterates over all
-// tree contained in the storer.EncodedObjectIter.
-//
-// Any non-tree object returned by the storer.EncodedObjectIter is skipped.
-func NewTreeIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *TreeIter {
- return &TreeIter{iter, s}
-}
-
-// Next moves the iterator to the next tree and returns a pointer to it. If
-// there are no more trees, it returns io.EOF.
-func (iter *TreeIter) Next() (*Tree, error) {
- for {
- obj, err := iter.EncodedObjectIter.Next()
- if err != nil {
- return nil, err
- }
-
- if obj.Type() != plumbing.TreeObject {
- continue
- }
-
- return DecodeTree(iter.s, obj)
- }
-}
-
-// ForEach call the cb function for each tree contained on this iter until
-// an error happens or the end of the iter is reached. If ErrStop is sent
-// the iteration is stop but no error is returned. The iterator is closed.
-func (iter *TreeIter) ForEach(cb func(*Tree) error) error {
- return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error {
- if obj.Type() != plumbing.TreeObject {
- return nil
- }
-
- t, err := DecodeTree(iter.s, obj)
- if err != nil {
- return err
- }
-
- return cb(t)
- })
-}
-
-func simpleJoin(parent, child string) string {
- if len(parent) > 0 {
- return parent + "/" + child
- }
- return child
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/treenoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/treenoder.go
deleted file mode 100644
index 2adb6452880..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/object/treenoder.go
+++ /dev/null
@@ -1,142 +0,0 @@
-package object
-
-import (
- "io"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/filemode"
- "github.com/go-git/go-git/v5/utils/merkletrie/noder"
-)
-
-// A treenoder is a helper type that wraps git trees into merkletrie
-// noders.
-//
-// As a merkletrie noder doesn't understand the concept of modes (e.g.
-// file permissions), the treenoder includes the mode of the git tree in
-// the hash, so changes in the modes will be detected as modifications
-// to the file contents by the merkletrie difftree algorithm. This is
-// consistent with how the "git diff-tree" command works.
-type treeNoder struct {
- parent *Tree // the root node is its own parent
- name string // empty string for the root node
- mode filemode.FileMode
- hash plumbing.Hash
- children []noder.Noder // memoized
-}
-
-// NewTreeRootNode returns the root node of a Tree
-func NewTreeRootNode(t *Tree) noder.Noder {
- if t == nil {
- return &treeNoder{}
- }
-
- return &treeNoder{
- parent: t,
- name: "",
- mode: filemode.Dir,
- hash: t.Hash,
- }
-}
-
-func (t *treeNoder) Skip() bool {
- return false
-}
-
-func (t *treeNoder) isRoot() bool {
- return t.name == ""
-}
-
-func (t *treeNoder) String() string {
- return "treeNoder <" + t.name + ">"
-}
-
-func (t *treeNoder) Hash() []byte {
- if t.mode == filemode.Deprecated {
- return append(t.hash[:], filemode.Regular.Bytes()...)
- }
- return append(t.hash[:], t.mode.Bytes()...)
-}
-
-func (t *treeNoder) Name() string {
- return t.name
-}
-
-func (t *treeNoder) IsDir() bool {
- return t.mode == filemode.Dir
-}
-
-// Children will return the children of a treenoder as treenoders,
-// building them from the children of the wrapped git tree.
-func (t *treeNoder) Children() ([]noder.Noder, error) {
- if t.mode != filemode.Dir {
- return noder.NoChildren, nil
- }
-
- // children are memoized for efficiency
- if t.children != nil {
- return t.children, nil
- }
-
- // the parent of the returned children will be ourself as a tree if
- // we are a not the root treenoder. The root is special as it
- // is is own parent.
- parent := t.parent
- if !t.isRoot() {
- var err error
- if parent, err = t.parent.Tree(t.name); err != nil {
- return nil, err
- }
- }
-
- var err error
- t.children, err = transformChildren(parent)
- return t.children, err
-}
-
-// Returns the children of a tree as treenoders.
-// Efficiency is key here.
-func transformChildren(t *Tree) ([]noder.Noder, error) {
- var err error
- var e TreeEntry
-
- // there will be more tree entries than children in the tree,
- // due to submodules and empty directories, but I think it is still
- // worth it to pre-allocate the whole array now, even if sometimes
- // is bigger than needed.
- ret := make([]noder.Noder, 0, len(t.Entries))
-
- walker := NewTreeWalker(t, false, nil) // don't recurse
- // don't defer walker.Close() for efficiency reasons.
- for {
- _, e, err = walker.Next()
- if err == io.EOF {
- break
- }
- if err != nil {
- walker.Close()
- return nil, err
- }
-
- ret = append(ret, &treeNoder{
- parent: t,
- name: e.Name,
- mode: e.Mode,
- hash: e.Hash,
- })
- }
- walker.Close()
-
- return ret, nil
-}
-
-// len(t.tree.Entries) != the number of elements walked by treewalker
-// for some reason because of empty directories, submodules, etc, so we
-// have to walk here.
-func (t *treeNoder) NumChildren() (int, error) {
- children, err := t.Children()
- if err != nil {
- return 0, err
- }
-
- return len(children), nil
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs.go
deleted file mode 100644
index f93ad304710..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs.go
+++ /dev/null
@@ -1,211 +0,0 @@
-package packp
-
-import (
- "fmt"
- "sort"
- "strings"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability"
- "github.com/go-git/go-git/v5/plumbing/storer"
- "github.com/go-git/go-git/v5/storage/memory"
-)
-
-// AdvRefs values represent the information transmitted on an
-// advertised-refs message. Values from this type are not zero-value
-// safe, use the New function instead.
-type AdvRefs struct {
- // Prefix stores prefix payloads.
- //
- // When using this message over (smart) HTTP, you have to add a pktline
- // before the whole thing with the following payload:
- //
- // '# service=$servicename" LF
- //
- // Moreover, some (all) git HTTP smart servers will send a flush-pkt
- // just after the first pkt-line.
- //
- // To accommodate both situations, the Prefix field allow you to store
- // any data you want to send before the actual pktlines. It will also
- // be filled up with whatever is found on the line.
- Prefix [][]byte
- // Head stores the resolved HEAD reference if present.
- // This can be present with git-upload-pack, not with git-receive-pack.
- Head *plumbing.Hash
- // Capabilities are the capabilities.
- Capabilities *capability.List
- // References are the hash references.
- References map[string]plumbing.Hash
- // Peeled are the peeled hash references.
- Peeled map[string]plumbing.Hash
- // Shallows are the shallow object ids.
- Shallows []plumbing.Hash
-}
-
-// NewAdvRefs returns a pointer to a new AdvRefs value, ready to be used.
-func NewAdvRefs() *AdvRefs {
- return &AdvRefs{
- Prefix: [][]byte{},
- Capabilities: capability.NewList(),
- References: make(map[string]plumbing.Hash),
- Peeled: make(map[string]plumbing.Hash),
- Shallows: []plumbing.Hash{},
- }
-}
-
-func (a *AdvRefs) AddReference(r *plumbing.Reference) error {
- switch r.Type() {
- case plumbing.SymbolicReference:
- v := fmt.Sprintf("%s:%s", r.Name().String(), r.Target().String())
- return a.Capabilities.Add(capability.SymRef, v)
- case plumbing.HashReference:
- a.References[r.Name().String()] = r.Hash()
- default:
- return plumbing.ErrInvalidType
- }
-
- return nil
-}
-
-func (a *AdvRefs) AllReferences() (memory.ReferenceStorage, error) {
- s := memory.ReferenceStorage{}
- if err := a.addRefs(s); err != nil {
- return s, plumbing.NewUnexpectedError(err)
- }
-
- return s, nil
-}
-
-func (a *AdvRefs) addRefs(s storer.ReferenceStorer) error {
- for name, hash := range a.References {
- ref := plumbing.NewReferenceFromStrings(name, hash.String())
- if err := s.SetReference(ref); err != nil {
- return err
- }
- }
-
- if a.supportSymrefs() {
- return a.addSymbolicRefs(s)
- }
-
- return a.resolveHead(s)
-}
-
-// If the server does not support symrefs capability,
-// we need to guess the reference where HEAD is pointing to.
-//
-// Git versions prior to 1.8.4.3 has an special procedure to get
-// the reference where is pointing to HEAD:
-// - Check if a reference called master exists. If exists and it
-// has the same hash as HEAD hash, we can say that HEAD is pointing to master
-// - If master does not exists or does not have the same hash as HEAD,
-// order references and check in that order if that reference has the same
-// hash than HEAD. If yes, set HEAD pointing to that branch hash
-// - If no reference is found, throw an error
-func (a *AdvRefs) resolveHead(s storer.ReferenceStorer) error {
- if a.Head == nil {
- return nil
- }
-
- ref, err := s.Reference(plumbing.Master)
-
- // check first if HEAD is pointing to master
- if err == nil {
- ok, err := a.createHeadIfCorrectReference(ref, s)
- if err != nil {
- return err
- }
-
- if ok {
- return nil
- }
- }
-
- if err != nil && err != plumbing.ErrReferenceNotFound {
- return err
- }
-
- // From here we are trying to guess the branch that HEAD is pointing
- refIter, err := s.IterReferences()
- if err != nil {
- return err
- }
-
- var refNames []string
- err = refIter.ForEach(func(r *plumbing.Reference) error {
- refNames = append(refNames, string(r.Name()))
- return nil
- })
- if err != nil {
- return err
- }
-
- sort.Strings(refNames)
-
- var headSet bool
- for _, refName := range refNames {
- ref, err := s.Reference(plumbing.ReferenceName(refName))
- if err != nil {
- return err
- }
- ok, err := a.createHeadIfCorrectReference(ref, s)
- if err != nil {
- return err
- }
- if ok {
- headSet = true
- break
- }
- }
-
- if !headSet {
- return plumbing.ErrReferenceNotFound
- }
-
- return nil
-}
-
-func (a *AdvRefs) createHeadIfCorrectReference(
- reference *plumbing.Reference,
- s storer.ReferenceStorer) (bool, error) {
- if reference.Hash() == *a.Head {
- headRef := plumbing.NewSymbolicReference(plumbing.HEAD, reference.Name())
- if err := s.SetReference(headRef); err != nil {
- return false, err
- }
-
- return true, nil
- }
-
- return false, nil
-}
-
-func (a *AdvRefs) addSymbolicRefs(s storer.ReferenceStorer) error {
- for _, symref := range a.Capabilities.Get(capability.SymRef) {
- chunks := strings.Split(symref, ":")
- if len(chunks) != 2 {
- err := fmt.Errorf("bad number of `:` in symref value (%q)", symref)
- return plumbing.NewUnexpectedError(err)
- }
- name := plumbing.ReferenceName(chunks[0])
- target := plumbing.ReferenceName(chunks[1])
- ref := plumbing.NewSymbolicReference(name, target)
- if err := s.SetReference(ref); err != nil {
- return nil
- }
- }
-
- return nil
-}
-
-func (a *AdvRefs) supportSymrefs() bool {
- return a.Capabilities.Supports(capability.SymRef)
-}
-
-// IsEmpty returns true if doesn't contain any reference.
-func (a *AdvRefs) IsEmpty() bool {
- return a.Head == nil &&
- len(a.References) == 0 &&
- len(a.Peeled) == 0 &&
- len(a.Shallows) == 0
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_decode.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_decode.go
deleted file mode 100644
index f8d26a28e4b..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_decode.go
+++ /dev/null
@@ -1,289 +0,0 @@
-package packp
-
-import (
- "bytes"
- "encoding/hex"
- "errors"
- "fmt"
- "io"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/format/pktline"
-)
-
-// Decode reads the next advertised-refs message form its input and
-// stores it in the AdvRefs.
-func (a *AdvRefs) Decode(r io.Reader) error {
- d := newAdvRefsDecoder(r)
- return d.Decode(a)
-}
-
-type advRefsDecoder struct {
- s *pktline.Scanner // a pkt-line scanner from the input stream
- line []byte // current pkt-line contents, use parser.nextLine() to make it advance
- nLine int // current pkt-line number for debugging, begins at 1
- hash plumbing.Hash // last hash read
- err error // sticky error, use the parser.error() method to fill this out
- data *AdvRefs // parsed data is stored here
-}
-
-var (
- // ErrEmptyAdvRefs is returned by Decode if it gets an empty advertised
- // references message.
- ErrEmptyAdvRefs = errors.New("empty advertised-ref message")
- // ErrEmptyInput is returned by Decode if the input is empty.
- ErrEmptyInput = errors.New("empty input")
-)
-
-func newAdvRefsDecoder(r io.Reader) *advRefsDecoder {
- return &advRefsDecoder{
- s: pktline.NewScanner(r),
- }
-}
-
-func (d *advRefsDecoder) Decode(v *AdvRefs) error {
- d.data = v
-
- for state := decodePrefix; state != nil; {
- state = state(d)
- }
-
- return d.err
-}
-
-type decoderStateFn func(*advRefsDecoder) decoderStateFn
-
-// fills out the parser sticky error
-func (d *advRefsDecoder) error(format string, a ...interface{}) {
- msg := fmt.Sprintf(
- "pkt-line %d: %s", d.nLine,
- fmt.Sprintf(format, a...),
- )
-
- d.err = NewErrUnexpectedData(msg, d.line)
-}
-
-// Reads a new pkt-line from the scanner, makes its payload available as
-// p.line and increments p.nLine. A successful invocation returns true,
-// otherwise, false is returned and the sticky error is filled out
-// accordingly. Trims eols at the end of the payloads.
-func (d *advRefsDecoder) nextLine() bool {
- d.nLine++
-
- if !d.s.Scan() {
- if d.err = d.s.Err(); d.err != nil {
- return false
- }
-
- if d.nLine == 1 {
- d.err = ErrEmptyInput
- return false
- }
-
- d.error("EOF")
- return false
- }
-
- d.line = d.s.Bytes()
- d.line = bytes.TrimSuffix(d.line, eol)
-
- return true
-}
-
-// The HTTP smart prefix is often followed by a flush-pkt.
-func decodePrefix(d *advRefsDecoder) decoderStateFn {
- if ok := d.nextLine(); !ok {
- return nil
- }
-
- if !isPrefix(d.line) {
- return decodeFirstHash
- }
-
- tmp := make([]byte, len(d.line))
- copy(tmp, d.line)
- d.data.Prefix = append(d.data.Prefix, tmp)
- if ok := d.nextLine(); !ok {
- return nil
- }
-
- if !isFlush(d.line) {
- return decodeFirstHash
- }
-
- d.data.Prefix = append(d.data.Prefix, pktline.Flush)
- if ok := d.nextLine(); !ok {
- return nil
- }
-
- return decodeFirstHash
-}
-
-func isPrefix(payload []byte) bool {
- return len(payload) > 0 && payload[0] == '#'
-}
-
-// If the first hash is zero, then a no-refs is coming. Otherwise, a
-// list-of-refs is coming, and the hash will be followed by the first
-// advertised ref.
-func decodeFirstHash(p *advRefsDecoder) decoderStateFn {
- // If the repository is empty, we receive a flush here (HTTP).
- if isFlush(p.line) {
- p.err = ErrEmptyAdvRefs
- return nil
- }
-
- // TODO: Use object-format (when available) for hash size. Git 2.41+
- if len(p.line) < hashSize {
- p.error("cannot read hash, pkt-line too short")
- return nil
- }
-
- if _, err := hex.Decode(p.hash[:], p.line[:hashSize]); err != nil {
- p.error("invalid hash text: %s", err)
- return nil
- }
-
- p.line = p.line[hashSize:]
-
- if p.hash.IsZero() {
- return decodeSkipNoRefs
- }
-
- return decodeFirstRef
-}
-
-// Skips SP "capabilities^{}" NUL
-func decodeSkipNoRefs(p *advRefsDecoder) decoderStateFn {
- if len(p.line) < len(noHeadMark) {
- p.error("too short zero-id ref")
- return nil
- }
-
- if !bytes.HasPrefix(p.line, noHeadMark) {
- p.error("malformed zero-id ref")
- return nil
- }
-
- p.line = p.line[len(noHeadMark):]
-
- return decodeCaps
-}
-
-// decode the refname, expects SP refname NULL
-func decodeFirstRef(l *advRefsDecoder) decoderStateFn {
- if len(l.line) < 3 {
- l.error("line too short after hash")
- return nil
- }
-
- if !bytes.HasPrefix(l.line, sp) {
- l.error("no space after hash")
- return nil
- }
- l.line = l.line[1:]
-
- chunks := bytes.SplitN(l.line, null, 2)
- if len(chunks) < 2 {
- l.error("NULL not found")
- return nil
- }
- ref := chunks[0]
- l.line = chunks[1]
-
- if bytes.Equal(ref, []byte(head)) {
- l.data.Head = &l.hash
- } else {
- l.data.References[string(ref)] = l.hash
- }
-
- return decodeCaps
-}
-
-func decodeCaps(p *advRefsDecoder) decoderStateFn {
- if err := p.data.Capabilities.Decode(p.line); err != nil {
- p.error("invalid capabilities: %s", err)
- return nil
- }
-
- return decodeOtherRefs
-}
-
-// The refs are either tips (obj-id SP refname) or a peeled (obj-id SP refname^{}).
-// If there are no refs, then there might be a shallow or flush-ptk.
-func decodeOtherRefs(p *advRefsDecoder) decoderStateFn {
- if ok := p.nextLine(); !ok {
- return nil
- }
-
- if bytes.HasPrefix(p.line, shallow) {
- return decodeShallow
- }
-
- if len(p.line) == 0 {
- return nil
- }
-
- saveTo := p.data.References
- if bytes.HasSuffix(p.line, peeled) {
- p.line = bytes.TrimSuffix(p.line, peeled)
- saveTo = p.data.Peeled
- }
-
- ref, hash, err := readRef(p.line)
- if err != nil {
- p.error("%s", err)
- return nil
- }
- saveTo[ref] = hash
-
- return decodeOtherRefs
-}
-
-// Reads a ref-name
-func readRef(data []byte) (string, plumbing.Hash, error) {
- chunks := bytes.Split(data, sp)
- switch {
- case len(chunks) == 1:
- return "", plumbing.ZeroHash, fmt.Errorf("malformed ref data: no space was found")
- case len(chunks) > 2:
- return "", plumbing.ZeroHash, fmt.Errorf("malformed ref data: more than one space found")
- default:
- return string(chunks[1]), plumbing.NewHash(string(chunks[0])), nil
- }
-}
-
-// Keeps reading shallows until a flush-pkt is found
-func decodeShallow(p *advRefsDecoder) decoderStateFn {
- if !bytes.HasPrefix(p.line, shallow) {
- p.error("malformed shallow prefix, found %q... instead", p.line[:len(shallow)])
- return nil
- }
- p.line = bytes.TrimPrefix(p.line, shallow)
-
- if len(p.line) != hashSize {
- p.error(fmt.Sprintf(
- "malformed shallow hash: wrong length, expected 40 bytes, read %d bytes",
- len(p.line)))
- return nil
- }
-
- text := p.line[:hashSize]
- var h plumbing.Hash
- if _, err := hex.Decode(h[:], text); err != nil {
- p.error("invalid hash text: %s", err)
- return nil
- }
-
- p.data.Shallows = append(p.data.Shallows, h)
-
- if ok := p.nextLine(); !ok {
- return nil
- }
-
- if len(p.line) == 0 {
- return nil // successful parse of the advertised-refs message
- }
-
- return decodeShallow
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_encode.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_encode.go
deleted file mode 100644
index fb9bd883fce..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_encode.go
+++ /dev/null
@@ -1,176 +0,0 @@
-package packp
-
-import (
- "bytes"
- "fmt"
- "io"
- "sort"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/format/pktline"
- "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability"
-)
-
-// Encode writes the AdvRefs encoding to a writer.
-//
-// All the payloads will end with a newline character. Capabilities,
-// references and shallows are written in alphabetical order, except for
-// peeled references that always follow their corresponding references.
-func (a *AdvRefs) Encode(w io.Writer) error {
- e := newAdvRefsEncoder(w)
- return e.Encode(a)
-}
-
-type advRefsEncoder struct {
- data *AdvRefs // data to encode
- pe *pktline.Encoder // where to write the encoded data
- firstRefName string // reference name to encode in the first pkt-line (HEAD if present)
- firstRefHash plumbing.Hash // hash referenced to encode in the first pkt-line (HEAD if present)
- sortedRefs []string // hash references to encode ordered by increasing order
- err error // sticky error
-
-}
-
-func newAdvRefsEncoder(w io.Writer) *advRefsEncoder {
- return &advRefsEncoder{
- pe: pktline.NewEncoder(w),
- }
-}
-
-func (e *advRefsEncoder) Encode(v *AdvRefs) error {
- e.data = v
- e.sortRefs()
- e.setFirstRef()
-
- for state := encodePrefix; state != nil; {
- state = state(e)
- }
-
- return e.err
-}
-
-func (e *advRefsEncoder) sortRefs() {
- if len(e.data.References) > 0 {
- refs := make([]string, 0, len(e.data.References))
- for refName := range e.data.References {
- refs = append(refs, refName)
- }
-
- sort.Strings(refs)
- e.sortedRefs = refs
- }
-}
-
-func (e *advRefsEncoder) setFirstRef() {
- if e.data.Head != nil {
- e.firstRefName = head
- e.firstRefHash = *e.data.Head
- return
- }
-
- if len(e.sortedRefs) > 0 {
- refName := e.sortedRefs[0]
- e.firstRefName = refName
- e.firstRefHash = e.data.References[refName]
- }
-}
-
-type encoderStateFn func(*advRefsEncoder) encoderStateFn
-
-func encodePrefix(e *advRefsEncoder) encoderStateFn {
- for _, p := range e.data.Prefix {
- if bytes.Equal(p, pktline.Flush) {
- if e.err = e.pe.Flush(); e.err != nil {
- return nil
- }
- continue
- }
- if e.err = e.pe.Encodef("%s\n", string(p)); e.err != nil {
- return nil
- }
- }
-
- return encodeFirstLine
-}
-
-// Adds the first pkt-line payload: head hash, head ref and capabilities.
-// If HEAD ref is not found, the first reference ordered in increasing order will be used.
-// If there aren't HEAD neither refs, the first line will be "PKT-LINE(zero-id SP "capabilities^{}" NUL capability-list)".
-// See: https://github.com/git/git/blob/master/Documentation/technical/pack-protocol.txt
-// See: https://github.com/git/git/blob/master/Documentation/technical/protocol-common.txt
-func encodeFirstLine(e *advRefsEncoder) encoderStateFn {
- const formatFirstLine = "%s %s\x00%s\n"
- var firstLine string
- capabilities := formatCaps(e.data.Capabilities)
-
- if e.firstRefName == "" {
- firstLine = fmt.Sprintf(formatFirstLine, plumbing.ZeroHash.String(), "capabilities^{}", capabilities)
- } else {
- firstLine = fmt.Sprintf(formatFirstLine, e.firstRefHash.String(), e.firstRefName, capabilities)
-
- }
-
- if e.err = e.pe.EncodeString(firstLine); e.err != nil {
- return nil
- }
-
- return encodeRefs
-}
-
-func formatCaps(c *capability.List) string {
- if c == nil {
- return ""
- }
-
- return c.String()
-}
-
-// Adds the (sorted) refs: hash SP refname EOL
-// and their peeled refs if any.
-func encodeRefs(e *advRefsEncoder) encoderStateFn {
- for _, r := range e.sortedRefs {
- if r == e.firstRefName {
- continue
- }
-
- hash := e.data.References[r]
- if e.err = e.pe.Encodef("%s %s\n", hash.String(), r); e.err != nil {
- return nil
- }
-
- if hash, ok := e.data.Peeled[r]; ok {
- if e.err = e.pe.Encodef("%s %s^{}\n", hash.String(), r); e.err != nil {
- return nil
- }
- }
- }
-
- return encodeShallow
-}
-
-// Adds the (sorted) shallows: "shallow" SP hash EOL
-func encodeShallow(e *advRefsEncoder) encoderStateFn {
- sorted := sortShallows(e.data.Shallows)
- for _, hash := range sorted {
- if e.err = e.pe.Encodef("shallow %s\n", hash); e.err != nil {
- return nil
- }
- }
-
- return encodeFlush
-}
-
-func sortShallows(c []plumbing.Hash) []string {
- ret := []string{}
- for _, h := range c {
- ret = append(ret, h.String())
- }
- sort.Strings(ret)
-
- return ret
-}
-
-func encodeFlush(e *advRefsEncoder) encoderStateFn {
- e.err = e.pe.Flush()
- return nil
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/capability.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/capability.go
deleted file mode 100644
index b52e8a49d51..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/capability.go
+++ /dev/null
@@ -1,272 +0,0 @@
-// Package capability defines the server and client capabilities.
-package capability
-
-import (
- "fmt"
- "os"
-)
-
-// Capability describes a server or client capability.
-type Capability string
-
-func (n Capability) String() string {
- return string(n)
-}
-
-const (
- // MultiACK capability allows the server to return "ACK obj-id continue" as
- // soon as it finds a commit that it can use as a common base, between the
- // client's wants and the client's have set.
- //
- // By sending this early, the server can potentially head off the client
- // from walking any further down that particular branch of the client's
- // repository history. The client may still need to walk down other
- // branches, sending have lines for those, until the server has a
- // complete cut across the DAG, or the client has said "done".
- //
- // Without multi_ack, a client sends have lines in --date-order until
- // the server has found a common base. That means the client will send
- // have lines that are already known by the server to be common, because
- // they overlap in time with another branch that the server hasn't found
- // a common base on yet.
- //
- // For example suppose the client has commits in caps that the server
- // doesn't and the server has commits in lower case that the client
- // doesn't, as in the following diagram:
- //
- // +---- u ---------------------- x
- // / +----- y
- // / /
- // a -- b -- c -- d -- E -- F
- // \
- // +--- Q -- R -- S
- //
- // If the client wants x,y and starts out by saying have F,S, the server
- // doesn't know what F,S is. Eventually the client says "have d" and
- // the server sends "ACK d continue" to let the client know to stop
- // walking down that line (so don't send c-b-a), but it's not done yet,
- // it needs a base for x. The client keeps going with S-R-Q, until a
- // gets reached, at which point the server has a clear base and it all
- // ends.
- //
- // Without multi_ack the client would have sent that c-b-a chain anyway,
- // interleaved with S-R-Q.
- MultiACK Capability = "multi_ack"
- // MultiACKDetailed is an extension of multi_ack that permits client to
- // better understand the server's in-memory state.
- MultiACKDetailed Capability = "multi_ack_detailed"
- // NoDone should only be used with the smart HTTP protocol. If
- // multi_ack_detailed and no-done are both present, then the sender is
- // free to immediately send a pack following its first "ACK obj-id ready"
- // message.
- //
- // Without no-done in the smart HTTP protocol, the server session would
- // end and the client has to make another trip to send "done" before
- // the server can send the pack. no-done removes the last round and
- // thus slightly reduces latency.
- NoDone Capability = "no-done"
- // ThinPack is one with deltas which reference base objects not
- // contained within the pack (but are known to exist at the receiving
- // end). This can reduce the network traffic significantly, but it
- // requires the receiving end to know how to "thicken" these packs by
- // adding the missing bases to the pack.
- //
- // The upload-pack server advertises 'thin-pack' when it can generate
- // and send a thin pack. A client requests the 'thin-pack' capability
- // when it understands how to "thicken" it, notifying the server that
- // it can receive such a pack. A client MUST NOT request the
- // 'thin-pack' capability if it cannot turn a thin pack into a
- // self-contained pack.
- //
- // Receive-pack, on the other hand, is assumed by default to be able to
- // handle thin packs, but can ask the client not to use the feature by
- // advertising the 'no-thin' capability. A client MUST NOT send a thin
- // pack if the server advertises the 'no-thin' capability.
- //
- // The reasons for this asymmetry are historical. The receive-pack
- // program did not exist until after the invention of thin packs, so
- // historically the reference implementation of receive-pack always
- // understood thin packs. Adding 'no-thin' later allowed receive-pack
- // to disable the feature in a backwards-compatible manner.
- ThinPack Capability = "thin-pack"
- // Sideband means that server can send, and client understand multiplexed
- // progress reports and error info interleaved with the packfile itself.
- //
- // These two options are mutually exclusive. A modern client always
- // favors Sideband64k.
- //
- // Either mode indicates that the packfile data will be streamed broken
- // up into packets of up to either 1000 bytes in the case of 'side_band',
- // or 65520 bytes in the case of 'side_band_64k'. Each packet is made up
- // of a leading 4-byte pkt-line length of how much data is in the packet,
- // followed by a 1-byte stream code, followed by the actual data.
- //
- // The stream code can be one of:
- //
- // 1 - pack data
- // 2 - progress messages
- // 3 - fatal error message just before stream aborts
- //
- // The "side-band-64k" capability came about as a way for newer clients
- // that can handle much larger packets to request packets that are
- // actually crammed nearly full, while maintaining backward compatibility
- // for the older clients.
- //
- // Further, with side-band and its up to 1000-byte messages, it's actually
- // 999 bytes of payload and 1 byte for the stream code. With side-band-64k,
- // same deal, you have up to 65519 bytes of data and 1 byte for the stream
- // code.
- //
- // The client MUST send only maximum of one of "side-band" and "side-
- // band-64k". Server MUST diagnose it as an error if client requests
- // both.
- Sideband Capability = "side-band"
- Sideband64k Capability = "side-band-64k"
- // OFSDelta server can send, and client understand PACKv2 with delta
- // referring to its base by position in pack rather than by an obj-id. That
- // is, they can send/read OBJ_OFS_DELTA (aka type 6) in a packfile.
- OFSDelta Capability = "ofs-delta"
- // Agent the server may optionally send this capability to notify the client
- // that the server is running version `X`. The client may optionally return
- // its own agent string by responding with an `agent=Y` capability (but it
- // MUST NOT do so if the server did not mention the agent capability). The
- // `X` and `Y` strings may contain any printable ASCII characters except
- // space (i.e., the byte range 32 < x < 127), and are typically of the form
- // "package/version" (e.g., "git/1.8.3.1"). The agent strings are purely
- // informative for statistics and debugging purposes, and MUST NOT be used
- // to programmatically assume the presence or absence of particular features.
- Agent Capability = "agent"
- // Shallow capability adds "deepen", "shallow" and "unshallow" commands to
- // the fetch-pack/upload-pack protocol so clients can request shallow
- // clones.
- Shallow Capability = "shallow"
- // DeepenSince adds "deepen-since" command to fetch-pack/upload-pack
- // protocol so the client can request shallow clones that are cut at a
- // specific time, instead of depth. Internally it's equivalent of doing
- // "rev-list --max-age=" on the server side. "deepen-since"
- // cannot be used with "deepen".
- DeepenSince Capability = "deepen-since"
- // DeepenNot adds "deepen-not" command to fetch-pack/upload-pack
- // protocol so the client can request shallow clones that are cut at a
- // specific revision, instead of depth. Internally it's equivalent of
- // doing "rev-list --not " on the server side. "deepen-not"
- // cannot be used with "deepen", but can be used with "deepen-since".
- DeepenNot Capability = "deepen-not"
- // DeepenRelative if this capability is requested by the client, the
- // semantics of "deepen" command is changed. The "depth" argument is the
- // depth from the current shallow boundary, instead of the depth from
- // remote refs.
- DeepenRelative Capability = "deepen-relative"
- // NoProgress the client was started with "git clone -q" or something, and
- // doesn't want that side band 2. Basically the client just says "I do not
- // wish to receive stream 2 on sideband, so do not send it to me, and if
- // you did, I will drop it on the floor anyway". However, the sideband
- // channel 3 is still used for error responses.
- NoProgress Capability = "no-progress"
- // IncludeTag capability is about sending annotated tags if we are
- // sending objects they point to. If we pack an object to the client, and
- // a tag object points exactly at that object, we pack the tag object too.
- // In general this allows a client to get all new annotated tags when it
- // fetches a branch, in a single network connection.
- //
- // Clients MAY always send include-tag, hardcoding it into a request when
- // the server advertises this capability. The decision for a client to
- // request include-tag only has to do with the client's desires for tag
- // data, whether or not a server had advertised objects in the
- // refs/tags/* namespace.
- //
- // Servers MUST pack the tags if their referrant is packed and the client
- // has requested include-tags.
- //
- // Clients MUST be prepared for the case where a server has ignored
- // include-tag and has not actually sent tags in the pack. In such
- // cases the client SHOULD issue a subsequent fetch to acquire the tags
- // that include-tag would have otherwise given the client.
- //
- // The server SHOULD send include-tag, if it supports it, regardless
- // of whether or not there are tags available.
- IncludeTag Capability = "include-tag"
- // ReportStatus the receive-pack process can receive a 'report-status'
- // capability, which tells it that the client wants a report of what
- // happened after a packfile upload and reference update. If the pushing
- // client requests this capability, after unpacking and updating references
- // the server will respond with whether the packfile unpacked successfully
- // and if each reference was updated successfully. If any of those were not
- // successful, it will send back an error message. See pack-protocol.txt
- // for example messages.
- ReportStatus Capability = "report-status"
- // DeleteRefs If the server sends back this capability, it means that
- // it is capable of accepting a zero-id value as the target
- // value of a reference update. It is not sent back by the client, it
- // simply informs the client that it can be sent zero-id values
- // to delete references
- DeleteRefs Capability = "delete-refs"
- // Quiet If the receive-pack server advertises this capability, it is
- // capable of silencing human-readable progress output which otherwise may
- // be shown when processing the received pack. A send-pack client should
- // respond with the 'quiet' capability to suppress server-side progress
- // reporting if the local progress reporting is also being suppressed
- // (e.g., via `push -q`, or if stderr does not go to a tty).
- Quiet Capability = "quiet"
- // Atomic If the server sends this capability it is capable of accepting
- // atomic pushes. If the pushing client requests this capability, the server
- // will update the refs in one atomic transaction. Either all refs are
- // updated or none.
- Atomic Capability = "atomic"
- // PushOptions If the server sends this capability it is able to accept
- // push options after the update commands have been sent, but before the
- // packfile is streamed. If the pushing client requests this capability,
- // the server will pass the options to the pre- and post- receive hooks
- // that process this push request.
- PushOptions Capability = "push-options"
- // AllowTipSHA1InWant if the upload-pack server advertises this capability,
- // fetch-pack may send "want" lines with SHA-1s that exist at the server but
- // are not advertised by upload-pack.
- AllowTipSHA1InWant Capability = "allow-tip-sha1-in-want"
- // AllowReachableSHA1InWant if the upload-pack server advertises this
- // capability, fetch-pack may send "want" lines with SHA-1s that exist at
- // the server but are not advertised by upload-pack.
- AllowReachableSHA1InWant Capability = "allow-reachable-sha1-in-want"
- // PushCert the receive-pack server that advertises this capability is
- // willing to accept a signed push certificate, and asks the to be
- // included in the push certificate. A send-pack client MUST NOT
- // send a push-cert packet unless the receive-pack server advertises
- // this capability.
- PushCert Capability = "push-cert"
- // SymRef symbolic reference support for better negotiation.
- SymRef Capability = "symref"
- // ObjectFormat takes a hash algorithm as an argument, indicates that the
- // server supports the given hash algorithms.
- ObjectFormat Capability = "object-format"
- // Filter if present, fetch-pack may send "filter" commands to request a
- // partial clone or partial fetch and request that the server omit various objects from the packfile
- Filter Capability = "filter"
-)
-
-const userAgent = "go-git/5.x"
-
-// DefaultAgent provides the user agent string.
-func DefaultAgent() string {
- if envUserAgent, ok := os.LookupEnv("GO_GIT_USER_AGENT_EXTRA"); ok {
- return fmt.Sprintf("%s %s", userAgent, envUserAgent)
- }
- return userAgent
-}
-
-var known = map[Capability]bool{
- MultiACK: true, MultiACKDetailed: true, NoDone: true, ThinPack: true,
- Sideband: true, Sideband64k: true, OFSDelta: true, Agent: true,
- Shallow: true, DeepenSince: true, DeepenNot: true, DeepenRelative: true,
- NoProgress: true, IncludeTag: true, ReportStatus: true, DeleteRefs: true,
- Quiet: true, Atomic: true, PushOptions: true, AllowTipSHA1InWant: true,
- AllowReachableSHA1InWant: true, PushCert: true, SymRef: true,
- ObjectFormat: true, Filter: true,
-}
-
-var requiresArgument = map[Capability]bool{
- Agent: true, PushCert: true, SymRef: true, ObjectFormat: true,
-}
-
-var multipleArgument = map[Capability]bool{
- SymRef: true,
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/list.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/list.go
deleted file mode 100644
index 553d81cbe4d..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/list.go
+++ /dev/null
@@ -1,195 +0,0 @@
-package capability
-
-import (
- "bytes"
- "errors"
- "fmt"
- "strings"
-)
-
-var (
- // ErrArgumentsRequired is returned if no arguments are giving with a
- // capability that requires arguments
- ErrArgumentsRequired = errors.New("arguments required")
- // ErrArguments is returned if arguments are given with a capabilities that
- // not supports arguments
- ErrArguments = errors.New("arguments not allowed")
- // ErrEmptyArgument is returned when an empty value is given
- ErrEmptyArgument = errors.New("empty argument")
- // ErrMultipleArguments multiple argument given to a capabilities that not
- // support it
- ErrMultipleArguments = errors.New("multiple arguments not allowed")
-)
-
-// List represents a list of capabilities
-type List struct {
- m map[Capability]*entry
- sort []string
-}
-
-type entry struct {
- Name Capability
- Values []string
-}
-
-// NewList returns a new List of capabilities
-func NewList() *List {
- return &List{
- m: make(map[Capability]*entry),
- }
-}
-
-// IsEmpty returns true if the List is empty
-func (l *List) IsEmpty() bool {
- return len(l.sort) == 0
-}
-
-// Decode decodes list of capabilities from raw into the list
-func (l *List) Decode(raw []byte) error {
- // git 1.x receive pack used to send a leading space on its
- // git-receive-pack capabilities announcement. We just trim space to be
- // tolerant to space changes in different versions.
- raw = bytes.TrimSpace(raw)
-
- if len(raw) == 0 {
- return nil
- }
-
- for _, data := range bytes.Split(raw, []byte{' '}) {
- pair := bytes.SplitN(data, []byte{'='}, 2)
-
- c := Capability(pair[0])
- if len(pair) == 1 {
- if err := l.Add(c); err != nil {
- return err
- }
-
- continue
- }
-
- if err := l.Add(c, string(pair[1])); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// Get returns the values for a capability
-func (l *List) Get(capability Capability) []string {
- if _, ok := l.m[capability]; !ok {
- return nil
- }
-
- return l.m[capability].Values
-}
-
-// Set sets a capability removing the previous values
-func (l *List) Set(capability Capability, values ...string) error {
- if _, ok := l.m[capability]; ok {
- l.m[capability].Values = l.m[capability].Values[:0]
- }
- return l.Add(capability, values...)
-}
-
-// Add adds a capability, values are optional
-func (l *List) Add(c Capability, values ...string) error {
- if err := l.validate(c, values); err != nil {
- return err
- }
-
- if !l.Supports(c) {
- l.m[c] = &entry{Name: c}
- l.sort = append(l.sort, c.String())
- }
-
- if len(values) == 0 {
- return nil
- }
-
- if known[c] && !multipleArgument[c] && len(l.m[c].Values) > 0 {
- return ErrMultipleArguments
- }
-
- l.m[c].Values = append(l.m[c].Values, values...)
- return nil
-}
-
-func (l *List) validateNoEmptyArgs(values []string) error {
- for _, v := range values {
- if v == "" {
- return ErrEmptyArgument
- }
- }
- return nil
-}
-
-func (l *List) validate(c Capability, values []string) error {
- if !known[c] {
- return l.validateNoEmptyArgs(values)
- }
- if requiresArgument[c] && len(values) == 0 {
- return ErrArgumentsRequired
- }
-
- if !requiresArgument[c] && len(values) != 0 {
- return ErrArguments
- }
-
- if !multipleArgument[c] && len(values) > 1 {
- return ErrMultipleArguments
- }
- return l.validateNoEmptyArgs(values)
-}
-
-// Supports returns true if capability is present
-func (l *List) Supports(capability Capability) bool {
- _, ok := l.m[capability]
- return ok
-}
-
-// Delete deletes a capability from the List
-func (l *List) Delete(capability Capability) {
- if !l.Supports(capability) {
- return
- }
-
- delete(l.m, capability)
- for i, c := range l.sort {
- if c != string(capability) {
- continue
- }
-
- l.sort = append(l.sort[:i], l.sort[i+1:]...)
- return
- }
-}
-
-// All returns a slice with all defined capabilities.
-func (l *List) All() []Capability {
- var cs []Capability
- for _, key := range l.sort {
- cs = append(cs, Capability(key))
- }
-
- return cs
-}
-
-// String generates the capabilities strings, the capabilities are sorted in
-// insertion order
-func (l *List) String() string {
- var o []string
- for _, key := range l.sort {
- cap := l.m[Capability(key)]
- if len(cap.Values) == 0 {
- o = append(o, key)
- continue
- }
-
- for _, value := range cap.Values {
- o = append(o, fmt.Sprintf("%s=%s", key, value))
- }
- }
-
- return strings.Join(o, " ")
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/common.go
deleted file mode 100644
index a858323e79b..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/common.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package packp
-
-import (
- "fmt"
-)
-
-type stateFn func() stateFn
-
-const (
- // common
- hashSize = 40
-
- // advrefs
- head = "HEAD"
- noHead = "capabilities^{}"
-)
-
-var (
- // common
- sp = []byte(" ")
- eol = []byte("\n")
-
- // advertised-refs
- null = []byte("\x00")
- peeled = []byte("^{}")
- noHeadMark = []byte(" capabilities^{}\x00")
-
- // upload-request
- want = []byte("want ")
- shallow = []byte("shallow ")
- deepen = []byte("deepen")
- deepenCommits = []byte("deepen ")
- deepenSince = []byte("deepen-since ")
- deepenReference = []byte("deepen-not ")
-
- // shallow-update
- unshallow = []byte("unshallow ")
-
- // server-response
- ack = []byte("ACK")
- nak = []byte("NAK")
-
- // updreq
- shallowNoSp = []byte("shallow")
-)
-
-func isFlush(payload []byte) bool {
- return len(payload) == 0
-}
-
-var (
- // ErrNilWriter is returned when a nil writer is passed to the encoder.
- ErrNilWriter = fmt.Errorf("nil writer")
-)
-
-// ErrUnexpectedData represents an unexpected data decoding a message
-type ErrUnexpectedData struct {
- Msg string
- Data []byte
-}
-
-// NewErrUnexpectedData returns a new ErrUnexpectedData containing the data and
-// the message given
-func NewErrUnexpectedData(msg string, data []byte) error {
- return &ErrUnexpectedData{Msg: msg, Data: data}
-}
-
-func (err *ErrUnexpectedData) Error() string {
- if len(err.Data) == 0 {
- return err.Msg
- }
-
- return fmt.Sprintf("%s (%s)", err.Msg, err.Data)
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/doc.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/doc.go
deleted file mode 100644
index 4950d1d6625..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/doc.go
+++ /dev/null
@@ -1,724 +0,0 @@
-package packp
-
-/*
-
-A nice way to trace the real data transmitted and received by git, use:
-
-GIT_TRACE_PACKET=true git ls-remote http://github.com/src-d/go-git
-GIT_TRACE_PACKET=true git clone http://github.com/src-d/go-git
-
-Here follows a copy of the current protocol specification at the time of
-this writing.
-
-(Please notice that most http git servers will add a flush-pkt after the
-first pkt-line when using HTTP smart.)
-
-
-Documentation Common to Pack and Http Protocols
-===============================================
-
-ABNF Notation
--------------
-
-ABNF notation as described by RFC 5234 is used within the protocol documents,
-except the following replacement core rules are used:
-----
- HEXDIG = DIGIT / "a" / "b" / "c" / "d" / "e" / "f"
-----
-
-We also define the following common rules:
-----
- NUL = %x00
- zero-id = 40*"0"
- obj-id = 40*(HEXDIGIT)
-
- refname = "HEAD"
- refname /= "refs/"
-----
-
-A refname is a hierarchical octet string beginning with "refs/" and
-not violating the 'git-check-ref-format' command's validation rules.
-More specifically, they:
-
-. They can include slash `/` for hierarchical (directory)
- grouping, but no slash-separated component can begin with a
- dot `.`.
-
-. They must contain at least one `/`. This enforces the presence of a
- category like `heads/`, `tags/` etc. but the actual names are not
- restricted.
-
-. They cannot have two consecutive dots `..` anywhere.
-
-. They cannot have ASCII control characters (i.e. bytes whose
- values are lower than \040, or \177 `DEL`), space, tilde `~`,
- caret `^`, colon `:`, question-mark `?`, asterisk `*`,
- or open bracket `[` anywhere.
-
-. They cannot end with a slash `/` or a dot `.`.
-
-. They cannot end with the sequence `.lock`.
-
-. They cannot contain a sequence `@{`.
-
-. They cannot contain a `\\`.
-
-
-pkt-line Format
----------------
-
-Much (but not all) of the payload is described around pkt-lines.
-
-A pkt-line is a variable length binary string. The first four bytes
-of the line, the pkt-len, indicates the total length of the line,
-in hexadecimal. The pkt-len includes the 4 bytes used to contain
-the length's hexadecimal representation.
-
-A pkt-line MAY contain binary data, so implementors MUST ensure
-pkt-line parsing/formatting routines are 8-bit clean.
-
-A non-binary line SHOULD BE terminated by an LF, which if present
-MUST be included in the total length. Receivers MUST treat pkt-lines
-with non-binary data the same whether or not they contain the trailing
-LF (stripping the LF if present, and not complaining when it is
-missing).
-
-The maximum length of a pkt-line's data component is 65516 bytes.
-Implementations MUST NOT send pkt-line whose length exceeds 65520
-(65516 bytes of payload + 4 bytes of length data).
-
-Implementations SHOULD NOT send an empty pkt-line ("0004").
-
-A pkt-line with a length field of 0 ("0000"), called a flush-pkt,
-is a special case and MUST be handled differently than an empty
-pkt-line ("0004").
-
-----
- pkt-line = data-pkt / flush-pkt
-
- data-pkt = pkt-len pkt-payload
- pkt-len = 4*(HEXDIG)
- pkt-payload = (pkt-len - 4)*(OCTET)
-
- flush-pkt = "0000"
-----
-
-Examples (as C-style strings):
-
-----
- pkt-line actual value
- ---------------------------------
- "0006a\n" "a\n"
- "0005a" "a"
- "000bfoobar\n" "foobar\n"
- "0004" ""
-----
-
-Packfile transfer protocols
-===========================
-
-Git supports transferring data in packfiles over the ssh://, git://, http:// and
-file:// transports. There exist two sets of protocols, one for pushing
-data from a client to a server and another for fetching data from a
-server to a client. The three transports (ssh, git, file) use the same
-protocol to transfer data. http is documented in http-protocol.txt.
-
-The processes invoked in the canonical Git implementation are 'upload-pack'
-on the server side and 'fetch-pack' on the client side for fetching data;
-then 'receive-pack' on the server and 'send-pack' on the client for pushing
-data. The protocol functions to have a server tell a client what is
-currently on the server, then for the two to negotiate the smallest amount
-of data to send in order to fully update one or the other.
-
-pkt-line Format
----------------
-
-The descriptions below build on the pkt-line format described in
-protocol-common.txt. When the grammar indicate `PKT-LINE(...)`, unless
-otherwise noted the usual pkt-line LF rules apply: the sender SHOULD
-include a LF, but the receiver MUST NOT complain if it is not present.
-
-Transports
-----------
-There are three transports over which the packfile protocol is
-initiated. The Git transport is a simple, unauthenticated server that
-takes the command (almost always 'upload-pack', though Git
-servers can be configured to be globally writable, in which 'receive-
-pack' initiation is also allowed) with which the client wishes to
-communicate and executes it and connects it to the requesting
-process.
-
-In the SSH transport, the client just runs the 'upload-pack'
-or 'receive-pack' process on the server over the SSH protocol and then
-communicates with that invoked process over the SSH connection.
-
-The file:// transport runs the 'upload-pack' or 'receive-pack'
-process locally and communicates with it over a pipe.
-
-Git Transport
--------------
-
-The Git transport starts off by sending the command and repository
-on the wire using the pkt-line format, followed by a NUL byte and a
-hostname parameter, terminated by a NUL byte.
-
- 0032git-upload-pack /project.git\0host=myserver.com\0
-
---
- git-proto-request = request-command SP pathname NUL [ host-parameter NUL ]
- request-command = "git-upload-pack" / "git-receive-pack" /
- "git-upload-archive" ; case sensitive
- pathname = *( %x01-ff ) ; exclude NUL
- host-parameter = "host=" hostname [ ":" port ]
---
-
-Only host-parameter is allowed in the git-proto-request. Clients
-MUST NOT attempt to send additional parameters. It is used for the
-git-daemon name based virtual hosting. See --interpolated-path
-option to git daemon, with the %H/%CH format characters.
-
-Basically what the Git client is doing to connect to an 'upload-pack'
-process on the server side over the Git protocol is this:
-
- $ echo -e -n \
- "0039git-upload-pack /schacon/gitbook.git\0host=example.com\0" |
- nc -v example.com 9418
-
-If the server refuses the request for some reasons, it could abort
-gracefully with an error message.
-
-----
- error-line = PKT-LINE("ERR" SP explanation-text)
-----
-
-
-SSH Transport
--------------
-
-Initiating the upload-pack or receive-pack processes over SSH is
-executing the binary on the server via SSH remote execution.
-It is basically equivalent to running this:
-
- $ ssh git.example.com "git-upload-pack '/project.git'"
-
-For a server to support Git pushing and pulling for a given user over
-SSH, that user needs to be able to execute one or both of those
-commands via the SSH shell that they are provided on login. On some
-systems, that shell access is limited to only being able to run those
-two commands, or even just one of them.
-
-In an ssh:// format URI, it's absolute in the URI, so the '/' after
-the host name (or port number) is sent as an argument, which is then
-read by the remote git-upload-pack exactly as is, so it's effectively
-an absolute path in the remote filesystem.
-
- git clone ssh://user@example.com/project.git
- |
- v
- ssh user@example.com "git-upload-pack '/project.git'"
-
-In a "user@host:path" format URI, its relative to the user's home
-directory, because the Git client will run:
-
- git clone user@example.com:project.git
- |
- v
- ssh user@example.com "git-upload-pack 'project.git'"
-
-The exception is if a '~' is used, in which case
-we execute it without the leading '/'.
-
- ssh://user@example.com/~alice/project.git,
- |
- v
- ssh user@example.com "git-upload-pack '~alice/project.git'"
-
-A few things to remember here:
-
-- The "command name" is spelled with dash (e.g. git-upload-pack), but
- this can be overridden by the client;
-
-- The repository path is always quoted with single quotes.
-
-Fetching Data From a Server
----------------------------
-
-When one Git repository wants to get data that a second repository
-has, the first can 'fetch' from the second. This operation determines
-what data the server has that the client does not then streams that
-data down to the client in packfile format.
-
-
-Reference Discovery
--------------------
-
-When the client initially connects the server will immediately respond
-with a listing of each reference it has (all branches and tags) along
-with the object name that each reference currently points to.
-
- $ echo -e -n "0039git-upload-pack /schacon/gitbook.git\0host=example.com\0" |
- nc -v example.com 9418
- 00887217a7c7e582c46cec22a130adf4b9d7d950fba0 HEAD\0multi_ack thin-pack
- side-band side-band-64k ofs-delta shallow no-progress include-tag
- 00441d3fcd5ced445d1abc402225c0b8a1299641f497 refs/heads/integration
- 003f7217a7c7e582c46cec22a130adf4b9d7d950fba0 refs/heads/master
- 003cb88d2441cac0977faf98efc80305012112238d9d refs/tags/v0.9
- 003c525128480b96c89e6418b1e40909bf6c5b2d580f refs/tags/v1.0
- 003fe92df48743b7bc7d26bcaabfddde0a1e20cae47c refs/tags/v1.0^{}
- 0000
-
-The returned response is a pkt-line stream describing each ref and
-its current value. The stream MUST be sorted by name according to
-the C locale ordering.
-
-If HEAD is a valid ref, HEAD MUST appear as the first advertised
-ref. If HEAD is not a valid ref, HEAD MUST NOT appear in the
-advertisement list at all, but other refs may still appear.
-
-The stream MUST include capability declarations behind a NUL on the
-first ref. The peeled value of a ref (that is "ref^{}") MUST be
-immediately after the ref itself, if presented. A conforming server
-MUST peel the ref if it's an annotated tag.
-
-----
- advertised-refs = (no-refs / list-of-refs)
- *shallow
- flush-pkt
-
- no-refs = PKT-LINE(zero-id SP "capabilities^{}"
- NUL capability-list)
-
- list-of-refs = first-ref *other-ref
- first-ref = PKT-LINE(obj-id SP refname
- NUL capability-list)
-
- other-ref = PKT-LINE(other-tip / other-peeled)
- other-tip = obj-id SP refname
- other-peeled = obj-id SP refname "^{}"
-
- shallow = PKT-LINE("shallow" SP obj-id)
-
- capability-list = capability *(SP capability)
- capability = 1*(LC_ALPHA / DIGIT / "-" / "_")
- LC_ALPHA = %x61-7A
-----
-
-Server and client MUST use lowercase for obj-id, both MUST treat obj-id
-as case-insensitive.
-
-See protocol-capabilities.txt for a list of allowed server capabilities
-and descriptions.
-
-Packfile Negotiation
---------------------
-After reference and capabilities discovery, the client can decide to
-terminate the connection by sending a flush-pkt, telling the server it can
-now gracefully terminate, and disconnect, when it does not need any pack
-data. This can happen with the ls-remote command, and also can happen when
-the client already is up-to-date.
-
-Otherwise, it enters the negotiation phase, where the client and
-server determine what the minimal packfile necessary for transport is,
-by telling the server what objects it wants, its shallow objects
-(if any), and the maximum commit depth it wants (if any). The client
-will also send a list of the capabilities it wants to be in effect,
-out of what the server said it could do with the first 'want' line.
-
-----
- upload-request = want-list
- *shallow-line
- *1depth-request
- flush-pkt
-
- want-list = first-want
- *additional-want
-
- shallow-line = PKT-LINE("shallow" SP obj-id)
-
- depth-request = PKT-LINE("deepen" SP depth) /
- PKT-LINE("deepen-since" SP timestamp) /
- PKT-LINE("deepen-not" SP ref)
-
- first-want = PKT-LINE("want" SP obj-id SP capability-list)
- additional-want = PKT-LINE("want" SP obj-id)
-
- depth = 1*DIGIT
-----
-
-Clients MUST send all the obj-ids it wants from the reference
-discovery phase as 'want' lines. Clients MUST send at least one
-'want' command in the request body. Clients MUST NOT mention an
-obj-id in a 'want' command which did not appear in the response
-obtained through ref discovery.
-
-The client MUST write all obj-ids which it only has shallow copies
-of (meaning that it does not have the parents of a commit) as
-'shallow' lines so that the server is aware of the limitations of
-the client's history.
-
-The client now sends the maximum commit history depth it wants for
-this transaction, which is the number of commits it wants from the
-tip of the history, if any, as a 'deepen' line. A depth of 0 is the
-same as not making a depth request. The client does not want to receive
-any commits beyond this depth, nor does it want objects needed only to
-complete those commits. Commits whose parents are not received as a
-result are defined as shallow and marked as such in the server. This
-information is sent back to the client in the next step.
-
-Once all the 'want's and 'shallow's (and optional 'deepen') are
-transferred, clients MUST send a flush-pkt, to tell the server side
-that it is done sending the list.
-
-Otherwise, if the client sent a positive depth request, the server
-will determine which commits will and will not be shallow and
-send this information to the client. If the client did not request
-a positive depth, this step is skipped.
-
-----
- shallow-update = *shallow-line
- *unshallow-line
- flush-pkt
-
- shallow-line = PKT-LINE("shallow" SP obj-id)
-
- unshallow-line = PKT-LINE("unshallow" SP obj-id)
-----
-
-If the client has requested a positive depth, the server will compute
-the set of commits which are no deeper than the desired depth. The set
-of commits start at the client's wants.
-
-The server writes 'shallow' lines for each
-commit whose parents will not be sent as a result. The server writes
-an 'unshallow' line for each commit which the client has indicated is
-shallow, but is no longer shallow at the currently requested depth
-(that is, its parents will now be sent). The server MUST NOT mark
-as unshallow anything which the client has not indicated was shallow.
-
-Now the client will send a list of the obj-ids it has using 'have'
-lines, so the server can make a packfile that only contains the objects
-that the client needs. In multi_ack mode, the canonical implementation
-will send up to 32 of these at a time, then will send a flush-pkt. The
-canonical implementation will skip ahead and send the next 32 immediately,
-so that there is always a block of 32 "in-flight on the wire" at a time.
-
-----
- upload-haves = have-list
- compute-end
-
- have-list = *have-line
- have-line = PKT-LINE("have" SP obj-id)
- compute-end = flush-pkt / PKT-LINE("done")
-----
-
-If the server reads 'have' lines, it then will respond by ACKing any
-of the obj-ids the client said it had that the server also has. The
-server will ACK obj-ids differently depending on which ack mode is
-chosen by the client.
-
-In multi_ack mode:
-
- * the server will respond with 'ACK obj-id continue' for any common
- commits.
-
- * once the server has found an acceptable common base commit and is
- ready to make a packfile, it will blindly ACK all 'have' obj-ids
- back to the client.
-
- * the server will then send a 'NAK' and then wait for another response
- from the client - either a 'done' or another list of 'have' lines.
-
-In multi_ack_detailed mode:
-
- * the server will differentiate the ACKs where it is signaling
- that it is ready to send data with 'ACK obj-id ready' lines, and
- signals the identified common commits with 'ACK obj-id common' lines.
-
-Without either multi_ack or multi_ack_detailed:
-
- * upload-pack sends "ACK obj-id" on the first common object it finds.
- After that it says nothing until the client gives it a "done".
-
- * upload-pack sends "NAK" on a flush-pkt if no common object
- has been found yet. If one has been found, and thus an ACK
- was already sent, it's silent on the flush-pkt.
-
-After the client has gotten enough ACK responses that it can determine
-that the server has enough information to send an efficient packfile
-(in the canonical implementation, this is determined when it has received
-enough ACKs that it can color everything left in the --date-order queue
-as common with the server, or the --date-order queue is empty), or the
-client determines that it wants to give up (in the canonical implementation,
-this is determined when the client sends 256 'have' lines without getting
-any of them ACKed by the server - meaning there is nothing in common and
-the server should just send all of its objects), then the client will send
-a 'done' command. The 'done' command signals to the server that the client
-is ready to receive its packfile data.
-
-However, the 256 limit *only* turns on in the canonical client
-implementation if we have received at least one "ACK %s continue"
-during a prior round. This helps to ensure that at least one common
-ancestor is found before we give up entirely.
-
-Once the 'done' line is read from the client, the server will either
-send a final 'ACK obj-id' or it will send a 'NAK'. 'obj-id' is the object
-name of the last commit determined to be common. The server only sends
-ACK after 'done' if there is at least one common base and multi_ack or
-multi_ack_detailed is enabled. The server always sends NAK after 'done'
-if there is no common base found.
-
-Then the server will start sending its packfile data.
-
-----
- server-response = *ack_multi ack / nak
- ack_multi = PKT-LINE("ACK" SP obj-id ack_status)
- ack_status = "continue" / "common" / "ready"
- ack = PKT-LINE("ACK" SP obj-id)
- nak = PKT-LINE("NAK")
-----
-
-A simple clone may look like this (with no 'have' lines):
-
-----
- C: 0054want 74730d410fcb6603ace96f1dc55ea6196122532d multi_ack \
- side-band-64k ofs-delta\n
- C: 0032want 7d1665144a3a975c05f1f43902ddaf084e784dbe\n
- C: 0032want 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a\n
- C: 0032want 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01\n
- C: 0032want 74730d410fcb6603ace96f1dc55ea6196122532d\n
- C: 0000
- C: 0009done\n
-
- S: 0008NAK\n
- S: [PACKFILE]
-----
-
-An incremental update (fetch) response might look like this:
-
-----
- C: 0054want 74730d410fcb6603ace96f1dc55ea6196122532d multi_ack \
- side-band-64k ofs-delta\n
- C: 0032want 7d1665144a3a975c05f1f43902ddaf084e784dbe\n
- C: 0032want 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a\n
- C: 0000
- C: 0032have 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01\n
- C: [30 more have lines]
- C: 0032have 74730d410fcb6603ace96f1dc55ea6196122532d\n
- C: 0000
-
- S: 003aACK 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01 continue\n
- S: 003aACK 74730d410fcb6603ace96f1dc55ea6196122532d continue\n
- S: 0008NAK\n
-
- C: 0009done\n
-
- S: 0031ACK 74730d410fcb6603ace96f1dc55ea6196122532d\n
- S: [PACKFILE]
-----
-
-
-Packfile Data
--------------
-
-Now that the client and server have finished negotiation about what
-the minimal amount of data that needs to be sent to the client is, the server
-will construct and send the required data in packfile format.
-
-See pack-format.txt for what the packfile itself actually looks like.
-
-If 'side-band' or 'side-band-64k' capabilities have been specified by
-the client, the server will send the packfile data multiplexed.
-
-Each packet starting with the packet-line length of the amount of data
-that follows, followed by a single byte specifying the sideband the
-following data is coming in on.
-
-In 'side-band' mode, it will send up to 999 data bytes plus 1 control
-code, for a total of up to 1000 bytes in a pkt-line. In 'side-band-64k'
-mode it will send up to 65519 data bytes plus 1 control code, for a
-total of up to 65520 bytes in a pkt-line.
-
-The sideband byte will be a '1', '2' or a '3'. Sideband '1' will contain
-packfile data, sideband '2' will be used for progress information that the
-client will generally print to stderr and sideband '3' is used for error
-information.
-
-If no 'side-band' capability was specified, the server will stream the
-entire packfile without multiplexing.
-
-
-Pushing Data To a Server
-------------------------
-
-Pushing data to a server will invoke the 'receive-pack' process on the
-server, which will allow the client to tell it which references it should
-update and then send all the data the server will need for those new
-references to be complete. Once all the data is received and validated,
-the server will then update its references to what the client specified.
-
-Authentication
---------------
-
-The protocol itself contains no authentication mechanisms. That is to be
-handled by the transport, such as SSH, before the 'receive-pack' process is
-invoked. If 'receive-pack' is configured over the Git transport, those
-repositories will be writable by anyone who can access that port (9418) as
-that transport is unauthenticated.
-
-Reference Discovery
--------------------
-
-The reference discovery phase is done nearly the same way as it is in the
-fetching protocol. Each reference obj-id and name on the server is sent
-in packet-line format to the client, followed by a flush-pkt. The only
-real difference is that the capability listing is different - the only
-possible values are 'report-status', 'delete-refs', 'ofs-delta' and
-'push-options'.
-
-Reference Update Request and Packfile Transfer
-----------------------------------------------
-
-Once the client knows what references the server is at, it can send a
-list of reference update requests. For each reference on the server
-that it wants to update, it sends a line listing the obj-id currently on
-the server, the obj-id the client would like to update it to and the name
-of the reference.
-
-This list is followed by a flush-pkt. Then the push options are transmitted
-one per packet followed by another flush-pkt. After that the packfile that
-should contain all the objects that the server will need to complete the new
-references will be sent.
-
-----
- update-request = *shallow ( command-list | push-cert ) [packfile]
-
- shallow = PKT-LINE("shallow" SP obj-id)
-
- command-list = PKT-LINE(command NUL capability-list)
- *PKT-LINE(command)
- flush-pkt
-
- command = create / delete / update
- create = zero-id SP new-id SP name
- delete = old-id SP zero-id SP name
- update = old-id SP new-id SP name
-
- old-id = obj-id
- new-id = obj-id
-
- push-cert = PKT-LINE("push-cert" NUL capability-list LF)
- PKT-LINE("certificate version 0.1" LF)
- PKT-LINE("pusher" SP ident LF)
- PKT-LINE("pushee" SP url LF)
- PKT-LINE("nonce" SP nonce LF)
- PKT-LINE(LF)
- *PKT-LINE(command LF)
- *PKT-LINE(gpg-signature-lines LF)
- PKT-LINE("push-cert-end" LF)
-
- packfile = "PACK" 28*(OCTET)
-----
-
-If the receiving end does not support delete-refs, the sending end MUST
-NOT ask for delete command.
-
-If the receiving end does not support push-cert, the sending end
-MUST NOT send a push-cert command. When a push-cert command is
-sent, command-list MUST NOT be sent; the commands recorded in the
-push certificate is used instead.
-
-The packfile MUST NOT be sent if the only command used is 'delete'.
-
-A packfile MUST be sent if either create or update command is used,
-even if the server already has all the necessary objects. In this
-case the client MUST send an empty packfile. The only time this
-is likely to happen is if the client is creating
-a new branch or a tag that points to an existing obj-id.
-
-The server will receive the packfile, unpack it, then validate each
-reference that is being updated that it hasn't changed while the request
-was being processed (the obj-id is still the same as the old-id), and
-it will run any update hooks to make sure that the update is acceptable.
-If all of that is fine, the server will then update the references.
-
-Push Certificate
-----------------
-
-A push certificate begins with a set of header lines. After the
-header and an empty line, the protocol commands follow, one per
-line. Note that the trailing LF in push-cert PKT-LINEs is _not_
-optional; it must be present.
-
-Currently, the following header fields are defined:
-
-`pusher` ident::
- Identify the GPG key in "Human Readable Name "
- format.
-
-`pushee` url::
- The repository URL (anonymized, if the URL contains
- authentication material) the user who ran `git push`
- intended to push into.
-
-`nonce` nonce::
- The 'nonce' string the receiving repository asked the
- pushing user to include in the certificate, to prevent
- replay attacks.
-
-The GPG signature lines are a detached signature for the contents
-recorded in the push certificate before the signature block begins.
-The detached signature is used to certify that the commands were
-given by the pusher, who must be the signer.
-
-Report Status
--------------
-
-After receiving the pack data from the sender, the receiver sends a
-report if 'report-status' capability is in effect.
-It is a short listing of what happened in that update. It will first
-list the status of the packfile unpacking as either 'unpack ok' or
-'unpack [error]'. Then it will list the status for each of the references
-that it tried to update. Each line is either 'ok [refname]' if the
-update was successful, or 'ng [refname] [error]' if the update was not.
-
-----
- report-status = unpack-status
- 1*(command-status)
- flush-pkt
-
- unpack-status = PKT-LINE("unpack" SP unpack-result)
- unpack-result = "ok" / error-msg
-
- command-status = command-ok / command-fail
- command-ok = PKT-LINE("ok" SP refname)
- command-fail = PKT-LINE("ng" SP refname SP error-msg)
-
- error-msg = 1*(OCTECT) ; where not "ok"
-----
-
-Updates can be unsuccessful for a number of reasons. The reference can have
-changed since the reference discovery phase was originally sent, meaning
-someone pushed in the meantime. The reference being pushed could be a
-non-fast-forward reference and the update hooks or configuration could be
-set to not allow that, etc. Also, some references can be updated while others
-can be rejected.
-
-An example client/server communication might look like this:
-
-----
- S: 007c74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/local\0report-status delete-refs ofs-delta\n
- S: 003e7d1665144a3a975c05f1f43902ddaf084e784dbe refs/heads/debug\n
- S: 003f74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/master\n
- S: 003f74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/team\n
- S: 0000
-
- C: 003e7d1665144a3a975c05f1f43902ddaf084e784dbe 74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/debug\n
- C: 003e74730d410fcb6603ace96f1dc55ea6196122532d 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a refs/heads/master\n
- C: 0000
- C: [PACKDATA]
-
- S: 000eunpack ok\n
- S: 0018ok refs/heads/debug\n
- S: 002ang refs/heads/master non-fast-forward\n
-----
-*/
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/gitproto.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/gitproto.go
deleted file mode 100644
index 0b7ff8f8260..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/gitproto.go
+++ /dev/null
@@ -1,120 +0,0 @@
-package packp
-
-import (
- "fmt"
- "io"
- "strings"
-
- "github.com/go-git/go-git/v5/plumbing/format/pktline"
-)
-
-var (
- // ErrInvalidGitProtoRequest is returned by Decode if the input is not a
- // valid git protocol request.
- ErrInvalidGitProtoRequest = fmt.Errorf("invalid git protocol request")
-)
-
-// GitProtoRequest is a command request for the git protocol.
-// It is used to send the command, endpoint, and extra parameters to the
-// remote.
-// See https://git-scm.com/docs/pack-protocol#_git_transport
-type GitProtoRequest struct {
- RequestCommand string
- Pathname string
-
- // Optional
- Host string
-
- // Optional
- ExtraParams []string
-}
-
-// validate validates the request.
-func (g *GitProtoRequest) validate() error {
- if g.RequestCommand == "" {
- return fmt.Errorf("%w: empty request command", ErrInvalidGitProtoRequest)
- }
-
- if g.Pathname == "" {
- return fmt.Errorf("%w: empty pathname", ErrInvalidGitProtoRequest)
- }
-
- return nil
-}
-
-// Encode encodes the request into the writer.
-func (g *GitProtoRequest) Encode(w io.Writer) error {
- if w == nil {
- return ErrNilWriter
- }
-
- if err := g.validate(); err != nil {
- return err
- }
-
- p := pktline.NewEncoder(w)
- req := fmt.Sprintf("%s %s\x00", g.RequestCommand, g.Pathname)
- if host := g.Host; host != "" {
- req += fmt.Sprintf("host=%s\x00", host)
- }
-
- if len(g.ExtraParams) > 0 {
- req += "\x00"
- for _, param := range g.ExtraParams {
- req += param + "\x00"
- }
- }
-
- if err := p.Encode([]byte(req)); err != nil {
- return err
- }
-
- return nil
-}
-
-// Decode decodes the request from the reader.
-func (g *GitProtoRequest) Decode(r io.Reader) error {
- s := pktline.NewScanner(r)
- if !s.Scan() {
- err := s.Err()
- if err == nil {
- return ErrInvalidGitProtoRequest
- }
- return err
- }
-
- line := string(s.Bytes())
- if len(line) == 0 {
- return io.EOF
- }
-
- if line[len(line)-1] != 0 {
- return fmt.Errorf("%w: missing null terminator", ErrInvalidGitProtoRequest)
- }
-
- parts := strings.SplitN(line, " ", 2)
- if len(parts) != 2 {
- return fmt.Errorf("%w: short request", ErrInvalidGitProtoRequest)
- }
-
- g.RequestCommand = parts[0]
- params := strings.Split(parts[1], string(null))
- if len(params) < 1 {
- return fmt.Errorf("%w: missing pathname", ErrInvalidGitProtoRequest)
- }
-
- g.Pathname = params[0]
- if len(params) > 1 {
- g.Host = strings.TrimPrefix(params[1], "host=")
- }
-
- if len(params) > 2 {
- for _, param := range params[2:] {
- if param != "" {
- g.ExtraParams = append(g.ExtraParams, param)
- }
- }
- }
-
- return nil
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/report_status.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/report_status.go
deleted file mode 100644
index e2a0a108b24..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/report_status.go
+++ /dev/null
@@ -1,165 +0,0 @@
-package packp
-
-import (
- "bytes"
- "fmt"
- "io"
- "strings"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/format/pktline"
-)
-
-const (
- ok = "ok"
-)
-
-// ReportStatus is a report status message, as used in the git-receive-pack
-// process whenever the 'report-status' capability is negotiated.
-type ReportStatus struct {
- UnpackStatus string
- CommandStatuses []*CommandStatus
-}
-
-// NewReportStatus creates a new ReportStatus message.
-func NewReportStatus() *ReportStatus {
- return &ReportStatus{}
-}
-
-// Error returns the first error if any.
-func (s *ReportStatus) Error() error {
- if s.UnpackStatus != ok {
- return fmt.Errorf("unpack error: %s", s.UnpackStatus)
- }
-
- for _, s := range s.CommandStatuses {
- if err := s.Error(); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// Encode writes the report status to a writer.
-func (s *ReportStatus) Encode(w io.Writer) error {
- e := pktline.NewEncoder(w)
- if err := e.Encodef("unpack %s\n", s.UnpackStatus); err != nil {
- return err
- }
-
- for _, cs := range s.CommandStatuses {
- if err := cs.encode(w); err != nil {
- return err
- }
- }
-
- return e.Flush()
-}
-
-// Decode reads from the given reader and decodes a report-status message. It
-// does not read more input than what is needed to fill the report status.
-func (s *ReportStatus) Decode(r io.Reader) error {
- scan := pktline.NewScanner(r)
- if err := s.scanFirstLine(scan); err != nil {
- return err
- }
-
- if err := s.decodeReportStatus(scan.Bytes()); err != nil {
- return err
- }
-
- flushed := false
- for scan.Scan() {
- b := scan.Bytes()
- if isFlush(b) {
- flushed = true
- break
- }
-
- if err := s.decodeCommandStatus(b); err != nil {
- return err
- }
- }
-
- if !flushed {
- return fmt.Errorf("missing flush")
- }
-
- return scan.Err()
-}
-
-func (s *ReportStatus) scanFirstLine(scan *pktline.Scanner) error {
- if scan.Scan() {
- return nil
- }
-
- if scan.Err() != nil {
- return scan.Err()
- }
-
- return io.ErrUnexpectedEOF
-}
-
-func (s *ReportStatus) decodeReportStatus(b []byte) error {
- if isFlush(b) {
- return fmt.Errorf("premature flush")
- }
-
- b = bytes.TrimSuffix(b, eol)
-
- line := string(b)
- fields := strings.SplitN(line, " ", 2)
- if len(fields) != 2 || fields[0] != "unpack" {
- return fmt.Errorf("malformed unpack status: %s", line)
- }
-
- s.UnpackStatus = fields[1]
- return nil
-}
-
-func (s *ReportStatus) decodeCommandStatus(b []byte) error {
- b = bytes.TrimSuffix(b, eol)
-
- line := string(b)
- fields := strings.SplitN(line, " ", 3)
- status := ok
- if len(fields) == 3 && fields[0] == "ng" {
- status = fields[2]
- } else if len(fields) != 2 || fields[0] != "ok" {
- return fmt.Errorf("malformed command status: %s", line)
- }
-
- cs := &CommandStatus{
- ReferenceName: plumbing.ReferenceName(fields[1]),
- Status: status,
- }
- s.CommandStatuses = append(s.CommandStatuses, cs)
- return nil
-}
-
-// CommandStatus is the status of a reference in a report status.
-// See ReportStatus struct.
-type CommandStatus struct {
- ReferenceName plumbing.ReferenceName
- Status string
-}
-
-// Error returns the error, if any.
-func (s *CommandStatus) Error() error {
- if s.Status == ok {
- return nil
- }
-
- return fmt.Errorf("command error on %s: %s",
- s.ReferenceName.String(), s.Status)
-}
-
-func (s *CommandStatus) encode(w io.Writer) error {
- e := pktline.NewEncoder(w)
- if s.Error() == nil {
- return e.Encodef("ok %s\n", s.ReferenceName.String())
- }
-
- return e.Encodef("ng %s %s\n", s.ReferenceName.String(), s.Status)
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/shallowupd.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/shallowupd.go
deleted file mode 100644
index fe4fe688795..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/shallowupd.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package packp
-
-import (
- "bytes"
- "fmt"
- "io"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/format/pktline"
-)
-
-const (
- shallowLineLen = 48
- unshallowLineLen = 50
-)
-
-type ShallowUpdate struct {
- Shallows []plumbing.Hash
- Unshallows []plumbing.Hash
-}
-
-func (r *ShallowUpdate) Decode(reader io.Reader) error {
- s := pktline.NewScanner(reader)
-
- for s.Scan() {
- line := s.Bytes()
- line = bytes.TrimSpace(line)
-
- var err error
- switch {
- case bytes.HasPrefix(line, shallow):
- err = r.decodeShallowLine(line)
- case bytes.HasPrefix(line, unshallow):
- err = r.decodeUnshallowLine(line)
- case bytes.Equal(line, pktline.Flush):
- return nil
- }
-
- if err != nil {
- return err
- }
- }
-
- return s.Err()
-}
-
-func (r *ShallowUpdate) decodeShallowLine(line []byte) error {
- hash, err := r.decodeLine(line, shallow, shallowLineLen)
- if err != nil {
- return err
- }
-
- r.Shallows = append(r.Shallows, hash)
- return nil
-}
-
-func (r *ShallowUpdate) decodeUnshallowLine(line []byte) error {
- hash, err := r.decodeLine(line, unshallow, unshallowLineLen)
- if err != nil {
- return err
- }
-
- r.Unshallows = append(r.Unshallows, hash)
- return nil
-}
-
-func (r *ShallowUpdate) decodeLine(line, prefix []byte, expLen int) (plumbing.Hash, error) {
- if len(line) != expLen {
- return plumbing.ZeroHash, fmt.Errorf("malformed %s%q", prefix, line)
- }
-
- raw := string(line[expLen-40 : expLen])
- return plumbing.NewHash(raw), nil
-}
-
-func (r *ShallowUpdate) Encode(w io.Writer) error {
- e := pktline.NewEncoder(w)
-
- for _, h := range r.Shallows {
- if err := e.Encodef("%s%s\n", shallow, h.String()); err != nil {
- return err
- }
- }
-
- for _, h := range r.Unshallows {
- if err := e.Encodef("%s%s\n", unshallow, h.String()); err != nil {
- return err
- }
- }
-
- return e.Flush()
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/common.go
deleted file mode 100644
index de5001281fd..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/common.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package sideband
-
-// Type sideband type "side-band" or "side-band-64k"
-type Type int8
-
-const (
- // Sideband legacy sideband type up to 1000-byte messages
- Sideband Type = iota
- // Sideband64k sideband type up to 65519-byte messages
- Sideband64k Type = iota
-
- // MaxPackedSize for Sideband type
- MaxPackedSize = 1000
- // MaxPackedSize64k for Sideband64k type
- MaxPackedSize64k = 65520
-)
-
-// Channel sideband channel
-type Channel byte
-
-// WithPayload encode the payload as a message
-func (ch Channel) WithPayload(payload []byte) []byte {
- return append([]byte{byte(ch)}, payload...)
-}
-
-const (
- // PackData packfile content
- PackData Channel = 1
- // ProgressMessage progress messages
- ProgressMessage Channel = 2
- // ErrorMessage fatal error message just before stream aborts
- ErrorMessage Channel = 3
-)
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/demux.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/demux.go
deleted file mode 100644
index 0116f962ef2..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/demux.go
+++ /dev/null
@@ -1,148 +0,0 @@
-package sideband
-
-import (
- "errors"
- "fmt"
- "io"
-
- "github.com/go-git/go-git/v5/plumbing/format/pktline"
-)
-
-// ErrMaxPackedExceeded returned by Read, if the maximum packed size is exceeded
-var ErrMaxPackedExceeded = errors.New("max. packed size exceeded")
-
-// Progress where the progress information is stored
-type Progress interface {
- io.Writer
-}
-
-// Demuxer demultiplexes the progress reports and error info interleaved with the
-// packfile itself.
-//
-// A sideband has three different channels the main one, called PackData, contains
-// the packfile data; the ErrorMessage channel, that contains server errors; and
-// the last one, ProgressMessage channel, containing information about the ongoing
-// task happening in the server (optional, can be suppressed sending NoProgress
-// or Quiet capabilities to the server)
-//
-// In order to demultiplex the data stream, method `Read` should be called to
-// retrieve the PackData channel, the incoming data from the ProgressMessage is
-// written at `Progress` (if any), if any message is retrieved from the
-// ErrorMessage channel an error is returned and we can assume that the
-// connection has been closed.
-type Demuxer struct {
- t Type
- r io.Reader
- s *pktline.Scanner
-
- max int
- pending []byte
-
- // Progress is where the progress messages are stored
- Progress Progress
-}
-
-// NewDemuxer returns a new Demuxer for the given t and read from r
-func NewDemuxer(t Type, r io.Reader) *Demuxer {
- max := MaxPackedSize64k
- if t == Sideband {
- max = MaxPackedSize
- }
-
- return &Demuxer{
- t: t,
- r: r,
- max: max,
- s: pktline.NewScanner(r),
- }
-}
-
-// Read reads up to len(p) bytes from the PackData channel into p, an error can
-// be return if an error happens when reading or if a message is sent in the
-// ErrorMessage channel.
-//
-// When a ProgressMessage is read, is not copy to b, instead of this is written
-// to the Progress
-func (d *Demuxer) Read(b []byte) (n int, err error) {
- var read, req int
-
- req = len(b)
- for read < req {
- n, err := d.doRead(b[read:req])
- read += n
-
- if err != nil {
- return read, err
- }
- }
-
- return read, nil
-}
-
-func (d *Demuxer) doRead(b []byte) (int, error) {
- read, err := d.nextPackData()
- size := len(read)
- wanted := len(b)
-
- if size > wanted {
- d.pending = read[wanted:]
- }
-
- if wanted > size {
- wanted = size
- }
-
- size = copy(b, read[:wanted])
- return size, err
-}
-
-func (d *Demuxer) nextPackData() ([]byte, error) {
- content := d.getPending()
- if len(content) != 0 {
- return content, nil
- }
-
- if !d.s.Scan() {
- if err := d.s.Err(); err != nil {
- return nil, err
- }
-
- return nil, io.EOF
- }
-
- content = d.s.Bytes()
-
- size := len(content)
- if size == 0 {
- return nil, nil
- } else if size > d.max {
- return nil, ErrMaxPackedExceeded
- }
-
- switch Channel(content[0]) {
- case PackData:
- return content[1:], nil
- case ProgressMessage:
- if d.Progress != nil {
- _, err := d.Progress.Write(content[1:])
- return nil, err
- }
- case ErrorMessage:
- return nil, fmt.Errorf("unexpected error: %s", content[1:])
- default:
- return nil, fmt.Errorf("unknown channel %s", content)
- }
-
- return nil, nil
-}
-
-func (d *Demuxer) getPending() (b []byte) {
- if len(d.pending) == 0 {
- return nil
- }
-
- content := d.pending
- d.pending = nil
-
- return content
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/doc.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/doc.go
deleted file mode 100644
index c5d24295291..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/doc.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Package sideband implements a sideband mutiplex/demultiplexer
-package sideband
-
-// If 'side-band' or 'side-band-64k' capabilities have been specified by
-// the client, the server will send the packfile data multiplexed.
-//
-// Either mode indicates that the packfile data will be streamed broken
-// up into packets of up to either 1000 bytes in the case of 'side_band',
-// or 65520 bytes in the case of 'side_band_64k'. Each packet is made up
-// of a leading 4-byte pkt-line length of how much data is in the packet,
-// followed by a 1-byte stream code, followed by the actual data.
-//
-// The stream code can be one of:
-//
-// 1 - pack data
-// 2 - progress messages
-// 3 - fatal error message just before stream aborts
-//
-// The "side-band-64k" capability came about as a way for newer clients
-// that can handle much larger packets to request packets that are
-// actually crammed nearly full, while maintaining backward compatibility
-// for the older clients.
-//
-// Further, with side-band and its up to 1000-byte messages, it's actually
-// 999 bytes of payload and 1 byte for the stream code. With side-band-64k,
-// same deal, you have up to 65519 bytes of data and 1 byte for the stream
-// code.
-//
-// The client MUST send only maximum of one of "side-band" and "side-
-// band-64k". Server MUST diagnose it as an error if client requests
-// both.
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/muxer.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/muxer.go
deleted file mode 100644
index d51ac826952..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/muxer.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package sideband
-
-import (
- "io"
-
- "github.com/go-git/go-git/v5/plumbing/format/pktline"
-)
-
-// Muxer multiplex the packfile along with the progress messages and the error
-// information. The multiplex is perform using pktline format.
-type Muxer struct {
- max int
- e *pktline.Encoder
-}
-
-const chLen = 1
-
-// NewMuxer returns a new Muxer for the given t that writes on w.
-//
-// If t is equal to `Sideband` the max pack size is set to MaxPackedSize, in any
-// other value is given, max pack is set to MaxPackedSize64k, that is the
-// maximum length of a line in pktline format.
-func NewMuxer(t Type, w io.Writer) *Muxer {
- max := MaxPackedSize64k
- if t == Sideband {
- max = MaxPackedSize
- }
-
- return &Muxer{
- max: max - chLen,
- e: pktline.NewEncoder(w),
- }
-}
-
-// Write writes p in the PackData channel
-func (m *Muxer) Write(p []byte) (int, error) {
- return m.WriteChannel(PackData, p)
-}
-
-// WriteChannel writes p in the given channel. This method can be used with any
-// channel, but is recommend use it only for the ProgressMessage and
-// ErrorMessage channels and use Write for the PackData channel
-func (m *Muxer) WriteChannel(t Channel, p []byte) (int, error) {
- wrote := 0
- size := len(p)
- for wrote < size {
- n, err := m.doWrite(t, p[wrote:])
- wrote += n
-
- if err != nil {
- return wrote, err
- }
- }
-
- return wrote, nil
-}
-
-func (m *Muxer) doWrite(ch Channel, p []byte) (int, error) {
- sz := len(p)
- if sz > m.max {
- sz = m.max
- }
-
- return sz, m.e.Encode(ch.WithPayload(p[:sz]))
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/srvresp.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/srvresp.go
deleted file mode 100644
index a9ddb538b27..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/srvresp.go
+++ /dev/null
@@ -1,141 +0,0 @@
-package packp
-
-import (
- "bufio"
- "bytes"
- "errors"
- "fmt"
- "io"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/format/pktline"
-)
-
-const ackLineLen = 44
-
-// ServerResponse object acknowledgement from upload-pack service
-type ServerResponse struct {
- ACKs []plumbing.Hash
-}
-
-// Decode decodes the response into the struct, isMultiACK should be true, if
-// the request was done with multi_ack or multi_ack_detailed capabilities.
-func (r *ServerResponse) Decode(reader *bufio.Reader, isMultiACK bool) error {
- s := pktline.NewScanner(reader)
-
- for s.Scan() {
- line := s.Bytes()
-
- if err := r.decodeLine(line); err != nil {
- return err
- }
-
- // we need to detect when the end of a response header and the beginning
- // of a packfile header happened, some requests to the git daemon
- // produces a duplicate ACK header even when multi_ack is not supported.
- stop, err := r.stopReading(reader)
- if err != nil {
- return err
- }
-
- if stop {
- break
- }
- }
-
- // isMultiACK is true when the remote server advertises the related
- // capabilities when they are not in transport.UnsupportedCapabilities.
- //
- // Users may decide to remove multi_ack and multi_ack_detailed from the
- // unsupported capabilities list, which allows them to do initial clones
- // from Azure DevOps.
- //
- // Follow-up fetches may error, therefore errors are wrapped with additional
- // information highlighting that this capabilities are not supported by go-git.
- //
- // TODO: Implement support for multi_ack or multi_ack_detailed responses.
- err := s.Err()
- if err != nil && isMultiACK {
- return fmt.Errorf("multi_ack and multi_ack_detailed are not supported: %w", err)
- }
-
- return err
-}
-
-// stopReading detects when a valid command such as ACK or NAK is found to be
-// read in the buffer without moving the read pointer.
-func (r *ServerResponse) stopReading(reader *bufio.Reader) (bool, error) {
- ahead, err := reader.Peek(7)
- if err == io.EOF {
- return true, nil
- }
-
- if err != nil {
- return false, err
- }
-
- if len(ahead) > 4 && r.isValidCommand(ahead[0:3]) {
- return false, nil
- }
-
- if len(ahead) == 7 && r.isValidCommand(ahead[4:]) {
- return false, nil
- }
-
- return true, nil
-}
-
-func (r *ServerResponse) isValidCommand(b []byte) bool {
- commands := [][]byte{ack, nak}
- for _, c := range commands {
- if bytes.Equal(b, c) {
- return true
- }
- }
-
- return false
-}
-
-func (r *ServerResponse) decodeLine(line []byte) error {
- if len(line) == 0 {
- return fmt.Errorf("unexpected flush")
- }
-
- if len(line) >= 3 {
- if bytes.Equal(line[0:3], ack) {
- return r.decodeACKLine(line)
- }
-
- if bytes.Equal(line[0:3], nak) {
- return nil
- }
- }
-
- return fmt.Errorf("unexpected content %q", string(line))
-}
-
-func (r *ServerResponse) decodeACKLine(line []byte) error {
- if len(line) < ackLineLen {
- return fmt.Errorf("malformed ACK %q", line)
- }
-
- sp := bytes.Index(line, []byte(" "))
- h := plumbing.NewHash(string(line[sp+1 : sp+41]))
- r.ACKs = append(r.ACKs, h)
- return nil
-}
-
-// Encode encodes the ServerResponse into a writer.
-func (r *ServerResponse) Encode(w io.Writer, isMultiACK bool) error {
- if len(r.ACKs) > 1 && !isMultiACK {
- // For further information, refer to comments in the Decode func above.
- return errors.New("multi_ack and multi_ack_detailed are not supported")
- }
-
- e := pktline.NewEncoder(w)
- if len(r.ACKs) == 0 {
- return e.Encodef("%s\n", nak)
- }
-
- return e.Encodef("%s %s\n", ack, r.ACKs[0].String())
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq.go
deleted file mode 100644
index 344f8c7e3ab..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq.go
+++ /dev/null
@@ -1,168 +0,0 @@
-package packp
-
-import (
- "fmt"
- "time"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability"
-)
-
-// UploadRequest values represent the information transmitted on a
-// upload-request message. Values from this type are not zero-value
-// safe, use the New function instead.
-// This is a low level type, use UploadPackRequest instead.
-type UploadRequest struct {
- Capabilities *capability.List
- Wants []plumbing.Hash
- Shallows []plumbing.Hash
- Depth Depth
-}
-
-// Depth values stores the desired depth of the requested packfile: see
-// DepthCommit, DepthSince and DepthReference.
-type Depth interface {
- isDepth()
- IsZero() bool
-}
-
-// DepthCommits values stores the maximum number of requested commits in
-// the packfile. Zero means infinite. A negative value will have
-// undefined consequences.
-type DepthCommits int
-
-func (d DepthCommits) isDepth() {}
-
-func (d DepthCommits) IsZero() bool {
- return d == 0
-}
-
-// DepthSince values requests only commits newer than the specified time.
-type DepthSince time.Time
-
-func (d DepthSince) isDepth() {}
-
-func (d DepthSince) IsZero() bool {
- return time.Time(d).IsZero()
-}
-
-// DepthReference requests only commits not to found in the specified reference.
-type DepthReference string
-
-func (d DepthReference) isDepth() {}
-
-func (d DepthReference) IsZero() bool {
- return string(d) == ""
-}
-
-// NewUploadRequest returns a pointer to a new UploadRequest value, ready to be
-// used. It has no capabilities, wants or shallows and an infinite depth. Please
-// note that to encode an upload-request it has to have at least one wanted hash.
-func NewUploadRequest() *UploadRequest {
- return &UploadRequest{
- Capabilities: capability.NewList(),
- Wants: []plumbing.Hash{},
- Shallows: []plumbing.Hash{},
- Depth: DepthCommits(0),
- }
-}
-
-// NewUploadRequestFromCapabilities returns a pointer to a new UploadRequest
-// value, the request capabilities are filled with the most optimal ones, based
-// on the adv value (advertised capabilities), the UploadRequest generated it
-// has no wants or shallows and an infinite depth.
-func NewUploadRequestFromCapabilities(adv *capability.List) *UploadRequest {
- r := NewUploadRequest()
-
- if adv.Supports(capability.MultiACKDetailed) {
- r.Capabilities.Set(capability.MultiACKDetailed)
- } else if adv.Supports(capability.MultiACK) {
- r.Capabilities.Set(capability.MultiACK)
- }
-
- if adv.Supports(capability.Sideband64k) {
- r.Capabilities.Set(capability.Sideband64k)
- } else if adv.Supports(capability.Sideband) {
- r.Capabilities.Set(capability.Sideband)
- }
-
- if adv.Supports(capability.ThinPack) {
- r.Capabilities.Set(capability.ThinPack)
- }
-
- if adv.Supports(capability.OFSDelta) {
- r.Capabilities.Set(capability.OFSDelta)
- }
-
- if adv.Supports(capability.Agent) {
- r.Capabilities.Set(capability.Agent, capability.DefaultAgent())
- }
-
- return r
-}
-
-// Validate validates the content of UploadRequest, following the next rules:
-// - Wants MUST have at least one reference
-// - capability.Shallow MUST be present if Shallows is not empty
-// - is a non-zero DepthCommits is given capability.Shallow MUST be present
-// - is a DepthSince is given capability.Shallow MUST be present
-// - is a DepthReference is given capability.DeepenNot MUST be present
-// - MUST contain only maximum of one of capability.Sideband and capability.Sideband64k
-// - MUST contain only maximum of one of capability.MultiACK and capability.MultiACKDetailed
-func (req *UploadRequest) Validate() error {
- if len(req.Wants) == 0 {
- return fmt.Errorf("want can't be empty")
- }
-
- if err := req.validateRequiredCapabilities(); err != nil {
- return err
- }
-
- if err := req.validateConflictCapabilities(); err != nil {
- return err
- }
-
- return nil
-}
-
-func (req *UploadRequest) validateRequiredCapabilities() error {
- msg := "missing capability %s"
-
- if len(req.Shallows) != 0 && !req.Capabilities.Supports(capability.Shallow) {
- return fmt.Errorf(msg, capability.Shallow)
- }
-
- switch req.Depth.(type) {
- case DepthCommits:
- if req.Depth != DepthCommits(0) {
- if !req.Capabilities.Supports(capability.Shallow) {
- return fmt.Errorf(msg, capability.Shallow)
- }
- }
- case DepthSince:
- if !req.Capabilities.Supports(capability.DeepenSince) {
- return fmt.Errorf(msg, capability.DeepenSince)
- }
- case DepthReference:
- if !req.Capabilities.Supports(capability.DeepenNot) {
- return fmt.Errorf(msg, capability.DeepenNot)
- }
- }
-
- return nil
-}
-
-func (req *UploadRequest) validateConflictCapabilities() error {
- msg := "capabilities %s and %s are mutually exclusive"
- if req.Capabilities.Supports(capability.Sideband) &&
- req.Capabilities.Supports(capability.Sideband64k) {
- return fmt.Errorf(msg, capability.Sideband, capability.Sideband64k)
- }
-
- if req.Capabilities.Supports(capability.MultiACK) &&
- req.Capabilities.Supports(capability.MultiACKDetailed) {
- return fmt.Errorf(msg, capability.MultiACK, capability.MultiACKDetailed)
- }
-
- return nil
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_decode.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_decode.go
deleted file mode 100644
index 3da29985e25..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_decode.go
+++ /dev/null
@@ -1,257 +0,0 @@
-package packp
-
-import (
- "bytes"
- "encoding/hex"
- "fmt"
- "io"
- "strconv"
- "time"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/format/pktline"
-)
-
-// Decode reads the next upload-request form its input and
-// stores it in the UploadRequest.
-func (req *UploadRequest) Decode(r io.Reader) error {
- d := newUlReqDecoder(r)
- return d.Decode(req)
-}
-
-type ulReqDecoder struct {
- s *pktline.Scanner // a pkt-line scanner from the input stream
- line []byte // current pkt-line contents, use parser.nextLine() to make it advance
- nLine int // current pkt-line number for debugging, begins at 1
- err error // sticky error, use the parser.error() method to fill this out
- data *UploadRequest // parsed data is stored here
-}
-
-func newUlReqDecoder(r io.Reader) *ulReqDecoder {
- return &ulReqDecoder{
- s: pktline.NewScanner(r),
- }
-}
-
-func (d *ulReqDecoder) Decode(v *UploadRequest) error {
- d.data = v
-
- for state := d.decodeFirstWant; state != nil; {
- state = state()
- }
-
- return d.err
-}
-
-// fills out the parser sticky error
-func (d *ulReqDecoder) error(format string, a ...interface{}) {
- msg := fmt.Sprintf(
- "pkt-line %d: %s", d.nLine,
- fmt.Sprintf(format, a...),
- )
-
- d.err = NewErrUnexpectedData(msg, d.line)
-}
-
-// Reads a new pkt-line from the scanner, makes its payload available as
-// p.line and increments p.nLine. A successful invocation returns true,
-// otherwise, false is returned and the sticky error is filled out
-// accordingly. Trims eols at the end of the payloads.
-func (d *ulReqDecoder) nextLine() bool {
- d.nLine++
-
- if !d.s.Scan() {
- if d.err = d.s.Err(); d.err != nil {
- return false
- }
-
- d.error("EOF")
- return false
- }
-
- d.line = d.s.Bytes()
- d.line = bytes.TrimSuffix(d.line, eol)
-
- return true
-}
-
-// Expected format: want [ capabilities]
-func (d *ulReqDecoder) decodeFirstWant() stateFn {
- if ok := d.nextLine(); !ok {
- return nil
- }
-
- if !bytes.HasPrefix(d.line, want) {
- d.error("missing 'want ' prefix")
- return nil
- }
- d.line = bytes.TrimPrefix(d.line, want)
-
- hash, ok := d.readHash()
- if !ok {
- return nil
- }
- d.data.Wants = append(d.data.Wants, hash)
-
- return d.decodeCaps
-}
-
-func (d *ulReqDecoder) readHash() (plumbing.Hash, bool) {
- if len(d.line) < hashSize {
- d.err = fmt.Errorf("malformed hash: %v", d.line)
- return plumbing.ZeroHash, false
- }
-
- var hash plumbing.Hash
- if _, err := hex.Decode(hash[:], d.line[:hashSize]); err != nil {
- d.error("invalid hash text: %s", err)
- return plumbing.ZeroHash, false
- }
- d.line = d.line[hashSize:]
-
- return hash, true
-}
-
-// Expected format: sp cap1 sp cap2 sp cap3...
-func (d *ulReqDecoder) decodeCaps() stateFn {
- d.line = bytes.TrimPrefix(d.line, sp)
- if err := d.data.Capabilities.Decode(d.line); err != nil {
- d.error("invalid capabilities: %s", err)
- }
-
- return d.decodeOtherWants
-}
-
-// Expected format: want
-func (d *ulReqDecoder) decodeOtherWants() stateFn {
- if ok := d.nextLine(); !ok {
- return nil
- }
-
- if bytes.HasPrefix(d.line, shallow) {
- return d.decodeShallow
- }
-
- if bytes.HasPrefix(d.line, deepen) {
- return d.decodeDeepen
- }
-
- if len(d.line) == 0 {
- return nil
- }
-
- if !bytes.HasPrefix(d.line, want) {
- d.error("unexpected payload while expecting a want: %q", d.line)
- return nil
- }
- d.line = bytes.TrimPrefix(d.line, want)
-
- hash, ok := d.readHash()
- if !ok {
- return nil
- }
- d.data.Wants = append(d.data.Wants, hash)
-
- return d.decodeOtherWants
-}
-
-// Expected format: shallow
-func (d *ulReqDecoder) decodeShallow() stateFn {
- if bytes.HasPrefix(d.line, deepen) {
- return d.decodeDeepen
- }
-
- if len(d.line) == 0 {
- return nil
- }
-
- if !bytes.HasPrefix(d.line, shallow) {
- d.error("unexpected payload while expecting a shallow: %q", d.line)
- return nil
- }
- d.line = bytes.TrimPrefix(d.line, shallow)
-
- hash, ok := d.readHash()
- if !ok {
- return nil
- }
- d.data.Shallows = append(d.data.Shallows, hash)
-
- if ok := d.nextLine(); !ok {
- return nil
- }
-
- return d.decodeShallow
-}
-
-// Expected format: deepen / deepen-since / deepen-not [
-func (d *ulReqDecoder) decodeDeepen() stateFn {
- if bytes.HasPrefix(d.line, deepenCommits) {
- return d.decodeDeepenCommits
- }
-
- if bytes.HasPrefix(d.line, deepenSince) {
- return d.decodeDeepenSince
- }
-
- if bytes.HasPrefix(d.line, deepenReference) {
- return d.decodeDeepenReference
- }
-
- if len(d.line) == 0 {
- return nil
- }
-
- d.error("unexpected deepen specification: %q", d.line)
- return nil
-}
-
-func (d *ulReqDecoder) decodeDeepenCommits() stateFn {
- d.line = bytes.TrimPrefix(d.line, deepenCommits)
-
- var n int
- if n, d.err = strconv.Atoi(string(d.line)); d.err != nil {
- return nil
- }
- if n < 0 {
- d.err = fmt.Errorf("negative depth")
- return nil
- }
- d.data.Depth = DepthCommits(n)
-
- return d.decodeFlush
-}
-
-func (d *ulReqDecoder) decodeDeepenSince() stateFn {
- d.line = bytes.TrimPrefix(d.line, deepenSince)
-
- var secs int64
- secs, d.err = strconv.ParseInt(string(d.line), 10, 64)
- if d.err != nil {
- return nil
- }
- t := time.Unix(secs, 0).UTC()
- d.data.Depth = DepthSince(t)
-
- return d.decodeFlush
-}
-
-func (d *ulReqDecoder) decodeDeepenReference() stateFn {
- d.line = bytes.TrimPrefix(d.line, deepenReference)
-
- d.data.Depth = DepthReference(string(d.line))
-
- return d.decodeFlush
-}
-
-func (d *ulReqDecoder) decodeFlush() stateFn {
- if ok := d.nextLine(); !ok {
- return nil
- }
-
- if len(d.line) != 0 {
- d.err = fmt.Errorf("unexpected payload while expecting a flush-pkt: %q", d.line)
- }
-
- return nil
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_encode.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_encode.go
deleted file mode 100644
index c451e231640..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_encode.go
+++ /dev/null
@@ -1,145 +0,0 @@
-package packp
-
-import (
- "bytes"
- "fmt"
- "io"
- "time"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/format/pktline"
-)
-
-// Encode writes the UlReq encoding of u to the stream.
-//
-// All the payloads will end with a newline character. Wants and
-// shallows are sorted alphabetically. A depth of 0 means no depth
-// request is sent.
-func (req *UploadRequest) Encode(w io.Writer) error {
- e := newUlReqEncoder(w)
- return e.Encode(req)
-}
-
-type ulReqEncoder struct {
- pe *pktline.Encoder // where to write the encoded data
- data *UploadRequest // the data to encode
- err error // sticky error
-}
-
-func newUlReqEncoder(w io.Writer) *ulReqEncoder {
- return &ulReqEncoder{
- pe: pktline.NewEncoder(w),
- }
-}
-
-func (e *ulReqEncoder) Encode(v *UploadRequest) error {
- e.data = v
-
- if len(v.Wants) == 0 {
- return fmt.Errorf("empty wants provided")
- }
-
- plumbing.HashesSort(e.data.Wants)
- for state := e.encodeFirstWant; state != nil; {
- state = state()
- }
-
- return e.err
-}
-
-func (e *ulReqEncoder) encodeFirstWant() stateFn {
- var err error
- if e.data.Capabilities.IsEmpty() {
- err = e.pe.Encodef("want %s\n", e.data.Wants[0])
- } else {
- err = e.pe.Encodef(
- "want %s %s\n",
- e.data.Wants[0],
- e.data.Capabilities.String(),
- )
- }
-
- if err != nil {
- e.err = fmt.Errorf("encoding first want line: %s", err)
- return nil
- }
-
- return e.encodeAdditionalWants
-}
-
-func (e *ulReqEncoder) encodeAdditionalWants() stateFn {
- last := e.data.Wants[0]
- for _, w := range e.data.Wants[1:] {
- if bytes.Equal(last[:], w[:]) {
- continue
- }
-
- if err := e.pe.Encodef("want %s\n", w); err != nil {
- e.err = fmt.Errorf("encoding want %q: %s", w, err)
- return nil
- }
-
- last = w
- }
-
- return e.encodeShallows
-}
-
-func (e *ulReqEncoder) encodeShallows() stateFn {
- plumbing.HashesSort(e.data.Shallows)
-
- var last plumbing.Hash
- for _, s := range e.data.Shallows {
- if bytes.Equal(last[:], s[:]) {
- continue
- }
-
- if err := e.pe.Encodef("shallow %s\n", s); err != nil {
- e.err = fmt.Errorf("encoding shallow %q: %s", s, err)
- return nil
- }
-
- last = s
- }
-
- return e.encodeDepth
-}
-
-func (e *ulReqEncoder) encodeDepth() stateFn {
- switch depth := e.data.Depth.(type) {
- case DepthCommits:
- if depth != 0 {
- commits := int(depth)
- if err := e.pe.Encodef("deepen %d\n", commits); err != nil {
- e.err = fmt.Errorf("encoding depth %d: %s", depth, err)
- return nil
- }
- }
- case DepthSince:
- when := time.Time(depth).UTC()
- if err := e.pe.Encodef("deepen-since %d\n", when.Unix()); err != nil {
- e.err = fmt.Errorf("encoding depth %s: %s", when, err)
- return nil
- }
- case DepthReference:
- reference := string(depth)
- if err := e.pe.Encodef("deepen-not %s\n", reference); err != nil {
- e.err = fmt.Errorf("encoding depth %s: %s", reference, err)
- return nil
- }
- default:
- e.err = fmt.Errorf("unsupported depth type")
- return nil
- }
-
- return e.encodeFlush
-}
-
-func (e *ulReqEncoder) encodeFlush() stateFn {
- if err := e.pe.Flush(); err != nil {
- e.err = fmt.Errorf("encoding flush-pkt: %s", err)
- return nil
- }
-
- return nil
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq.go
deleted file mode 100644
index 8f39b39cbd3..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq.go
+++ /dev/null
@@ -1,128 +0,0 @@
-package packp
-
-import (
- "errors"
- "io"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability"
- "github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband"
-)
-
-var (
- ErrEmptyCommands = errors.New("commands cannot be empty")
- ErrMalformedCommand = errors.New("malformed command")
-)
-
-// ReferenceUpdateRequest values represent reference upload requests.
-// Values from this type are not zero-value safe, use the New function instead.
-type ReferenceUpdateRequest struct {
- Capabilities *capability.List
- Commands []*Command
- Options []*Option
- Shallow *plumbing.Hash
- // Packfile contains an optional packfile reader.
- Packfile io.ReadCloser
-
- // Progress receives sideband progress messages from the server
- Progress sideband.Progress
-}
-
-// New returns a pointer to a new ReferenceUpdateRequest value.
-func NewReferenceUpdateRequest() *ReferenceUpdateRequest {
- return &ReferenceUpdateRequest{
- // TODO: Add support for push-cert
- Capabilities: capability.NewList(),
- Commands: nil,
- }
-}
-
-// NewReferenceUpdateRequestFromCapabilities returns a pointer to a new
-// ReferenceUpdateRequest value, the request capabilities are filled with the
-// most optimal ones, based on the adv value (advertised capabilities), the
-// ReferenceUpdateRequest contains no commands
-//
-// It does set the following capabilities:
-// - agent
-// - report-status
-// - ofs-delta
-// - ref-delta
-// - delete-refs
-// It leaves up to the user to add the following capabilities later:
-// - atomic
-// - ofs-delta
-// - side-band
-// - side-band-64k
-// - quiet
-// - push-cert
-func NewReferenceUpdateRequestFromCapabilities(adv *capability.List) *ReferenceUpdateRequest {
- r := NewReferenceUpdateRequest()
-
- if adv.Supports(capability.Agent) {
- r.Capabilities.Set(capability.Agent, capability.DefaultAgent())
- }
-
- if adv.Supports(capability.ReportStatus) {
- r.Capabilities.Set(capability.ReportStatus)
- }
-
- return r
-}
-
-func (req *ReferenceUpdateRequest) validate() error {
- if len(req.Commands) == 0 {
- return ErrEmptyCommands
- }
-
- for _, c := range req.Commands {
- if err := c.validate(); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-type Action string
-
-const (
- Create Action = "create"
- Update Action = "update"
- Delete Action = "delete"
- Invalid Action = "invalid"
-)
-
-type Command struct {
- Name plumbing.ReferenceName
- Old plumbing.Hash
- New plumbing.Hash
-}
-
-func (c *Command) Action() Action {
- if c.Old == plumbing.ZeroHash && c.New == plumbing.ZeroHash {
- return Invalid
- }
-
- if c.Old == plumbing.ZeroHash {
- return Create
- }
-
- if c.New == plumbing.ZeroHash {
- return Delete
- }
-
- return Update
-}
-
-func (c *Command) validate() error {
- if c.Action() == Invalid {
- return ErrMalformedCommand
- }
-
- return nil
-}
-
-type Option struct {
- Key string
- Value string
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq_decode.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq_decode.go
deleted file mode 100644
index 076de545f8a..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq_decode.go
+++ /dev/null
@@ -1,249 +0,0 @@
-package packp
-
-import (
- "bytes"
- "encoding/hex"
- "errors"
- "fmt"
- "io"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/format/pktline"
-)
-
-var (
- shallowLineLength = len(shallow) + hashSize
- minCommandLength = hashSize*2 + 2 + 1
- minCommandAndCapsLength = minCommandLength + 1
-)
-
-var (
- ErrEmpty = errors.New("empty update-request message")
- errNoCommands = errors.New("unexpected EOF before any command")
- errMissingCapabilitiesDelimiter = errors.New("capabilities delimiter not found")
-)
-
-func errMalformedRequest(reason string) error {
- return fmt.Errorf("malformed request: %s", reason)
-}
-
-func errInvalidHashSize(got int) error {
- return fmt.Errorf("invalid hash size: expected %d, got %d",
- hashSize, got)
-}
-
-func errInvalidHash(err error) error {
- return fmt.Errorf("invalid hash: %s", err.Error())
-}
-
-func errInvalidShallowLineLength(got int) error {
- return errMalformedRequest(fmt.Sprintf(
- "invalid shallow line length: expected %d, got %d",
- shallowLineLength, got))
-}
-
-func errInvalidCommandCapabilitiesLineLength(got int) error {
- return errMalformedRequest(fmt.Sprintf(
- "invalid command and capabilities line length: expected at least %d, got %d",
- minCommandAndCapsLength, got))
-}
-
-func errInvalidCommandLineLength(got int) error {
- return errMalformedRequest(fmt.Sprintf(
- "invalid command line length: expected at least %d, got %d",
- minCommandLength, got))
-}
-
-func errInvalidShallowObjId(err error) error {
- return errMalformedRequest(
- fmt.Sprintf("invalid shallow object id: %s", err.Error()))
-}
-
-func errInvalidOldObjId(err error) error {
- return errMalformedRequest(
- fmt.Sprintf("invalid old object id: %s", err.Error()))
-}
-
-func errInvalidNewObjId(err error) error {
- return errMalformedRequest(
- fmt.Sprintf("invalid new object id: %s", err.Error()))
-}
-
-func errMalformedCommand(err error) error {
- return errMalformedRequest(fmt.Sprintf(
- "malformed command: %s", err.Error()))
-}
-
-// Decode reads the next update-request message form the reader and wr
-func (req *ReferenceUpdateRequest) Decode(r io.Reader) error {
- var rc io.ReadCloser
- var ok bool
- rc, ok = r.(io.ReadCloser)
- if !ok {
- rc = io.NopCloser(r)
- }
-
- d := &updReqDecoder{r: rc, s: pktline.NewScanner(r)}
- return d.Decode(req)
-}
-
-type updReqDecoder struct {
- r io.ReadCloser
- s *pktline.Scanner
- req *ReferenceUpdateRequest
-}
-
-func (d *updReqDecoder) Decode(req *ReferenceUpdateRequest) error {
- d.req = req
- funcs := []func() error{
- d.scanLine,
- d.decodeShallow,
- d.decodeCommandAndCapabilities,
- d.decodeCommands,
- d.setPackfile,
- req.validate,
- }
-
- for _, f := range funcs {
- if err := f(); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (d *updReqDecoder) scanLine() error {
- if ok := d.s.Scan(); !ok {
- return d.scanErrorOr(ErrEmpty)
- }
-
- return nil
-}
-
-func (d *updReqDecoder) decodeShallow() error {
- b := d.s.Bytes()
-
- if !bytes.HasPrefix(b, shallowNoSp) {
- return nil
- }
-
- if len(b) != shallowLineLength {
- return errInvalidShallowLineLength(len(b))
- }
-
- h, err := parseHash(string(b[len(shallow):]))
- if err != nil {
- return errInvalidShallowObjId(err)
- }
-
- if ok := d.s.Scan(); !ok {
- return d.scanErrorOr(errNoCommands)
- }
-
- d.req.Shallow = &h
-
- return nil
-}
-
-func (d *updReqDecoder) decodeCommands() error {
- for {
- b := d.s.Bytes()
- if bytes.Equal(b, pktline.Flush) {
- return nil
- }
-
- c, err := parseCommand(b)
- if err != nil {
- return err
- }
-
- d.req.Commands = append(d.req.Commands, c)
-
- if ok := d.s.Scan(); !ok {
- return d.s.Err()
- }
- }
-}
-
-func (d *updReqDecoder) decodeCommandAndCapabilities() error {
- b := d.s.Bytes()
- i := bytes.IndexByte(b, 0)
- if i == -1 {
- return errMissingCapabilitiesDelimiter
- }
-
- if len(b) < minCommandAndCapsLength {
- return errInvalidCommandCapabilitiesLineLength(len(b))
- }
-
- cmd, err := parseCommand(b[:i])
- if err != nil {
- return err
- }
-
- d.req.Commands = append(d.req.Commands, cmd)
-
- if err := d.req.Capabilities.Decode(b[i+1:]); err != nil {
- return err
- }
-
- if err := d.scanLine(); err != nil {
- return err
- }
-
- return nil
-}
-
-func (d *updReqDecoder) setPackfile() error {
- d.req.Packfile = d.r
-
- return nil
-}
-
-func parseCommand(b []byte) (*Command, error) {
- if len(b) < minCommandLength {
- return nil, errInvalidCommandLineLength(len(b))
- }
-
- var (
- os, ns string
- n plumbing.ReferenceName
- )
- if _, err := fmt.Sscanf(string(b), "%s %s %s", &os, &ns, &n); err != nil {
- return nil, errMalformedCommand(err)
- }
-
- oh, err := parseHash(os)
- if err != nil {
- return nil, errInvalidOldObjId(err)
- }
-
- nh, err := parseHash(ns)
- if err != nil {
- return nil, errInvalidNewObjId(err)
- }
-
- return &Command{Old: oh, New: nh, Name: n}, nil
-}
-
-func parseHash(s string) (plumbing.Hash, error) {
- if len(s) != hashSize {
- return plumbing.ZeroHash, errInvalidHashSize(len(s))
- }
-
- if _, err := hex.DecodeString(s); err != nil {
- return plumbing.ZeroHash, errInvalidHash(err)
- }
-
- h := plumbing.NewHash(s)
- return h, nil
-}
-
-func (d *updReqDecoder) scanErrorOr(origErr error) error {
- if err := d.s.Err(); err != nil {
- return err
- }
-
- return origErr
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq_encode.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq_encode.go
deleted file mode 100644
index 1205cfaf18d..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq_encode.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package packp
-
-import (
- "fmt"
- "io"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/format/pktline"
- "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability"
-)
-
-// Encode writes the ReferenceUpdateRequest encoding to the stream.
-func (req *ReferenceUpdateRequest) Encode(w io.Writer) error {
- if err := req.validate(); err != nil {
- return err
- }
-
- e := pktline.NewEncoder(w)
-
- if err := req.encodeShallow(e, req.Shallow); err != nil {
- return err
- }
-
- if err := req.encodeCommands(e, req.Commands, req.Capabilities); err != nil {
- return err
- }
-
- if req.Capabilities.Supports(capability.PushOptions) {
- if err := req.encodeOptions(e, req.Options); err != nil {
- return err
- }
- }
-
- if req.Packfile != nil {
- if _, err := io.Copy(w, req.Packfile); err != nil {
- return err
- }
-
- return req.Packfile.Close()
- }
-
- return nil
-}
-
-func (req *ReferenceUpdateRequest) encodeShallow(e *pktline.Encoder,
- h *plumbing.Hash) error {
-
- if h == nil {
- return nil
- }
-
- objId := []byte(h.String())
- return e.Encodef("%s%s", shallow, objId)
-}
-
-func (req *ReferenceUpdateRequest) encodeCommands(e *pktline.Encoder,
- cmds []*Command, cap *capability.List) error {
-
- if err := e.Encodef("%s\x00%s",
- formatCommand(cmds[0]), cap.String()); err != nil {
- return err
- }
-
- for _, cmd := range cmds[1:] {
- if err := e.Encodef(formatCommand(cmd)); err != nil {
- return err
- }
- }
-
- return e.Flush()
-}
-
-func formatCommand(cmd *Command) string {
- o := cmd.Old.String()
- n := cmd.New.String()
- return fmt.Sprintf("%s %s %s", o, n, cmd.Name)
-}
-
-func (req *ReferenceUpdateRequest) encodeOptions(e *pktline.Encoder,
- opts []*Option) error {
-
- for _, opt := range opts {
- if err := e.Encodef("%s=%s", opt.Key, opt.Value); err != nil {
- return err
- }
- }
-
- return e.Flush()
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/uppackreq.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/uppackreq.go
deleted file mode 100644
index 48f44385647..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/uppackreq.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package packp
-
-import (
- "bytes"
- "fmt"
- "io"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/format/pktline"
- "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability"
-)
-
-// UploadPackRequest represents a upload-pack request.
-// Zero-value is not safe, use NewUploadPackRequest instead.
-type UploadPackRequest struct {
- UploadRequest
- UploadHaves
-}
-
-// NewUploadPackRequest creates a new UploadPackRequest and returns a pointer.
-func NewUploadPackRequest() *UploadPackRequest {
- ur := NewUploadRequest()
- return &UploadPackRequest{
- UploadHaves: UploadHaves{},
- UploadRequest: *ur,
- }
-}
-
-// NewUploadPackRequestFromCapabilities creates a new UploadPackRequest and
-// returns a pointer. The request capabilities are filled with the most optimal
-// ones, based on the adv value (advertised capabilities), the UploadPackRequest
-// it has no wants, haves or shallows and an infinite depth
-func NewUploadPackRequestFromCapabilities(adv *capability.List) *UploadPackRequest {
- ur := NewUploadRequestFromCapabilities(adv)
- return &UploadPackRequest{
- UploadHaves: UploadHaves{},
- UploadRequest: *ur,
- }
-}
-
-// IsEmpty returns whether a request is empty - it is empty if Haves are contained
-// in the Wants, or if Wants length is zero, and we don't have any shallows
-func (r *UploadPackRequest) IsEmpty() bool {
- return isSubset(r.Wants, r.Haves) && len(r.Shallows) == 0
-}
-
-func isSubset(needle []plumbing.Hash, haystack []plumbing.Hash) bool {
- for _, h := range needle {
- found := false
- for _, oh := range haystack {
- if h == oh {
- found = true
- break
- }
- }
-
- if !found {
- return false
- }
- }
-
- return true
-}
-
-// UploadHaves is a message to signal the references that a client has in a
-// upload-pack. Do not use this directly. Use UploadPackRequest request instead.
-type UploadHaves struct {
- Haves []plumbing.Hash
-}
-
-// Encode encodes the UploadHaves into the Writer. If flush is true, a flush
-// command will be encoded at the end of the writer content.
-func (u *UploadHaves) Encode(w io.Writer, flush bool) error {
- e := pktline.NewEncoder(w)
-
- plumbing.HashesSort(u.Haves)
-
- var last plumbing.Hash
- for _, have := range u.Haves {
- if bytes.Equal(last[:], have[:]) {
- continue
- }
-
- if err := e.Encodef("have %s\n", have); err != nil {
- return fmt.Errorf("sending haves for %q: %s", have, err)
- }
-
- last = have
- }
-
- if flush && len(u.Haves) != 0 {
- if err := e.Flush(); err != nil {
- return fmt.Errorf("sending flush-pkt after haves: %s", err)
- }
- }
-
- return nil
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/uppackresp.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/uppackresp.go
deleted file mode 100644
index a485cb7b268..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/uppackresp.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package packp
-
-import (
- "errors"
- "io"
-
- "bufio"
-
- "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability"
- "github.com/go-git/go-git/v5/utils/ioutil"
-)
-
-// ErrUploadPackResponseNotDecoded is returned if Read is called without
-// decoding first
-var ErrUploadPackResponseNotDecoded = errors.New("upload-pack-response should be decoded")
-
-// UploadPackResponse contains all the information responded by the upload-pack
-// service, the response implements io.ReadCloser that allows to read the
-// packfile directly from it.
-type UploadPackResponse struct {
- ShallowUpdate
- ServerResponse
-
- r io.ReadCloser
- isShallow bool
- isMultiACK bool
-}
-
-// NewUploadPackResponse create a new UploadPackResponse instance, the request
-// being responded by the response is required.
-func NewUploadPackResponse(req *UploadPackRequest) *UploadPackResponse {
- isShallow := !req.Depth.IsZero()
- isMultiACK := req.Capabilities.Supports(capability.MultiACK) ||
- req.Capabilities.Supports(capability.MultiACKDetailed)
-
- return &UploadPackResponse{
- isShallow: isShallow,
- isMultiACK: isMultiACK,
- }
-}
-
-// NewUploadPackResponseWithPackfile creates a new UploadPackResponse instance,
-// and sets its packfile reader.
-func NewUploadPackResponseWithPackfile(req *UploadPackRequest,
- pf io.ReadCloser) *UploadPackResponse {
-
- r := NewUploadPackResponse(req)
- r.r = pf
- return r
-}
-
-// Decode decodes all the responses sent by upload-pack service into the struct
-// and prepares it to read the packfile using the Read method
-func (r *UploadPackResponse) Decode(reader io.ReadCloser) error {
- buf := bufio.NewReader(reader)
-
- if r.isShallow {
- if err := r.ShallowUpdate.Decode(buf); err != nil {
- return err
- }
- }
-
- if err := r.ServerResponse.Decode(buf, r.isMultiACK); err != nil {
- return err
- }
-
- // now the reader is ready to read the packfile content
- r.r = ioutil.NewReadCloser(buf, reader)
-
- return nil
-}
-
-// Encode encodes an UploadPackResponse.
-func (r *UploadPackResponse) Encode(w io.Writer) (err error) {
- if r.isShallow {
- if err := r.ShallowUpdate.Encode(w); err != nil {
- return err
- }
- }
-
- if err := r.ServerResponse.Encode(w, r.isMultiACK); err != nil {
- return err
- }
-
- defer ioutil.CheckClose(r.r, &err)
- _, err = io.Copy(w, r.r)
- return err
-}
-
-// Read reads the packfile data, if the request was done with any Sideband
-// capability the content read should be demultiplexed. If the methods wasn't
-// called before the ErrUploadPackResponseNotDecoded will be return
-func (r *UploadPackResponse) Read(p []byte) (int, error) {
- if r.r == nil {
- return 0, ErrUploadPackResponseNotDecoded
- }
-
- return r.r.Read(p)
-}
-
-// Close the underlying reader, if any
-func (r *UploadPackResponse) Close() error {
- if r.r == nil {
- return nil
- }
-
- return r.r.Close()
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/reference.go b/vendor/github.com/go-git/go-git/v5/plumbing/reference.go
deleted file mode 100644
index ddba930292d..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/reference.go
+++ /dev/null
@@ -1,315 +0,0 @@
-package plumbing
-
-import (
- "errors"
- "fmt"
- "regexp"
- "strings"
-)
-
-const (
- refPrefix = "refs/"
- refHeadPrefix = refPrefix + "heads/"
- refTagPrefix = refPrefix + "tags/"
- refRemotePrefix = refPrefix + "remotes/"
- refNotePrefix = refPrefix + "notes/"
- symrefPrefix = "ref: "
-)
-
-// RefRevParseRules are a set of rules to parse references into short names, or expand into a full reference.
-// These are the same rules as used by git in shorten_unambiguous_ref and expand_ref.
-// See: https://github.com/git/git/blob/e0aaa1b6532cfce93d87af9bc813fb2e7a7ce9d7/refs.c#L417
-var RefRevParseRules = []string{
- "%s",
- "refs/%s",
- "refs/tags/%s",
- "refs/heads/%s",
- "refs/remotes/%s",
- "refs/remotes/%s/HEAD",
-}
-
-var (
- ErrReferenceNotFound = errors.New("reference not found")
-
- // ErrInvalidReferenceName is returned when a reference name is invalid.
- ErrInvalidReferenceName = errors.New("invalid reference name")
-)
-
-// ReferenceType reference type's
-type ReferenceType int8
-
-const (
- InvalidReference ReferenceType = 0
- HashReference ReferenceType = 1
- SymbolicReference ReferenceType = 2
-)
-
-func (r ReferenceType) String() string {
- switch r {
- case InvalidReference:
- return "invalid-reference"
- case HashReference:
- return "hash-reference"
- case SymbolicReference:
- return "symbolic-reference"
- }
-
- return ""
-}
-
-// ReferenceName reference name's
-type ReferenceName string
-
-// NewBranchReferenceName returns a reference name describing a branch based on
-// his short name.
-func NewBranchReferenceName(name string) ReferenceName {
- return ReferenceName(refHeadPrefix + name)
-}
-
-// NewNoteReferenceName returns a reference name describing a note based on his
-// short name.
-func NewNoteReferenceName(name string) ReferenceName {
- return ReferenceName(refNotePrefix + name)
-}
-
-// NewRemoteReferenceName returns a reference name describing a remote branch
-// based on his short name and the remote name.
-func NewRemoteReferenceName(remote, name string) ReferenceName {
- return ReferenceName(refRemotePrefix + fmt.Sprintf("%s/%s", remote, name))
-}
-
-// NewRemoteHEADReferenceName returns a reference name describing a the HEAD
-// branch of a remote.
-func NewRemoteHEADReferenceName(remote string) ReferenceName {
- return ReferenceName(refRemotePrefix + fmt.Sprintf("%s/%s", remote, HEAD))
-}
-
-// NewTagReferenceName returns a reference name describing a tag based on short
-// his name.
-func NewTagReferenceName(name string) ReferenceName {
- return ReferenceName(refTagPrefix + name)
-}
-
-// IsBranch check if a reference is a branch
-func (r ReferenceName) IsBranch() bool {
- return strings.HasPrefix(string(r), refHeadPrefix)
-}
-
-// IsNote check if a reference is a note
-func (r ReferenceName) IsNote() bool {
- return strings.HasPrefix(string(r), refNotePrefix)
-}
-
-// IsRemote check if a reference is a remote
-func (r ReferenceName) IsRemote() bool {
- return strings.HasPrefix(string(r), refRemotePrefix)
-}
-
-// IsTag check if a reference is a tag
-func (r ReferenceName) IsTag() bool {
- return strings.HasPrefix(string(r), refTagPrefix)
-}
-
-func (r ReferenceName) String() string {
- return string(r)
-}
-
-// Short returns the short name of a ReferenceName
-func (r ReferenceName) Short() string {
- s := string(r)
- res := s
- for _, format := range RefRevParseRules[1:] {
- _, err := fmt.Sscanf(s, format, &res)
- if err == nil {
- continue
- }
- }
-
- return res
-}
-
-var (
- ctrlSeqs = regexp.MustCompile(`[\000-\037\177]`)
-)
-
-// Validate validates a reference name.
-// This follows the git-check-ref-format rules.
-// See https://git-scm.com/docs/git-check-ref-format
-//
-// It is important to note that this function does not check if the reference
-// exists in the repository.
-// It only checks if the reference name is valid.
-// This functions does not support the --refspec-pattern, --normalize, and
-// --allow-onelevel options.
-//
-// Git imposes the following rules on how references are named:
-//
-// 1. They can include slash / for hierarchical (directory) grouping, but no
-// slash-separated component can begin with a dot . or end with the
-// sequence .lock.
-// 2. They must contain at least one /. This enforces the presence of a
-// category like heads/, tags/ etc. but the actual names are not
-// restricted. If the --allow-onelevel option is used, this rule is
-// waived.
-// 3. They cannot have two consecutive dots .. anywhere.
-// 4. They cannot have ASCII control characters (i.e. bytes whose values are
-// lower than \040, or \177 DEL), space, tilde ~, caret ^, or colon :
-// anywhere.
-// 5. They cannot have question-mark ?, asterisk *, or open bracket [
-// anywhere. See the --refspec-pattern option below for an exception to this
-// rule.
-// 6. They cannot begin or end with a slash / or contain multiple consecutive
-// slashes (see the --normalize option below for an exception to this rule).
-// 7. They cannot end with a dot ..
-// 8. They cannot contain a sequence @{.
-// 9. They cannot be the single character @.
-// 10. They cannot contain a \.
-func (r ReferenceName) Validate() error {
- s := string(r)
- if len(s) == 0 {
- return ErrInvalidReferenceName
- }
-
- // HEAD is a special case
- if r == HEAD {
- return nil
- }
-
- // rule 7
- if strings.HasSuffix(s, ".") {
- return ErrInvalidReferenceName
- }
-
- // rule 2
- parts := strings.Split(s, "/")
- if len(parts) < 2 {
- return ErrInvalidReferenceName
- }
-
- isBranch := r.IsBranch()
- isTag := r.IsTag()
- for _, part := range parts {
- // rule 6
- if len(part) == 0 {
- return ErrInvalidReferenceName
- }
-
- if strings.HasPrefix(part, ".") || // rule 1
- strings.Contains(part, "..") || // rule 3
- ctrlSeqs.MatchString(part) || // rule 4
- strings.ContainsAny(part, "~^:?*[ \t\n") || // rule 4 & 5
- strings.Contains(part, "@{") || // rule 8
- part == "@" || // rule 9
- strings.Contains(part, "\\") || // rule 10
- strings.HasSuffix(part, ".lock") { // rule 1
- return ErrInvalidReferenceName
- }
-
- if (isBranch || isTag) && strings.HasPrefix(part, "-") { // branches & tags can't start with -
- return ErrInvalidReferenceName
- }
- }
-
- return nil
-}
-
-const (
- HEAD ReferenceName = "HEAD"
- Master ReferenceName = "refs/heads/master"
- Main ReferenceName = "refs/heads/main"
-)
-
-// Reference is a representation of git reference
-type Reference struct {
- t ReferenceType
- n ReferenceName
- h Hash
- target ReferenceName
-}
-
-// NewReferenceFromStrings creates a reference from name and target as string,
-// the resulting reference can be a SymbolicReference or a HashReference base
-// on the target provided
-func NewReferenceFromStrings(name, target string) *Reference {
- n := ReferenceName(name)
-
- if strings.HasPrefix(target, symrefPrefix) {
- target := ReferenceName(target[len(symrefPrefix):])
- return NewSymbolicReference(n, target)
- }
-
- return NewHashReference(n, NewHash(target))
-}
-
-// NewSymbolicReference creates a new SymbolicReference reference
-func NewSymbolicReference(n, target ReferenceName) *Reference {
- return &Reference{
- t: SymbolicReference,
- n: n,
- target: target,
- }
-}
-
-// NewHashReference creates a new HashReference reference
-func NewHashReference(n ReferenceName, h Hash) *Reference {
- return &Reference{
- t: HashReference,
- n: n,
- h: h,
- }
-}
-
-// Type returns the type of a reference
-func (r *Reference) Type() ReferenceType {
- return r.t
-}
-
-// Name returns the name of a reference
-func (r *Reference) Name() ReferenceName {
- return r.n
-}
-
-// Hash returns the hash of a hash reference
-func (r *Reference) Hash() Hash {
- return r.h
-}
-
-// Target returns the target of a symbolic reference
-func (r *Reference) Target() ReferenceName {
- return r.target
-}
-
-// Strings dump a reference as a [2]string
-func (r *Reference) Strings() [2]string {
- var o [2]string
- o[0] = r.Name().String()
-
- switch r.Type() {
- case HashReference:
- o[1] = r.Hash().String()
- case SymbolicReference:
- o[1] = symrefPrefix + r.Target().String()
- }
-
- return o
-}
-
-func (r *Reference) String() string {
- ref := ""
- switch r.Type() {
- case HashReference:
- ref = r.Hash().String()
- case SymbolicReference:
- ref = symrefPrefix + r.Target().String()
- default:
- return ""
- }
-
- name := r.Name().String()
- var v strings.Builder
- v.Grow(len(ref) + len(name) + 1)
- v.WriteString(ref)
- v.WriteString(" ")
- v.WriteString(name)
- return v.String()
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/revision.go b/vendor/github.com/go-git/go-git/v5/plumbing/revision.go
deleted file mode 100644
index 5f053b200c0..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/revision.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package plumbing
-
-// Revision represents a git revision
-// to get more details about git revisions
-// please check git manual page :
-// https://www.kernel.org/pub/software/scm/git/docs/gitrevisions.html
-type Revision string
-
-func (r Revision) String() string {
- return string(r)
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/revlist/revlist.go b/vendor/github.com/go-git/go-git/v5/plumbing/revlist/revlist.go
deleted file mode 100644
index b9109870f00..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/revlist/revlist.go
+++ /dev/null
@@ -1,230 +0,0 @@
-// Package revlist provides support to access the ancestors of commits, in a
-// similar way as the git-rev-list command.
-package revlist
-
-import (
- "fmt"
- "io"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/filemode"
- "github.com/go-git/go-git/v5/plumbing/object"
- "github.com/go-git/go-git/v5/plumbing/storer"
-)
-
-// Objects applies a complementary set. It gets all the hashes from all
-// the reachable objects from the given objects. Ignore param are object hashes
-// that we want to ignore on the result. All that objects must be accessible
-// from the object storer.
-func Objects(
- s storer.EncodedObjectStorer,
- objs,
- ignore []plumbing.Hash,
-) ([]plumbing.Hash, error) {
- return ObjectsWithStorageForIgnores(s, s, objs, ignore)
-}
-
-// ObjectsWithStorageForIgnores is the same as Objects, but a
-// secondary storage layer can be provided, to be used to finding the
-// full set of objects to be ignored while finding the reachable
-// objects. This is useful when the main `s` storage layer is slow
-// and/or remote, while the ignore list is available somewhere local.
-func ObjectsWithStorageForIgnores(
- s, ignoreStore storer.EncodedObjectStorer,
- objs,
- ignore []plumbing.Hash,
-) ([]plumbing.Hash, error) {
- ignore, err := objects(ignoreStore, ignore, nil, true)
- if err != nil {
- return nil, err
- }
-
- return objects(s, objs, ignore, false)
-}
-
-func objects(
- s storer.EncodedObjectStorer,
- objects,
- ignore []plumbing.Hash,
- allowMissingObjects bool,
-) ([]plumbing.Hash, error) {
- seen := hashListToSet(ignore)
- result := make(map[plumbing.Hash]bool)
- visited := make(map[plumbing.Hash]bool)
-
- walkerFunc := func(h plumbing.Hash) {
- if !seen[h] {
- result[h] = true
- seen[h] = true
- }
- }
-
- for _, h := range objects {
- if err := processObject(s, h, seen, visited, ignore, walkerFunc); err != nil {
- if allowMissingObjects && err == plumbing.ErrObjectNotFound {
- continue
- }
-
- return nil, err
- }
- }
-
- return hashSetToList(result), nil
-}
-
-// processObject obtains the object using the hash an process it depending of its type
-func processObject(
- s storer.EncodedObjectStorer,
- h plumbing.Hash,
- seen map[plumbing.Hash]bool,
- visited map[plumbing.Hash]bool,
- ignore []plumbing.Hash,
- walkerFunc func(h plumbing.Hash),
-) error {
- if seen[h] {
- return nil
- }
-
- o, err := s.EncodedObject(plumbing.AnyObject, h)
- if err != nil {
- return err
- }
-
- do, err := object.DecodeObject(s, o)
- if err != nil {
- return err
- }
-
- switch do := do.(type) {
- case *object.Commit:
- return reachableObjects(do, seen, visited, ignore, walkerFunc)
- case *object.Tree:
- return iterateCommitTrees(seen, do, walkerFunc)
- case *object.Tag:
- walkerFunc(do.Hash)
- return processObject(s, do.Target, seen, visited, ignore, walkerFunc)
- case *object.Blob:
- walkerFunc(do.Hash)
- default:
- return fmt.Errorf("object type not valid: %s. "+
- "Object reference: %s", o.Type(), o.Hash())
- }
-
- return nil
-}
-
-// reachableObjects returns, using the callback function, all the reachable
-// objects from the specified commit. To avoid to iterate over seen commits,
-// if a commit hash is into the 'seen' set, we will not iterate all his trees
-// and blobs objects.
-func reachableObjects(
- commit *object.Commit,
- seen map[plumbing.Hash]bool,
- visited map[plumbing.Hash]bool,
- ignore []plumbing.Hash,
- cb func(h plumbing.Hash),
-) error {
- i := object.NewCommitPreorderIter(commit, seen, ignore)
- pending := make(map[plumbing.Hash]bool)
- addPendingParents(pending, visited, commit)
- for {
- commit, err := i.Next()
- if err == io.EOF {
- break
- }
-
- if err != nil {
- return err
- }
-
- if pending[commit.Hash] {
- delete(pending, commit.Hash)
- }
-
- addPendingParents(pending, visited, commit)
-
- if visited[commit.Hash] && len(pending) == 0 {
- break
- }
-
- if seen[commit.Hash] {
- continue
- }
-
- cb(commit.Hash)
-
- tree, err := commit.Tree()
- if err != nil {
- return err
- }
-
- if err := iterateCommitTrees(seen, tree, cb); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func addPendingParents(pending, visited map[plumbing.Hash]bool, commit *object.Commit) {
- for _, p := range commit.ParentHashes {
- if !visited[p] {
- pending[p] = true
- }
- }
-}
-
-// iterateCommitTrees iterate all reachable trees from the given commit
-func iterateCommitTrees(
- seen map[plumbing.Hash]bool,
- tree *object.Tree,
- cb func(h plumbing.Hash),
-) error {
- if seen[tree.Hash] {
- return nil
- }
-
- cb(tree.Hash)
-
- treeWalker := object.NewTreeWalker(tree, true, seen)
-
- for {
- _, e, err := treeWalker.Next()
- if err == io.EOF {
- break
- }
- if err != nil {
- return err
- }
-
- if e.Mode == filemode.Submodule {
- continue
- }
-
- if seen[e.Hash] {
- continue
- }
-
- cb(e.Hash)
- }
-
- return nil
-}
-
-func hashSetToList(hashes map[plumbing.Hash]bool) []plumbing.Hash {
- var result []plumbing.Hash
- for key := range hashes {
- result = append(result, key)
- }
-
- return result
-}
-
-func hashListToSet(hashes []plumbing.Hash) map[plumbing.Hash]bool {
- result := make(map[plumbing.Hash]bool)
- for _, h := range hashes {
- result[h] = true
- }
-
- return result
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/storer/doc.go b/vendor/github.com/go-git/go-git/v5/plumbing/storer/doc.go
deleted file mode 100644
index 4d4f179c618..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/storer/doc.go
+++ /dev/null
@@ -1,2 +0,0 @@
-// Package storer defines the interfaces to store objects, references, etc.
-package storer
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/storer/index.go b/vendor/github.com/go-git/go-git/v5/plumbing/storer/index.go
deleted file mode 100644
index 33113949b3a..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/storer/index.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package storer
-
-import "github.com/go-git/go-git/v5/plumbing/format/index"
-
-// IndexStorer generic storage of index.Index
-type IndexStorer interface {
- SetIndex(*index.Index) error
- Index() (*index.Index, error)
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/storer/object.go b/vendor/github.com/go-git/go-git/v5/plumbing/storer/object.go
deleted file mode 100644
index 126b3742d83..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/storer/object.go
+++ /dev/null
@@ -1,289 +0,0 @@
-package storer
-
-import (
- "errors"
- "io"
- "time"
-
- "github.com/go-git/go-git/v5/plumbing"
-)
-
-var (
- //ErrStop is used to stop a ForEach function in an Iter
- ErrStop = errors.New("stop iter")
-)
-
-// EncodedObjectStorer generic storage of objects
-type EncodedObjectStorer interface {
- // NewEncodedObject returns a new plumbing.EncodedObject, the real type
- // of the object can be a custom implementation or the default one,
- // plumbing.MemoryObject.
- NewEncodedObject() plumbing.EncodedObject
- // SetEncodedObject saves an object into the storage, the object should
- // be create with the NewEncodedObject, method, and file if the type is
- // not supported.
- SetEncodedObject(plumbing.EncodedObject) (plumbing.Hash, error)
- // EncodedObject gets an object by hash with the given
- // plumbing.ObjectType. Implementors should return
- // (nil, plumbing.ErrObjectNotFound) if an object doesn't exist with
- // both the given hash and object type.
- //
- // Valid plumbing.ObjectType values are CommitObject, BlobObject, TagObject,
- // TreeObject and AnyObject. If plumbing.AnyObject is given, the object must
- // be looked up regardless of its type.
- EncodedObject(plumbing.ObjectType, plumbing.Hash) (plumbing.EncodedObject, error)
- // IterObjects returns a custom EncodedObjectStorer over all the object
- // on the storage.
- //
- // Valid plumbing.ObjectType values are CommitObject, BlobObject, TagObject,
- IterEncodedObjects(plumbing.ObjectType) (EncodedObjectIter, error)
- // HasEncodedObject returns ErrObjNotFound if the object doesn't
- // exist. If the object does exist, it returns nil.
- HasEncodedObject(plumbing.Hash) error
- // EncodedObjectSize returns the plaintext size of the encoded object.
- EncodedObjectSize(plumbing.Hash) (int64, error)
- AddAlternate(remote string) error
-}
-
-// DeltaObjectStorer is an EncodedObjectStorer that can return delta
-// objects.
-type DeltaObjectStorer interface {
- // DeltaObject is the same as EncodedObject but without resolving deltas.
- // Deltas will be returned as plumbing.DeltaObject instances.
- DeltaObject(plumbing.ObjectType, plumbing.Hash) (plumbing.EncodedObject, error)
-}
-
-// Transactioner is a optional method for ObjectStorer, it enables transactional read and write
-// operations.
-type Transactioner interface {
- // Begin starts a transaction.
- Begin() Transaction
-}
-
-// LooseObjectStorer is an optional interface for managing "loose"
-// objects, i.e. those not in packfiles.
-type LooseObjectStorer interface {
- // ForEachObjectHash iterates over all the (loose) object hashes
- // in the repository without necessarily having to read those objects.
- // Objects only inside pack files may be omitted.
- // If ErrStop is sent the iteration is stop but no error is returned.
- ForEachObjectHash(func(plumbing.Hash) error) error
- // LooseObjectTime looks up the (m)time associated with the
- // loose object (that is not in a pack file). Some
- // implementations (e.g. without loose objects)
- // always return an error.
- LooseObjectTime(plumbing.Hash) (time.Time, error)
- // DeleteLooseObject deletes a loose object if it exists.
- DeleteLooseObject(plumbing.Hash) error
-}
-
-// PackedObjectStorer is an optional interface for managing objects in
-// packfiles.
-type PackedObjectStorer interface {
- // ObjectPacks returns hashes of object packs if the underlying
- // implementation has pack files.
- ObjectPacks() ([]plumbing.Hash, error)
- // DeleteOldObjectPackAndIndex deletes an object pack and the corresponding index file if they exist.
- // Deletion is only performed if the pack is older than the supplied time (or the time is zero).
- DeleteOldObjectPackAndIndex(plumbing.Hash, time.Time) error
-}
-
-// PackfileWriter is an optional method for ObjectStorer, it enables directly writing
-// a packfile to storage.
-type PackfileWriter interface {
- // PackfileWriter returns a writer for writing a packfile to the storage
- //
- // If the Storer not implements PackfileWriter the objects should be written
- // using the Set method.
- PackfileWriter() (io.WriteCloser, error)
-}
-
-// EncodedObjectIter is a generic closable interface for iterating over objects.
-type EncodedObjectIter interface {
- Next() (plumbing.EncodedObject, error)
- ForEach(func(plumbing.EncodedObject) error) error
- Close()
-}
-
-// Transaction is an in-progress storage transaction. A transaction must end
-// with a call to Commit or Rollback.
-type Transaction interface {
- SetEncodedObject(plumbing.EncodedObject) (plumbing.Hash, error)
- EncodedObject(plumbing.ObjectType, plumbing.Hash) (plumbing.EncodedObject, error)
- Commit() error
- Rollback() error
-}
-
-// EncodedObjectLookupIter implements EncodedObjectIter. It iterates over a
-// series of object hashes and yields their associated objects by retrieving
-// each one from object storage. The retrievals are lazy and only occur when the
-// iterator moves forward with a call to Next().
-//
-// The EncodedObjectLookupIter must be closed with a call to Close() when it is
-// no longer needed.
-type EncodedObjectLookupIter struct {
- storage EncodedObjectStorer
- series []plumbing.Hash
- t plumbing.ObjectType
- pos int
-}
-
-// NewEncodedObjectLookupIter returns an object iterator given an object storage
-// and a slice of object hashes.
-func NewEncodedObjectLookupIter(
- storage EncodedObjectStorer, t plumbing.ObjectType, series []plumbing.Hash) *EncodedObjectLookupIter {
- return &EncodedObjectLookupIter{
- storage: storage,
- series: series,
- t: t,
- }
-}
-
-// Next returns the next object from the iterator. If the iterator has reached
-// the end it will return io.EOF as an error. If the object can't be found in
-// the object storage, it will return plumbing.ErrObjectNotFound as an error.
-// If the object is retrieved successfully error will be nil.
-func (iter *EncodedObjectLookupIter) Next() (plumbing.EncodedObject, error) {
- if iter.pos >= len(iter.series) {
- return nil, io.EOF
- }
-
- hash := iter.series[iter.pos]
- obj, err := iter.storage.EncodedObject(iter.t, hash)
- if err == nil {
- iter.pos++
- }
-
- return obj, err
-}
-
-// ForEach call the cb function for each object contained on this iter until
-// an error happens or the end of the iter is reached. If ErrStop is sent
-// the iteration is stop but no error is returned. The iterator is closed.
-func (iter *EncodedObjectLookupIter) ForEach(cb func(plumbing.EncodedObject) error) error {
- return ForEachIterator(iter, cb)
-}
-
-// Close releases any resources used by the iterator.
-func (iter *EncodedObjectLookupIter) Close() {
- iter.pos = len(iter.series)
-}
-
-// EncodedObjectSliceIter implements EncodedObjectIter. It iterates over a
-// series of objects stored in a slice and yields each one in turn when Next()
-// is called.
-//
-// The EncodedObjectSliceIter must be closed with a call to Close() when it is
-// no longer needed.
-type EncodedObjectSliceIter struct {
- series []plumbing.EncodedObject
-}
-
-// NewEncodedObjectSliceIter returns an object iterator for the given slice of
-// objects.
-func NewEncodedObjectSliceIter(series []plumbing.EncodedObject) *EncodedObjectSliceIter {
- return &EncodedObjectSliceIter{
- series: series,
- }
-}
-
-// Next returns the next object from the iterator. If the iterator has reached
-// the end it will return io.EOF as an error. If the object is retrieved
-// successfully error will be nil.
-func (iter *EncodedObjectSliceIter) Next() (plumbing.EncodedObject, error) {
- if len(iter.series) == 0 {
- return nil, io.EOF
- }
-
- obj := iter.series[0]
- iter.series = iter.series[1:]
-
- return obj, nil
-}
-
-// ForEach call the cb function for each object contained on this iter until
-// an error happens or the end of the iter is reached. If ErrStop is sent
-// the iteration is stop but no error is returned. The iterator is closed.
-func (iter *EncodedObjectSliceIter) ForEach(cb func(plumbing.EncodedObject) error) error {
- return ForEachIterator(iter, cb)
-}
-
-// Close releases any resources used by the iterator.
-func (iter *EncodedObjectSliceIter) Close() {
- iter.series = []plumbing.EncodedObject{}
-}
-
-// MultiEncodedObjectIter implements EncodedObjectIter. It iterates over several
-// EncodedObjectIter,
-//
-// The MultiObjectIter must be closed with a call to Close() when it is no
-// longer needed.
-type MultiEncodedObjectIter struct {
- iters []EncodedObjectIter
-}
-
-// NewMultiEncodedObjectIter returns an object iterator for the given slice of
-// EncodedObjectIters.
-func NewMultiEncodedObjectIter(iters []EncodedObjectIter) EncodedObjectIter {
- return &MultiEncodedObjectIter{iters: iters}
-}
-
-// Next returns the next object from the iterator, if one iterator reach io.EOF
-// is removed and the next one is used.
-func (iter *MultiEncodedObjectIter) Next() (plumbing.EncodedObject, error) {
- if len(iter.iters) == 0 {
- return nil, io.EOF
- }
-
- obj, err := iter.iters[0].Next()
- if err == io.EOF {
- iter.iters[0].Close()
- iter.iters = iter.iters[1:]
- return iter.Next()
- }
-
- return obj, err
-}
-
-// ForEach call the cb function for each object contained on this iter until
-// an error happens or the end of the iter is reached. If ErrStop is sent
-// the iteration is stop but no error is returned. The iterator is closed.
-func (iter *MultiEncodedObjectIter) ForEach(cb func(plumbing.EncodedObject) error) error {
- return ForEachIterator(iter, cb)
-}
-
-// Close releases any resources used by the iterator.
-func (iter *MultiEncodedObjectIter) Close() {
- for _, i := range iter.iters {
- i.Close()
- }
-}
-
-type bareIterator interface {
- Next() (plumbing.EncodedObject, error)
- Close()
-}
-
-// ForEachIterator is a helper function to build iterators without need to
-// rewrite the same ForEach function each time.
-func ForEachIterator(iter bareIterator, cb func(plumbing.EncodedObject) error) error {
- defer iter.Close()
- for {
- obj, err := iter.Next()
- if err != nil {
- if err == io.EOF {
- return nil
- }
-
- return err
- }
-
- if err := cb(obj); err != nil {
- if err == ErrStop {
- return nil
- }
-
- return err
- }
- }
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/storer/reference.go b/vendor/github.com/go-git/go-git/v5/plumbing/storer/reference.go
deleted file mode 100644
index 1d74ef3c6ac..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/storer/reference.go
+++ /dev/null
@@ -1,240 +0,0 @@
-package storer
-
-import (
- "errors"
- "io"
-
- "github.com/go-git/go-git/v5/plumbing"
-)
-
-const MaxResolveRecursion = 1024
-
-// ErrMaxResolveRecursion is returned by ResolveReference is MaxResolveRecursion
-// is exceeded
-var ErrMaxResolveRecursion = errors.New("max. recursion level reached")
-
-// ReferenceStorer is a generic storage of references.
-type ReferenceStorer interface {
- SetReference(*plumbing.Reference) error
- // CheckAndSetReference sets the reference `new`, but if `old` is
- // not `nil`, it first checks that the current stored value for
- // `old.Name()` matches the given reference value in `old`. If
- // not, it returns an error and doesn't update `new`.
- CheckAndSetReference(new, old *plumbing.Reference) error
- Reference(plumbing.ReferenceName) (*plumbing.Reference, error)
- IterReferences() (ReferenceIter, error)
- RemoveReference(plumbing.ReferenceName) error
- CountLooseRefs() (int, error)
- PackRefs() error
-}
-
-// ReferenceIter is a generic closable interface for iterating over references.
-type ReferenceIter interface {
- Next() (*plumbing.Reference, error)
- ForEach(func(*plumbing.Reference) error) error
- Close()
-}
-
-type referenceFilteredIter struct {
- ff func(r *plumbing.Reference) bool
- iter ReferenceIter
-}
-
-// NewReferenceFilteredIter returns a reference iterator for the given reference
-// Iterator. This iterator will iterate only references that accomplish the
-// provided function.
-func NewReferenceFilteredIter(
- ff func(r *plumbing.Reference) bool, iter ReferenceIter) ReferenceIter {
- return &referenceFilteredIter{ff, iter}
-}
-
-// Next returns the next reference from the iterator. If the iterator has reached
-// the end it will return io.EOF as an error.
-func (iter *referenceFilteredIter) Next() (*plumbing.Reference, error) {
- for {
- r, err := iter.iter.Next()
- if err != nil {
- return nil, err
- }
-
- if iter.ff(r) {
- return r, nil
- }
-
- continue
- }
-}
-
-// ForEach call the cb function for each reference contained on this iter until
-// an error happens or the end of the iter is reached. If ErrStop is sent
-// the iteration is stopped but no error is returned. The iterator is closed.
-func (iter *referenceFilteredIter) ForEach(cb func(*plumbing.Reference) error) error {
- defer iter.Close()
- for {
- r, err := iter.Next()
- if err == io.EOF {
- break
- }
- if err != nil {
- return err
- }
-
- if err := cb(r); err != nil {
- if err == ErrStop {
- break
- }
-
- return err
- }
- }
-
- return nil
-}
-
-// Close releases any resources used by the iterator.
-func (iter *referenceFilteredIter) Close() {
- iter.iter.Close()
-}
-
-// ReferenceSliceIter implements ReferenceIter. It iterates over a series of
-// references stored in a slice and yields each one in turn when Next() is
-// called.
-//
-// The ReferenceSliceIter must be closed with a call to Close() when it is no
-// longer needed.
-type ReferenceSliceIter struct {
- series []*plumbing.Reference
- pos int
-}
-
-// NewReferenceSliceIter returns a reference iterator for the given slice of
-// objects.
-func NewReferenceSliceIter(series []*plumbing.Reference) ReferenceIter {
- return &ReferenceSliceIter{
- series: series,
- }
-}
-
-// Next returns the next reference from the iterator. If the iterator has
-// reached the end it will return io.EOF as an error.
-func (iter *ReferenceSliceIter) Next() (*plumbing.Reference, error) {
- if iter.pos >= len(iter.series) {
- return nil, io.EOF
- }
-
- obj := iter.series[iter.pos]
- iter.pos++
- return obj, nil
-}
-
-// ForEach call the cb function for each reference contained on this iter until
-// an error happens or the end of the iter is reached. If ErrStop is sent
-// the iteration is stop but no error is returned. The iterator is closed.
-func (iter *ReferenceSliceIter) ForEach(cb func(*plumbing.Reference) error) error {
- return forEachReferenceIter(iter, cb)
-}
-
-type bareReferenceIterator interface {
- Next() (*plumbing.Reference, error)
- Close()
-}
-
-func forEachReferenceIter(iter bareReferenceIterator, cb func(*plumbing.Reference) error) error {
- defer iter.Close()
- for {
- obj, err := iter.Next()
- if err != nil {
- if err == io.EOF {
- return nil
- }
-
- return err
- }
-
- if err := cb(obj); err != nil {
- if err == ErrStop {
- return nil
- }
-
- return err
- }
- }
-}
-
-// Close releases any resources used by the iterator.
-func (iter *ReferenceSliceIter) Close() {
- iter.pos = len(iter.series)
-}
-
-// MultiReferenceIter implements ReferenceIter. It iterates over several
-// ReferenceIter,
-//
-// The MultiReferenceIter must be closed with a call to Close() when it is no
-// longer needed.
-type MultiReferenceIter struct {
- iters []ReferenceIter
-}
-
-// NewMultiReferenceIter returns an reference iterator for the given slice of
-// EncodedObjectIters.
-func NewMultiReferenceIter(iters []ReferenceIter) ReferenceIter {
- return &MultiReferenceIter{iters: iters}
-}
-
-// Next returns the next reference from the iterator, if one iterator reach
-// io.EOF is removed and the next one is used.
-func (iter *MultiReferenceIter) Next() (*plumbing.Reference, error) {
- if len(iter.iters) == 0 {
- return nil, io.EOF
- }
-
- obj, err := iter.iters[0].Next()
- if err == io.EOF {
- iter.iters[0].Close()
- iter.iters = iter.iters[1:]
- return iter.Next()
- }
-
- return obj, err
-}
-
-// ForEach call the cb function for each reference contained on this iter until
-// an error happens or the end of the iter is reached. If ErrStop is sent
-// the iteration is stop but no error is returned. The iterator is closed.
-func (iter *MultiReferenceIter) ForEach(cb func(*plumbing.Reference) error) error {
- return forEachReferenceIter(iter, cb)
-}
-
-// Close releases any resources used by the iterator.
-func (iter *MultiReferenceIter) Close() {
- for _, i := range iter.iters {
- i.Close()
- }
-}
-
-// ResolveReference resolves a SymbolicReference to a HashReference.
-func ResolveReference(s ReferenceStorer, n plumbing.ReferenceName) (*plumbing.Reference, error) {
- r, err := s.Reference(n)
- if err != nil || r == nil {
- return r, err
- }
- return resolveReference(s, r, 0)
-}
-
-func resolveReference(s ReferenceStorer, r *plumbing.Reference, recursion int) (*plumbing.Reference, error) {
- if r.Type() != plumbing.SymbolicReference {
- return r, nil
- }
-
- if recursion > MaxResolveRecursion {
- return nil, ErrMaxResolveRecursion
- }
-
- t, err := s.Reference(r.Target())
- if err != nil {
- return nil, err
- }
-
- recursion++
- return resolveReference(s, t, recursion)
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/storer/shallow.go b/vendor/github.com/go-git/go-git/v5/plumbing/storer/shallow.go
deleted file mode 100644
index 39ef5ea5c67..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/storer/shallow.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package storer
-
-import "github.com/go-git/go-git/v5/plumbing"
-
-// ShallowStorer is a storage of references to shallow commits by hash,
-// meaning that these commits have missing parents because of a shallow fetch.
-type ShallowStorer interface {
- SetShallow([]plumbing.Hash) error
- Shallow() ([]plumbing.Hash, error)
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/storer/storer.go b/vendor/github.com/go-git/go-git/v5/plumbing/storer/storer.go
deleted file mode 100644
index c7bc65a0c49..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/storer/storer.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package storer
-
-// Storer is a basic storer for encoded objects and references.
-type Storer interface {
- EncodedObjectStorer
- ReferenceStorer
-}
-
-// Initializer should be implemented by storers that require to perform any
-// operation when creating a new repository (i.e. git init).
-type Initializer interface {
- // Init performs initialization of the storer and returns the error, if
- // any.
- Init() error
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/client/client.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/client/client.go
deleted file mode 100644
index 1948c2301af..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/client/client.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Package client contains helper function to deal with the different client
-// protocols.
-package client
-
-import (
- "fmt"
-
- "github.com/go-git/go-git/v5/plumbing/transport"
- "github.com/go-git/go-git/v5/plumbing/transport/file"
- "github.com/go-git/go-git/v5/plumbing/transport/git"
- "github.com/go-git/go-git/v5/plumbing/transport/http"
- "github.com/go-git/go-git/v5/plumbing/transport/ssh"
-)
-
-// Protocols are the protocols supported by default.
-var Protocols = map[string]transport.Transport{
- "http": http.DefaultClient,
- "https": http.DefaultClient,
- "ssh": ssh.DefaultClient,
- "git": git.DefaultClient,
- "file": file.DefaultClient,
-}
-
-// InstallProtocol adds or modifies an existing protocol.
-func InstallProtocol(scheme string, c transport.Transport) {
- if c == nil {
- delete(Protocols, scheme)
- return
- }
-
- Protocols[scheme] = c
-}
-
-// NewClient returns the appropriate client among of the set of known protocols:
-// http://, https://, ssh:// and file://.
-// See `InstallProtocol` to add or modify protocols.
-func NewClient(endpoint *transport.Endpoint) (transport.Transport, error) {
- return getTransport(endpoint)
-}
-
-func getTransport(endpoint *transport.Endpoint) (transport.Transport, error) {
- f, ok := Protocols[endpoint.Protocol]
- if !ok {
- return nil, fmt.Errorf("unsupported scheme %q", endpoint.Protocol)
- }
-
- if f == nil {
- return nil, fmt.Errorf("malformed client for scheme %q, client is defined as nil", endpoint.Protocol)
- }
- return f, nil
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/common.go
deleted file mode 100644
index b05437fbfcd..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/common.go
+++ /dev/null
@@ -1,320 +0,0 @@
-// Package transport includes the implementation for different transport
-// protocols.
-//
-// `Client` can be used to fetch and send packfiles to a git server.
-// The `client` package provides higher level functions to instantiate the
-// appropriate `Client` based on the repository URL.
-//
-// go-git supports HTTP and SSH (see `Protocols`), but you can also install
-// your own protocols (see the `client` package).
-//
-// Each protocol has its own implementation of `Client`, but you should
-// generally not use them directly, use `client.NewClient` instead.
-package transport
-
-import (
- "bytes"
- "context"
- "errors"
- "fmt"
- "io"
- "net/url"
- "strconv"
- "strings"
-
- giturl "github.com/go-git/go-git/v5/internal/url"
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/protocol/packp"
- "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability"
-)
-
-var (
- ErrRepositoryNotFound = errors.New("repository not found")
- ErrEmptyRemoteRepository = errors.New("remote repository is empty")
- ErrAuthenticationRequired = errors.New("authentication required")
- ErrAuthorizationFailed = errors.New("authorization failed")
- ErrEmptyUploadPackRequest = errors.New("empty git-upload-pack given")
- ErrInvalidAuthMethod = errors.New("invalid auth method")
- ErrAlreadyConnected = errors.New("session already established")
-)
-
-const (
- UploadPackServiceName = "git-upload-pack"
- ReceivePackServiceName = "git-receive-pack"
-)
-
-// Transport can initiate git-upload-pack and git-receive-pack processes.
-// It is implemented both by the client and the server, making this a RPC.
-type Transport interface {
- // NewUploadPackSession starts a git-upload-pack session for an endpoint.
- NewUploadPackSession(*Endpoint, AuthMethod) (UploadPackSession, error)
- // NewReceivePackSession starts a git-receive-pack session for an endpoint.
- NewReceivePackSession(*Endpoint, AuthMethod) (ReceivePackSession, error)
-}
-
-type Session interface {
- // AdvertisedReferences retrieves the advertised references for a
- // repository.
- // If the repository does not exist, returns ErrRepositoryNotFound.
- // If the repository exists, but is empty, returns ErrEmptyRemoteRepository.
- AdvertisedReferences() (*packp.AdvRefs, error)
- // AdvertisedReferencesContext retrieves the advertised references for a
- // repository.
- // If the repository does not exist, returns ErrRepositoryNotFound.
- // If the repository exists, but is empty, returns ErrEmptyRemoteRepository.
- AdvertisedReferencesContext(context.Context) (*packp.AdvRefs, error)
- io.Closer
-}
-
-type AuthMethod interface {
- fmt.Stringer
- Name() string
-}
-
-// UploadPackSession represents a git-upload-pack session.
-// A git-upload-pack session has two steps: reference discovery
-// (AdvertisedReferences) and uploading pack (UploadPack).
-type UploadPackSession interface {
- Session
- // UploadPack takes a git-upload-pack request and returns a response,
- // including a packfile. Don't be confused by terminology, the client
- // side of a git-upload-pack is called git-fetch-pack, although here
- // the same interface is used to make it RPC-like.
- UploadPack(context.Context, *packp.UploadPackRequest) (*packp.UploadPackResponse, error)
-}
-
-// ReceivePackSession represents a git-receive-pack session.
-// A git-receive-pack session has two steps: reference discovery
-// (AdvertisedReferences) and receiving pack (ReceivePack).
-// In that order.
-type ReceivePackSession interface {
- Session
- // ReceivePack sends an update references request and a packfile
- // reader and returns a ReportStatus and error. Don't be confused by
- // terminology, the client side of a git-receive-pack is called
- // git-send-pack, although here the same interface is used to make it
- // RPC-like.
- ReceivePack(context.Context, *packp.ReferenceUpdateRequest) (*packp.ReportStatus, error)
-}
-
-// Endpoint represents a Git URL in any supported protocol.
-type Endpoint struct {
- // Protocol is the protocol of the endpoint (e.g. git, https, file).
- Protocol string
- // User is the user.
- User string
- // Password is the password.
- Password string
- // Host is the host.
- Host string
- // Port is the port to connect, if 0 the default port for the given protocol
- // will be used.
- Port int
- // Path is the repository path.
- Path string
- // InsecureSkipTLS skips ssl verify if protocol is https
- InsecureSkipTLS bool
- // CaBundle specify additional ca bundle with system cert pool
- CaBundle []byte
- // Proxy provides info required for connecting to a proxy.
- Proxy ProxyOptions
-}
-
-type ProxyOptions struct {
- URL string
- Username string
- Password string
-}
-
-func (o *ProxyOptions) Validate() error {
- if o.URL != "" {
- _, err := url.Parse(o.URL)
- return err
- }
- return nil
-}
-
-func (o *ProxyOptions) FullURL() (*url.URL, error) {
- proxyURL, err := url.Parse(o.URL)
- if err != nil {
- return nil, err
- }
- if o.Username != "" {
- if o.Password != "" {
- proxyURL.User = url.UserPassword(o.Username, o.Password)
- } else {
- proxyURL.User = url.User(o.Username)
- }
- }
- return proxyURL, nil
-}
-
-var defaultPorts = map[string]int{
- "http": 80,
- "https": 443,
- "git": 9418,
- "ssh": 22,
-}
-
-// String returns a string representation of the Git URL.
-func (u *Endpoint) String() string {
- var buf bytes.Buffer
- if u.Protocol != "" {
- buf.WriteString(u.Protocol)
- buf.WriteByte(':')
- }
-
- if u.Protocol != "" || u.Host != "" || u.User != "" || u.Password != "" {
- buf.WriteString("//")
-
- if u.User != "" || u.Password != "" {
- buf.WriteString(url.PathEscape(u.User))
- if u.Password != "" {
- buf.WriteByte(':')
- buf.WriteString(url.PathEscape(u.Password))
- }
-
- buf.WriteByte('@')
- }
-
- if u.Host != "" {
- buf.WriteString(u.Host)
-
- if u.Port != 0 {
- port, ok := defaultPorts[strings.ToLower(u.Protocol)]
- if !ok || ok && port != u.Port {
- fmt.Fprintf(&buf, ":%d", u.Port)
- }
- }
- }
- }
-
- if u.Path != "" && u.Path[0] != '/' && u.Host != "" {
- buf.WriteByte('/')
- }
-
- buf.WriteString(u.Path)
- return buf.String()
-}
-
-func NewEndpoint(endpoint string) (*Endpoint, error) {
- if e, ok := parseSCPLike(endpoint); ok {
- return e, nil
- }
-
- if e, ok := parseFile(endpoint); ok {
- return e, nil
- }
-
- return parseURL(endpoint)
-}
-
-func parseURL(endpoint string) (*Endpoint, error) {
- u, err := url.Parse(endpoint)
- if err != nil {
- return nil, err
- }
-
- if !u.IsAbs() {
- return nil, plumbing.NewPermanentError(fmt.Errorf(
- "invalid endpoint: %s", endpoint,
- ))
- }
-
- var user, pass string
- if u.User != nil {
- user = u.User.Username()
- pass, _ = u.User.Password()
- }
-
- host := u.Hostname()
- if strings.Contains(host, ":") {
- // IPv6 address
- host = "[" + host + "]"
- }
-
- return &Endpoint{
- Protocol: u.Scheme,
- User: user,
- Password: pass,
- Host: host,
- Port: getPort(u),
- Path: getPath(u),
- }, nil
-}
-
-func getPort(u *url.URL) int {
- p := u.Port()
- if p == "" {
- return 0
- }
-
- i, err := strconv.Atoi(p)
- if err != nil {
- return 0
- }
-
- return i
-}
-
-func getPath(u *url.URL) string {
- var res string = u.Path
- if u.RawQuery != "" {
- res += "?" + u.RawQuery
- }
-
- if u.Fragment != "" {
- res += "#" + u.Fragment
- }
-
- return res
-}
-
-func parseSCPLike(endpoint string) (*Endpoint, bool) {
- if giturl.MatchesScheme(endpoint) || !giturl.MatchesScpLike(endpoint) {
- return nil, false
- }
-
- user, host, portStr, path := giturl.FindScpLikeComponents(endpoint)
- port, err := strconv.Atoi(portStr)
- if err != nil {
- port = 22
- }
-
- return &Endpoint{
- Protocol: "ssh",
- User: user,
- Host: host,
- Port: port,
- Path: path,
- }, true
-}
-
-func parseFile(endpoint string) (*Endpoint, bool) {
- if giturl.MatchesScheme(endpoint) {
- return nil, false
- }
-
- path := endpoint
- return &Endpoint{
- Protocol: "file",
- Path: path,
- }, true
-}
-
-// UnsupportedCapabilities are the capabilities not supported by any client
-// implementation
-var UnsupportedCapabilities = []capability.Capability{
- capability.MultiACK,
- capability.MultiACKDetailed,
- capability.ThinPack,
-}
-
-// FilterUnsupportedCapabilities it filter out all the UnsupportedCapabilities
-// from a capability.List, the intended usage is on the client implementation
-// to filter the capabilities from an AdvRefs message.
-func FilterUnsupportedCapabilities(list *capability.List) {
- for _, c := range UnsupportedCapabilities {
- list.Delete(c)
- }
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/client.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/client.go
deleted file mode 100644
index 38714e2ad1c..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/client.go
+++ /dev/null
@@ -1,156 +0,0 @@
-// Package file implements the file transport protocol.
-package file
-
-import (
- "bufio"
- "errors"
- "io"
- "os"
- "path/filepath"
- "strings"
-
- "github.com/go-git/go-git/v5/plumbing/transport"
- "github.com/go-git/go-git/v5/plumbing/transport/internal/common"
- "golang.org/x/sys/execabs"
-)
-
-// DefaultClient is the default local client.
-var DefaultClient = NewClient(
- transport.UploadPackServiceName,
- transport.ReceivePackServiceName,
-)
-
-type runner struct {
- UploadPackBin string
- ReceivePackBin string
-}
-
-// NewClient returns a new local client using the given git-upload-pack and
-// git-receive-pack binaries.
-func NewClient(uploadPackBin, receivePackBin string) transport.Transport {
- return common.NewClient(&runner{
- UploadPackBin: uploadPackBin,
- ReceivePackBin: receivePackBin,
- })
-}
-
-func prefixExecPath(cmd string) (string, error) {
- // Use `git --exec-path` to find the exec path.
- execCmd := execabs.Command("git", "--exec-path")
-
- stdout, err := execCmd.StdoutPipe()
- if err != nil {
- return "", err
- }
- stdoutBuf := bufio.NewReader(stdout)
-
- err = execCmd.Start()
- if err != nil {
- return "", err
- }
-
- execPathBytes, isPrefix, err := stdoutBuf.ReadLine()
- if err != nil {
- return "", err
- }
- if isPrefix {
- return "", errors.New("couldn't read exec-path line all at once")
- }
-
- err = execCmd.Wait()
- if err != nil {
- return "", err
- }
- execPath := string(execPathBytes)
- execPath = strings.TrimSpace(execPath)
- cmd = filepath.Join(execPath, cmd)
-
- // Make sure it actually exists.
- _, err = execabs.LookPath(cmd)
- if err != nil {
- return "", err
- }
- return cmd, nil
-}
-
-func (r *runner) Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod,
-) (common.Command, error) {
-
- switch cmd {
- case transport.UploadPackServiceName:
- cmd = r.UploadPackBin
- case transport.ReceivePackServiceName:
- cmd = r.ReceivePackBin
- }
-
- _, err := execabs.LookPath(cmd)
- if err != nil {
- if e, ok := err.(*execabs.Error); ok && e.Err == execabs.ErrNotFound {
- cmd, err = prefixExecPath(cmd)
- if err != nil {
- return nil, err
- }
- } else {
- return nil, err
- }
- }
-
- return &command{cmd: execabs.Command(cmd, ep.Path)}, nil
-}
-
-type command struct {
- cmd *execabs.Cmd
- stderrCloser io.Closer
- closed bool
-}
-
-func (c *command) Start() error {
- return c.cmd.Start()
-}
-
-func (c *command) StderrPipe() (io.Reader, error) {
- // Pipe returned by Command.StderrPipe has a race with Read + Command.Wait.
- // We use an io.Pipe and close it after the command finishes.
- r, w := io.Pipe()
- c.cmd.Stderr = w
- c.stderrCloser = r
- return r, nil
-}
-
-func (c *command) StdinPipe() (io.WriteCloser, error) {
- return c.cmd.StdinPipe()
-}
-
-func (c *command) StdoutPipe() (io.Reader, error) {
- return c.cmd.StdoutPipe()
-}
-
-func (c *command) Kill() error {
- c.cmd.Process.Kill()
- return c.Close()
-}
-
-// Close waits for the command to exit.
-func (c *command) Close() error {
- if c.closed {
- return nil
- }
-
- defer func() {
- c.closed = true
- _ = c.stderrCloser.Close()
-
- }()
-
- err := c.cmd.Wait()
- if _, ok := err.(*os.PathError); ok {
- return nil
- }
-
- // When a repository does not exist, the command exits with code 128.
- if _, ok := err.(*execabs.ExitError); ok {
- return nil
- }
-
- return err
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/server.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/server.go
deleted file mode 100644
index b45d7a71c2f..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/server.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package file
-
-import (
- "fmt"
- "os"
-
- "github.com/go-git/go-git/v5/plumbing/transport"
- "github.com/go-git/go-git/v5/plumbing/transport/internal/common"
- "github.com/go-git/go-git/v5/plumbing/transport/server"
- "github.com/go-git/go-git/v5/utils/ioutil"
-)
-
-// ServeUploadPack serves a git-upload-pack request using standard output, input
-// and error. This is meant to be used when implementing a git-upload-pack
-// command.
-func ServeUploadPack(path string) error {
- ep, err := transport.NewEndpoint(path)
- if err != nil {
- return err
- }
-
- // TODO: define and implement a server-side AuthMethod
- s, err := server.DefaultServer.NewUploadPackSession(ep, nil)
- if err != nil {
- return fmt.Errorf("error creating session: %s", err)
- }
-
- return common.ServeUploadPack(srvCmd, s)
-}
-
-// ServeReceivePack serves a git-receive-pack request using standard output,
-// input and error. This is meant to be used when implementing a
-// git-receive-pack command.
-func ServeReceivePack(path string) error {
- ep, err := transport.NewEndpoint(path)
- if err != nil {
- return err
- }
-
- // TODO: define and implement a server-side AuthMethod
- s, err := server.DefaultServer.NewReceivePackSession(ep, nil)
- if err != nil {
- return fmt.Errorf("error creating session: %s", err)
- }
-
- return common.ServeReceivePack(srvCmd, s)
-}
-
-var srvCmd = common.ServerCommand{
- Stdin: os.Stdin,
- Stdout: ioutil.WriteNopCloser(os.Stdout),
- Stderr: os.Stderr,
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/git/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/git/common.go
deleted file mode 100644
index 2b878b0359e..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/git/common.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Package git implements the git transport protocol.
-package git
-
-import (
- "io"
- "net"
- "strconv"
-
- "github.com/go-git/go-git/v5/plumbing/protocol/packp"
- "github.com/go-git/go-git/v5/plumbing/transport"
- "github.com/go-git/go-git/v5/plumbing/transport/internal/common"
- "github.com/go-git/go-git/v5/utils/ioutil"
-)
-
-// DefaultClient is the default git client.
-var DefaultClient = common.NewClient(&runner{})
-
-const DefaultPort = 9418
-
-type runner struct{}
-
-// Command returns a new Command for the given cmd in the given Endpoint
-func (r *runner) Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod) (common.Command, error) {
- // auth not allowed since git protocol doesn't support authentication
- if auth != nil {
- return nil, transport.ErrInvalidAuthMethod
- }
- c := &command{command: cmd, endpoint: ep}
- if err := c.connect(); err != nil {
- return nil, err
- }
- return c, nil
-}
-
-type command struct {
- conn net.Conn
- connected bool
- command string
- endpoint *transport.Endpoint
-}
-
-// Start executes the command sending the required message to the TCP connection
-func (c *command) Start() error {
- req := packp.GitProtoRequest{
- RequestCommand: c.command,
- Pathname: c.endpoint.Path,
- }
- host := c.endpoint.Host
- if c.endpoint.Port != DefaultPort {
- host = net.JoinHostPort(c.endpoint.Host, strconv.Itoa(c.endpoint.Port))
- }
-
- req.Host = host
-
- return req.Encode(c.conn)
-}
-
-func (c *command) connect() error {
- if c.connected {
- return transport.ErrAlreadyConnected
- }
-
- var err error
- c.conn, err = net.Dial("tcp", c.getHostWithPort())
- if err != nil {
- return err
- }
-
- c.connected = true
- return nil
-}
-
-func (c *command) getHostWithPort() string {
- host := c.endpoint.Host
- port := c.endpoint.Port
- if port <= 0 {
- port = DefaultPort
- }
-
- return net.JoinHostPort(host, strconv.Itoa(port))
-}
-
-// StderrPipe git protocol doesn't have any dedicated error channel
-func (c *command) StderrPipe() (io.Reader, error) {
- return nil, nil
-}
-
-// StdinPipe returns the underlying connection as WriteCloser, wrapped to prevent
-// call to the Close function from the connection, a command execution in git
-// protocol can't be closed or killed
-func (c *command) StdinPipe() (io.WriteCloser, error) {
- return ioutil.WriteNopCloser(c.conn), nil
-}
-
-// StdoutPipe returns the underlying connection as Reader
-func (c *command) StdoutPipe() (io.Reader, error) {
- return c.conn, nil
-}
-
-// Close closes the TCP connection and connection.
-func (c *command) Close() error {
- if !c.connected {
- return nil
- }
-
- c.connected = false
- return c.conn.Close()
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go
deleted file mode 100644
index 1c4ceee68d0..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go
+++ /dev/null
@@ -1,452 +0,0 @@
-// Package http implements the HTTP transport protocol.
-package http
-
-import (
- "bytes"
- "context"
- "crypto/tls"
- "crypto/x509"
- "fmt"
- "net"
- "net/http"
- "net/url"
- "reflect"
- "strconv"
- "strings"
- "sync"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/protocol/packp"
- "github.com/go-git/go-git/v5/plumbing/transport"
- "github.com/go-git/go-git/v5/utils/ioutil"
- "github.com/golang/groupcache/lru"
-)
-
-// it requires a bytes.Buffer, because we need to know the length
-func applyHeadersToRequest(req *http.Request, content *bytes.Buffer, host string, requestType string) {
- req.Header.Add("User-Agent", "git/1.0")
- req.Header.Add("Host", host) // host:port
-
- if content == nil {
- req.Header.Add("Accept", "*/*")
- return
- }
-
- req.Header.Add("Accept", fmt.Sprintf("application/x-%s-result", requestType))
- req.Header.Add("Content-Type", fmt.Sprintf("application/x-%s-request", requestType))
- req.Header.Add("Content-Length", strconv.Itoa(content.Len()))
-}
-
-const infoRefsPath = "/info/refs"
-
-func advertisedReferences(ctx context.Context, s *session, serviceName string) (ref *packp.AdvRefs, err error) {
- url := fmt.Sprintf(
- "%s%s?service=%s",
- s.endpoint.String(), infoRefsPath, serviceName,
- )
-
- req, err := http.NewRequest(http.MethodGet, url, nil)
- if err != nil {
- return nil, err
- }
-
- s.ApplyAuthToRequest(req)
- applyHeadersToRequest(req, nil, s.endpoint.Host, serviceName)
- res, err := s.client.Do(req.WithContext(ctx))
- if err != nil {
- return nil, err
- }
-
- s.ModifyEndpointIfRedirect(res)
- defer ioutil.CheckClose(res.Body, &err)
-
- if err = NewErr(res); err != nil {
- return nil, err
- }
-
- ar := packp.NewAdvRefs()
- if err = ar.Decode(res.Body); err != nil {
- if err == packp.ErrEmptyAdvRefs {
- err = transport.ErrEmptyRemoteRepository
- }
-
- return nil, err
- }
-
- // Git 2.41+ returns a zero-id plus capabilities when an empty
- // repository is being cloned. This skips the existing logic within
- // advrefs_decode.decodeFirstHash, which expects a flush-pkt instead.
- //
- // This logic aligns with plumbing/transport/internal/common/common.go.
- if ar.IsEmpty() &&
- // Empty repositories are valid for git-receive-pack.
- transport.ReceivePackServiceName != serviceName {
- return nil, transport.ErrEmptyRemoteRepository
- }
-
- transport.FilterUnsupportedCapabilities(ar.Capabilities)
- s.advRefs = ar
-
- return ar, nil
-}
-
-type client struct {
- client *http.Client
- transports *lru.Cache
- mutex sync.RWMutex
-}
-
-// ClientOptions holds user configurable options for the client.
-type ClientOptions struct {
- // CacheMaxEntries is the max no. of entries that the transport objects
- // cache will hold at any given point of time. It must be a positive integer.
- // Calling `client.addTransport()` after the cache has reached the specified
- // size, will result in the least recently used transport getting deleted
- // before the provided transport is added to the cache.
- CacheMaxEntries int
-}
-
-var (
- // defaultTransportCacheSize is the default capacity of the transport objects cache.
- // Its value is 0 because transport caching is turned off by default and is an
- // opt-in feature.
- defaultTransportCacheSize = 0
-
- // DefaultClient is the default HTTP client, which uses a net/http client configured
- // with http.DefaultTransport.
- DefaultClient = NewClient(nil)
-)
-
-// NewClient creates a new client with a custom net/http client.
-// See `InstallProtocol` to install and override default http client.
-// If the net/http client is nil or empty, it will use a net/http client configured
-// with http.DefaultTransport.
-//
-// Note that for HTTP client cannot distinguish between private repositories and
-// unexistent repositories on GitHub. So it returns `ErrAuthorizationRequired`
-// for both.
-func NewClient(c *http.Client) transport.Transport {
- if c == nil {
- c = &http.Client{
- Transport: http.DefaultTransport,
- }
- }
- return NewClientWithOptions(c, &ClientOptions{
- CacheMaxEntries: defaultTransportCacheSize,
- })
-}
-
-// NewClientWithOptions returns a new client configured with the provided net/http client
-// and other custom options specific to the client.
-// If the net/http client is nil or empty, it will use a net/http client configured
-// with http.DefaultTransport.
-func NewClientWithOptions(c *http.Client, opts *ClientOptions) transport.Transport {
- if c == nil {
- c = &http.Client{
- Transport: http.DefaultTransport,
- }
- }
- cl := &client{
- client: c,
- }
-
- if opts != nil {
- if opts.CacheMaxEntries > 0 {
- cl.transports = lru.New(opts.CacheMaxEntries)
- }
- }
- return cl
-}
-
-func (c *client) NewUploadPackSession(ep *transport.Endpoint, auth transport.AuthMethod) (
- transport.UploadPackSession, error) {
-
- return newUploadPackSession(c, ep, auth)
-}
-
-func (c *client) NewReceivePackSession(ep *transport.Endpoint, auth transport.AuthMethod) (
- transport.ReceivePackSession, error) {
-
- return newReceivePackSession(c, ep, auth)
-}
-
-type session struct {
- auth AuthMethod
- client *http.Client
- endpoint *transport.Endpoint
- advRefs *packp.AdvRefs
-}
-
-func transportWithInsecureTLS(transport *http.Transport) {
- if transport.TLSClientConfig == nil {
- transport.TLSClientConfig = &tls.Config{}
- }
- transport.TLSClientConfig.InsecureSkipVerify = true
-}
-
-func transportWithCABundle(transport *http.Transport, caBundle []byte) error {
- rootCAs, err := x509.SystemCertPool()
- if err != nil {
- return err
- }
- if rootCAs == nil {
- rootCAs = x509.NewCertPool()
- }
- rootCAs.AppendCertsFromPEM(caBundle)
- if transport.TLSClientConfig == nil {
- transport.TLSClientConfig = &tls.Config{}
- }
- transport.TLSClientConfig.RootCAs = rootCAs
- return nil
-}
-
-func transportWithProxy(transport *http.Transport, proxyURL *url.URL) {
- transport.Proxy = http.ProxyURL(proxyURL)
-}
-
-func configureTransport(transport *http.Transport, ep *transport.Endpoint) error {
- if len(ep.CaBundle) > 0 {
- if err := transportWithCABundle(transport, ep.CaBundle); err != nil {
- return err
- }
- }
- if ep.InsecureSkipTLS {
- transportWithInsecureTLS(transport)
- }
-
- if ep.Proxy.URL != "" {
- proxyURL, err := ep.Proxy.FullURL()
- if err != nil {
- return err
- }
- transportWithProxy(transport, proxyURL)
- }
- return nil
-}
-
-func newSession(c *client, ep *transport.Endpoint, auth transport.AuthMethod) (*session, error) {
- var httpClient *http.Client
-
- // We need to configure the http transport if there are transport specific
- // options present in the endpoint.
- if len(ep.CaBundle) > 0 || ep.InsecureSkipTLS || ep.Proxy.URL != "" {
- var transport *http.Transport
- // if the client wasn't configured to have a cache for transports then just configure
- // the transport and use it directly, otherwise try to use the cache.
- if c.transports == nil {
- tr, ok := c.client.Transport.(*http.Transport)
- if !ok {
- return nil, fmt.Errorf("expected underlying client transport to be of type: %s; got: %s",
- reflect.TypeOf(transport), reflect.TypeOf(c.client.Transport))
- }
-
- transport = tr.Clone()
- configureTransport(transport, ep)
- } else {
- transportOpts := transportOptions{
- caBundle: string(ep.CaBundle),
- insecureSkipTLS: ep.InsecureSkipTLS,
- }
- if ep.Proxy.URL != "" {
- proxyURL, err := ep.Proxy.FullURL()
- if err != nil {
- return nil, err
- }
- transportOpts.proxyURL = *proxyURL
- }
- var found bool
- transport, found = c.fetchTransport(transportOpts)
-
- if !found {
- transport = c.client.Transport.(*http.Transport).Clone()
- configureTransport(transport, ep)
- c.addTransport(transportOpts, transport)
- }
- }
-
- httpClient = &http.Client{
- Transport: transport,
- CheckRedirect: c.client.CheckRedirect,
- Jar: c.client.Jar,
- Timeout: c.client.Timeout,
- }
- } else {
- httpClient = c.client
- }
-
- s := &session{
- auth: basicAuthFromEndpoint(ep),
- client: httpClient,
- endpoint: ep,
- }
- if auth != nil {
- a, ok := auth.(AuthMethod)
- if !ok {
- return nil, transport.ErrInvalidAuthMethod
- }
-
- s.auth = a
- }
-
- return s, nil
-}
-
-func (s *session) ApplyAuthToRequest(req *http.Request) {
- if s.auth == nil {
- return
- }
-
- s.auth.SetAuth(req)
-}
-
-func (s *session) ModifyEndpointIfRedirect(res *http.Response) {
- if res.Request == nil {
- return
- }
-
- r := res.Request
- if !strings.HasSuffix(r.URL.Path, infoRefsPath) {
- return
- }
-
- h, p, err := net.SplitHostPort(r.URL.Host)
- if err != nil {
- h = r.URL.Host
- }
- if p != "" {
- port, err := strconv.Atoi(p)
- if err == nil {
- s.endpoint.Port = port
- }
- }
- s.endpoint.Host = h
-
- s.endpoint.Protocol = r.URL.Scheme
- s.endpoint.Path = r.URL.Path[:len(r.URL.Path)-len(infoRefsPath)]
-}
-
-func (*session) Close() error {
- return nil
-}
-
-// AuthMethod is concrete implementation of common.AuthMethod for HTTP services
-type AuthMethod interface {
- transport.AuthMethod
- SetAuth(r *http.Request)
-}
-
-func basicAuthFromEndpoint(ep *transport.Endpoint) *BasicAuth {
- u := ep.User
- if u == "" {
- return nil
- }
-
- return &BasicAuth{u, ep.Password}
-}
-
-// BasicAuth represent a HTTP basic auth
-type BasicAuth struct {
- Username, Password string
-}
-
-func (a *BasicAuth) SetAuth(r *http.Request) {
- if a == nil {
- return
- }
-
- r.SetBasicAuth(a.Username, a.Password)
-}
-
-// Name is name of the auth
-func (a *BasicAuth) Name() string {
- return "http-basic-auth"
-}
-
-func (a *BasicAuth) String() string {
- masked := "*******"
- if a.Password == "" {
- masked = ""
- }
-
- return fmt.Sprintf("%s - %s:%s", a.Name(), a.Username, masked)
-}
-
-// TokenAuth implements an http.AuthMethod that can be used with http transport
-// to authenticate with HTTP token authentication (also known as bearer
-// authentication).
-//
-// IMPORTANT: If you are looking to use OAuth tokens with popular servers (e.g.
-// GitHub, Bitbucket, GitLab) you should use BasicAuth instead. These servers
-// use basic HTTP authentication, with the OAuth token as user or password.
-// Check the documentation of your git server for details.
-type TokenAuth struct {
- Token string
-}
-
-func (a *TokenAuth) SetAuth(r *http.Request) {
- if a == nil {
- return
- }
- r.Header.Add("Authorization", fmt.Sprintf("Bearer %s", a.Token))
-}
-
-// Name is name of the auth
-func (a *TokenAuth) Name() string {
- return "http-token-auth"
-}
-
-func (a *TokenAuth) String() string {
- masked := "*******"
- if a.Token == "" {
- masked = ""
- }
- return fmt.Sprintf("%s - %s", a.Name(), masked)
-}
-
-// Err is a dedicated error to return errors based on status code
-type Err struct {
- Response *http.Response
- Reason string
-}
-
-// NewErr returns a new Err based on a http response and closes response body
-// if needed
-func NewErr(r *http.Response) error {
- if r.StatusCode >= http.StatusOK && r.StatusCode < http.StatusMultipleChoices {
- return nil
- }
-
- var reason string
-
- // If a response message is present, add it to error
- var messageBuffer bytes.Buffer
- if r.Body != nil {
- messageLength, _ := messageBuffer.ReadFrom(r.Body)
- if messageLength > 0 {
- reason = messageBuffer.String()
- }
- _ = r.Body.Close()
- }
-
- switch r.StatusCode {
- case http.StatusUnauthorized:
- return transport.ErrAuthenticationRequired
- case http.StatusForbidden:
- return transport.ErrAuthorizationFailed
- case http.StatusNotFound:
- return transport.ErrRepositoryNotFound
- }
-
- return plumbing.NewUnexpectedError(&Err{r, reason})
-}
-
-// StatusCode returns the status code of the response
-func (e *Err) StatusCode() int {
- return e.Response.StatusCode
-}
-
-func (e *Err) Error() string {
- return fmt.Sprintf("unexpected requesting %q status code: %d",
- e.Response.Request.URL, e.Response.StatusCode,
- )
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/receive_pack.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/receive_pack.go
deleted file mode 100644
index 3e736cd95e8..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/receive_pack.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package http
-
-import (
- "bytes"
- "context"
- "fmt"
- "io"
- "net/http"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/protocol/packp"
- "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability"
- "github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband"
- "github.com/go-git/go-git/v5/plumbing/transport"
- "github.com/go-git/go-git/v5/utils/ioutil"
-)
-
-type rpSession struct {
- *session
-}
-
-func newReceivePackSession(c *client, ep *transport.Endpoint, auth transport.AuthMethod) (transport.ReceivePackSession, error) {
- s, err := newSession(c, ep, auth)
- return &rpSession{s}, err
-}
-
-func (s *rpSession) AdvertisedReferences() (*packp.AdvRefs, error) {
- return advertisedReferences(context.TODO(), s.session, transport.ReceivePackServiceName)
-}
-
-func (s *rpSession) AdvertisedReferencesContext(ctx context.Context) (*packp.AdvRefs, error) {
- return advertisedReferences(ctx, s.session, transport.ReceivePackServiceName)
-}
-
-func (s *rpSession) ReceivePack(ctx context.Context, req *packp.ReferenceUpdateRequest) (
- *packp.ReportStatus, error) {
- url := fmt.Sprintf(
- "%s/%s",
- s.endpoint.String(), transport.ReceivePackServiceName,
- )
-
- buf := bytes.NewBuffer(nil)
- if err := req.Encode(buf); err != nil {
- return nil, err
- }
-
- res, err := s.doRequest(ctx, http.MethodPost, url, buf)
- if err != nil {
- return nil, err
- }
-
- r, err := ioutil.NonEmptyReader(res.Body)
- if err == ioutil.ErrEmptyReader {
- return nil, nil
- }
-
- if err != nil {
- return nil, err
- }
-
- var d *sideband.Demuxer
- if req.Capabilities.Supports(capability.Sideband64k) {
- d = sideband.NewDemuxer(sideband.Sideband64k, r)
- } else if req.Capabilities.Supports(capability.Sideband) {
- d = sideband.NewDemuxer(sideband.Sideband, r)
- }
- if d != nil {
- d.Progress = req.Progress
- r = d
- }
-
- rc := ioutil.NewReadCloser(r, res.Body)
-
- report := packp.NewReportStatus()
- if err := report.Decode(rc); err != nil {
- return nil, err
- }
-
- return report, report.Error()
-}
-
-func (s *rpSession) doRequest(
- ctx context.Context, method, url string, content *bytes.Buffer,
-) (*http.Response, error) {
-
- var body io.Reader
- if content != nil {
- body = content
- }
-
- req, err := http.NewRequest(method, url, body)
- if err != nil {
- return nil, plumbing.NewPermanentError(err)
- }
-
- applyHeadersToRequest(req, content, s.endpoint.Host, transport.ReceivePackServiceName)
- s.ApplyAuthToRequest(req)
-
- res, err := s.client.Do(req.WithContext(ctx))
- if err != nil {
- return nil, plumbing.NewUnexpectedError(err)
- }
-
- if err := NewErr(res); err != nil {
- return nil, err
- }
-
- return res, nil
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/transport.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/transport.go
deleted file mode 100644
index c8db389204a..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/transport.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package http
-
-import (
- "net/http"
- "net/url"
-)
-
-// transportOptions contains transport specific configuration.
-type transportOptions struct {
- insecureSkipTLS bool
- // []byte is not comparable.
- caBundle string
- proxyURL url.URL
-}
-
-func (c *client) addTransport(opts transportOptions, transport *http.Transport) {
- c.mutex.Lock()
- c.transports.Add(opts, transport)
- c.mutex.Unlock()
-}
-
-func (c *client) removeTransport(opts transportOptions) {
- c.mutex.Lock()
- c.transports.Remove(opts)
- c.mutex.Unlock()
-}
-
-func (c *client) fetchTransport(opts transportOptions) (*http.Transport, bool) {
- c.mutex.RLock()
- t, ok := c.transports.Get(opts)
- c.mutex.RUnlock()
- if !ok {
- return nil, false
- }
- transport, ok := t.(*http.Transport)
- if !ok {
- return nil, false
- }
- return transport, true
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/upload_pack.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/upload_pack.go
deleted file mode 100644
index 3432618ab92..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/upload_pack.go
+++ /dev/null
@@ -1,126 +0,0 @@
-package http
-
-import (
- "bytes"
- "context"
- "fmt"
- "io"
- "net/http"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/format/pktline"
- "github.com/go-git/go-git/v5/plumbing/protocol/packp"
- "github.com/go-git/go-git/v5/plumbing/transport"
- "github.com/go-git/go-git/v5/plumbing/transport/internal/common"
- "github.com/go-git/go-git/v5/utils/ioutil"
-)
-
-type upSession struct {
- *session
-}
-
-func newUploadPackSession(c *client, ep *transport.Endpoint, auth transport.AuthMethod) (transport.UploadPackSession, error) {
- s, err := newSession(c, ep, auth)
- return &upSession{s}, err
-}
-
-func (s *upSession) AdvertisedReferences() (*packp.AdvRefs, error) {
- return advertisedReferences(context.TODO(), s.session, transport.UploadPackServiceName)
-}
-
-func (s *upSession) AdvertisedReferencesContext(ctx context.Context) (*packp.AdvRefs, error) {
- return advertisedReferences(ctx, s.session, transport.UploadPackServiceName)
-}
-
-func (s *upSession) UploadPack(
- ctx context.Context, req *packp.UploadPackRequest,
-) (*packp.UploadPackResponse, error) {
-
- if req.IsEmpty() {
- return nil, transport.ErrEmptyUploadPackRequest
- }
-
- if err := req.Validate(); err != nil {
- return nil, err
- }
-
- url := fmt.Sprintf(
- "%s/%s",
- s.endpoint.String(), transport.UploadPackServiceName,
- )
-
- content, err := uploadPackRequestToReader(req)
- if err != nil {
- return nil, err
- }
-
- res, err := s.doRequest(ctx, http.MethodPost, url, content)
- if err != nil {
- return nil, err
- }
-
- r, err := ioutil.NonEmptyReader(res.Body)
- if err != nil {
- if err == ioutil.ErrEmptyReader || err == io.ErrUnexpectedEOF {
- return nil, transport.ErrEmptyUploadPackRequest
- }
-
- return nil, err
- }
-
- rc := ioutil.NewReadCloser(r, res.Body)
- return common.DecodeUploadPackResponse(rc, req)
-}
-
-// Close does nothing.
-func (s *upSession) Close() error {
- return nil
-}
-
-func (s *upSession) doRequest(
- ctx context.Context, method, url string, content *bytes.Buffer,
-) (*http.Response, error) {
-
- var body io.Reader
- if content != nil {
- body = content
- }
-
- req, err := http.NewRequest(method, url, body)
- if err != nil {
- return nil, plumbing.NewPermanentError(err)
- }
-
- applyHeadersToRequest(req, content, s.endpoint.Host, transport.UploadPackServiceName)
- s.ApplyAuthToRequest(req)
-
- res, err := s.client.Do(req.WithContext(ctx))
- if err != nil {
- return nil, plumbing.NewUnexpectedError(err)
- }
-
- if err := NewErr(res); err != nil {
- return nil, err
- }
-
- return res, nil
-}
-
-func uploadPackRequestToReader(req *packp.UploadPackRequest) (*bytes.Buffer, error) {
- buf := bytes.NewBuffer(nil)
- e := pktline.NewEncoder(buf)
-
- if err := req.UploadRequest.Encode(buf); err != nil {
- return nil, fmt.Errorf("sending upload-req message: %s", err)
- }
-
- if err := req.UploadHaves.Encode(buf, false); err != nil {
- return nil, fmt.Errorf("sending haves message: %s", err)
- }
-
- if err := e.EncodeString("done\n"); err != nil {
- return nil, err
- }
-
- return buf, nil
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/common.go
deleted file mode 100644
index 9e1d02357f2..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/common.go
+++ /dev/null
@@ -1,492 +0,0 @@
-// Package common implements the git pack protocol with a pluggable transport.
-// This is a low-level package to implement new transports. Use a concrete
-// implementation instead (e.g. http, file, ssh).
-//
-// A simple example of usage can be found in the file package.
-package common
-
-import (
- "bufio"
- "context"
- "errors"
- "fmt"
- "io"
- "regexp"
- "strings"
- "time"
-
- "github.com/go-git/go-git/v5/plumbing/format/pktline"
- "github.com/go-git/go-git/v5/plumbing/protocol/packp"
- "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability"
- "github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband"
- "github.com/go-git/go-git/v5/plumbing/transport"
- "github.com/go-git/go-git/v5/utils/ioutil"
-)
-
-const (
- readErrorSecondsTimeout = 10
-)
-
-var (
- ErrTimeoutExceeded = errors.New("timeout exceeded")
- // stdErrSkipPattern is used for skipping lines from a command's stderr output.
- // Any line matching this pattern will be skipped from further
- // processing and not be returned to calling code.
- stdErrSkipPattern = regexp.MustCompile("^remote:( =*){0,1}$")
-)
-
-// Commander creates Command instances. This is the main entry point for
-// transport implementations.
-type Commander interface {
- // Command creates a new Command for the given git command and
- // endpoint. cmd can be git-upload-pack or git-receive-pack. An
- // error should be returned if the endpoint is not supported or the
- // command cannot be created (e.g. binary does not exist, connection
- // cannot be established).
- Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod) (Command, error)
-}
-
-// Command is used for a single command execution.
-// This interface is modeled after exec.Cmd and ssh.Session in the standard
-// library.
-type Command interface {
- // StderrPipe returns a pipe that will be connected to the command's
- // standard error when the command starts. It should not be called after
- // Start.
- StderrPipe() (io.Reader, error)
- // StdinPipe returns a pipe that will be connected to the command's
- // standard input when the command starts. It should not be called after
- // Start. The pipe should be closed when no more input is expected.
- StdinPipe() (io.WriteCloser, error)
- // StdoutPipe returns a pipe that will be connected to the command's
- // standard output when the command starts. It should not be called after
- // Start.
- StdoutPipe() (io.Reader, error)
- // Start starts the specified command. It does not wait for it to
- // complete.
- Start() error
- // Close closes the command and releases any resources used by it. It
- // will block until the command exits.
- Close() error
-}
-
-// CommandKiller expands the Command interface, enabling it for being killed.
-type CommandKiller interface {
- // Kill and close the session whatever the state it is. It will block until
- // the command is terminated.
- Kill() error
-}
-
-type client struct {
- cmdr Commander
-}
-
-// NewClient creates a new client using the given Commander.
-func NewClient(runner Commander) transport.Transport {
- return &client{runner}
-}
-
-// NewUploadPackSession creates a new UploadPackSession.
-func (c *client) NewUploadPackSession(ep *transport.Endpoint, auth transport.AuthMethod) (
- transport.UploadPackSession, error) {
-
- return c.newSession(transport.UploadPackServiceName, ep, auth)
-}
-
-// NewReceivePackSession creates a new ReceivePackSession.
-func (c *client) NewReceivePackSession(ep *transport.Endpoint, auth transport.AuthMethod) (
- transport.ReceivePackSession, error) {
-
- return c.newSession(transport.ReceivePackServiceName, ep, auth)
-}
-
-type session struct {
- Stdin io.WriteCloser
- Stdout io.Reader
- Command Command
-
- isReceivePack bool
- advRefs *packp.AdvRefs
- packRun bool
- finished bool
- firstErrLine chan string
-}
-
-func (c *client) newSession(s string, ep *transport.Endpoint, auth transport.AuthMethod) (*session, error) {
- cmd, err := c.cmdr.Command(s, ep, auth)
- if err != nil {
- return nil, err
- }
-
- stdin, err := cmd.StdinPipe()
- if err != nil {
- return nil, err
- }
-
- stdout, err := cmd.StdoutPipe()
- if err != nil {
- return nil, err
- }
-
- stderr, err := cmd.StderrPipe()
- if err != nil {
- return nil, err
- }
-
- if err := cmd.Start(); err != nil {
- return nil, err
- }
-
- return &session{
- Stdin: stdin,
- Stdout: stdout,
- Command: cmd,
- firstErrLine: c.listenFirstError(stderr),
- isReceivePack: s == transport.ReceivePackServiceName,
- }, nil
-}
-
-func (c *client) listenFirstError(r io.Reader) chan string {
- if r == nil {
- return nil
- }
-
- errLine := make(chan string, 1)
- go func() {
- s := bufio.NewScanner(r)
- for {
- if s.Scan() {
- line := s.Text()
- if !stdErrSkipPattern.MatchString(line) {
- errLine <- line
- break
- }
- } else {
- close(errLine)
- break
- }
- }
-
- _, _ = io.Copy(io.Discard, r)
- }()
-
- return errLine
-}
-
-func (s *session) AdvertisedReferences() (*packp.AdvRefs, error) {
- return s.AdvertisedReferencesContext(context.TODO())
-}
-
-// AdvertisedReferences retrieves the advertised references from the server.
-func (s *session) AdvertisedReferencesContext(ctx context.Context) (*packp.AdvRefs, error) {
- if s.advRefs != nil {
- return s.advRefs, nil
- }
-
- ar := packp.NewAdvRefs()
- if err := ar.Decode(s.StdoutContext(ctx)); err != nil {
- if err := s.handleAdvRefDecodeError(err); err != nil {
- return nil, err
- }
- }
-
- // Some servers like jGit, announce capabilities instead of returning an
- // packp message with a flush. This verifies that we received a empty
- // adv-refs, even it contains capabilities.
- if !s.isReceivePack && ar.IsEmpty() {
- return nil, transport.ErrEmptyRemoteRepository
- }
-
- transport.FilterUnsupportedCapabilities(ar.Capabilities)
- s.advRefs = ar
- return ar, nil
-}
-
-func (s *session) handleAdvRefDecodeError(err error) error {
- var errLine *pktline.ErrorLine
- if errors.As(err, &errLine) {
- if isRepoNotFoundError(errLine.Text) {
- return transport.ErrRepositoryNotFound
- }
-
- return errLine
- }
-
- // If repository is not found, we get empty stdout and server writes an
- // error to stderr.
- if errors.Is(err, packp.ErrEmptyInput) {
- // TODO:(v6): handle this error in a better way.
- // Instead of checking the stderr output for a specific error message,
- // define an ExitError and embed the stderr output and exit (if one
- // exists) in the error struct. Just like exec.ExitError.
- s.finished = true
- if err := s.checkNotFoundError(); err != nil {
- return err
- }
-
- return io.ErrUnexpectedEOF
- }
-
- // For empty (but existing) repositories, we get empty advertised-references
- // message. But valid. That is, it includes at least a flush.
- if err == packp.ErrEmptyAdvRefs {
- // Empty repositories are valid for git-receive-pack.
- if s.isReceivePack {
- return nil
- }
-
- if err := s.finish(); err != nil {
- return err
- }
-
- return transport.ErrEmptyRemoteRepository
- }
-
- // Some server sends the errors as normal content (git protocol), so when
- // we try to decode it fails, we need to check the content of it, to detect
- // not found errors
- if uerr, ok := err.(*packp.ErrUnexpectedData); ok {
- if isRepoNotFoundError(string(uerr.Data)) {
- return transport.ErrRepositoryNotFound
- }
- }
-
- return err
-}
-
-// UploadPack performs a request to the server to fetch a packfile. A reader is
-// returned with the packfile content. The reader must be closed after reading.
-func (s *session) UploadPack(ctx context.Context, req *packp.UploadPackRequest) (*packp.UploadPackResponse, error) {
- if req.IsEmpty() {
- // XXX: IsEmpty means haves are a subset of wants, in that case we have
- // everything we asked for. Close the connection and return nil.
- if err := s.finish(); err != nil {
- return nil, err
- }
- // TODO:(v6) return nil here
- return nil, transport.ErrEmptyUploadPackRequest
- }
-
- if err := req.Validate(); err != nil {
- return nil, err
- }
-
- if _, err := s.AdvertisedReferencesContext(ctx); err != nil {
- return nil, err
- }
-
- s.packRun = true
-
- in := s.StdinContext(ctx)
- out := s.StdoutContext(ctx)
-
- if err := uploadPack(in, out, req); err != nil {
- return nil, err
- }
-
- r, err := ioutil.NonEmptyReader(out)
- if err == ioutil.ErrEmptyReader {
- if c, ok := s.Stdout.(io.Closer); ok {
- _ = c.Close()
- }
-
- return nil, transport.ErrEmptyUploadPackRequest
- }
-
- if err != nil {
- return nil, err
- }
-
- rc := ioutil.NewReadCloser(r, s)
- return DecodeUploadPackResponse(rc, req)
-}
-
-func (s *session) StdinContext(ctx context.Context) io.WriteCloser {
- return ioutil.NewWriteCloserOnError(
- ioutil.NewContextWriteCloser(ctx, s.Stdin),
- s.onError,
- )
-}
-
-func (s *session) StdoutContext(ctx context.Context) io.Reader {
- return ioutil.NewReaderOnError(
- ioutil.NewContextReader(ctx, s.Stdout),
- s.onError,
- )
-}
-
-func (s *session) onError(err error) {
- if k, ok := s.Command.(CommandKiller); ok {
- _ = k.Kill()
- }
-
- _ = s.Close()
-}
-
-func (s *session) ReceivePack(ctx context.Context, req *packp.ReferenceUpdateRequest) (*packp.ReportStatus, error) {
- if _, err := s.AdvertisedReferences(); err != nil {
- return nil, err
- }
-
- s.packRun = true
-
- w := s.StdinContext(ctx)
- if err := req.Encode(w); err != nil {
- return nil, err
- }
-
- if err := w.Close(); err != nil {
- return nil, err
- }
-
- if !req.Capabilities.Supports(capability.ReportStatus) {
- // If we don't have report-status, we can only
- // check return value error.
- return nil, s.Command.Close()
- }
-
- r := s.StdoutContext(ctx)
-
- var d *sideband.Demuxer
- if req.Capabilities.Supports(capability.Sideband64k) {
- d = sideband.NewDemuxer(sideband.Sideband64k, r)
- } else if req.Capabilities.Supports(capability.Sideband) {
- d = sideband.NewDemuxer(sideband.Sideband, r)
- }
- if d != nil {
- d.Progress = req.Progress
- r = d
- }
-
- report := packp.NewReportStatus()
- if err := report.Decode(r); err != nil {
- return nil, err
- }
-
- if err := report.Error(); err != nil {
- defer s.Close()
- return report, err
- }
-
- return report, s.Command.Close()
-}
-
-func (s *session) finish() error {
- if s.finished {
- return nil
- }
-
- s.finished = true
-
- // If we did not run a upload/receive-pack, we close the connection
- // gracefully by sending a flush packet to the server. If the server
- // operates correctly, it will exit with status 0.
- if !s.packRun {
- _, err := s.Stdin.Write(pktline.FlushPkt)
- return err
- }
-
- return nil
-}
-
-func (s *session) Close() (err error) {
- err = s.finish()
-
- defer ioutil.CheckClose(s.Command, &err)
- return
-}
-
-func (s *session) checkNotFoundError() error {
- t := time.NewTicker(time.Second * readErrorSecondsTimeout)
- defer t.Stop()
-
- select {
- case <-t.C:
- return ErrTimeoutExceeded
- case line, ok := <-s.firstErrLine:
- if !ok || len(line) == 0 {
- return nil
- }
-
- if isRepoNotFoundError(line) {
- return transport.ErrRepositoryNotFound
- }
-
- // TODO:(v6): return server error just as it is without a prefix
- return fmt.Errorf("unknown error: %s", line)
- }
-}
-
-const (
- githubRepoNotFoundErr = "Repository not found."
- bitbucketRepoNotFoundErr = "repository does not exist."
- localRepoNotFoundErr = "does not appear to be a git repository"
- gitProtocolNotFoundErr = "Repository not found."
- gitProtocolNoSuchErr = "no such repository"
- gitProtocolAccessDeniedErr = "access denied"
- gogsAccessDeniedErr = "Repository does not exist or you do not have access"
- gitlabRepoNotFoundErr = "The project you were looking for could not be found"
-)
-
-func isRepoNotFoundError(s string) bool {
- for _, err := range []string{
- githubRepoNotFoundErr,
- bitbucketRepoNotFoundErr,
- localRepoNotFoundErr,
- gitProtocolNotFoundErr,
- gitProtocolNoSuchErr,
- gitProtocolAccessDeniedErr,
- gogsAccessDeniedErr,
- gitlabRepoNotFoundErr,
- } {
- if strings.Contains(s, err) {
- return true
- }
- }
-
- return false
-}
-
-// uploadPack implements the git-upload-pack protocol.
-func uploadPack(w io.WriteCloser, _ io.Reader, req *packp.UploadPackRequest) error {
- // TODO support multi_ack mode
- // TODO support multi_ack_detailed mode
- // TODO support acks for common objects
- // TODO build a proper state machine for all these processing options
-
- if err := req.UploadRequest.Encode(w); err != nil {
- return fmt.Errorf("sending upload-req message: %s", err)
- }
-
- if err := req.UploadHaves.Encode(w, true); err != nil {
- return fmt.Errorf("sending haves message: %s", err)
- }
-
- if err := sendDone(w); err != nil {
- return fmt.Errorf("sending done message: %s", err)
- }
-
- if err := w.Close(); err != nil {
- return fmt.Errorf("closing input: %s", err)
- }
-
- return nil
-}
-
-func sendDone(w io.Writer) error {
- e := pktline.NewEncoder(w)
-
- return e.Encodef("done\n")
-}
-
-// DecodeUploadPackResponse decodes r into a new packp.UploadPackResponse
-func DecodeUploadPackResponse(r io.ReadCloser, req *packp.UploadPackRequest) (
- *packp.UploadPackResponse, error,
-) {
- res := packp.NewUploadPackResponse(req)
- if err := res.Decode(r); err != nil {
- return nil, fmt.Errorf("error decoding upload-pack response: %s", err)
- }
-
- return res, nil
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/mocks.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/mocks.go
deleted file mode 100644
index bc18b27e81c..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/mocks.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package common
-
-import (
- "bytes"
- "io"
-
- gogitioutil "github.com/go-git/go-git/v5/utils/ioutil"
-
- "github.com/go-git/go-git/v5/plumbing/transport"
-)
-
-type MockCommand struct {
- stdin bytes.Buffer
- stdout bytes.Buffer
- stderr bytes.Buffer
-}
-
-func (c MockCommand) StderrPipe() (io.Reader, error) {
- return &c.stderr, nil
-}
-
-func (c MockCommand) StdinPipe() (io.WriteCloser, error) {
- return gogitioutil.WriteNopCloser(&c.stdin), nil
-}
-
-func (c MockCommand) StdoutPipe() (io.Reader, error) {
- return &c.stdout, nil
-}
-
-func (c MockCommand) Start() error {
- return nil
-}
-
-func (c MockCommand) Close() error {
- panic("not implemented")
-}
-
-type MockCommander struct {
- stderr string
-}
-
-func (c MockCommander) Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod) (Command, error) {
- return &MockCommand{
- stderr: *bytes.NewBufferString(c.stderr),
- }, nil
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/server.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/server.go
deleted file mode 100644
index e2480848a47..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/server.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package common
-
-import (
- "context"
- "fmt"
- "io"
-
- "github.com/go-git/go-git/v5/plumbing/protocol/packp"
- "github.com/go-git/go-git/v5/plumbing/transport"
- "github.com/go-git/go-git/v5/utils/ioutil"
-)
-
-// ServerCommand is used for a single server command execution.
-type ServerCommand struct {
- Stderr io.Writer
- Stdout io.WriteCloser
- Stdin io.Reader
-}
-
-func ServeUploadPack(cmd ServerCommand, s transport.UploadPackSession) (err error) {
- ioutil.CheckClose(cmd.Stdout, &err)
-
- ar, err := s.AdvertisedReferences()
- if err != nil {
- return err
- }
-
- if err := ar.Encode(cmd.Stdout); err != nil {
- return err
- }
-
- req := packp.NewUploadPackRequest()
- if err := req.Decode(cmd.Stdin); err != nil {
- return err
- }
-
- var resp *packp.UploadPackResponse
- resp, err = s.UploadPack(context.TODO(), req)
- if err != nil {
- return err
- }
-
- return resp.Encode(cmd.Stdout)
-}
-
-func ServeReceivePack(cmd ServerCommand, s transport.ReceivePackSession) error {
- ar, err := s.AdvertisedReferences()
- if err != nil {
- return fmt.Errorf("internal error in advertised references: %s", err)
- }
-
- if err := ar.Encode(cmd.Stdout); err != nil {
- return fmt.Errorf("error in advertised references encoding: %s", err)
- }
-
- req := packp.NewReferenceUpdateRequest()
- if err := req.Decode(cmd.Stdin); err != nil {
- return fmt.Errorf("error decoding: %s", err)
- }
-
- rs, err := s.ReceivePack(context.TODO(), req)
- if rs != nil {
- if err := rs.Encode(cmd.Stdout); err != nil {
- return fmt.Errorf("error in encoding report status %s", err)
- }
- }
-
- if err != nil {
- return fmt.Errorf("error in receive pack: %s", err)
- }
-
- return nil
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/server/loader.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/server/loader.go
deleted file mode 100644
index e7e2b075e5e..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/server/loader.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package server
-
-import (
- "github.com/go-git/go-git/v5/plumbing/cache"
- "github.com/go-git/go-git/v5/plumbing/storer"
- "github.com/go-git/go-git/v5/plumbing/transport"
- "github.com/go-git/go-git/v5/storage/filesystem"
-
- "github.com/go-git/go-billy/v5"
- "github.com/go-git/go-billy/v5/osfs"
-)
-
-// DefaultLoader is a filesystem loader ignoring host and resolving paths to /.
-var DefaultLoader = NewFilesystemLoader(osfs.New(""))
-
-// Loader loads repository's storer.Storer based on an optional host and a path.
-type Loader interface {
- // Load loads a storer.Storer given a transport.Endpoint.
- // Returns transport.ErrRepositoryNotFound if the repository does not
- // exist.
- Load(ep *transport.Endpoint) (storer.Storer, error)
-}
-
-type fsLoader struct {
- base billy.Filesystem
-}
-
-// NewFilesystemLoader creates a Loader that ignores host and resolves paths
-// with a given base filesystem.
-func NewFilesystemLoader(base billy.Filesystem) Loader {
- return &fsLoader{base}
-}
-
-// Load looks up the endpoint's path in the base file system and returns a
-// storer for it. Returns transport.ErrRepositoryNotFound if a repository does
-// not exist in the given path.
-func (l *fsLoader) Load(ep *transport.Endpoint) (storer.Storer, error) {
- fs, err := l.base.Chroot(ep.Path)
- if err != nil {
- return nil, err
- }
-
- if _, err := fs.Stat("config"); err != nil {
- return nil, transport.ErrRepositoryNotFound
- }
-
- return filesystem.NewStorage(fs, cache.NewObjectLRUDefault()), nil
-}
-
-// MapLoader is a Loader that uses a lookup map of storer.Storer by
-// transport.Endpoint.
-type MapLoader map[string]storer.Storer
-
-// Load returns a storer.Storer for given a transport.Endpoint by looking it up
-// in the map. Returns transport.ErrRepositoryNotFound if the endpoint does not
-// exist.
-func (l MapLoader) Load(ep *transport.Endpoint) (storer.Storer, error) {
- s, ok := l[ep.String()]
- if !ok {
- return nil, transport.ErrRepositoryNotFound
- }
-
- return s, nil
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/server/server.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/server/server.go
deleted file mode 100644
index cf5d6f43feb..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/server/server.go
+++ /dev/null
@@ -1,432 +0,0 @@
-// Package server implements the git server protocol. For most use cases, the
-// transport-specific implementations should be used.
-package server
-
-import (
- "context"
- "errors"
- "fmt"
- "io"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/format/packfile"
- "github.com/go-git/go-git/v5/plumbing/protocol/packp"
- "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability"
- "github.com/go-git/go-git/v5/plumbing/revlist"
- "github.com/go-git/go-git/v5/plumbing/storer"
- "github.com/go-git/go-git/v5/plumbing/transport"
- "github.com/go-git/go-git/v5/utils/ioutil"
-)
-
-var DefaultServer = NewServer(DefaultLoader)
-
-type server struct {
- loader Loader
- handler *handler
-}
-
-// NewServer returns a transport.Transport implementing a git server,
-// independent of transport. Each transport must wrap this.
-func NewServer(loader Loader) transport.Transport {
- return &server{
- loader,
- &handler{asClient: false},
- }
-}
-
-// NewClient returns a transport.Transport implementing a client with an
-// embedded server.
-func NewClient(loader Loader) transport.Transport {
- return &server{
- loader,
- &handler{asClient: true},
- }
-}
-
-func (s *server) NewUploadPackSession(ep *transport.Endpoint, auth transport.AuthMethod) (transport.UploadPackSession, error) {
- sto, err := s.loader.Load(ep)
- if err != nil {
- return nil, err
- }
-
- return s.handler.NewUploadPackSession(sto)
-}
-
-func (s *server) NewReceivePackSession(ep *transport.Endpoint, auth transport.AuthMethod) (transport.ReceivePackSession, error) {
- sto, err := s.loader.Load(ep)
- if err != nil {
- return nil, err
- }
-
- return s.handler.NewReceivePackSession(sto)
-}
-
-type handler struct {
- asClient bool
-}
-
-func (h *handler) NewUploadPackSession(s storer.Storer) (transport.UploadPackSession, error) {
- return &upSession{
- session: session{storer: s, asClient: h.asClient},
- }, nil
-}
-
-func (h *handler) NewReceivePackSession(s storer.Storer) (transport.ReceivePackSession, error) {
- return &rpSession{
- session: session{storer: s, asClient: h.asClient},
- cmdStatus: map[plumbing.ReferenceName]error{},
- }, nil
-}
-
-type session struct {
- storer storer.Storer
- caps *capability.List
- asClient bool
-}
-
-func (s *session) Close() error {
- return nil
-}
-
-func (s *session) SetAuth(transport.AuthMethod) error {
- //TODO: deprecate
- return nil
-}
-
-func (s *session) checkSupportedCapabilities(cl *capability.List) error {
- for _, c := range cl.All() {
- if !s.caps.Supports(c) {
- return fmt.Errorf("unsupported capability: %s", c)
- }
- }
-
- return nil
-}
-
-type upSession struct {
- session
-}
-
-func (s *upSession) AdvertisedReferences() (*packp.AdvRefs, error) {
- return s.AdvertisedReferencesContext(context.TODO())
-}
-
-func (s *upSession) AdvertisedReferencesContext(ctx context.Context) (*packp.AdvRefs, error) {
- ar := packp.NewAdvRefs()
-
- if err := s.setSupportedCapabilities(ar.Capabilities); err != nil {
- return nil, err
- }
-
- s.caps = ar.Capabilities
-
- if err := setReferences(s.storer, ar); err != nil {
- return nil, err
- }
-
- if err := setHEAD(s.storer, ar); err != nil {
- return nil, err
- }
-
- if s.asClient && len(ar.References) == 0 {
- return nil, transport.ErrEmptyRemoteRepository
- }
-
- return ar, nil
-}
-
-func (s *upSession) UploadPack(ctx context.Context, req *packp.UploadPackRequest) (*packp.UploadPackResponse, error) {
- if req.IsEmpty() {
- return nil, transport.ErrEmptyUploadPackRequest
- }
-
- if err := req.Validate(); err != nil {
- return nil, err
- }
-
- if s.caps == nil {
- s.caps = capability.NewList()
- if err := s.setSupportedCapabilities(s.caps); err != nil {
- return nil, err
- }
- }
-
- if err := s.checkSupportedCapabilities(req.Capabilities); err != nil {
- return nil, err
- }
-
- s.caps = req.Capabilities
-
- if len(req.Shallows) > 0 {
- return nil, fmt.Errorf("shallow not supported")
- }
-
- objs, err := s.objectsToUpload(req)
- if err != nil {
- return nil, err
- }
-
- pr, pw := io.Pipe()
- e := packfile.NewEncoder(pw, s.storer, false)
- go func() {
- // TODO: plumb through a pack window.
- _, err := e.Encode(objs, 10)
- pw.CloseWithError(err)
- }()
-
- return packp.NewUploadPackResponseWithPackfile(req,
- ioutil.NewContextReadCloser(ctx, pr),
- ), nil
-}
-
-func (s *upSession) objectsToUpload(req *packp.UploadPackRequest) ([]plumbing.Hash, error) {
- haves, err := revlist.Objects(s.storer, req.Haves, nil)
- if err != nil {
- return nil, err
- }
-
- return revlist.Objects(s.storer, req.Wants, haves)
-}
-
-func (*upSession) setSupportedCapabilities(c *capability.List) error {
- if err := c.Set(capability.Agent, capability.DefaultAgent()); err != nil {
- return err
- }
-
- if err := c.Set(capability.OFSDelta); err != nil {
- return err
- }
-
- return nil
-}
-
-type rpSession struct {
- session
- cmdStatus map[plumbing.ReferenceName]error
- firstErr error
- unpackErr error
-}
-
-func (s *rpSession) AdvertisedReferences() (*packp.AdvRefs, error) {
- return s.AdvertisedReferencesContext(context.TODO())
-}
-
-func (s *rpSession) AdvertisedReferencesContext(ctx context.Context) (*packp.AdvRefs, error) {
- ar := packp.NewAdvRefs()
-
- if err := s.setSupportedCapabilities(ar.Capabilities); err != nil {
- return nil, err
- }
-
- s.caps = ar.Capabilities
-
- if err := setReferences(s.storer, ar); err != nil {
- return nil, err
- }
-
- if err := setHEAD(s.storer, ar); err != nil {
- return nil, err
- }
-
- return ar, nil
-}
-
-var (
- ErrUpdateReference = errors.New("failed to update ref")
-)
-
-func (s *rpSession) ReceivePack(ctx context.Context, req *packp.ReferenceUpdateRequest) (*packp.ReportStatus, error) {
- if s.caps == nil {
- s.caps = capability.NewList()
- if err := s.setSupportedCapabilities(s.caps); err != nil {
- return nil, err
- }
- }
-
- if err := s.checkSupportedCapabilities(req.Capabilities); err != nil {
- return nil, err
- }
-
- s.caps = req.Capabilities
-
- //TODO: Implement 'atomic' update of references.
-
- if req.Packfile != nil {
- r := ioutil.NewContextReadCloser(ctx, req.Packfile)
- if err := s.writePackfile(r); err != nil {
- s.unpackErr = err
- s.firstErr = err
- return s.reportStatus(), err
- }
- }
-
- s.updateReferences(req)
- return s.reportStatus(), s.firstErr
-}
-
-func (s *rpSession) updateReferences(req *packp.ReferenceUpdateRequest) {
- for _, cmd := range req.Commands {
- exists, err := referenceExists(s.storer, cmd.Name)
- if err != nil {
- s.setStatus(cmd.Name, err)
- continue
- }
-
- switch cmd.Action() {
- case packp.Create:
- if exists {
- s.setStatus(cmd.Name, ErrUpdateReference)
- continue
- }
-
- ref := plumbing.NewHashReference(cmd.Name, cmd.New)
- err := s.storer.SetReference(ref)
- s.setStatus(cmd.Name, err)
- case packp.Delete:
- if !exists {
- s.setStatus(cmd.Name, ErrUpdateReference)
- continue
- }
-
- err := s.storer.RemoveReference(cmd.Name)
- s.setStatus(cmd.Name, err)
- case packp.Update:
- if !exists {
- s.setStatus(cmd.Name, ErrUpdateReference)
- continue
- }
-
- ref := plumbing.NewHashReference(cmd.Name, cmd.New)
- err := s.storer.SetReference(ref)
- s.setStatus(cmd.Name, err)
- }
- }
-}
-
-func (s *rpSession) writePackfile(r io.ReadCloser) error {
- if r == nil {
- return nil
- }
-
- if err := packfile.UpdateObjectStorage(s.storer, r); err != nil {
- _ = r.Close()
- return err
- }
-
- return r.Close()
-}
-
-func (s *rpSession) setStatus(ref plumbing.ReferenceName, err error) {
- s.cmdStatus[ref] = err
- if s.firstErr == nil && err != nil {
- s.firstErr = err
- }
-}
-
-func (s *rpSession) reportStatus() *packp.ReportStatus {
- if !s.caps.Supports(capability.ReportStatus) {
- return nil
- }
-
- rs := packp.NewReportStatus()
- rs.UnpackStatus = "ok"
-
- if s.unpackErr != nil {
- rs.UnpackStatus = s.unpackErr.Error()
- }
-
- if s.cmdStatus == nil {
- return rs
- }
-
- for ref, err := range s.cmdStatus {
- msg := "ok"
- if err != nil {
- msg = err.Error()
- }
- status := &packp.CommandStatus{
- ReferenceName: ref,
- Status: msg,
- }
- rs.CommandStatuses = append(rs.CommandStatuses, status)
- }
-
- return rs
-}
-
-func (*rpSession) setSupportedCapabilities(c *capability.List) error {
- if err := c.Set(capability.Agent, capability.DefaultAgent()); err != nil {
- return err
- }
-
- if err := c.Set(capability.OFSDelta); err != nil {
- return err
- }
-
- if err := c.Set(capability.DeleteRefs); err != nil {
- return err
- }
-
- return c.Set(capability.ReportStatus)
-}
-
-func setHEAD(s storer.Storer, ar *packp.AdvRefs) error {
- ref, err := s.Reference(plumbing.HEAD)
- if err == plumbing.ErrReferenceNotFound {
- return nil
- }
-
- if err != nil {
- return err
- }
-
- if ref.Type() == plumbing.SymbolicReference {
- if err := ar.AddReference(ref); err != nil {
- return nil
- }
-
- ref, err = storer.ResolveReference(s, ref.Target())
- if err == plumbing.ErrReferenceNotFound {
- return nil
- }
-
- if err != nil {
- return err
- }
- }
-
- if ref.Type() != plumbing.HashReference {
- return plumbing.ErrInvalidType
- }
-
- h := ref.Hash()
- ar.Head = &h
-
- return nil
-}
-
-func setReferences(s storer.Storer, ar *packp.AdvRefs) error {
- //TODO: add peeled references.
- iter, err := s.IterReferences()
- if err != nil {
- return err
- }
-
- return iter.ForEach(func(ref *plumbing.Reference) error {
- if ref.Type() != plumbing.HashReference {
- return nil
- }
-
- ar.References[ref.Name().String()] = ref.Hash()
- return nil
- })
-}
-
-func referenceExists(s storer.ReferenceStorer, n plumbing.ReferenceName) (bool, error) {
- _, err := s.Reference(n)
- if err == plumbing.ErrReferenceNotFound {
- return false, nil
- }
-
- return err == nil, err
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/auth_method.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/auth_method.go
deleted file mode 100644
index ac4e3583c8c..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/auth_method.go
+++ /dev/null
@@ -1,313 +0,0 @@
-package ssh
-
-import (
- "errors"
- "fmt"
- "os"
- "os/user"
- "path/filepath"
-
- "github.com/go-git/go-git/v5/plumbing/transport"
-
- "github.com/skeema/knownhosts"
- sshagent "github.com/xanzy/ssh-agent"
- "golang.org/x/crypto/ssh"
-)
-
-const DefaultUsername = "git"
-
-// AuthMethod is the interface all auth methods for the ssh client
-// must implement. The clientConfig method returns the ssh client
-// configuration needed to establish an ssh connection.
-type AuthMethod interface {
- transport.AuthMethod
- // ClientConfig should return a valid ssh.ClientConfig to be used to create
- // a connection to the SSH server.
- ClientConfig() (*ssh.ClientConfig, error)
-}
-
-// The names of the AuthMethod implementations. To be returned by the
-// Name() method. Most git servers only allow PublicKeysName and
-// PublicKeysCallbackName.
-const (
- KeyboardInteractiveName = "ssh-keyboard-interactive"
- PasswordName = "ssh-password"
- PasswordCallbackName = "ssh-password-callback"
- PublicKeysName = "ssh-public-keys"
- PublicKeysCallbackName = "ssh-public-key-callback"
-)
-
-// KeyboardInteractive implements AuthMethod by using a
-// prompt/response sequence controlled by the server.
-type KeyboardInteractive struct {
- User string
- Challenge ssh.KeyboardInteractiveChallenge
- HostKeyCallbackHelper
-}
-
-func (a *KeyboardInteractive) Name() string {
- return KeyboardInteractiveName
-}
-
-func (a *KeyboardInteractive) String() string {
- return fmt.Sprintf("user: %s, name: %s", a.User, a.Name())
-}
-
-func (a *KeyboardInteractive) ClientConfig() (*ssh.ClientConfig, error) {
- return a.SetHostKeyCallback(&ssh.ClientConfig{
- User: a.User,
- Auth: []ssh.AuthMethod{
- a.Challenge,
- },
- })
-}
-
-// Password implements AuthMethod by using the given password.
-type Password struct {
- User string
- Password string
- HostKeyCallbackHelper
-}
-
-func (a *Password) Name() string {
- return PasswordName
-}
-
-func (a *Password) String() string {
- return fmt.Sprintf("user: %s, name: %s", a.User, a.Name())
-}
-
-func (a *Password) ClientConfig() (*ssh.ClientConfig, error) {
- return a.SetHostKeyCallback(&ssh.ClientConfig{
- User: a.User,
- Auth: []ssh.AuthMethod{ssh.Password(a.Password)},
- })
-}
-
-// PasswordCallback implements AuthMethod by using a callback
-// to fetch the password.
-type PasswordCallback struct {
- User string
- Callback func() (pass string, err error)
- HostKeyCallbackHelper
-}
-
-func (a *PasswordCallback) Name() string {
- return PasswordCallbackName
-}
-
-func (a *PasswordCallback) String() string {
- return fmt.Sprintf("user: %s, name: %s", a.User, a.Name())
-}
-
-func (a *PasswordCallback) ClientConfig() (*ssh.ClientConfig, error) {
- return a.SetHostKeyCallback(&ssh.ClientConfig{
- User: a.User,
- Auth: []ssh.AuthMethod{ssh.PasswordCallback(a.Callback)},
- })
-}
-
-// PublicKeys implements AuthMethod by using the given key pairs.
-type PublicKeys struct {
- User string
- Signer ssh.Signer
- HostKeyCallbackHelper
-}
-
-// NewPublicKeys returns a PublicKeys from a PEM encoded private key. An
-// encryption password should be given if the pemBytes contains a password
-// encrypted PEM block otherwise password should be empty. It supports RSA
-// (PKCS#1), PKCS#8, DSA (OpenSSL), and ECDSA private keys.
-func NewPublicKeys(user string, pemBytes []byte, password string) (*PublicKeys, error) {
- signer, err := ssh.ParsePrivateKey(pemBytes)
- if _, ok := err.(*ssh.PassphraseMissingError); ok {
- signer, err = ssh.ParsePrivateKeyWithPassphrase(pemBytes, []byte(password))
- }
- if err != nil {
- return nil, err
- }
- return &PublicKeys{User: user, Signer: signer}, nil
-}
-
-// NewPublicKeysFromFile returns a PublicKeys from a file containing a PEM
-// encoded private key. An encryption password should be given if the pemBytes
-// contains a password encrypted PEM block otherwise password should be empty.
-func NewPublicKeysFromFile(user, pemFile, password string) (*PublicKeys, error) {
- bytes, err := os.ReadFile(pemFile)
- if err != nil {
- return nil, err
- }
-
- return NewPublicKeys(user, bytes, password)
-}
-
-func (a *PublicKeys) Name() string {
- return PublicKeysName
-}
-
-func (a *PublicKeys) String() string {
- return fmt.Sprintf("user: %s, name: %s", a.User, a.Name())
-}
-
-func (a *PublicKeys) ClientConfig() (*ssh.ClientConfig, error) {
- return a.SetHostKeyCallback(&ssh.ClientConfig{
- User: a.User,
- Auth: []ssh.AuthMethod{ssh.PublicKeys(a.Signer)},
- })
-}
-
-func username() (string, error) {
- var username string
- if user, err := user.Current(); err == nil {
- username = user.Username
- } else {
- username = os.Getenv("USER")
- }
-
- if username == "" {
- return "", errors.New("failed to get username")
- }
-
- return username, nil
-}
-
-// PublicKeysCallback implements AuthMethod by asking a
-// ssh.agent.Agent to act as a signer.
-type PublicKeysCallback struct {
- User string
- Callback func() (signers []ssh.Signer, err error)
- HostKeyCallbackHelper
-}
-
-// NewSSHAgentAuth returns a PublicKeysCallback based on a SSH agent, it opens
-// a pipe with the SSH agent and uses the pipe as the implementer of the public
-// key callback function.
-func NewSSHAgentAuth(u string) (*PublicKeysCallback, error) {
- var err error
- if u == "" {
- u, err = username()
- if err != nil {
- return nil, err
- }
- }
-
- a, _, err := sshagent.New()
- if err != nil {
- return nil, fmt.Errorf("error creating SSH agent: %q", err)
- }
-
- return &PublicKeysCallback{
- User: u,
- Callback: a.Signers,
- }, nil
-}
-
-func (a *PublicKeysCallback) Name() string {
- return PublicKeysCallbackName
-}
-
-func (a *PublicKeysCallback) String() string {
- return fmt.Sprintf("user: %s, name: %s", a.User, a.Name())
-}
-
-func (a *PublicKeysCallback) ClientConfig() (*ssh.ClientConfig, error) {
- return a.SetHostKeyCallback(&ssh.ClientConfig{
- User: a.User,
- Auth: []ssh.AuthMethod{ssh.PublicKeysCallback(a.Callback)},
- })
-}
-
-// NewKnownHostsCallback returns ssh.HostKeyCallback based on a file based on a
-// known_hosts file. http://man.openbsd.org/sshd#SSH_KNOWN_HOSTS_FILE_FORMAT
-//
-// If list of files is empty, then it will be read from the SSH_KNOWN_HOSTS
-// environment variable, example:
-//
-// /home/foo/custom_known_hosts_file:/etc/custom_known/hosts_file
-//
-// If SSH_KNOWN_HOSTS is not set the following file locations will be used:
-//
-// ~/.ssh/known_hosts
-// /etc/ssh/ssh_known_hosts
-func NewKnownHostsCallback(files ...string) (ssh.HostKeyCallback, error) {
- kh, err := newKnownHosts(files...)
- return ssh.HostKeyCallback(kh), err
-}
-
-func newKnownHosts(files ...string) (knownhosts.HostKeyCallback, error) {
- var err error
-
- if len(files) == 0 {
- if files, err = getDefaultKnownHostsFiles(); err != nil {
- return nil, err
- }
- }
-
- if files, err = filterKnownHostsFiles(files...); err != nil {
- return nil, err
- }
-
- return knownhosts.New(files...)
-}
-
-func getDefaultKnownHostsFiles() ([]string, error) {
- files := filepath.SplitList(os.Getenv("SSH_KNOWN_HOSTS"))
- if len(files) != 0 {
- return files, nil
- }
-
- homeDirPath, err := os.UserHomeDir()
- if err != nil {
- return nil, err
- }
-
- return []string{
- filepath.Join(homeDirPath, "/.ssh/known_hosts"),
- "/etc/ssh/ssh_known_hosts",
- }, nil
-}
-
-func filterKnownHostsFiles(files ...string) ([]string, error) {
- var out []string
- for _, file := range files {
- _, err := os.Stat(file)
- if err == nil {
- out = append(out, file)
- continue
- }
-
- if !os.IsNotExist(err) {
- return nil, err
- }
- }
-
- if len(out) == 0 {
- return nil, fmt.Errorf("unable to find any valid known_hosts file, set SSH_KNOWN_HOSTS env variable")
- }
-
- return out, nil
-}
-
-// HostKeyCallbackHelper is a helper that provides common functionality to
-// configure HostKeyCallback into a ssh.ClientConfig.
-type HostKeyCallbackHelper struct {
- // HostKeyCallback is the function type used for verifying server keys.
- // If nil default callback will be create using NewKnownHostsCallback
- // without argument.
- HostKeyCallback ssh.HostKeyCallback
-}
-
-// SetHostKeyCallback sets the field HostKeyCallback in the given cfg. If
-// HostKeyCallback is empty a default callback is created using
-// NewKnownHostsCallback.
-func (m *HostKeyCallbackHelper) SetHostKeyCallback(cfg *ssh.ClientConfig) (*ssh.ClientConfig, error) {
- var err error
- if m.HostKeyCallback == nil {
- if m.HostKeyCallback, err = NewKnownHostsCallback(); err != nil {
- return cfg, err
- }
- }
-
- cfg.HostKeyCallback = m.HostKeyCallback
- return cfg, nil
-}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/common.go
deleted file mode 100644
index 05dea448f8f..00000000000
--- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/common.go
+++ /dev/null
@@ -1,276 +0,0 @@
-// Package ssh implements the SSH transport protocol.
-package ssh
-
-import (
- "context"
- "fmt"
- "net"
- "reflect"
- "strconv"
- "strings"
-
- "github.com/go-git/go-git/v5/plumbing/transport"
- "github.com/go-git/go-git/v5/plumbing/transport/internal/common"
- "github.com/skeema/knownhosts"
-
- "github.com/kevinburke/ssh_config"
- "golang.org/x/crypto/ssh"
- "golang.org/x/net/proxy"
-)
-
-// DefaultClient is the default SSH client.
-var DefaultClient = NewClient(nil)
-
-// DefaultSSHConfig is the reader used to access parameters stored in the
-// system's ssh_config files. If nil all the ssh_config are ignored.
-var DefaultSSHConfig sshConfig = ssh_config.DefaultUserSettings
-
-type sshConfig interface {
- Get(alias, key string) string
-}
-
-// NewClient creates a new SSH client with an optional *ssh.ClientConfig.
-func NewClient(config *ssh.ClientConfig) transport.Transport {
- return common.NewClient(&runner{config: config})
-}
-
-// DefaultAuthBuilder is the function used to create a default AuthMethod, when
-// the user doesn't provide any.
-var DefaultAuthBuilder = func(user string) (AuthMethod, error) {
- return NewSSHAgentAuth(user)
-}
-
-const DefaultPort = 22
-
-type runner struct {
- config *ssh.ClientConfig
-}
-
-func (r *runner) Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod) (common.Command, error) {
- c := &command{command: cmd, endpoint: ep, config: r.config}
- if auth != nil {
- if err := c.setAuth(auth); err != nil {
- return nil, err
- }
- }
-
- if err := c.connect(); err != nil {
- return nil, err
- }
- return c, nil
-}
-
-type command struct {
- *ssh.Session
- connected bool
- command string
- endpoint *transport.Endpoint
- client *ssh.Client
- auth AuthMethod
- config *ssh.ClientConfig
-}
-
-func (c *command) setAuth(auth transport.AuthMethod) error {
- a, ok := auth.(AuthMethod)
- if !ok {
- return transport.ErrInvalidAuthMethod
- }
-
- c.auth = a
- return nil
-}
-
-func (c *command) Start() error {
- return c.Session.Start(endpointToCommand(c.command, c.endpoint))
-}
-
-// Close closes the SSH session and connection.
-func (c *command) Close() error {
- if !c.connected {
- return nil
- }
-
- c.connected = false
-
- //XXX: If did read the full packfile, then the session might be already
- // closed.
- _ = c.Session.Close()
- err := c.client.Close()
-
- //XXX: in go1.16+ we can use errors.Is(err, net.ErrClosed)
- if err != nil && strings.HasSuffix(err.Error(), "use of closed network connection") {
- return nil
- }
-
- return err
-}
-
-// connect connects to the SSH server, unless a AuthMethod was set with
-// SetAuth method, by default uses an auth method based on PublicKeysCallback,
-// it connects to a SSH agent, using the address stored in the SSH_AUTH_SOCK
-// environment var.
-func (c *command) connect() error {
- if c.connected {
- return transport.ErrAlreadyConnected
- }
-
- if c.auth == nil {
- if err := c.setAuthFromEndpoint(); err != nil {
- return err
- }
- }
-
- var err error
- config, err := c.auth.ClientConfig()
- if err != nil {
- return err
- }
- hostWithPort := c.getHostWithPort()
- if config.HostKeyCallback == nil {
- kh, err := newKnownHosts()
- if err != nil {
- return err
- }
- config.HostKeyCallback = kh.HostKeyCallback()
- config.HostKeyAlgorithms = kh.HostKeyAlgorithms(hostWithPort)
- } else if len(config.HostKeyAlgorithms) == 0 {
- // Set the HostKeyAlgorithms based on HostKeyCallback.
- // For background see https://github.com/go-git/go-git/issues/411 as well as
- // https://github.com/golang/go/issues/29286 for root cause.
- config.HostKeyAlgorithms = knownhosts.HostKeyAlgorithms(config.HostKeyCallback, hostWithPort)
- }
-
- overrideConfig(c.config, config)
-
- c.client, err = dial("tcp", hostWithPort, c.endpoint.Proxy, config)
- if err != nil {
- return err
- }
-
- c.Session, err = c.client.NewSession()
- if err != nil {
- _ = c.client.Close()
- return err
- }
-
- c.connected = true
- return nil
-}
-
-func dial(network, addr string, proxyOpts transport.ProxyOptions, config *ssh.ClientConfig) (*ssh.Client, error) {
- var (
- ctx = context.Background()
- cancel context.CancelFunc
- )
- if config.Timeout > 0 {
- ctx, cancel = context.WithTimeout(ctx, config.Timeout)
- } else {
- ctx, cancel = context.WithCancel(ctx)
- }
- defer cancel()
-
- var conn net.Conn
- var dialErr error
-
- if proxyOpts.URL != "" {
- proxyUrl, err := proxyOpts.FullURL()
- if err != nil {
- return nil, err
- }
- dialer, err := proxy.FromURL(proxyUrl, proxy.Direct)
- if err != nil {
- return nil, err
- }
-
- // Try to use a ContextDialer, but fall back to a Dialer if that goes south.
- ctxDialer, ok := dialer.(proxy.ContextDialer)
- if !ok {
- return nil, fmt.Errorf("expected ssh proxy dialer to be of type %s; got %s",
- reflect.TypeOf(ctxDialer), reflect.TypeOf(dialer))
- }
- conn, dialErr = ctxDialer.DialContext(ctx, "tcp", addr)
- } else {
- conn, dialErr = proxy.Dial(ctx, network, addr)
- }
- if dialErr != nil {
- return nil, dialErr
- }
-
- c, chans, reqs, err := ssh.NewClientConn(conn, addr, config)
- if err != nil {
- return nil, err
- }
- return ssh.NewClient(c, chans, reqs), nil
-}
-
-func (c *command) getHostWithPort() string {
- if addr, found := c.doGetHostWithPortFromSSHConfig(); found {
- return addr
- }
-
- host := c.endpoint.Host
- port := c.endpoint.Port
- if port <= 0 {
- port = DefaultPort
- }
-
- return net.JoinHostPort(host, strconv.Itoa(port))
-}
-
-func (c *command) doGetHostWithPortFromSSHConfig() (addr string, found bool) {
- if DefaultSSHConfig == nil {
- return
- }
-
- host := c.endpoint.Host
- port := c.endpoint.Port
-
- configHost := DefaultSSHConfig.Get(c.endpoint.Host, "Hostname")
- if configHost != "" {
- host = configHost
- found = true
- }
-
- if !found {
- return
- }
-
- configPort := DefaultSSHConfig.Get(c.endpoint.Host, "Port")
- if configPort != "" {
- if i, err := strconv.Atoi(configPort); err == nil {
- port = i
- }
- }
-
- addr = net.JoinHostPort(host, strconv.Itoa(port))
- return
-}
-
-func (c *command) setAuthFromEndpoint() error {
- var err error
- c.auth, err = DefaultAuthBuilder(c.endpoint.User)
- return err
-}
-
-func endpointToCommand(cmd string, ep *transport.Endpoint) string {
- return fmt.Sprintf("%s '%s'", cmd, ep.Path)
-}
-
-func overrideConfig(overrides *ssh.ClientConfig, c *ssh.ClientConfig) {
- if overrides == nil {
- return
- }
-
- t := reflect.TypeOf(*c)
- vc := reflect.ValueOf(c).Elem()
- vo := reflect.ValueOf(overrides).Elem()
-
- for i := 0; i < t.NumField(); i++ {
- f := t.Field(i)
- vcf := vc.FieldByName(f.Name)
- vof := vo.FieldByName(f.Name)
- vcf.Set(vof)
- }
-
- *c = vc.Interface().(ssh.ClientConfig)
-}
diff --git a/vendor/github.com/go-git/go-git/v5/prune.go b/vendor/github.com/go-git/go-git/v5/prune.go
deleted file mode 100644
index 8e35b994e71..00000000000
--- a/vendor/github.com/go-git/go-git/v5/prune.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package git
-
-import (
- "errors"
- "time"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/storer"
-)
-
-type PruneHandler func(unreferencedObjectHash plumbing.Hash) error
-type PruneOptions struct {
- // OnlyObjectsOlderThan if set to non-zero value
- // selects only objects older than the time provided.
- OnlyObjectsOlderThan time.Time
- // Handler is called on matching objects
- Handler PruneHandler
-}
-
-var ErrLooseObjectsNotSupported = errors.New("loose objects not supported")
-
-// DeleteObject deletes an object from a repository.
-// The type conveniently matches PruneHandler.
-func (r *Repository) DeleteObject(hash plumbing.Hash) error {
- los, ok := r.Storer.(storer.LooseObjectStorer)
- if !ok {
- return ErrLooseObjectsNotSupported
- }
-
- return los.DeleteLooseObject(hash)
-}
-
-func (r *Repository) Prune(opt PruneOptions) error {
- los, ok := r.Storer.(storer.LooseObjectStorer)
- if !ok {
- return ErrLooseObjectsNotSupported
- }
-
- pw := newObjectWalker(r.Storer)
- err := pw.walkAllRefs()
- if err != nil {
- return err
- }
- // Now walk all (loose) objects in storage.
- return los.ForEachObjectHash(func(hash plumbing.Hash) error {
- // Get out if we have seen this object.
- if pw.isSeen(hash) {
- return nil
- }
- // Otherwise it is a candidate for pruning.
- // Check out for too new objects next.
- if !opt.OnlyObjectsOlderThan.IsZero() {
- // Errors here are non-fatal. The object may be e.g. packed.
- // Or concurrently deleted. Skip such objects.
- t, err := los.LooseObjectTime(hash)
- if err != nil {
- return nil
- }
- // Skip too new objects.
- if !t.Before(opt.OnlyObjectsOlderThan) {
- return nil
- }
- }
- return opt.Handler(hash)
- })
-}
diff --git a/vendor/github.com/go-git/go-git/v5/remote.go b/vendor/github.com/go-git/go-git/v5/remote.go
deleted file mode 100644
index 7cc0db9b7db..00000000000
--- a/vendor/github.com/go-git/go-git/v5/remote.go
+++ /dev/null
@@ -1,1528 +0,0 @@
-package git
-
-import (
- "context"
- "errors"
- "fmt"
- "io"
- "strings"
- "time"
-
- "github.com/go-git/go-billy/v5/osfs"
- "github.com/go-git/go-git/v5/config"
- "github.com/go-git/go-git/v5/internal/url"
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/cache"
- "github.com/go-git/go-git/v5/plumbing/format/packfile"
- "github.com/go-git/go-git/v5/plumbing/object"
- "github.com/go-git/go-git/v5/plumbing/protocol/packp"
- "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability"
- "github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband"
- "github.com/go-git/go-git/v5/plumbing/revlist"
- "github.com/go-git/go-git/v5/plumbing/storer"
- "github.com/go-git/go-git/v5/plumbing/transport"
- "github.com/go-git/go-git/v5/plumbing/transport/client"
- "github.com/go-git/go-git/v5/storage"
- "github.com/go-git/go-git/v5/storage/filesystem"
- "github.com/go-git/go-git/v5/storage/memory"
- "github.com/go-git/go-git/v5/utils/ioutil"
-)
-
-var (
- NoErrAlreadyUpToDate = errors.New("already up-to-date")
- ErrDeleteRefNotSupported = errors.New("server does not support delete-refs")
- ErrForceNeeded = errors.New("some refs were not updated")
- ErrExactSHA1NotSupported = errors.New("server does not support exact SHA1 refspec")
- ErrEmptyUrls = errors.New("URLs cannot be empty")
-)
-
-type NoMatchingRefSpecError struct {
- refSpec config.RefSpec
-}
-
-func (e NoMatchingRefSpecError) Error() string {
- return fmt.Sprintf("couldn't find remote ref %q", e.refSpec.Src())
-}
-
-func (e NoMatchingRefSpecError) Is(target error) bool {
- _, ok := target.(NoMatchingRefSpecError)
- return ok
-}
-
-const (
- // This describes the maximum number of commits to walk when
- // computing the haves to send to a server, for each ref in the
- // repo containing this remote, when not using the multi-ack
- // protocol. Setting this to 0 means there is no limit.
- maxHavesToVisitPerRef = 100
-
- // peeledSuffix is the suffix used to build peeled reference names.
- peeledSuffix = "^{}"
-)
-
-// Remote represents a connection to a remote repository.
-type Remote struct {
- c *config.RemoteConfig
- s storage.Storer
-}
-
-// NewRemote creates a new Remote.
-// The intended purpose is to use the Remote for tasks such as listing remote references (like using git ls-remote).
-// Otherwise Remotes should be created via the use of a Repository.
-func NewRemote(s storage.Storer, c *config.RemoteConfig) *Remote {
- return &Remote{s: s, c: c}
-}
-
-// Config returns the RemoteConfig object used to instantiate this Remote.
-func (r *Remote) Config() *config.RemoteConfig {
- return r.c
-}
-
-func (r *Remote) String() string {
- var fetch, push string
- if len(r.c.URLs) > 0 {
- fetch = r.c.URLs[0]
- push = r.c.URLs[0]
- }
-
- return fmt.Sprintf("%s\t%s (fetch)\n%[1]s\t%[3]s (push)", r.c.Name, fetch, push)
-}
-
-// Push performs a push to the remote. Returns NoErrAlreadyUpToDate if the
-// remote was already up-to-date.
-func (r *Remote) Push(o *PushOptions) error {
- return r.PushContext(context.Background(), o)
-}
-
-// PushContext performs a push to the remote. Returns NoErrAlreadyUpToDate if
-// the remote was already up-to-date.
-//
-// The provided Context must be non-nil. If the context expires before the
-// operation is complete, an error is returned. The context only affects the
-// transport operations.
-func (r *Remote) PushContext(ctx context.Context, o *PushOptions) (err error) {
- if err := o.Validate(); err != nil {
- return err
- }
-
- if o.RemoteName != r.c.Name {
- return fmt.Errorf("remote names don't match: %s != %s", o.RemoteName, r.c.Name)
- }
-
- if o.RemoteURL == "" {
- o.RemoteURL = r.c.URLs[0]
- }
-
- s, err := newSendPackSession(o.RemoteURL, o.Auth, o.InsecureSkipTLS, o.CABundle, o.ProxyOptions)
- if err != nil {
- return err
- }
-
- defer ioutil.CheckClose(s, &err)
-
- ar, err := s.AdvertisedReferencesContext(ctx)
- if err != nil {
- return err
- }
-
- remoteRefs, err := ar.AllReferences()
- if err != nil {
- return err
- }
-
- if err := r.checkRequireRemoteRefs(o.RequireRemoteRefs, remoteRefs); err != nil {
- return err
- }
-
- isDelete := false
- allDelete := true
- for _, rs := range o.RefSpecs {
- if rs.IsDelete() {
- isDelete = true
- } else {
- allDelete = false
- }
- if isDelete && !allDelete {
- break
- }
- }
-
- if isDelete && !ar.Capabilities.Supports(capability.DeleteRefs) {
- return ErrDeleteRefNotSupported
- }
-
- if o.Force {
- for i := 0; i < len(o.RefSpecs); i++ {
- rs := &o.RefSpecs[i]
- if !rs.IsForceUpdate() && !rs.IsDelete() {
- o.RefSpecs[i] = config.RefSpec("+" + rs.String())
- }
- }
- }
-
- localRefs, err := r.references()
- if err != nil {
- return err
- }
-
- req, err := r.newReferenceUpdateRequest(o, localRefs, remoteRefs, ar)
- if err != nil {
- return err
- }
-
- if len(req.Commands) == 0 {
- return NoErrAlreadyUpToDate
- }
-
- objects := objectsToPush(req.Commands)
-
- haves, err := referencesToHashes(remoteRefs)
- if err != nil {
- return err
- }
-
- stop, err := r.s.Shallow()
- if err != nil {
- return err
- }
-
- // if we have shallow we should include this as part of the objects that
- // we are aware.
- haves = append(haves, stop...)
-
- var hashesToPush []plumbing.Hash
- // Avoid the expensive revlist operation if we're only doing deletes.
- if !allDelete {
- if url.IsLocalEndpoint(o.RemoteURL) {
- // If we're are pushing to a local repo, it might be much
- // faster to use a local storage layer to get the commits
- // to ignore, when calculating the object revlist.
- localStorer := filesystem.NewStorage(
- osfs.New(o.RemoteURL), cache.NewObjectLRUDefault())
- hashesToPush, err = revlist.ObjectsWithStorageForIgnores(
- r.s, localStorer, objects, haves)
- } else {
- hashesToPush, err = revlist.Objects(r.s, objects, haves)
- }
- if err != nil {
- return err
- }
- }
-
- if len(hashesToPush) == 0 {
- allDelete = true
- for _, command := range req.Commands {
- if command.Action() != packp.Delete {
- allDelete = false
- break
- }
- }
- }
-
- rs, err := pushHashes(ctx, s, r.s, req, hashesToPush, r.useRefDeltas(ar), allDelete)
- if err != nil {
- return err
- }
-
- if rs != nil {
- if err = rs.Error(); err != nil {
- return err
- }
- }
-
- return r.updateRemoteReferenceStorage(req)
-}
-
-func (r *Remote) useRefDeltas(ar *packp.AdvRefs) bool {
- return !ar.Capabilities.Supports(capability.OFSDelta)
-}
-
-func (r *Remote) addReachableTags(localRefs []*plumbing.Reference, remoteRefs storer.ReferenceStorer, req *packp.ReferenceUpdateRequest) error {
- tags := make(map[plumbing.Reference]struct{})
- // get a list of all tags locally
- for _, ref := range localRefs {
- if strings.HasPrefix(string(ref.Name()), "refs/tags") {
- tags[*ref] = struct{}{}
- }
- }
-
- remoteRefIter, err := remoteRefs.IterReferences()
- if err != nil {
- return err
- }
-
- // remove any that are already on the remote
- if err := remoteRefIter.ForEach(func(reference *plumbing.Reference) error {
- delete(tags, *reference)
- return nil
- }); err != nil {
- return err
- }
-
- for tag := range tags {
- tagObject, err := object.GetObject(r.s, tag.Hash())
- var tagCommit *object.Commit
- if err != nil {
- return fmt.Errorf("get tag object: %w", err)
- }
-
- if tagObject.Type() != plumbing.TagObject {
- continue
- }
-
- annotatedTag, ok := tagObject.(*object.Tag)
- if !ok {
- return errors.New("could not get annotated tag object")
- }
-
- tagCommit, err = object.GetCommit(r.s, annotatedTag.Target)
- if err != nil {
- return fmt.Errorf("get annotated tag commit: %w", err)
- }
-
- // only include tags that are reachable from one of the refs
- // already being pushed
- for _, cmd := range req.Commands {
- if tag.Name() == cmd.Name {
- continue
- }
-
- if strings.HasPrefix(cmd.Name.String(), "refs/tags") {
- continue
- }
-
- c, err := object.GetCommit(r.s, cmd.New)
- if err != nil {
- return fmt.Errorf("get commit %v: %w", cmd.Name, err)
- }
-
- if isAncestor, err := tagCommit.IsAncestor(c); err == nil && isAncestor {
- req.Commands = append(req.Commands, &packp.Command{Name: tag.Name(), New: tag.Hash()})
- }
- }
- }
-
- return nil
-}
-
-func (r *Remote) newReferenceUpdateRequest(
- o *PushOptions,
- localRefs []*plumbing.Reference,
- remoteRefs storer.ReferenceStorer,
- ar *packp.AdvRefs,
-) (*packp.ReferenceUpdateRequest, error) {
- req := packp.NewReferenceUpdateRequestFromCapabilities(ar.Capabilities)
-
- if o.Progress != nil {
- req.Progress = o.Progress
- if ar.Capabilities.Supports(capability.Sideband64k) {
- _ = req.Capabilities.Set(capability.Sideband64k)
- } else if ar.Capabilities.Supports(capability.Sideband) {
- _ = req.Capabilities.Set(capability.Sideband)
- }
- }
-
- if ar.Capabilities.Supports(capability.PushOptions) {
- _ = req.Capabilities.Set(capability.PushOptions)
- for k, v := range o.Options {
- req.Options = append(req.Options, &packp.Option{Key: k, Value: v})
- }
- }
-
- if o.Atomic && ar.Capabilities.Supports(capability.Atomic) {
- _ = req.Capabilities.Set(capability.Atomic)
- }
-
- if err := r.addReferencesToUpdate(o.RefSpecs, localRefs, remoteRefs, req, o.Prune, o.ForceWithLease); err != nil {
-
- return nil, err
- }
-
- if o.FollowTags {
- if err := r.addReachableTags(localRefs, remoteRefs, req); err != nil {
- return nil, err
- }
- }
-
- return req, nil
-}
-
-func (r *Remote) updateRemoteReferenceStorage(
- req *packp.ReferenceUpdateRequest,
-) error {
-
- for _, spec := range r.c.Fetch {
- for _, c := range req.Commands {
- if !spec.Match(c.Name) {
- continue
- }
-
- local := spec.Dst(c.Name)
- ref := plumbing.NewHashReference(local, c.New)
- switch c.Action() {
- case packp.Create, packp.Update:
- if err := r.s.SetReference(ref); err != nil {
- return err
- }
- case packp.Delete:
- if err := r.s.RemoveReference(local); err != nil {
- return err
- }
- }
- }
- }
-
- return nil
-}
-
-// FetchContext fetches references along with the objects necessary to complete
-// their histories.
-//
-// Returns nil if the operation is successful, NoErrAlreadyUpToDate if there are
-// no changes to be fetched, or an error.
-//
-// The provided Context must be non-nil. If the context expires before the
-// operation is complete, an error is returned. The context only affects the
-// transport operations.
-func (r *Remote) FetchContext(ctx context.Context, o *FetchOptions) error {
- _, err := r.fetch(ctx, o)
- return err
-}
-
-// Fetch fetches references along with the objects necessary to complete their
-// histories.
-//
-// Returns nil if the operation is successful, NoErrAlreadyUpToDate if there are
-// no changes to be fetched, or an error.
-func (r *Remote) Fetch(o *FetchOptions) error {
- return r.FetchContext(context.Background(), o)
-}
-
-func (r *Remote) fetch(ctx context.Context, o *FetchOptions) (sto storer.ReferenceStorer, err error) {
- if o.RemoteName == "" {
- o.RemoteName = r.c.Name
- }
-
- if err = o.Validate(); err != nil {
- return nil, err
- }
-
- if len(o.RefSpecs) == 0 {
- o.RefSpecs = r.c.Fetch
- }
-
- if o.RemoteURL == "" {
- o.RemoteURL = r.c.URLs[0]
- }
-
- s, err := newUploadPackSession(o.RemoteURL, o.Auth, o.InsecureSkipTLS, o.CABundle, o.ProxyOptions)
- if err != nil {
- return nil, err
- }
-
- defer ioutil.CheckClose(s, &err)
-
- ar, err := s.AdvertisedReferencesContext(ctx)
- if err != nil {
- return nil, err
- }
-
- req, err := r.newUploadPackRequest(o, ar)
- if err != nil {
- return nil, err
- }
-
- if err := r.isSupportedRefSpec(o.RefSpecs, ar); err != nil {
- return nil, err
- }
-
- remoteRefs, err := ar.AllReferences()
- if err != nil {
- return nil, err
- }
-
- localRefs, err := r.references()
- if err != nil {
- return nil, err
- }
-
- refs, specToRefs, err := calculateRefs(o.RefSpecs, remoteRefs, o.Tags)
- if err != nil {
- return nil, err
- }
-
- if !req.Depth.IsZero() {
- req.Shallows, err = r.s.Shallow()
- if err != nil {
- return nil, fmt.Errorf("existing checkout is not shallow")
- }
- }
-
- req.Wants, err = getWants(r.s, refs, o.Depth)
- if len(req.Wants) > 0 {
- req.Haves, err = getHaves(localRefs, remoteRefs, r.s, o.Depth)
- if err != nil {
- return nil, err
- }
-
- if err = r.fetchPack(ctx, o, s, req); err != nil {
- return nil, err
- }
- }
-
- var updatedPrune bool
- if o.Prune {
- updatedPrune, err = r.pruneRemotes(o.RefSpecs, localRefs, remoteRefs)
- if err != nil {
- return nil, err
- }
- }
-
- updated, err := r.updateLocalReferenceStorage(o.RefSpecs, refs, remoteRefs, specToRefs, o.Tags, o.Force)
- if err != nil {
- return nil, err
- }
-
- if !updated {
- updated, err = depthChanged(req.Shallows, r.s)
- if err != nil {
- return nil, fmt.Errorf("error checking depth change: %v", err)
- }
- }
-
- if !updated && !updatedPrune {
- return remoteRefs, NoErrAlreadyUpToDate
- }
-
- return remoteRefs, nil
-}
-
-func depthChanged(before []plumbing.Hash, s storage.Storer) (bool, error) {
- after, err := s.Shallow()
- if err != nil {
- return false, err
- }
-
- if len(before) != len(after) {
- return true, nil
- }
-
- bm := make(map[plumbing.Hash]bool, len(before))
- for _, b := range before {
- bm[b] = true
- }
- for _, a := range after {
- if _, ok := bm[a]; !ok {
- return true, nil
- }
- }
-
- return false, nil
-}
-
-func newUploadPackSession(url string, auth transport.AuthMethod, insecure bool, cabundle []byte, proxyOpts transport.ProxyOptions) (transport.UploadPackSession, error) {
- c, ep, err := newClient(url, insecure, cabundle, proxyOpts)
- if err != nil {
- return nil, err
- }
-
- return c.NewUploadPackSession(ep, auth)
-}
-
-func newSendPackSession(url string, auth transport.AuthMethod, insecure bool, cabundle []byte, proxyOpts transport.ProxyOptions) (transport.ReceivePackSession, error) {
- c, ep, err := newClient(url, insecure, cabundle, proxyOpts)
- if err != nil {
- return nil, err
- }
-
- return c.NewReceivePackSession(ep, auth)
-}
-
-func newClient(url string, insecure bool, cabundle []byte, proxyOpts transport.ProxyOptions) (transport.Transport, *transport.Endpoint, error) {
- ep, err := transport.NewEndpoint(url)
- if err != nil {
- return nil, nil, err
- }
- ep.InsecureSkipTLS = insecure
- ep.CaBundle = cabundle
- ep.Proxy = proxyOpts
-
- c, err := client.NewClient(ep)
- if err != nil {
- return nil, nil, err
- }
-
- return c, ep, err
-}
-
-func (r *Remote) fetchPack(ctx context.Context, o *FetchOptions, s transport.UploadPackSession,
- req *packp.UploadPackRequest) (err error) {
-
- reader, err := s.UploadPack(ctx, req)
- if err != nil {
- if errors.Is(err, transport.ErrEmptyUploadPackRequest) {
- // XXX: no packfile provided, everything is up-to-date.
- return nil
- }
- return err
- }
-
- defer ioutil.CheckClose(reader, &err)
-
- if err = r.updateShallow(o, reader); err != nil {
- return err
- }
-
- if err = packfile.UpdateObjectStorage(r.s,
- buildSidebandIfSupported(req.Capabilities, reader, o.Progress),
- ); err != nil {
- return err
- }
-
- return err
-}
-
-func (r *Remote) pruneRemotes(specs []config.RefSpec, localRefs []*plumbing.Reference, remoteRefs memory.ReferenceStorage) (bool, error) {
- var updatedPrune bool
- for _, spec := range specs {
- rev := spec.Reverse()
- for _, ref := range localRefs {
- if !rev.Match(ref.Name()) {
- continue
- }
- _, err := remoteRefs.Reference(rev.Dst(ref.Name()))
- if errors.Is(err, plumbing.ErrReferenceNotFound) {
- updatedPrune = true
- err := r.s.RemoveReference(ref.Name())
- if err != nil {
- return false, err
- }
- }
- }
- }
- return updatedPrune, nil
-}
-
-func (r *Remote) addReferencesToUpdate(
- refspecs []config.RefSpec,
- localRefs []*plumbing.Reference,
- remoteRefs storer.ReferenceStorer,
- req *packp.ReferenceUpdateRequest,
- prune bool,
- forceWithLease *ForceWithLease,
-) error {
- // This references dictionary will be used to search references by name.
- refsDict := make(map[string]*plumbing.Reference)
- for _, ref := range localRefs {
- refsDict[ref.Name().String()] = ref
- }
-
- for _, rs := range refspecs {
- if rs.IsDelete() {
- if err := r.deleteReferences(rs, remoteRefs, refsDict, req, false); err != nil {
- return err
- }
- } else {
- err := r.addOrUpdateReferences(rs, localRefs, refsDict, remoteRefs, req, forceWithLease)
- if err != nil {
- return err
- }
-
- if prune {
- if err := r.deleteReferences(rs, remoteRefs, refsDict, req, true); err != nil {
- return err
- }
- }
- }
- }
-
- return nil
-}
-
-func (r *Remote) addOrUpdateReferences(
- rs config.RefSpec,
- localRefs []*plumbing.Reference,
- refsDict map[string]*plumbing.Reference,
- remoteRefs storer.ReferenceStorer,
- req *packp.ReferenceUpdateRequest,
- forceWithLease *ForceWithLease,
-) error {
- // If it is not a wildcard refspec we can directly search for the reference
- // in the references dictionary.
- if !rs.IsWildcard() {
- ref, ok := refsDict[rs.Src()]
- if !ok {
- commit, err := object.GetCommit(r.s, plumbing.NewHash(rs.Src()))
- if err == nil {
- return r.addCommit(rs, remoteRefs, commit.Hash, req)
- }
- return nil
- }
-
- return r.addReferenceIfRefSpecMatches(rs, remoteRefs, ref, req, forceWithLease)
- }
-
- for _, ref := range localRefs {
- err := r.addReferenceIfRefSpecMatches(rs, remoteRefs, ref, req, forceWithLease)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (r *Remote) deleteReferences(rs config.RefSpec,
- remoteRefs storer.ReferenceStorer,
- refsDict map[string]*plumbing.Reference,
- req *packp.ReferenceUpdateRequest,
- prune bool) error {
- iter, err := remoteRefs.IterReferences()
- if err != nil {
- return err
- }
-
- return iter.ForEach(func(ref *plumbing.Reference) error {
- if ref.Type() != plumbing.HashReference {
- return nil
- }
-
- if prune {
- rs := rs.Reverse()
- if !rs.Match(ref.Name()) {
- return nil
- }
-
- if _, ok := refsDict[rs.Dst(ref.Name()).String()]; ok {
- return nil
- }
- } else if rs.Dst("") != ref.Name() {
- return nil
- }
-
- cmd := &packp.Command{
- Name: ref.Name(),
- Old: ref.Hash(),
- New: plumbing.ZeroHash,
- }
- req.Commands = append(req.Commands, cmd)
- return nil
- })
-}
-
-func (r *Remote) addCommit(rs config.RefSpec,
- remoteRefs storer.ReferenceStorer, localCommit plumbing.Hash,
- req *packp.ReferenceUpdateRequest) error {
-
- if rs.IsWildcard() {
- return errors.New("can't use wildcard together with hash refspecs")
- }
-
- cmd := &packp.Command{
- Name: rs.Dst(""),
- Old: plumbing.ZeroHash,
- New: localCommit,
- }
- remoteRef, err := remoteRefs.Reference(cmd.Name)
- if err == nil {
- if remoteRef.Type() != plumbing.HashReference {
- // TODO: check actual git behavior here
- return nil
- }
-
- cmd.Old = remoteRef.Hash()
- } else if err != plumbing.ErrReferenceNotFound {
- return err
- }
- if cmd.Old == cmd.New {
- return nil
- }
- if !rs.IsForceUpdate() {
- if err := checkFastForwardUpdate(r.s, remoteRefs, cmd); err != nil {
- return err
- }
- }
-
- req.Commands = append(req.Commands, cmd)
- return nil
-}
-
-func (r *Remote) addReferenceIfRefSpecMatches(rs config.RefSpec,
- remoteRefs storer.ReferenceStorer, localRef *plumbing.Reference,
- req *packp.ReferenceUpdateRequest, forceWithLease *ForceWithLease) error {
-
- if localRef.Type() != plumbing.HashReference {
- return nil
- }
-
- if !rs.Match(localRef.Name()) {
- return nil
- }
-
- cmd := &packp.Command{
- Name: rs.Dst(localRef.Name()),
- Old: plumbing.ZeroHash,
- New: localRef.Hash(),
- }
-
- remoteRef, err := remoteRefs.Reference(cmd.Name)
- if err == nil {
- if remoteRef.Type() != plumbing.HashReference {
- // TODO: check actual git behavior here
- return nil
- }
-
- cmd.Old = remoteRef.Hash()
- } else if err != plumbing.ErrReferenceNotFound {
- return err
- }
-
- if cmd.Old == cmd.New {
- return nil
- }
-
- if forceWithLease != nil {
- if err = r.checkForceWithLease(localRef, cmd, forceWithLease); err != nil {
- return err
- }
- } else if !rs.IsForceUpdate() {
- if err := checkFastForwardUpdate(r.s, remoteRefs, cmd); err != nil {
- return err
- }
- }
-
- req.Commands = append(req.Commands, cmd)
- return nil
-}
-
-func (r *Remote) checkForceWithLease(localRef *plumbing.Reference, cmd *packp.Command, forceWithLease *ForceWithLease) error {
- remotePrefix := fmt.Sprintf("refs/remotes/%s/", r.Config().Name)
-
- ref, err := storer.ResolveReference(
- r.s,
- plumbing.ReferenceName(remotePrefix+strings.Replace(localRef.Name().String(), "refs/heads/", "", -1)))
- if err != nil {
- return err
- }
-
- if forceWithLease.RefName.String() == "" || (forceWithLease.RefName == cmd.Name) {
- expectedOID := ref.Hash()
-
- if !forceWithLease.Hash.IsZero() {
- expectedOID = forceWithLease.Hash
- }
-
- if cmd.Old != expectedOID {
- return fmt.Errorf("non-fast-forward update: %s", cmd.Name.String())
- }
- }
-
- return nil
-}
-
-func (r *Remote) references() ([]*plumbing.Reference, error) {
- var localRefs []*plumbing.Reference
-
- iter, err := r.s.IterReferences()
- if err != nil {
- return nil, err
- }
-
- for {
- ref, err := iter.Next()
- if err == io.EOF {
- break
- }
-
- if err != nil {
- return nil, err
- }
-
- localRefs = append(localRefs, ref)
- }
-
- return localRefs, nil
-}
-
-func getRemoteRefsFromStorer(remoteRefStorer storer.ReferenceStorer) (
- map[plumbing.Hash]bool, error) {
- remoteRefs := map[plumbing.Hash]bool{}
- iter, err := remoteRefStorer.IterReferences()
- if err != nil {
- return nil, err
- }
- err = iter.ForEach(func(ref *plumbing.Reference) error {
- if ref.Type() != plumbing.HashReference {
- return nil
- }
- remoteRefs[ref.Hash()] = true
- return nil
- })
- if err != nil {
- return nil, err
- }
- return remoteRefs, nil
-}
-
-// getHavesFromRef populates the given `haves` map with the given
-// reference, and up to `maxHavesToVisitPerRef` ancestor commits.
-func getHavesFromRef(
- ref *plumbing.Reference,
- remoteRefs map[plumbing.Hash]bool,
- s storage.Storer,
- haves map[plumbing.Hash]bool,
- depth int,
-) error {
- h := ref.Hash()
- if haves[h] {
- return nil
- }
-
- // No need to load the commit if we know the remote already
- // has this hash.
- if remoteRefs[h] {
- haves[h] = true
- return nil
- }
-
- commit, err := object.GetCommit(s, h)
- if err != nil {
- // Ignore the error if this isn't a commit.
- haves[ref.Hash()] = true
- return nil
- }
-
- // Until go-git supports proper commit negotiation during an
- // upload pack request, include up to `maxHavesToVisitPerRef`
- // commits from the history of each ref.
- walker := object.NewCommitPreorderIter(commit, haves, nil)
- toVisit := maxHavesToVisitPerRef
- // But only need up to the requested depth
- if depth > 0 && depth < maxHavesToVisitPerRef {
- toVisit = depth
- }
- // It is safe to ignore any error here as we are just trying to find the references that we already have
- // An example of a legitimate failure is we have a shallow clone and don't have the previous commit(s)
- _ = walker.ForEach(func(c *object.Commit) error {
- haves[c.Hash] = true
- toVisit--
- // If toVisit starts out at 0 (indicating there is no
- // max), then it will be negative here and we won't stop
- // early.
- if toVisit == 0 || remoteRefs[c.Hash] {
- return storer.ErrStop
- }
- return nil
- })
-
- return nil
-}
-
-func getHaves(
- localRefs []*plumbing.Reference,
- remoteRefStorer storer.ReferenceStorer,
- s storage.Storer,
- depth int,
-) ([]plumbing.Hash, error) {
- haves := map[plumbing.Hash]bool{}
-
- // Build a map of all the remote references, to avoid loading too
- // many parent commits for references we know don't need to be
- // transferred.
- remoteRefs, err := getRemoteRefsFromStorer(remoteRefStorer)
- if err != nil {
- return nil, err
- }
-
- for _, ref := range localRefs {
- if haves[ref.Hash()] {
- continue
- }
-
- if ref.Type() != plumbing.HashReference {
- continue
- }
-
- err = getHavesFromRef(ref, remoteRefs, s, haves, depth)
- if err != nil {
- return nil, err
- }
- }
-
- var result []plumbing.Hash
- for h := range haves {
- result = append(result, h)
- }
-
- return result, nil
-}
-
-const refspecAllTags = "+refs/tags/*:refs/tags/*"
-
-func calculateRefs(
- spec []config.RefSpec,
- remoteRefs storer.ReferenceStorer,
- tagMode TagMode,
-) (memory.ReferenceStorage, [][]*plumbing.Reference, error) {
- if tagMode == AllTags {
- spec = append(spec, refspecAllTags)
- }
-
- refs := make(memory.ReferenceStorage)
- // list of references matched for each spec
- specToRefs := make([][]*plumbing.Reference, len(spec))
- for i := range spec {
- var err error
- specToRefs[i], err = doCalculateRefs(spec[i], remoteRefs, refs)
- if err != nil {
- return nil, nil, err
- }
- }
-
- return refs, specToRefs, nil
-}
-
-func doCalculateRefs(
- s config.RefSpec,
- remoteRefs storer.ReferenceStorer,
- refs memory.ReferenceStorage,
-) ([]*plumbing.Reference, error) {
- var refList []*plumbing.Reference
-
- if s.IsExactSHA1() {
- ref := plumbing.NewHashReference(s.Dst(""), plumbing.NewHash(s.Src()))
-
- refList = append(refList, ref)
- return refList, refs.SetReference(ref)
- }
-
- var matched bool
- onMatched := func(ref *plumbing.Reference) error {
- if ref.Type() == plumbing.SymbolicReference {
- target, err := storer.ResolveReference(remoteRefs, ref.Name())
- if err != nil {
- return err
- }
-
- ref = plumbing.NewHashReference(ref.Name(), target.Hash())
- }
-
- if ref.Type() != plumbing.HashReference {
- return nil
- }
-
- matched = true
- refList = append(refList, ref)
- return refs.SetReference(ref)
- }
-
- var ret error
- if s.IsWildcard() {
- iter, err := remoteRefs.IterReferences()
- if err != nil {
- return nil, err
- }
- ret = iter.ForEach(func(ref *plumbing.Reference) error {
- if !s.Match(ref.Name()) {
- return nil
- }
-
- return onMatched(ref)
- })
- } else {
- var resolvedRef *plumbing.Reference
- src := s.Src()
- resolvedRef, ret = expand_ref(remoteRefs, plumbing.ReferenceName(src))
- if ret == nil {
- ret = onMatched(resolvedRef)
- }
- }
-
- if !matched && !s.IsWildcard() {
- return nil, NoMatchingRefSpecError{refSpec: s}
- }
-
- return refList, ret
-}
-
-func getWants(localStorer storage.Storer, refs memory.ReferenceStorage, depth int) ([]plumbing.Hash, error) {
- // If depth is anything other than 1 and the repo has shallow commits then just because we have the commit
- // at the reference doesn't mean that we don't still need to fetch the parents
- shallow := false
- if depth != 1 {
- if s, _ := localStorer.Shallow(); len(s) > 0 {
- shallow = true
- }
- }
-
- wants := map[plumbing.Hash]bool{}
- for _, ref := range refs {
- hash := ref.Hash()
- exists, err := objectExists(localStorer, ref.Hash())
- if err != nil {
- return nil, err
- }
-
- if !exists || shallow {
- wants[hash] = true
- }
- }
-
- var result []plumbing.Hash
- for h := range wants {
- result = append(result, h)
- }
-
- return result, nil
-}
-
-func objectExists(s storer.EncodedObjectStorer, h plumbing.Hash) (bool, error) {
- _, err := s.EncodedObject(plumbing.AnyObject, h)
- if err == plumbing.ErrObjectNotFound {
- return false, nil
- }
-
- return true, err
-}
-
-func checkFastForwardUpdate(s storer.EncodedObjectStorer, remoteRefs storer.ReferenceStorer, cmd *packp.Command) error {
- if cmd.Old == plumbing.ZeroHash {
- _, err := remoteRefs.Reference(cmd.Name)
- if err == plumbing.ErrReferenceNotFound {
- return nil
- }
-
- if err != nil {
- return err
- }
-
- return fmt.Errorf("non-fast-forward update: %s", cmd.Name.String())
- }
-
- ff, err := isFastForward(s, cmd.Old, cmd.New, nil)
- if err != nil {
- return err
- }
-
- if !ff {
- return fmt.Errorf("non-fast-forward update: %s", cmd.Name.String())
- }
-
- return nil
-}
-
-func isFastForward(s storer.EncodedObjectStorer, old, new plumbing.Hash, earliestShallow *plumbing.Hash) (bool, error) {
- c, err := object.GetCommit(s, new)
- if err != nil {
- return false, err
- }
-
- parentsToIgnore := []plumbing.Hash{}
- if earliestShallow != nil {
- earliestCommit, err := object.GetCommit(s, *earliestShallow)
- if err != nil {
- return false, err
- }
-
- parentsToIgnore = earliestCommit.ParentHashes
- }
-
- found := false
- // stop iterating at the earliest shallow commit, ignoring its parents
- // note: when pull depth is smaller than the number of new changes on the remote, this fails due to missing parents.
- // as far as i can tell, without the commits in-between the shallow pull and the earliest shallow, there's no
- // real way of telling whether it will be a fast-forward merge.
- iter := object.NewCommitPreorderIter(c, nil, parentsToIgnore)
- err = iter.ForEach(func(c *object.Commit) error {
- if c.Hash != old {
- return nil
- }
-
- found = true
- return storer.ErrStop
- })
- return found, err
-}
-
-func (r *Remote) newUploadPackRequest(o *FetchOptions,
- ar *packp.AdvRefs) (*packp.UploadPackRequest, error) {
-
- req := packp.NewUploadPackRequestFromCapabilities(ar.Capabilities)
-
- if o.Depth != 0 {
- req.Depth = packp.DepthCommits(o.Depth)
- if err := req.Capabilities.Set(capability.Shallow); err != nil {
- return nil, err
- }
- }
-
- if o.Progress == nil && ar.Capabilities.Supports(capability.NoProgress) {
- if err := req.Capabilities.Set(capability.NoProgress); err != nil {
- return nil, err
- }
- }
-
- isWildcard := true
- for _, s := range o.RefSpecs {
- if !s.IsWildcard() {
- isWildcard = false
- break
- }
- }
-
- if isWildcard && o.Tags == TagFollowing && ar.Capabilities.Supports(capability.IncludeTag) {
- if err := req.Capabilities.Set(capability.IncludeTag); err != nil {
- return nil, err
- }
- }
-
- return req, nil
-}
-
-func (r *Remote) isSupportedRefSpec(refs []config.RefSpec, ar *packp.AdvRefs) error {
- var containsIsExact bool
- for _, ref := range refs {
- if ref.IsExactSHA1() {
- containsIsExact = true
- }
- }
-
- if !containsIsExact {
- return nil
- }
-
- if ar.Capabilities.Supports(capability.AllowReachableSHA1InWant) ||
- ar.Capabilities.Supports(capability.AllowTipSHA1InWant) {
- return nil
- }
-
- return ErrExactSHA1NotSupported
-}
-
-func buildSidebandIfSupported(l *capability.List, reader io.Reader, p sideband.Progress) io.Reader {
- var t sideband.Type
-
- switch {
- case l.Supports(capability.Sideband):
- t = sideband.Sideband
- case l.Supports(capability.Sideband64k):
- t = sideband.Sideband64k
- default:
- return reader
- }
-
- d := sideband.NewDemuxer(t, reader)
- d.Progress = p
-
- return d
-}
-
-func (r *Remote) updateLocalReferenceStorage(
- specs []config.RefSpec,
- fetchedRefs, remoteRefs memory.ReferenceStorage,
- specToRefs [][]*plumbing.Reference,
- tagMode TagMode,
- force bool,
-) (updated bool, err error) {
- isWildcard := true
- forceNeeded := false
-
- for i, spec := range specs {
- if !spec.IsWildcard() {
- isWildcard = false
- }
-
- for _, ref := range specToRefs[i] {
- if ref.Type() != plumbing.HashReference {
- continue
- }
-
- localName := spec.Dst(ref.Name())
- // If localName doesn't start with "refs/" then treat as a branch.
- if !strings.HasPrefix(localName.String(), "refs/") {
- localName = plumbing.NewBranchReferenceName(localName.String())
- }
- old, _ := storer.ResolveReference(r.s, localName)
- new := plumbing.NewHashReference(localName, ref.Hash())
-
- // If the ref exists locally as a non-tag and force is not
- // specified, only update if the new ref is an ancestor of the old
- if old != nil && !old.Name().IsTag() && !force && !spec.IsForceUpdate() {
- ff, err := isFastForward(r.s, old.Hash(), new.Hash(), nil)
- if err != nil {
- return updated, err
- }
-
- if !ff {
- forceNeeded = true
- continue
- }
- }
-
- refUpdated, err := checkAndUpdateReferenceStorerIfNeeded(r.s, new, old)
- if err != nil {
- return updated, err
- }
-
- if refUpdated {
- updated = true
- }
- }
- }
-
- if tagMode == NoTags {
- return updated, nil
- }
-
- tags := fetchedRefs
- if isWildcard {
- tags = remoteRefs
- }
- tagUpdated, err := r.buildFetchedTags(tags)
- if err != nil {
- return updated, err
- }
-
- if tagUpdated {
- updated = true
- }
-
- if forceNeeded {
- err = ErrForceNeeded
- }
-
- return
-}
-
-func (r *Remote) buildFetchedTags(refs memory.ReferenceStorage) (updated bool, err error) {
- for _, ref := range refs {
- if !ref.Name().IsTag() {
- continue
- }
-
- _, err := r.s.EncodedObject(plumbing.AnyObject, ref.Hash())
- if err == plumbing.ErrObjectNotFound {
- continue
- }
-
- if err != nil {
- return false, err
- }
-
- refUpdated, err := updateReferenceStorerIfNeeded(r.s, ref)
- if err != nil {
- return updated, err
- }
-
- if refUpdated {
- updated = true
- }
- }
-
- return
-}
-
-// List the references on the remote repository.
-// The provided Context must be non-nil. If the context expires before the
-// operation is complete, an error is returned. The context only affects to the
-// transport operations.
-func (r *Remote) ListContext(ctx context.Context, o *ListOptions) (rfs []*plumbing.Reference, err error) {
- return r.list(ctx, o)
-}
-
-func (r *Remote) List(o *ListOptions) (rfs []*plumbing.Reference, err error) {
- timeout := o.Timeout
- // Default to the old hardcoded 10s value if a timeout is not explicitly set.
- if timeout == 0 {
- timeout = 10
- }
- if timeout < 0 {
- return nil, fmt.Errorf("invalid timeout: %d", timeout)
- }
- ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeout)*time.Second)
- defer cancel()
- return r.ListContext(ctx, o)
-}
-
-func (r *Remote) list(ctx context.Context, o *ListOptions) (rfs []*plumbing.Reference, err error) {
- if r.c == nil || len(r.c.URLs) == 0 {
- return nil, ErrEmptyUrls
- }
-
- s, err := newUploadPackSession(r.c.URLs[0], o.Auth, o.InsecureSkipTLS, o.CABundle, o.ProxyOptions)
- if err != nil {
- return nil, err
- }
-
- defer ioutil.CheckClose(s, &err)
-
- ar, err := s.AdvertisedReferencesContext(ctx)
- if err != nil {
- return nil, err
- }
-
- allRefs, err := ar.AllReferences()
- if err != nil {
- return nil, err
- }
-
- refs, err := allRefs.IterReferences()
- if err != nil {
- return nil, err
- }
-
- var resultRefs []*plumbing.Reference
- if o.PeelingOption == AppendPeeled || o.PeelingOption == IgnorePeeled {
- err = refs.ForEach(func(ref *plumbing.Reference) error {
- resultRefs = append(resultRefs, ref)
- return nil
- })
- if err != nil {
- return nil, err
- }
- }
-
- if o.PeelingOption == AppendPeeled || o.PeelingOption == OnlyPeeled {
- for k, v := range ar.Peeled {
- resultRefs = append(resultRefs, plumbing.NewReferenceFromStrings(k+"^{}", v.String()))
- }
- }
-
- return resultRefs, nil
-}
-
-func objectsToPush(commands []*packp.Command) []plumbing.Hash {
- objects := make([]plumbing.Hash, 0, len(commands))
- for _, cmd := range commands {
- if cmd.New == plumbing.ZeroHash {
- continue
- }
- objects = append(objects, cmd.New)
- }
- return objects
-}
-
-func referencesToHashes(refs storer.ReferenceStorer) ([]plumbing.Hash, error) {
- iter, err := refs.IterReferences()
- if err != nil {
- return nil, err
- }
-
- var hs []plumbing.Hash
- err = iter.ForEach(func(ref *plumbing.Reference) error {
- if ref.Type() != plumbing.HashReference {
- return nil
- }
-
- hs = append(hs, ref.Hash())
- return nil
- })
- if err != nil {
- return nil, err
- }
-
- return hs, nil
-}
-
-func pushHashes(
- ctx context.Context,
- sess transport.ReceivePackSession,
- s storage.Storer,
- req *packp.ReferenceUpdateRequest,
- hs []plumbing.Hash,
- useRefDeltas bool,
- allDelete bool,
-) (*packp.ReportStatus, error) {
- rd, wr := io.Pipe()
-
- config, err := s.Config()
- if err != nil {
- return nil, err
- }
-
- // Set buffer size to 1 so the error message can be written when
- // ReceivePack fails. Otherwise the goroutine will be blocked writing
- // to the channel.
- done := make(chan error, 1)
-
- if !allDelete {
- req.Packfile = rd
- go func() {
- e := packfile.NewEncoder(wr, s, useRefDeltas)
- if _, err := e.Encode(hs, config.Pack.Window); err != nil {
- done <- wr.CloseWithError(err)
- return
- }
-
- done <- wr.Close()
- }()
- } else {
- close(done)
- }
-
- rs, err := sess.ReceivePack(ctx, req)
- if err != nil {
- // close the pipe to unlock encode write
- _ = rd.Close()
- return nil, err
- }
-
- if err := <-done; err != nil {
- return nil, err
- }
-
- return rs, nil
-}
-
-func (r *Remote) updateShallow(o *FetchOptions, resp *packp.UploadPackResponse) error {
- if o.Depth == 0 || len(resp.Shallows) == 0 {
- return nil
- }
-
- shallows, err := r.s.Shallow()
- if err != nil {
- return err
- }
-
-outer:
- for _, s := range resp.Shallows {
- for _, oldS := range shallows {
- if s == oldS {
- continue outer
- }
- }
- shallows = append(shallows, s)
- }
-
- return r.s.SetShallow(shallows)
-}
-
-func (r *Remote) checkRequireRemoteRefs(requires []config.RefSpec, remoteRefs storer.ReferenceStorer) error {
- for _, require := range requires {
- if require.IsWildcard() {
- return fmt.Errorf("wildcards not supported in RequireRemoteRefs, got %s", require.String())
- }
-
- name := require.Dst("")
- remote, err := remoteRefs.Reference(name)
- if err != nil {
- return fmt.Errorf("remote ref %s required to be %s but is absent", name.String(), require.Src())
- }
-
- var requireHash string
- if require.IsExactSHA1() {
- requireHash = require.Src()
- } else {
- target, err := storer.ResolveReference(remoteRefs, plumbing.ReferenceName(require.Src()))
- if err != nil {
- return fmt.Errorf("could not resolve ref %s in RequireRemoteRefs", require.Src())
- }
- requireHash = target.Hash().String()
- }
-
- if remote.Hash().String() != requireHash {
- return fmt.Errorf("remote ref %s required to be %s but is %s", name.String(), requireHash, remote.Hash().String())
- }
- }
- return nil
-}
diff --git a/vendor/github.com/go-git/go-git/v5/repository.go b/vendor/github.com/go-git/go-git/v5/repository.go
deleted file mode 100644
index a57c7141f8d..00000000000
--- a/vendor/github.com/go-git/go-git/v5/repository.go
+++ /dev/null
@@ -1,1886 +0,0 @@
-package git
-
-import (
- "bytes"
- "context"
- "crypto"
- "encoding/hex"
- "errors"
- "fmt"
- "io"
- "os"
- "path"
- "path/filepath"
- "strings"
- "time"
-
- "dario.cat/mergo"
- "github.com/ProtonMail/go-crypto/openpgp"
- "github.com/go-git/go-billy/v5"
- "github.com/go-git/go-billy/v5/osfs"
- "github.com/go-git/go-billy/v5/util"
- "github.com/go-git/go-git/v5/config"
- "github.com/go-git/go-git/v5/internal/path_util"
- "github.com/go-git/go-git/v5/internal/revision"
- "github.com/go-git/go-git/v5/internal/url"
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/cache"
- formatcfg "github.com/go-git/go-git/v5/plumbing/format/config"
- "github.com/go-git/go-git/v5/plumbing/format/packfile"
- "github.com/go-git/go-git/v5/plumbing/hash"
- "github.com/go-git/go-git/v5/plumbing/object"
- "github.com/go-git/go-git/v5/plumbing/storer"
- "github.com/go-git/go-git/v5/storage"
- "github.com/go-git/go-git/v5/storage/filesystem"
- "github.com/go-git/go-git/v5/storage/filesystem/dotgit"
- "github.com/go-git/go-git/v5/utils/ioutil"
-)
-
-// GitDirName this is a special folder where all the git stuff is.
-const GitDirName = ".git"
-
-var (
- // ErrBranchExists an error stating the specified branch already exists
- ErrBranchExists = errors.New("branch already exists")
- // ErrBranchNotFound an error stating the specified branch does not exist
- ErrBranchNotFound = errors.New("branch not found")
- // ErrTagExists an error stating the specified tag already exists
- ErrTagExists = errors.New("tag already exists")
- // ErrTagNotFound an error stating the specified tag does not exist
- ErrTagNotFound = errors.New("tag not found")
- // ErrFetching is returned when the packfile could not be downloaded
- ErrFetching = errors.New("unable to fetch packfile")
-
- ErrInvalidReference = errors.New("invalid reference, should be a tag or a branch")
- ErrRepositoryNotExists = errors.New("repository does not exist")
- ErrRepositoryIncomplete = errors.New("repository's commondir path does not exist")
- ErrRepositoryAlreadyExists = errors.New("repository already exists")
- ErrRemoteNotFound = errors.New("remote not found")
- ErrRemoteExists = errors.New("remote already exists")
- ErrAnonymousRemoteName = errors.New("anonymous remote name must be 'anonymous'")
- ErrWorktreeNotProvided = errors.New("worktree should be provided")
- ErrIsBareRepository = errors.New("worktree not available in a bare repository")
- ErrUnableToResolveCommit = errors.New("unable to resolve commit")
- ErrPackedObjectsNotSupported = errors.New("packed objects not supported")
- ErrSHA256NotSupported = errors.New("go-git was not compiled with SHA256 support")
- ErrAlternatePathNotSupported = errors.New("alternate path must use the file scheme")
- ErrUnsupportedMergeStrategy = errors.New("unsupported merge strategy")
- ErrFastForwardMergeNotPossible = errors.New("not possible to fast-forward merge changes")
-)
-
-// Repository represents a git repository
-type Repository struct {
- Storer storage.Storer
-
- r map[string]*Remote
- wt billy.Filesystem
-}
-
-type InitOptions struct {
- // The default branch (e.g. "refs/heads/master")
- DefaultBranch plumbing.ReferenceName
-}
-
-// Init creates an empty git repository, based on the given Storer and worktree.
-// The worktree Filesystem is optional, if nil a bare repository is created. If
-// the given storer is not empty ErrRepositoryAlreadyExists is returned
-func Init(s storage.Storer, worktree billy.Filesystem) (*Repository, error) {
- options := InitOptions{
- DefaultBranch: plumbing.Master,
- }
- return InitWithOptions(s, worktree, options)
-}
-
-func InitWithOptions(s storage.Storer, worktree billy.Filesystem, options InitOptions) (*Repository, error) {
- if err := initStorer(s); err != nil {
- return nil, err
- }
-
- if options.DefaultBranch == "" {
- options.DefaultBranch = plumbing.Master
- }
-
- if err := options.DefaultBranch.Validate(); err != nil {
- return nil, err
- }
-
- r := newRepository(s, worktree)
- _, err := r.Reference(plumbing.HEAD, false)
- switch err {
- case plumbing.ErrReferenceNotFound:
- case nil:
- return nil, ErrRepositoryAlreadyExists
- default:
- return nil, err
- }
-
- h := plumbing.NewSymbolicReference(plumbing.HEAD, options.DefaultBranch)
- if err := s.SetReference(h); err != nil {
- return nil, err
- }
-
- if worktree == nil {
- _ = r.setIsBare(true)
- return r, nil
- }
-
- return r, setWorktreeAndStoragePaths(r, worktree)
-}
-
-func initStorer(s storer.Storer) error {
- i, ok := s.(storer.Initializer)
- if !ok {
- return nil
- }
-
- return i.Init()
-}
-
-func setWorktreeAndStoragePaths(r *Repository, worktree billy.Filesystem) error {
- type fsBased interface {
- Filesystem() billy.Filesystem
- }
-
- // .git file is only created if the storage is file based and the file
- // system is osfs.OS
- fs, isFSBased := r.Storer.(fsBased)
- if !isFSBased {
- return nil
- }
-
- if err := createDotGitFile(worktree, fs.Filesystem()); err != nil {
- return err
- }
-
- return setConfigWorktree(r, worktree, fs.Filesystem())
-}
-
-func createDotGitFile(worktree, storage billy.Filesystem) error {
- path, err := filepath.Rel(worktree.Root(), storage.Root())
- if err != nil {
- path = storage.Root()
- }
-
- if path == GitDirName {
- // not needed, since the folder is the default place
- return nil
- }
-
- f, err := worktree.Create(GitDirName)
- if err != nil {
- return err
- }
-
- defer f.Close()
- _, err = fmt.Fprintf(f, "gitdir: %s\n", path)
- return err
-}
-
-func setConfigWorktree(r *Repository, worktree, storage billy.Filesystem) error {
- path, err := filepath.Rel(storage.Root(), worktree.Root())
- if err != nil {
- path = worktree.Root()
- }
-
- if path == ".." {
- // not needed, since the folder is the default place
- return nil
- }
-
- cfg, err := r.Config()
- if err != nil {
- return err
- }
-
- cfg.Core.Worktree = path
- return r.Storer.SetConfig(cfg)
-}
-
-// Open opens a git repository using the given Storer and worktree filesystem,
-// if the given storer is complete empty ErrRepositoryNotExists is returned.
-// The worktree can be nil when the repository being opened is bare, if the
-// repository is a normal one (not bare) and worktree is nil the err
-// ErrWorktreeNotProvided is returned
-func Open(s storage.Storer, worktree billy.Filesystem) (*Repository, error) {
- _, err := s.Reference(plumbing.HEAD)
- if err == plumbing.ErrReferenceNotFound {
- return nil, ErrRepositoryNotExists
- }
-
- if err != nil {
- return nil, err
- }
-
- return newRepository(s, worktree), nil
-}
-
-// Clone a repository into the given Storer and worktree Filesystem with the
-// given options, if worktree is nil a bare repository is created. If the given
-// storer is not empty ErrRepositoryAlreadyExists is returned.
-func Clone(s storage.Storer, worktree billy.Filesystem, o *CloneOptions) (*Repository, error) {
- return CloneContext(context.Background(), s, worktree, o)
-}
-
-// CloneContext a repository into the given Storer and worktree Filesystem with
-// the given options, if worktree is nil a bare repository is created. If the
-// given storer is not empty ErrRepositoryAlreadyExists is returned.
-//
-// The provided Context must be non-nil. If the context expires before the
-// operation is complete, an error is returned. The context only affects the
-// transport operations.
-func CloneContext(
- ctx context.Context, s storage.Storer, worktree billy.Filesystem, o *CloneOptions,
-) (*Repository, error) {
- r, err := Init(s, worktree)
- if err != nil {
- return nil, err
- }
-
- return r, r.clone(ctx, o)
-}
-
-// PlainInit create an empty git repository at the given path. isBare defines
-// if the repository will have worktree (non-bare) or not (bare), if the path
-// is not empty ErrRepositoryAlreadyExists is returned.
-func PlainInit(path string, isBare bool) (*Repository, error) {
- return PlainInitWithOptions(path, &PlainInitOptions{
- Bare: isBare,
- })
-}
-
-func PlainInitWithOptions(path string, opts *PlainInitOptions) (*Repository, error) {
- if opts == nil {
- opts = &PlainInitOptions{}
- }
-
- var wt, dot billy.Filesystem
-
- if opts.Bare {
- dot = osfs.New(path)
- } else {
- wt = osfs.New(path)
- dot, _ = wt.Chroot(GitDirName)
- }
-
- s := filesystem.NewStorage(dot, cache.NewObjectLRUDefault())
-
- r, err := InitWithOptions(s, wt, opts.InitOptions)
- if err != nil {
- return nil, err
- }
-
- cfg, err := r.Config()
- if err != nil {
- return nil, err
- }
-
- if opts.ObjectFormat != "" {
- if opts.ObjectFormat == formatcfg.SHA256 && hash.CryptoType != crypto.SHA256 {
- return nil, ErrSHA256NotSupported
- }
-
- cfg.Core.RepositoryFormatVersion = formatcfg.Version_1
- cfg.Extensions.ObjectFormat = opts.ObjectFormat
- }
-
- err = r.Storer.SetConfig(cfg)
- if err != nil {
- return nil, err
- }
-
- return r, err
-}
-
-// PlainOpen opens a git repository from the given path. It detects if the
-// repository is bare or a normal one. If the path doesn't contain a valid
-// repository ErrRepositoryNotExists is returned
-func PlainOpen(path string) (*Repository, error) {
- return PlainOpenWithOptions(path, &PlainOpenOptions{})
-}
-
-// PlainOpenWithOptions opens a git repository from the given path with specific
-// options. See PlainOpen for more info.
-func PlainOpenWithOptions(path string, o *PlainOpenOptions) (*Repository, error) {
- dot, wt, err := dotGitToOSFilesystems(path, o.DetectDotGit)
- if err != nil {
- return nil, err
- }
-
- if _, err := dot.Stat(""); err != nil {
- if os.IsNotExist(err) {
- return nil, ErrRepositoryNotExists
- }
-
- return nil, err
- }
-
- var repositoryFs billy.Filesystem
-
- if o.EnableDotGitCommonDir {
- dotGitCommon, err := dotGitCommonDirectory(dot)
- if err != nil {
- return nil, err
- }
- repositoryFs = dotgit.NewRepositoryFilesystem(dot, dotGitCommon)
- } else {
- repositoryFs = dot
- }
-
- s := filesystem.NewStorage(repositoryFs, cache.NewObjectLRUDefault())
-
- return Open(s, wt)
-}
-
-func dotGitToOSFilesystems(path string, detect bool) (dot, wt billy.Filesystem, err error) {
- path, err = path_util.ReplaceTildeWithHome(path)
- if err != nil {
- return nil, nil, err
- }
-
- if path, err = filepath.Abs(path); err != nil {
- return nil, nil, err
- }
-
- var fs billy.Filesystem
- var fi os.FileInfo
- for {
- fs = osfs.New(path)
-
- pathinfo, err := fs.Stat("/")
- if !os.IsNotExist(err) {
- if pathinfo == nil {
- return nil, nil, err
- }
- if !pathinfo.IsDir() && detect {
- fs = osfs.New(filepath.Dir(path))
- }
- }
-
- fi, err = fs.Stat(GitDirName)
- if err == nil {
- // no error; stop
- break
- }
- if !os.IsNotExist(err) {
- // unknown error; stop
- return nil, nil, err
- }
- if detect {
- // try its parent as long as we haven't reached
- // the root dir
- if dir := filepath.Dir(path); dir != path {
- path = dir
- continue
- }
- }
- // not detecting via parent dirs and the dir does not exist;
- // stop
- return fs, nil, nil
- }
-
- if fi.IsDir() {
- dot, err = fs.Chroot(GitDirName)
- return dot, fs, err
- }
-
- dot, err = dotGitFileToOSFilesystem(path, fs)
- if err != nil {
- return nil, nil, err
- }
-
- return dot, fs, nil
-}
-
-func dotGitFileToOSFilesystem(path string, fs billy.Filesystem) (bfs billy.Filesystem, err error) {
- f, err := fs.Open(GitDirName)
- if err != nil {
- return nil, err
- }
- defer ioutil.CheckClose(f, &err)
-
- b, err := io.ReadAll(f)
- if err != nil {
- return nil, err
- }
-
- line := string(b)
- const prefix = "gitdir: "
- if !strings.HasPrefix(line, prefix) {
- return nil, fmt.Errorf(".git file has no %s prefix", prefix)
- }
-
- gitdir := strings.Split(line[len(prefix):], "\n")[0]
- gitdir = strings.TrimSpace(gitdir)
- if filepath.IsAbs(gitdir) {
- return osfs.New(gitdir), nil
- }
-
- return osfs.New(fs.Join(path, gitdir)), nil
-}
-
-func dotGitCommonDirectory(fs billy.Filesystem) (commonDir billy.Filesystem, err error) {
- f, err := fs.Open("commondir")
- if os.IsNotExist(err) {
- return nil, nil
- }
- if err != nil {
- return nil, err
- }
-
- b, err := io.ReadAll(f)
- if err != nil {
- return nil, err
- }
- if len(b) > 0 {
- path := strings.TrimSpace(string(b))
- if filepath.IsAbs(path) {
- commonDir = osfs.New(path)
- } else {
- commonDir = osfs.New(filepath.Join(fs.Root(), path))
- }
- if _, err := commonDir.Stat(""); err != nil {
- if os.IsNotExist(err) {
- return nil, ErrRepositoryIncomplete
- }
-
- return nil, err
- }
- }
-
- return commonDir, nil
-}
-
-// PlainClone a repository into the path with the given options, isBare defines
-// if the new repository will be bare or normal. If the path is not empty
-// ErrRepositoryAlreadyExists is returned.
-//
-// TODO(mcuadros): move isBare to CloneOptions in v5
-func PlainClone(path string, isBare bool, o *CloneOptions) (*Repository, error) {
- return PlainCloneContext(context.Background(), path, isBare, o)
-}
-
-// PlainCloneContext a repository into the path with the given options, isBare
-// defines if the new repository will be bare or normal. If the path is not empty
-// ErrRepositoryAlreadyExists is returned.
-//
-// The provided Context must be non-nil. If the context expires before the
-// operation is complete, an error is returned. The context only affects the
-// transport operations.
-//
-// TODO(mcuadros): move isBare to CloneOptions in v5
-// TODO(smola): refuse upfront to clone on a non-empty directory in v5, see #1027
-func PlainCloneContext(ctx context.Context, path string, isBare bool, o *CloneOptions) (*Repository, error) {
- cleanup, cleanupParent, err := checkIfCleanupIsNeeded(path)
- if err != nil {
- return nil, err
- }
-
- if o.Mirror {
- isBare = true
- }
- r, err := PlainInit(path, isBare)
- if err != nil {
- return nil, err
- }
-
- err = r.clone(ctx, o)
- if err != nil && err != ErrRepositoryAlreadyExists {
- if cleanup {
- _ = cleanUpDir(path, cleanupParent)
- }
- }
-
- return r, err
-}
-
-func newRepository(s storage.Storer, worktree billy.Filesystem) *Repository {
- return &Repository{
- Storer: s,
- wt: worktree,
- r: make(map[string]*Remote),
- }
-}
-
-func checkIfCleanupIsNeeded(path string) (cleanup bool, cleanParent bool, err error) {
- fi, err := osfs.Default.Stat(path)
- if err != nil {
- if os.IsNotExist(err) {
- return true, true, nil
- }
-
- return false, false, err
- }
-
- if !fi.IsDir() {
- return false, false, fmt.Errorf("path is not a directory: %s", path)
- }
-
- files, err := osfs.Default.ReadDir(path)
- if err != nil {
- return false, false, err
- }
-
- if len(files) == 0 {
- return true, false, nil
- }
-
- return false, false, nil
-}
-
-func cleanUpDir(path string, all bool) error {
- if all {
- return util.RemoveAll(osfs.Default, path)
- }
-
- files, err := osfs.Default.ReadDir(path)
- if err != nil {
- return err
- }
-
- for _, fi := range files {
- if err := util.RemoveAll(osfs.Default, osfs.Default.Join(path, fi.Name())); err != nil {
- return err
- }
- }
-
- return err
-}
-
-// Config return the repository config. In a filesystem backed repository this
-// means read the `.git/config`.
-func (r *Repository) Config() (*config.Config, error) {
- return r.Storer.Config()
-}
-
-// SetConfig marshall and writes the repository config. In a filesystem backed
-// repository this means write the `.git/config`. This function should be called
-// with the result of `Repository.Config` and never with the output of
-// `Repository.ConfigScoped`.
-func (r *Repository) SetConfig(cfg *config.Config) error {
- return r.Storer.SetConfig(cfg)
-}
-
-// ConfigScoped returns the repository config, merged with requested scope and
-// lower. For example if, config.GlobalScope is given the local and global config
-// are returned merged in one config value.
-func (r *Repository) ConfigScoped(scope config.Scope) (*config.Config, error) {
- // TODO(mcuadros): v6, add this as ConfigOptions.Scoped
-
- var err error
- system := config.NewConfig()
- if scope >= config.SystemScope {
- system, err = config.LoadConfig(config.SystemScope)
- if err != nil {
- return nil, err
- }
- }
-
- global := config.NewConfig()
- if scope >= config.GlobalScope {
- global, err = config.LoadConfig(config.GlobalScope)
- if err != nil {
- return nil, err
- }
- }
-
- local, err := r.Storer.Config()
- if err != nil {
- return nil, err
- }
-
- _ = mergo.Merge(global, system)
- _ = mergo.Merge(local, global)
- return local, nil
-}
-
-// Remote return a remote if exists
-func (r *Repository) Remote(name string) (*Remote, error) {
- cfg, err := r.Config()
- if err != nil {
- return nil, err
- }
-
- c, ok := cfg.Remotes[name]
- if !ok {
- return nil, ErrRemoteNotFound
- }
-
- return NewRemote(r.Storer, c), nil
-}
-
-// Remotes returns a list with all the remotes
-func (r *Repository) Remotes() ([]*Remote, error) {
- cfg, err := r.Config()
- if err != nil {
- return nil, err
- }
-
- remotes := make([]*Remote, len(cfg.Remotes))
-
- var i int
- for _, c := range cfg.Remotes {
- remotes[i] = NewRemote(r.Storer, c)
- i++
- }
-
- return remotes, nil
-}
-
-// CreateRemote creates a new remote
-func (r *Repository) CreateRemote(c *config.RemoteConfig) (*Remote, error) {
- if err := c.Validate(); err != nil {
- return nil, err
- }
-
- remote := NewRemote(r.Storer, c)
-
- cfg, err := r.Config()
- if err != nil {
- return nil, err
- }
-
- if _, ok := cfg.Remotes[c.Name]; ok {
- return nil, ErrRemoteExists
- }
-
- cfg.Remotes[c.Name] = c
- return remote, r.Storer.SetConfig(cfg)
-}
-
-// CreateRemoteAnonymous creates a new anonymous remote. c.Name must be "anonymous".
-// It's used like 'git fetch git@github.com:src-d/go-git.git master:master'.
-func (r *Repository) CreateRemoteAnonymous(c *config.RemoteConfig) (*Remote, error) {
- if err := c.Validate(); err != nil {
- return nil, err
- }
-
- if c.Name != "anonymous" {
- return nil, ErrAnonymousRemoteName
- }
-
- remote := NewRemote(r.Storer, c)
-
- return remote, nil
-}
-
-// DeleteRemote delete a remote from the repository and delete the config
-func (r *Repository) DeleteRemote(name string) error {
- cfg, err := r.Config()
- if err != nil {
- return err
- }
-
- if _, ok := cfg.Remotes[name]; !ok {
- return ErrRemoteNotFound
- }
-
- delete(cfg.Remotes, name)
- return r.Storer.SetConfig(cfg)
-}
-
-// Branch return a Branch if exists
-func (r *Repository) Branch(name string) (*config.Branch, error) {
- cfg, err := r.Config()
- if err != nil {
- return nil, err
- }
-
- b, ok := cfg.Branches[name]
- if !ok {
- return nil, ErrBranchNotFound
- }
-
- return b, nil
-}
-
-// CreateBranch creates a new Branch
-func (r *Repository) CreateBranch(c *config.Branch) error {
- if err := c.Validate(); err != nil {
- return err
- }
-
- cfg, err := r.Config()
- if err != nil {
- return err
- }
-
- if _, ok := cfg.Branches[c.Name]; ok {
- return ErrBranchExists
- }
-
- cfg.Branches[c.Name] = c
- return r.Storer.SetConfig(cfg)
-}
-
-// DeleteBranch delete a Branch from the repository and delete the config
-func (r *Repository) DeleteBranch(name string) error {
- cfg, err := r.Config()
- if err != nil {
- return err
- }
-
- if _, ok := cfg.Branches[name]; !ok {
- return ErrBranchNotFound
- }
-
- delete(cfg.Branches, name)
- return r.Storer.SetConfig(cfg)
-}
-
-// CreateTag creates a tag. If opts is included, the tag is an annotated tag,
-// otherwise a lightweight tag is created.
-func (r *Repository) CreateTag(name string, hash plumbing.Hash, opts *CreateTagOptions) (*plumbing.Reference, error) {
- rname := plumbing.NewTagReferenceName(name)
- if err := rname.Validate(); err != nil {
- return nil, err
- }
-
- _, err := r.Storer.Reference(rname)
- switch err {
- case nil:
- // Tag exists, this is an error
- return nil, ErrTagExists
- case plumbing.ErrReferenceNotFound:
- // Tag missing, available for creation, pass this
- default:
- // Some other error
- return nil, err
- }
-
- var target plumbing.Hash
- if opts != nil {
- target, err = r.createTagObject(name, hash, opts)
- if err != nil {
- return nil, err
- }
- } else {
- target = hash
- }
-
- ref := plumbing.NewHashReference(rname, target)
- if err = r.Storer.SetReference(ref); err != nil {
- return nil, err
- }
-
- return ref, nil
-}
-
-func (r *Repository) createTagObject(name string, hash plumbing.Hash, opts *CreateTagOptions) (plumbing.Hash, error) {
- if err := opts.Validate(r, hash); err != nil {
- return plumbing.ZeroHash, err
- }
-
- rawobj, err := object.GetObject(r.Storer, hash)
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- tag := &object.Tag{
- Name: name,
- Tagger: *opts.Tagger,
- Message: opts.Message,
- TargetType: rawobj.Type(),
- Target: hash,
- }
-
- if opts.SignKey != nil {
- sig, err := r.buildTagSignature(tag, opts.SignKey)
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- tag.PGPSignature = sig
- }
-
- obj := r.Storer.NewEncodedObject()
- if err := tag.Encode(obj); err != nil {
- return plumbing.ZeroHash, err
- }
-
- return r.Storer.SetEncodedObject(obj)
-}
-
-func (r *Repository) buildTagSignature(tag *object.Tag, signKey *openpgp.Entity) (string, error) {
- encoded := &plumbing.MemoryObject{}
- if err := tag.Encode(encoded); err != nil {
- return "", err
- }
-
- rdr, err := encoded.Reader()
- if err != nil {
- return "", err
- }
-
- var b bytes.Buffer
- if err := openpgp.ArmoredDetachSign(&b, signKey, rdr, nil); err != nil {
- return "", err
- }
-
- return b.String(), nil
-}
-
-// Tag returns a tag from the repository.
-//
-// If you want to check to see if the tag is an annotated tag, you can call
-// TagObject on the hash of the reference in ForEach:
-//
-// ref, err := r.Tag("v0.1.0")
-// if err != nil {
-// // Handle error
-// }
-//
-// obj, err := r.TagObject(ref.Hash())
-// switch err {
-// case nil:
-// // Tag object present
-// case plumbing.ErrObjectNotFound:
-// // Not a tag object
-// default:
-// // Some other error
-// }
-func (r *Repository) Tag(name string) (*plumbing.Reference, error) {
- ref, err := r.Reference(plumbing.ReferenceName(path.Join("refs", "tags", name)), false)
- if err != nil {
- if err == plumbing.ErrReferenceNotFound {
- // Return a friendly error for this one, versus just ReferenceNotFound.
- return nil, ErrTagNotFound
- }
-
- return nil, err
- }
-
- return ref, nil
-}
-
-// DeleteTag deletes a tag from the repository.
-func (r *Repository) DeleteTag(name string) error {
- _, err := r.Tag(name)
- if err != nil {
- return err
- }
-
- return r.Storer.RemoveReference(plumbing.ReferenceName(path.Join("refs", "tags", name)))
-}
-
-func (r *Repository) resolveToCommitHash(h plumbing.Hash) (plumbing.Hash, error) {
- obj, err := r.Storer.EncodedObject(plumbing.AnyObject, h)
- if err != nil {
- return plumbing.ZeroHash, err
- }
- switch obj.Type() {
- case plumbing.TagObject:
- t, err := object.DecodeTag(r.Storer, obj)
- if err != nil {
- return plumbing.ZeroHash, err
- }
- return r.resolveToCommitHash(t.Target)
- case plumbing.CommitObject:
- return h, nil
- default:
- return plumbing.ZeroHash, ErrUnableToResolveCommit
- }
-}
-
-// Clone clones a remote repository
-func (r *Repository) clone(ctx context.Context, o *CloneOptions) error {
- if err := o.Validate(); err != nil {
- return err
- }
-
- c := &config.RemoteConfig{
- Name: o.RemoteName,
- URLs: []string{o.URL},
- Fetch: r.cloneRefSpec(o),
- Mirror: o.Mirror,
- }
-
- if _, err := r.CreateRemote(c); err != nil {
- return err
- }
-
- // When the repository to clone is on the local machine,
- // instead of using hard links, automatically setup .git/objects/info/alternates
- // to share the objects with the source repository
- if o.Shared {
- if !url.IsLocalEndpoint(o.URL) {
- return ErrAlternatePathNotSupported
- }
- altpath := o.URL
- remoteRepo, err := PlainOpen(o.URL)
- if err != nil {
- return fmt.Errorf("failed to open remote repository: %w", err)
- }
- conf, err := remoteRepo.Config()
- if err != nil {
- return fmt.Errorf("failed to read remote repository configuration: %w", err)
- }
- if !conf.Core.IsBare {
- altpath = path.Join(altpath, GitDirName)
- }
- if err := r.Storer.AddAlternate(altpath); err != nil {
- return fmt.Errorf("failed to add alternate file to git objects dir: %w", err)
- }
- }
-
- ref, err := r.fetchAndUpdateReferences(ctx, &FetchOptions{
- RefSpecs: c.Fetch,
- Depth: o.Depth,
- Auth: o.Auth,
- Progress: o.Progress,
- Tags: o.Tags,
- RemoteName: o.RemoteName,
- InsecureSkipTLS: o.InsecureSkipTLS,
- CABundle: o.CABundle,
- ProxyOptions: o.ProxyOptions,
- }, o.ReferenceName)
- if err != nil {
- return err
- }
-
- if r.wt != nil && !o.NoCheckout {
- w, err := r.Worktree()
- if err != nil {
- return err
- }
-
- head, err := r.Head()
- if err != nil {
- return err
- }
-
- if err := w.Reset(&ResetOptions{
- Mode: MergeReset,
- Commit: head.Hash(),
- }); err != nil {
- return err
- }
-
- if o.RecurseSubmodules != NoRecurseSubmodules {
- if err := w.updateSubmodules(&SubmoduleUpdateOptions{
- RecurseSubmodules: o.RecurseSubmodules,
- Depth: func() int {
- if o.ShallowSubmodules {
- return 1
- }
- return 0
- }(),
- Auth: o.Auth,
- }); err != nil {
- return err
- }
- }
- }
-
- if err := r.updateRemoteConfigIfNeeded(o, c, ref); err != nil {
- return err
- }
-
- if !o.Mirror && ref.Name().IsBranch() {
- branchRef := ref.Name()
- branchName := strings.Split(string(branchRef), "refs/heads/")[1]
-
- b := &config.Branch{
- Name: branchName,
- Merge: branchRef,
- }
-
- if o.RemoteName == "" {
- b.Remote = "origin"
- } else {
- b.Remote = o.RemoteName
- }
-
- if err := r.CreateBranch(b); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-const (
- refspecTag = "+refs/tags/%s:refs/tags/%[1]s"
- refspecSingleBranch = "+refs/heads/%s:refs/remotes/%s/%[1]s"
- refspecSingleBranchHEAD = "+HEAD:refs/remotes/%s/HEAD"
-)
-
-func (r *Repository) cloneRefSpec(o *CloneOptions) []config.RefSpec {
- switch {
- case o.Mirror:
- return []config.RefSpec{"+refs/*:refs/*"}
- case o.ReferenceName.IsTag():
- return []config.RefSpec{
- config.RefSpec(fmt.Sprintf(refspecTag, o.ReferenceName.Short())),
- }
- case o.SingleBranch && o.ReferenceName == plumbing.HEAD:
- return []config.RefSpec{
- config.RefSpec(fmt.Sprintf(refspecSingleBranchHEAD, o.RemoteName)),
- }
- case o.SingleBranch:
- return []config.RefSpec{
- config.RefSpec(fmt.Sprintf(refspecSingleBranch, o.ReferenceName.Short(), o.RemoteName)),
- }
- default:
- return []config.RefSpec{
- config.RefSpec(fmt.Sprintf(config.DefaultFetchRefSpec, o.RemoteName)),
- }
- }
-}
-
-func (r *Repository) setIsBare(isBare bool) error {
- cfg, err := r.Config()
- if err != nil {
- return err
- }
-
- cfg.Core.IsBare = isBare
- return r.Storer.SetConfig(cfg)
-}
-
-func (r *Repository) updateRemoteConfigIfNeeded(o *CloneOptions, c *config.RemoteConfig, head *plumbing.Reference) error {
- if !o.SingleBranch {
- return nil
- }
-
- c.Fetch = r.cloneRefSpec(o)
-
- cfg, err := r.Config()
- if err != nil {
- return err
- }
-
- cfg.Remotes[c.Name] = c
- return r.Storer.SetConfig(cfg)
-}
-
-func (r *Repository) fetchAndUpdateReferences(
- ctx context.Context, o *FetchOptions, ref plumbing.ReferenceName,
-) (*plumbing.Reference, error) {
-
- if err := o.Validate(); err != nil {
- return nil, err
- }
-
- remote, err := r.Remote(o.RemoteName)
- if err != nil {
- return nil, err
- }
-
- objsUpdated := true
- remoteRefs, err := remote.fetch(ctx, o)
- if err == NoErrAlreadyUpToDate {
- objsUpdated = false
- } else if err == packfile.ErrEmptyPackfile {
- return nil, ErrFetching
- } else if err != nil {
- return nil, err
- }
-
- resolvedRef, err := expand_ref(remoteRefs, ref)
- if err != nil {
- return nil, err
- }
-
- refsUpdated, err := r.updateReferences(remote.c.Fetch, resolvedRef)
- if err != nil {
- return nil, err
- }
-
- if !objsUpdated && !refsUpdated {
- return nil, NoErrAlreadyUpToDate
- }
-
- return resolvedRef, nil
-}
-
-func (r *Repository) updateReferences(spec []config.RefSpec,
- resolvedRef *plumbing.Reference) (updated bool, err error) {
-
- if !resolvedRef.Name().IsBranch() {
- // Detached HEAD mode
- h, err := r.resolveToCommitHash(resolvedRef.Hash())
- if err != nil {
- return false, err
- }
- head := plumbing.NewHashReference(plumbing.HEAD, h)
- return updateReferenceStorerIfNeeded(r.Storer, head)
- }
-
- refs := []*plumbing.Reference{
- // Create local reference for the resolved ref
- resolvedRef,
- // Create local symbolic HEAD
- plumbing.NewSymbolicReference(plumbing.HEAD, resolvedRef.Name()),
- }
-
- refs = append(refs, r.calculateRemoteHeadReference(spec, resolvedRef)...)
-
- for _, ref := range refs {
- u, err := updateReferenceStorerIfNeeded(r.Storer, ref)
- if err != nil {
- return updated, err
- }
-
- if u {
- updated = true
- }
- }
-
- return
-}
-
-func (r *Repository) calculateRemoteHeadReference(spec []config.RefSpec,
- resolvedHead *plumbing.Reference) []*plumbing.Reference {
-
- var refs []*plumbing.Reference
-
- // Create resolved HEAD reference with remote prefix if it does not
- // exist. This is needed when using single branch and HEAD.
- for _, rs := range spec {
- name := resolvedHead.Name()
- if !rs.Match(name) {
- continue
- }
-
- name = rs.Dst(name)
- _, err := r.Storer.Reference(name)
- if err == plumbing.ErrReferenceNotFound {
- refs = append(refs, plumbing.NewHashReference(name, resolvedHead.Hash()))
- }
- }
-
- return refs
-}
-
-func checkAndUpdateReferenceStorerIfNeeded(
- s storer.ReferenceStorer, r, old *plumbing.Reference) (
- updated bool, err error) {
- p, err := s.Reference(r.Name())
- if err != nil && err != plumbing.ErrReferenceNotFound {
- return false, err
- }
-
- // we use the string method to compare references, is the easiest way
- if err == plumbing.ErrReferenceNotFound || r.String() != p.String() {
- if err := s.CheckAndSetReference(r, old); err != nil {
- return false, err
- }
-
- return true, nil
- }
-
- return false, nil
-}
-
-func updateReferenceStorerIfNeeded(
- s storer.ReferenceStorer, r *plumbing.Reference) (updated bool, err error) {
- return checkAndUpdateReferenceStorerIfNeeded(s, r, nil)
-}
-
-// Fetch fetches references along with the objects necessary to complete
-// their histories, from the remote named as FetchOptions.RemoteName.
-//
-// Returns nil if the operation is successful, NoErrAlreadyUpToDate if there are
-// no changes to be fetched, or an error.
-func (r *Repository) Fetch(o *FetchOptions) error {
- return r.FetchContext(context.Background(), o)
-}
-
-// FetchContext fetches references along with the objects necessary to complete
-// their histories, from the remote named as FetchOptions.RemoteName.
-//
-// Returns nil if the operation is successful, NoErrAlreadyUpToDate if there are
-// no changes to be fetched, or an error.
-//
-// The provided Context must be non-nil. If the context expires before the
-// operation is complete, an error is returned. The context only affects the
-// transport operations.
-func (r *Repository) FetchContext(ctx context.Context, o *FetchOptions) error {
- if err := o.Validate(); err != nil {
- return err
- }
-
- remote, err := r.Remote(o.RemoteName)
- if err != nil {
- return err
- }
-
- return remote.FetchContext(ctx, o)
-}
-
-// Push performs a push to the remote. Returns NoErrAlreadyUpToDate if
-// the remote was already up-to-date, from the remote named as
-// FetchOptions.RemoteName.
-func (r *Repository) Push(o *PushOptions) error {
- return r.PushContext(context.Background(), o)
-}
-
-// PushContext performs a push to the remote. Returns NoErrAlreadyUpToDate if
-// the remote was already up-to-date, from the remote named as
-// FetchOptions.RemoteName.
-//
-// The provided Context must be non-nil. If the context expires before the
-// operation is complete, an error is returned. The context only affects the
-// transport operations.
-func (r *Repository) PushContext(ctx context.Context, o *PushOptions) error {
- if err := o.Validate(); err != nil {
- return err
- }
-
- remote, err := r.Remote(o.RemoteName)
- if err != nil {
- return err
- }
-
- return remote.PushContext(ctx, o)
-}
-
-// Log returns the commit history from the given LogOptions.
-func (r *Repository) Log(o *LogOptions) (object.CommitIter, error) {
- fn := commitIterFunc(o.Order)
- if fn == nil {
- return nil, fmt.Errorf("invalid Order=%v", o.Order)
- }
-
- var (
- it object.CommitIter
- err error
- )
- if o.All {
- it, err = r.logAll(fn)
- } else {
- it, err = r.log(o.From, fn)
- }
-
- if err != nil {
- return nil, err
- }
-
- if o.FileName != nil {
- // for `git log --all` also check parent (if the next commit comes from the real parent)
- it = r.logWithFile(*o.FileName, it, o.All)
- }
- if o.PathFilter != nil {
- it = r.logWithPathFilter(o.PathFilter, it, o.All)
- }
-
- if o.Since != nil || o.Until != nil {
- limitOptions := object.LogLimitOptions{Since: o.Since, Until: o.Until}
- it = r.logWithLimit(it, limitOptions)
- }
-
- return it, nil
-}
-
-func (r *Repository) log(from plumbing.Hash, commitIterFunc func(*object.Commit) object.CommitIter) (object.CommitIter, error) {
- h := from
- if from == plumbing.ZeroHash {
- head, err := r.Head()
- if err != nil {
- return nil, err
- }
-
- h = head.Hash()
- }
-
- commit, err := r.CommitObject(h)
- if err != nil {
- return nil, err
- }
- return commitIterFunc(commit), nil
-}
-
-func (r *Repository) logAll(commitIterFunc func(*object.Commit) object.CommitIter) (object.CommitIter, error) {
- return object.NewCommitAllIter(r.Storer, commitIterFunc)
-}
-
-func (*Repository) logWithFile(fileName string, commitIter object.CommitIter, checkParent bool) object.CommitIter {
- return object.NewCommitPathIterFromIter(
- func(path string) bool {
- return path == fileName
- },
- commitIter,
- checkParent,
- )
-}
-
-func (*Repository) logWithPathFilter(pathFilter func(string) bool, commitIter object.CommitIter, checkParent bool) object.CommitIter {
- return object.NewCommitPathIterFromIter(
- pathFilter,
- commitIter,
- checkParent,
- )
-}
-
-func (*Repository) logWithLimit(commitIter object.CommitIter, limitOptions object.LogLimitOptions) object.CommitIter {
- return object.NewCommitLimitIterFromIter(commitIter, limitOptions)
-}
-
-func commitIterFunc(order LogOrder) func(c *object.Commit) object.CommitIter {
- switch order {
- case LogOrderDefault:
- return func(c *object.Commit) object.CommitIter {
- return object.NewCommitPreorderIter(c, nil, nil)
- }
- case LogOrderDFS:
- return func(c *object.Commit) object.CommitIter {
- return object.NewCommitPreorderIter(c, nil, nil)
- }
- case LogOrderDFSPost:
- return func(c *object.Commit) object.CommitIter {
- return object.NewCommitPostorderIter(c, nil)
- }
- case LogOrderBSF:
- return func(c *object.Commit) object.CommitIter {
- return object.NewCommitIterBSF(c, nil, nil)
- }
- case LogOrderCommitterTime:
- return func(c *object.Commit) object.CommitIter {
- return object.NewCommitIterCTime(c, nil, nil)
- }
- }
- return nil
-}
-
-// Tags returns all the tag References in a repository.
-//
-// If you want to check to see if the tag is an annotated tag, you can call
-// TagObject on the hash Reference passed in through ForEach:
-//
-// iter, err := r.Tags()
-// if err != nil {
-// // Handle error
-// }
-//
-// if err := iter.ForEach(func (ref *plumbing.Reference) error {
-// obj, err := r.TagObject(ref.Hash())
-// switch err {
-// case nil:
-// // Tag object present
-// case plumbing.ErrObjectNotFound:
-// // Not a tag object
-// default:
-// // Some other error
-// return err
-// }
-// }); err != nil {
-// // Handle outer iterator error
-// }
-func (r *Repository) Tags() (storer.ReferenceIter, error) {
- refIter, err := r.Storer.IterReferences()
- if err != nil {
- return nil, err
- }
-
- return storer.NewReferenceFilteredIter(
- func(r *plumbing.Reference) bool {
- return r.Name().IsTag()
- }, refIter), nil
-}
-
-// Branches returns all the References that are Branches.
-func (r *Repository) Branches() (storer.ReferenceIter, error) {
- refIter, err := r.Storer.IterReferences()
- if err != nil {
- return nil, err
- }
-
- return storer.NewReferenceFilteredIter(
- func(r *plumbing.Reference) bool {
- return r.Name().IsBranch()
- }, refIter), nil
-}
-
-// Notes returns all the References that are notes. For more information:
-// https://git-scm.com/docs/git-notes
-func (r *Repository) Notes() (storer.ReferenceIter, error) {
- refIter, err := r.Storer.IterReferences()
- if err != nil {
- return nil, err
- }
-
- return storer.NewReferenceFilteredIter(
- func(r *plumbing.Reference) bool {
- return r.Name().IsNote()
- }, refIter), nil
-}
-
-// TreeObject return a Tree with the given hash. If not found
-// plumbing.ErrObjectNotFound is returned
-func (r *Repository) TreeObject(h plumbing.Hash) (*object.Tree, error) {
- return object.GetTree(r.Storer, h)
-}
-
-// TreeObjects returns an unsorted TreeIter with all the trees in the repository
-func (r *Repository) TreeObjects() (*object.TreeIter, error) {
- iter, err := r.Storer.IterEncodedObjects(plumbing.TreeObject)
- if err != nil {
- return nil, err
- }
-
- return object.NewTreeIter(r.Storer, iter), nil
-}
-
-// CommitObject return a Commit with the given hash. If not found
-// plumbing.ErrObjectNotFound is returned.
-func (r *Repository) CommitObject(h plumbing.Hash) (*object.Commit, error) {
- return object.GetCommit(r.Storer, h)
-}
-
-// CommitObjects returns an unsorted CommitIter with all the commits in the repository.
-func (r *Repository) CommitObjects() (object.CommitIter, error) {
- iter, err := r.Storer.IterEncodedObjects(plumbing.CommitObject)
- if err != nil {
- return nil, err
- }
-
- return object.NewCommitIter(r.Storer, iter), nil
-}
-
-// BlobObject returns a Blob with the given hash. If not found
-// plumbing.ErrObjectNotFound is returned.
-func (r *Repository) BlobObject(h plumbing.Hash) (*object.Blob, error) {
- return object.GetBlob(r.Storer, h)
-}
-
-// BlobObjects returns an unsorted BlobIter with all the blobs in the repository.
-func (r *Repository) BlobObjects() (*object.BlobIter, error) {
- iter, err := r.Storer.IterEncodedObjects(plumbing.BlobObject)
- if err != nil {
- return nil, err
- }
-
- return object.NewBlobIter(r.Storer, iter), nil
-}
-
-// TagObject returns a Tag with the given hash. If not found
-// plumbing.ErrObjectNotFound is returned. This method only returns
-// annotated Tags, no lightweight Tags.
-func (r *Repository) TagObject(h plumbing.Hash) (*object.Tag, error) {
- return object.GetTag(r.Storer, h)
-}
-
-// TagObjects returns a unsorted TagIter that can step through all of the annotated
-// tags in the repository.
-func (r *Repository) TagObjects() (*object.TagIter, error) {
- iter, err := r.Storer.IterEncodedObjects(plumbing.TagObject)
- if err != nil {
- return nil, err
- }
-
- return object.NewTagIter(r.Storer, iter), nil
-}
-
-// Object returns an Object with the given hash. If not found
-// plumbing.ErrObjectNotFound is returned.
-func (r *Repository) Object(t plumbing.ObjectType, h plumbing.Hash) (object.Object, error) {
- obj, err := r.Storer.EncodedObject(t, h)
- if err != nil {
- return nil, err
- }
-
- return object.DecodeObject(r.Storer, obj)
-}
-
-// Objects returns an unsorted ObjectIter with all the objects in the repository.
-func (r *Repository) Objects() (*object.ObjectIter, error) {
- iter, err := r.Storer.IterEncodedObjects(plumbing.AnyObject)
- if err != nil {
- return nil, err
- }
-
- return object.NewObjectIter(r.Storer, iter), nil
-}
-
-// Head returns the reference where HEAD is pointing to.
-func (r *Repository) Head() (*plumbing.Reference, error) {
- return storer.ResolveReference(r.Storer, plumbing.HEAD)
-}
-
-// Reference returns the reference for a given reference name. If resolved is
-// true, any symbolic reference will be resolved.
-func (r *Repository) Reference(name plumbing.ReferenceName, resolved bool) (
- *plumbing.Reference, error) {
-
- if resolved {
- return storer.ResolveReference(r.Storer, name)
- }
-
- return r.Storer.Reference(name)
-}
-
-// References returns an unsorted ReferenceIter for all references.
-func (r *Repository) References() (storer.ReferenceIter, error) {
- return r.Storer.IterReferences()
-}
-
-// Worktree returns a worktree based on the given fs, if nil the default
-// worktree will be used.
-func (r *Repository) Worktree() (*Worktree, error) {
- if r.wt == nil {
- return nil, ErrIsBareRepository
- }
-
- return &Worktree{r: r, Filesystem: r.wt}, nil
-}
-
-func expand_ref(s storer.ReferenceStorer, ref plumbing.ReferenceName) (*plumbing.Reference, error) {
- // For improving troubleshooting, this preserves the error for the provided `ref`,
- // and returns the error for that specific ref in case all parse rules fails.
- var ret error
- for _, rule := range plumbing.RefRevParseRules {
- resolvedRef, err := storer.ResolveReference(s, plumbing.ReferenceName(fmt.Sprintf(rule, ref)))
-
- if err == nil {
- return resolvedRef, nil
- } else if ret == nil {
- ret = err
- }
- }
-
- return nil, ret
-}
-
-// ResolveRevision resolves revision to corresponding hash. It will always
-// resolve to a commit hash, not a tree or annotated tag.
-//
-// Implemented resolvers : HEAD, branch, tag, heads/branch, refs/heads/branch,
-// refs/tags/tag, refs/remotes/origin/branch, refs/remotes/origin/HEAD, tilde and caret (HEAD~1, master~^, tag~2, ref/heads/master~1, ...), selection by text (HEAD^{/fix nasty bug}), hash (prefix and full)
-func (r *Repository) ResolveRevision(in plumbing.Revision) (*plumbing.Hash, error) {
- rev := in.String()
- if rev == "" {
- return &plumbing.ZeroHash, plumbing.ErrReferenceNotFound
- }
-
- p := revision.NewParserFromString(rev)
- items, err := p.Parse()
-
- if err != nil {
- return nil, err
- }
-
- var commit *object.Commit
-
- for _, item := range items {
- switch item := item.(type) {
- case revision.Ref:
- revisionRef := item
-
- var tryHashes []plumbing.Hash
-
- tryHashes = append(tryHashes, r.resolveHashPrefix(string(revisionRef))...)
-
- ref, err := expand_ref(r.Storer, plumbing.ReferenceName(revisionRef))
- if err == nil {
- tryHashes = append(tryHashes, ref.Hash())
- }
-
- // in ambiguous cases, `git rev-parse` will emit a warning, but
- // will always return the oid in preference to a ref; we don't have
- // the ability to emit a warning here, so (for speed purposes)
- // don't bother to detect the ambiguity either, just return in the
- // priority that git would.
- gotOne := false
- for _, hash := range tryHashes {
- commitObj, err := r.CommitObject(hash)
- if err == nil {
- commit = commitObj
- gotOne = true
- break
- }
-
- tagObj, err := r.TagObject(hash)
- if err == nil {
- // If the tag target lookup fails here, this most likely
- // represents some sort of repo corruption, so let the
- // error bubble up.
- tagCommit, err := tagObj.Commit()
- if err != nil {
- return &plumbing.ZeroHash, err
- }
- commit = tagCommit
- gotOne = true
- break
- }
- }
-
- if !gotOne {
- return &plumbing.ZeroHash, plumbing.ErrReferenceNotFound
- }
-
- case revision.CaretPath:
- depth := item.Depth
-
- if depth == 0 {
- break
- }
-
- iter := commit.Parents()
-
- c, err := iter.Next()
-
- if err != nil {
- return &plumbing.ZeroHash, err
- }
-
- if depth == 1 {
- commit = c
-
- break
- }
-
- c, err = iter.Next()
-
- if err != nil {
- return &plumbing.ZeroHash, err
- }
-
- commit = c
- case revision.TildePath:
- for i := 0; i < item.Depth; i++ {
- c, err := commit.Parents().Next()
-
- if err != nil {
- return &plumbing.ZeroHash, err
- }
-
- commit = c
- }
- case revision.CaretReg:
- history := object.NewCommitPreorderIter(commit, nil, nil)
-
- re := item.Regexp
- negate := item.Negate
-
- var c *object.Commit
-
- err := history.ForEach(func(hc *object.Commit) error {
- if !negate && re.MatchString(hc.Message) {
- c = hc
- return storer.ErrStop
- }
-
- if negate && !re.MatchString(hc.Message) {
- c = hc
- return storer.ErrStop
- }
-
- return nil
- })
- if err != nil {
- return &plumbing.ZeroHash, err
- }
-
- if c == nil {
- return &plumbing.ZeroHash, fmt.Errorf("no commit message match regexp: %q", re.String())
- }
-
- commit = c
- }
- }
-
- if commit == nil {
- return &plumbing.ZeroHash, plumbing.ErrReferenceNotFound
- }
-
- return &commit.Hash, nil
-}
-
-// resolveHashPrefix returns a list of potential hashes that the given string
-// is a prefix of. It quietly swallows errors, returning nil.
-func (r *Repository) resolveHashPrefix(hashStr string) []plumbing.Hash {
- // Handle complete and partial hashes.
- // plumbing.NewHash forces args into a full 20 byte hash, which isn't suitable
- // for partial hashes since they will become zero-filled.
-
- if hashStr == "" {
- return nil
- }
- if len(hashStr) == len(plumbing.ZeroHash)*2 {
- // Only a full hash is possible.
- hexb, err := hex.DecodeString(hashStr)
- if err != nil {
- return nil
- }
- var h plumbing.Hash
- copy(h[:], hexb)
- return []plumbing.Hash{h}
- }
-
- // Partial hash.
- // hex.DecodeString only decodes to complete bytes, so only works with pairs of hex digits.
- evenHex := hashStr[:len(hashStr)&^1]
- hexb, err := hex.DecodeString(evenHex)
- if err != nil {
- return nil
- }
- candidates := expandPartialHash(r.Storer, hexb)
- if len(evenHex) == len(hashStr) {
- // The prefix was an exact number of bytes.
- return candidates
- }
- // Do another prefix check to ensure the dangling nybble is correct.
- var hashes []plumbing.Hash
- for _, h := range candidates {
- if strings.HasPrefix(h.String(), hashStr) {
- hashes = append(hashes, h)
- }
- }
- return hashes
-}
-
-type RepackConfig struct {
- // UseRefDeltas configures whether packfile encoder will use reference deltas.
- // By default OFSDeltaObject is used.
- UseRefDeltas bool
- // OnlyDeletePacksOlderThan if set to non-zero value
- // selects only objects older than the time provided.
- OnlyDeletePacksOlderThan time.Time
-}
-
-func (r *Repository) RepackObjects(cfg *RepackConfig) (err error) {
- pos, ok := r.Storer.(storer.PackedObjectStorer)
- if !ok {
- return ErrPackedObjectsNotSupported
- }
-
- // Get the existing object packs.
- hs, err := pos.ObjectPacks()
- if err != nil {
- return err
- }
-
- // Create a new pack.
- nh, err := r.createNewObjectPack(cfg)
- if err != nil {
- return err
- }
-
- // Delete old packs.
- for _, h := range hs {
- // Skip if new hash is the same as an old one.
- if h == nh {
- continue
- }
- err = pos.DeleteOldObjectPackAndIndex(h, cfg.OnlyDeletePacksOlderThan)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// Merge merges the reference branch into the current branch.
-//
-// If the merge is not possible (or supported) returns an error without changing
-// the HEAD for the current branch. Possible errors include:
-// - The merge strategy is not supported.
-// - The specific strategy cannot be used (e.g. using FastForwardMerge when one is not possible).
-func (r *Repository) Merge(ref plumbing.Reference, opts MergeOptions) error {
- if opts.Strategy != FastForwardMerge {
- return ErrUnsupportedMergeStrategy
- }
-
- // Ignore error as not having a shallow list is optional here.
- shallowList, _ := r.Storer.Shallow()
- var earliestShallow *plumbing.Hash
- if len(shallowList) > 0 {
- earliestShallow = &shallowList[0]
- }
-
- head, err := r.Head()
- if err != nil {
- return err
- }
-
- ff, err := isFastForward(r.Storer, head.Hash(), ref.Hash(), earliestShallow)
- if err != nil {
- return err
- }
-
- if !ff {
- return ErrFastForwardMergeNotPossible
- }
-
- return r.Storer.SetReference(plumbing.NewHashReference(head.Name(), ref.Hash()))
-}
-
-// createNewObjectPack is a helper for RepackObjects taking care
-// of creating a new pack. It is used so the PackfileWriter
-// deferred close has the right scope.
-func (r *Repository) createNewObjectPack(cfg *RepackConfig) (h plumbing.Hash, err error) {
- ow := newObjectWalker(r.Storer)
- err = ow.walkAllRefs()
- if err != nil {
- return h, err
- }
- objs := make([]plumbing.Hash, 0, len(ow.seen))
- for h := range ow.seen {
- objs = append(objs, h)
- }
- pfw, ok := r.Storer.(storer.PackfileWriter)
- if !ok {
- return h, fmt.Errorf("Repository storer is not a storer.PackfileWriter")
- }
- wc, err := pfw.PackfileWriter()
- if err != nil {
- return h, err
- }
- defer ioutil.CheckClose(wc, &err)
- scfg, err := r.Config()
- if err != nil {
- return h, err
- }
- enc := packfile.NewEncoder(wc, r.Storer, cfg.UseRefDeltas)
- h, err = enc.Encode(objs, scfg.Pack.Window)
- if err != nil {
- return h, err
- }
-
- // Delete the packed, loose objects.
- if los, ok := r.Storer.(storer.LooseObjectStorer); ok {
- err = los.ForEachObjectHash(func(hash plumbing.Hash) error {
- if ow.isSeen(hash) {
- err = los.DeleteLooseObject(hash)
- if err != nil {
- return err
- }
- }
- return nil
- })
- if err != nil {
- return h, err
- }
- }
-
- return h, err
-}
-
-func expandPartialHash(st storer.EncodedObjectStorer, prefix []byte) (hashes []plumbing.Hash) {
- // The fast version is implemented by storage/filesystem.ObjectStorage.
- type fastIter interface {
- HashesWithPrefix(prefix []byte) ([]plumbing.Hash, error)
- }
- if fi, ok := st.(fastIter); ok {
- h, err := fi.HashesWithPrefix(prefix)
- if err != nil {
- return nil
- }
- return h
- }
-
- // Slow path.
- iter, err := st.IterEncodedObjects(plumbing.AnyObject)
- if err != nil {
- return nil
- }
- iter.ForEach(func(obj plumbing.EncodedObject) error {
- h := obj.Hash()
- if bytes.HasPrefix(h[:], prefix) {
- hashes = append(hashes, h)
- }
- return nil
- })
- return
-}
diff --git a/vendor/github.com/go-git/go-git/v5/signer.go b/vendor/github.com/go-git/go-git/v5/signer.go
deleted file mode 100644
index e3ef7ebd31d..00000000000
--- a/vendor/github.com/go-git/go-git/v5/signer.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package git
-
-import (
- "io"
-
- "github.com/go-git/go-git/v5/plumbing"
-)
-
-// signableObject is an object which can be signed.
-type signableObject interface {
- EncodeWithoutSignature(o plumbing.EncodedObject) error
-}
-
-// Signer is an interface for signing git objects.
-// message is a reader containing the encoded object to be signed.
-// Implementors should return the encoded signature and an error if any.
-// See https://git-scm.com/docs/gitformat-signature for more information.
-type Signer interface {
- Sign(message io.Reader) ([]byte, error)
-}
-
-func signObject(signer Signer, obj signableObject) ([]byte, error) {
- encoded := &plumbing.MemoryObject{}
- if err := obj.EncodeWithoutSignature(encoded); err != nil {
- return nil, err
- }
- r, err := encoded.Reader()
- if err != nil {
- return nil, err
- }
-
- return signer.Sign(r)
-}
diff --git a/vendor/github.com/go-git/go-git/v5/status.go b/vendor/github.com/go-git/go-git/v5/status.go
deleted file mode 100644
index 7f18e02278b..00000000000
--- a/vendor/github.com/go-git/go-git/v5/status.go
+++ /dev/null
@@ -1,79 +0,0 @@
-package git
-
-import (
- "bytes"
- "fmt"
- "path/filepath"
-)
-
-// Status represents the current status of a Worktree.
-// The key of the map is the path of the file.
-type Status map[string]*FileStatus
-
-// File returns the FileStatus for a given path, if the FileStatus doesn't
-// exists a new FileStatus is added to the map using the path as key.
-func (s Status) File(path string) *FileStatus {
- if _, ok := (s)[path]; !ok {
- s[path] = &FileStatus{Worktree: Untracked, Staging: Untracked}
- }
-
- return s[path]
-}
-
-// IsUntracked checks if file for given path is 'Untracked'
-func (s Status) IsUntracked(path string) bool {
- stat, ok := (s)[filepath.ToSlash(path)]
- return ok && stat.Worktree == Untracked
-}
-
-// IsClean returns true if all the files are in Unmodified status.
-func (s Status) IsClean() bool {
- for _, status := range s {
- if status.Worktree != Unmodified || status.Staging != Unmodified {
- return false
- }
- }
-
- return true
-}
-
-func (s Status) String() string {
- buf := bytes.NewBuffer(nil)
- for path, status := range s {
- if status.Staging == Unmodified && status.Worktree == Unmodified {
- continue
- }
-
- if status.Staging == Renamed {
- path = fmt.Sprintf("%s -> %s", path, status.Extra)
- }
-
- fmt.Fprintf(buf, "%c%c %s\n", status.Staging, status.Worktree, path)
- }
-
- return buf.String()
-}
-
-// FileStatus contains the status of a file in the worktree
-type FileStatus struct {
- // Staging is the status of a file in the staging area
- Staging StatusCode
- // Worktree is the status of a file in the worktree
- Worktree StatusCode
- // Extra contains extra information, such as the previous name in a rename
- Extra string
-}
-
-// StatusCode status code of a file in the Worktree
-type StatusCode byte
-
-const (
- Unmodified StatusCode = ' '
- Untracked StatusCode = '?'
- Modified StatusCode = 'M'
- Added StatusCode = 'A'
- Deleted StatusCode = 'D'
- Renamed StatusCode = 'R'
- Copied StatusCode = 'C'
- UpdatedButUnmerged StatusCode = 'U'
-)
diff --git a/vendor/github.com/go-git/go-git/v5/storage/filesystem/config.go b/vendor/github.com/go-git/go-git/v5/storage/filesystem/config.go
deleted file mode 100644
index 78a646465a2..00000000000
--- a/vendor/github.com/go-git/go-git/v5/storage/filesystem/config.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package filesystem
-
-import (
- "os"
-
- "github.com/go-git/go-git/v5/config"
- "github.com/go-git/go-git/v5/storage/filesystem/dotgit"
- "github.com/go-git/go-git/v5/utils/ioutil"
-)
-
-type ConfigStorage struct {
- dir *dotgit.DotGit
-}
-
-func (c *ConfigStorage) Config() (conf *config.Config, err error) {
- f, err := c.dir.Config()
- if err != nil {
- if os.IsNotExist(err) {
- return config.NewConfig(), nil
- }
-
- return nil, err
- }
-
- defer ioutil.CheckClose(f, &err)
- return config.ReadConfig(f)
-}
-
-func (c *ConfigStorage) SetConfig(cfg *config.Config) (err error) {
- if err = cfg.Validate(); err != nil {
- return err
- }
-
- f, err := c.dir.ConfigWriter()
- if err != nil {
- return err
- }
-
- defer ioutil.CheckClose(f, &err)
-
- b, err := cfg.Marshal()
- if err != nil {
- return err
- }
-
- _, err = f.Write(b)
- return err
-}
diff --git a/vendor/github.com/go-git/go-git/v5/storage/filesystem/deltaobject.go b/vendor/github.com/go-git/go-git/v5/storage/filesystem/deltaobject.go
deleted file mode 100644
index 6ab2cdf38aa..00000000000
--- a/vendor/github.com/go-git/go-git/v5/storage/filesystem/deltaobject.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package filesystem
-
-import (
- "github.com/go-git/go-git/v5/plumbing"
-)
-
-type deltaObject struct {
- plumbing.EncodedObject
- base plumbing.Hash
- hash plumbing.Hash
- size int64
-}
-
-func newDeltaObject(
- obj plumbing.EncodedObject,
- hash plumbing.Hash,
- base plumbing.Hash,
- size int64) plumbing.DeltaObject {
- return &deltaObject{
- EncodedObject: obj,
- hash: hash,
- base: base,
- size: size,
- }
-}
-
-func (o *deltaObject) BaseHash() plumbing.Hash {
- return o.base
-}
-
-func (o *deltaObject) ActualSize() int64 {
- return o.size
-}
-
-func (o *deltaObject) ActualHash() plumbing.Hash {
- return o.hash
-}
diff --git a/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit.go b/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit.go
deleted file mode 100644
index 31c46948164..00000000000
--- a/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit.go
+++ /dev/null
@@ -1,1255 +0,0 @@
-// https://github.com/git/git/blob/master/Documentation/gitrepository-layout.txt
-package dotgit
-
-import (
- "bufio"
- "bytes"
- "errors"
- "fmt"
- "io"
- "os"
- "path"
- "path/filepath"
- "reflect"
- "runtime"
- "sort"
- "strings"
- "time"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/hash"
- "github.com/go-git/go-git/v5/storage"
- "github.com/go-git/go-git/v5/utils/ioutil"
-
- "github.com/go-git/go-billy/v5"
- "github.com/go-git/go-billy/v5/helper/chroot"
-)
-
-const (
- suffix = ".git"
- packedRefsPath = "packed-refs"
- configPath = "config"
- indexPath = "index"
- shallowPath = "shallow"
- modulePath = "modules"
- objectsPath = "objects"
- packPath = "pack"
- refsPath = "refs"
- branchesPath = "branches"
- hooksPath = "hooks"
- infoPath = "info"
- remotesPath = "remotes"
- logsPath = "logs"
- worktreesPath = "worktrees"
- alternatesPath = "alternates"
-
- tmpPackedRefsPrefix = "._packed-refs"
-
- packPrefix = "pack-"
- packExt = ".pack"
- idxExt = ".idx"
-)
-
-var (
- // ErrNotFound is returned by New when the path is not found.
- ErrNotFound = errors.New("path not found")
- // ErrIdxNotFound is returned by Idxfile when the idx file is not found
- ErrIdxNotFound = errors.New("idx file not found")
- // ErrPackfileNotFound is returned by Packfile when the packfile is not found
- ErrPackfileNotFound = errors.New("packfile not found")
- // ErrConfigNotFound is returned by Config when the config is not found
- ErrConfigNotFound = errors.New("config file not found")
- // ErrPackedRefsDuplicatedRef is returned when a duplicated reference is
- // found in the packed-ref file. This is usually the case for corrupted git
- // repositories.
- ErrPackedRefsDuplicatedRef = errors.New("duplicated ref found in packed-ref file")
- // ErrPackedRefsBadFormat is returned when the packed-ref file corrupt.
- ErrPackedRefsBadFormat = errors.New("malformed packed-ref")
- // ErrSymRefTargetNotFound is returned when a symbolic reference is
- // targeting a non-existing object. This usually means the repository
- // is corrupt.
- ErrSymRefTargetNotFound = errors.New("symbolic reference target not found")
- // ErrIsDir is returned when a reference file is attempting to be read,
- // but the path specified is a directory.
- ErrIsDir = errors.New("reference path is a directory")
-)
-
-// Options holds configuration for the storage.
-type Options struct {
- // ExclusiveAccess means that the filesystem is not modified externally
- // while the repo is open.
- ExclusiveAccess bool
- // KeepDescriptors makes the file descriptors to be reused but they will
- // need to be manually closed calling Close().
- KeepDescriptors bool
- // AlternatesFS provides the billy filesystem to be used for Git Alternates.
- // If none is provided, it falls back to using the underlying instance used for
- // DotGit.
- AlternatesFS billy.Filesystem
-}
-
-// The DotGit type represents a local git repository on disk. This
-// type is not zero-value-safe, use the New function to initialize it.
-type DotGit struct {
- options Options
- fs billy.Filesystem
-
- // incoming object directory information
- incomingChecked bool
- incomingDirName string
-
- objectList []plumbing.Hash // sorted
- objectMap map[plumbing.Hash]struct{}
- packList []plumbing.Hash
- packMap map[plumbing.Hash]struct{}
-
- files map[plumbing.Hash]billy.File
-}
-
-// New returns a DotGit value ready to be used. The path argument must
-// be the absolute path of a git repository directory (e.g.
-// "/foo/bar/.git").
-func New(fs billy.Filesystem) *DotGit {
- return NewWithOptions(fs, Options{})
-}
-
-// NewWithOptions sets non default configuration options.
-// See New for complete help.
-func NewWithOptions(fs billy.Filesystem, o Options) *DotGit {
- return &DotGit{
- options: o,
- fs: fs,
- }
-}
-
-// Initialize creates all the folder scaffolding.
-func (d *DotGit) Initialize() error {
- mustExists := []string{
- d.fs.Join("objects", "info"),
- d.fs.Join("objects", "pack"),
- d.fs.Join("refs", "heads"),
- d.fs.Join("refs", "tags"),
- }
-
- for _, path := range mustExists {
- _, err := d.fs.Stat(path)
- if err == nil {
- continue
- }
-
- if !os.IsNotExist(err) {
- return err
- }
-
- if err := d.fs.MkdirAll(path, os.ModeDir|os.ModePerm); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// Close closes all opened files.
-func (d *DotGit) Close() error {
- var firstError error
- if d.files != nil {
- for _, f := range d.files {
- err := f.Close()
- if err != nil && firstError == nil {
- firstError = err
- continue
- }
- }
-
- d.files = nil
- }
-
- if firstError != nil {
- return firstError
- }
-
- return nil
-}
-
-// ConfigWriter returns a file pointer for write to the config file
-func (d *DotGit) ConfigWriter() (billy.File, error) {
- return d.fs.Create(configPath)
-}
-
-// Config returns a file pointer for read to the config file
-func (d *DotGit) Config() (billy.File, error) {
- return d.fs.Open(configPath)
-}
-
-// IndexWriter returns a file pointer for write to the index file
-func (d *DotGit) IndexWriter() (billy.File, error) {
- return d.fs.Create(indexPath)
-}
-
-// Index returns a file pointer for read to the index file
-func (d *DotGit) Index() (billy.File, error) {
- return d.fs.Open(indexPath)
-}
-
-// ShallowWriter returns a file pointer for write to the shallow file
-func (d *DotGit) ShallowWriter() (billy.File, error) {
- return d.fs.Create(shallowPath)
-}
-
-// Shallow returns a file pointer for read to the shallow file
-func (d *DotGit) Shallow() (billy.File, error) {
- f, err := d.fs.Open(shallowPath)
- if err != nil {
- if os.IsNotExist(err) {
- return nil, nil
- }
-
- return nil, err
- }
-
- return f, nil
-}
-
-// NewObjectPack return a writer for a new packfile, it saves the packfile to
-// disk and also generates and save the index for the given packfile.
-func (d *DotGit) NewObjectPack() (*PackWriter, error) {
- d.cleanPackList()
- return newPackWrite(d.fs)
-}
-
-// ObjectPacks returns the list of availables packfiles
-func (d *DotGit) ObjectPacks() ([]plumbing.Hash, error) {
- if !d.options.ExclusiveAccess {
- return d.objectPacks()
- }
-
- err := d.genPackList()
- if err != nil {
- return nil, err
- }
-
- return d.packList, nil
-}
-
-func (d *DotGit) objectPacks() ([]plumbing.Hash, error) {
- packDir := d.fs.Join(objectsPath, packPath)
- files, err := d.fs.ReadDir(packDir)
- if err != nil {
- if os.IsNotExist(err) {
- return nil, nil
- }
-
- return nil, err
- }
-
- var packs []plumbing.Hash
- for _, f := range files {
- n := f.Name()
- if !strings.HasSuffix(n, packExt) || !strings.HasPrefix(n, packPrefix) {
- continue
- }
-
- h := plumbing.NewHash(n[5 : len(n)-5]) //pack-(hash).pack
- if h.IsZero() {
- // Ignore files with badly-formatted names.
- continue
- }
- packs = append(packs, h)
- }
-
- return packs, nil
-}
-
-func (d *DotGit) objectPackPath(hash plumbing.Hash, extension string) string {
- return d.fs.Join(objectsPath, packPath, fmt.Sprintf("pack-%s.%s", hash.String(), extension))
-}
-
-func (d *DotGit) objectPackOpen(hash plumbing.Hash, extension string) (billy.File, error) {
- if d.options.KeepDescriptors && extension == "pack" {
- if d.files == nil {
- d.files = make(map[plumbing.Hash]billy.File)
- }
-
- f, ok := d.files[hash]
- if ok {
- return f, nil
- }
- }
-
- err := d.hasPack(hash)
- if err != nil {
- return nil, err
- }
-
- path := d.objectPackPath(hash, extension)
- pack, err := d.fs.Open(path)
- if err != nil {
- if os.IsNotExist(err) {
- return nil, ErrPackfileNotFound
- }
-
- return nil, err
- }
-
- if d.options.KeepDescriptors && extension == "pack" {
- d.files[hash] = pack
- }
-
- return pack, nil
-}
-
-// ObjectPack returns a fs.File of the given packfile
-func (d *DotGit) ObjectPack(hash plumbing.Hash) (billy.File, error) {
- err := d.hasPack(hash)
- if err != nil {
- return nil, err
- }
-
- return d.objectPackOpen(hash, `pack`)
-}
-
-// ObjectPackIdx returns a fs.File of the index file for a given packfile
-func (d *DotGit) ObjectPackIdx(hash plumbing.Hash) (billy.File, error) {
- err := d.hasPack(hash)
- if err != nil {
- return nil, err
- }
-
- return d.objectPackOpen(hash, `idx`)
-}
-
-func (d *DotGit) DeleteOldObjectPackAndIndex(hash plumbing.Hash, t time.Time) error {
- d.cleanPackList()
-
- path := d.objectPackPath(hash, `pack`)
- if !t.IsZero() {
- fi, err := d.fs.Stat(path)
- if err != nil {
- return err
- }
- // too new, skip deletion.
- if !fi.ModTime().Before(t) {
- return nil
- }
- }
- err := d.fs.Remove(path)
- if err != nil {
- return err
- }
- return d.fs.Remove(d.objectPackPath(hash, `idx`))
-}
-
-// NewObject return a writer for a new object file.
-func (d *DotGit) NewObject() (*ObjectWriter, error) {
- d.cleanObjectList()
-
- return newObjectWriter(d.fs)
-}
-
-// ObjectsWithPrefix returns the hashes of objects that have the given prefix.
-func (d *DotGit) ObjectsWithPrefix(prefix []byte) ([]plumbing.Hash, error) {
- // Handle edge cases.
- if len(prefix) < 1 {
- return d.Objects()
- } else if len(prefix) > len(plumbing.ZeroHash) {
- return nil, nil
- }
-
- if d.options.ExclusiveAccess {
- err := d.genObjectList()
- if err != nil {
- return nil, err
- }
-
- // Rely on d.objectList being sorted.
- // Figure out the half-open interval defined by the prefix.
- first := sort.Search(len(d.objectList), func(i int) bool {
- // Same as plumbing.HashSlice.Less.
- return bytes.Compare(d.objectList[i][:], prefix) >= 0
- })
- lim := len(d.objectList)
- if limPrefix, overflow := incBytes(prefix); !overflow {
- lim = sort.Search(len(d.objectList), func(i int) bool {
- // Same as plumbing.HashSlice.Less.
- return bytes.Compare(d.objectList[i][:], limPrefix) >= 0
- })
- }
- return d.objectList[first:lim], nil
- }
-
- // This is the slow path.
- var objects []plumbing.Hash
- var n int
- err := d.ForEachObjectHash(func(hash plumbing.Hash) error {
- n++
- if bytes.HasPrefix(hash[:], prefix) {
- objects = append(objects, hash)
- }
- return nil
- })
- if err != nil {
- return nil, err
- }
- return objects, nil
-}
-
-// Objects returns a slice with the hashes of objects found under the
-// .git/objects/ directory.
-func (d *DotGit) Objects() ([]plumbing.Hash, error) {
- if d.options.ExclusiveAccess {
- err := d.genObjectList()
- if err != nil {
- return nil, err
- }
-
- return d.objectList, nil
- }
-
- var objects []plumbing.Hash
- err := d.ForEachObjectHash(func(hash plumbing.Hash) error {
- objects = append(objects, hash)
- return nil
- })
- if err != nil {
- return nil, err
- }
- return objects, nil
-}
-
-// ForEachObjectHash iterates over the hashes of objects found under the
-// .git/objects/ directory and executes the provided function.
-func (d *DotGit) ForEachObjectHash(fun func(plumbing.Hash) error) error {
- if !d.options.ExclusiveAccess {
- return d.forEachObjectHash(fun)
- }
-
- err := d.genObjectList()
- if err != nil {
- return err
- }
-
- for _, h := range d.objectList {
- err := fun(h)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (d *DotGit) forEachObjectHash(fun func(plumbing.Hash) error) error {
- files, err := d.fs.ReadDir(objectsPath)
- if err != nil {
- if os.IsNotExist(err) {
- return nil
- }
-
- return err
- }
-
- for _, f := range files {
- if f.IsDir() && len(f.Name()) == 2 && isHex(f.Name()) {
- base := f.Name()
- d, err := d.fs.ReadDir(d.fs.Join(objectsPath, base))
- if err != nil {
- return err
- }
-
- for _, o := range d {
- h := plumbing.NewHash(base + o.Name())
- if h.IsZero() {
- // Ignore files with badly-formatted names.
- continue
- }
- err = fun(h)
- if err != nil {
- return err
- }
- }
- }
- }
-
- return nil
-}
-
-func (d *DotGit) cleanObjectList() {
- d.objectMap = nil
- d.objectList = nil
-}
-
-func (d *DotGit) genObjectList() error {
- if d.objectMap != nil {
- return nil
- }
-
- d.objectMap = make(map[plumbing.Hash]struct{})
- populate := func(h plumbing.Hash) error {
- d.objectList = append(d.objectList, h)
- d.objectMap[h] = struct{}{}
-
- return nil
- }
- if err := d.forEachObjectHash(populate); err != nil {
- return err
- }
- plumbing.HashesSort(d.objectList)
- return nil
-}
-
-func (d *DotGit) hasObject(h plumbing.Hash) error {
- if !d.options.ExclusiveAccess {
- return nil
- }
-
- err := d.genObjectList()
- if err != nil {
- return err
- }
-
- _, ok := d.objectMap[h]
- if !ok {
- return plumbing.ErrObjectNotFound
- }
-
- return nil
-}
-
-func (d *DotGit) cleanPackList() {
- d.packMap = nil
- d.packList = nil
-}
-
-func (d *DotGit) genPackList() error {
- if d.packMap != nil {
- return nil
- }
-
- op, err := d.objectPacks()
- if err != nil {
- return err
- }
-
- d.packMap = make(map[plumbing.Hash]struct{})
- d.packList = nil
-
- for _, h := range op {
- d.packList = append(d.packList, h)
- d.packMap[h] = struct{}{}
- }
-
- return nil
-}
-
-func (d *DotGit) hasPack(h plumbing.Hash) error {
- if !d.options.ExclusiveAccess {
- return nil
- }
-
- err := d.genPackList()
- if err != nil {
- return err
- }
-
- _, ok := d.packMap[h]
- if !ok {
- return ErrPackfileNotFound
- }
-
- return nil
-}
-
-func (d *DotGit) objectPath(h plumbing.Hash) string {
- hex := h.String()
- return d.fs.Join(objectsPath, hex[0:2], hex[2:hash.HexSize])
-}
-
-// incomingObjectPath is intended to add support for a git pre-receive hook
-// to be written it adds support for go-git to find objects in an "incoming"
-// directory, so that the library can be used to write a pre-receive hook
-// that deals with the incoming objects.
-//
-// More on git hooks found here : https://git-scm.com/docs/githooks
-// More on 'quarantine'/incoming directory here:
-//
-// https://git-scm.com/docs/git-receive-pack
-func (d *DotGit) incomingObjectPath(h plumbing.Hash) string {
- hString := h.String()
-
- if d.incomingDirName == "" {
- return d.fs.Join(objectsPath, hString[0:2], hString[2:hash.HexSize])
- }
-
- return d.fs.Join(objectsPath, d.incomingDirName, hString[0:2], hString[2:hash.HexSize])
-}
-
-// hasIncomingObjects searches for an incoming directory and keeps its name
-// so it doesn't have to be found each time an object is accessed.
-func (d *DotGit) hasIncomingObjects() bool {
- if !d.incomingChecked {
- directoryContents, err := d.fs.ReadDir(objectsPath)
- if err == nil {
- for _, file := range directoryContents {
- if file.IsDir() && (strings.HasPrefix(file.Name(), "tmp_objdir-incoming-") ||
- // Before Git 2.35 incoming commits directory had another prefix
- strings.HasPrefix(file.Name(), "incoming-")) {
- d.incomingDirName = file.Name()
- }
- }
- }
-
- d.incomingChecked = true
- }
-
- return d.incomingDirName != ""
-}
-
-// Object returns a fs.File pointing the object file, if exists
-func (d *DotGit) Object(h plumbing.Hash) (billy.File, error) {
- err := d.hasObject(h)
- if err != nil {
- return nil, err
- }
-
- obj1, err1 := d.fs.Open(d.objectPath(h))
- if os.IsNotExist(err1) && d.hasIncomingObjects() {
- obj2, err2 := d.fs.Open(d.incomingObjectPath(h))
- if err2 != nil {
- return obj1, err1
- }
- return obj2, err2
- }
- return obj1, err1
-}
-
-// ObjectStat returns a os.FileInfo pointing the object file, if exists
-func (d *DotGit) ObjectStat(h plumbing.Hash) (os.FileInfo, error) {
- err := d.hasObject(h)
- if err != nil {
- return nil, err
- }
-
- obj1, err1 := d.fs.Stat(d.objectPath(h))
- if os.IsNotExist(err1) && d.hasIncomingObjects() {
- obj2, err2 := d.fs.Stat(d.incomingObjectPath(h))
- if err2 != nil {
- return obj1, err1
- }
- return obj2, err2
- }
- return obj1, err1
-}
-
-// ObjectDelete removes the object file, if exists
-func (d *DotGit) ObjectDelete(h plumbing.Hash) error {
- d.cleanObjectList()
-
- err1 := d.fs.Remove(d.objectPath(h))
- if os.IsNotExist(err1) && d.hasIncomingObjects() {
- err2 := d.fs.Remove(d.incomingObjectPath(h))
- if err2 != nil {
- return err1
- }
- return err2
- }
- return err1
-}
-
-func (d *DotGit) readReferenceFrom(rd io.Reader, name string) (ref *plumbing.Reference, err error) {
- b, err := io.ReadAll(rd)
- if err != nil {
- return nil, err
- }
-
- line := strings.TrimSpace(string(b))
- return plumbing.NewReferenceFromStrings(name, line), nil
-}
-
-func (d *DotGit) checkReferenceAndTruncate(f billy.File, old *plumbing.Reference) error {
- if old == nil {
- return nil
- }
- ref, err := d.readReferenceFrom(f, old.Name().String())
- if err != nil {
- return err
- }
- if ref.Hash() != old.Hash() {
- return storage.ErrReferenceHasChanged
- }
- _, err = f.Seek(0, io.SeekStart)
- if err != nil {
- return err
- }
- return f.Truncate(0)
-}
-
-func (d *DotGit) SetRef(r, old *plumbing.Reference) error {
- var content string
- switch r.Type() {
- case plumbing.SymbolicReference:
- content = fmt.Sprintf("ref: %s\n", r.Target())
- case plumbing.HashReference:
- content = fmt.Sprintln(r.Hash().String())
- }
-
- fileName := r.Name().String()
-
- return d.setRef(fileName, content, old)
-}
-
-// Refs scans the git directory collecting references, which it returns.
-// Symbolic references are resolved and included in the output.
-func (d *DotGit) Refs() ([]*plumbing.Reference, error) {
- var refs []*plumbing.Reference
- var seen = make(map[plumbing.ReferenceName]bool)
- if err := d.addRefsFromRefDir(&refs, seen); err != nil {
- return nil, err
- }
-
- if err := d.addRefsFromPackedRefs(&refs, seen); err != nil {
- return nil, err
- }
-
- if err := d.addRefFromHEAD(&refs); err != nil {
- return nil, err
- }
-
- return refs, nil
-}
-
-// Ref returns the reference for a given reference name.
-func (d *DotGit) Ref(name plumbing.ReferenceName) (*plumbing.Reference, error) {
- ref, err := d.readReferenceFile(".", name.String())
- if err == nil {
- return ref, nil
- }
-
- return d.packedRef(name)
-}
-
-func (d *DotGit) findPackedRefsInFile(f billy.File, recv refsRecv) error {
- s := bufio.NewScanner(f)
- for s.Scan() {
- ref, err := d.processLine(s.Text())
- if err != nil {
- return err
- }
-
- if !recv(ref) {
- // skip parse
- return nil
- }
- }
- if err := s.Err(); err != nil {
- return err
- }
- return nil
-}
-
-// refsRecv: returning true means that the reference continues to be resolved, otherwise it is stopped, which will speed up the lookup of a single reference.
-type refsRecv func(*plumbing.Reference) bool
-
-func (d *DotGit) findPackedRefs(recv refsRecv) error {
- f, err := d.fs.Open(packedRefsPath)
- if err != nil {
- if os.IsNotExist(err) {
- return nil
- }
- return err
- }
-
- defer ioutil.CheckClose(f, &err)
- return d.findPackedRefsInFile(f, recv)
-}
-
-func (d *DotGit) packedRef(name plumbing.ReferenceName) (*plumbing.Reference, error) {
- var ref *plumbing.Reference
- if err := d.findPackedRefs(func(r *plumbing.Reference) bool {
- if r != nil && r.Name() == name {
- ref = r
- // ref found
- return false
- }
- return true
- }); err != nil {
- return nil, err
- }
- if ref != nil {
- return ref, nil
- }
- return nil, plumbing.ErrReferenceNotFound
-}
-
-// RemoveRef removes a reference by name.
-func (d *DotGit) RemoveRef(name plumbing.ReferenceName) error {
- path := d.fs.Join(".", name.String())
- _, err := d.fs.Stat(path)
- if err == nil {
- err = d.fs.Remove(path)
- // Drop down to remove it from the packed refs file, too.
- }
-
- if err != nil && !os.IsNotExist(err) {
- return err
- }
-
- return d.rewritePackedRefsWithoutRef(name)
-}
-
-func refsRecvFunc(refs *[]*plumbing.Reference, seen map[plumbing.ReferenceName]bool) refsRecv {
- return func(r *plumbing.Reference) bool {
- if r != nil && !seen[r.Name()] {
- *refs = append(*refs, r)
- seen[r.Name()] = true
- }
- return true
- }
-}
-
-func (d *DotGit) addRefsFromPackedRefs(refs *[]*plumbing.Reference, seen map[plumbing.ReferenceName]bool) (err error) {
- return d.findPackedRefs(refsRecvFunc(refs, seen))
-}
-
-func (d *DotGit) addRefsFromPackedRefsFile(refs *[]*plumbing.Reference, f billy.File, seen map[plumbing.ReferenceName]bool) (err error) {
- return d.findPackedRefsInFile(f, refsRecvFunc(refs, seen))
-}
-
-func (d *DotGit) openAndLockPackedRefs(doCreate bool) (
- pr billy.File, err error) {
- var f billy.File
- defer func() {
- if err != nil && f != nil {
- ioutil.CheckClose(f, &err)
- }
- }()
-
- // File mode is retrieved from a constant defined in the target specific
- // files (dotgit_rewrite_packed_refs_*). Some modes are not available
- // in all filesystems.
- openFlags := d.openAndLockPackedRefsMode()
- if doCreate {
- openFlags |= os.O_CREATE
- }
-
- // Keep trying to open and lock the file until we're sure the file
- // didn't change between the open and the lock.
- for {
- f, err = d.fs.OpenFile(packedRefsPath, openFlags, 0600)
- if err != nil {
- if os.IsNotExist(err) && !doCreate {
- return nil, nil
- }
-
- return nil, err
- }
- fi, err := d.fs.Stat(packedRefsPath)
- if err != nil {
- return nil, err
- }
- mtime := fi.ModTime()
-
- err = f.Lock()
- if err != nil {
- return nil, err
- }
-
- fi, err = d.fs.Stat(packedRefsPath)
- if err != nil {
- return nil, err
- }
- if mtime.Equal(fi.ModTime()) {
- break
- }
- // The file has changed since we opened it. Close and retry.
- err = f.Close()
- if err != nil {
- return nil, err
- }
- }
- return f, nil
-}
-
-func (d *DotGit) rewritePackedRefsWithoutRef(name plumbing.ReferenceName) (err error) {
- pr, err := d.openAndLockPackedRefs(false)
- if err != nil {
- return err
- }
- if pr == nil {
- return nil
- }
- defer ioutil.CheckClose(pr, &err)
-
- // Creating the temp file in the same directory as the target file
- // improves our chances for rename operation to be atomic.
- tmp, err := d.fs.TempFile("", tmpPackedRefsPrefix)
- if err != nil {
- return err
- }
- tmpName := tmp.Name()
- defer func() {
- ioutil.CheckClose(tmp, &err)
- _ = d.fs.Remove(tmpName) // don't check err, we might have renamed it
- }()
-
- s := bufio.NewScanner(pr)
- found := false
- for s.Scan() {
- line := s.Text()
- ref, err := d.processLine(line)
- if err != nil {
- return err
- }
-
- if ref != nil && ref.Name() == name {
- found = true
- continue
- }
-
- if _, err := fmt.Fprintln(tmp, line); err != nil {
- return err
- }
- }
-
- if err := s.Err(); err != nil {
- return err
- }
-
- if !found {
- return nil
- }
-
- return d.rewritePackedRefsWhileLocked(tmp, pr)
-}
-
-// process lines from a packed-refs file
-func (d *DotGit) processLine(line string) (*plumbing.Reference, error) {
- if len(line) == 0 {
- return nil, nil
- }
-
- switch line[0] {
- case '#': // comment - ignore
- return nil, nil
- case '^': // annotated tag commit of the previous line - ignore
- return nil, nil
- default:
- ws := strings.Split(line, " ") // hash then ref
- if len(ws) != 2 {
- return nil, ErrPackedRefsBadFormat
- }
-
- return plumbing.NewReferenceFromStrings(ws[1], ws[0]), nil
- }
-}
-
-func (d *DotGit) addRefsFromRefDir(refs *[]*plumbing.Reference, seen map[plumbing.ReferenceName]bool) error {
- return d.walkReferencesTree(refs, []string{refsPath}, seen)
-}
-
-func (d *DotGit) walkReferencesTree(refs *[]*plumbing.Reference, relPath []string, seen map[plumbing.ReferenceName]bool) error {
- files, err := d.fs.ReadDir(d.fs.Join(relPath...))
- if err != nil {
- if os.IsNotExist(err) {
- // a race happened, and our directory is gone now
- return nil
- }
-
- return err
- }
-
- for _, f := range files {
- newRelPath := append(append([]string(nil), relPath...), f.Name())
- if f.IsDir() {
- if err = d.walkReferencesTree(refs, newRelPath, seen); err != nil {
- return err
- }
-
- continue
- }
-
- ref, err := d.readReferenceFile(".", strings.Join(newRelPath, "/"))
- if os.IsNotExist(err) {
- // a race happened, and our file is gone now
- continue
- }
- if err != nil {
- return err
- }
-
- if ref != nil && !seen[ref.Name()] {
- *refs = append(*refs, ref)
- seen[ref.Name()] = true
- }
- }
-
- return nil
-}
-
-func (d *DotGit) addRefFromHEAD(refs *[]*plumbing.Reference) error {
- ref, err := d.readReferenceFile(".", "HEAD")
- if err != nil {
- if os.IsNotExist(err) {
- return nil
- }
-
- return err
- }
-
- *refs = append(*refs, ref)
- return nil
-}
-
-func (d *DotGit) readReferenceFile(path, name string) (ref *plumbing.Reference, err error) {
- path = d.fs.Join(path, d.fs.Join(strings.Split(name, "/")...))
- st, err := d.fs.Stat(path)
- if err != nil {
- return nil, err
- }
- if st.IsDir() {
- return nil, ErrIsDir
- }
-
- f, err := d.fs.Open(path)
- if err != nil {
- return nil, err
- }
- defer ioutil.CheckClose(f, &err)
-
- return d.readReferenceFrom(f, name)
-}
-
-func (d *DotGit) CountLooseRefs() (int, error) {
- var refs []*plumbing.Reference
- var seen = make(map[plumbing.ReferenceName]bool)
- if err := d.addRefsFromRefDir(&refs, seen); err != nil {
- return 0, err
- }
-
- return len(refs), nil
-}
-
-// PackRefs packs all loose refs into the packed-refs file.
-//
-// This implementation only works under the assumption that the view
-// of the file system won't be updated during this operation. This
-// strategy would not work on a general file system though, without
-// locking each loose reference and checking it again before deleting
-// the file, because otherwise an updated reference could sneak in and
-// then be deleted by the packed-refs process. Alternatively, every
-// ref update could also lock packed-refs, so only one lock is
-// required during ref-packing. But that would worsen performance in
-// the common case.
-//
-// TODO: add an "all" boolean like the `git pack-refs --all` flag.
-// When `all` is false, it would only pack refs that have already been
-// packed, plus all tags.
-func (d *DotGit) PackRefs() (err error) {
- // Lock packed-refs, and create it if it doesn't exist yet.
- f, err := d.openAndLockPackedRefs(true)
- if err != nil {
- return err
- }
- defer ioutil.CheckClose(f, &err)
-
- // Gather all refs using addRefsFromRefDir and addRefsFromPackedRefs.
- var refs []*plumbing.Reference
- seen := make(map[plumbing.ReferenceName]bool)
- if err = d.addRefsFromRefDir(&refs, seen); err != nil {
- return err
- }
- if len(refs) == 0 {
- // Nothing to do!
- return nil
- }
- numLooseRefs := len(refs)
- if err = d.addRefsFromPackedRefsFile(&refs, f, seen); err != nil {
- return err
- }
-
- // Write them all to a new temp packed-refs file.
- tmp, err := d.fs.TempFile("", tmpPackedRefsPrefix)
- if err != nil {
- return err
- }
- tmpName := tmp.Name()
- defer func() {
- ioutil.CheckClose(tmp, &err)
- _ = d.fs.Remove(tmpName) // don't check err, we might have renamed it
- }()
-
- w := bufio.NewWriter(tmp)
- for _, ref := range refs {
- _, err = w.WriteString(ref.String() + "\n")
- if err != nil {
- return err
- }
- }
- err = w.Flush()
- if err != nil {
- return err
- }
-
- // Rename the temp packed-refs file.
- err = d.rewritePackedRefsWhileLocked(tmp, f)
- if err != nil {
- return err
- }
-
- // Delete all the loose refs, while still holding the packed-refs
- // lock.
- for _, ref := range refs[:numLooseRefs] {
- path := d.fs.Join(".", ref.Name().String())
- err = d.fs.Remove(path)
- if err != nil && !os.IsNotExist(err) {
- return err
- }
- }
-
- return nil
-}
-
-// Module return a billy.Filesystem pointing to the module folder
-func (d *DotGit) Module(name string) (billy.Filesystem, error) {
- return d.fs.Chroot(d.fs.Join(modulePath, name))
-}
-
-func (d *DotGit) AddAlternate(remote string) error {
- altpath := d.fs.Join(objectsPath, infoPath, alternatesPath)
-
- f, err := d.fs.OpenFile(altpath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0640)
- if err != nil {
- return fmt.Errorf("cannot open file: %w", err)
- }
- defer f.Close()
-
- // locking in windows throws an error, based on comments
- // https://github.com/go-git/go-git/pull/860#issuecomment-1751823044
- // do not lock on windows platform.
- if runtime.GOOS != "windows" {
- if err = f.Lock(); err != nil {
- return fmt.Errorf("cannot lock file: %w", err)
- }
- defer f.Unlock()
- }
-
- line := path.Join(remote, objectsPath) + "\n"
- _, err = io.WriteString(f, line)
- if err != nil {
- return fmt.Errorf("error writing 'alternates' file: %w", err)
- }
-
- return nil
-}
-
-// Alternates returns DotGit(s) based off paths in objects/info/alternates if
-// available. This can be used to checks if it's a shared repository.
-func (d *DotGit) Alternates() ([]*DotGit, error) {
- altpath := d.fs.Join(objectsPath, infoPath, alternatesPath)
- f, err := d.fs.Open(altpath)
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- fs := d.options.AlternatesFS
- if fs == nil {
- fs = d.fs
- }
-
- var alternates []*DotGit
- seen := make(map[string]struct{})
-
- // Read alternate paths line-by-line and create DotGit objects.
- scanner := bufio.NewScanner(f)
- for scanner.Scan() {
- path := scanner.Text()
-
- // Avoid creating multiple dotgits for the same alternative path.
- if _, ok := seen[path]; ok {
- continue
- }
-
- seen[path] = struct{}{}
-
- if filepath.IsAbs(path) {
- // Handling absolute paths should be straight-forward. However, the default osfs (Chroot)
- // tries to concatenate an abs path with the root path in some operations (e.g. Stat),
- // which leads to unexpected errors. Therefore, make the path relative to the current FS instead.
- if reflect.TypeOf(fs) == reflect.TypeOf(&chroot.ChrootHelper{}) {
- path, err = filepath.Rel(fs.Root(), path)
- if err != nil {
- return nil, fmt.Errorf("cannot make path %q relative: %w", path, err)
- }
- }
- } else {
- // By Git conventions, relative paths should be based on the object database (.git/objects/info)
- // location as per: https://www.kernel.org/pub/software/scm/git/docs/gitrepository-layout.html
- // However, due to the nature of go-git and its filesystem handling via Billy, paths cannot
- // cross its "chroot boundaries". Therefore, ignore any "../" and treat the path from the
- // fs root. If this is not correct based on the dotgit fs, set a different one via AlternatesFS.
- abs := filepath.Join(string(filepath.Separator), filepath.ToSlash(path))
- path = filepath.FromSlash(abs)
- }
-
- // Aligns with upstream behavior: exit if target path is not a valid directory.
- if fi, err := fs.Stat(path); err != nil || !fi.IsDir() {
- return nil, fmt.Errorf("invalid object directory %q: %w", path, err)
- }
- afs, err := fs.Chroot(filepath.Dir(path))
- if err != nil {
- return nil, fmt.Errorf("cannot chroot %q: %w", path, err)
- }
- alternates = append(alternates, New(afs))
- }
-
- if err = scanner.Err(); err != nil {
- return nil, err
- }
-
- return alternates, nil
-}
-
-// Fs returns the underlying filesystem of the DotGit folder.
-func (d *DotGit) Fs() billy.Filesystem {
- return d.fs
-}
-
-func isHex(s string) bool {
- for _, b := range []byte(s) {
- if isNum(b) {
- continue
- }
- if isHexAlpha(b) {
- continue
- }
-
- return false
- }
-
- return true
-}
-
-func isNum(b byte) bool {
- return b >= '0' && b <= '9'
-}
-
-func isHexAlpha(b byte) bool {
- return b >= 'a' && b <= 'f' || b >= 'A' && b <= 'F'
-}
-
-// incBytes increments a byte slice, which involves incrementing the
-// right-most byte, and following carry leftward.
-// It makes a copy so that the provided slice's underlying array is not modified.
-// If the overall operation overflows (e.g. incBytes(0xff, 0xff)), the second return parameter indicates that.
-func incBytes(in []byte) (out []byte, overflow bool) {
- out = make([]byte, len(in))
- copy(out, in)
- for i := len(out) - 1; i >= 0; i-- {
- out[i]++
- if out[i] != 0 {
- return // Didn't overflow.
- }
- }
- overflow = true
- return
-}
diff --git a/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit_rewrite_packed_refs.go b/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit_rewrite_packed_refs.go
deleted file mode 100644
index 43263eadfa0..00000000000
--- a/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit_rewrite_packed_refs.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package dotgit
-
-import (
- "io"
- "os"
- "runtime"
-
- "github.com/go-git/go-billy/v5"
- "github.com/go-git/go-git/v5/utils/ioutil"
-)
-
-func (d *DotGit) openAndLockPackedRefsMode() int {
- if billy.CapabilityCheck(d.fs, billy.ReadAndWriteCapability) {
- return os.O_RDWR
- }
-
- return os.O_RDONLY
-}
-
-func (d *DotGit) rewritePackedRefsWhileLocked(
- tmp billy.File, pr billy.File) error {
- // Try plain rename. If we aren't using the bare Windows filesystem as the
- // storage layer, we might be able to get away with a rename over a locked
- // file.
- err := d.fs.Rename(tmp.Name(), pr.Name())
- if err == nil {
- return nil
- }
-
- // If we are in a filesystem that does not support rename (e.g. sivafs)
- // a full copy is done.
- if err == billy.ErrNotSupported {
- return d.copyNewFile(tmp, pr)
- }
-
- if runtime.GOOS != "windows" {
- return err
- }
-
- // Otherwise, Windows doesn't let us rename over a locked file, so
- // we have to do a straight copy. Unfortunately this could result
- // in a partially-written file if the process fails before the
- // copy completes.
- return d.copyToExistingFile(tmp, pr)
-}
-
-func (d *DotGit) copyToExistingFile(tmp, pr billy.File) error {
- _, err := pr.Seek(0, io.SeekStart)
- if err != nil {
- return err
- }
- err = pr.Truncate(0)
- if err != nil {
- return err
- }
- _, err = tmp.Seek(0, io.SeekStart)
- if err != nil {
- return err
- }
- _, err = io.Copy(pr, tmp)
-
- return err
-}
-
-func (d *DotGit) copyNewFile(tmp billy.File, pr billy.File) (err error) {
- prWrite, err := d.fs.Create(pr.Name())
- if err != nil {
- return err
- }
-
- defer ioutil.CheckClose(prWrite, &err)
-
- _, err = tmp.Seek(0, io.SeekStart)
- if err != nil {
- return err
- }
-
- _, err = io.Copy(prWrite, tmp)
-
- return err
-}
diff --git a/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit_setref.go b/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit_setref.go
deleted file mode 100644
index c057f5c4865..00000000000
--- a/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit_setref.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package dotgit
-
-import (
- "fmt"
- "os"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/utils/ioutil"
-
- "github.com/go-git/go-billy/v5"
-)
-
-func (d *DotGit) setRef(fileName, content string, old *plumbing.Reference) (err error) {
- if billy.CapabilityCheck(d.fs, billy.ReadAndWriteCapability) {
- return d.setRefRwfs(fileName, content, old)
- }
-
- return d.setRefNorwfs(fileName, content, old)
-}
-
-func (d *DotGit) setRefRwfs(fileName, content string, old *plumbing.Reference) (err error) {
- // If we are not checking an old ref, just truncate the file.
- mode := os.O_RDWR | os.O_CREATE
- if old == nil {
- mode |= os.O_TRUNC
- }
-
- f, err := d.fs.OpenFile(fileName, mode, 0666)
- if err != nil {
- return err
- }
-
- defer ioutil.CheckClose(f, &err)
-
- // Lock is unlocked by the deferred Close above. This is because Unlock
- // does not imply a fsync and thus there would be a race between
- // Unlock+Close and other concurrent writers. Adding Sync to go-billy
- // could work, but this is better (and avoids superfluous syncs).
- err = f.Lock()
- if err != nil {
- return err
- }
-
- // this is a no-op to call even when old is nil.
- err = d.checkReferenceAndTruncate(f, old)
- if err != nil {
- return err
- }
-
- _, err = f.Write([]byte(content))
- return err
-}
-
-// There are some filesystems that don't support opening files in RDWD mode.
-// In these filesystems the standard SetRef function can not be used as it
-// reads the reference file to check that it's not modified before updating it.
-//
-// This version of the function writes the reference without extra checks
-// making it compatible with these simple filesystems. This is usually not
-// a problem as they should be accessed by only one process at a time.
-func (d *DotGit) setRefNorwfs(fileName, content string, old *plumbing.Reference) error {
- _, err := d.fs.Stat(fileName)
- if err == nil && old != nil {
- fRead, err := d.fs.Open(fileName)
- if err != nil {
- return err
- }
-
- ref, err := d.readReferenceFrom(fRead, old.Name().String())
- fRead.Close()
-
- if err != nil {
- return err
- }
-
- if ref.Hash() != old.Hash() {
- return fmt.Errorf("reference has changed concurrently")
- }
- }
-
- f, err := d.fs.Create(fileName)
- if err != nil {
- return err
- }
-
- defer f.Close()
-
- _, err = f.Write([]byte(content))
- return err
-}
diff --git a/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/reader.go b/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/reader.go
deleted file mode 100644
index 975f92ac6a0..00000000000
--- a/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/reader.go
+++ /dev/null
@@ -1,79 +0,0 @@
-package dotgit
-
-import (
- "fmt"
- "io"
- "os"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/format/objfile"
- "github.com/go-git/go-git/v5/utils/ioutil"
-)
-
-var _ (plumbing.EncodedObject) = &EncodedObject{}
-
-type EncodedObject struct {
- dir *DotGit
- h plumbing.Hash
- t plumbing.ObjectType
- sz int64
-}
-
-func (e *EncodedObject) Hash() plumbing.Hash {
- return e.h
-}
-
-func (e *EncodedObject) Reader() (io.ReadCloser, error) {
- f, err := e.dir.Object(e.h)
- if err != nil {
- if os.IsNotExist(err) {
- return nil, plumbing.ErrObjectNotFound
- }
-
- return nil, err
- }
- r, err := objfile.NewReader(f)
- if err != nil {
- return nil, err
- }
-
- t, size, err := r.Header()
- if err != nil {
- _ = r.Close()
- return nil, err
- }
- if t != e.t {
- _ = r.Close()
- return nil, objfile.ErrHeader
- }
- if size != e.sz {
- _ = r.Close()
- return nil, objfile.ErrHeader
- }
- return ioutil.NewReadCloserWithCloser(r, f.Close), nil
-}
-
-func (e *EncodedObject) SetType(plumbing.ObjectType) {}
-
-func (e *EncodedObject) Type() plumbing.ObjectType {
- return e.t
-}
-
-func (e *EncodedObject) Size() int64 {
- return e.sz
-}
-
-func (e *EncodedObject) SetSize(int64) {}
-
-func (e *EncodedObject) Writer() (io.WriteCloser, error) {
- return nil, fmt.Errorf("not supported")
-}
-
-func NewEncodedObject(dir *DotGit, h plumbing.Hash, t plumbing.ObjectType, size int64) *EncodedObject {
- return &EncodedObject{
- dir: dir,
- h: h,
- t: t,
- sz: size,
- }
-}
diff --git a/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/repository_filesystem.go b/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/repository_filesystem.go
deleted file mode 100644
index 8d243efea1f..00000000000
--- a/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/repository_filesystem.go
+++ /dev/null
@@ -1,111 +0,0 @@
-package dotgit
-
-import (
- "os"
- "path/filepath"
- "strings"
-
- "github.com/go-git/go-billy/v5"
-)
-
-// RepositoryFilesystem is a billy.Filesystem compatible object wrapper
-// which handles dot-git filesystem operations and supports commondir according to git scm layout:
-// https://github.com/git/git/blob/master/Documentation/gitrepository-layout.txt
-type RepositoryFilesystem struct {
- dotGitFs billy.Filesystem
- commonDotGitFs billy.Filesystem
-}
-
-func NewRepositoryFilesystem(dotGitFs, commonDotGitFs billy.Filesystem) *RepositoryFilesystem {
- return &RepositoryFilesystem{
- dotGitFs: dotGitFs,
- commonDotGitFs: commonDotGitFs,
- }
-}
-
-func (fs *RepositoryFilesystem) mapToRepositoryFsByPath(path string) billy.Filesystem {
- // Nothing to decide if commondir not defined
- if fs.commonDotGitFs == nil {
- return fs.dotGitFs
- }
-
- cleanPath := filepath.Clean(path)
-
- // Check exceptions for commondir (https://git-scm.com/docs/gitrepository-layout#Documentation/gitrepository-layout.txt)
- switch cleanPath {
- case fs.dotGitFs.Join(logsPath, "HEAD"):
- return fs.dotGitFs
- case fs.dotGitFs.Join(refsPath, "bisect"), fs.dotGitFs.Join(refsPath, "rewritten"), fs.dotGitFs.Join(refsPath, "worktree"):
- return fs.dotGitFs
- }
-
- // Determine dot-git root by first path element.
- // There are some elements which should always use commondir when commondir defined.
- // Usual dot-git root will be used for the rest of files.
- switch strings.Split(cleanPath, string(filepath.Separator))[0] {
- case objectsPath, refsPath, packedRefsPath, configPath, branchesPath, hooksPath, infoPath, remotesPath, logsPath, shallowPath, worktreesPath:
- return fs.commonDotGitFs
- default:
- return fs.dotGitFs
- }
-}
-
-func (fs *RepositoryFilesystem) Create(filename string) (billy.File, error) {
- return fs.mapToRepositoryFsByPath(filename).Create(filename)
-}
-
-func (fs *RepositoryFilesystem) Open(filename string) (billy.File, error) {
- return fs.mapToRepositoryFsByPath(filename).Open(filename)
-}
-
-func (fs *RepositoryFilesystem) OpenFile(filename string, flag int, perm os.FileMode) (billy.File, error) {
- return fs.mapToRepositoryFsByPath(filename).OpenFile(filename, flag, perm)
-}
-
-func (fs *RepositoryFilesystem) Stat(filename string) (os.FileInfo, error) {
- return fs.mapToRepositoryFsByPath(filename).Stat(filename)
-}
-
-func (fs *RepositoryFilesystem) Rename(oldpath, newpath string) error {
- return fs.mapToRepositoryFsByPath(oldpath).Rename(oldpath, newpath)
-}
-
-func (fs *RepositoryFilesystem) Remove(filename string) error {
- return fs.mapToRepositoryFsByPath(filename).Remove(filename)
-}
-
-func (fs *RepositoryFilesystem) Join(elem ...string) string {
- return fs.dotGitFs.Join(elem...)
-}
-
-func (fs *RepositoryFilesystem) TempFile(dir, prefix string) (billy.File, error) {
- return fs.mapToRepositoryFsByPath(dir).TempFile(dir, prefix)
-}
-
-func (fs *RepositoryFilesystem) ReadDir(path string) ([]os.FileInfo, error) {
- return fs.mapToRepositoryFsByPath(path).ReadDir(path)
-}
-
-func (fs *RepositoryFilesystem) MkdirAll(filename string, perm os.FileMode) error {
- return fs.mapToRepositoryFsByPath(filename).MkdirAll(filename, perm)
-}
-
-func (fs *RepositoryFilesystem) Lstat(filename string) (os.FileInfo, error) {
- return fs.mapToRepositoryFsByPath(filename).Lstat(filename)
-}
-
-func (fs *RepositoryFilesystem) Symlink(target, link string) error {
- return fs.mapToRepositoryFsByPath(target).Symlink(target, link)
-}
-
-func (fs *RepositoryFilesystem) Readlink(link string) (string, error) {
- return fs.mapToRepositoryFsByPath(link).Readlink(link)
-}
-
-func (fs *RepositoryFilesystem) Chroot(path string) (billy.Filesystem, error) {
- return fs.mapToRepositoryFsByPath(path).Chroot(path)
-}
-
-func (fs *RepositoryFilesystem) Root() string {
- return fs.dotGitFs.Root()
-}
diff --git a/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/writers.go b/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/writers.go
deleted file mode 100644
index 849b7a17606..00000000000
--- a/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/writers.go
+++ /dev/null
@@ -1,285 +0,0 @@
-package dotgit
-
-import (
- "fmt"
- "io"
- "sync/atomic"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/format/idxfile"
- "github.com/go-git/go-git/v5/plumbing/format/objfile"
- "github.com/go-git/go-git/v5/plumbing/format/packfile"
- "github.com/go-git/go-git/v5/plumbing/hash"
-
- "github.com/go-git/go-billy/v5"
-)
-
-// PackWriter is a io.Writer that generates the packfile index simultaneously,
-// a packfile.Decoder is used with a file reader to read the file being written
-// this operation is synchronized with the write operations.
-// The packfile is written in a temp file, when Close is called this file
-// is renamed/moved (depends on the Filesystem implementation) to the final
-// location, if the PackWriter is not used, nothing is written
-type PackWriter struct {
- Notify func(plumbing.Hash, *idxfile.Writer)
-
- fs billy.Filesystem
- fr, fw billy.File
- synced *syncedReader
- checksum plumbing.Hash
- parser *packfile.Parser
- writer *idxfile.Writer
- result chan error
-}
-
-func newPackWrite(fs billy.Filesystem) (*PackWriter, error) {
- fw, err := fs.TempFile(fs.Join(objectsPath, packPath), "tmp_pack_")
- if err != nil {
- return nil, err
- }
-
- fr, err := fs.Open(fw.Name())
- if err != nil {
- return nil, err
- }
-
- writer := &PackWriter{
- fs: fs,
- fw: fw,
- fr: fr,
- synced: newSyncedReader(fw, fr),
- result: make(chan error),
- }
-
- go writer.buildIndex()
- return writer, nil
-}
-
-func (w *PackWriter) buildIndex() {
- s := packfile.NewScanner(w.synced)
- w.writer = new(idxfile.Writer)
- var err error
- w.parser, err = packfile.NewParser(s, w.writer)
- if err != nil {
- w.result <- err
- return
- }
-
- checksum, err := w.parser.Parse()
- if err != nil {
- w.result <- err
- return
- }
-
- w.checksum = checksum
- w.result <- err
-}
-
-// waitBuildIndex waits until buildIndex function finishes, this can terminate
-// with a packfile.ErrEmptyPackfile, this means that nothing was written so we
-// ignore the error
-func (w *PackWriter) waitBuildIndex() error {
- err := <-w.result
- if err == packfile.ErrEmptyPackfile {
- return nil
- }
-
- return err
-}
-
-func (w *PackWriter) Write(p []byte) (int, error) {
- return w.synced.Write(p)
-}
-
-// Close closes all the file descriptors and save the final packfile, if nothing
-// was written, the tempfiles are deleted without writing a packfile.
-func (w *PackWriter) Close() error {
- defer func() {
- if w.Notify != nil && w.writer != nil && w.writer.Finished() {
- w.Notify(w.checksum, w.writer)
- }
-
- close(w.result)
- }()
-
- if err := w.synced.Close(); err != nil {
- return err
- }
-
- if err := w.waitBuildIndex(); err != nil {
- return err
- }
-
- if err := w.fr.Close(); err != nil {
- return err
- }
-
- if err := w.fw.Close(); err != nil {
- return err
- }
-
- if w.writer == nil || !w.writer.Finished() {
- return w.clean()
- }
-
- return w.save()
-}
-
-func (w *PackWriter) clean() error {
- return w.fs.Remove(w.fw.Name())
-}
-
-func (w *PackWriter) save() error {
- base := w.fs.Join(objectsPath, packPath, fmt.Sprintf("pack-%s", w.checksum))
- idx, err := w.fs.Create(fmt.Sprintf("%s.idx", base))
- if err != nil {
- return err
- }
-
- if err := w.encodeIdx(idx); err != nil {
- return err
- }
-
- if err := idx.Close(); err != nil {
- return err
- }
-
- return w.fs.Rename(w.fw.Name(), fmt.Sprintf("%s.pack", base))
-}
-
-func (w *PackWriter) encodeIdx(writer io.Writer) error {
- idx, err := w.writer.Index()
- if err != nil {
- return err
- }
-
- e := idxfile.NewEncoder(writer)
- _, err = e.Encode(idx)
- return err
-}
-
-type syncedReader struct {
- w io.Writer
- r io.ReadSeeker
-
- blocked, done uint32
- written, read uint64
- news chan bool
-}
-
-func newSyncedReader(w io.Writer, r io.ReadSeeker) *syncedReader {
- return &syncedReader{
- w: w,
- r: r,
- news: make(chan bool),
- }
-}
-
-func (s *syncedReader) Write(p []byte) (n int, err error) {
- defer func() {
- written := atomic.AddUint64(&s.written, uint64(n))
- read := atomic.LoadUint64(&s.read)
- if written > read {
- s.wake()
- }
- }()
-
- n, err = s.w.Write(p)
- return
-}
-
-func (s *syncedReader) Read(p []byte) (n int, err error) {
- defer func() { atomic.AddUint64(&s.read, uint64(n)) }()
-
- for {
- s.sleep()
- n, err = s.r.Read(p)
- if err == io.EOF && !s.isDone() && n == 0 {
- continue
- }
-
- break
- }
-
- return
-}
-
-func (s *syncedReader) isDone() bool {
- return atomic.LoadUint32(&s.done) == 1
-}
-
-func (s *syncedReader) isBlocked() bool {
- return atomic.LoadUint32(&s.blocked) == 1
-}
-
-func (s *syncedReader) wake() {
- if s.isBlocked() {
- atomic.StoreUint32(&s.blocked, 0)
- s.news <- true
- }
-}
-
-func (s *syncedReader) sleep() {
- read := atomic.LoadUint64(&s.read)
- written := atomic.LoadUint64(&s.written)
- if read >= written {
- atomic.StoreUint32(&s.blocked, 1)
- <-s.news
- }
-
-}
-
-func (s *syncedReader) Seek(offset int64, whence int) (int64, error) {
- if whence == io.SeekCurrent {
- return s.r.Seek(offset, whence)
- }
-
- p, err := s.r.Seek(offset, whence)
- atomic.StoreUint64(&s.read, uint64(p))
-
- return p, err
-}
-
-func (s *syncedReader) Close() error {
- atomic.StoreUint32(&s.done, 1)
- close(s.news)
- return nil
-}
-
-type ObjectWriter struct {
- objfile.Writer
- fs billy.Filesystem
- f billy.File
-}
-
-func newObjectWriter(fs billy.Filesystem) (*ObjectWriter, error) {
- f, err := fs.TempFile(fs.Join(objectsPath, packPath), "tmp_obj_")
- if err != nil {
- return nil, err
- }
-
- return &ObjectWriter{
- Writer: (*objfile.NewWriter(f)),
- fs: fs,
- f: f,
- }, nil
-}
-
-func (w *ObjectWriter) Close() error {
- if err := w.Writer.Close(); err != nil {
- return err
- }
-
- if err := w.f.Close(); err != nil {
- return err
- }
-
- return w.save()
-}
-
-func (w *ObjectWriter) save() error {
- hex := w.Hash().String()
- file := w.fs.Join(objectsPath, hex[0:2], hex[2:hash.HexSize])
-
- return w.fs.Rename(w.f.Name(), file)
-}
diff --git a/vendor/github.com/go-git/go-git/v5/storage/filesystem/index.go b/vendor/github.com/go-git/go-git/v5/storage/filesystem/index.go
deleted file mode 100644
index a19176f83db..00000000000
--- a/vendor/github.com/go-git/go-git/v5/storage/filesystem/index.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package filesystem
-
-import (
- "bufio"
- "os"
-
- "github.com/go-git/go-git/v5/plumbing/format/index"
- "github.com/go-git/go-git/v5/storage/filesystem/dotgit"
- "github.com/go-git/go-git/v5/utils/ioutil"
-)
-
-type IndexStorage struct {
- dir *dotgit.DotGit
-}
-
-func (s *IndexStorage) SetIndex(idx *index.Index) (err error) {
- f, err := s.dir.IndexWriter()
- if err != nil {
- return err
- }
-
- defer ioutil.CheckClose(f, &err)
- bw := bufio.NewWriter(f)
- defer func() {
- if e := bw.Flush(); err == nil && e != nil {
- err = e
- }
- }()
-
- e := index.NewEncoder(bw)
- err = e.Encode(idx)
- return err
-}
-
-func (s *IndexStorage) Index() (i *index.Index, err error) {
- idx := &index.Index{
- Version: 2,
- }
-
- f, err := s.dir.Index()
- if err != nil {
- if os.IsNotExist(err) {
- return idx, nil
- }
-
- return nil, err
- }
-
- defer ioutil.CheckClose(f, &err)
-
- d := index.NewDecoder(bufio.NewReader(f))
- err = d.Decode(idx)
- return idx, err
-}
diff --git a/vendor/github.com/go-git/go-git/v5/storage/filesystem/module.go b/vendor/github.com/go-git/go-git/v5/storage/filesystem/module.go
deleted file mode 100644
index 20336c11846..00000000000
--- a/vendor/github.com/go-git/go-git/v5/storage/filesystem/module.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package filesystem
-
-import (
- "github.com/go-git/go-git/v5/plumbing/cache"
- "github.com/go-git/go-git/v5/storage"
- "github.com/go-git/go-git/v5/storage/filesystem/dotgit"
-)
-
-type ModuleStorage struct {
- dir *dotgit.DotGit
-}
-
-func (s *ModuleStorage) Module(name string) (storage.Storer, error) {
- fs, err := s.dir.Module(name)
- if err != nil {
- return nil, err
- }
-
- return NewStorage(fs, cache.NewObjectLRUDefault()), nil
-}
diff --git a/vendor/github.com/go-git/go-git/v5/storage/filesystem/object.go b/vendor/github.com/go-git/go-git/v5/storage/filesystem/object.go
deleted file mode 100644
index e812fe934d8..00000000000
--- a/vendor/github.com/go-git/go-git/v5/storage/filesystem/object.go
+++ /dev/null
@@ -1,892 +0,0 @@
-package filesystem
-
-import (
- "bytes"
- "io"
- "os"
- "sync"
- "time"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/cache"
- "github.com/go-git/go-git/v5/plumbing/format/idxfile"
- "github.com/go-git/go-git/v5/plumbing/format/objfile"
- "github.com/go-git/go-git/v5/plumbing/format/packfile"
- "github.com/go-git/go-git/v5/plumbing/storer"
- "github.com/go-git/go-git/v5/storage/filesystem/dotgit"
- "github.com/go-git/go-git/v5/utils/ioutil"
-
- "github.com/go-git/go-billy/v5"
-)
-
-type ObjectStorage struct {
- options Options
-
- // objectCache is an object cache uses to cache delta's bases and also recently
- // loaded loose objects
- objectCache cache.Object
-
- dir *dotgit.DotGit
- index map[plumbing.Hash]idxfile.Index
-
- packList []plumbing.Hash
- packListIdx int
- packfiles map[plumbing.Hash]*packfile.Packfile
-}
-
-// NewObjectStorage creates a new ObjectStorage with the given .git directory and cache.
-func NewObjectStorage(dir *dotgit.DotGit, objectCache cache.Object) *ObjectStorage {
- return NewObjectStorageWithOptions(dir, objectCache, Options{})
-}
-
-// NewObjectStorageWithOptions creates a new ObjectStorage with the given .git directory, cache and extra options
-func NewObjectStorageWithOptions(dir *dotgit.DotGit, objectCache cache.Object, ops Options) *ObjectStorage {
- return &ObjectStorage{
- options: ops,
- objectCache: objectCache,
- dir: dir,
- }
-}
-
-func (s *ObjectStorage) requireIndex() error {
- if s.index != nil {
- return nil
- }
-
- s.index = make(map[plumbing.Hash]idxfile.Index)
- packs, err := s.dir.ObjectPacks()
- if err != nil {
- return err
- }
-
- for _, h := range packs {
- if err := s.loadIdxFile(h); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// Reindex indexes again all packfiles. Useful if git changed packfiles externally
-func (s *ObjectStorage) Reindex() {
- s.index = nil
-}
-
-func (s *ObjectStorage) loadIdxFile(h plumbing.Hash) (err error) {
- f, err := s.dir.ObjectPackIdx(h)
- if err != nil {
- return err
- }
-
- defer ioutil.CheckClose(f, &err)
-
- idxf := idxfile.NewMemoryIndex()
- d := idxfile.NewDecoder(f)
- if err = d.Decode(idxf); err != nil {
- return err
- }
-
- s.index[h] = idxf
- return err
-}
-
-func (s *ObjectStorage) NewEncodedObject() plumbing.EncodedObject {
- return &plumbing.MemoryObject{}
-}
-
-func (s *ObjectStorage) PackfileWriter() (io.WriteCloser, error) {
- if err := s.requireIndex(); err != nil {
- return nil, err
- }
-
- w, err := s.dir.NewObjectPack()
- if err != nil {
- return nil, err
- }
-
- w.Notify = func(h plumbing.Hash, writer *idxfile.Writer) {
- index, err := writer.Index()
- if err == nil {
- s.index[h] = index
- }
- }
-
- return w, nil
-}
-
-// SetEncodedObject adds a new object to the storage.
-func (s *ObjectStorage) SetEncodedObject(o plumbing.EncodedObject) (h plumbing.Hash, err error) {
- if o.Type() == plumbing.OFSDeltaObject || o.Type() == plumbing.REFDeltaObject {
- return plumbing.ZeroHash, plumbing.ErrInvalidType
- }
-
- ow, err := s.dir.NewObject()
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- defer ioutil.CheckClose(ow, &err)
-
- or, err := o.Reader()
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- defer ioutil.CheckClose(or, &err)
-
- if err = ow.WriteHeader(o.Type(), o.Size()); err != nil {
- return plumbing.ZeroHash, err
- }
-
- if _, err = io.Copy(ow, or); err != nil {
- return plumbing.ZeroHash, err
- }
-
- return o.Hash(), err
-}
-
-// LazyWriter returns a lazy ObjectWriter that is bound to a DotGit file.
-// It first write the header passing on the object type and size, so
-// that the object contents can be written later, without the need to
-// create a MemoryObject and buffering its entire contents into memory.
-func (s *ObjectStorage) LazyWriter() (w io.WriteCloser, wh func(typ plumbing.ObjectType, sz int64) error, err error) {
- ow, err := s.dir.NewObject()
- if err != nil {
- return nil, nil, err
- }
-
- return ow, ow.WriteHeader, nil
-}
-
-// HasEncodedObject returns nil if the object exists, without actually
-// reading the object data from storage.
-func (s *ObjectStorage) HasEncodedObject(h plumbing.Hash) (err error) {
- // Check unpacked objects
- f, err := s.dir.Object(h)
- if err != nil {
- if !os.IsNotExist(err) {
- return err
- }
- // Fall through to check packed objects.
- } else {
- defer ioutil.CheckClose(f, &err)
- return nil
- }
-
- // Check packed objects.
- if err := s.requireIndex(); err != nil {
- return err
- }
- _, _, offset := s.findObjectInPackfile(h)
- if offset == -1 {
- return plumbing.ErrObjectNotFound
- }
- return nil
-}
-
-func (s *ObjectStorage) encodedObjectSizeFromUnpacked(h plumbing.Hash) (
- size int64, err error) {
- f, err := s.dir.Object(h)
- if err != nil {
- if os.IsNotExist(err) {
- return 0, plumbing.ErrObjectNotFound
- }
-
- return 0, err
- }
-
- r, err := objfile.NewReader(f)
- if err != nil {
- return 0, err
- }
- defer ioutil.CheckClose(r, &err)
-
- _, size, err = r.Header()
- return size, err
-}
-
-func (s *ObjectStorage) packfile(idx idxfile.Index, pack plumbing.Hash) (*packfile.Packfile, error) {
- if p := s.packfileFromCache(pack); p != nil {
- return p, nil
- }
-
- f, err := s.dir.ObjectPack(pack)
- if err != nil {
- return nil, err
- }
-
- var p *packfile.Packfile
- if s.objectCache != nil {
- p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache, s.options.LargeObjectThreshold)
- } else {
- p = packfile.NewPackfile(idx, s.dir.Fs(), f, s.options.LargeObjectThreshold)
- }
-
- return p, s.storePackfileInCache(pack, p)
-}
-
-func (s *ObjectStorage) packfileFromCache(hash plumbing.Hash) *packfile.Packfile {
- if s.packfiles == nil {
- if s.options.KeepDescriptors {
- s.packfiles = make(map[plumbing.Hash]*packfile.Packfile)
- } else if s.options.MaxOpenDescriptors > 0 {
- s.packList = make([]plumbing.Hash, s.options.MaxOpenDescriptors)
- s.packfiles = make(map[plumbing.Hash]*packfile.Packfile, s.options.MaxOpenDescriptors)
- }
- }
-
- return s.packfiles[hash]
-}
-
-func (s *ObjectStorage) storePackfileInCache(hash plumbing.Hash, p *packfile.Packfile) error {
- if s.options.KeepDescriptors {
- s.packfiles[hash] = p
- return nil
- }
-
- if s.options.MaxOpenDescriptors <= 0 {
- return nil
- }
-
- // start over as the limit of packList is hit
- if s.packListIdx >= len(s.packList) {
- s.packListIdx = 0
- }
-
- // close the existing packfile if open
- if next := s.packList[s.packListIdx]; !next.IsZero() {
- open := s.packfiles[next]
- delete(s.packfiles, next)
- if open != nil {
- if err := open.Close(); err != nil {
- return err
- }
- }
- }
-
- // cache newly open packfile
- s.packList[s.packListIdx] = hash
- s.packfiles[hash] = p
- s.packListIdx++
-
- return nil
-}
-
-func (s *ObjectStorage) encodedObjectSizeFromPackfile(h plumbing.Hash) (
- size int64, err error) {
- if err := s.requireIndex(); err != nil {
- return 0, err
- }
-
- pack, _, offset := s.findObjectInPackfile(h)
- if offset == -1 {
- return 0, plumbing.ErrObjectNotFound
- }
-
- idx := s.index[pack]
- hash, err := idx.FindHash(offset)
- if err == nil {
- obj, ok := s.objectCache.Get(hash)
- if ok {
- return obj.Size(), nil
- }
- } else if err != nil && err != plumbing.ErrObjectNotFound {
- return 0, err
- }
-
- p, err := s.packfile(idx, pack)
- if err != nil {
- return 0, err
- }
-
- if !s.options.KeepDescriptors && s.options.MaxOpenDescriptors == 0 {
- defer ioutil.CheckClose(p, &err)
- }
-
- return p.GetSizeByOffset(offset)
-}
-
-// EncodedObjectSize returns the plaintext size of the given object,
-// without actually reading the full object data from storage.
-func (s *ObjectStorage) EncodedObjectSize(h plumbing.Hash) (
- size int64, err error) {
- size, err = s.encodedObjectSizeFromUnpacked(h)
- if err != nil && err != plumbing.ErrObjectNotFound {
- return 0, err
- } else if err == nil {
- return size, nil
- }
-
- return s.encodedObjectSizeFromPackfile(h)
-}
-
-// EncodedObject returns the object with the given hash, by searching for it in
-// the packfile and the git object directories.
-func (s *ObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) {
- var obj plumbing.EncodedObject
- var err error
-
- if s.index != nil {
- obj, err = s.getFromPackfile(h, false)
- if err == plumbing.ErrObjectNotFound {
- obj, err = s.getFromUnpacked(h)
- }
- } else {
- obj, err = s.getFromUnpacked(h)
- if err == plumbing.ErrObjectNotFound {
- obj, err = s.getFromPackfile(h, false)
- }
- }
-
- // If the error is still object not found, check if it's a shared object
- // repository.
- if err == plumbing.ErrObjectNotFound {
- dotgits, e := s.dir.Alternates()
- if e == nil {
- // Create a new object storage with the DotGit(s) and check for the
- // required hash object. Skip when not found.
- for _, dg := range dotgits {
- o := NewObjectStorage(dg, s.objectCache)
- enobj, enerr := o.EncodedObject(t, h)
- if enerr != nil {
- continue
- }
- return enobj, nil
- }
- }
- }
-
- if err != nil {
- return nil, err
- }
-
- if plumbing.AnyObject != t && obj.Type() != t {
- return nil, plumbing.ErrObjectNotFound
- }
-
- return obj, nil
-}
-
-// DeltaObject returns the object with the given hash, by searching for
-// it in the packfile and the git object directories.
-func (s *ObjectStorage) DeltaObject(t plumbing.ObjectType,
- h plumbing.Hash) (plumbing.EncodedObject, error) {
- obj, err := s.getFromUnpacked(h)
- if err == plumbing.ErrObjectNotFound {
- obj, err = s.getFromPackfile(h, true)
- }
-
- if err != nil {
- return nil, err
- }
-
- if plumbing.AnyObject != t && obj.Type() != t {
- return nil, plumbing.ErrObjectNotFound
- }
-
- return obj, nil
-}
-
-func (s *ObjectStorage) getFromUnpacked(h plumbing.Hash) (obj plumbing.EncodedObject, err error) {
- f, err := s.dir.Object(h)
- if err != nil {
- if os.IsNotExist(err) {
- return nil, plumbing.ErrObjectNotFound
- }
-
- return nil, err
- }
- defer ioutil.CheckClose(f, &err)
-
- if cacheObj, found := s.objectCache.Get(h); found {
- return cacheObj, nil
- }
-
- r, err := objfile.NewReader(f)
- if err != nil {
- return nil, err
- }
-
- defer ioutil.CheckClose(r, &err)
-
- t, size, err := r.Header()
- if err != nil {
- return nil, err
- }
-
- if s.options.LargeObjectThreshold > 0 && size > s.options.LargeObjectThreshold {
- obj = dotgit.NewEncodedObject(s.dir, h, t, size)
- return obj, nil
- }
-
- obj = s.NewEncodedObject()
-
- obj.SetType(t)
- obj.SetSize(size)
- w, err := obj.Writer()
- if err != nil {
- return nil, err
- }
-
- defer ioutil.CheckClose(w, &err)
-
- s.objectCache.Put(obj)
-
- bufp := copyBufferPool.Get().(*[]byte)
- buf := *bufp
- _, err = io.CopyBuffer(w, r, buf)
- copyBufferPool.Put(bufp)
-
- return obj, err
-}
-
-var copyBufferPool = sync.Pool{
- New: func() interface{} {
- b := make([]byte, 32*1024)
- return &b
- },
-}
-
-// Get returns the object with the given hash, by searching for it in
-// the packfile.
-func (s *ObjectStorage) getFromPackfile(h plumbing.Hash, canBeDelta bool) (
- plumbing.EncodedObject, error) {
-
- if err := s.requireIndex(); err != nil {
- return nil, err
- }
-
- pack, hash, offset := s.findObjectInPackfile(h)
- if offset == -1 {
- return nil, plumbing.ErrObjectNotFound
- }
-
- idx := s.index[pack]
- p, err := s.packfile(idx, pack)
- if err != nil {
- return nil, err
- }
-
- if !s.options.KeepDescriptors && s.options.MaxOpenDescriptors == 0 {
- defer ioutil.CheckClose(p, &err)
- }
-
- if canBeDelta {
- return s.decodeDeltaObjectAt(p, offset, hash)
- }
-
- return s.decodeObjectAt(p, offset)
-}
-
-func (s *ObjectStorage) decodeObjectAt(
- p *packfile.Packfile,
- offset int64,
-) (plumbing.EncodedObject, error) {
- hash, err := p.FindHash(offset)
- if err == nil {
- obj, ok := s.objectCache.Get(hash)
- if ok {
- return obj, nil
- }
- }
-
- if err != nil && err != plumbing.ErrObjectNotFound {
- return nil, err
- }
-
- return p.GetByOffset(offset)
-}
-
-func (s *ObjectStorage) decodeDeltaObjectAt(
- p *packfile.Packfile,
- offset int64,
- hash plumbing.Hash,
-) (plumbing.EncodedObject, error) {
- scan := p.Scanner()
- header, err := scan.SeekObjectHeader(offset)
- if err != nil {
- return nil, err
- }
-
- var (
- base plumbing.Hash
- )
-
- switch header.Type {
- case plumbing.REFDeltaObject:
- base = header.Reference
- case plumbing.OFSDeltaObject:
- base, err = p.FindHash(header.OffsetReference)
- if err != nil {
- return nil, err
- }
- default:
- return s.decodeObjectAt(p, offset)
- }
-
- obj := &plumbing.MemoryObject{}
- obj.SetType(header.Type)
- w, err := obj.Writer()
- if err != nil {
- return nil, err
- }
-
- if _, _, err := scan.NextObject(w); err != nil {
- return nil, err
- }
-
- return newDeltaObject(obj, hash, base, header.Length), nil
-}
-
-func (s *ObjectStorage) findObjectInPackfile(h plumbing.Hash) (plumbing.Hash, plumbing.Hash, int64) {
- for packfile, index := range s.index {
- offset, err := index.FindOffset(h)
- if err == nil {
- return packfile, h, offset
- }
- }
-
- return plumbing.ZeroHash, plumbing.ZeroHash, -1
-}
-
-// HashesWithPrefix returns all objects with a hash that starts with a prefix by searching for
-// them in the packfile and the git object directories.
-func (s *ObjectStorage) HashesWithPrefix(prefix []byte) ([]plumbing.Hash, error) {
- hashes, err := s.dir.ObjectsWithPrefix(prefix)
- if err != nil {
- return nil, err
- }
-
- seen := hashListAsMap(hashes)
-
- // TODO: This could be faster with some idxfile changes,
- // or diving into the packfile.
- if err := s.requireIndex(); err != nil {
- return nil, err
- }
- for _, index := range s.index {
- ei, err := index.Entries()
- if err != nil {
- return nil, err
- }
- for {
- e, err := ei.Next()
- if err == io.EOF {
- break
- } else if err != nil {
- return nil, err
- }
- if bytes.HasPrefix(e.Hash[:], prefix) {
- if _, ok := seen[e.Hash]; ok {
- continue
- }
- hashes = append(hashes, e.Hash)
- }
- }
- ei.Close()
- }
-
- return hashes, nil
-}
-
-// IterEncodedObjects returns an iterator for all the objects in the packfile
-// with the given type.
-func (s *ObjectStorage) IterEncodedObjects(t plumbing.ObjectType) (storer.EncodedObjectIter, error) {
- objects, err := s.dir.Objects()
- if err != nil {
- return nil, err
- }
-
- seen := make(map[plumbing.Hash]struct{})
- var iters []storer.EncodedObjectIter
- if len(objects) != 0 {
- iters = append(iters, &objectsIter{s: s, t: t, h: objects})
- seen = hashListAsMap(objects)
- }
-
- packi, err := s.buildPackfileIters(t, seen)
- if err != nil {
- return nil, err
- }
-
- iters = append(iters, packi)
- return storer.NewMultiEncodedObjectIter(iters), nil
-}
-
-func (s *ObjectStorage) buildPackfileIters(
- t plumbing.ObjectType,
- seen map[plumbing.Hash]struct{},
-) (storer.EncodedObjectIter, error) {
- if err := s.requireIndex(); err != nil {
- return nil, err
- }
-
- packs, err := s.dir.ObjectPacks()
- if err != nil {
- return nil, err
- }
- return &lazyPackfilesIter{
- hashes: packs,
- open: func(h plumbing.Hash) (storer.EncodedObjectIter, error) {
- pack, err := s.dir.ObjectPack(h)
- if err != nil {
- return nil, err
- }
- return newPackfileIter(
- s.dir.Fs(), pack, t, seen, s.index[h],
- s.objectCache, s.options.KeepDescriptors,
- s.options.LargeObjectThreshold,
- )
- },
- }, nil
-}
-
-// Close closes all opened files.
-func (s *ObjectStorage) Close() error {
- var firstError error
- if s.options.KeepDescriptors || s.options.MaxOpenDescriptors > 0 {
- for _, packfile := range s.packfiles {
- err := packfile.Close()
- if firstError == nil && err != nil {
- firstError = err
- }
- }
- }
-
- s.packfiles = nil
- s.dir.Close()
-
- return firstError
-}
-
-type lazyPackfilesIter struct {
- hashes []plumbing.Hash
- open func(h plumbing.Hash) (storer.EncodedObjectIter, error)
- cur storer.EncodedObjectIter
-}
-
-func (it *lazyPackfilesIter) Next() (plumbing.EncodedObject, error) {
- for {
- if it.cur == nil {
- if len(it.hashes) == 0 {
- return nil, io.EOF
- }
- h := it.hashes[0]
- it.hashes = it.hashes[1:]
-
- sub, err := it.open(h)
- if err == io.EOF {
- continue
- } else if err != nil {
- return nil, err
- }
- it.cur = sub
- }
- ob, err := it.cur.Next()
- if err == io.EOF {
- it.cur.Close()
- it.cur = nil
- continue
- } else if err != nil {
- return nil, err
- }
- return ob, nil
- }
-}
-
-func (it *lazyPackfilesIter) ForEach(cb func(plumbing.EncodedObject) error) error {
- return storer.ForEachIterator(it, cb)
-}
-
-func (it *lazyPackfilesIter) Close() {
- if it.cur != nil {
- it.cur.Close()
- it.cur = nil
- }
- it.hashes = nil
-}
-
-type packfileIter struct {
- pack billy.File
- iter storer.EncodedObjectIter
- seen map[plumbing.Hash]struct{}
-
- // tells whether the pack file should be left open after iteration or not
- keepPack bool
-}
-
-// NewPackfileIter returns a new EncodedObjectIter for the provided packfile
-// and object type. Packfile and index file will be closed after they're
-// used. If keepPack is true the packfile won't be closed after the iteration
-// finished.
-func NewPackfileIter(
- fs billy.Filesystem,
- f billy.File,
- idxFile billy.File,
- t plumbing.ObjectType,
- keepPack bool,
- largeObjectThreshold int64,
-) (storer.EncodedObjectIter, error) {
- idx := idxfile.NewMemoryIndex()
- if err := idxfile.NewDecoder(idxFile).Decode(idx); err != nil {
- return nil, err
- }
-
- if err := idxFile.Close(); err != nil {
- return nil, err
- }
-
- seen := make(map[plumbing.Hash]struct{})
- return newPackfileIter(fs, f, t, seen, idx, nil, keepPack, largeObjectThreshold)
-}
-
-func newPackfileIter(
- fs billy.Filesystem,
- f billy.File,
- t plumbing.ObjectType,
- seen map[plumbing.Hash]struct{},
- index idxfile.Index,
- cache cache.Object,
- keepPack bool,
- largeObjectThreshold int64,
-) (storer.EncodedObjectIter, error) {
- var p *packfile.Packfile
- if cache != nil {
- p = packfile.NewPackfileWithCache(index, fs, f, cache, largeObjectThreshold)
- } else {
- p = packfile.NewPackfile(index, fs, f, largeObjectThreshold)
- }
-
- iter, err := p.GetByType(t)
- if err != nil {
- return nil, err
- }
-
- return &packfileIter{
- pack: f,
- iter: iter,
- seen: seen,
- keepPack: keepPack,
- }, nil
-}
-
-func (iter *packfileIter) Next() (plumbing.EncodedObject, error) {
- for {
- obj, err := iter.iter.Next()
- if err != nil {
- return nil, err
- }
-
- if _, ok := iter.seen[obj.Hash()]; ok {
- continue
- }
-
- return obj, nil
- }
-}
-
-func (iter *packfileIter) ForEach(cb func(plumbing.EncodedObject) error) error {
- for {
- o, err := iter.Next()
- if err != nil {
- if err == io.EOF {
- iter.Close()
- return nil
- }
- return err
- }
-
- if err := cb(o); err != nil {
- return err
- }
- }
-}
-
-func (iter *packfileIter) Close() {
- iter.iter.Close()
- if !iter.keepPack {
- _ = iter.pack.Close()
- }
-}
-
-type objectsIter struct {
- s *ObjectStorage
- t plumbing.ObjectType
- h []plumbing.Hash
-}
-
-func (iter *objectsIter) Next() (plumbing.EncodedObject, error) {
- if len(iter.h) == 0 {
- return nil, io.EOF
- }
-
- obj, err := iter.s.getFromUnpacked(iter.h[0])
- iter.h = iter.h[1:]
-
- if err != nil {
- return nil, err
- }
-
- if iter.t != plumbing.AnyObject && iter.t != obj.Type() {
- return iter.Next()
- }
-
- return obj, err
-}
-
-func (iter *objectsIter) ForEach(cb func(plumbing.EncodedObject) error) error {
- for {
- o, err := iter.Next()
- if err != nil {
- if err == io.EOF {
- return nil
- }
- return err
- }
-
- if err := cb(o); err != nil {
- return err
- }
- }
-}
-
-func (iter *objectsIter) Close() {
- iter.h = []plumbing.Hash{}
-}
-
-func hashListAsMap(l []plumbing.Hash) map[plumbing.Hash]struct{} {
- m := make(map[plumbing.Hash]struct{}, len(l))
- for _, h := range l {
- m[h] = struct{}{}
- }
- return m
-}
-
-func (s *ObjectStorage) ForEachObjectHash(fun func(plumbing.Hash) error) error {
- err := s.dir.ForEachObjectHash(fun)
- if err == storer.ErrStop {
- return nil
- }
- return err
-}
-
-func (s *ObjectStorage) LooseObjectTime(hash plumbing.Hash) (time.Time, error) {
- fi, err := s.dir.ObjectStat(hash)
- if err != nil {
- return time.Time{}, err
- }
- return fi.ModTime(), nil
-}
-
-func (s *ObjectStorage) DeleteLooseObject(hash plumbing.Hash) error {
- return s.dir.ObjectDelete(hash)
-}
-
-func (s *ObjectStorage) ObjectPacks() ([]plumbing.Hash, error) {
- return s.dir.ObjectPacks()
-}
-
-func (s *ObjectStorage) DeleteOldObjectPackAndIndex(h plumbing.Hash, t time.Time) error {
- return s.dir.DeleteOldObjectPackAndIndex(h, t)
-}
diff --git a/vendor/github.com/go-git/go-git/v5/storage/filesystem/reference.go b/vendor/github.com/go-git/go-git/v5/storage/filesystem/reference.go
deleted file mode 100644
index aabcd7308d6..00000000000
--- a/vendor/github.com/go-git/go-git/v5/storage/filesystem/reference.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package filesystem
-
-import (
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/storer"
- "github.com/go-git/go-git/v5/storage/filesystem/dotgit"
-)
-
-type ReferenceStorage struct {
- dir *dotgit.DotGit
-}
-
-func (r *ReferenceStorage) SetReference(ref *plumbing.Reference) error {
- return r.dir.SetRef(ref, nil)
-}
-
-func (r *ReferenceStorage) CheckAndSetReference(ref, old *plumbing.Reference) error {
- return r.dir.SetRef(ref, old)
-}
-
-func (r *ReferenceStorage) Reference(n plumbing.ReferenceName) (*plumbing.Reference, error) {
- return r.dir.Ref(n)
-}
-
-func (r *ReferenceStorage) IterReferences() (storer.ReferenceIter, error) {
- refs, err := r.dir.Refs()
- if err != nil {
- return nil, err
- }
-
- return storer.NewReferenceSliceIter(refs), nil
-}
-
-func (r *ReferenceStorage) RemoveReference(n plumbing.ReferenceName) error {
- return r.dir.RemoveRef(n)
-}
-
-func (r *ReferenceStorage) CountLooseRefs() (int, error) {
- return r.dir.CountLooseRefs()
-}
-
-func (r *ReferenceStorage) PackRefs() error {
- return r.dir.PackRefs()
-}
diff --git a/vendor/github.com/go-git/go-git/v5/storage/filesystem/shallow.go b/vendor/github.com/go-git/go-git/v5/storage/filesystem/shallow.go
deleted file mode 100644
index ac48fdfbb32..00000000000
--- a/vendor/github.com/go-git/go-git/v5/storage/filesystem/shallow.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package filesystem
-
-import (
- "bufio"
- "fmt"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/storage/filesystem/dotgit"
- "github.com/go-git/go-git/v5/utils/ioutil"
-)
-
-// ShallowStorage where the shallow commits are stored, an internal to
-// manipulate the shallow file
-type ShallowStorage struct {
- dir *dotgit.DotGit
-}
-
-// SetShallow save the shallows in the shallow file in the .git folder as one
-// commit per line represented by 40-byte hexadecimal object terminated by a
-// newline.
-func (s *ShallowStorage) SetShallow(commits []plumbing.Hash) error {
- f, err := s.dir.ShallowWriter()
- if err != nil {
- return err
- }
-
- defer ioutil.CheckClose(f, &err)
- for _, h := range commits {
- if _, err := fmt.Fprintf(f, "%s\n", h); err != nil {
- return err
- }
- }
-
- return err
-}
-
-// Shallow returns the shallow commits reading from shallo file from .git
-func (s *ShallowStorage) Shallow() ([]plumbing.Hash, error) {
- f, err := s.dir.Shallow()
- if f == nil || err != nil {
- return nil, err
- }
-
- defer ioutil.CheckClose(f, &err)
-
- var hash []plumbing.Hash
-
- scn := bufio.NewScanner(f)
- for scn.Scan() {
- hash = append(hash, plumbing.NewHash(scn.Text()))
- }
-
- return hash, scn.Err()
-}
diff --git a/vendor/github.com/go-git/go-git/v5/storage/filesystem/storage.go b/vendor/github.com/go-git/go-git/v5/storage/filesystem/storage.go
deleted file mode 100644
index 951ea00c831..00000000000
--- a/vendor/github.com/go-git/go-git/v5/storage/filesystem/storage.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Package filesystem is a storage backend base on filesystems
-package filesystem
-
-import (
- "github.com/go-git/go-git/v5/plumbing/cache"
- "github.com/go-git/go-git/v5/storage/filesystem/dotgit"
-
- "github.com/go-git/go-billy/v5"
-)
-
-// Storage is an implementation of git.Storer that stores data on disk in the
-// standard git format (this is, the .git directory). Zero values of this type
-// are not safe to use, see the NewStorage function below.
-type Storage struct {
- fs billy.Filesystem
- dir *dotgit.DotGit
-
- ObjectStorage
- ReferenceStorage
- IndexStorage
- ShallowStorage
- ConfigStorage
- ModuleStorage
-}
-
-// Options holds configuration for the storage.
-type Options struct {
- // ExclusiveAccess means that the filesystem is not modified externally
- // while the repo is open.
- ExclusiveAccess bool
- // KeepDescriptors makes the file descriptors to be reused but they will
- // need to be manually closed calling Close().
- KeepDescriptors bool
- // MaxOpenDescriptors is the max number of file descriptors to keep
- // open. If KeepDescriptors is true, all file descriptors will remain open.
- MaxOpenDescriptors int
- // LargeObjectThreshold maximum object size (in bytes) that will be read in to memory.
- // If left unset or set to 0 there is no limit
- LargeObjectThreshold int64
- // AlternatesFS provides the billy filesystem to be used for Git Alternates.
- // If none is provided, it falls back to using the underlying instance used for
- // DotGit.
- AlternatesFS billy.Filesystem
-}
-
-// NewStorage returns a new Storage backed by a given `fs.Filesystem` and cache.
-func NewStorage(fs billy.Filesystem, cache cache.Object) *Storage {
- return NewStorageWithOptions(fs, cache, Options{})
-}
-
-// NewStorageWithOptions returns a new Storage with extra options,
-// backed by a given `fs.Filesystem` and cache.
-func NewStorageWithOptions(fs billy.Filesystem, cache cache.Object, ops Options) *Storage {
- dirOps := dotgit.Options{
- ExclusiveAccess: ops.ExclusiveAccess,
- AlternatesFS: ops.AlternatesFS,
- }
- dir := dotgit.NewWithOptions(fs, dirOps)
-
- return &Storage{
- fs: fs,
- dir: dir,
-
- ObjectStorage: *NewObjectStorageWithOptions(dir, cache, ops),
- ReferenceStorage: ReferenceStorage{dir: dir},
- IndexStorage: IndexStorage{dir: dir},
- ShallowStorage: ShallowStorage{dir: dir},
- ConfigStorage: ConfigStorage{dir: dir},
- ModuleStorage: ModuleStorage{dir: dir},
- }
-}
-
-// Filesystem returns the underlying filesystem
-func (s *Storage) Filesystem() billy.Filesystem {
- return s.fs
-}
-
-// Init initializes .git directory
-func (s *Storage) Init() error {
- return s.dir.Initialize()
-}
-
-func (s *Storage) AddAlternate(remote string) error {
- return s.dir.AddAlternate(remote)
-}
diff --git a/vendor/github.com/go-git/go-git/v5/storage/memory/storage.go b/vendor/github.com/go-git/go-git/v5/storage/memory/storage.go
deleted file mode 100644
index 79211c7c062..00000000000
--- a/vendor/github.com/go-git/go-git/v5/storage/memory/storage.go
+++ /dev/null
@@ -1,324 +0,0 @@
-// Package memory is a storage backend base on memory
-package memory
-
-import (
- "fmt"
- "time"
-
- "github.com/go-git/go-git/v5/config"
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/format/index"
- "github.com/go-git/go-git/v5/plumbing/storer"
- "github.com/go-git/go-git/v5/storage"
-)
-
-var ErrUnsupportedObjectType = fmt.Errorf("unsupported object type")
-
-// Storage is an implementation of git.Storer that stores data on memory, being
-// ephemeral. The use of this storage should be done in controlled environments,
-// since the representation in memory of some repository can fill the machine
-// memory. in the other hand this storage has the best performance.
-type Storage struct {
- ConfigStorage
- ObjectStorage
- ShallowStorage
- IndexStorage
- ReferenceStorage
- ModuleStorage
-}
-
-// NewStorage returns a new Storage base on memory
-func NewStorage() *Storage {
- return &Storage{
- ReferenceStorage: make(ReferenceStorage),
- ConfigStorage: ConfigStorage{},
- ShallowStorage: ShallowStorage{},
- ObjectStorage: ObjectStorage{
- Objects: make(map[plumbing.Hash]plumbing.EncodedObject),
- Commits: make(map[plumbing.Hash]plumbing.EncodedObject),
- Trees: make(map[plumbing.Hash]plumbing.EncodedObject),
- Blobs: make(map[plumbing.Hash]plumbing.EncodedObject),
- Tags: make(map[plumbing.Hash]plumbing.EncodedObject),
- },
- ModuleStorage: make(ModuleStorage),
- }
-}
-
-type ConfigStorage struct {
- config *config.Config
-}
-
-func (c *ConfigStorage) SetConfig(cfg *config.Config) error {
- if err := cfg.Validate(); err != nil {
- return err
- }
-
- c.config = cfg
- return nil
-}
-
-func (c *ConfigStorage) Config() (*config.Config, error) {
- if c.config == nil {
- c.config = config.NewConfig()
- }
-
- return c.config, nil
-}
-
-type IndexStorage struct {
- index *index.Index
-}
-
-func (c *IndexStorage) SetIndex(idx *index.Index) error {
- c.index = idx
- return nil
-}
-
-func (c *IndexStorage) Index() (*index.Index, error) {
- if c.index == nil {
- c.index = &index.Index{Version: 2}
- }
-
- return c.index, nil
-}
-
-type ObjectStorage struct {
- Objects map[plumbing.Hash]plumbing.EncodedObject
- Commits map[plumbing.Hash]plumbing.EncodedObject
- Trees map[plumbing.Hash]plumbing.EncodedObject
- Blobs map[plumbing.Hash]plumbing.EncodedObject
- Tags map[plumbing.Hash]plumbing.EncodedObject
-}
-
-func (o *ObjectStorage) NewEncodedObject() plumbing.EncodedObject {
- return &plumbing.MemoryObject{}
-}
-
-func (o *ObjectStorage) SetEncodedObject(obj plumbing.EncodedObject) (plumbing.Hash, error) {
- h := obj.Hash()
- o.Objects[h] = obj
-
- switch obj.Type() {
- case plumbing.CommitObject:
- o.Commits[h] = o.Objects[h]
- case plumbing.TreeObject:
- o.Trees[h] = o.Objects[h]
- case plumbing.BlobObject:
- o.Blobs[h] = o.Objects[h]
- case plumbing.TagObject:
- o.Tags[h] = o.Objects[h]
- default:
- return h, ErrUnsupportedObjectType
- }
-
- return h, nil
-}
-
-func (o *ObjectStorage) HasEncodedObject(h plumbing.Hash) (err error) {
- if _, ok := o.Objects[h]; !ok {
- return plumbing.ErrObjectNotFound
- }
- return nil
-}
-
-func (o *ObjectStorage) EncodedObjectSize(h plumbing.Hash) (
- size int64, err error) {
- obj, ok := o.Objects[h]
- if !ok {
- return 0, plumbing.ErrObjectNotFound
- }
-
- return obj.Size(), nil
-}
-
-func (o *ObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) {
- obj, ok := o.Objects[h]
- if !ok || (plumbing.AnyObject != t && obj.Type() != t) {
- return nil, plumbing.ErrObjectNotFound
- }
-
- return obj, nil
-}
-
-func (o *ObjectStorage) IterEncodedObjects(t plumbing.ObjectType) (storer.EncodedObjectIter, error) {
- var series []plumbing.EncodedObject
- switch t {
- case plumbing.AnyObject:
- series = flattenObjectMap(o.Objects)
- case plumbing.CommitObject:
- series = flattenObjectMap(o.Commits)
- case plumbing.TreeObject:
- series = flattenObjectMap(o.Trees)
- case plumbing.BlobObject:
- series = flattenObjectMap(o.Blobs)
- case plumbing.TagObject:
- series = flattenObjectMap(o.Tags)
- }
-
- return storer.NewEncodedObjectSliceIter(series), nil
-}
-
-func flattenObjectMap(m map[plumbing.Hash]plumbing.EncodedObject) []plumbing.EncodedObject {
- objects := make([]plumbing.EncodedObject, 0, len(m))
- for _, obj := range m {
- objects = append(objects, obj)
- }
- return objects
-}
-
-func (o *ObjectStorage) Begin() storer.Transaction {
- return &TxObjectStorage{
- Storage: o,
- Objects: make(map[plumbing.Hash]plumbing.EncodedObject),
- }
-}
-
-func (o *ObjectStorage) ForEachObjectHash(fun func(plumbing.Hash) error) error {
- for h := range o.Objects {
- err := fun(h)
- if err != nil {
- if err == storer.ErrStop {
- return nil
- }
- return err
- }
- }
- return nil
-}
-
-func (o *ObjectStorage) ObjectPacks() ([]plumbing.Hash, error) {
- return nil, nil
-}
-func (o *ObjectStorage) DeleteOldObjectPackAndIndex(plumbing.Hash, time.Time) error {
- return nil
-}
-
-var errNotSupported = fmt.Errorf("not supported")
-
-func (o *ObjectStorage) LooseObjectTime(hash plumbing.Hash) (time.Time, error) {
- return time.Time{}, errNotSupported
-}
-func (o *ObjectStorage) DeleteLooseObject(plumbing.Hash) error {
- return errNotSupported
-}
-
-func (o *ObjectStorage) AddAlternate(remote string) error {
- return errNotSupported
-}
-
-type TxObjectStorage struct {
- Storage *ObjectStorage
- Objects map[plumbing.Hash]plumbing.EncodedObject
-}
-
-func (tx *TxObjectStorage) SetEncodedObject(obj plumbing.EncodedObject) (plumbing.Hash, error) {
- h := obj.Hash()
- tx.Objects[h] = obj
-
- return h, nil
-}
-
-func (tx *TxObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) {
- obj, ok := tx.Objects[h]
- if !ok || (plumbing.AnyObject != t && obj.Type() != t) {
- return nil, plumbing.ErrObjectNotFound
- }
-
- return obj, nil
-}
-
-func (tx *TxObjectStorage) Commit() error {
- for h, obj := range tx.Objects {
- delete(tx.Objects, h)
- if _, err := tx.Storage.SetEncodedObject(obj); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (tx *TxObjectStorage) Rollback() error {
- tx.Objects = make(map[plumbing.Hash]plumbing.EncodedObject)
- return nil
-}
-
-type ReferenceStorage map[plumbing.ReferenceName]*plumbing.Reference
-
-func (r ReferenceStorage) SetReference(ref *plumbing.Reference) error {
- if ref != nil {
- r[ref.Name()] = ref
- }
-
- return nil
-}
-
-func (r ReferenceStorage) CheckAndSetReference(ref, old *plumbing.Reference) error {
- if ref == nil {
- return nil
- }
-
- if old != nil {
- tmp := r[ref.Name()]
- if tmp != nil && tmp.Hash() != old.Hash() {
- return storage.ErrReferenceHasChanged
- }
- }
- r[ref.Name()] = ref
- return nil
-}
-
-func (r ReferenceStorage) Reference(n plumbing.ReferenceName) (*plumbing.Reference, error) {
- ref, ok := r[n]
- if !ok {
- return nil, plumbing.ErrReferenceNotFound
- }
-
- return ref, nil
-}
-
-func (r ReferenceStorage) IterReferences() (storer.ReferenceIter, error) {
- var refs []*plumbing.Reference
- for _, ref := range r {
- refs = append(refs, ref)
- }
-
- return storer.NewReferenceSliceIter(refs), nil
-}
-
-func (r ReferenceStorage) CountLooseRefs() (int, error) {
- return len(r), nil
-}
-
-func (r ReferenceStorage) PackRefs() error {
- return nil
-}
-
-func (r ReferenceStorage) RemoveReference(n plumbing.ReferenceName) error {
- delete(r, n)
- return nil
-}
-
-type ShallowStorage []plumbing.Hash
-
-func (s *ShallowStorage) SetShallow(commits []plumbing.Hash) error {
- *s = commits
- return nil
-}
-
-func (s ShallowStorage) Shallow() ([]plumbing.Hash, error) {
- return s, nil
-}
-
-type ModuleStorage map[string]*Storage
-
-func (s ModuleStorage) Module(name string) (storage.Storer, error) {
- if m, ok := s[name]; ok {
- return m, nil
- }
-
- m := NewStorage()
- s[name] = m
-
- return m, nil
-}
diff --git a/vendor/github.com/go-git/go-git/v5/storage/storer.go b/vendor/github.com/go-git/go-git/v5/storage/storer.go
deleted file mode 100644
index 4800ac7ba07..00000000000
--- a/vendor/github.com/go-git/go-git/v5/storage/storer.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package storage
-
-import (
- "errors"
-
- "github.com/go-git/go-git/v5/config"
- "github.com/go-git/go-git/v5/plumbing/storer"
-)
-
-var ErrReferenceHasChanged = errors.New("reference has changed concurrently")
-
-// Storer is a generic storage of objects, references and any information
-// related to a particular repository. The package github.com/go-git/go-git/v5/storage
-// contains two implementation a filesystem base implementation (such as `.git`)
-// and a memory implementations being ephemeral
-type Storer interface {
- storer.EncodedObjectStorer
- storer.ReferenceStorer
- storer.ShallowStorer
- storer.IndexStorer
- config.ConfigStorer
- ModuleStorer
-}
-
-// ModuleStorer allows interact with the modules' Storers
-type ModuleStorer interface {
- // Module returns a Storer representing a submodule, if not exists returns a
- // new empty Storer is returned
- Module(name string) (Storer, error)
-}
diff --git a/vendor/github.com/go-git/go-git/v5/submodule.go b/vendor/github.com/go-git/go-git/v5/submodule.go
deleted file mode 100644
index 84f020dc72d..00000000000
--- a/vendor/github.com/go-git/go-git/v5/submodule.go
+++ /dev/null
@@ -1,398 +0,0 @@
-package git
-
-import (
- "bytes"
- "context"
- "errors"
- "fmt"
- "path"
-
- "github.com/go-git/go-billy/v5"
- "github.com/go-git/go-git/v5/config"
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/format/index"
- "github.com/go-git/go-git/v5/plumbing/transport"
-)
-
-var (
- ErrSubmoduleAlreadyInitialized = errors.New("submodule already initialized")
- ErrSubmoduleNotInitialized = errors.New("submodule not initialized")
-)
-
-// Submodule a submodule allows you to keep another Git repository in a
-// subdirectory of your repository.
-type Submodule struct {
- // initialized defines if a submodule was already initialized.
- initialized bool
-
- c *config.Submodule
- w *Worktree
-}
-
-// Config returns the submodule config
-func (s *Submodule) Config() *config.Submodule {
- return s.c
-}
-
-// Init initialize the submodule reading the recorded Entry in the index for
-// the given submodule
-func (s *Submodule) Init() error {
- cfg, err := s.w.r.Config()
- if err != nil {
- return err
- }
-
- _, ok := cfg.Submodules[s.c.Name]
- if ok {
- return ErrSubmoduleAlreadyInitialized
- }
-
- s.initialized = true
-
- cfg.Submodules[s.c.Name] = s.c
- return s.w.r.Storer.SetConfig(cfg)
-}
-
-// Status returns the status of the submodule.
-func (s *Submodule) Status() (*SubmoduleStatus, error) {
- idx, err := s.w.r.Storer.Index()
- if err != nil {
- return nil, err
- }
-
- return s.status(idx)
-}
-
-func (s *Submodule) status(idx *index.Index) (*SubmoduleStatus, error) {
- status := &SubmoduleStatus{
- Path: s.c.Path,
- }
-
- e, err := idx.Entry(s.c.Path)
- if err != nil && err != index.ErrEntryNotFound {
- return nil, err
- }
-
- if e != nil {
- status.Expected = e.Hash
- }
-
- if !s.initialized {
- return status, nil
- }
-
- r, err := s.Repository()
- if err != nil {
- return nil, err
- }
-
- head, err := r.Head()
- if err == nil {
- status.Current = head.Hash()
- }
-
- if err != nil && err == plumbing.ErrReferenceNotFound {
- err = nil
- }
-
- return status, err
-}
-
-// Repository returns the Repository represented by this submodule
-func (s *Submodule) Repository() (*Repository, error) {
- if !s.initialized {
- return nil, ErrSubmoduleNotInitialized
- }
-
- storer, err := s.w.r.Storer.Module(s.c.Name)
- if err != nil {
- return nil, err
- }
-
- _, err = storer.Reference(plumbing.HEAD)
- if err != nil && err != plumbing.ErrReferenceNotFound {
- return nil, err
- }
-
- var exists bool
- if err == nil {
- exists = true
- }
-
- var worktree billy.Filesystem
- if worktree, err = s.w.Filesystem.Chroot(s.c.Path); err != nil {
- return nil, err
- }
-
- if exists {
- return Open(storer, worktree)
- }
-
- r, err := Init(storer, worktree)
- if err != nil {
- return nil, err
- }
-
- moduleEndpoint, err := transport.NewEndpoint(s.c.URL)
- if err != nil {
- return nil, err
- }
-
- if !path.IsAbs(moduleEndpoint.Path) && moduleEndpoint.Protocol == "file" {
- remotes, err := s.w.r.Remotes()
- if err != nil {
- return nil, err
- }
-
- rootEndpoint, err := transport.NewEndpoint(remotes[0].c.URLs[0])
- if err != nil {
- return nil, err
- }
-
- rootEndpoint.Path = path.Join(rootEndpoint.Path, moduleEndpoint.Path)
- *moduleEndpoint = *rootEndpoint
- }
-
- _, err = r.CreateRemote(&config.RemoteConfig{
- Name: DefaultRemoteName,
- URLs: []string{moduleEndpoint.String()},
- })
-
- return r, err
-}
-
-// Update the registered submodule to match what the superproject expects, the
-// submodule should be initialized first calling the Init method or setting in
-// the options SubmoduleUpdateOptions.Init equals true
-func (s *Submodule) Update(o *SubmoduleUpdateOptions) error {
- return s.UpdateContext(context.Background(), o)
-}
-
-// UpdateContext the registered submodule to match what the superproject
-// expects, the submodule should be initialized first calling the Init method or
-// setting in the options SubmoduleUpdateOptions.Init equals true.
-//
-// The provided Context must be non-nil. If the context expires before the
-// operation is complete, an error is returned. The context only affects the
-// transport operations.
-func (s *Submodule) UpdateContext(ctx context.Context, o *SubmoduleUpdateOptions) error {
- return s.update(ctx, o, plumbing.ZeroHash)
-}
-
-func (s *Submodule) update(ctx context.Context, o *SubmoduleUpdateOptions, forceHash plumbing.Hash) error {
- if !s.initialized && !o.Init {
- return ErrSubmoduleNotInitialized
- }
-
- if !s.initialized && o.Init {
- if err := s.Init(); err != nil {
- return err
- }
- }
-
- idx, err := s.w.r.Storer.Index()
- if err != nil {
- return err
- }
-
- hash := forceHash
- if hash.IsZero() {
- e, err := idx.Entry(s.c.Path)
- if err != nil {
- return err
- }
-
- hash = e.Hash
- }
-
- r, err := s.Repository()
- if err != nil {
- return err
- }
-
- if err := s.fetchAndCheckout(ctx, r, o, hash); err != nil {
- return err
- }
-
- return s.doRecursiveUpdate(r, o)
-}
-
-func (s *Submodule) doRecursiveUpdate(r *Repository, o *SubmoduleUpdateOptions) error {
- if o.RecurseSubmodules == NoRecurseSubmodules {
- return nil
- }
-
- w, err := r.Worktree()
- if err != nil {
- return err
- }
-
- l, err := w.Submodules()
- if err != nil {
- return err
- }
-
- new := &SubmoduleUpdateOptions{}
- *new = *o
-
- new.RecurseSubmodules--
- return l.Update(new)
-}
-
-func (s *Submodule) fetchAndCheckout(
- ctx context.Context, r *Repository, o *SubmoduleUpdateOptions, hash plumbing.Hash,
-) error {
- if !o.NoFetch {
- err := r.FetchContext(ctx, &FetchOptions{Auth: o.Auth, Depth: o.Depth})
- if err != nil && err != NoErrAlreadyUpToDate {
- return err
- }
- }
-
- w, err := r.Worktree()
- if err != nil {
- return err
- }
-
- // Handle a case when submodule refers to an orphaned commit that's still reachable
- // through Git server using a special protocol capability[1].
- //
- // [1]: https://git-scm.com/docs/protocol-capabilities#_allow_reachable_sha1_in_want
- if !o.NoFetch {
- if _, err := w.r.Object(plumbing.AnyObject, hash); err != nil {
- refSpec := config.RefSpec("+" + hash.String() + ":" + hash.String())
-
- err := r.FetchContext(ctx, &FetchOptions{
- Auth: o.Auth,
- RefSpecs: []config.RefSpec{refSpec},
- Depth: o.Depth,
- })
- if err != nil && err != NoErrAlreadyUpToDate && err != ErrExactSHA1NotSupported {
- return err
- }
- }
- }
-
- if err := w.Checkout(&CheckoutOptions{Hash: hash}); err != nil {
- return err
- }
-
- head := plumbing.NewHashReference(plumbing.HEAD, hash)
- return r.Storer.SetReference(head)
-}
-
-// Submodules list of several submodules from the same repository.
-type Submodules []*Submodule
-
-// Init initializes the submodules in this list.
-func (s Submodules) Init() error {
- for _, sub := range s {
- if err := sub.Init(); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// Update updates all the submodules in this list.
-func (s Submodules) Update(o *SubmoduleUpdateOptions) error {
- return s.UpdateContext(context.Background(), o)
-}
-
-// UpdateContext updates all the submodules in this list.
-//
-// The provided Context must be non-nil. If the context expires before the
-// operation is complete, an error is returned. The context only affects the
-// transport operations.
-func (s Submodules) UpdateContext(ctx context.Context, o *SubmoduleUpdateOptions) error {
- for _, sub := range s {
- if err := sub.UpdateContext(ctx, o); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// Status returns the status of the submodules.
-func (s Submodules) Status() (SubmodulesStatus, error) {
- var list SubmodulesStatus
-
- var r *Repository
- for _, sub := range s {
- if r == nil {
- r = sub.w.r
- }
-
- idx, err := r.Storer.Index()
- if err != nil {
- return nil, err
- }
-
- status, err := sub.status(idx)
- if err != nil {
- return nil, err
- }
-
- list = append(list, status)
- }
-
- return list, nil
-}
-
-// SubmodulesStatus contains the status for all submodiles in the worktree
-type SubmodulesStatus []*SubmoduleStatus
-
-// String is equivalent to `git submodule status`
-func (s SubmodulesStatus) String() string {
- buf := bytes.NewBuffer(nil)
- for _, sub := range s {
- fmt.Fprintln(buf, sub)
- }
-
- return buf.String()
-}
-
-// SubmoduleStatus contains the status for a submodule in the worktree
-type SubmoduleStatus struct {
- Path string
- Current plumbing.Hash
- Expected plumbing.Hash
- Branch plumbing.ReferenceName
-}
-
-// IsClean is the HEAD of the submodule is equals to the expected commit
-func (s *SubmoduleStatus) IsClean() bool {
- return s.Current == s.Expected
-}
-
-// String is equivalent to `git submodule status `
-//
-// This will print the SHA-1 of the currently checked out commit for a
-// submodule, along with the submodule path and the output of git describe fo
-// the SHA-1. Each SHA-1 will be prefixed with - if the submodule is not
-// initialized, + if the currently checked out submodule commit does not match
-// the SHA-1 found in the index of the containing repository.
-func (s *SubmoduleStatus) String() string {
- var extra string
- var status = ' '
-
- if s.Current.IsZero() {
- status = '-'
- } else if !s.IsClean() {
- status = '+'
- }
-
- if len(s.Branch) != 0 {
- extra = string(s.Branch[5:])
- } else if !s.Current.IsZero() {
- extra = s.Current.String()[:7]
- }
-
- if extra != "" {
- extra = fmt.Sprintf(" (%s)", extra)
- }
-
- return fmt.Sprintf("%c%s %s%s", status, s.Expected, s.Path, extra)
-}
diff --git a/vendor/github.com/go-git/go-git/v5/utils/binary/read.go b/vendor/github.com/go-git/go-git/v5/utils/binary/read.go
deleted file mode 100644
index b8f9df1a244..00000000000
--- a/vendor/github.com/go-git/go-git/v5/utils/binary/read.go
+++ /dev/null
@@ -1,180 +0,0 @@
-// Package binary implements syntax-sugar functions on top of the standard
-// library binary package
-package binary
-
-import (
- "bufio"
- "encoding/binary"
- "io"
-
- "github.com/go-git/go-git/v5/plumbing"
-)
-
-// Read reads structured binary data from r into data. Bytes are read and
-// decoded in BigEndian order
-// https://golang.org/pkg/encoding/binary/#Read
-func Read(r io.Reader, data ...interface{}) error {
- for _, v := range data {
- if err := binary.Read(r, binary.BigEndian, v); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// ReadUntil reads from r untin delim is found
-func ReadUntil(r io.Reader, delim byte) ([]byte, error) {
- if bufr, ok := r.(*bufio.Reader); ok {
- return ReadUntilFromBufioReader(bufr, delim)
- }
-
- var buf [1]byte
- value := make([]byte, 0, 16)
- for {
- if _, err := io.ReadFull(r, buf[:]); err != nil {
- if err == io.EOF {
- return nil, err
- }
-
- return nil, err
- }
-
- if buf[0] == delim {
- return value, nil
- }
-
- value = append(value, buf[0])
- }
-}
-
-// ReadUntilFromBufioReader is like bufio.ReadBytes but drops the delimiter
-// from the result.
-func ReadUntilFromBufioReader(r *bufio.Reader, delim byte) ([]byte, error) {
- value, err := r.ReadBytes(delim)
- if err != nil || len(value) == 0 {
- return nil, err
- }
-
- return value[:len(value)-1], nil
-}
-
-// ReadVariableWidthInt reads and returns an int in Git VLQ special format:
-//
-// Ordinary VLQ has some redundancies, example: the number 358 can be
-// encoded as the 2-octet VLQ 0x8166 or the 3-octet VLQ 0x808166 or the
-// 4-octet VLQ 0x80808166 and so forth.
-//
-// To avoid these redundancies, the VLQ format used in Git removes this
-// prepending redundancy and extends the representable range of shorter
-// VLQs by adding an offset to VLQs of 2 or more octets in such a way
-// that the lowest possible value for such an (N+1)-octet VLQ becomes
-// exactly one more than the maximum possible value for an N-octet VLQ.
-// In particular, since a 1-octet VLQ can store a maximum value of 127,
-// the minimum 2-octet VLQ (0x8000) is assigned the value 128 instead of
-// 0. Conversely, the maximum value of such a 2-octet VLQ (0xff7f) is
-// 16511 instead of just 16383. Similarly, the minimum 3-octet VLQ
-// (0x808000) has a value of 16512 instead of zero, which means
-// that the maximum 3-octet VLQ (0xffff7f) is 2113663 instead of
-// just 2097151. And so forth.
-//
-// This is how the offset is saved in C:
-//
-// dheader[pos] = ofs & 127;
-// while (ofs >>= 7)
-// dheader[--pos] = 128 | (--ofs & 127);
-//
-func ReadVariableWidthInt(r io.Reader) (int64, error) {
- var c byte
- if err := Read(r, &c); err != nil {
- return 0, err
- }
-
- var v = int64(c & maskLength)
- for c&maskContinue > 0 {
- v++
- if err := Read(r, &c); err != nil {
- return 0, err
- }
-
- v = (v << lengthBits) + int64(c&maskLength)
- }
-
- return v, nil
-}
-
-const (
- maskContinue = uint8(128) // 1000 000
- maskLength = uint8(127) // 0111 1111
- lengthBits = uint8(7) // subsequent bytes has 7 bits to store the length
-)
-
-// ReadUint64 reads 8 bytes and returns them as a BigEndian uint32
-func ReadUint64(r io.Reader) (uint64, error) {
- var v uint64
- if err := binary.Read(r, binary.BigEndian, &v); err != nil {
- return 0, err
- }
-
- return v, nil
-}
-
-// ReadUint32 reads 4 bytes and returns them as a BigEndian uint32
-func ReadUint32(r io.Reader) (uint32, error) {
- var v uint32
- if err := binary.Read(r, binary.BigEndian, &v); err != nil {
- return 0, err
- }
-
- return v, nil
-}
-
-// ReadUint16 reads 2 bytes and returns them as a BigEndian uint16
-func ReadUint16(r io.Reader) (uint16, error) {
- var v uint16
- if err := binary.Read(r, binary.BigEndian, &v); err != nil {
- return 0, err
- }
-
- return v, nil
-}
-
-// ReadHash reads a plumbing.Hash from r
-func ReadHash(r io.Reader) (plumbing.Hash, error) {
- var h plumbing.Hash
- if err := binary.Read(r, binary.BigEndian, h[:]); err != nil {
- return plumbing.ZeroHash, err
- }
-
- return h, nil
-}
-
-const sniffLen = 8000
-
-// IsBinary detects if data is a binary value based on:
-// http://git.kernel.org/cgit/git/git.git/tree/xdiff-interface.c?id=HEAD#n198
-func IsBinary(r io.Reader) (bool, error) {
- reader := bufio.NewReader(r)
- c := 0
- for {
- if c == sniffLen {
- break
- }
-
- b, err := reader.ReadByte()
- if err == io.EOF {
- break
- }
- if err != nil {
- return false, err
- }
-
- if b == byte(0) {
- return true, nil
- }
-
- c++
- }
-
- return false, nil
-}
diff --git a/vendor/github.com/go-git/go-git/v5/utils/binary/write.go b/vendor/github.com/go-git/go-git/v5/utils/binary/write.go
deleted file mode 100644
index c08c73a06b2..00000000000
--- a/vendor/github.com/go-git/go-git/v5/utils/binary/write.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package binary
-
-import (
- "encoding/binary"
- "io"
-)
-
-// Write writes the binary representation of data into w, using BigEndian order
-// https://golang.org/pkg/encoding/binary/#Write
-func Write(w io.Writer, data ...interface{}) error {
- for _, v := range data {
- if err := binary.Write(w, binary.BigEndian, v); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func WriteVariableWidthInt(w io.Writer, n int64) error {
- buf := []byte{byte(n & 0x7f)}
- n >>= 7
- for n != 0 {
- n--
- buf = append([]byte{0x80 | (byte(n & 0x7f))}, buf...)
- n >>= 7
- }
-
- _, err := w.Write(buf)
-
- return err
-}
-
-// WriteUint64 writes the binary representation of a uint64 into w, in BigEndian
-// order
-func WriteUint64(w io.Writer, value uint64) error {
- return binary.Write(w, binary.BigEndian, value)
-}
-
-// WriteUint32 writes the binary representation of a uint32 into w, in BigEndian
-// order
-func WriteUint32(w io.Writer, value uint32) error {
- return binary.Write(w, binary.BigEndian, value)
-}
-
-// WriteUint16 writes the binary representation of a uint16 into w, in BigEndian
-// order
-func WriteUint16(w io.Writer, value uint16) error {
- return binary.Write(w, binary.BigEndian, value)
-}
diff --git a/vendor/github.com/go-git/go-git/v5/utils/diff/diff.go b/vendor/github.com/go-git/go-git/v5/utils/diff/diff.go
deleted file mode 100644
index 70054949fd9..00000000000
--- a/vendor/github.com/go-git/go-git/v5/utils/diff/diff.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Package diff implements line oriented diffs, similar to the ancient
-// Unix diff command.
-//
-// The current implementation is just a wrapper around Sergi's
-// go-diff/diffmatchpatch library, which is a go port of Neil
-// Fraser's google-diff-match-patch code
-package diff
-
-import (
- "bytes"
- "time"
-
- "github.com/sergi/go-diff/diffmatchpatch"
-)
-
-// Do computes the (line oriented) modifications needed to turn the src
-// string into the dst string. The underlying algorithm is Meyers,
-// its complexity is O(N*d) where N is min(lines(src), lines(dst)) and d
-// is the size of the diff.
-func Do(src, dst string) (diffs []diffmatchpatch.Diff) {
- // the default timeout is time.Second which may be too small under heavy load
- return DoWithTimeout(src, dst, time.Hour)
-}
-
-// DoWithTimeout computes the (line oriented) modifications needed to turn the src
-// string into the dst string. The `timeout` argument specifies the maximum
-// amount of time it is allowed to spend in this function. If the timeout
-// is exceeded, the parts of the strings which were not considered are turned into
-// a bulk delete+insert and the half-baked suboptimal result is returned at once.
-// The underlying algorithm is Meyers, its complexity is O(N*d) where N is
-// min(lines(src), lines(dst)) and d is the size of the diff.
-func DoWithTimeout(src, dst string, timeout time.Duration) (diffs []diffmatchpatch.Diff) {
- dmp := diffmatchpatch.New()
- dmp.DiffTimeout = timeout
- wSrc, wDst, warray := dmp.DiffLinesToRunes(src, dst)
- diffs = dmp.DiffMainRunes(wSrc, wDst, false)
- diffs = dmp.DiffCharsToLines(diffs, warray)
- return diffs
-}
-
-// Dst computes and returns the destination text.
-func Dst(diffs []diffmatchpatch.Diff) string {
- var text bytes.Buffer
- for _, d := range diffs {
- if d.Type != diffmatchpatch.DiffDelete {
- text.WriteString(d.Text)
- }
- }
- return text.String()
-}
-
-// Src computes and returns the source text
-func Src(diffs []diffmatchpatch.Diff) string {
- var text bytes.Buffer
- for _, d := range diffs {
- if d.Type != diffmatchpatch.DiffInsert {
- text.WriteString(d.Text)
- }
- }
- return text.String()
-}
diff --git a/vendor/github.com/go-git/go-git/v5/utils/ioutil/common.go b/vendor/github.com/go-git/go-git/v5/utils/ioutil/common.go
deleted file mode 100644
index 235af717bcb..00000000000
--- a/vendor/github.com/go-git/go-git/v5/utils/ioutil/common.go
+++ /dev/null
@@ -1,210 +0,0 @@
-// Package ioutil implements some I/O utility functions.
-package ioutil
-
-import (
- "bufio"
- "context"
- "errors"
- "io"
-
- ctxio "github.com/jbenet/go-context/io"
-)
-
-type readPeeker interface {
- io.Reader
- Peek(int) ([]byte, error)
-}
-
-var (
- ErrEmptyReader = errors.New("reader is empty")
-)
-
-// NonEmptyReader takes a reader and returns it if it is not empty, or
-// `ErrEmptyReader` if it is empty. If there is an error when reading the first
-// byte of the given reader, it will be propagated.
-func NonEmptyReader(r io.Reader) (io.Reader, error) {
- pr, ok := r.(readPeeker)
- if !ok {
- pr = bufio.NewReader(r)
- }
-
- _, err := pr.Peek(1)
- if err == io.EOF {
- return nil, ErrEmptyReader
- }
-
- if err != nil {
- return nil, err
- }
-
- return pr, nil
-}
-
-type readCloser struct {
- io.Reader
- closer io.Closer
-}
-
-func (r *readCloser) Close() error {
- return r.closer.Close()
-}
-
-// NewReadCloser creates an `io.ReadCloser` with the given `io.Reader` and
-// `io.Closer`.
-func NewReadCloser(r io.Reader, c io.Closer) io.ReadCloser {
- return &readCloser{Reader: r, closer: c}
-}
-
-type readCloserCloser struct {
- io.ReadCloser
- closer func() error
-}
-
-func (r *readCloserCloser) Close() (err error) {
- defer func() {
- if err == nil {
- err = r.closer()
- return
- }
- _ = r.closer()
- }()
- return r.ReadCloser.Close()
-}
-
-// NewReadCloserWithCloser creates an `io.ReadCloser` with the given `io.ReaderCloser` and
-// `io.Closer` that ensures that the closer is closed on close
-func NewReadCloserWithCloser(r io.ReadCloser, c func() error) io.ReadCloser {
- return &readCloserCloser{ReadCloser: r, closer: c}
-}
-
-type writeCloser struct {
- io.Writer
- closer io.Closer
-}
-
-func (r *writeCloser) Close() error {
- return r.closer.Close()
-}
-
-// NewWriteCloser creates an `io.WriteCloser` with the given `io.Writer` and
-// `io.Closer`.
-func NewWriteCloser(w io.Writer, c io.Closer) io.WriteCloser {
- return &writeCloser{Writer: w, closer: c}
-}
-
-type writeNopCloser struct {
- io.Writer
-}
-
-func (writeNopCloser) Close() error { return nil }
-
-// WriteNopCloser returns a WriteCloser with a no-op Close method wrapping
-// the provided Writer w.
-func WriteNopCloser(w io.Writer) io.WriteCloser {
- return writeNopCloser{w}
-}
-
-type readerAtAsReader struct {
- io.ReaderAt
- offset int64
-}
-
-func (r *readerAtAsReader) Read(bs []byte) (int, error) {
- n, err := r.ReaderAt.ReadAt(bs, r.offset)
- r.offset += int64(n)
- return n, err
-}
-
-func NewReaderUsingReaderAt(r io.ReaderAt, offset int64) io.Reader {
- return &readerAtAsReader{
- ReaderAt: r,
- offset: offset,
- }
-}
-
-// CheckClose calls Close on the given io.Closer. If the given *error points to
-// nil, it will be assigned the error returned by Close. Otherwise, any error
-// returned by Close will be ignored. CheckClose is usually called with defer.
-func CheckClose(c io.Closer, err *error) {
- if cerr := c.Close(); cerr != nil && *err == nil {
- *err = cerr
- }
-}
-
-// NewContextWriter wraps a writer to make it respect given Context.
-// If there is a blocking write, the returned Writer will return whenever the
-// context is cancelled (the return values are n=0 and err=ctx.Err()).
-func NewContextWriter(ctx context.Context, w io.Writer) io.Writer {
- return ctxio.NewWriter(ctx, w)
-}
-
-// NewContextReader wraps a reader to make it respect given Context.
-// If there is a blocking read, the returned Reader will return whenever the
-// context is cancelled (the return values are n=0 and err=ctx.Err()).
-func NewContextReader(ctx context.Context, r io.Reader) io.Reader {
- return ctxio.NewReader(ctx, r)
-}
-
-// NewContextWriteCloser as NewContextWriter but with io.Closer interface.
-func NewContextWriteCloser(ctx context.Context, w io.WriteCloser) io.WriteCloser {
- ctxw := ctxio.NewWriter(ctx, w)
- return NewWriteCloser(ctxw, w)
-}
-
-// NewContextReadCloser as NewContextReader but with io.Closer interface.
-func NewContextReadCloser(ctx context.Context, r io.ReadCloser) io.ReadCloser {
- ctxr := ctxio.NewReader(ctx, r)
- return NewReadCloser(ctxr, r)
-}
-
-type readerOnError struct {
- io.Reader
- notify func(error)
-}
-
-// NewReaderOnError returns a io.Reader that call the notify function when an
-// unexpected (!io.EOF) error happens, after call Read function.
-func NewReaderOnError(r io.Reader, notify func(error)) io.Reader {
- return &readerOnError{r, notify}
-}
-
-// NewReadCloserOnError returns a io.ReadCloser that call the notify function
-// when an unexpected (!io.EOF) error happens, after call Read function.
-func NewReadCloserOnError(r io.ReadCloser, notify func(error)) io.ReadCloser {
- return NewReadCloser(NewReaderOnError(r, notify), r)
-}
-
-func (r *readerOnError) Read(buf []byte) (n int, err error) {
- n, err = r.Reader.Read(buf)
- if err != nil && err != io.EOF {
- r.notify(err)
- }
-
- return
-}
-
-type writerOnError struct {
- io.Writer
- notify func(error)
-}
-
-// NewWriterOnError returns a io.Writer that call the notify function when an
-// unexpected (!io.EOF) error happens, after call Write function.
-func NewWriterOnError(w io.Writer, notify func(error)) io.Writer {
- return &writerOnError{w, notify}
-}
-
-// NewWriteCloserOnError returns a io.WriteCloser that call the notify function
-// when an unexpected (!io.EOF) error happens, after call Write function.
-func NewWriteCloserOnError(w io.WriteCloser, notify func(error)) io.WriteCloser {
- return NewWriteCloser(NewWriterOnError(w, notify), w)
-}
-
-func (r *writerOnError) Write(p []byte) (n int, err error) {
- n, err = r.Writer.Write(p)
- if err != nil && err != io.EOF {
- r.notify(err)
- }
-
- return
-}
diff --git a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/change.go b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/change.go
deleted file mode 100644
index cc6dc890716..00000000000
--- a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/change.go
+++ /dev/null
@@ -1,149 +0,0 @@
-package merkletrie
-
-import (
- "fmt"
- "io"
-
- "github.com/go-git/go-git/v5/utils/merkletrie/noder"
-)
-
-// Action values represent the kind of things a Change can represent:
-// insertion, deletions or modifications of files.
-type Action int
-
-// The set of possible actions in a change.
-const (
- _ Action = iota
- Insert
- Delete
- Modify
-)
-
-// String returns the action as a human readable text.
-func (a Action) String() string {
- switch a {
- case Insert:
- return "Insert"
- case Delete:
- return "Delete"
- case Modify:
- return "Modify"
- default:
- panic(fmt.Sprintf("unsupported action: %d", a))
- }
-}
-
-// A Change value represent how a noder has change between to merkletries.
-type Change struct {
- // The noder before the change or nil if it was inserted.
- From noder.Path
- // The noder after the change or nil if it was deleted.
- To noder.Path
-}
-
-// Action is convenience method that returns what Action c represents.
-func (c *Change) Action() (Action, error) {
- if c.From == nil && c.To == nil {
- return Action(0), fmt.Errorf("malformed change: nil from and to")
- }
- if c.From == nil {
- return Insert, nil
- }
- if c.To == nil {
- return Delete, nil
- }
-
- return Modify, nil
-}
-
-// NewInsert returns a new Change representing the insertion of n.
-func NewInsert(n noder.Path) Change { return Change{To: n} }
-
-// NewDelete returns a new Change representing the deletion of n.
-func NewDelete(n noder.Path) Change { return Change{From: n} }
-
-// NewModify returns a new Change representing that a has been modified and
-// it is now b.
-func NewModify(a, b noder.Path) Change {
- return Change{
- From: a,
- To: b,
- }
-}
-
-// String returns a single change in human readable form, using the
-// format: '<' + action + space + path + '>'. The contents of the file
-// before or after the change are not included in this format.
-//
-// Example: inserting a file at the path a/b/c.txt will return "".
-func (c Change) String() string {
- action, err := c.Action()
- if err != nil {
- panic(err)
- }
-
- var path string
- if action == Delete {
- path = c.From.String()
- } else {
- path = c.To.String()
- }
-
- return fmt.Sprintf("<%s %s>", action, path)
-}
-
-// Changes is a list of changes between to merkletries.
-type Changes []Change
-
-// NewChanges returns an empty list of changes.
-func NewChanges() Changes {
- return Changes{}
-}
-
-// Add adds the change c to the list of changes.
-func (l *Changes) Add(c Change) {
- *l = append(*l, c)
-}
-
-// AddRecursiveInsert adds the required changes to insert all the
-// file-like noders found in root, recursively.
-func (l *Changes) AddRecursiveInsert(root noder.Path) error {
- return l.addRecursive(root, NewInsert)
-}
-
-// AddRecursiveDelete adds the required changes to delete all the
-// file-like noders found in root, recursively.
-func (l *Changes) AddRecursiveDelete(root noder.Path) error {
- return l.addRecursive(root, NewDelete)
-}
-
-type noderToChangeFn func(noder.Path) Change // NewInsert or NewDelete
-
-func (l *Changes) addRecursive(root noder.Path, ctor noderToChangeFn) error {
- if !root.IsDir() {
- l.Add(ctor(root))
- return nil
- }
-
- i, err := NewIterFromPath(root)
- if err != nil {
- return err
- }
-
- var current noder.Path
- for {
- if current, err = i.Step(); err != nil {
- if err == io.EOF {
- break
- }
- return err
- }
- if current.IsDir() {
- continue
- }
- l.Add(ctor(current))
- }
-
- return nil
-}
diff --git a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/difftree.go b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/difftree.go
deleted file mode 100644
index 8090942ddbf..00000000000
--- a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/difftree.go
+++ /dev/null
@@ -1,453 +0,0 @@
-package merkletrie
-
-// The focus of this difftree implementation is to save time by
-// skipping whole directories if their hash is the same in both
-// trees.
-//
-// The diff algorithm implemented here is based on the doubleiter
-// type defined in this same package; we will iterate over both
-// trees at the same time, while comparing the current noders in
-// each iterator. Depending on how they differ we will output the
-// corresponding changes and move the iterators further over both
-// trees.
-//
-// The table bellow show all the possible comparison results, along
-// with what changes should we produce and how to advance the
-// iterators.
-//
-// The table is implemented by the switches in this function,
-// diffTwoNodes, diffTwoNodesSameName and diffTwoDirs.
-//
-// Many Bothans died to bring us this information, make sure you
-// understand the table before modifying this code.
-
-// # Cases
-//
-// When comparing noders in both trees you will find yourself in
-// one of 169 possible cases, but if we ignore moves, we can
-// simplify a lot the search space into the following table:
-//
-// - "-": nothing, no file or directory
-// - a<>: an empty file named "a".
-// - a<1>: a file named "a", with "1" as its contents.
-// - a<2>: a file named "a", with "2" as its contents.
-// - a(): an empty dir named "a".
-// - a(...): a dir named "a", with some files and/or dirs inside (possibly
-// empty).
-// - a(;;;): a dir named "a", with some other files and/or dirs inside
-// (possibly empty), which different from the ones in "a(...)".
-//
-// \ to - a<> a<1> a<2> a() a(...) a(;;;)
-// from \
-// - 00 01 02 03 04 05 06
-// a<> 10 11 12 13 14 15 16
-// a<1> 20 21 22 23 24 25 26
-// a<2> 30 31 32 33 34 35 36
-// a() 40 41 42 43 44 45 46
-// a(...) 50 51 52 53 54 55 56
-// a(;;;) 60 61 62 63 64 65 66
-//
-// Every (from, to) combination in the table is a special case, but
-// some of them can be merged into some more general cases, for
-// instance 11 and 22 can be merged into the general case: both
-// noders are equal.
-//
-// Here is a full list of all the cases that are similar and how to
-// merge them together into more general cases. Each general case
-// is labeled with an uppercase letter for further reference, and it
-// is followed by the pseudocode of the checks you have to perform
-// on both noders to see if you are in such a case, the actions to
-// perform (i.e. what changes to output) and how to advance the
-// iterators of each tree to continue the comparison process.
-//
-// ## A. Impossible: 00
-//
-// ## B. Same thing on both sides: 11, 22, 33, 44, 55, 66
-// - check: `SameName() && SameHash()`
-// - action: do nothing.
-// - advance: `FromNext(); ToNext()`
-//
-// ### C. To was created: 01, 02, 03, 04, 05, 06
-// - check: `DifferentName() && ToBeforeFrom()`
-// - action: insertRecursively(to)
-// - advance: `ToNext()`
-//
-// ### D. From was deleted: 10, 20, 30, 40, 50, 60
-// - check: `DifferentName() && FromBeforeTo()`
-// - action: `DeleteRecursively(from)`
-// - advance: `FromNext()`
-//
-// ### E. Empty file to file with contents: 12, 13
-// - check: `SameName() && DifferentHash() && FromIsFile() &&
-// ToIsFile() && FromIsEmpty()`
-// - action: `modifyFile(from, to)`
-// - advance: `FromNext()` or `FromStep()`
-//
-// ### E'. file with contents to empty file: 21, 31
-// - check: `SameName() && DifferentHash() && FromIsFile() &&
-// ToIsFile() && ToIsEmpty()`
-// - action: `modifyFile(from, to)`
-// - advance: `FromNext()` or `FromStep()`
-//
-// ### F. empty file to empty dir with the same name: 14
-// - check: `SameName() && FromIsFile() && FromIsEmpty() &&
-// ToIsDir() && ToIsEmpty()`
-// - action: `DeleteFile(from); InsertEmptyDir(to)`
-// - advance: `FromNext(); ToNext()`
-//
-// ### F'. empty dir to empty file of the same name: 41
-// - check: `SameName() && FromIsDir() && FromIsEmpty &&
-// ToIsFile() && ToIsEmpty()`
-// - action: `DeleteEmptyDir(from); InsertFile(to)`
-// - advance: `FromNext(); ToNext()` or step for any of them.
-//
-// ### G. empty file to non-empty dir of the same name: 15, 16
-// - check: `SameName() && FromIsFile() && ToIsDir() &&
-// FromIsEmpty() && ToIsNotEmpty()`
-// - action: `DeleteFile(from); InsertDirRecursively(to)`
-// - advance: `FromNext(); ToNext()`
-//
-// ### G'. non-empty dir to empty file of the same name: 51, 61
-// - check: `SameName() && FromIsDir() && FromIsNotEmpty() &&
-// ToIsFile() && FromIsEmpty()`
-// - action: `DeleteDirRecursively(from); InsertFile(to)`
-// - advance: `FromNext(); ToNext()`
-//
-// ### H. modify file contents: 23, 32
-// - check: `SameName() && FromIsFile() && ToIsFile() &&
-// FromIsNotEmpty() && ToIsNotEmpty()`
-// - action: `ModifyFile(from, to)`
-// - advance: `FromNext(); ToNext()`
-//
-// ### I. file with contents to empty dir: 24, 34
-// - check: `SameName() && DifferentHash() && FromIsFile() &&
-// FromIsNotEmpty() && ToIsDir() && ToIsEmpty()`
-// - action: `DeleteFile(from); InsertEmptyDir(to)`
-// - advance: `FromNext(); ToNext()`
-//
-// ### I'. empty dir to file with contents: 42, 43
-// - check: `SameName() && DifferentHash() && FromIsDir() &&
-// FromIsEmpty() && ToIsFile() && ToIsEmpty()`
-// - action: `DeleteDir(from); InsertFile(to)`
-// - advance: `FromNext(); ToNext()`
-//
-// ### J. file with contents to dir with contents: 25, 26, 35, 36
-// - check: `SameName() && DifferentHash() && FromIsFile() &&
-// FromIsNotEmpty() && ToIsDir() && ToIsNotEmpty()`
-// - action: `DeleteFile(from); InsertDirRecursively(to)`
-// - advance: `FromNext(); ToNext()`
-//
-// ### J'. dir with contents to file with contents: 52, 62, 53, 63
-// - check: `SameName() && DifferentHash() && FromIsDir() &&
-// FromIsNotEmpty() && ToIsFile() && ToIsNotEmpty()`
-// - action: `DeleteDirRecursively(from); InsertFile(to)`
-// - advance: `FromNext(); ToNext()`
-//
-// ### K. empty dir to dir with contents: 45, 46
-// - check: `SameName() && DifferentHash() && FromIsDir() &&
-// FromIsEmpty() && ToIsDir() && ToIsNotEmpty()`
-// - action: `InsertChildrenRecursively(to)`
-// - advance: `FromNext(); ToNext()`
-//
-// ### K'. dir with contents to empty dir: 54, 64
-// - check: `SameName() && DifferentHash() && FromIsDir() &&
-// FromIsEmpty() && ToIsDir() && ToIsNotEmpty()`
-// - action: `DeleteChildrenRecursively(from)`
-// - advance: `FromNext(); ToNext()`
-//
-// ### L. dir with contents to dir with different contents: 56, 65
-// - check: `SameName() && DifferentHash() && FromIsDir() &&
-// FromIsNotEmpty() && ToIsDir() && ToIsNotEmpty()`
-// - action: nothing
-// - advance: `FromStep(); ToStep()`
-//
-//
-
-// All these cases can be further simplified by a truth table
-// reduction process, in which we gather similar checks together to
-// make the final code easier to read and understand.
-//
-// The first 6 columns are the outputs of the checks to perform on
-// both noders. I have labeled them 1 to 6, this is what they mean:
-//
-// 1: SameName()
-// 2: SameHash()
-// 3: FromIsDir()
-// 4: ToIsDir()
-// 5: FromIsEmpty()
-// 6: ToIsEmpty()
-//
-// The from and to columns are a fsnoder example of the elements
-// that you will find on each tree under the specified comparison
-// results (columns 1 to 6).
-//
-// The type column identifies the case we are into, from the list above.
-//
-// The type' column identifies the new set of reduced cases, using
-// lowercase letters, and they are explained after the table.
-//
-// The last column is the set of actions and advances for each case.
-//
-// "---" means impossible except in case of hash collision.
-//
-// advance meaning:
-// - NN: from.Next(); to.Next()
-// - SS: from.Step(); to.Step()
-//
-// 1 2 3 4 5 6 | from | to |type|type'|action ; advance
-// ------------+--------+--------+----+------------------------------------
-// 0 0 0 0 0 0 | | | | | if !SameName() {
-// . | | | | | if FromBeforeTo() {
-// . | | | D | d | delete(from); from.Next()
-// . | | | | | } else {
-// . | | | C | c | insert(to); to.Next()
-// . | | | | | }
-// 0 1 1 1 1 1 | | | | | }
-// 1 0 0 0 0 0 | a<1> | a<2> | H | e | modify(from, to); NN
-// 1 0 0 0 0 1 | a<1> | a<> | E' | e | modify(from, to); NN
-// 1 0 0 0 1 0 | a<> | a<1> | E | e | modify(from, to); NN
-// 1 0 0 0 1 1 | ---- | ---- | | e |
-// 1 0 0 1 0 0 | a<1> | a(...) | J | f | delete(from); insert(to); NN
-// 1 0 0 1 0 1 | a<1> | a() | I | f | delete(from); insert(to); NN
-// 1 0 0 1 1 0 | a<> | a(...) | G | f | delete(from); insert(to); NN
-// 1 0 0 1 1 1 | a<> | a() | F | f | delete(from); insert(to); NN
-// 1 0 1 0 0 0 | a(...) | a<1> | J' | f | delete(from); insert(to); NN
-// 1 0 1 0 0 1 | a(...) | a<> | G' | f | delete(from); insert(to); NN
-// 1 0 1 0 1 0 | a() | a<1> | I' | f | delete(from); insert(to); NN
-// 1 0 1 0 1 1 | a() | a<> | F' | f | delete(from); insert(to); NN
-// 1 0 1 1 0 0 | a(...) | a(;;;) | L | g | nothing; SS
-// 1 0 1 1 0 1 | a(...) | a() | K' | h | deleteChildren(from); NN
-// 1 0 1 1 1 0 | a() | a(...) | K | i | insertChildren(to); NN
-// 1 0 1 1 1 1 | ---- | ---- | | |
-// 1 1 0 0 0 0 | a<1> | a<1> | B | b | nothing; NN
-// 1 1 0 0 0 1 | ---- | ---- | | b |
-// 1 1 0 0 1 0 | ---- | ---- | | b |
-// 1 1 0 0 1 1 | a<> | a<> | B | b | nothing; NN
-// 1 1 0 1 0 0 | ---- | ---- | | b |
-// 1 1 0 1 0 1 | ---- | ---- | | b |
-// 1 1 0 1 1 0 | ---- | ---- | | b |
-// 1 1 0 1 1 1 | ---- | ---- | | b |
-// 1 1 1 0 0 0 | ---- | ---- | | b |
-// 1 1 1 0 0 1 | ---- | ---- | | b |
-// 1 1 1 0 1 0 | ---- | ---- | | b |
-// 1 1 1 0 1 1 | ---- | ---- | | b |
-// 1 1 1 1 0 0 | a(...) | a(...) | B | b | nothing; NN
-// 1 1 1 1 0 1 | ---- | ---- | | b |
-// 1 1 1 1 1 0 | ---- | ---- | | b |
-// 1 1 1 1 1 1 | a() | a() | B | b | nothing; NN
-//
-// c and d:
-// if !SameName()
-// d if FromBeforeTo()
-// c else
-// b: SameName) && sameHash()
-// e: SameName() && !sameHash() && BothAreFiles()
-// f: SameName() && !sameHash() && FileAndDir()
-// g: SameName() && !sameHash() && BothAreDirs() && NoneIsEmpty
-// i: SameName() && !sameHash() && BothAreDirs() && FromIsEmpty
-// h: else of i
-
-import (
- "context"
- "errors"
- "fmt"
-
- "github.com/go-git/go-git/v5/utils/merkletrie/noder"
-)
-
-var (
- // ErrCanceled is returned whenever the operation is canceled.
- ErrCanceled = errors.New("operation canceled")
-)
-
-// DiffTree calculates the list of changes between two merkletries. It
-// uses the provided hashEqual callback to compare noders.
-func DiffTree(
- fromTree,
- toTree noder.Noder,
- hashEqual noder.Equal,
-) (Changes, error) {
- return DiffTreeContext(context.Background(), fromTree, toTree, hashEqual)
-}
-
-// DiffTreeContext calculates the list of changes between two merkletries. It
-// uses the provided hashEqual callback to compare noders.
-// Error will be returned if context expires
-// Provided context must be non nil
-func DiffTreeContext(ctx context.Context, fromTree, toTree noder.Noder,
- hashEqual noder.Equal) (Changes, error) {
- ret := NewChanges()
-
- ii, err := newDoubleIter(fromTree, toTree, hashEqual)
- if err != nil {
- return nil, err
- }
-
- for {
- select {
- case <-ctx.Done():
- return nil, ErrCanceled
- default:
- }
-
- from := ii.from.current
- to := ii.to.current
-
- switch r := ii.remaining(); r {
- case noMoreNoders:
- return ret, nil
- case onlyFromRemains:
- if err = ret.AddRecursiveDelete(from); err != nil {
- return nil, err
- }
- if err = ii.nextFrom(); err != nil {
- return nil, err
- }
- case onlyToRemains:
- if to.Skip() {
- if err = ret.AddRecursiveDelete(to); err != nil {
- return nil, err
- }
- } else {
- if err = ret.AddRecursiveInsert(to); err != nil {
- return nil, err
- }
- }
- if err = ii.nextTo(); err != nil {
- return nil, err
- }
- case bothHaveNodes:
- if from.Skip() {
- if err = ret.AddRecursiveDelete(from); err != nil {
- return nil, err
- }
- if err := ii.nextBoth(); err != nil {
- return nil, err
- }
- break
- }
- if to.Skip() {
- if err = ret.AddRecursiveDelete(to); err != nil {
- return nil, err
- }
- if err := ii.nextBoth(); err != nil {
- return nil, err
- }
- break
- }
-
- if err = diffNodes(&ret, ii); err != nil {
- return nil, err
- }
- default:
- panic(fmt.Sprintf("unknown remaining value: %d", r))
- }
- }
-}
-
-func diffNodes(changes *Changes, ii *doubleIter) error {
- from := ii.from.current
- to := ii.to.current
- var err error
-
- // compare their full paths as strings
- switch from.Compare(to) {
- case -1:
- if err = changes.AddRecursiveDelete(from); err != nil {
- return err
- }
- if err = ii.nextFrom(); err != nil {
- return err
- }
- case 1:
- if err = changes.AddRecursiveInsert(to); err != nil {
- return err
- }
- if err = ii.nextTo(); err != nil {
- return err
- }
- default:
- if err := diffNodesSameName(changes, ii); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func diffNodesSameName(changes *Changes, ii *doubleIter) error {
- from := ii.from.current
- to := ii.to.current
-
- status, err := ii.compare()
- if err != nil {
- return err
- }
-
- switch {
- case status.sameHash:
- // do nothing
- if err = ii.nextBoth(); err != nil {
- return err
- }
- case status.bothAreFiles:
- changes.Add(NewModify(from, to))
- if err = ii.nextBoth(); err != nil {
- return err
- }
- case status.fileAndDir:
- if err = changes.AddRecursiveDelete(from); err != nil {
- return err
- }
- if err = changes.AddRecursiveInsert(to); err != nil {
- return err
- }
- if err = ii.nextBoth(); err != nil {
- return err
- }
- case status.bothAreDirs:
- if err = diffDirs(changes, ii); err != nil {
- return err
- }
- default:
- return fmt.Errorf("bad status from double iterator")
- }
-
- return nil
-}
-
-func diffDirs(changes *Changes, ii *doubleIter) error {
- from := ii.from.current
- to := ii.to.current
-
- status, err := ii.compare()
- if err != nil {
- return err
- }
-
- switch {
- case status.fromIsEmptyDir:
- if err = changes.AddRecursiveInsert(to); err != nil {
- return err
- }
- if err = ii.nextBoth(); err != nil {
- return err
- }
- case status.toIsEmptyDir:
- if err = changes.AddRecursiveDelete(from); err != nil {
- return err
- }
- if err = ii.nextBoth(); err != nil {
- return err
- }
- case !status.fromIsEmptyDir && !status.toIsEmptyDir:
- // do nothing
- if err = ii.stepBoth(); err != nil {
- return err
- }
- default:
- return fmt.Errorf("both dirs are empty but has different hash")
- }
-
- return nil
-}
diff --git a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/doc.go b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/doc.go
deleted file mode 100644
index 5204024ad4f..00000000000
--- a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/doc.go
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
-Package merkletrie provides support for n-ary trees that are at the same
-time Merkle trees and Radix trees (tries).
-
-Git trees are Radix n-ary trees in virtue of the names of their
-tree entries. At the same time, git trees are Merkle trees thanks to
-their hashes.
-
-This package defines Merkle tries as nodes that should have:
-
-- a hash: the Merkle part of the Merkle trie
-
-- a key: the Radix part of the Merkle trie
-
-The Merkle hash condition is not enforced by this package though. This
-means that the hash of a node doesn't have to take into account the hashes of
-their children, which is good for testing purposes.
-
-Nodes in the Merkle trie are abstracted by the Noder interface. The
-intended use is that git trees implements this interface, either
-directly or using a simple wrapper.
-
-This package provides an iterator for merkletries that can skip whole
-directory-like noders and an efficient merkletrie comparison algorithm.
-
-When comparing git trees, the simple approach of alphabetically sorting
-their elements and comparing the resulting lists is too slow as it
-depends linearly on the number of files in the trees: When a directory
-has lots of files but none of them has been modified, this approach is
-very expensive. We can do better by prunning whole directories that
-have not change, just by looking at their hashes. This package provides
-the tools to do exactly that.
-*/
-package merkletrie
diff --git a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/doubleiter.go b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/doubleiter.go
deleted file mode 100644
index 4a4341b3875..00000000000
--- a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/doubleiter.go
+++ /dev/null
@@ -1,187 +0,0 @@
-package merkletrie
-
-import (
- "fmt"
- "io"
-
- "github.com/go-git/go-git/v5/utils/merkletrie/noder"
-)
-
-// A doubleIter is a convenience type to keep track of the current
-// noders in two merkletries that are going to be iterated in parallel.
-// It has methods for:
-//
-// - iterating over the merkletries, both at the same time or
-// individually: nextFrom, nextTo, nextBoth, stepBoth
-//
-// - checking if there are noders left in one or both of them with the
-// remaining method and its associated returned type.
-//
-// - comparing the current noders of both merkletries in several ways,
-// with the compare method and its associated returned type.
-type doubleIter struct {
- from struct {
- iter *Iter
- current noder.Path // nil if no more nodes
- }
- to struct {
- iter *Iter
- current noder.Path // nil if no more nodes
- }
- hashEqual noder.Equal
-}
-
-// NewdoubleIter returns a new doubleIter for the merkletries "from" and
-// "to". The hashEqual callback function will be used by the doubleIter
-// to compare the hash of the noders in the merkletries. The doubleIter
-// will be initialized to the first elements in each merkletrie if any.
-func newDoubleIter(from, to noder.Noder, hashEqual noder.Equal) (
- *doubleIter, error) {
- var ii doubleIter
- var err error
-
- if ii.from.iter, err = NewIter(from); err != nil {
- return nil, fmt.Errorf("from: %s", err)
- }
- if ii.from.current, err = ii.from.iter.Next(); turnEOFIntoNil(err) != nil {
- return nil, fmt.Errorf("from: %s", err)
- }
-
- if ii.to.iter, err = NewIter(to); err != nil {
- return nil, fmt.Errorf("to: %s", err)
- }
- if ii.to.current, err = ii.to.iter.Next(); turnEOFIntoNil(err) != nil {
- return nil, fmt.Errorf("to: %s", err)
- }
-
- ii.hashEqual = hashEqual
-
- return &ii, nil
-}
-
-func turnEOFIntoNil(e error) error {
- if e != nil && e != io.EOF {
- return e
- }
- return nil
-}
-
-// NextBoth makes d advance to the next noder in both merkletries. If
-// any of them is a directory, it skips its contents.
-func (d *doubleIter) nextBoth() error {
- if err := d.nextFrom(); err != nil {
- return err
- }
- if err := d.nextTo(); err != nil {
- return err
- }
-
- return nil
-}
-
-// NextFrom makes d advance to the next noder in the "from" merkletrie,
-// skipping its contents if it is a directory.
-func (d *doubleIter) nextFrom() (err error) {
- d.from.current, err = d.from.iter.Next()
- return turnEOFIntoNil(err)
-}
-
-// NextTo makes d advance to the next noder in the "to" merkletrie,
-// skipping its contents if it is a directory.
-func (d *doubleIter) nextTo() (err error) {
- d.to.current, err = d.to.iter.Next()
- return turnEOFIntoNil(err)
-}
-
-// StepBoth makes d advance to the next noder in both merkletries,
-// getting deeper into directories if that is the case.
-func (d *doubleIter) stepBoth() (err error) {
- if d.from.current, err = d.from.iter.Step(); turnEOFIntoNil(err) != nil {
- return err
- }
- if d.to.current, err = d.to.iter.Step(); turnEOFIntoNil(err) != nil {
- return err
- }
- return nil
-}
-
-// Remaining returns if there are no more noders in the tree, if both
-// have noders or if one of them doesn't.
-func (d *doubleIter) remaining() remaining {
- if d.from.current == nil && d.to.current == nil {
- return noMoreNoders
- }
-
- if d.from.current == nil && d.to.current != nil {
- return onlyToRemains
- }
-
- if d.from.current != nil && d.to.current == nil {
- return onlyFromRemains
- }
-
- return bothHaveNodes
-}
-
-// Remaining values tells you whether both trees still have noders, or
-// only one of them or none of them.
-type remaining int
-
-const (
- noMoreNoders remaining = iota
- onlyToRemains
- onlyFromRemains
- bothHaveNodes
-)
-
-// Compare returns the comparison between the current elements in the
-// merkletries.
-func (d *doubleIter) compare() (s comparison, err error) {
- s.sameHash = d.hashEqual(d.from.current, d.to.current)
-
- fromIsDir := d.from.current.IsDir()
- toIsDir := d.to.current.IsDir()
-
- s.bothAreDirs = fromIsDir && toIsDir
- s.bothAreFiles = !fromIsDir && !toIsDir
- s.fileAndDir = !s.bothAreDirs && !s.bothAreFiles
-
- fromNumChildren, err := d.from.current.NumChildren()
- if err != nil {
- return comparison{}, fmt.Errorf("from: %s", err)
- }
-
- toNumChildren, err := d.to.current.NumChildren()
- if err != nil {
- return comparison{}, fmt.Errorf("to: %s", err)
- }
-
- s.fromIsEmptyDir = fromIsDir && fromNumChildren == 0
- s.toIsEmptyDir = toIsDir && toNumChildren == 0
-
- return
-}
-
-// Answers to a lot of questions you can ask about how to noders are
-// equal or different.
-type comparison struct {
- // the following are only valid if both nodes have the same name
- // (i.e. nameComparison == 0)
-
- // Do both nodes have the same hash?
- sameHash bool
- // Are both nodes files?
- bothAreFiles bool
-
- // the following are only valid if any of the noders are dirs,
- // this is, if !bothAreFiles
-
- // Is one a file and the other a dir?
- fileAndDir bool
- // Are both nodes dirs?
- bothAreDirs bool
- // Is the from node an empty dir?
- fromIsEmptyDir bool
- // Is the to Node an empty dir?
- toIsEmptyDir bool
-}
diff --git a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/filesystem/node.go b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/filesystem/node.go
deleted file mode 100644
index 33800627de7..00000000000
--- a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/filesystem/node.go
+++ /dev/null
@@ -1,205 +0,0 @@
-package filesystem
-
-import (
- "io"
- "os"
- "path"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/filemode"
- "github.com/go-git/go-git/v5/utils/merkletrie/noder"
-
- "github.com/go-git/go-billy/v5"
-)
-
-var ignore = map[string]bool{
- ".git": true,
-}
-
-// The node represents a file or a directory in a billy.Filesystem. It
-// implements the interface noder.Noder of merkletrie package.
-//
-// This implementation implements a "standard" hash method being able to be
-// compared with any other noder.Noder implementation inside of go-git.
-type node struct {
- fs billy.Filesystem
- submodules map[string]plumbing.Hash
-
- path string
- hash []byte
- children []noder.Noder
- isDir bool
- mode os.FileMode
- size int64
-}
-
-// NewRootNode returns the root node based on a given billy.Filesystem.
-//
-// In order to provide the submodule hash status, a map[string]plumbing.Hash
-// should be provided where the key is the path of the submodule and the commit
-// of the submodule HEAD
-func NewRootNode(
- fs billy.Filesystem,
- submodules map[string]plumbing.Hash,
-) noder.Noder {
- return &node{fs: fs, submodules: submodules, isDir: true}
-}
-
-// Hash the hash of a filesystem is the result of concatenating the computed
-// plumbing.Hash of the file as a Blob and its plumbing.FileMode; that way the
-// difftree algorithm will detect changes in the contents of files and also in
-// their mode.
-//
-// Please note that the hash is calculated on first invocation of Hash(),
-// meaning that it will not update when the underlying file changes
-// between invocations.
-//
-// The hash of a directory is always a 24-bytes slice of zero values
-func (n *node) Hash() []byte {
- if n.hash == nil {
- n.calculateHash()
- }
- return n.hash
-}
-
-func (n *node) Name() string {
- return path.Base(n.path)
-}
-
-func (n *node) IsDir() bool {
- return n.isDir
-}
-
-func (n *node) Skip() bool {
- return false
-}
-
-func (n *node) Children() ([]noder.Noder, error) {
- if err := n.calculateChildren(); err != nil {
- return nil, err
- }
-
- return n.children, nil
-}
-
-func (n *node) NumChildren() (int, error) {
- if err := n.calculateChildren(); err != nil {
- return -1, err
- }
-
- return len(n.children), nil
-}
-
-func (n *node) calculateChildren() error {
- if !n.IsDir() {
- return nil
- }
-
- if len(n.children) != 0 {
- return nil
- }
-
- files, err := n.fs.ReadDir(n.path)
- if err != nil {
- if os.IsNotExist(err) {
- return nil
- }
- return err
- }
-
- for _, file := range files {
- if _, ok := ignore[file.Name()]; ok {
- continue
- }
-
- if file.Mode()&os.ModeSocket != 0 {
- continue
- }
-
- c, err := n.newChildNode(file)
- if err != nil {
- return err
- }
-
- n.children = append(n.children, c)
- }
-
- return nil
-}
-
-func (n *node) newChildNode(file os.FileInfo) (*node, error) {
- path := path.Join(n.path, file.Name())
-
- node := &node{
- fs: n.fs,
- submodules: n.submodules,
-
- path: path,
- isDir: file.IsDir(),
- size: file.Size(),
- mode: file.Mode(),
- }
-
- if _, isSubmodule := n.submodules[path]; isSubmodule {
- node.isDir = false
- }
-
- return node, nil
-}
-
-func (n *node) calculateHash() {
- if n.isDir {
- n.hash = make([]byte, 24)
- return
- }
- mode, err := filemode.NewFromOSFileMode(n.mode)
- if err != nil {
- n.hash = plumbing.ZeroHash[:]
- return
- }
- if submoduleHash, isSubmodule := n.submodules[n.path]; isSubmodule {
- n.hash = append(submoduleHash[:], filemode.Submodule.Bytes()...)
- return
- }
- var hash plumbing.Hash
- if n.mode&os.ModeSymlink != 0 {
- hash = n.doCalculateHashForSymlink()
- } else {
- hash = n.doCalculateHashForRegular()
- }
- n.hash = append(hash[:], mode.Bytes()...)
-}
-
-func (n *node) doCalculateHashForRegular() plumbing.Hash {
- f, err := n.fs.Open(n.path)
- if err != nil {
- return plumbing.ZeroHash
- }
-
- defer f.Close()
-
- h := plumbing.NewHasher(plumbing.BlobObject, n.size)
- if _, err := io.Copy(h, f); err != nil {
- return plumbing.ZeroHash
- }
-
- return h.Sum()
-}
-
-func (n *node) doCalculateHashForSymlink() plumbing.Hash {
- target, err := n.fs.Readlink(n.path)
- if err != nil {
- return plumbing.ZeroHash
- }
-
- h := plumbing.NewHasher(plumbing.BlobObject, n.size)
- if _, err := h.Write([]byte(target)); err != nil {
- return plumbing.ZeroHash
- }
-
- return h.Sum()
-}
-
-func (n *node) String() string {
- return n.path
-}
diff --git a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/index/node.go b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/index/node.go
deleted file mode 100644
index c1809f7ecd9..00000000000
--- a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/index/node.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package index
-
-import (
- "path"
- "strings"
-
- "github.com/go-git/go-git/v5/plumbing/format/index"
- "github.com/go-git/go-git/v5/utils/merkletrie/noder"
-)
-
-// The node represents a index.Entry or a directory inferred from the path
-// of all entries. It implements the interface noder.Noder of merkletrie
-// package.
-//
-// This implementation implements a "standard" hash method being able to be
-// compared with any other noder.Noder implementation inside of go-git
-type node struct {
- path string
- entry *index.Entry
- children []noder.Noder
- isDir bool
- skip bool
-}
-
-// NewRootNode returns the root node of a computed tree from a index.Index,
-func NewRootNode(idx *index.Index) noder.Noder {
- const rootNode = ""
-
- m := map[string]*node{rootNode: {isDir: true}}
-
- for _, e := range idx.Entries {
- parts := strings.Split(e.Name, string("/"))
-
- var fullpath string
- for _, part := range parts {
- parent := fullpath
- fullpath = path.Join(fullpath, part)
-
- if _, ok := m[fullpath]; ok {
- continue
- }
-
- n := &node{path: fullpath, skip: e.SkipWorktree}
- if fullpath == e.Name {
- n.entry = e
- } else {
- n.isDir = true
- }
-
- m[n.path] = n
- m[parent].children = append(m[parent].children, n)
- }
- }
-
- return m[rootNode]
-}
-
-func (n *node) String() string {
- return n.path
-}
-
-func (n *node) Skip() bool {
- return n.skip
-}
-
-// Hash the hash of a filesystem is a 24-byte slice, is the result of
-// concatenating the computed plumbing.Hash of the file as a Blob and its
-// plumbing.FileMode; that way the difftree algorithm will detect changes in the
-// contents of files and also in their mode.
-//
-// If the node is computed and not based on a index.Entry the hash is equals
-// to a 24-bytes slices of zero values.
-func (n *node) Hash() []byte {
- if n.entry == nil {
- return make([]byte, 24)
- }
-
- return append(n.entry.Hash[:], n.entry.Mode.Bytes()...)
-}
-
-func (n *node) Name() string {
- return path.Base(n.path)
-}
-
-func (n *node) IsDir() bool {
- return n.isDir
-}
-
-func (n *node) Children() ([]noder.Noder, error) {
- return n.children, nil
-}
-
-func (n *node) NumChildren() (int, error) {
- return len(n.children), nil
-}
diff --git a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/internal/frame/frame.go b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/internal/frame/frame.go
deleted file mode 100644
index 131878a1c7a..00000000000
--- a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/internal/frame/frame.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package frame
-
-import (
- "bytes"
- "fmt"
- "sort"
- "strings"
-
- "github.com/go-git/go-git/v5/utils/merkletrie/noder"
-)
-
-// A Frame is a collection of siblings in a trie, sorted alphabetically
-// by name.
-type Frame struct {
- // siblings, sorted in reverse alphabetical order by name
- stack []noder.Noder
-}
-
-type byName []noder.Noder
-
-func (a byName) Len() int { return len(a) }
-func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a byName) Less(i, j int) bool {
- return strings.Compare(a[i].Name(), a[j].Name()) < 0
-}
-
-// New returns a frame with the children of the provided node.
-func New(n noder.Noder) (*Frame, error) {
- children, err := n.Children()
- if err != nil {
- return nil, err
- }
-
- sort.Sort(sort.Reverse(byName(children)))
- return &Frame{
- stack: children,
- }, nil
-}
-
-// String returns the quoted names of the noders in the frame sorted in
-// alphabetical order by name, surrounded by square brackets and
-// separated by comas.
-//
-// Examples:
-// []
-// ["a", "b"]
-func (f *Frame) String() string {
- var buf bytes.Buffer
- _ = buf.WriteByte('[')
-
- sep := ""
- for i := f.Len() - 1; i >= 0; i-- {
- _, _ = buf.WriteString(sep)
- sep = ", "
- _, _ = buf.WriteString(fmt.Sprintf("%q", f.stack[i].Name()))
- }
-
- _ = buf.WriteByte(']')
-
- return buf.String()
-}
-
-// First returns, but dont extract, the noder with the alphabetically
-// smaller name in the frame and true if the frame was not empty.
-// Otherwise it returns nil and false.
-func (f *Frame) First() (noder.Noder, bool) {
- if f.Len() == 0 {
- return nil, false
- }
-
- top := f.Len() - 1
-
- return f.stack[top], true
-}
-
-// Drop extracts the noder with the alphabetically smaller name in the
-// frame or does nothing if the frame was empty.
-func (f *Frame) Drop() {
- if f.Len() == 0 {
- return
- }
-
- top := f.Len() - 1
- f.stack[top] = nil
- f.stack = f.stack[:top]
-}
-
-// Len returns the number of noders in the frame.
-func (f *Frame) Len() int {
- return len(f.stack)
-}
diff --git a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/iter.go b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/iter.go
deleted file mode 100644
index d75afec4643..00000000000
--- a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/iter.go
+++ /dev/null
@@ -1,216 +0,0 @@
-package merkletrie
-
-import (
- "fmt"
- "io"
-
- "github.com/go-git/go-git/v5/utils/merkletrie/internal/frame"
- "github.com/go-git/go-git/v5/utils/merkletrie/noder"
-)
-
-// Iter is an iterator for merkletries (only the trie part of the
-// merkletrie is relevant here, it does not use the Hasher interface).
-//
-// The iteration is performed in depth-first pre-order. Entries at each
-// depth are traversed in (case-sensitive) alphabetical order.
-//
-// This is the kind of traversal you will expect when listing ordinary
-// files and directories recursively, for example:
-//
-// Trie Traversal order
-// ---- ---------------
-// .
-// / | \ c
-// / | \ d/
-// d c z ===> d/a
-// / \ d/b
-// b a z
-//
-//
-// This iterator is somewhat especial as you can chose to skip whole
-// "directories" when iterating:
-//
-// - The Step method will iterate normally.
-//
-// - the Next method will not descend deeper into the tree.
-//
-// For example, if the iterator is at `d/`, the Step method will return
-// `d/a` while the Next would have returned `z` instead (skipping `d/`
-// and its descendants). The name of the these two methods are based on
-// the well known "next" and "step" operations, quite common in
-// debuggers, like gdb.
-//
-// The paths returned by the iterator will be relative, if the iterator
-// was created from a single node, or absolute, if the iterator was
-// created from the path to the node (the path will be prefixed to all
-// returned paths).
-type Iter struct {
- // Tells if the iteration has started.
- hasStarted bool
- // The top of this stack has the current node and its siblings. The
- // rest of the stack keeps the ancestors of the current node and
- // their corresponding siblings. The current element is always the
- // top element of the top frame.
- //
- // When "step"ping into a node, its children are pushed as a new
- // frame.
- //
- // When "next"ing pass a node, the current element is dropped by
- // popping the top frame.
- frameStack []*frame.Frame
- // The base path used to turn the relative paths used internally by
- // the iterator into absolute paths used by external applications.
- // For relative iterator this will be nil.
- base noder.Path
-}
-
-// NewIter returns a new relative iterator using the provider noder as
-// its unnamed root. When iterating, all returned paths will be
-// relative to node.
-func NewIter(n noder.Noder) (*Iter, error) {
- return newIter(n, nil)
-}
-
-// NewIterFromPath returns a new absolute iterator from the noder at the
-// end of the path p. When iterating, all returned paths will be
-// absolute, using the root of the path p as their root.
-func NewIterFromPath(p noder.Path) (*Iter, error) {
- return newIter(p, p) // Path implements Noder
-}
-
-func newIter(root noder.Noder, base noder.Path) (*Iter, error) {
- ret := &Iter{
- base: base,
- }
-
- if root == nil {
- return ret, nil
- }
-
- frame, err := frame.New(root)
- if err != nil {
- return nil, err
- }
- ret.push(frame)
-
- return ret, nil
-}
-
-func (iter *Iter) top() (*frame.Frame, bool) {
- if len(iter.frameStack) == 0 {
- return nil, false
- }
- top := len(iter.frameStack) - 1
-
- return iter.frameStack[top], true
-}
-
-func (iter *Iter) push(f *frame.Frame) {
- iter.frameStack = append(iter.frameStack, f)
-}
-
-const (
- doDescend = true
- dontDescend = false
-)
-
-// Next returns the path of the next node without descending deeper into
-// the trie and nil. If there are no more entries in the trie it
-// returns nil and io.EOF. In case of error, it will return nil and the
-// error.
-func (iter *Iter) Next() (noder.Path, error) {
- return iter.advance(dontDescend)
-}
-
-// Step returns the path to the next node in the trie, descending deeper
-// into it if needed, and nil. If there are no more nodes in the trie,
-// it returns nil and io.EOF. In case of error, it will return nil and
-// the error.
-func (iter *Iter) Step() (noder.Path, error) {
- return iter.advance(doDescend)
-}
-
-// Advances the iterator in the desired direction: descend or
-// dontDescend.
-//
-// Returns the new current element and a nil error on success. If there
-// are no more elements in the trie below the base, it returns nil, and
-// io.EOF. Returns nil and an error in case of errors.
-func (iter *Iter) advance(wantDescend bool) (noder.Path, error) {
- current, err := iter.current()
- if err != nil {
- return nil, err
- }
-
- // The first time we just return the current node.
- if !iter.hasStarted {
- iter.hasStarted = true
- return current, nil
- }
-
- // Advances means getting a next current node, either its first child or
- // its next sibling, depending if we must descend or not.
- numChildren, err := current.NumChildren()
- if err != nil {
- return nil, err
- }
-
- mustDescend := numChildren != 0 && wantDescend
- if mustDescend {
- // descend: add a new frame with the current's children.
- frame, err := frame.New(current)
- if err != nil {
- return nil, err
- }
- iter.push(frame)
- } else {
- // don't descend: just drop the current node
- iter.drop()
- }
-
- return iter.current()
-}
-
-// Returns the path to the current node, adding the base if there was
-// one, and a nil error. If there were no noders left, it returns nil
-// and io.EOF. If an error occurred, it returns nil and the error.
-func (iter *Iter) current() (noder.Path, error) {
- if topFrame, ok := iter.top(); !ok {
- return nil, io.EOF
- } else if _, ok := topFrame.First(); !ok {
- return nil, io.EOF
- }
-
- ret := make(noder.Path, 0, len(iter.base)+len(iter.frameStack))
-
- // concat the base...
- ret = append(ret, iter.base...)
- // ... and the current node and all its ancestors
- for i, f := range iter.frameStack {
- t, ok := f.First()
- if !ok {
- panic(fmt.Sprintf("frame %d is empty", i))
- }
- ret = append(ret, t)
- }
-
- return ret, nil
-}
-
-// removes the current node if any, and all the frames that become empty as a
-// consequence of this action.
-func (iter *Iter) drop() {
- frame, ok := iter.top()
- if !ok {
- return
- }
-
- frame.Drop()
- // if the frame is empty, remove it and its parent, recursively
- if frame.Len() == 0 {
- top := len(iter.frameStack) - 1
- iter.frameStack[top] = nil
- iter.frameStack = iter.frameStack[:top]
- iter.drop()
- }
-}
diff --git a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/noder/noder.go b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/noder/noder.go
deleted file mode 100644
index 6d22b8c14ec..00000000000
--- a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/noder/noder.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Package noder provide an interface for defining nodes in a
-// merkletrie, their hashes and their paths (a noders and its
-// ancestors).
-//
-// The hasher interface is easy to implement naively by elements that
-// already have a hash, like git blobs and trees. More sophisticated
-// implementations can implement the Equal function in exotic ways
-// though: for instance, comparing the modification time of directories
-// in a filesystem.
-package noder
-
-import "fmt"
-
-// Hasher interface is implemented by types that can tell you
-// their hash.
-type Hasher interface {
- Hash() []byte
-}
-
-// Equal functions take two hashers and return if they are equal.
-//
-// These functions are expected to be faster than reflect.Equal or
-// reflect.DeepEqual because they can compare just the hash of the
-// objects, instead of their contents, so they are expected to be O(1).
-type Equal func(a, b Hasher) bool
-
-// The Noder interface is implemented by the elements of a Merkle Trie.
-//
-// There are two types of elements in a Merkle Trie:
-//
-// - file-like nodes: they cannot have children.
-//
-// - directory-like nodes: they can have 0 or more children and their
-// hash is calculated by combining their children hashes.
-type Noder interface {
- Hasher
- fmt.Stringer // for testing purposes
- // Name returns the name of an element (relative, not its full
- // path).
- Name() string
- // IsDir returns true if the element is a directory-like node or
- // false if it is a file-like node.
- IsDir() bool
- // Children returns the children of the element. Note that empty
- // directory-like noders and file-like noders will both return
- // NoChildren.
- Children() ([]Noder, error)
- // NumChildren returns the number of children this element has.
- //
- // This method is an optimization: the number of children is easily
- // calculated as the length of the value returned by the Children
- // method (above); yet, some implementations will be able to
- // implement NumChildren in O(1) while Children is usually more
- // complex.
- NumChildren() (int, error)
- Skip() bool
-}
-
-// NoChildren represents the children of a noder without children.
-var NoChildren = []Noder{}
diff --git a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/noder/path.go b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/noder/path.go
deleted file mode 100644
index 6c1d363320b..00000000000
--- a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/noder/path.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package noder
-
-import (
- "bytes"
- "strings"
-)
-
-// Path values represent a noder and its ancestors. The root goes first
-// and the actual final noder the path is referring to will be the last.
-//
-// A path implements the Noder interface, redirecting all the interface
-// calls to its final noder.
-//
-// Paths build from an empty Noder slice are not valid paths and should
-// not be used.
-type Path []Noder
-
-func (p Path) Skip() bool {
- if len(p) > 0 {
- return p.Last().Skip()
- }
-
- return false
-}
-
-// String returns the full path of the final noder as a string, using
-// "/" as the separator.
-func (p Path) String() string {
- var buf bytes.Buffer
- sep := ""
- for _, e := range p {
- _, _ = buf.WriteString(sep)
- sep = "/"
- _, _ = buf.WriteString(e.Name())
- }
-
- return buf.String()
-}
-
-// Last returns the final noder in the path.
-func (p Path) Last() Noder {
- return p[len(p)-1]
-}
-
-// Hash returns the hash of the final noder of the path.
-func (p Path) Hash() []byte {
- return p.Last().Hash()
-}
-
-// Name returns the name of the final noder of the path.
-func (p Path) Name() string {
- return p.Last().Name()
-}
-
-// IsDir returns if the final noder of the path is a directory-like
-// noder.
-func (p Path) IsDir() bool {
- return p.Last().IsDir()
-}
-
-// Children returns the children of the final noder in the path.
-func (p Path) Children() ([]Noder, error) {
- return p.Last().Children()
-}
-
-// NumChildren returns the number of children the final noder of the
-// path has.
-func (p Path) NumChildren() (int, error) {
- return p.Last().NumChildren()
-}
-
-// Compare returns -1, 0 or 1 if the path p is smaller, equal or bigger
-// than other, in "directory order"; for example:
-//
-// "a" < "b"
-// "a/b/c/d/z" < "b"
-// "a/b/a" > "a/b"
-func (p Path) Compare(other Path) int {
- i := 0
- for {
- switch {
- case len(other) == len(p) && i == len(p):
- return 0
- case i == len(other):
- return 1
- case i == len(p):
- return -1
- default:
- // We do *not* normalize Unicode here. CGit doesn't.
- // https://github.com/src-d/go-git/issues/1057
- cmp := strings.Compare(p[i].Name(), other[i].Name())
- if cmp != 0 {
- return cmp
- }
- }
- i++
- }
-}
diff --git a/vendor/github.com/go-git/go-git/v5/utils/sync/bufio.go b/vendor/github.com/go-git/go-git/v5/utils/sync/bufio.go
deleted file mode 100644
index 5009ea8047b..00000000000
--- a/vendor/github.com/go-git/go-git/v5/utils/sync/bufio.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package sync
-
-import (
- "bufio"
- "io"
- "sync"
-)
-
-var bufioReader = sync.Pool{
- New: func() interface{} {
- return bufio.NewReader(nil)
- },
-}
-
-// GetBufioReader returns a *bufio.Reader that is managed by a sync.Pool.
-// Returns a bufio.Reader that is resetted with reader and ready for use.
-//
-// After use, the *bufio.Reader should be put back into the sync.Pool
-// by calling PutBufioReader.
-func GetBufioReader(reader io.Reader) *bufio.Reader {
- r := bufioReader.Get().(*bufio.Reader)
- r.Reset(reader)
- return r
-}
-
-// PutBufioReader puts reader back into its sync.Pool.
-func PutBufioReader(reader *bufio.Reader) {
- bufioReader.Put(reader)
-}
diff --git a/vendor/github.com/go-git/go-git/v5/utils/sync/bytes.go b/vendor/github.com/go-git/go-git/v5/utils/sync/bytes.go
deleted file mode 100644
index dd06fc0bc6e..00000000000
--- a/vendor/github.com/go-git/go-git/v5/utils/sync/bytes.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package sync
-
-import (
- "bytes"
- "sync"
-)
-
-var (
- byteSlice = sync.Pool{
- New: func() interface{} {
- b := make([]byte, 16*1024)
- return &b
- },
- }
- bytesBuffer = sync.Pool{
- New: func() interface{} {
- return bytes.NewBuffer(nil)
- },
- }
-)
-
-// GetByteSlice returns a *[]byte that is managed by a sync.Pool.
-// The initial slice length will be 16384 (16kb).
-//
-// After use, the *[]byte should be put back into the sync.Pool
-// by calling PutByteSlice.
-func GetByteSlice() *[]byte {
- buf := byteSlice.Get().(*[]byte)
- return buf
-}
-
-// PutByteSlice puts buf back into its sync.Pool.
-func PutByteSlice(buf *[]byte) {
- byteSlice.Put(buf)
-}
-
-// GetBytesBuffer returns a *bytes.Buffer that is managed by a sync.Pool.
-// Returns a buffer that is resetted and ready for use.
-//
-// After use, the *bytes.Buffer should be put back into the sync.Pool
-// by calling PutBytesBuffer.
-func GetBytesBuffer() *bytes.Buffer {
- buf := bytesBuffer.Get().(*bytes.Buffer)
- buf.Reset()
- return buf
-}
-
-// PutBytesBuffer puts buf back into its sync.Pool.
-func PutBytesBuffer(buf *bytes.Buffer) {
- bytesBuffer.Put(buf)
-}
diff --git a/vendor/github.com/go-git/go-git/v5/utils/sync/zlib.go b/vendor/github.com/go-git/go-git/v5/utils/sync/zlib.go
deleted file mode 100644
index c613885957c..00000000000
--- a/vendor/github.com/go-git/go-git/v5/utils/sync/zlib.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package sync
-
-import (
- "bytes"
- "compress/zlib"
- "io"
- "sync"
-)
-
-var (
- zlibInitBytes = []byte{0x78, 0x9c, 0x01, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01}
- zlibReader = sync.Pool{
- New: func() interface{} {
- r, _ := zlib.NewReader(bytes.NewReader(zlibInitBytes))
- return ZLibReader{
- Reader: r.(zlibReadCloser),
- }
- },
- }
- zlibWriter = sync.Pool{
- New: func() interface{} {
- return zlib.NewWriter(nil)
- },
- }
-)
-
-type zlibReadCloser interface {
- io.ReadCloser
- zlib.Resetter
-}
-
-type ZLibReader struct {
- dict *[]byte
- Reader zlibReadCloser
-}
-
-// GetZlibReader returns a ZLibReader that is managed by a sync.Pool.
-// Returns a ZLibReader that is resetted using a dictionary that is
-// also managed by a sync.Pool.
-//
-// After use, the ZLibReader should be put back into the sync.Pool
-// by calling PutZlibReader.
-func GetZlibReader(r io.Reader) (ZLibReader, error) {
- z := zlibReader.Get().(ZLibReader)
- z.dict = GetByteSlice()
-
- err := z.Reader.Reset(r, *z.dict)
-
- return z, err
-}
-
-// PutZlibReader puts z back into its sync.Pool, first closing the reader.
-// The Byte slice dictionary is also put back into its sync.Pool.
-func PutZlibReader(z ZLibReader) {
- z.Reader.Close()
- PutByteSlice(z.dict)
- zlibReader.Put(z)
-}
-
-// GetZlibWriter returns a *zlib.Writer that is managed by a sync.Pool.
-// Returns a writer that is resetted with w and ready for use.
-//
-// After use, the *zlib.Writer should be put back into the sync.Pool
-// by calling PutZlibWriter.
-func GetZlibWriter(w io.Writer) *zlib.Writer {
- z := zlibWriter.Get().(*zlib.Writer)
- z.Reset(w)
- return z
-}
-
-// PutZlibWriter puts w back into its sync.Pool.
-func PutZlibWriter(w *zlib.Writer) {
- zlibWriter.Put(w)
-}
diff --git a/vendor/github.com/go-git/go-git/v5/utils/trace/trace.go b/vendor/github.com/go-git/go-git/v5/utils/trace/trace.go
deleted file mode 100644
index 3e15c5b9f90..00000000000
--- a/vendor/github.com/go-git/go-git/v5/utils/trace/trace.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package trace
-
-import (
- "fmt"
- "log"
- "os"
- "sync/atomic"
-)
-
-var (
- // logger is the logger to use for tracing.
- logger = newLogger()
-
- // current is the targets that are enabled for tracing.
- current atomic.Int32
-)
-
-func newLogger() *log.Logger {
- return log.New(os.Stderr, "", log.Ltime|log.Lmicroseconds|log.Lshortfile)
-}
-
-// Target is a tracing target.
-type Target int32
-
-const (
- // General traces general operations.
- General Target = 1 << iota
-
- // Packet traces git packets.
- Packet
-)
-
-// SetTarget sets the tracing targets.
-func SetTarget(target Target) {
- current.Store(int32(target))
-}
-
-// SetLogger sets the logger to use for tracing.
-func SetLogger(l *log.Logger) {
- logger = l
-}
-
-// Print prints the given message only if the target is enabled.
-func (t Target) Print(args ...interface{}) {
- if int32(t)¤t.Load() != 0 {
- logger.Output(2, fmt.Sprint(args...)) // nolint: errcheck
- }
-}
-
-// Printf prints the given message only if the target is enabled.
-func (t Target) Printf(format string, args ...interface{}) {
- if int32(t)¤t.Load() != 0 {
- logger.Output(2, fmt.Sprintf(format, args...)) // nolint: errcheck
- }
-}
diff --git a/vendor/github.com/go-git/go-git/v5/worktree.go b/vendor/github.com/go-git/go-git/v5/worktree.go
deleted file mode 100644
index ab11d42db83..00000000000
--- a/vendor/github.com/go-git/go-git/v5/worktree.go
+++ /dev/null
@@ -1,1126 +0,0 @@
-package git
-
-import (
- "context"
- "errors"
- "fmt"
- "io"
- "os"
- "path/filepath"
- "runtime"
- "strings"
-
- "github.com/go-git/go-billy/v5"
- "github.com/go-git/go-billy/v5/util"
- "github.com/go-git/go-git/v5/config"
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/filemode"
- "github.com/go-git/go-git/v5/plumbing/format/gitignore"
- "github.com/go-git/go-git/v5/plumbing/format/index"
- "github.com/go-git/go-git/v5/plumbing/object"
- "github.com/go-git/go-git/v5/plumbing/storer"
- "github.com/go-git/go-git/v5/utils/ioutil"
- "github.com/go-git/go-git/v5/utils/merkletrie"
- "github.com/go-git/go-git/v5/utils/sync"
-)
-
-var (
- ErrWorktreeNotClean = errors.New("worktree is not clean")
- ErrSubmoduleNotFound = errors.New("submodule not found")
- ErrUnstagedChanges = errors.New("worktree contains unstaged changes")
- ErrGitModulesSymlink = errors.New(gitmodulesFile + " is a symlink")
- ErrNonFastForwardUpdate = errors.New("non-fast-forward update")
-)
-
-// Worktree represents a git worktree.
-type Worktree struct {
- // Filesystem underlying filesystem.
- Filesystem billy.Filesystem
- // External excludes not found in the repository .gitignore
- Excludes []gitignore.Pattern
-
- r *Repository
-}
-
-// Pull incorporates changes from a remote repository into the current branch.
-// Returns nil if the operation is successful, NoErrAlreadyUpToDate if there are
-// no changes to be fetched, or an error.
-//
-// Pull only supports merges where the can be resolved as a fast-forward.
-func (w *Worktree) Pull(o *PullOptions) error {
- return w.PullContext(context.Background(), o)
-}
-
-// PullContext incorporates changes from a remote repository into the current
-// branch. Returns nil if the operation is successful, NoErrAlreadyUpToDate if
-// there are no changes to be fetched, or an error.
-//
-// Pull only supports merges where the can be resolved as a fast-forward.
-//
-// The provided Context must be non-nil. If the context expires before the
-// operation is complete, an error is returned. The context only affects the
-// transport operations.
-func (w *Worktree) PullContext(ctx context.Context, o *PullOptions) error {
- if err := o.Validate(); err != nil {
- return err
- }
-
- remote, err := w.r.Remote(o.RemoteName)
- if err != nil {
- return err
- }
-
- fetchHead, err := remote.fetch(ctx, &FetchOptions{
- RemoteName: o.RemoteName,
- RemoteURL: o.RemoteURL,
- Depth: o.Depth,
- Auth: o.Auth,
- Progress: o.Progress,
- Force: o.Force,
- InsecureSkipTLS: o.InsecureSkipTLS,
- CABundle: o.CABundle,
- ProxyOptions: o.ProxyOptions,
- })
-
- updated := true
- if err == NoErrAlreadyUpToDate {
- updated = false
- } else if err != nil {
- return err
- }
-
- ref, err := storer.ResolveReference(fetchHead, o.ReferenceName)
- if err != nil {
- return err
- }
-
- head, err := w.r.Head()
- if err == nil {
- // if we don't have a shallows list, just ignore it
- shallowList, _ := w.r.Storer.Shallow()
-
- var earliestShallow *plumbing.Hash
- if len(shallowList) > 0 {
- earliestShallow = &shallowList[0]
- }
-
- headAheadOfRef, err := isFastForward(w.r.Storer, ref.Hash(), head.Hash(), earliestShallow)
- if err != nil {
- return err
- }
-
- if !updated && headAheadOfRef {
- return NoErrAlreadyUpToDate
- }
-
- ff, err := isFastForward(w.r.Storer, head.Hash(), ref.Hash(), earliestShallow)
- if err != nil {
- return err
- }
-
- if !ff {
- return ErrNonFastForwardUpdate
- }
- }
-
- if err != nil && err != plumbing.ErrReferenceNotFound {
- return err
- }
-
- if err := w.updateHEAD(ref.Hash()); err != nil {
- return err
- }
-
- if err := w.Reset(&ResetOptions{
- Mode: MergeReset,
- Commit: ref.Hash(),
- }); err != nil {
- return err
- }
-
- if o.RecurseSubmodules != NoRecurseSubmodules {
- return w.updateSubmodules(&SubmoduleUpdateOptions{
- RecurseSubmodules: o.RecurseSubmodules,
- Auth: o.Auth,
- })
- }
-
- return nil
-}
-
-func (w *Worktree) updateSubmodules(o *SubmoduleUpdateOptions) error {
- s, err := w.Submodules()
- if err != nil {
- return err
- }
- o.Init = true
- return s.Update(o)
-}
-
-// Checkout switch branches or restore working tree files.
-func (w *Worktree) Checkout(opts *CheckoutOptions) error {
- if err := opts.Validate(); err != nil {
- return err
- }
-
- if opts.Create {
- if err := w.createBranch(opts); err != nil {
- return err
- }
- }
-
- c, err := w.getCommitFromCheckoutOptions(opts)
- if err != nil {
- return err
- }
-
- ro := &ResetOptions{Commit: c, Mode: MergeReset}
- if opts.Force {
- ro.Mode = HardReset
- } else if opts.Keep {
- ro.Mode = SoftReset
- }
-
- if !opts.Hash.IsZero() && !opts.Create {
- err = w.setHEADToCommit(opts.Hash)
- } else {
- err = w.setHEADToBranch(opts.Branch, c)
- }
-
- if err != nil {
- return err
- }
-
- if len(opts.SparseCheckoutDirectories) > 0 {
- return w.ResetSparsely(ro, opts.SparseCheckoutDirectories)
- }
-
- return w.Reset(ro)
-}
-
-func (w *Worktree) createBranch(opts *CheckoutOptions) error {
- if err := opts.Branch.Validate(); err != nil {
- return err
- }
-
- _, err := w.r.Storer.Reference(opts.Branch)
- if err == nil {
- return fmt.Errorf("a branch named %q already exists", opts.Branch)
- }
-
- if err != plumbing.ErrReferenceNotFound {
- return err
- }
-
- if opts.Hash.IsZero() {
- ref, err := w.r.Head()
- if err != nil {
- return err
- }
-
- opts.Hash = ref.Hash()
- }
-
- return w.r.Storer.SetReference(
- plumbing.NewHashReference(opts.Branch, opts.Hash),
- )
-}
-
-func (w *Worktree) getCommitFromCheckoutOptions(opts *CheckoutOptions) (plumbing.Hash, error) {
- hash := opts.Hash
- if hash.IsZero() {
- b, err := w.r.Reference(opts.Branch, true)
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- hash = b.Hash()
- }
-
- o, err := w.r.Object(plumbing.AnyObject, hash)
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- switch o := o.(type) {
- case *object.Tag:
- if o.TargetType != plumbing.CommitObject {
- return plumbing.ZeroHash, fmt.Errorf("%w: tag target %q", object.ErrUnsupportedObject, o.TargetType)
- }
-
- return o.Target, nil
- case *object.Commit:
- return o.Hash, nil
- }
-
- return plumbing.ZeroHash, fmt.Errorf("%w: %q", object.ErrUnsupportedObject, o.Type())
-}
-
-func (w *Worktree) setHEADToCommit(commit plumbing.Hash) error {
- head := plumbing.NewHashReference(plumbing.HEAD, commit)
- return w.r.Storer.SetReference(head)
-}
-
-func (w *Worktree) setHEADToBranch(branch plumbing.ReferenceName, commit plumbing.Hash) error {
- target, err := w.r.Storer.Reference(branch)
- if err != nil {
- return err
- }
-
- var head *plumbing.Reference
- if target.Name().IsBranch() {
- head = plumbing.NewSymbolicReference(plumbing.HEAD, target.Name())
- } else {
- head = plumbing.NewHashReference(plumbing.HEAD, commit)
- }
-
- return w.r.Storer.SetReference(head)
-}
-
-func (w *Worktree) ResetSparsely(opts *ResetOptions, dirs []string) error {
- if err := opts.Validate(w.r); err != nil {
- return err
- }
-
- if opts.Mode == MergeReset {
- unstaged, err := w.containsUnstagedChanges()
- if err != nil {
- return err
- }
-
- if unstaged {
- return ErrUnstagedChanges
- }
- }
-
- if err := w.setHEADCommit(opts.Commit); err != nil {
- return err
- }
-
- if opts.Mode == SoftReset {
- return nil
- }
-
- t, err := w.r.getTreeFromCommitHash(opts.Commit)
- if err != nil {
- return err
- }
-
- if opts.Mode == MixedReset || opts.Mode == MergeReset || opts.Mode == HardReset {
- if err := w.resetIndex(t, dirs); err != nil {
- return err
- }
- }
-
- if opts.Mode == MergeReset || opts.Mode == HardReset {
- if err := w.resetWorktree(t); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// Reset the worktree to a specified state.
-func (w *Worktree) Reset(opts *ResetOptions) error {
- return w.ResetSparsely(opts, nil)
-}
-
-func (w *Worktree) resetIndex(t *object.Tree, dirs []string) error {
- idx, err := w.r.Storer.Index()
- if len(dirs) > 0 {
- idx.SkipUnless(dirs)
- }
-
- if err != nil {
- return err
- }
- b := newIndexBuilder(idx)
-
- changes, err := w.diffTreeWithStaging(t, true)
- if err != nil {
- return err
- }
-
- for _, ch := range changes {
- a, err := ch.Action()
- if err != nil {
- return err
- }
-
- var name string
- var e *object.TreeEntry
-
- switch a {
- case merkletrie.Modify, merkletrie.Insert:
- name = ch.To.String()
- e, err = t.FindEntry(name)
- if err != nil {
- return err
- }
- case merkletrie.Delete:
- name = ch.From.String()
- }
-
- b.Remove(name)
- if e == nil {
- continue
- }
-
- b.Add(&index.Entry{
- Name: name,
- Hash: e.Hash,
- Mode: e.Mode,
- })
-
- }
-
- b.Write(idx)
- return w.r.Storer.SetIndex(idx)
-}
-
-func (w *Worktree) resetWorktree(t *object.Tree) error {
- changes, err := w.diffStagingWithWorktree(true, false)
- if err != nil {
- return err
- }
-
- idx, err := w.r.Storer.Index()
- if err != nil {
- return err
- }
- b := newIndexBuilder(idx)
-
- for _, ch := range changes {
- if err := w.validChange(ch); err != nil {
- return err
- }
- if err := w.checkoutChange(ch, t, b); err != nil {
- return err
- }
- }
-
- b.Write(idx)
- return w.r.Storer.SetIndex(idx)
-}
-
-// worktreeDeny is a list of paths that are not allowed
-// to be used when resetting the worktree.
-var worktreeDeny = map[string]struct{}{
- // .git
- GitDirName: {},
-
- // For other historical reasons, file names that do not conform to the 8.3
- // format (up to eight characters for the basename, three for the file
- // extension, certain characters not allowed such as `+`, etc) are associated
- // with a so-called "short name", at least on the `C:` drive by default.
- // Which means that `git~1/` is a valid way to refer to `.git/`.
- "git~1": {},
-}
-
-// validPath checks whether paths are valid.
-// The rules around invalid paths could differ from upstream based on how
-// filesystems are managed within go-git, but they are largely the same.
-//
-// For upstream rules:
-// https://github.com/git/git/blob/564d0252ca632e0264ed670534a51d18a689ef5d/read-cache.c#L946
-// https://github.com/git/git/blob/564d0252ca632e0264ed670534a51d18a689ef5d/path.c#L1383
-func validPath(paths ...string) error {
- for _, p := range paths {
- parts := strings.FieldsFunc(p, func(r rune) bool { return (r == '\\' || r == '/') })
- if len(parts) == 0 {
- return fmt.Errorf("invalid path: %q", p)
- }
-
- if _, denied := worktreeDeny[strings.ToLower(parts[0])]; denied {
- return fmt.Errorf("invalid path prefix: %q", p)
- }
-
- if runtime.GOOS == "windows" {
- // Volume names are not supported, in both formats: \\ and :.
- if vol := filepath.VolumeName(p); vol != "" {
- return fmt.Errorf("invalid path: %q", p)
- }
-
- if !windowsValidPath(parts[0]) {
- return fmt.Errorf("invalid path: %q", p)
- }
- }
-
- for _, part := range parts {
- if part == ".." {
- return fmt.Errorf("invalid path %q: cannot use '..'", p)
- }
- }
- }
- return nil
-}
-
-// windowsPathReplacer defines the chars that need to be replaced
-// as part of windowsValidPath.
-var windowsPathReplacer *strings.Replacer
-
-func init() {
- windowsPathReplacer = strings.NewReplacer(" ", "", ".", "")
-}
-
-func windowsValidPath(part string) bool {
- if len(part) > 3 && strings.EqualFold(part[:4], GitDirName) {
- // For historical reasons, file names that end in spaces or periods are
- // automatically trimmed. Therefore, `.git . . ./` is a valid way to refer
- // to `.git/`.
- if windowsPathReplacer.Replace(part[4:]) == "" {
- return false
- }
-
- // For yet other historical reasons, NTFS supports so-called "Alternate Data
- // Streams", i.e. metadata associated with a given file, referred to via
- // `::`. There exists a default stream
- // type for directories, allowing `.git/` to be accessed via
- // `.git::$INDEX_ALLOCATION/`.
- //
- // For performance reasons, _all_ Alternate Data Streams of `.git/` are
- // forbidden, not just `::$INDEX_ALLOCATION`.
- if len(part) > 4 && part[4:5] == ":" {
- return false
- }
- }
- return true
-}
-
-func (w *Worktree) validChange(ch merkletrie.Change) error {
- action, err := ch.Action()
- if err != nil {
- return nil
- }
-
- switch action {
- case merkletrie.Delete:
- return validPath(ch.From.String())
- case merkletrie.Insert:
- return validPath(ch.To.String())
- case merkletrie.Modify:
- return validPath(ch.From.String(), ch.To.String())
- }
-
- return nil
-}
-
-func (w *Worktree) checkoutChange(ch merkletrie.Change, t *object.Tree, idx *indexBuilder) error {
- a, err := ch.Action()
- if err != nil {
- return err
- }
-
- var e *object.TreeEntry
- var name string
- var isSubmodule bool
-
- switch a {
- case merkletrie.Modify, merkletrie.Insert:
- name = ch.To.String()
- e, err = t.FindEntry(name)
- if err != nil {
- return err
- }
-
- isSubmodule = e.Mode == filemode.Submodule
- case merkletrie.Delete:
- return rmFileAndDirsIfEmpty(w.Filesystem, ch.From.String())
- }
-
- if isSubmodule {
- return w.checkoutChangeSubmodule(name, a, e, idx)
- }
-
- return w.checkoutChangeRegularFile(name, a, t, e, idx)
-}
-
-func (w *Worktree) containsUnstagedChanges() (bool, error) {
- ch, err := w.diffStagingWithWorktree(false, true)
- if err != nil {
- return false, err
- }
-
- for _, c := range ch {
- a, err := c.Action()
- if err != nil {
- return false, err
- }
-
- if a == merkletrie.Insert {
- continue
- }
-
- return true, nil
- }
-
- return false, nil
-}
-
-func (w *Worktree) setHEADCommit(commit plumbing.Hash) error {
- head, err := w.r.Reference(plumbing.HEAD, false)
- if err != nil {
- return err
- }
-
- if head.Type() == plumbing.HashReference {
- head = plumbing.NewHashReference(plumbing.HEAD, commit)
- return w.r.Storer.SetReference(head)
- }
-
- branch, err := w.r.Reference(head.Target(), false)
- if err != nil {
- return err
- }
-
- if !branch.Name().IsBranch() {
- return fmt.Errorf("invalid HEAD target should be a branch, found %s", branch.Type())
- }
-
- branch = plumbing.NewHashReference(branch.Name(), commit)
- return w.r.Storer.SetReference(branch)
-}
-
-func (w *Worktree) checkoutChangeSubmodule(name string,
- a merkletrie.Action,
- e *object.TreeEntry,
- idx *indexBuilder,
-) error {
- switch a {
- case merkletrie.Modify:
- sub, err := w.Submodule(name)
- if err != nil {
- return err
- }
-
- if !sub.initialized {
- return nil
- }
-
- return w.addIndexFromTreeEntry(name, e, idx)
- case merkletrie.Insert:
- mode, err := e.Mode.ToOSFileMode()
- if err != nil {
- return err
- }
-
- if err := w.Filesystem.MkdirAll(name, mode); err != nil {
- return err
- }
-
- return w.addIndexFromTreeEntry(name, e, idx)
- }
-
- return nil
-}
-
-func (w *Worktree) checkoutChangeRegularFile(name string,
- a merkletrie.Action,
- t *object.Tree,
- e *object.TreeEntry,
- idx *indexBuilder,
-) error {
- switch a {
- case merkletrie.Modify:
- idx.Remove(name)
-
- // to apply perm changes the file is deleted, billy doesn't implement
- // chmod
- if err := w.Filesystem.Remove(name); err != nil {
- return err
- }
-
- fallthrough
- case merkletrie.Insert:
- f, err := t.File(name)
- if err != nil {
- return err
- }
-
- if err := w.checkoutFile(f); err != nil {
- return err
- }
-
- return w.addIndexFromFile(name, e.Hash, idx)
- }
-
- return nil
-}
-
-func (w *Worktree) checkoutFile(f *object.File) (err error) {
- mode, err := f.Mode.ToOSFileMode()
- if err != nil {
- return
- }
-
- if mode&os.ModeSymlink != 0 {
- return w.checkoutFileSymlink(f)
- }
-
- from, err := f.Reader()
- if err != nil {
- return
- }
-
- defer ioutil.CheckClose(from, &err)
-
- to, err := w.Filesystem.OpenFile(f.Name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode.Perm())
- if err != nil {
- return
- }
-
- defer ioutil.CheckClose(to, &err)
- buf := sync.GetByteSlice()
- _, err = io.CopyBuffer(to, from, *buf)
- sync.PutByteSlice(buf)
- return
-}
-
-func (w *Worktree) checkoutFileSymlink(f *object.File) (err error) {
- // https://github.com/git/git/commit/10ecfa76491e4923988337b2e2243b05376b40de
- if strings.EqualFold(f.Name, gitmodulesFile) {
- return ErrGitModulesSymlink
- }
-
- from, err := f.Reader()
- if err != nil {
- return
- }
-
- defer ioutil.CheckClose(from, &err)
-
- bytes, err := io.ReadAll(from)
- if err != nil {
- return
- }
-
- err = w.Filesystem.Symlink(string(bytes), f.Name)
-
- // On windows, this might fail.
- // Follow Git on Windows behavior by writing the link as it is.
- if err != nil && isSymlinkWindowsNonAdmin(err) {
- mode, _ := f.Mode.ToOSFileMode()
-
- to, err := w.Filesystem.OpenFile(f.Name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode.Perm())
- if err != nil {
- return err
- }
-
- defer ioutil.CheckClose(to, &err)
-
- _, err = to.Write(bytes)
- return err
- }
- return
-}
-
-func (w *Worktree) addIndexFromTreeEntry(name string, f *object.TreeEntry, idx *indexBuilder) error {
- idx.Remove(name)
- idx.Add(&index.Entry{
- Hash: f.Hash,
- Name: name,
- Mode: filemode.Submodule,
- })
- return nil
-}
-
-func (w *Worktree) addIndexFromFile(name string, h plumbing.Hash, idx *indexBuilder) error {
- idx.Remove(name)
- fi, err := w.Filesystem.Lstat(name)
- if err != nil {
- return err
- }
-
- mode, err := filemode.NewFromOSFileMode(fi.Mode())
- if err != nil {
- return err
- }
-
- e := &index.Entry{
- Hash: h,
- Name: name,
- Mode: mode,
- ModifiedAt: fi.ModTime(),
- Size: uint32(fi.Size()),
- }
-
- // if the FileInfo.Sys() comes from os the ctime, dev, inode, uid and gid
- // can be retrieved, otherwise this doesn't apply
- if fillSystemInfo != nil {
- fillSystemInfo(e, fi.Sys())
- }
- idx.Add(e)
- return nil
-}
-
-func (r *Repository) getTreeFromCommitHash(commit plumbing.Hash) (*object.Tree, error) {
- c, err := r.CommitObject(commit)
- if err != nil {
- return nil, err
- }
-
- return c.Tree()
-}
-
-var fillSystemInfo func(e *index.Entry, sys interface{})
-
-const gitmodulesFile = ".gitmodules"
-
-// Submodule returns the submodule with the given name
-func (w *Worktree) Submodule(name string) (*Submodule, error) {
- l, err := w.Submodules()
- if err != nil {
- return nil, err
- }
-
- for _, m := range l {
- if m.Config().Name == name {
- return m, nil
- }
- }
-
- return nil, ErrSubmoduleNotFound
-}
-
-// Submodules returns all the available submodules
-func (w *Worktree) Submodules() (Submodules, error) {
- l := make(Submodules, 0)
- m, err := w.readGitmodulesFile()
- if err != nil || m == nil {
- return l, err
- }
-
- c, err := w.r.Config()
- if err != nil {
- return nil, err
- }
-
- for _, s := range m.Submodules {
- l = append(l, w.newSubmodule(s, c.Submodules[s.Name]))
- }
-
- return l, nil
-}
-
-func (w *Worktree) newSubmodule(fromModules, fromConfig *config.Submodule) *Submodule {
- m := &Submodule{w: w}
- m.initialized = fromConfig != nil
-
- if !m.initialized {
- m.c = fromModules
- return m
- }
-
- m.c = fromConfig
- m.c.Path = fromModules.Path
- return m
-}
-
-func (w *Worktree) isSymlink(path string) bool {
- if s, err := w.Filesystem.Lstat(path); err == nil {
- return s.Mode()&os.ModeSymlink != 0
- }
- return false
-}
-
-func (w *Worktree) readGitmodulesFile() (*config.Modules, error) {
- if w.isSymlink(gitmodulesFile) {
- return nil, ErrGitModulesSymlink
- }
-
- f, err := w.Filesystem.Open(gitmodulesFile)
- if err != nil {
- if os.IsNotExist(err) {
- return nil, nil
- }
-
- return nil, err
- }
-
- defer f.Close()
- input, err := io.ReadAll(f)
- if err != nil {
- return nil, err
- }
-
- m := config.NewModules()
- if err := m.Unmarshal(input); err != nil {
- return m, err
- }
-
- return m, nil
-}
-
-// Clean the worktree by removing untracked files.
-// An empty dir could be removed - this is what `git clean -f -d .` does.
-func (w *Worktree) Clean(opts *CleanOptions) error {
- s, err := w.Status()
- if err != nil {
- return err
- }
-
- root := ""
- files, err := w.Filesystem.ReadDir(root)
- if err != nil {
- return err
- }
- return w.doClean(s, opts, root, files)
-}
-
-func (w *Worktree) doClean(status Status, opts *CleanOptions, dir string, files []os.FileInfo) error {
- for _, fi := range files {
- if fi.Name() == GitDirName {
- continue
- }
-
- // relative path under the root
- path := filepath.Join(dir, fi.Name())
- if fi.IsDir() {
- if !opts.Dir {
- continue
- }
-
- subfiles, err := w.Filesystem.ReadDir(path)
- if err != nil {
- return err
- }
- err = w.doClean(status, opts, path, subfiles)
- if err != nil {
- return err
- }
- } else {
- if status.IsUntracked(path) {
- if err := w.Filesystem.Remove(path); err != nil {
- return err
- }
- }
- }
- }
-
- if opts.Dir && dir != "" {
- _, err := removeDirIfEmpty(w.Filesystem, dir)
- return err
- }
-
- return nil
-}
-
-// GrepResult is structure of a grep result.
-type GrepResult struct {
- // FileName is the name of file which contains match.
- FileName string
- // LineNumber is the line number of a file at which a match was found.
- LineNumber int
- // Content is the content of the file at the matching line.
- Content string
- // TreeName is the name of the tree (reference name/commit hash) at
- // which the match was performed.
- TreeName string
-}
-
-func (gr GrepResult) String() string {
- return fmt.Sprintf("%s:%s:%d:%s", gr.TreeName, gr.FileName, gr.LineNumber, gr.Content)
-}
-
-// Grep performs grep on a repository.
-func (r *Repository) Grep(opts *GrepOptions) ([]GrepResult, error) {
- if err := opts.validate(r); err != nil {
- return nil, err
- }
-
- // Obtain commit hash from options (CommitHash or ReferenceName).
- var commitHash plumbing.Hash
- // treeName contains the value of TreeName in GrepResult.
- var treeName string
-
- if opts.ReferenceName != "" {
- ref, err := r.Reference(opts.ReferenceName, true)
- if err != nil {
- return nil, err
- }
- commitHash = ref.Hash()
- treeName = opts.ReferenceName.String()
- } else if !opts.CommitHash.IsZero() {
- commitHash = opts.CommitHash
- treeName = opts.CommitHash.String()
- }
-
- // Obtain a tree from the commit hash and get a tracked files iterator from
- // the tree.
- tree, err := r.getTreeFromCommitHash(commitHash)
- if err != nil {
- return nil, err
- }
- fileiter := tree.Files()
-
- return findMatchInFiles(fileiter, treeName, opts)
-}
-
-// Grep performs grep on a worktree.
-func (w *Worktree) Grep(opts *GrepOptions) ([]GrepResult, error) {
- return w.r.Grep(opts)
-}
-
-// findMatchInFiles takes a FileIter, worktree name and GrepOptions, and
-// returns a slice of GrepResult containing the result of regex pattern matching
-// in content of all the files.
-func findMatchInFiles(fileiter *object.FileIter, treeName string, opts *GrepOptions) ([]GrepResult, error) {
- var results []GrepResult
-
- err := fileiter.ForEach(func(file *object.File) error {
- var fileInPathSpec bool
-
- // When no pathspecs are provided, search all the files.
- if len(opts.PathSpecs) == 0 {
- fileInPathSpec = true
- }
-
- // Check if the file name matches with the pathspec. Break out of the
- // loop once a match is found.
- for _, pathSpec := range opts.PathSpecs {
- if pathSpec != nil && pathSpec.MatchString(file.Name) {
- fileInPathSpec = true
- break
- }
- }
-
- // If the file does not match with any of the pathspec, skip it.
- if !fileInPathSpec {
- return nil
- }
-
- grepResults, err := findMatchInFile(file, treeName, opts)
- if err != nil {
- return err
- }
- results = append(results, grepResults...)
-
- return nil
- })
-
- return results, err
-}
-
-// findMatchInFile takes a single File, worktree name and GrepOptions,
-// and returns a slice of GrepResult containing the result of regex pattern
-// matching in the given file.
-func findMatchInFile(file *object.File, treeName string, opts *GrepOptions) ([]GrepResult, error) {
- var grepResults []GrepResult
-
- content, err := file.Contents()
- if err != nil {
- return grepResults, err
- }
-
- // Split the file content and parse line-by-line.
- contentByLine := strings.Split(content, "\n")
- for lineNum, cnt := range contentByLine {
- addToResult := false
-
- // Match the patterns and content. Break out of the loop once a
- // match is found.
- for _, pattern := range opts.Patterns {
- if pattern != nil && pattern.MatchString(cnt) {
- // Add to result only if invert match is not enabled.
- if !opts.InvertMatch {
- addToResult = true
- break
- }
- } else if opts.InvertMatch {
- // If matching fails, and invert match is enabled, add to
- // results.
- addToResult = true
- break
- }
- }
-
- if addToResult {
- grepResults = append(grepResults, GrepResult{
- FileName: file.Name,
- LineNumber: lineNum + 1,
- Content: cnt,
- TreeName: treeName,
- })
- }
- }
-
- return grepResults, nil
-}
-
-// will walk up the directory tree removing all encountered empty
-// directories, not just the one containing this file
-func rmFileAndDirsIfEmpty(fs billy.Filesystem, name string) error {
- if err := util.RemoveAll(fs, name); err != nil {
- return err
- }
-
- dir := filepath.Dir(name)
- for {
- removed, err := removeDirIfEmpty(fs, dir)
- if err != nil {
- return err
- }
-
- if !removed {
- // directory was not empty and not removed,
- // stop checking parents
- break
- }
-
- // move to parent directory
- dir = filepath.Dir(dir)
- }
-
- return nil
-}
-
-// removeDirIfEmpty will remove the supplied directory `dir` if
-// `dir` is empty
-// returns true if the directory was removed
-func removeDirIfEmpty(fs billy.Filesystem, dir string) (bool, error) {
- files, err := fs.ReadDir(dir)
- if err != nil {
- return false, err
- }
-
- if len(files) > 0 {
- return false, nil
- }
-
- err = fs.Remove(dir)
- if err != nil {
- return false, err
- }
-
- return true, nil
-}
-
-type indexBuilder struct {
- entries map[string]*index.Entry
-}
-
-func newIndexBuilder(idx *index.Index) *indexBuilder {
- entries := make(map[string]*index.Entry, len(idx.Entries))
- for _, e := range idx.Entries {
- entries[e.Name] = e
- }
- return &indexBuilder{
- entries: entries,
- }
-}
-
-func (b *indexBuilder) Write(idx *index.Index) {
- idx.Entries = idx.Entries[:0]
- for _, e := range b.entries {
- idx.Entries = append(idx.Entries, e)
- }
-}
-
-func (b *indexBuilder) Add(e *index.Entry) {
- b.entries[e.Name] = e
-}
-
-func (b *indexBuilder) Remove(name string) {
- delete(b.entries, filepath.ToSlash(name))
-}
diff --git a/vendor/github.com/go-git/go-git/v5/worktree_bsd.go b/vendor/github.com/go-git/go-git/v5/worktree_bsd.go
deleted file mode 100644
index d4682eb83be..00000000000
--- a/vendor/github.com/go-git/go-git/v5/worktree_bsd.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// +build darwin freebsd netbsd
-
-package git
-
-import (
- "syscall"
- "time"
-
- "github.com/go-git/go-git/v5/plumbing/format/index"
-)
-
-func init() {
- fillSystemInfo = func(e *index.Entry, sys interface{}) {
- if os, ok := sys.(*syscall.Stat_t); ok {
- e.CreatedAt = time.Unix(os.Atimespec.Unix())
- e.Dev = uint32(os.Dev)
- e.Inode = uint32(os.Ino)
- e.GID = os.Gid
- e.UID = os.Uid
- }
- }
-}
-
-func isSymlinkWindowsNonAdmin(err error) bool {
- return false
-}
diff --git a/vendor/github.com/go-git/go-git/v5/worktree_commit.go b/vendor/github.com/go-git/go-git/v5/worktree_commit.go
deleted file mode 100644
index f62054bcb44..00000000000
--- a/vendor/github.com/go-git/go-git/v5/worktree_commit.go
+++ /dev/null
@@ -1,271 +0,0 @@
-package git
-
-import (
- "bytes"
- "errors"
- "io"
- "path"
- "sort"
- "strings"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/filemode"
- "github.com/go-git/go-git/v5/plumbing/format/index"
- "github.com/go-git/go-git/v5/plumbing/object"
- "github.com/go-git/go-git/v5/storage"
-
- "github.com/ProtonMail/go-crypto/openpgp"
- "github.com/ProtonMail/go-crypto/openpgp/packet"
- "github.com/go-git/go-billy/v5"
-)
-
-var (
- // ErrEmptyCommit occurs when a commit is attempted using a clean
- // working tree, with no changes to be committed.
- ErrEmptyCommit = errors.New("cannot create empty commit: clean working tree")
-)
-
-// Commit stores the current contents of the index in a new commit along with
-// a log message from the user describing the changes.
-func (w *Worktree) Commit(msg string, opts *CommitOptions) (plumbing.Hash, error) {
- if err := opts.Validate(w.r); err != nil {
- return plumbing.ZeroHash, err
- }
-
- if opts.All {
- if err := w.autoAddModifiedAndDeleted(); err != nil {
- return plumbing.ZeroHash, err
- }
- }
-
- var treeHash plumbing.Hash
-
- if opts.Amend {
- head, err := w.r.Head()
- if err != nil {
- return plumbing.ZeroHash, err
- }
- headCommit, err := w.r.CommitObject(head.Hash())
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- opts.Parents = nil
- if len(headCommit.ParentHashes) != 0 {
- opts.Parents = []plumbing.Hash{headCommit.ParentHashes[0]}
- }
- }
-
- idx, err := w.r.Storer.Index()
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- h := &buildTreeHelper{
- fs: w.Filesystem,
- s: w.r.Storer,
- }
-
- treeHash, err = h.BuildTree(idx, opts)
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- commit, err := w.buildCommitObject(msg, opts, treeHash)
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- return commit, w.updateHEAD(commit)
-}
-
-func (w *Worktree) autoAddModifiedAndDeleted() error {
- s, err := w.Status()
- if err != nil {
- return err
- }
-
- idx, err := w.r.Storer.Index()
- if err != nil {
- return err
- }
-
- for path, fs := range s {
- if fs.Worktree != Modified && fs.Worktree != Deleted {
- continue
- }
-
- if _, _, err := w.doAddFile(idx, s, path, nil); err != nil {
- return err
- }
-
- }
-
- return w.r.Storer.SetIndex(idx)
-}
-
-func (w *Worktree) updateHEAD(commit plumbing.Hash) error {
- head, err := w.r.Storer.Reference(plumbing.HEAD)
- if err != nil {
- return err
- }
-
- name := plumbing.HEAD
- if head.Type() != plumbing.HashReference {
- name = head.Target()
- }
-
- ref := plumbing.NewHashReference(name, commit)
- return w.r.Storer.SetReference(ref)
-}
-
-func (w *Worktree) buildCommitObject(msg string, opts *CommitOptions, tree plumbing.Hash) (plumbing.Hash, error) {
- commit := &object.Commit{
- Author: *opts.Author,
- Committer: *opts.Committer,
- Message: msg,
- TreeHash: tree,
- ParentHashes: opts.Parents,
- }
-
- // Convert SignKey into a Signer if set. Existing Signer should take priority.
- signer := opts.Signer
- if signer == nil && opts.SignKey != nil {
- signer = &gpgSigner{key: opts.SignKey}
- }
- if signer != nil {
- sig, err := signObject(signer, commit)
- if err != nil {
- return plumbing.ZeroHash, err
- }
- commit.PGPSignature = string(sig)
- }
-
- obj := w.r.Storer.NewEncodedObject()
- if err := commit.Encode(obj); err != nil {
- return plumbing.ZeroHash, err
- }
- return w.r.Storer.SetEncodedObject(obj)
-}
-
-type gpgSigner struct {
- key *openpgp.Entity
- cfg *packet.Config
-}
-
-func (s *gpgSigner) Sign(message io.Reader) ([]byte, error) {
- var b bytes.Buffer
- if err := openpgp.ArmoredDetachSign(&b, s.key, message, s.cfg); err != nil {
- return nil, err
- }
- return b.Bytes(), nil
-}
-
-// buildTreeHelper converts a given index.Index file into multiple git objects
-// reading the blobs from the given filesystem and creating the trees from the
-// index structure. The created objects are pushed to a given Storer.
-type buildTreeHelper struct {
- fs billy.Filesystem
- s storage.Storer
-
- trees map[string]*object.Tree
- entries map[string]*object.TreeEntry
-}
-
-// BuildTree builds the tree objects and push its to the storer, the hash
-// of the root tree is returned.
-func (h *buildTreeHelper) BuildTree(idx *index.Index, opts *CommitOptions) (plumbing.Hash, error) {
- if len(idx.Entries) == 0 && (opts == nil || !opts.AllowEmptyCommits) {
- return plumbing.ZeroHash, ErrEmptyCommit
- }
-
- const rootNode = ""
- h.trees = map[string]*object.Tree{rootNode: {}}
- h.entries = map[string]*object.TreeEntry{}
-
- for _, e := range idx.Entries {
- if err := h.commitIndexEntry(e); err != nil {
- return plumbing.ZeroHash, err
- }
- }
-
- return h.copyTreeToStorageRecursive(rootNode, h.trees[rootNode])
-}
-
-func (h *buildTreeHelper) commitIndexEntry(e *index.Entry) error {
- parts := strings.Split(e.Name, "/")
-
- var fullpath string
- for _, part := range parts {
- parent := fullpath
- fullpath = path.Join(fullpath, part)
-
- h.doBuildTree(e, parent, fullpath)
- }
-
- return nil
-}
-
-func (h *buildTreeHelper) doBuildTree(e *index.Entry, parent, fullpath string) {
- if _, ok := h.trees[fullpath]; ok {
- return
- }
-
- if _, ok := h.entries[fullpath]; ok {
- return
- }
-
- te := object.TreeEntry{Name: path.Base(fullpath)}
-
- if fullpath == e.Name {
- te.Mode = e.Mode
- te.Hash = e.Hash
- } else {
- te.Mode = filemode.Dir
- h.trees[fullpath] = &object.Tree{}
- }
-
- h.trees[parent].Entries = append(h.trees[parent].Entries, te)
-}
-
-type sortableEntries []object.TreeEntry
-
-func (sortableEntries) sortName(te object.TreeEntry) string {
- if te.Mode == filemode.Dir {
- return te.Name + "/"
- }
- return te.Name
-}
-func (se sortableEntries) Len() int { return len(se) }
-func (se sortableEntries) Less(i int, j int) bool { return se.sortName(se[i]) < se.sortName(se[j]) }
-func (se sortableEntries) Swap(i int, j int) { se[i], se[j] = se[j], se[i] }
-
-func (h *buildTreeHelper) copyTreeToStorageRecursive(parent string, t *object.Tree) (plumbing.Hash, error) {
- sort.Sort(sortableEntries(t.Entries))
- for i, e := range t.Entries {
- if e.Mode != filemode.Dir && !e.Hash.IsZero() {
- continue
- }
-
- path := path.Join(parent, e.Name)
-
- var err error
- e.Hash, err = h.copyTreeToStorageRecursive(path, h.trees[path])
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- t.Entries[i] = e
- }
-
- o := h.s.NewEncodedObject()
- if err := t.Encode(o); err != nil {
- return plumbing.ZeroHash, err
- }
-
- hash := o.Hash()
- if h.s.HasEncodedObject(hash) == nil {
- return hash, nil
- }
- return h.s.SetEncodedObject(o)
-}
diff --git a/vendor/github.com/go-git/go-git/v5/worktree_js.go b/vendor/github.com/go-git/go-git/v5/worktree_js.go
deleted file mode 100644
index 7267d055e7a..00000000000
--- a/vendor/github.com/go-git/go-git/v5/worktree_js.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// +build js
-
-package git
-
-import (
- "syscall"
- "time"
-
- "github.com/go-git/go-git/v5/plumbing/format/index"
-)
-
-func init() {
- fillSystemInfo = func(e *index.Entry, sys interface{}) {
- if os, ok := sys.(*syscall.Stat_t); ok {
- e.CreatedAt = time.Unix(int64(os.Ctime), int64(os.CtimeNsec))
- e.Dev = uint32(os.Dev)
- e.Inode = uint32(os.Ino)
- e.GID = os.Gid
- e.UID = os.Uid
- }
- }
-}
-
-func isSymlinkWindowsNonAdmin(err error) bool {
- return false
-}
diff --git a/vendor/github.com/go-git/go-git/v5/worktree_linux.go b/vendor/github.com/go-git/go-git/v5/worktree_linux.go
deleted file mode 100644
index 6fcace2f93d..00000000000
--- a/vendor/github.com/go-git/go-git/v5/worktree_linux.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// +build linux
-
-package git
-
-import (
- "syscall"
- "time"
-
- "github.com/go-git/go-git/v5/plumbing/format/index"
-)
-
-func init() {
- fillSystemInfo = func(e *index.Entry, sys interface{}) {
- if os, ok := sys.(*syscall.Stat_t); ok {
- e.CreatedAt = time.Unix(os.Ctim.Unix())
- e.Dev = uint32(os.Dev)
- e.Inode = uint32(os.Ino)
- e.GID = os.Gid
- e.UID = os.Uid
- }
- }
-}
-
-func isSymlinkWindowsNonAdmin(err error) bool {
- return false
-}
diff --git a/vendor/github.com/go-git/go-git/v5/worktree_plan9.go b/vendor/github.com/go-git/go-git/v5/worktree_plan9.go
deleted file mode 100644
index 8cedf71a32a..00000000000
--- a/vendor/github.com/go-git/go-git/v5/worktree_plan9.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package git
-
-import (
- "syscall"
- "time"
-
- "github.com/go-git/go-git/v5/plumbing/format/index"
-)
-
-func init() {
- fillSystemInfo = func(e *index.Entry, sys interface{}) {
- if os, ok := sys.(*syscall.Dir); ok {
- // Plan 9 doesn't have a CreatedAt field.
- e.CreatedAt = time.Unix(int64(os.Mtime), 0)
-
- e.Dev = uint32(os.Dev)
-
- // Plan 9 has no Inode.
- // ext2srv(4) appears to store Inode in Qid.Path.
- e.Inode = uint32(os.Qid.Path)
-
- // Plan 9 has string UID/GID
- e.GID = 0
- e.UID = 0
- }
- }
-}
-
-func isSymlinkWindowsNonAdmin(err error) bool {
- return true
-}
diff --git a/vendor/github.com/go-git/go-git/v5/worktree_status.go b/vendor/github.com/go-git/go-git/v5/worktree_status.go
deleted file mode 100644
index dd9b2439cfd..00000000000
--- a/vendor/github.com/go-git/go-git/v5/worktree_status.go
+++ /dev/null
@@ -1,713 +0,0 @@
-package git
-
-import (
- "bytes"
- "errors"
- "io"
- "os"
- "path"
- "path/filepath"
- "strings"
-
- "github.com/go-git/go-billy/v5/util"
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/filemode"
- "github.com/go-git/go-git/v5/plumbing/format/gitignore"
- "github.com/go-git/go-git/v5/plumbing/format/index"
- "github.com/go-git/go-git/v5/plumbing/object"
- "github.com/go-git/go-git/v5/utils/ioutil"
- "github.com/go-git/go-git/v5/utils/merkletrie"
- "github.com/go-git/go-git/v5/utils/merkletrie/filesystem"
- mindex "github.com/go-git/go-git/v5/utils/merkletrie/index"
- "github.com/go-git/go-git/v5/utils/merkletrie/noder"
-)
-
-var (
- // ErrDestinationExists in an Move operation means that the target exists on
- // the worktree.
- ErrDestinationExists = errors.New("destination exists")
- // ErrGlobNoMatches in an AddGlob if the glob pattern does not match any
- // files in the worktree.
- ErrGlobNoMatches = errors.New("glob pattern did not match any files")
-)
-
-// Status returns the working tree status.
-func (w *Worktree) Status() (Status, error) {
- var hash plumbing.Hash
-
- ref, err := w.r.Head()
- if err != nil && err != plumbing.ErrReferenceNotFound {
- return nil, err
- }
-
- if err == nil {
- hash = ref.Hash()
- }
-
- return w.status(hash)
-}
-
-func (w *Worktree) status(commit plumbing.Hash) (Status, error) {
- s := make(Status)
-
- left, err := w.diffCommitWithStaging(commit, false)
- if err != nil {
- return nil, err
- }
-
- for _, ch := range left {
- a, err := ch.Action()
- if err != nil {
- return nil, err
- }
-
- fs := s.File(nameFromAction(&ch))
- fs.Worktree = Unmodified
-
- switch a {
- case merkletrie.Delete:
- s.File(ch.From.String()).Staging = Deleted
- case merkletrie.Insert:
- s.File(ch.To.String()).Staging = Added
- case merkletrie.Modify:
- s.File(ch.To.String()).Staging = Modified
- }
- }
-
- right, err := w.diffStagingWithWorktree(false, true)
- if err != nil {
- return nil, err
- }
-
- for _, ch := range right {
- a, err := ch.Action()
- if err != nil {
- return nil, err
- }
-
- fs := s.File(nameFromAction(&ch))
- if fs.Staging == Untracked {
- fs.Staging = Unmodified
- }
-
- switch a {
- case merkletrie.Delete:
- fs.Worktree = Deleted
- case merkletrie.Insert:
- fs.Worktree = Untracked
- fs.Staging = Untracked
- case merkletrie.Modify:
- fs.Worktree = Modified
- }
- }
-
- return s, nil
-}
-
-func nameFromAction(ch *merkletrie.Change) string {
- name := ch.To.String()
- if name == "" {
- return ch.From.String()
- }
-
- return name
-}
-
-func (w *Worktree) diffStagingWithWorktree(reverse, excludeIgnoredChanges bool) (merkletrie.Changes, error) {
- idx, err := w.r.Storer.Index()
- if err != nil {
- return nil, err
- }
-
- from := mindex.NewRootNode(idx)
- submodules, err := w.getSubmodulesStatus()
- if err != nil {
- return nil, err
- }
-
- to := filesystem.NewRootNode(w.Filesystem, submodules)
-
- var c merkletrie.Changes
- if reverse {
- c, err = merkletrie.DiffTree(to, from, diffTreeIsEquals)
- } else {
- c, err = merkletrie.DiffTree(from, to, diffTreeIsEquals)
- }
-
- if err != nil {
- return nil, err
- }
-
- if excludeIgnoredChanges {
- return w.excludeIgnoredChanges(c), nil
- }
- return c, nil
-}
-
-func (w *Worktree) excludeIgnoredChanges(changes merkletrie.Changes) merkletrie.Changes {
- patterns, err := gitignore.ReadPatterns(w.Filesystem, nil)
- if err != nil {
- return changes
- }
-
- patterns = append(patterns, w.Excludes...)
-
- if len(patterns) == 0 {
- return changes
- }
-
- m := gitignore.NewMatcher(patterns)
-
- var res merkletrie.Changes
- for _, ch := range changes {
- var path []string
- for _, n := range ch.To {
- path = append(path, n.Name())
- }
- if len(path) == 0 {
- for _, n := range ch.From {
- path = append(path, n.Name())
- }
- }
- if len(path) != 0 {
- isDir := (len(ch.To) > 0 && ch.To.IsDir()) || (len(ch.From) > 0 && ch.From.IsDir())
- if m.Match(path, isDir) {
- if len(ch.From) == 0 {
- continue
- }
- }
- }
- res = append(res, ch)
- }
- return res
-}
-
-func (w *Worktree) getSubmodulesStatus() (map[string]plumbing.Hash, error) {
- o := map[string]plumbing.Hash{}
-
- sub, err := w.Submodules()
- if err != nil {
- return nil, err
- }
-
- status, err := sub.Status()
- if err != nil {
- return nil, err
- }
-
- for _, s := range status {
- if s.Current.IsZero() {
- o[s.Path] = s.Expected
- continue
- }
-
- o[s.Path] = s.Current
- }
-
- return o, nil
-}
-
-func (w *Worktree) diffCommitWithStaging(commit plumbing.Hash, reverse bool) (merkletrie.Changes, error) {
- var t *object.Tree
- if !commit.IsZero() {
- c, err := w.r.CommitObject(commit)
- if err != nil {
- return nil, err
- }
-
- t, err = c.Tree()
- if err != nil {
- return nil, err
- }
- }
-
- return w.diffTreeWithStaging(t, reverse)
-}
-
-func (w *Worktree) diffTreeWithStaging(t *object.Tree, reverse bool) (merkletrie.Changes, error) {
- var from noder.Noder
- if t != nil {
- from = object.NewTreeRootNode(t)
- }
-
- idx, err := w.r.Storer.Index()
- if err != nil {
- return nil, err
- }
-
- to := mindex.NewRootNode(idx)
-
- if reverse {
- return merkletrie.DiffTree(to, from, diffTreeIsEquals)
- }
-
- return merkletrie.DiffTree(from, to, diffTreeIsEquals)
-}
-
-var emptyNoderHash = make([]byte, 24)
-
-// diffTreeIsEquals is a implementation of noder.Equals, used to compare
-// noder.Noder, it compare the content and the length of the hashes.
-//
-// Since some of the noder.Noder implementations doesn't compute a hash for
-// some directories, if any of the hashes is a 24-byte slice of zero values
-// the comparison is not done and the hashes are take as different.
-func diffTreeIsEquals(a, b noder.Hasher) bool {
- hashA := a.Hash()
- hashB := b.Hash()
-
- if bytes.Equal(hashA, emptyNoderHash) || bytes.Equal(hashB, emptyNoderHash) {
- return false
- }
-
- return bytes.Equal(hashA, hashB)
-}
-
-// Add adds the file contents of a file in the worktree to the index. if the
-// file is already staged in the index no error is returned. If a file deleted
-// from the Workspace is given, the file is removed from the index. If a
-// directory given, adds the files and all his sub-directories recursively in
-// the worktree to the index. If any of the files is already staged in the index
-// no error is returned. When path is a file, the blob.Hash is returned.
-func (w *Worktree) Add(path string) (plumbing.Hash, error) {
- // TODO(mcuadros): deprecate in favor of AddWithOption in v6.
- return w.doAdd(path, make([]gitignore.Pattern, 0), false)
-}
-
-func (w *Worktree) doAddDirectory(idx *index.Index, s Status, directory string, ignorePattern []gitignore.Pattern) (added bool, err error) {
- if len(ignorePattern) > 0 {
- m := gitignore.NewMatcher(ignorePattern)
- matchPath := strings.Split(directory, string(os.PathSeparator))
- if m.Match(matchPath, true) {
- // ignore
- return false, nil
- }
- }
-
- directory = filepath.ToSlash(filepath.Clean(directory))
-
- for name := range s {
- if !isPathInDirectory(name, directory) {
- continue
- }
-
- var a bool
- a, _, err = w.doAddFile(idx, s, name, ignorePattern)
- if err != nil {
- return
- }
-
- added = added || a
- }
-
- return
-}
-
-func isPathInDirectory(path, directory string) bool {
- return directory == "." || strings.HasPrefix(path, directory+"/")
-}
-
-// AddWithOptions file contents to the index, updates the index using the
-// current content found in the working tree, to prepare the content staged for
-// the next commit.
-//
-// It typically adds the current content of existing paths as a whole, but with
-// some options it can also be used to add content with only part of the changes
-// made to the working tree files applied, or remove paths that do not exist in
-// the working tree anymore.
-func (w *Worktree) AddWithOptions(opts *AddOptions) error {
- if err := opts.Validate(w.r); err != nil {
- return err
- }
-
- if opts.All {
- _, err := w.doAdd(".", w.Excludes, false)
- return err
- }
-
- if opts.Glob != "" {
- return w.AddGlob(opts.Glob)
- }
-
- _, err := w.doAdd(opts.Path, make([]gitignore.Pattern, 0), opts.SkipStatus)
- return err
-}
-
-func (w *Worktree) doAdd(path string, ignorePattern []gitignore.Pattern, skipStatus bool) (plumbing.Hash, error) {
- idx, err := w.r.Storer.Index()
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- var h plumbing.Hash
- var added bool
-
- fi, err := w.Filesystem.Lstat(path)
-
- // status is required for doAddDirectory
- var s Status
- var err2 error
- if !skipStatus || fi == nil || fi.IsDir() {
- s, err2 = w.Status()
- if err2 != nil {
- return plumbing.ZeroHash, err2
- }
- }
-
- if err != nil || !fi.IsDir() {
- added, h, err = w.doAddFile(idx, s, path, ignorePattern)
- } else {
- added, err = w.doAddDirectory(idx, s, path, ignorePattern)
- }
-
- if err != nil {
- return h, err
- }
-
- if !added {
- return h, nil
- }
-
- return h, w.r.Storer.SetIndex(idx)
-}
-
-// AddGlob adds all paths, matching pattern, to the index. If pattern matches a
-// directory path, all directory contents are added to the index recursively. No
-// error is returned if all matching paths are already staged in index.
-func (w *Worktree) AddGlob(pattern string) error {
- // TODO(mcuadros): deprecate in favor of AddWithOption in v6.
- files, err := util.Glob(w.Filesystem, pattern)
- if err != nil {
- return err
- }
-
- if len(files) == 0 {
- return ErrGlobNoMatches
- }
-
- s, err := w.Status()
- if err != nil {
- return err
- }
-
- idx, err := w.r.Storer.Index()
- if err != nil {
- return err
- }
-
- var saveIndex bool
- for _, file := range files {
- fi, err := w.Filesystem.Lstat(file)
- if err != nil {
- return err
- }
-
- var added bool
- if fi.IsDir() {
- added, err = w.doAddDirectory(idx, s, file, make([]gitignore.Pattern, 0))
- } else {
- added, _, err = w.doAddFile(idx, s, file, make([]gitignore.Pattern, 0))
- }
-
- if err != nil {
- return err
- }
-
- if !saveIndex && added {
- saveIndex = true
- }
- }
-
- if saveIndex {
- return w.r.Storer.SetIndex(idx)
- }
-
- return nil
-}
-
-// doAddFile create a new blob from path and update the index, added is true if
-// the file added is different from the index.
-// if s status is nil will skip the status check and update the index anyway
-func (w *Worktree) doAddFile(idx *index.Index, s Status, path string, ignorePattern []gitignore.Pattern) (added bool, h plumbing.Hash, err error) {
- if s != nil && s.File(path).Worktree == Unmodified {
- return false, h, nil
- }
- if len(ignorePattern) > 0 {
- m := gitignore.NewMatcher(ignorePattern)
- matchPath := strings.Split(path, string(os.PathSeparator))
- if m.Match(matchPath, true) {
- // ignore
- return false, h, nil
- }
- }
-
- h, err = w.copyFileToStorage(path)
- if err != nil {
- if os.IsNotExist(err) {
- added = true
- h, err = w.deleteFromIndex(idx, path)
- }
-
- return
- }
-
- if err := w.addOrUpdateFileToIndex(idx, path, h); err != nil {
- return false, h, err
- }
-
- return true, h, err
-}
-
-func (w *Worktree) copyFileToStorage(path string) (hash plumbing.Hash, err error) {
- fi, err := w.Filesystem.Lstat(path)
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- obj := w.r.Storer.NewEncodedObject()
- obj.SetType(plumbing.BlobObject)
- obj.SetSize(fi.Size())
-
- writer, err := obj.Writer()
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- defer ioutil.CheckClose(writer, &err)
-
- if fi.Mode()&os.ModeSymlink != 0 {
- err = w.fillEncodedObjectFromSymlink(writer, path, fi)
- } else {
- err = w.fillEncodedObjectFromFile(writer, path, fi)
- }
-
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- return w.r.Storer.SetEncodedObject(obj)
-}
-
-func (w *Worktree) fillEncodedObjectFromFile(dst io.Writer, path string, fi os.FileInfo) (err error) {
- src, err := w.Filesystem.Open(path)
- if err != nil {
- return err
- }
-
- defer ioutil.CheckClose(src, &err)
-
- if _, err := io.Copy(dst, src); err != nil {
- return err
- }
-
- return err
-}
-
-func (w *Worktree) fillEncodedObjectFromSymlink(dst io.Writer, path string, fi os.FileInfo) error {
- target, err := w.Filesystem.Readlink(path)
- if err != nil {
- return err
- }
-
- _, err = dst.Write([]byte(target))
- return err
-}
-
-func (w *Worktree) addOrUpdateFileToIndex(idx *index.Index, filename string, h plumbing.Hash) error {
- e, err := idx.Entry(filename)
- if err != nil && err != index.ErrEntryNotFound {
- return err
- }
-
- if err == index.ErrEntryNotFound {
- return w.doAddFileToIndex(idx, filename, h)
- }
-
- return w.doUpdateFileToIndex(e, filename, h)
-}
-
-func (w *Worktree) doAddFileToIndex(idx *index.Index, filename string, h plumbing.Hash) error {
- return w.doUpdateFileToIndex(idx.Add(filename), filename, h)
-}
-
-func (w *Worktree) doUpdateFileToIndex(e *index.Entry, filename string, h plumbing.Hash) error {
- info, err := w.Filesystem.Lstat(filename)
- if err != nil {
- return err
- }
-
- e.Hash = h
- e.ModifiedAt = info.ModTime()
- e.Mode, err = filemode.NewFromOSFileMode(info.Mode())
- if err != nil {
- return err
- }
-
- if e.Mode.IsRegular() {
- e.Size = uint32(info.Size())
- }
-
- fillSystemInfo(e, info.Sys())
- return nil
-}
-
-// Remove removes files from the working tree and from the index.
-func (w *Worktree) Remove(path string) (plumbing.Hash, error) {
- // TODO(mcuadros): remove plumbing.Hash from signature at v5.
- idx, err := w.r.Storer.Index()
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- var h plumbing.Hash
-
- fi, err := w.Filesystem.Lstat(path)
- if err != nil || !fi.IsDir() {
- h, err = w.doRemoveFile(idx, path)
- } else {
- _, err = w.doRemoveDirectory(idx, path)
- }
- if err != nil {
- return h, err
- }
-
- return h, w.r.Storer.SetIndex(idx)
-}
-
-func (w *Worktree) doRemoveDirectory(idx *index.Index, directory string) (removed bool, err error) {
- files, err := w.Filesystem.ReadDir(directory)
- if err != nil {
- return false, err
- }
-
- for _, file := range files {
- name := path.Join(directory, file.Name())
-
- var r bool
- if file.IsDir() {
- r, err = w.doRemoveDirectory(idx, name)
- } else {
- _, err = w.doRemoveFile(idx, name)
- if err == index.ErrEntryNotFound {
- err = nil
- }
- }
-
- if err != nil {
- return
- }
-
- if !removed && r {
- removed = true
- }
- }
-
- err = w.removeEmptyDirectory(directory)
- return
-}
-
-func (w *Worktree) removeEmptyDirectory(path string) error {
- files, err := w.Filesystem.ReadDir(path)
- if err != nil {
- return err
- }
-
- if len(files) != 0 {
- return nil
- }
-
- return w.Filesystem.Remove(path)
-}
-
-func (w *Worktree) doRemoveFile(idx *index.Index, path string) (plumbing.Hash, error) {
- hash, err := w.deleteFromIndex(idx, path)
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- return hash, w.deleteFromFilesystem(path)
-}
-
-func (w *Worktree) deleteFromIndex(idx *index.Index, path string) (plumbing.Hash, error) {
- e, err := idx.Remove(path)
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- return e.Hash, nil
-}
-
-func (w *Worktree) deleteFromFilesystem(path string) error {
- err := w.Filesystem.Remove(path)
- if os.IsNotExist(err) {
- return nil
- }
-
- return err
-}
-
-// RemoveGlob removes all paths, matching pattern, from the index. If pattern
-// matches a directory path, all directory contents are removed from the index
-// recursively.
-func (w *Worktree) RemoveGlob(pattern string) error {
- idx, err := w.r.Storer.Index()
- if err != nil {
- return err
- }
-
- entries, err := idx.Glob(pattern)
- if err != nil {
- return err
- }
-
- for _, e := range entries {
- file := filepath.FromSlash(e.Name)
- if _, err := w.Filesystem.Lstat(file); err != nil && !os.IsNotExist(err) {
- return err
- }
-
- if _, err := w.doRemoveFile(idx, file); err != nil {
- return err
- }
-
- dir, _ := filepath.Split(file)
- if err := w.removeEmptyDirectory(dir); err != nil {
- return err
- }
- }
-
- return w.r.Storer.SetIndex(idx)
-}
-
-// Move moves or rename a file in the worktree and the index, directories are
-// not supported.
-func (w *Worktree) Move(from, to string) (plumbing.Hash, error) {
- // TODO(mcuadros): support directories and/or implement support for glob
- if _, err := w.Filesystem.Lstat(from); err != nil {
- return plumbing.ZeroHash, err
- }
-
- if _, err := w.Filesystem.Lstat(to); err == nil {
- return plumbing.ZeroHash, ErrDestinationExists
- }
-
- idx, err := w.r.Storer.Index()
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- hash, err := w.deleteFromIndex(idx, from)
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- if err := w.Filesystem.Rename(from, to); err != nil {
- return hash, err
- }
-
- if err := w.addOrUpdateFileToIndex(idx, to, hash); err != nil {
- return hash, err
- }
-
- return hash, w.r.Storer.SetIndex(idx)
-}
diff --git a/vendor/github.com/go-git/go-git/v5/worktree_unix_other.go b/vendor/github.com/go-git/go-git/v5/worktree_unix_other.go
deleted file mode 100644
index 5b16e70b70f..00000000000
--- a/vendor/github.com/go-git/go-git/v5/worktree_unix_other.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// +build openbsd dragonfly solaris
-
-package git
-
-import (
- "syscall"
- "time"
-
- "github.com/go-git/go-git/v5/plumbing/format/index"
-)
-
-func init() {
- fillSystemInfo = func(e *index.Entry, sys interface{}) {
- if os, ok := sys.(*syscall.Stat_t); ok {
- e.CreatedAt = time.Unix(os.Atim.Unix())
- e.Dev = uint32(os.Dev)
- e.Inode = uint32(os.Ino)
- e.GID = os.Gid
- e.UID = os.Uid
- }
- }
-}
-
-func isSymlinkWindowsNonAdmin(err error) bool {
- return false
-}
diff --git a/vendor/github.com/go-git/go-git/v5/worktree_windows.go b/vendor/github.com/go-git/go-git/v5/worktree_windows.go
deleted file mode 100644
index 1928f9712e9..00000000000
--- a/vendor/github.com/go-git/go-git/v5/worktree_windows.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// +build windows
-
-package git
-
-import (
- "os"
- "syscall"
- "time"
-
- "github.com/go-git/go-git/v5/plumbing/format/index"
-)
-
-func init() {
- fillSystemInfo = func(e *index.Entry, sys interface{}) {
- if os, ok := sys.(*syscall.Win32FileAttributeData); ok {
- seconds := os.CreationTime.Nanoseconds() / 1000000000
- nanoseconds := os.CreationTime.Nanoseconds() - seconds*1000000000
- e.CreatedAt = time.Unix(seconds, nanoseconds)
- }
- }
-}
-
-func isSymlinkWindowsNonAdmin(err error) bool {
- const ERROR_PRIVILEGE_NOT_HELD syscall.Errno = 1314
-
- if err != nil {
- if errLink, ok := err.(*os.LinkError); ok {
- if errNo, ok := errLink.Err.(syscall.Errno); ok {
- return errNo == ERROR_PRIVILEGE_NOT_HELD
- }
- }
- }
-
- return false
-}
diff --git a/vendor/github.com/gofrs/flock/.golangci.yml b/vendor/github.com/gofrs/flock/.golangci.yml
new file mode 100644
index 00000000000..3ad88a38fcd
--- /dev/null
+++ b/vendor/github.com/gofrs/flock/.golangci.yml
@@ -0,0 +1,114 @@
+run:
+ timeout: 10m
+
+linters:
+ enable:
+ - asasalint
+ - bidichk
+ - dogsled
+ - dupword
+ - durationcheck
+ - err113
+ - errname
+ - errorlint
+ - fatcontext
+ - forbidigo
+ - gocheckcompilerdirectives
+ - gochecknoinits
+ - gocritic
+ - godot
+ - godox
+ - gofumpt
+ - goheader
+ - goimports
+ - gomoddirectives
+ - goprintffuncname
+ - gosec
+ - inamedparam
+ - interfacebloat
+ - ireturn
+ - mirror
+ - misspell
+ - nolintlint
+ - revive
+ - stylecheck
+ - tenv
+ - testifylint
+ - thelper
+ - unconvert
+ - unparam
+ - usestdlibvars
+ - whitespace
+
+linters-settings:
+ misspell:
+ locale: US
+ godox:
+ keywords:
+ - FIXME
+ goheader:
+ template: |-
+ Copyright 2015 Tim Heckman. All rights reserved.
+ Copyright 2018-{{ YEAR }} The Gofrs. All rights reserved.
+ Use of this source code is governed by the BSD 3-Clause
+ license that can be found in the LICENSE file.
+ gofumpt:
+ extra-rules: true
+ gocritic:
+ enabled-tags:
+ - diagnostic
+ - style
+ - performance
+ disabled-checks:
+ - paramTypeCombine # already handle by gofumpt.extra-rules
+ - whyNoLint # already handle by nonolint
+ - unnamedResult
+ - hugeParam
+ - sloppyReassign
+ - rangeValCopy
+ - octalLiteral
+ - ptrToRefParam
+ - appendAssign
+ - ruleguard
+ - httpNoBody
+ - exposedSyncMutex
+
+ revive:
+ rules:
+ - name: struct-tag
+ - name: blank-imports
+ - name: context-as-argument
+ - name: context-keys-type
+ - name: dot-imports
+ - name: error-return
+ - name: error-strings
+ - name: error-naming
+ - name: exported
+ - name: if-return
+ - name: increment-decrement
+ - name: var-naming
+ - name: var-declaration
+ - name: package-comments
+ - name: range
+ - name: receiver-naming
+ - name: time-naming
+ - name: unexported-return
+ - name: indent-error-flow
+ - name: errorf
+ - name: empty-block
+ - name: superfluous-else
+ - name: unused-parameter
+ - name: unreachable-code
+ - name: redefines-builtin-id
+
+issues:
+ exclude-use-default: true
+ max-issues-per-linter: 0
+ max-same-issues: 0
+
+output:
+ show-stats: true
+ sort-results: true
+ sort-order:
+ - linter
+ - file
diff --git a/vendor/github.com/gofrs/flock/.travis.yml b/vendor/github.com/gofrs/flock/.travis.yml
deleted file mode 100644
index b16d040fa89..00000000000
--- a/vendor/github.com/gofrs/flock/.travis.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-language: go
-go:
- - 1.14.x
- - 1.15.x
-script: go test -v -check.vv -race ./...
-sudo: false
-notifications:
- email:
- on_success: never
- on_failure: always
diff --git a/vendor/github.com/gofrs/flock/LICENSE b/vendor/github.com/gofrs/flock/LICENSE
index 8b8ff36fe42..7de525bf027 100644
--- a/vendor/github.com/gofrs/flock/LICENSE
+++ b/vendor/github.com/gofrs/flock/LICENSE
@@ -1,3 +1,4 @@
+Copyright (c) 2018-2024, The Gofrs
Copyright (c) 2015-2020, Tim Heckman
All rights reserved.
diff --git a/vendor/github.com/gofrs/flock/Makefile b/vendor/github.com/gofrs/flock/Makefile
new file mode 100644
index 00000000000..65c139d68c0
--- /dev/null
+++ b/vendor/github.com/gofrs/flock/Makefile
@@ -0,0 +1,15 @@
+.PHONY: lint test test_race build_cross_os
+
+default: lint test build_cross_os
+
+test:
+ go test -v -cover ./...
+
+test_race:
+ CGO_ENABLED=1 go test -v -race ./...
+
+lint:
+ golangci-lint run
+
+build_cross_os:
+ ./build.sh
diff --git a/vendor/github.com/gofrs/flock/README.md b/vendor/github.com/gofrs/flock/README.md
index 71ce63692e8..f7ca0dd9c24 100644
--- a/vendor/github.com/gofrs/flock/README.md
+++ b/vendor/github.com/gofrs/flock/README.md
@@ -1,26 +1,22 @@
# flock
-[](https://travis-ci.org/gofrs/flock)
-[](https://godoc.org/github.com/gofrs/flock)
-[](https://github.com/gofrs/flock/blob/master/LICENSE)
-[](https://goreportcard.com/report/github.com/gofrs/flock)
-`flock` implements a thread-safe sync.Locker interface for file locking. It also
-includes a non-blocking TryLock() function to allow locking without blocking execution.
+[](https://pkg.go.dev/github.com/gofrs/flock)
+[](https://github.com/gofrs/flock/blob/main/LICENSE)
+[](https://goreportcard.com/report/github.com/gofrs/flock)
-## License
-`flock` is released under the BSD 3-Clause License. See the `LICENSE` file for more details.
+`flock` implements a thread-safe file lock.
-## Go Compatibility
-This package makes use of the `context` package that was introduced in Go 1.7. As such, this
-package has an implicit dependency on Go 1.7+.
+It also includes a non-blocking `TryLock()` function to allow locking without blocking execution.
## Installation
-```
+
+```bash
go get -u github.com/gofrs/flock
```
## Usage
-```Go
+
+```go
import "github.com/gofrs/flock"
fileLock := flock.New("/var/lock/go-lock.lock")
@@ -38,4 +34,12 @@ if locked {
```
For more detailed usage information take a look at the package API docs on
-[GoDoc](https://godoc.org/github.com/gofrs/flock).
+[GoDoc](https://pkg.go.dev/github.com/gofrs/flock).
+
+## License
+
+`flock` is released under the BSD 3-Clause License. See the [`LICENSE`](./LICENSE) file for more details.
+
+## Project History
+
+This project was originally `github.com/theckman/go-flock`, it was transferred to Gofrs by the original author [Tim Heckman ](https://github.com/theckman).
diff --git a/vendor/github.com/gofrs/flock/SECURITY.md b/vendor/github.com/gofrs/flock/SECURITY.md
new file mode 100644
index 00000000000..01419bd592a
--- /dev/null
+++ b/vendor/github.com/gofrs/flock/SECURITY.md
@@ -0,0 +1,21 @@
+# Security Policy
+
+## Supported Versions
+
+We support the latest version of this library.
+We do not guarantee support of previous versions.
+
+If a defect is reported, it will generally be fixed on the latest version (provided it exists) irrespective of whether it was introduced in a prior version.
+
+## Reporting a Vulnerability
+
+To report a potential security vulnerability, please create a [security advisory](https://github.com/gofrs/flock/security/advisories/new).
+
+For us to respond to your report most effectively, please include any of the following:
+
+- Steps to reproduce or a proof-of-concept
+- Any relevant information, including the versions used
+
+## Security Scorecard
+
+This project submits security [results](https://scorecard.dev/viewer/?uri=github.com/gofrs/flock) to the [OpenSSF Scorecard](https://securityscorecards.dev/).
diff --git a/vendor/github.com/gofrs/flock/appveyor.yml b/vendor/github.com/gofrs/flock/appveyor.yml
deleted file mode 100644
index 909b4bf7cb4..00000000000
--- a/vendor/github.com/gofrs/flock/appveyor.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-version: '{build}'
-
-build: false
-deploy: false
-
-clone_folder: 'c:\gopath\src\github.com\gofrs\flock'
-
-environment:
- GOPATH: 'c:\gopath'
- GOVERSION: '1.15'
-
-init:
- - git config --global core.autocrlf input
-
-install:
- - rmdir c:\go /s /q
- - appveyor DownloadFile https://storage.googleapis.com/golang/go%GOVERSION%.windows-amd64.msi
- - msiexec /i go%GOVERSION%.windows-amd64.msi /q
- - set Path=c:\go\bin;c:\gopath\bin;%Path%
- - go version
- - go env
-
-test_script:
- - go get -t ./...
- - go test -race -v ./...
diff --git a/vendor/github.com/gofrs/flock/build.sh b/vendor/github.com/gofrs/flock/build.sh
new file mode 100644
index 00000000000..60f7809f065
--- /dev/null
+++ b/vendor/github.com/gofrs/flock/build.sh
@@ -0,0 +1,18 @@
+#!/bin/bash -e
+
+# Not supported by flock:
+# - plan9/*
+# - js/wasm
+# - wasp1/wasm
+
+for row in $(go tool dist list -json | jq -r '.[] | @base64'); do
+ _jq() {
+ echo ${row} | base64 --decode | jq -r ${1}
+ }
+
+ GOOS=$(_jq '.GOOS')
+ GOARCH=$(_jq '.GOARCH')
+
+ echo "$GOOS/$GOARCH"
+ GOOS=$GOOS GOARCH=$GOARCH go build
+done
diff --git a/vendor/github.com/gofrs/flock/flock.go b/vendor/github.com/gofrs/flock/flock.go
index 95c784ca504..ff942b228a6 100644
--- a/vendor/github.com/gofrs/flock/flock.go
+++ b/vendor/github.com/gofrs/flock/flock.go
@@ -1,4 +1,5 @@
// Copyright 2015 Tim Heckman. All rights reserved.
+// Copyright 2018-2024 The Gofrs. All rights reserved.
// Use of this source code is governed by the BSD 3-Clause
// license that can be found in the LICENSE file.
@@ -18,12 +19,29 @@ package flock
import (
"context"
+ "io/fs"
"os"
"runtime"
"sync"
"time"
)
+type Option func(f *Flock)
+
+// SetFlag sets the flag used to create/open the file.
+func SetFlag(flag int) Option {
+ return func(f *Flock) {
+ f.flag = flag
+ }
+}
+
+// SetPermissions sets the OS permissions to set on the file.
+func SetPermissions(perm fs.FileMode) Option {
+ return func(f *Flock) {
+ f.perm = perm
+ }
+}
+
// Flock is the struct type to handle file locking. All fields are unexported,
// with access to some of the fields provided by getter methods (Path() and Locked()).
type Flock struct {
@@ -32,12 +50,37 @@ type Flock struct {
fh *os.File
l bool
r bool
+
+ // flag is the flag used to create/open the file.
+ flag int
+ // perm is the OS permissions to set on the file.
+ perm fs.FileMode
}
// New returns a new instance of *Flock. The only parameter
// it takes is the path to the desired lockfile.
-func New(path string) *Flock {
- return &Flock{path: path}
+func New(path string, opts ...Option) *Flock {
+ // create it if it doesn't exist, and open the file read-only.
+ flags := os.O_CREATE
+ switch runtime.GOOS {
+ case "aix", "solaris", "illumos":
+ // AIX cannot preform write-lock (i.e. exclusive) on a read-only file.
+ flags |= os.O_RDWR
+ default:
+ flags |= os.O_RDONLY
+ }
+
+ f := &Flock{
+ path: path,
+ flag: flags,
+ perm: fs.FileMode(0o600),
+ }
+
+ for _, opt := range opts {
+ opt(f)
+ }
+
+ return f
}
// NewFlock returns a new instance of *Flock. The only parameter
@@ -67,6 +110,7 @@ func (f *Flock) Path() string {
func (f *Flock) Locked() bool {
f.m.RLock()
defer f.m.RUnlock()
+
return f.l
}
@@ -76,6 +120,7 @@ func (f *Flock) Locked() bool {
func (f *Flock) RLocked() bool {
f.m.RLock()
defer f.m.RUnlock()
+
return f.r
}
@@ -83,16 +128,18 @@ func (f *Flock) String() string {
return f.path
}
-// TryLockContext repeatedly tries to take an exclusive lock until one of the
-// conditions is met: TryLock succeeds, TryLock fails with error, or Context
-// Done channel is closed.
+// TryLockContext repeatedly tries to take an exclusive lock until one of the conditions is met:
+// - TryLock succeeds
+// - TryLock fails with error
+// - Context Done channel is closed.
func (f *Flock) TryLockContext(ctx context.Context, retryDelay time.Duration) (bool, error) {
return tryCtx(ctx, f.TryLock, retryDelay)
}
-// TryRLockContext repeatedly tries to take a shared lock until one of the
-// conditions is met: TryRLock succeeds, TryRLock fails with error, or Context
-// Done channel is closed.
+// TryRLockContext repeatedly tries to take a shared lock until one of the conditions is met:
+// - TryRLock succeeds
+// - TryRLock fails with error
+// - Context Done channel is closed.
func (f *Flock) TryRLockContext(ctx context.Context, retryDelay time.Duration) (bool, error) {
return tryCtx(ctx, f.TryRLock, retryDelay)
}
@@ -101,10 +148,12 @@ func tryCtx(ctx context.Context, fn func() (bool, error), retryDelay time.Durati
if ctx.Err() != nil {
return false, ctx.Err()
}
+
for {
if ok, err := fn(); ok || err != nil {
return ok, err
}
+
select {
case <-ctx.Done():
return false, ctx.Err()
@@ -114,31 +163,44 @@ func tryCtx(ctx context.Context, fn func() (bool, error), retryDelay time.Durati
}
}
-func (f *Flock) setFh() error {
+func (f *Flock) setFh(flag int) error {
// open a new os.File instance
- // create it if it doesn't exist, and open the file read-only.
- flags := os.O_CREATE
- if runtime.GOOS == "aix" {
- // AIX cannot preform write-lock (ie exclusive) on a
- // read-only file.
- flags |= os.O_RDWR
- } else {
- flags |= os.O_RDONLY
- }
- fh, err := os.OpenFile(f.path, flags, os.FileMode(0600))
+ fh, err := os.OpenFile(f.path, flag, f.perm)
if err != nil {
return err
}
- // set the filehandle on the struct
+ // set the file handle on the struct
f.fh = fh
+
return nil
}
-// ensure the file handle is closed if no lock is held
+// resetFh resets file handle:
+// - tries to close the file (ignore errors)
+// - sets fh to nil.
+func (f *Flock) resetFh() {
+ if f.fh == nil {
+ return
+ }
+
+ _ = f.fh.Close()
+
+ f.fh = nil
+}
+
+// ensure the file handle is closed if no lock is held.
func (f *Flock) ensureFhState() {
- if !f.l && !f.r && f.fh != nil {
- f.fh.Close()
- f.fh = nil
+ if f.l || f.r || f.fh == nil {
+ return
}
+
+ f.resetFh()
+}
+
+func (f *Flock) reset() {
+ f.l = false
+ f.r = false
+
+ f.resetFh()
}
diff --git a/vendor/github.com/gofrs/flock/flock_aix.go b/vendor/github.com/gofrs/flock/flock_aix.go
deleted file mode 100644
index 7277c1b6b26..00000000000
--- a/vendor/github.com/gofrs/flock/flock_aix.go
+++ /dev/null
@@ -1,281 +0,0 @@
-// Copyright 2019 Tim Heckman. All rights reserved. Use of this source code is
-// governed by the BSD 3-Clause license that can be found in the LICENSE file.
-
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This code implements the filelock API using POSIX 'fcntl' locks, which attach
-// to an (inode, process) pair rather than a file descriptor. To avoid unlocking
-// files prematurely when the same file is opened through different descriptors,
-// we allow only one read-lock at a time.
-//
-// This code is adapted from the Go package:
-// cmd/go/internal/lockedfile/internal/filelock
-
-//+build aix
-
-package flock
-
-import (
- "errors"
- "io"
- "os"
- "sync"
- "syscall"
-
- "golang.org/x/sys/unix"
-)
-
-type lockType int16
-
-const (
- readLock lockType = unix.F_RDLCK
- writeLock lockType = unix.F_WRLCK
-)
-
-type cmdType int
-
-const (
- tryLock cmdType = unix.F_SETLK
- waitLock cmdType = unix.F_SETLKW
-)
-
-type inode = uint64
-
-type inodeLock struct {
- owner *Flock
- queue []<-chan *Flock
-}
-
-var (
- mu sync.Mutex
- inodes = map[*Flock]inode{}
- locks = map[inode]inodeLock{}
-)
-
-// Lock is a blocking call to try and take an exclusive file lock. It will wait
-// until it is able to obtain the exclusive file lock. It's recommended that
-// TryLock() be used over this function. This function may block the ability to
-// query the current Locked() or RLocked() status due to a RW-mutex lock.
-//
-// If we are already exclusive-locked, this function short-circuits and returns
-// immediately assuming it can take the mutex lock.
-//
-// If the *Flock has a shared lock (RLock), this may transparently replace the
-// shared lock with an exclusive lock on some UNIX-like operating systems. Be
-// careful when using exclusive locks in conjunction with shared locks
-// (RLock()), because calling Unlock() may accidentally release the exclusive
-// lock that was once a shared lock.
-func (f *Flock) Lock() error {
- return f.lock(&f.l, writeLock)
-}
-
-// RLock is a blocking call to try and take a shared file lock. It will wait
-// until it is able to obtain the shared file lock. It's recommended that
-// TryRLock() be used over this function. This function may block the ability to
-// query the current Locked() or RLocked() status due to a RW-mutex lock.
-//
-// If we are already shared-locked, this function short-circuits and returns
-// immediately assuming it can take the mutex lock.
-func (f *Flock) RLock() error {
- return f.lock(&f.r, readLock)
-}
-
-func (f *Flock) lock(locked *bool, flag lockType) error {
- f.m.Lock()
- defer f.m.Unlock()
-
- if *locked {
- return nil
- }
-
- if f.fh == nil {
- if err := f.setFh(); err != nil {
- return err
- }
- defer f.ensureFhState()
- }
-
- if _, err := f.doLock(waitLock, flag, true); err != nil {
- return err
- }
-
- *locked = true
- return nil
-}
-
-func (f *Flock) doLock(cmd cmdType, lt lockType, blocking bool) (bool, error) {
- // POSIX locks apply per inode and process, and the lock for an inode is
- // released when *any* descriptor for that inode is closed. So we need to
- // synchronize access to each inode internally, and must serialize lock and
- // unlock calls that refer to the same inode through different descriptors.
- fi, err := f.fh.Stat()
- if err != nil {
- return false, err
- }
- ino := inode(fi.Sys().(*syscall.Stat_t).Ino)
-
- mu.Lock()
- if i, dup := inodes[f]; dup && i != ino {
- mu.Unlock()
- return false, &os.PathError{
- Path: f.Path(),
- Err: errors.New("inode for file changed since last Lock or RLock"),
- }
- }
-
- inodes[f] = ino
-
- var wait chan *Flock
- l := locks[ino]
- if l.owner == f {
- // This file already owns the lock, but the call may change its lock type.
- } else if l.owner == nil {
- // No owner: it's ours now.
- l.owner = f
- } else if !blocking {
- // Already owned: cannot take the lock.
- mu.Unlock()
- return false, nil
- } else {
- // Already owned: add a channel to wait on.
- wait = make(chan *Flock)
- l.queue = append(l.queue, wait)
- }
- locks[ino] = l
- mu.Unlock()
-
- if wait != nil {
- wait <- f
- }
-
- err = setlkw(f.fh.Fd(), cmd, lt)
-
- if err != nil {
- f.doUnlock()
- if cmd == tryLock && err == unix.EACCES {
- return false, nil
- }
- return false, err
- }
-
- return true, nil
-}
-
-func (f *Flock) Unlock() error {
- f.m.Lock()
- defer f.m.Unlock()
-
- // if we aren't locked or if the lockfile instance is nil
- // just return a nil error because we are unlocked
- if (!f.l && !f.r) || f.fh == nil {
- return nil
- }
-
- if err := f.doUnlock(); err != nil {
- return err
- }
-
- f.fh.Close()
-
- f.l = false
- f.r = false
- f.fh = nil
-
- return nil
-}
-
-func (f *Flock) doUnlock() (err error) {
- var owner *Flock
- mu.Lock()
- ino, ok := inodes[f]
- if ok {
- owner = locks[ino].owner
- }
- mu.Unlock()
-
- if owner == f {
- err = setlkw(f.fh.Fd(), waitLock, unix.F_UNLCK)
- }
-
- mu.Lock()
- l := locks[ino]
- if len(l.queue) == 0 {
- // No waiters: remove the map entry.
- delete(locks, ino)
- } else {
- // The first waiter is sending us their file now.
- // Receive it and update the queue.
- l.owner = <-l.queue[0]
- l.queue = l.queue[1:]
- locks[ino] = l
- }
- delete(inodes, f)
- mu.Unlock()
-
- return err
-}
-
-// TryLock is the preferred function for taking an exclusive file lock. This
-// function takes an RW-mutex lock before it tries to lock the file, so there is
-// the possibility that this function may block for a short time if another
-// goroutine is trying to take any action.
-//
-// The actual file lock is non-blocking. If we are unable to get the exclusive
-// file lock, the function will return false instead of waiting for the lock. If
-// we get the lock, we also set the *Flock instance as being exclusive-locked.
-func (f *Flock) TryLock() (bool, error) {
- return f.try(&f.l, writeLock)
-}
-
-// TryRLock is the preferred function for taking a shared file lock. This
-// function takes an RW-mutex lock before it tries to lock the file, so there is
-// the possibility that this function may block for a short time if another
-// goroutine is trying to take any action.
-//
-// The actual file lock is non-blocking. If we are unable to get the shared file
-// lock, the function will return false instead of waiting for the lock. If we
-// get the lock, we also set the *Flock instance as being share-locked.
-func (f *Flock) TryRLock() (bool, error) {
- return f.try(&f.r, readLock)
-}
-
-func (f *Flock) try(locked *bool, flag lockType) (bool, error) {
- f.m.Lock()
- defer f.m.Unlock()
-
- if *locked {
- return true, nil
- }
-
- if f.fh == nil {
- if err := f.setFh(); err != nil {
- return false, err
- }
- defer f.ensureFhState()
- }
-
- haslock, err := f.doLock(tryLock, flag, false)
- if err != nil {
- return false, err
- }
-
- *locked = haslock
- return haslock, nil
-}
-
-// setlkw calls FcntlFlock with cmd for the entire file indicated by fd.
-func setlkw(fd uintptr, cmd cmdType, lt lockType) error {
- for {
- err := unix.FcntlFlock(fd, int(cmd), &unix.Flock_t{
- Type: int16(lt),
- Whence: io.SeekStart,
- Start: 0,
- Len: 0, // All bytes.
- })
- if err != unix.EINTR {
- return err
- }
- }
-}
diff --git a/vendor/github.com/gofrs/flock/flock_others.go b/vendor/github.com/gofrs/flock/flock_others.go
new file mode 100644
index 00000000000..18b14f1bd7a
--- /dev/null
+++ b/vendor/github.com/gofrs/flock/flock_others.go
@@ -0,0 +1,40 @@
+//go:build (!unix && !windows) || plan9
+
+package flock
+
+import (
+ "errors"
+ "io/fs"
+)
+
+func (f *Flock) Lock() error {
+ return &fs.PathError{
+ Op: "Lock",
+ Path: f.Path(),
+ Err: errors.ErrUnsupported,
+ }
+}
+
+func (f *Flock) RLock() error {
+ return &fs.PathError{
+ Op: "RLock",
+ Path: f.Path(),
+ Err: errors.ErrUnsupported,
+ }
+}
+
+func (f *Flock) Unlock() error {
+ return &fs.PathError{
+ Op: "Unlock",
+ Path: f.Path(),
+ Err: errors.ErrUnsupported,
+ }
+}
+
+func (f *Flock) TryLock() (bool, error) {
+ return false, f.Lock()
+}
+
+func (f *Flock) TryRLock() (bool, error) {
+ return false, f.RLock()
+}
diff --git a/vendor/github.com/gofrs/flock/flock_unix.go b/vendor/github.com/gofrs/flock/flock_unix.go
index c315a3e2908..cf8919c7add 100644
--- a/vendor/github.com/gofrs/flock/flock_unix.go
+++ b/vendor/github.com/gofrs/flock/flock_unix.go
@@ -1,42 +1,44 @@
// Copyright 2015 Tim Heckman. All rights reserved.
+// Copyright 2018-2024 The Gofrs. All rights reserved.
// Use of this source code is governed by the BSD 3-Clause
// license that can be found in the LICENSE file.
-// +build !aix,!windows
+//go:build darwin || dragonfly || freebsd || illumos || linux || netbsd || openbsd
package flock
import (
+ "errors"
"os"
- "syscall"
+
+ "golang.org/x/sys/unix"
)
-// Lock is a blocking call to try and take an exclusive file lock. It will wait
-// until it is able to obtain the exclusive file lock. It's recommended that
-// TryLock() be used over this function. This function may block the ability to
-// query the current Locked() or RLocked() status due to a RW-mutex lock.
+// Lock is a blocking call to try and take an exclusive file lock.
+// It will wait until it is able to obtain the exclusive file lock.
+// It's recommended that TryLock() be used over this function.
+// This function may block the ability to query the current Locked() or RLocked() status due to a RW-mutex lock.
//
-// If we are already exclusive-locked, this function short-circuits and returns
-// immediately assuming it can take the mutex lock.
+// If we are already exclusive-locked,
+// this function short-circuits and returns immediately assuming it can take the mutex lock.
//
-// If the *Flock has a shared lock (RLock), this may transparently replace the
-// shared lock with an exclusive lock on some UNIX-like operating systems. Be
-// careful when using exclusive locks in conjunction with shared locks
-// (RLock()), because calling Unlock() may accidentally release the exclusive
-// lock that was once a shared lock.
+// If the *Flock has a shared lock (RLock),
+// this may transparently replace the shared lock with an exclusive lock on some UNIX-like operating systems.
+// Be careful when using exclusive locks in conjunction with shared locks (RLock()),
+// because calling Unlock() may accidentally release the exclusive lock that was once a shared lock.
func (f *Flock) Lock() error {
- return f.lock(&f.l, syscall.LOCK_EX)
+ return f.lock(&f.l, unix.LOCK_EX)
}
-// RLock is a blocking call to try and take a shared file lock. It will wait
-// until it is able to obtain the shared file lock. It's recommended that
-// TryRLock() be used over this function. This function may block the ability to
-// query the current Locked() or RLocked() status due to a RW-mutex lock.
+// RLock is a blocking call to try and take a shared file lock.
+// It will wait until it is able to obtain the shared file lock.
+// It's recommended that TryRLock() be used over this function.
+// This function may block the ability to query the current Locked() or RLocked() status due to a RW-mutex lock.
//
-// If we are already shared-locked, this function short-circuits and returns
-// immediately assuming it can take the mutex lock.
+// If we are already shared-locked,
+// this function short-circuits and returns immediately assuming it can take the mutex lock.
func (f *Flock) RLock() error {
- return f.lock(&f.r, syscall.LOCK_SH)
+ return f.lock(&f.r, unix.LOCK_SH)
}
func (f *Flock) lock(locked *bool, flag int) error {
@@ -48,13 +50,15 @@ func (f *Flock) lock(locked *bool, flag int) error {
}
if f.fh == nil {
- if err := f.setFh(); err != nil {
+ if err := f.setFh(f.flag); err != nil {
return err
}
+
defer f.ensureFhState()
}
- if err := syscall.Flock(int(f.fh.Fd()), flag); err != nil {
+ err := unix.Flock(int(f.fh.Fd()), flag)
+ if err != nil {
shouldRetry, reopenErr := f.reopenFDOnError(err)
if reopenErr != nil {
return reopenErr
@@ -64,71 +68,74 @@ func (f *Flock) lock(locked *bool, flag int) error {
return err
}
- if err = syscall.Flock(int(f.fh.Fd()), flag); err != nil {
+ err = unix.Flock(int(f.fh.Fd()), flag)
+ if err != nil {
return err
}
}
*locked = true
+
return nil
}
-// Unlock is a function to unlock the file. This file takes a RW-mutex lock, so
-// while it is running the Locked() and RLocked() functions will be blocked.
+// Unlock is a function to unlock the file.
+// This file takes a RW-mutex lock,
+// so while it is running the Locked() and RLocked() functions will be blocked.
//
-// This function short-circuits if we are unlocked already. If not, it calls
-// syscall.LOCK_UN on the file and closes the file descriptor. It does not
-// remove the file from disk. It's up to your application to do.
+// This function short-circuits if we are unlocked already.
+// If not, it calls unix.LOCK_UN on the file and closes the file descriptor.
+// It does not remove the file from disk. It's up to your application to do.
//
-// Please note, if your shared lock became an exclusive lock this may
-// unintentionally drop the exclusive lock if called by the consumer that
-// believes they have a shared lock. Please see Lock() for more details.
+// Please note,
+// if your shared lock became an exclusive lock,
+// this may unintentionally drop the exclusive lock if called by the consumer that believes they have a shared lock.
+// Please see Lock() for more details.
func (f *Flock) Unlock() error {
f.m.Lock()
defer f.m.Unlock()
- // if we aren't locked or if the lockfile instance is nil
- // just return a nil error because we are unlocked
+ // If we aren't locked or if the lockfile instance is nil
+ // just return a nil error because we are unlocked.
if (!f.l && !f.r) || f.fh == nil {
return nil
}
- // mark the file as unlocked
- if err := syscall.Flock(int(f.fh.Fd()), syscall.LOCK_UN); err != nil {
+ // Mark the file as unlocked.
+ err := unix.Flock(int(f.fh.Fd()), unix.LOCK_UN)
+ if err != nil {
return err
}
- f.fh.Close()
-
- f.l = false
- f.r = false
- f.fh = nil
+ f.reset()
return nil
}
-// TryLock is the preferred function for taking an exclusive file lock. This
-// function takes an RW-mutex lock before it tries to lock the file, so there is
-// the possibility that this function may block for a short time if another
-// goroutine is trying to take any action.
+// TryLock is the preferred function for taking an exclusive file lock.
+// This function takes an RW-mutex lock before it tries to lock the file,
+// so there is the possibility that this function may block for a short time
+// if another goroutine is trying to take any action.
//
-// The actual file lock is non-blocking. If we are unable to get the exclusive
-// file lock, the function will return false instead of waiting for the lock. If
-// we get the lock, we also set the *Flock instance as being exclusive-locked.
+// The actual file lock is non-blocking.
+// If we are unable to get the exclusive file lock,
+// the function will return false instead of waiting for the lock.
+// If we get the lock, we also set the *Flock instance as being exclusive-locked.
func (f *Flock) TryLock() (bool, error) {
- return f.try(&f.l, syscall.LOCK_EX)
+ return f.try(&f.l, unix.LOCK_EX)
}
-// TryRLock is the preferred function for taking a shared file lock. This
-// function takes an RW-mutex lock before it tries to lock the file, so there is
-// the possibility that this function may block for a short time if another
-// goroutine is trying to take any action.
+// TryRLock is the preferred function for taking a shared file lock.
+// This function takes an RW-mutex lock before it tries to lock the file,
+// so there is the possibility that this function may block for a short time
+// if another goroutine is trying to take any action.
//
-// The actual file lock is non-blocking. If we are unable to get the shared file
-// lock, the function will return false instead of waiting for the lock. If we
-// get the lock, we also set the *Flock instance as being share-locked.
+// The actual file lock is non-blocking.
+// If we are unable to get the shared file lock,
+// the function will return false instead of waiting for the lock.
+// If we get the lock, we also set the *Flock instance as being share-locked.
func (f *Flock) TryRLock() (bool, error) {
- return f.try(&f.r, syscall.LOCK_SH)
+ return f.try(&f.r, unix.LOCK_SH)
}
func (f *Flock) try(locked *bool, flag int) (bool, error) {
@@ -140,25 +147,28 @@ func (f *Flock) try(locked *bool, flag int) (bool, error) {
}
if f.fh == nil {
- if err := f.setFh(); err != nil {
+ if err := f.setFh(f.flag); err != nil {
return false, err
}
+
defer f.ensureFhState()
}
var retried bool
retry:
- err := syscall.Flock(int(f.fh.Fd()), flag|syscall.LOCK_NB)
+ err := unix.Flock(int(f.fh.Fd()), flag|unix.LOCK_NB)
- switch err {
- case syscall.EWOULDBLOCK:
+ switch {
+ case errors.Is(err, unix.EWOULDBLOCK):
return false, nil
- case nil:
+ case err == nil:
*locked = true
return true, nil
}
+
if !retried {
- if shouldRetry, reopenErr := f.reopenFDOnError(err); reopenErr != nil {
+ shouldRetry, reopenErr := f.reopenFDOnError(err)
+ if reopenErr != nil {
return false, reopenErr
} else if shouldRetry {
retried = true
@@ -169,29 +179,32 @@ retry:
return false, err
}
-// reopenFDOnError determines whether we should reopen the file handle
-// in readwrite mode and try again. This comes from util-linux/sys-utils/flock.c:
-// Since Linux 3.4 (commit 55725513)
-// Probably NFSv4 where flock() is emulated by fcntl().
+// reopenFDOnError determines whether we should reopen the file handle in readwrite mode and try again.
+// This comes from `util-linux/sys-utils/flock.c`:
+// > Since Linux 3.4 (commit 55725513)
+// > Probably NFSv4 where flock() is emulated by fcntl().
+// > https://github.com/util-linux/util-linux/blob/198e920aa24743ef6ace4e07cf6237de527f9261/sys-utils/flock.c#L374-L390
func (f *Flock) reopenFDOnError(err error) (bool, error) {
- if err != syscall.EIO && err != syscall.EBADF {
+ if !errors.Is(err, unix.EIO) && !errors.Is(err, unix.EBADF) {
return false, nil
}
- if st, err := f.fh.Stat(); err == nil {
- // if the file is able to be read and written
- if st.Mode()&0600 == 0600 {
- f.fh.Close()
- f.fh = nil
-
- // reopen in read-write mode and set the filehandle
- fh, err := os.OpenFile(f.path, os.O_CREATE|os.O_RDWR, os.FileMode(0600))
- if err != nil {
- return false, err
- }
- f.fh = fh
- return true, nil
- }
+
+ st, err := f.fh.Stat()
+ if err != nil {
+ return false, nil
+ }
+
+ if st.Mode()&f.perm != f.perm {
+ return false, nil
+ }
+
+ f.resetFh()
+
+ // reopen in read-write mode and set the file handle
+ err = f.setFh(f.flag | os.O_RDWR)
+ if err != nil {
+ return false, err
}
- return false, nil
+ return true, nil
}
diff --git a/vendor/github.com/gofrs/flock/flock_unix_fcntl.go b/vendor/github.com/gofrs/flock/flock_unix_fcntl.go
new file mode 100644
index 00000000000..ea007b47d9a
--- /dev/null
+++ b/vendor/github.com/gofrs/flock/flock_unix_fcntl.go
@@ -0,0 +1,393 @@
+// Copyright 2015 Tim Heckman. All rights reserved.
+// Copyright 2018-2024 The Gofrs. All rights reserved.
+// Use of this source code is governed by the BSD 3-Clause
+// license that can be found in the LICENSE file.
+
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This code implements the filelock API using POSIX 'fcntl' locks,
+// which attach to an (inode, process) pair rather than a file descriptor.
+// To avoid unlocking files prematurely when the same file is opened through different descriptors,
+// we allow only one read-lock at a time.
+//
+// This code is adapted from the Go package (go.22):
+// https://github.com/golang/go/blob/release-branch.go1.22/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go
+
+//go:build aix || (solaris && !illumos)
+
+package flock
+
+import (
+ "errors"
+ "io"
+ "io/fs"
+ "math/rand"
+ "sync"
+ "syscall"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+// https://github.com/golang/go/blob/09aeb6e33ab426eff4676a3baf694d5a3019e9fc/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go#L28
+type lockType int16
+
+// String returns the name of the function corresponding to lt
+// (Lock, RLock, or Unlock).
+// https://github.com/golang/go/blob/09aeb6e33ab426eff4676a3baf694d5a3019e9fc/src/cmd/go/internal/lockedfile/internal/filelock/filelock.go#L67
+func (lt lockType) String() string {
+ switch lt {
+ case readLock:
+ return "RLock"
+ case writeLock:
+ return "Lock"
+ default:
+ return "Unlock"
+ }
+}
+
+// https://github.com/golang/go/blob/09aeb6e33ab426eff4676a3baf694d5a3019e9fc/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go#L30-L33
+const (
+ readLock lockType = unix.F_RDLCK
+ writeLock lockType = unix.F_WRLCK
+)
+
+// https://github.com/golang/go/blob/09aeb6e33ab426eff4676a3baf694d5a3019e9fc/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go#L35
+type inode = uint64
+
+// https://github.com/golang/go/blob/09aeb6e33ab426eff4676a3baf694d5a3019e9fc/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go#L37-L40
+type inodeLock struct {
+ owner *Flock
+ queue []<-chan *Flock
+}
+
+type cmdType int
+
+const (
+ tryLock cmdType = unix.F_SETLK
+ waitLock cmdType = unix.F_SETLKW
+)
+
+var (
+ mu sync.Mutex
+ inodes = map[*Flock]inode{}
+ locks = map[inode]inodeLock{}
+)
+
+// Lock is a blocking call to try and take an exclusive file lock.
+// It will wait until it is able to obtain the exclusive file lock.
+// It's recommended that TryLock() be used over this function.
+// This function may block the ability to query the current Locked() or RLocked() status due to a RW-mutex lock.
+//
+// If we are already exclusive-locked, this function short-circuits and
+// returns immediately assuming it can take the mutex lock.
+//
+// If the *Flock has a shared lock (RLock),
+// this may transparently replace the shared lock with an exclusive lock on some UNIX-like operating systems.
+// Be careful when using exclusive locks in conjunction with shared locks (RLock()),
+// because calling Unlock() may accidentally release the exclusive lock that was once a shared lock.
+func (f *Flock) Lock() error {
+ return f.lock(&f.l, writeLock)
+}
+
+// RLock is a blocking call to try and take a shared file lock.
+// It will wait until it is able to obtain the shared file lock.
+// It's recommended that TryRLock() be used over this function.
+// This function may block the ability to query the current Locked() or RLocked() status due to a RW-mutex lock.
+//
+// If we are already shared-locked, this function short-circuits and
+// returns immediately assuming it can take the mutex lock.
+func (f *Flock) RLock() error {
+ return f.lock(&f.r, readLock)
+}
+
+func (f *Flock) lock(locked *bool, flag lockType) error {
+ f.m.Lock()
+ defer f.m.Unlock()
+
+ if *locked {
+ return nil
+ }
+
+ if f.fh == nil {
+ if err := f.setFh(f.flag); err != nil {
+ return err
+ }
+
+ defer f.ensureFhState()
+ }
+
+ _, err := f.doLock(waitLock, flag, true)
+ if err != nil {
+ return err
+ }
+
+ *locked = true
+
+ return nil
+}
+
+// https://github.com/golang/go/blob/09aeb6e33ab426eff4676a3baf694d5a3019e9fc/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go#L48
+func (f *Flock) doLock(cmd cmdType, lt lockType, blocking bool) (bool, error) {
+ // POSIX locks apply per inode and process,
+ // and the lock for an inode is released when *any* descriptor for that inode is closed.
+ // So we need to synchronize access to each inode internally,
+ // and must serialize lock and unlock calls that refer to the same inode through different descriptors.
+ fi, err := f.fh.Stat()
+ if err != nil {
+ return false, err
+ }
+
+ // Note(ldez): don't replace `syscall.Stat_t` by `unix.Stat_t` because `FileInfo.Sys()` returns `syscall.Stat_t`
+ ino := fi.Sys().(*syscall.Stat_t).Ino
+
+ mu.Lock()
+
+ if i, dup := inodes[f]; dup && i != ino {
+ mu.Unlock()
+ return false, &fs.PathError{
+ Op: lt.String(),
+ Path: f.Path(),
+ Err: errors.New("inode for file changed since last Lock or RLock"),
+ }
+ }
+
+ inodes[f] = ino
+
+ var wait chan *Flock
+
+ l := locks[ino]
+
+ switch {
+ case l.owner == f:
+ // This file already owns the lock, but the call may change its lock type.
+ case l.owner == nil:
+ // No owner: it's ours now.
+ l.owner = f
+
+ case !blocking:
+ // Already owned: cannot take the lock.
+ mu.Unlock()
+ return false, nil
+
+ default:
+ // Already owned: add a channel to wait on.
+ wait = make(chan *Flock)
+ l.queue = append(l.queue, wait)
+ }
+
+ locks[ino] = l
+
+ mu.Unlock()
+
+ if wait != nil {
+ wait <- f
+ }
+
+ // Spurious EDEADLK errors arise on platforms that compute deadlock graphs at
+ // the process, rather than thread, level. Consider processes P and Q, with
+ // threads P.1, P.2, and Q.3. The following trace is NOT a deadlock, but will be
+ // reported as a deadlock on systems that consider only process granularity:
+ //
+ // P.1 locks file A.
+ // Q.3 locks file B.
+ // Q.3 blocks on file A.
+ // P.2 blocks on file B. (This is erroneously reported as a deadlock.)
+ // P.1 unlocks file A.
+ // Q.3 unblocks and locks file A.
+ // Q.3 unlocks files A and B.
+ // P.2 unblocks and locks file B.
+ // P.2 unlocks file B.
+ //
+ // These spurious errors were observed in practice on AIX and Solaris in
+ // cmd/go: see https://golang.org/issue/32817.
+ //
+ // We work around this bug by treating EDEADLK as always spurious. If there
+ // really is a lock-ordering bug between the interacting processes, it will
+ // become a livelock instead, but that's not appreciably worse than if we had
+ // a proper flock implementation (which generally does not even attempt to
+ // diagnose deadlocks).
+ //
+ // In the above example, that changes the trace to:
+ //
+ // P.1 locks file A.
+ // Q.3 locks file B.
+ // Q.3 blocks on file A.
+ // P.2 spuriously fails to lock file B and goes to sleep.
+ // P.1 unlocks file A.
+ // Q.3 unblocks and locks file A.
+ // Q.3 unlocks files A and B.
+ // P.2 wakes up and locks file B.
+ // P.2 unlocks file B.
+ //
+ // We know that the retry loop will not introduce a *spurious* livelock
+ // because, according to the POSIX specification, EDEADLK is only to be
+ // returned when “the lock is blocked by a lock from another process”.
+ // If that process is blocked on some lock that we are holding, then the
+ // resulting livelock is due to a real deadlock (and would manifest as such
+ // when using, for example, the flock implementation of this package).
+ // If the other process is *not* blocked on some other lock that we are
+ // holding, then it will eventually release the requested lock.
+
+ nextSleep := 1 * time.Millisecond
+ const maxSleep = 500 * time.Millisecond
+ for {
+ err = setlkw(f.fh.Fd(), cmd, lt)
+ if !errors.Is(err, unix.EDEADLK) {
+ break
+ }
+
+ time.Sleep(nextSleep)
+
+ nextSleep += nextSleep
+ if nextSleep > maxSleep {
+ nextSleep = maxSleep
+ }
+ // Apply 10% jitter to avoid synchronizing collisions when we finally unblock.
+ nextSleep += time.Duration((0.1*rand.Float64() - 0.05) * float64(nextSleep))
+ }
+
+ if err != nil {
+ f.doUnlock()
+
+ if cmd == tryLock && errors.Is(err, unix.EACCES) {
+ return false, nil
+ }
+
+ return false, &fs.PathError{
+ Op: lt.String(),
+ Path: f.Path(),
+ Err: err,
+ }
+ }
+
+ return true, nil
+}
+
+func (f *Flock) Unlock() error {
+ f.m.Lock()
+ defer f.m.Unlock()
+
+ // If we aren't locked or if the lockfile instance is nil
+ // just return a nil error because we are unlocked.
+ if (!f.l && !f.r) || f.fh == nil {
+ return nil
+ }
+
+ if err := f.doUnlock(); err != nil {
+ return err
+ }
+
+ f.reset()
+
+ return nil
+}
+
+// https://github.com/golang/go/blob/09aeb6e33ab426eff4676a3baf694d5a3019e9fc/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go#L163
+func (f *Flock) doUnlock() (err error) {
+ var owner *Flock
+
+ mu.Lock()
+
+ ino, ok := inodes[f]
+ if ok {
+ owner = locks[ino].owner
+ }
+
+ mu.Unlock()
+
+ if owner == f {
+ err = setlkw(f.fh.Fd(), waitLock, unix.F_UNLCK)
+ }
+
+ mu.Lock()
+
+ l := locks[ino]
+
+ if len(l.queue) == 0 {
+ // No waiters: remove the map entry.
+ delete(locks, ino)
+ } else {
+ // The first waiter is sending us their file now.
+ // Receive it and update the queue.
+ l.owner = <-l.queue[0]
+ l.queue = l.queue[1:]
+ locks[ino] = l
+ }
+
+ delete(inodes, f)
+
+ mu.Unlock()
+
+ return err
+}
+
+// TryLock is the preferred function for taking an exclusive file lock.
+// This function takes an RW-mutex lock before it tries to lock the file,
+// so there is the possibility that this function may block for a short time
+// if another goroutine is trying to take any action.
+//
+// The actual file lock is non-blocking.
+// If we are unable to get the exclusive file lock,
+// the function will return false instead of waiting for the lock.
+// If we get the lock, we also set the *Flock instance as being exclusive-locked.
+func (f *Flock) TryLock() (bool, error) {
+ return f.try(&f.l, writeLock)
+}
+
+// TryRLock is the preferred function for taking a shared file lock.
+// This function takes an RW-mutex lock before it tries to lock the file,
+// so there is the possibility that this function may block for a short time
+// if another goroutine is trying to take any action.
+//
+// The actual file lock is non-blocking.
+// If we are unable to get the shared file lock,
+// the function will return false instead of waiting for the lock.
+// If we get the lock, we also set the *Flock instance as being share-locked.
+func (f *Flock) TryRLock() (bool, error) {
+ return f.try(&f.r, readLock)
+}
+
+func (f *Flock) try(locked *bool, flag lockType) (bool, error) {
+ f.m.Lock()
+ defer f.m.Unlock()
+
+ if *locked {
+ return true, nil
+ }
+
+ if f.fh == nil {
+ if err := f.setFh(f.flag); err != nil {
+ return false, err
+ }
+
+ defer f.ensureFhState()
+ }
+
+ hasLock, err := f.doLock(tryLock, flag, false)
+ if err != nil {
+ return false, err
+ }
+
+ *locked = hasLock
+
+ return hasLock, nil
+}
+
+// setlkw calls FcntlFlock with cmd for the entire file indicated by fd.
+// https://github.com/golang/go/blob/09aeb6e33ab426eff4676a3baf694d5a3019e9fc/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go#L198
+func setlkw(fd uintptr, cmd cmdType, lt lockType) error {
+ for {
+ err := unix.FcntlFlock(fd, int(cmd), &unix.Flock_t{
+ Type: int16(lt),
+ Whence: io.SeekStart,
+ Start: 0,
+ Len: 0, // All bytes.
+ })
+ if !errors.Is(err, unix.EINTR) {
+ return err
+ }
+ }
+}
diff --git a/vendor/github.com/gofrs/flock/flock_winapi.go b/vendor/github.com/gofrs/flock/flock_winapi.go
deleted file mode 100644
index fe405a255ae..00000000000
--- a/vendor/github.com/gofrs/flock/flock_winapi.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2015 Tim Heckman. All rights reserved.
-// Use of this source code is governed by the BSD 3-Clause
-// license that can be found in the LICENSE file.
-
-// +build windows
-
-package flock
-
-import (
- "syscall"
- "unsafe"
-)
-
-var (
- kernel32, _ = syscall.LoadLibrary("kernel32.dll")
- procLockFileEx, _ = syscall.GetProcAddress(kernel32, "LockFileEx")
- procUnlockFileEx, _ = syscall.GetProcAddress(kernel32, "UnlockFileEx")
-)
-
-const (
- winLockfileFailImmediately = 0x00000001
- winLockfileExclusiveLock = 0x00000002
- winLockfileSharedLock = 0x00000000
-)
-
-// Use of 0x00000000 for the shared lock is a guess based on some the MS Windows
-// `LockFileEX` docs, which document the `LOCKFILE_EXCLUSIVE_LOCK` flag as:
-//
-// > The function requests an exclusive lock. Otherwise, it requests a shared
-// > lock.
-//
-// https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
-
-func lockFileEx(handle syscall.Handle, flags uint32, reserved uint32, numberOfBytesToLockLow uint32, numberOfBytesToLockHigh uint32, offset *syscall.Overlapped) (bool, syscall.Errno) {
- r1, _, errNo := syscall.Syscall6(
- uintptr(procLockFileEx),
- 6,
- uintptr(handle),
- uintptr(flags),
- uintptr(reserved),
- uintptr(numberOfBytesToLockLow),
- uintptr(numberOfBytesToLockHigh),
- uintptr(unsafe.Pointer(offset)))
-
- if r1 != 1 {
- if errNo == 0 {
- return false, syscall.EINVAL
- }
-
- return false, errNo
- }
-
- return true, 0
-}
-
-func unlockFileEx(handle syscall.Handle, reserved uint32, numberOfBytesToLockLow uint32, numberOfBytesToLockHigh uint32, offset *syscall.Overlapped) (bool, syscall.Errno) {
- r1, _, errNo := syscall.Syscall6(
- uintptr(procUnlockFileEx),
- 5,
- uintptr(handle),
- uintptr(reserved),
- uintptr(numberOfBytesToLockLow),
- uintptr(numberOfBytesToLockHigh),
- uintptr(unsafe.Pointer(offset)),
- 0)
-
- if r1 != 1 {
- if errNo == 0 {
- return false, syscall.EINVAL
- }
-
- return false, errNo
- }
-
- return true, 0
-}
diff --git a/vendor/github.com/gofrs/flock/flock_windows.go b/vendor/github.com/gofrs/flock/flock_windows.go
index ddb534ccef0..dfd31e15f50 100644
--- a/vendor/github.com/gofrs/flock/flock_windows.go
+++ b/vendor/github.com/gofrs/flock/flock_windows.go
@@ -1,35 +1,48 @@
// Copyright 2015 Tim Heckman. All rights reserved.
+// Copyright 2018-2024 The Gofrs. All rights reserved.
// Use of this source code is governed by the BSD 3-Clause
// license that can be found in the LICENSE file.
+//go:build windows
+
package flock
import (
- "syscall"
+ "errors"
+
+ "golang.org/x/sys/windows"
)
-// ErrorLockViolation is the error code returned from the Windows syscall when a
-// lock would block and you ask to fail immediately.
-const ErrorLockViolation syscall.Errno = 0x21 // 33
+// Use of 0x00000000 for the shared lock is a guess based on some the MS Windows `LockFileEX` docs,
+// which document the `LOCKFILE_EXCLUSIVE_LOCK` flag as:
+//
+// > The function requests an exclusive lock. Otherwise, it requests a shared lock.
+//
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
+const winLockfileSharedLock = 0x00000000
+
+// ErrorLockViolation is the error code returned from the Windows syscall when a lock would block,
+// and you ask to fail immediately.
+const ErrorLockViolation windows.Errno = 0x21 // 33
-// Lock is a blocking call to try and take an exclusive file lock. It will wait
-// until it is able to obtain the exclusive file lock. It's recommended that
-// TryLock() be used over this function. This function may block the ability to
-// query the current Locked() or RLocked() status due to a RW-mutex lock.
+// Lock is a blocking call to try and take an exclusive file lock.
+// It will wait until it is able to obtain the exclusive file lock.
+// It's recommended that TryLock() be used over this function.
+// This function may block the ability to query the current Locked() or RLocked() status due to a RW-mutex lock.
//
-// If we are already locked, this function short-circuits and returns
-// immediately assuming it can take the mutex lock.
+// If we are already locked, this function short-circuits and
+// returns immediately assuming it can take the mutex lock.
func (f *Flock) Lock() error {
- return f.lock(&f.l, winLockfileExclusiveLock)
+ return f.lock(&f.l, windows.LOCKFILE_EXCLUSIVE_LOCK)
}
-// RLock is a blocking call to try and take a shared file lock. It will wait
-// until it is able to obtain the shared file lock. It's recommended that
-// TryRLock() be used over this function. This function may block the ability to
-// query the current Locked() or RLocked() status due to a RW-mutex lock.
+// RLock is a blocking call to try and take a shared file lock.
+// It will wait until it is able to obtain the shared file lock.
+// It's recommended that TryRLock() be used over this function.
+// This function may block the ability to query the current Locked() or RLocked() status due to a RW-mutex lock.
//
-// If we are already locked, this function short-circuits and returns
-// immediately assuming it can take the mutex lock.
+// If we are already locked, this function short-circuits and
+// returns immediately assuming it can take the mutex lock.
func (f *Flock) RLock() error {
return f.lock(&f.r, winLockfileSharedLock)
}
@@ -43,26 +56,31 @@ func (f *Flock) lock(locked *bool, flag uint32) error {
}
if f.fh == nil {
- if err := f.setFh(); err != nil {
+ if err := f.setFh(f.flag); err != nil {
return err
}
+
defer f.ensureFhState()
}
- if _, errNo := lockFileEx(syscall.Handle(f.fh.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}); errNo > 0 {
- return errNo
+ err := windows.LockFileEx(windows.Handle(f.fh.Fd()), flag, 0, 1, 0, &windows.Overlapped{})
+ if err != nil && !errors.Is(err, windows.Errno(0)) {
+ return err
}
*locked = true
+
return nil
}
-// Unlock is a function to unlock the file. This file takes a RW-mutex lock, so
-// while it is running the Locked() and RLocked() functions will be blocked.
+// Unlock is a function to unlock the file.
+// This file takes a RW-mutex lock,
+// so while it is running the Locked() and RLocked() functions will be blocked.
//
-// This function short-circuits if we are unlocked already. If not, it calls
-// UnlockFileEx() on the file and closes the file descriptor. It does not remove
-// the file from disk. It's up to your application to do.
+// This function short-circuits if we are unlocked already.
+// If not, it calls UnlockFileEx() on the file and closes the file descriptor.
+// It does not remove the file from disk.
+// It's up to your application to do.
func (f *Flock) Unlock() error {
f.m.Lock()
defer f.m.Unlock()
@@ -74,39 +92,37 @@ func (f *Flock) Unlock() error {
}
// mark the file as unlocked
- if _, errNo := unlockFileEx(syscall.Handle(f.fh.Fd()), 0, 1, 0, &syscall.Overlapped{}); errNo > 0 {
- return errNo
+ err := windows.UnlockFileEx(windows.Handle(f.fh.Fd()), 0, 1, 0, &windows.Overlapped{})
+ if err != nil && !errors.Is(err, windows.Errno(0)) {
+ return err
}
- f.fh.Close()
-
- f.l = false
- f.r = false
- f.fh = nil
+ f.reset()
return nil
}
-// TryLock is the preferred function for taking an exclusive file lock. This
-// function does take a RW-mutex lock before it tries to lock the file, so there
-// is the possibility that this function may block for a short time if another
-// goroutine is trying to take any action.
+// TryLock is the preferred function for taking an exclusive file lock.
+// This function does take a RW-mutex lock before it tries to lock the file,
+// so there is the possibility that this function may block for a short time
+// if another goroutine is trying to take any action.
//
-// The actual file lock is non-blocking. If we are unable to get the exclusive
-// file lock, the function will return false instead of waiting for the lock. If
-// we get the lock, we also set the *Flock instance as being exclusive-locked.
+// The actual file lock is non-blocking.
+// If we are unable to get the exclusive file lock,
+// the function will return false instead of waiting for the lock.
+// If we get the lock, we also set the *Flock instance as being exclusive-locked.
func (f *Flock) TryLock() (bool, error) {
- return f.try(&f.l, winLockfileExclusiveLock)
+ return f.try(&f.l, windows.LOCKFILE_EXCLUSIVE_LOCK)
}
-// TryRLock is the preferred function for taking a shared file lock. This
-// function does take a RW-mutex lock before it tries to lock the file, so there
-// is the possibility that this function may block for a short time if another
-// goroutine is trying to take any action.
+// TryRLock is the preferred function for taking a shared file lock.
+// This function does take a RW-mutex lock before it tries to lock the file,
+// so there is the possibility that this function may block for a short time if another goroutine is trying to take any action.
//
-// The actual file lock is non-blocking. If we are unable to get the shared file
-// lock, the function will return false instead of waiting for the lock. If we
-// get the lock, we also set the *Flock instance as being shared-locked.
+// The actual file lock is non-blocking.
+// If we are unable to get the shared file lock,
+// the function will return false instead of waiting for the lock.
+// If we get the lock, we also set the *Flock instance as being shared-locked.
func (f *Flock) TryRLock() (bool, error) {
return f.try(&f.r, winLockfileSharedLock)
}
@@ -120,20 +136,20 @@ func (f *Flock) try(locked *bool, flag uint32) (bool, error) {
}
if f.fh == nil {
- if err := f.setFh(); err != nil {
+ if err := f.setFh(f.flag); err != nil {
return false, err
}
+
defer f.ensureFhState()
}
- _, errNo := lockFileEx(syscall.Handle(f.fh.Fd()), flag|winLockfileFailImmediately, 0, 1, 0, &syscall.Overlapped{})
-
- if errNo > 0 {
- if errNo == ErrorLockViolation || errNo == syscall.ERROR_IO_PENDING {
+ err := windows.LockFileEx(windows.Handle(f.fh.Fd()), flag|windows.LOCKFILE_FAIL_IMMEDIATELY, 0, 1, 0, &windows.Overlapped{})
+ if err != nil && !errors.Is(err, windows.Errno(0)) {
+ if errors.Is(err, ErrorLockViolation) || errors.Is(err, windows.ERROR_IO_PENDING) {
return false, nil
}
- return false, errNo
+ return false, err
}
*locked = true
diff --git a/vendor/github.com/huandu/xstrings/README.md b/vendor/github.com/huandu/xstrings/README.md
index 750c3c7eb69..e809c79abc5 100644
--- a/vendor/github.com/huandu/xstrings/README.md
+++ b/vendor/github.com/huandu/xstrings/README.md
@@ -39,8 +39,8 @@ _Keep this table sorted by Function in ascending order._
| [Count](https://godoc.org/github.com/huandu/xstrings#Count) | `String#count` in Ruby | [#16](https://github.com/huandu/xstrings/issues/16) |
| [Delete](https://godoc.org/github.com/huandu/xstrings#Delete) | `String#delete` in Ruby | [#17](https://github.com/huandu/xstrings/issues/17) |
| [ExpandTabs](https://godoc.org/github.com/huandu/xstrings#ExpandTabs) | `str.expandtabs` in Python | [#27](https://github.com/huandu/xstrings/issues/27) |
-| [FirstRuneToLower](https://godoc.org/github.com/huandu/xstrings#FirstRuneToLower) | `lcfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) |
-| [FirstRuneToUpper](https://godoc.org/github.com/huandu/xstrings#FirstRuneToUpper) | `String#capitalize` in Ruby; `ucfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) |
+| [FirstRuneToLower](https://godoc.org/github.com/huandu/xstrings#FirstRuneToLower) | `lcfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) |
+| [FirstRuneToUpper](https://godoc.org/github.com/huandu/xstrings#FirstRuneToUpper) | `String#capitalize` in Ruby; `ucfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) |
| [Insert](https://godoc.org/github.com/huandu/xstrings#Insert) | `String#insert` in Ruby | [#18](https://github.com/huandu/xstrings/issues/18) |
| [LastPartition](https://godoc.org/github.com/huandu/xstrings#LastPartition) | `str.rpartition` in Python; `String#rpartition` in Ruby | [#19](https://github.com/huandu/xstrings/issues/19) |
| [LeftJustify](https://godoc.org/github.com/huandu/xstrings#LeftJustify) | `str.ljust` in Python; `String#ljust` in Ruby | [#28](https://github.com/huandu/xstrings/issues/28) |
@@ -50,14 +50,15 @@ _Keep this table sorted by Function in ascending order._
| [RightJustify](https://godoc.org/github.com/huandu/xstrings#RightJustify) | `str.rjust` in Python; `String#rjust` in Ruby | [#29](https://github.com/huandu/xstrings/issues/29) |
| [RuneWidth](https://godoc.org/github.com/huandu/xstrings#RuneWidth) | - | [#27](https://github.com/huandu/xstrings/issues/27) |
| [Scrub](https://godoc.org/github.com/huandu/xstrings#Scrub) | `String#scrub` in Ruby | [#20](https://github.com/huandu/xstrings/issues/20) |
-| [Shuffle](https://godoc.org/github.com/huandu/xstrings#Shuffle) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) |
-| [ShuffleSource](https://godoc.org/github.com/huandu/xstrings#ShuffleSource) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) |
+| [Shuffle](https://godoc.org/github.com/huandu/xstrings#Shuffle) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) |
+| [ShuffleSource](https://godoc.org/github.com/huandu/xstrings#ShuffleSource) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) |
| [Slice](https://godoc.org/github.com/huandu/xstrings#Slice) | `mb_substr` in PHP | [#9](https://github.com/huandu/xstrings/issues/9) |
| [Squeeze](https://godoc.org/github.com/huandu/xstrings#Squeeze) | `String#squeeze` in Ruby | [#11](https://github.com/huandu/xstrings/issues/11) |
| [Successor](https://godoc.org/github.com/huandu/xstrings#Successor) | `String#succ` or `String#next` in Ruby | [#22](https://github.com/huandu/xstrings/issues/22) |
| [SwapCase](https://godoc.org/github.com/huandu/xstrings#SwapCase) | `str.swapcase` in Python; `String#swapcase` in Ruby | [#12](https://github.com/huandu/xstrings/issues/12) |
| [ToCamelCase](https://godoc.org/github.com/huandu/xstrings#ToCamelCase) | `String#camelize` in RoR | [#1](https://github.com/huandu/xstrings/issues/1) |
| [ToKebab](https://godoc.org/github.com/huandu/xstrings#ToKebabCase) | - | [#41](https://github.com/huandu/xstrings/issues/41) |
+| [ToPascalCase](https://godoc.org/github.com/huandu/xstrings#ToPascalCase) | - | [#1](https://github.com/huandu/xstrings/issues/1) |
| [ToSnakeCase](https://godoc.org/github.com/huandu/xstrings#ToSnakeCase) | `String#underscore` in RoR | [#1](https://github.com/huandu/xstrings/issues/1) |
| [Translate](https://godoc.org/github.com/huandu/xstrings#Translate) | `str.translate` in Python; `String#tr` in Ruby; `strtr` in PHP; `tr///` in Perl | [#21](https://github.com/huandu/xstrings/issues/21) |
| [Width](https://godoc.org/github.com/huandu/xstrings#Width) | `mb_strwidth` in PHP | [#26](https://github.com/huandu/xstrings/issues/26) |
diff --git a/vendor/github.com/huandu/xstrings/convert.go b/vendor/github.com/huandu/xstrings/convert.go
index cba0d072520..5d8cfee470b 100644
--- a/vendor/github.com/huandu/xstrings/convert.go
+++ b/vendor/github.com/huandu/xstrings/convert.go
@@ -13,17 +13,37 @@ import (
//
// Some samples.
//
+// "some_words" => "someWords"
+// "http_server" => "httpServer"
+// "no_https" => "noHttps"
+// "_complex__case_" => "_complex_Case_"
+// "some words" => "someWords"
+// "GOLANG_IS_GREAT" => "golangIsGreat"
+func ToCamelCase(str string) string {
+ return toCamelCase(str, false)
+}
+
+// ToPascalCase is to convert words separated by space, underscore and hyphen to pascal case.
+//
+// Some samples.
+//
// "some_words" => "SomeWords"
// "http_server" => "HttpServer"
// "no_https" => "NoHttps"
// "_complex__case_" => "_Complex_Case_"
// "some words" => "SomeWords"
-func ToCamelCase(str string) string {
+// "GOLANG_IS_GREAT" => "GolangIsGreat"
+func ToPascalCase(str string) string {
+ return toCamelCase(str, true)
+}
+
+func toCamelCase(str string, isBig bool) string {
if len(str) == 0 {
return ""
}
buf := &stringBuilder{}
+ var isFirstRuneUpper bool
var r0, r1 rune
var size int
@@ -33,7 +53,14 @@ func ToCamelCase(str string) string {
str = str[size:]
if !isConnector(r0) {
- r0 = unicode.ToUpper(r0)
+ isFirstRuneUpper = unicode.IsUpper(r0)
+
+ if isBig {
+ r0 = unicode.ToUpper(r0)
+ } else {
+ r0 = unicode.ToLower(r0)
+ }
+
break
}
@@ -60,12 +87,25 @@ func ToCamelCase(str string) string {
}
if isConnector(r1) {
+ isFirstRuneUpper = unicode.IsUpper(r0)
r0 = unicode.ToUpper(r0)
} else {
+ if isFirstRuneUpper {
+ if unicode.IsUpper(r0) {
+ r0 = unicode.ToLower(r0)
+ } else {
+ isFirstRuneUpper = false
+ }
+ }
+
buf.WriteRune(r1)
}
}
+ if isFirstRuneUpper && !isBig {
+ r0 = unicode.ToLower(r0)
+ }
+
buf.WriteRune(r0)
return buf.String()
}
diff --git a/vendor/github.com/imdario/mergo/.deepsource.toml b/vendor/github.com/imdario/mergo/.deepsource.toml
deleted file mode 100644
index 8a0681af855..00000000000
--- a/vendor/github.com/imdario/mergo/.deepsource.toml
+++ /dev/null
@@ -1,12 +0,0 @@
-version = 1
-
-test_patterns = [
- "*_test.go"
-]
-
-[[analyzers]]
-name = "go"
-enabled = true
-
- [analyzers.meta]
- import_path = "github.com/imdario/mergo"
\ No newline at end of file
diff --git a/vendor/github.com/imdario/mergo/.gitignore b/vendor/github.com/imdario/mergo/.gitignore
deleted file mode 100644
index 529c3412ba9..00000000000
--- a/vendor/github.com/imdario/mergo/.gitignore
+++ /dev/null
@@ -1,33 +0,0 @@
-#### joe made this: http://goel.io/joe
-
-#### go ####
-# Binaries for programs and plugins
-*.exe
-*.dll
-*.so
-*.dylib
-
-# Test binary, build with `go test -c`
-*.test
-
-# Output of the go coverage tool, specifically when used with LiteIDE
-*.out
-
-# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
-.glide/
-
-#### vim ####
-# Swap
-[._]*.s[a-v][a-z]
-[._]*.sw[a-p]
-[._]s[a-v][a-z]
-[._]sw[a-p]
-
-# Session
-Session.vim
-
-# Temporary
-.netrwhist
-*~
-# Auto-generated tag files
-tags
diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/github.com/imdario/mergo/.travis.yml
deleted file mode 100644
index d324c43ba4d..00000000000
--- a/vendor/github.com/imdario/mergo/.travis.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-language: go
-arch:
- - amd64
- - ppc64le
-install:
- - go get -t
- - go get golang.org/x/tools/cmd/cover
- - go get github.com/mattn/goveralls
-script:
- - go test -race -v ./...
-after_script:
- - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN
diff --git a/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md b/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md
deleted file mode 100644
index 469b44907a0..00000000000
--- a/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# Contributor Covenant Code of Conduct
-
-## Our Pledge
-
-In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
-
-## Our Standards
-
-Examples of behavior that contributes to creating a positive environment include:
-
-* Using welcoming and inclusive language
-* Being respectful of differing viewpoints and experiences
-* Gracefully accepting constructive criticism
-* Focusing on what is best for the community
-* Showing empathy towards other community members
-
-Examples of unacceptable behavior by participants include:
-
-* The use of sexualized language or imagery and unwelcome sexual attention or advances
-* Trolling, insulting/derogatory comments, and personal or political attacks
-* Public or private harassment
-* Publishing others' private information, such as a physical or electronic address, without explicit permission
-* Other conduct which could reasonably be considered inappropriate in a professional setting
-
-## Our Responsibilities
-
-Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
-
-Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
-
-## Scope
-
-This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
-
-## Enforcement
-
-Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
-
-Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
-
-## Attribution
-
-This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
-
-[homepage]: http://contributor-covenant.org
-[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/github.com/imdario/mergo/LICENSE b/vendor/github.com/imdario/mergo/LICENSE
deleted file mode 100644
index 686680298da..00000000000
--- a/vendor/github.com/imdario/mergo/LICENSE
+++ /dev/null
@@ -1,28 +0,0 @@
-Copyright (c) 2013 Dario Castañé. All rights reserved.
-Copyright (c) 2012 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md
deleted file mode 100644
index 7e6f7aeee82..00000000000
--- a/vendor/github.com/imdario/mergo/README.md
+++ /dev/null
@@ -1,235 +0,0 @@
-# Mergo
-
-
-[![GoDoc][3]][4]
-[![GitHub release][5]][6]
-[![GoCard][7]][8]
-[![Build Status][1]][2]
-[![Coverage Status][9]][10]
-[![Sourcegraph][11]][12]
-[![FOSSA Status][13]][14]
-[![Become my sponsor][15]][16]
-
-[1]: https://travis-ci.org/imdario/mergo.png
-[2]: https://travis-ci.org/imdario/mergo
-[3]: https://godoc.org/github.com/imdario/mergo?status.svg
-[4]: https://godoc.org/github.com/imdario/mergo
-[5]: https://img.shields.io/github/release/imdario/mergo.svg
-[6]: https://github.com/imdario/mergo/releases
-[7]: https://goreportcard.com/badge/imdario/mergo
-[8]: https://goreportcard.com/report/github.com/imdario/mergo
-[9]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master
-[10]: https://coveralls.io/github/imdario/mergo?branch=master
-[11]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg
-[12]: https://sourcegraph.com/github.com/imdario/mergo?badge
-[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield
-[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield
-[15]: https://img.shields.io/github/sponsors/imdario
-[16]: https://github.com/sponsors/imdario
-
-A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
-
-Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).
-
-Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche.
-
-## Status
-
-It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
-
-### Important note
-
-Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules.
-
-Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code.
-
-If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
-
-### Donations
-
-If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes:
-
-]
-
-
-
-### Mergo in the wild
-
-- [cli/cli](https://github.com/cli/cli)
-- [moby/moby](https://github.com/moby/moby)
-- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
-- [vmware/dispatch](https://github.com/vmware/dispatch)
-- [Shopify/themekit](https://github.com/Shopify/themekit)
-- [imdario/zas](https://github.com/imdario/zas)
-- [matcornic/hermes](https://github.com/matcornic/hermes)
-- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go)
-- [kataras/iris](https://github.com/kataras/iris)
-- [michaelsauter/crane](https://github.com/michaelsauter/crane)
-- [go-task/task](https://github.com/go-task/task)
-- [sensu/uchiwa](https://github.com/sensu/uchiwa)
-- [ory/hydra](https://github.com/ory/hydra)
-- [sisatech/vcli](https://github.com/sisatech/vcli)
-- [dairycart/dairycart](https://github.com/dairycart/dairycart)
-- [projectcalico/felix](https://github.com/projectcalico/felix)
-- [resin-os/balena](https://github.com/resin-os/balena)
-- [go-kivik/kivik](https://github.com/go-kivik/kivik)
-- [Telefonica/govice](https://github.com/Telefonica/govice)
-- [supergiant/supergiant](supergiant/supergiant)
-- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce)
-- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy)
-- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel)
-- [EagerIO/Stout](https://github.com/EagerIO/Stout)
-- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api)
-- [russross/canvasassignments](https://github.com/russross/canvasassignments)
-- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api)
-- [casualjim/exeggutor](https://github.com/casualjim/exeggutor)
-- [divshot/gitling](https://github.com/divshot/gitling)
-- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl)
-- [andrerocker/deploy42](https://github.com/andrerocker/deploy42)
-- [elwinar/rambler](https://github.com/elwinar/rambler)
-- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman)
-- [jfbus/impressionist](https://github.com/jfbus/impressionist)
-- [Jmeyering/zealot](https://github.com/Jmeyering/zealot)
-- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host)
-- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go)
-- [thoas/picfit](https://github.com/thoas/picfit)
-- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server)
-- [jnuthong/item_search](https://github.com/jnuthong/item_search)
-- [bukalapak/snowboard](https://github.com/bukalapak/snowboard)
-- [containerssh/containerssh](https://github.com/containerssh/containerssh)
-- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser)
-- [tjpnz/structbot](https://github.com/tjpnz/structbot)
-
-## Install
-
- go get github.com/imdario/mergo
-
- // use in your .go code
- import (
- "github.com/imdario/mergo"
- )
-
-## Usage
-
-You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are zero values](https://golang.org/ref/spec#The_zero_value) too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).
-
-```go
-if err := mergo.Merge(&dst, src); err != nil {
- // ...
-}
-```
-
-Also, you can merge overwriting values using the transformer `WithOverride`.
-
-```go
-if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
- // ...
-}
-```
-
-Additionally, you can map a `map[string]interface{}` to a struct (and otherwise, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field.
-
-```go
-if err := mergo.Map(&dst, srcMap); err != nil {
- // ...
-}
-```
-
-Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values.
-
-Here is a nice example:
-
-```go
-package main
-
-import (
- "fmt"
- "github.com/imdario/mergo"
-)
-
-type Foo struct {
- A string
- B int64
-}
-
-func main() {
- src := Foo{
- A: "one",
- B: 2,
- }
- dest := Foo{
- A: "two",
- }
- mergo.Merge(&dest, src)
- fmt.Println(dest)
- // Will print
- // {two 2}
-}
-```
-
-Note: if test are failing due missing package, please execute:
-
- go get gopkg.in/yaml.v3
-
-### Transformers
-
-Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero `time.Time`?
-
-```go
-package main
-
-import (
- "fmt"
- "github.com/imdario/mergo"
- "reflect"
- "time"
-)
-
-type timeTransformer struct {
-}
-
-func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
- if typ == reflect.TypeOf(time.Time{}) {
- return func(dst, src reflect.Value) error {
- if dst.CanSet() {
- isZero := dst.MethodByName("IsZero")
- result := isZero.Call([]reflect.Value{})
- if result[0].Bool() {
- dst.Set(src)
- }
- }
- return nil
- }
- }
- return nil
-}
-
-type Snapshot struct {
- Time time.Time
- // ...
-}
-
-func main() {
- src := Snapshot{time.Now()}
- dest := Snapshot{}
- mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{}))
- fmt.Println(dest)
- // Will print
- // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
-}
-```
-
-## Contact me
-
-If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario)
-
-## About
-
-Written by [Dario Castañé](http://dario.im).
-
-## License
-
-[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE).
-
-
-[](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large)
diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/github.com/imdario/mergo/doc.go
deleted file mode 100644
index fcd985f995d..00000000000
--- a/vendor/github.com/imdario/mergo/doc.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2013 Dario Castañé. All rights reserved.
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
-
-Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).
-
-Status
-
-It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc.
-
-Important note
-
-Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 as stable but not bug-free. . Also, this version adds suppot for go modules.
-
-Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code.
-
-If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u github.com/imdario/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
-
-Install
-
-Do your usual installation procedure:
-
- go get github.com/imdario/mergo
-
- // use in your .go code
- import (
- "github.com/imdario/mergo"
- )
-
-Usage
-
-You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as they are zero values too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).
-
- if err := mergo.Merge(&dst, src); err != nil {
- // ...
- }
-
-Also, you can merge overwriting values using the transformer WithOverride.
-
- if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
- // ...
- }
-
-Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field.
-
- if err := mergo.Map(&dst, srcMap); err != nil {
- // ...
- }
-
-Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values.
-
-Here is a nice example:
-
- package main
-
- import (
- "fmt"
- "github.com/imdario/mergo"
- )
-
- type Foo struct {
- A string
- B int64
- }
-
- func main() {
- src := Foo{
- A: "one",
- B: 2,
- }
- dest := Foo{
- A: "two",
- }
- mergo.Merge(&dest, src)
- fmt.Println(dest)
- // Will print
- // {two 2}
- }
-
-Transformers
-
-Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, time.Time is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero time.Time?
-
- package main
-
- import (
- "fmt"
- "github.com/imdario/mergo"
- "reflect"
- "time"
- )
-
- type timeTransformer struct {
- }
-
- func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
- if typ == reflect.TypeOf(time.Time{}) {
- return func(dst, src reflect.Value) error {
- if dst.CanSet() {
- isZero := dst.MethodByName("IsZero")
- result := isZero.Call([]reflect.Value{})
- if result[0].Bool() {
- dst.Set(src)
- }
- }
- return nil
- }
- }
- return nil
- }
-
- type Snapshot struct {
- Time time.Time
- // ...
- }
-
- func main() {
- src := Snapshot{time.Now()}
- dest := Snapshot{}
- mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{}))
- fmt.Println(dest)
- // Will print
- // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
- }
-
-Contact me
-
-If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario
-
-About
-
-Written by Dario Castañé: https://da.rio.hn
-
-License
-
-BSD 3-Clause license, as Go language.
-
-*/
-package mergo
diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go
deleted file mode 100644
index a13a7ee46c7..00000000000
--- a/vendor/github.com/imdario/mergo/map.go
+++ /dev/null
@@ -1,178 +0,0 @@
-// Copyright 2014 Dario Castañé. All rights reserved.
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Based on src/pkg/reflect/deepequal.go from official
-// golang's stdlib.
-
-package mergo
-
-import (
- "fmt"
- "reflect"
- "unicode"
- "unicode/utf8"
-)
-
-func changeInitialCase(s string, mapper func(rune) rune) string {
- if s == "" {
- return s
- }
- r, n := utf8.DecodeRuneInString(s)
- return string(mapper(r)) + s[n:]
-}
-
-func isExported(field reflect.StructField) bool {
- r, _ := utf8.DecodeRuneInString(field.Name)
- return r >= 'A' && r <= 'Z'
-}
-
-// Traverses recursively both values, assigning src's fields values to dst.
-// The map argument tracks comparisons that have already been seen, which allows
-// short circuiting on recursive types.
-func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
- overwrite := config.Overwrite
- if dst.CanAddr() {
- addr := dst.UnsafeAddr()
- h := 17 * addr
- seen := visited[h]
- typ := dst.Type()
- for p := seen; p != nil; p = p.next {
- if p.ptr == addr && p.typ == typ {
- return nil
- }
- }
- // Remember, remember...
- visited[h] = &visit{addr, typ, seen}
- }
- zeroValue := reflect.Value{}
- switch dst.Kind() {
- case reflect.Map:
- dstMap := dst.Interface().(map[string]interface{})
- for i, n := 0, src.NumField(); i < n; i++ {
- srcType := src.Type()
- field := srcType.Field(i)
- if !isExported(field) {
- continue
- }
- fieldName := field.Name
- fieldName = changeInitialCase(fieldName, unicode.ToLower)
- if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) {
- dstMap[fieldName] = src.Field(i).Interface()
- }
- }
- case reflect.Ptr:
- if dst.IsNil() {
- v := reflect.New(dst.Type().Elem())
- dst.Set(v)
- }
- dst = dst.Elem()
- fallthrough
- case reflect.Struct:
- srcMap := src.Interface().(map[string]interface{})
- for key := range srcMap {
- config.overwriteWithEmptyValue = true
- srcValue := srcMap[key]
- fieldName := changeInitialCase(key, unicode.ToUpper)
- dstElement := dst.FieldByName(fieldName)
- if dstElement == zeroValue {
- // We discard it because the field doesn't exist.
- continue
- }
- srcElement := reflect.ValueOf(srcValue)
- dstKind := dstElement.Kind()
- srcKind := srcElement.Kind()
- if srcKind == reflect.Ptr && dstKind != reflect.Ptr {
- srcElement = srcElement.Elem()
- srcKind = reflect.TypeOf(srcElement.Interface()).Kind()
- } else if dstKind == reflect.Ptr {
- // Can this work? I guess it can't.
- if srcKind != reflect.Ptr && srcElement.CanAddr() {
- srcPtr := srcElement.Addr()
- srcElement = reflect.ValueOf(srcPtr)
- srcKind = reflect.Ptr
- }
- }
-
- if !srcElement.IsValid() {
- continue
- }
- if srcKind == dstKind {
- if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
- return
- }
- } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface {
- if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
- return
- }
- } else if srcKind == reflect.Map {
- if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil {
- return
- }
- } else {
- return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind)
- }
- }
- }
- return
-}
-
-// Map sets fields' values in dst from src.
-// src can be a map with string keys or a struct. dst must be the opposite:
-// if src is a map, dst must be a valid pointer to struct. If src is a struct,
-// dst must be map[string]interface{}.
-// It won't merge unexported (private) fields and will do recursively
-// any exported field.
-// If dst is a map, keys will be src fields' names in lower camel case.
-// Missing key in src that doesn't match a field in dst will be skipped. This
-// doesn't apply if dst is a map.
-// This is separated method from Merge because it is cleaner and it keeps sane
-// semantics: merging equal types, mapping different (restricted) types.
-func Map(dst, src interface{}, opts ...func(*Config)) error {
- return _map(dst, src, opts...)
-}
-
-// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by
-// non-empty src attribute values.
-// Deprecated: Use Map(…) with WithOverride
-func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
- return _map(dst, src, append(opts, WithOverride)...)
-}
-
-func _map(dst, src interface{}, opts ...func(*Config)) error {
- if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
- return ErrNonPointerAgument
- }
- var (
- vDst, vSrc reflect.Value
- err error
- )
- config := &Config{}
-
- for _, opt := range opts {
- opt(config)
- }
-
- if vDst, vSrc, err = resolveValues(dst, src); err != nil {
- return err
- }
- // To be friction-less, we redirect equal-type arguments
- // to deepMerge. Only because arguments can be anything.
- if vSrc.Kind() == vDst.Kind() {
- return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
- }
- switch vSrc.Kind() {
- case reflect.Struct:
- if vDst.Kind() != reflect.Map {
- return ErrExpectedMapAsDestination
- }
- case reflect.Map:
- if vDst.Kind() != reflect.Struct {
- return ErrExpectedStructAsDestination
- }
- default:
- return ErrNotSupported
- }
- return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config)
-}
diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go
deleted file mode 100644
index 8b4e2f47a08..00000000000
--- a/vendor/github.com/imdario/mergo/merge.go
+++ /dev/null
@@ -1,380 +0,0 @@
-// Copyright 2013 Dario Castañé. All rights reserved.
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Based on src/pkg/reflect/deepequal.go from official
-// golang's stdlib.
-
-package mergo
-
-import (
- "fmt"
- "reflect"
-)
-
-func hasMergeableFields(dst reflect.Value) (exported bool) {
- for i, n := 0, dst.NumField(); i < n; i++ {
- field := dst.Type().Field(i)
- if field.Anonymous && dst.Field(i).Kind() == reflect.Struct {
- exported = exported || hasMergeableFields(dst.Field(i))
- } else if isExportedComponent(&field) {
- exported = exported || len(field.PkgPath) == 0
- }
- }
- return
-}
-
-func isExportedComponent(field *reflect.StructField) bool {
- pkgPath := field.PkgPath
- if len(pkgPath) > 0 {
- return false
- }
- c := field.Name[0]
- if 'a' <= c && c <= 'z' || c == '_' {
- return false
- }
- return true
-}
-
-type Config struct {
- Overwrite bool
- AppendSlice bool
- TypeCheck bool
- Transformers Transformers
- overwriteWithEmptyValue bool
- overwriteSliceWithEmptyValue bool
- sliceDeepCopy bool
- debug bool
-}
-
-type Transformers interface {
- Transformer(reflect.Type) func(dst, src reflect.Value) error
-}
-
-// Traverses recursively both values, assigning src's fields values to dst.
-// The map argument tracks comparisons that have already been seen, which allows
-// short circuiting on recursive types.
-func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
- overwrite := config.Overwrite
- typeCheck := config.TypeCheck
- overwriteWithEmptySrc := config.overwriteWithEmptyValue
- overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue
- sliceDeepCopy := config.sliceDeepCopy
-
- if !src.IsValid() {
- return
- }
- if dst.CanAddr() {
- addr := dst.UnsafeAddr()
- h := 17 * addr
- seen := visited[h]
- typ := dst.Type()
- for p := seen; p != nil; p = p.next {
- if p.ptr == addr && p.typ == typ {
- return nil
- }
- }
- // Remember, remember...
- visited[h] = &visit{addr, typ, seen}
- }
-
- if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() {
- if fn := config.Transformers.Transformer(dst.Type()); fn != nil {
- err = fn(dst, src)
- return
- }
- }
-
- switch dst.Kind() {
- case reflect.Struct:
- if hasMergeableFields(dst) {
- for i, n := 0, dst.NumField(); i < n; i++ {
- if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil {
- return
- }
- }
- } else {
- if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) {
- dst.Set(src)
- }
- }
- case reflect.Map:
- if dst.IsNil() && !src.IsNil() {
- if dst.CanSet() {
- dst.Set(reflect.MakeMap(dst.Type()))
- } else {
- dst = src
- return
- }
- }
-
- if src.Kind() != reflect.Map {
- if overwrite {
- dst.Set(src)
- }
- return
- }
-
- for _, key := range src.MapKeys() {
- srcElement := src.MapIndex(key)
- if !srcElement.IsValid() {
- continue
- }
- dstElement := dst.MapIndex(key)
- switch srcElement.Kind() {
- case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice:
- if srcElement.IsNil() {
- if overwrite {
- dst.SetMapIndex(key, srcElement)
- }
- continue
- }
- fallthrough
- default:
- if !srcElement.CanInterface() {
- continue
- }
- switch reflect.TypeOf(srcElement.Interface()).Kind() {
- case reflect.Struct:
- fallthrough
- case reflect.Ptr:
- fallthrough
- case reflect.Map:
- srcMapElm := srcElement
- dstMapElm := dstElement
- if srcMapElm.CanInterface() {
- srcMapElm = reflect.ValueOf(srcMapElm.Interface())
- if dstMapElm.IsValid() {
- dstMapElm = reflect.ValueOf(dstMapElm.Interface())
- }
- }
- if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil {
- return
- }
- case reflect.Slice:
- srcSlice := reflect.ValueOf(srcElement.Interface())
-
- var dstSlice reflect.Value
- if !dstElement.IsValid() || dstElement.IsNil() {
- dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len())
- } else {
- dstSlice = reflect.ValueOf(dstElement.Interface())
- }
-
- if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy {
- if typeCheck && srcSlice.Type() != dstSlice.Type() {
- return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
- }
- dstSlice = srcSlice
- } else if config.AppendSlice {
- if srcSlice.Type() != dstSlice.Type() {
- return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
- }
- dstSlice = reflect.AppendSlice(dstSlice, srcSlice)
- } else if sliceDeepCopy {
- i := 0
- for ; i < srcSlice.Len() && i < dstSlice.Len(); i++ {
- srcElement := srcSlice.Index(i)
- dstElement := dstSlice.Index(i)
-
- if srcElement.CanInterface() {
- srcElement = reflect.ValueOf(srcElement.Interface())
- }
- if dstElement.CanInterface() {
- dstElement = reflect.ValueOf(dstElement.Interface())
- }
-
- if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
- return
- }
- }
-
- }
- dst.SetMapIndex(key, dstSlice)
- }
- }
- if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) {
- continue
- }
-
- if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement)) {
- if dst.IsNil() {
- dst.Set(reflect.MakeMap(dst.Type()))
- }
- dst.SetMapIndex(key, srcElement)
- }
- }
- case reflect.Slice:
- if !dst.CanSet() {
- break
- }
- if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy {
- dst.Set(src)
- } else if config.AppendSlice {
- if src.Type() != dst.Type() {
- return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type())
- }
- dst.Set(reflect.AppendSlice(dst, src))
- } else if sliceDeepCopy {
- for i := 0; i < src.Len() && i < dst.Len(); i++ {
- srcElement := src.Index(i)
- dstElement := dst.Index(i)
- if srcElement.CanInterface() {
- srcElement = reflect.ValueOf(srcElement.Interface())
- }
- if dstElement.CanInterface() {
- dstElement = reflect.ValueOf(dstElement.Interface())
- }
-
- if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
- return
- }
- }
- }
- case reflect.Ptr:
- fallthrough
- case reflect.Interface:
- if isReflectNil(src) {
- if overwriteWithEmptySrc && dst.CanSet() && src.Type().AssignableTo(dst.Type()) {
- dst.Set(src)
- }
- break
- }
-
- if src.Kind() != reflect.Interface {
- if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) {
- if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
- dst.Set(src)
- }
- } else if src.Kind() == reflect.Ptr {
- if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
- return
- }
- } else if dst.Elem().Type() == src.Type() {
- if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil {
- return
- }
- } else {
- return ErrDifferentArgumentsTypes
- }
- break
- }
-
- if dst.IsNil() || overwrite {
- if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
- dst.Set(src)
- }
- break
- }
-
- if dst.Elem().Kind() == src.Elem().Kind() {
- if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
- return
- }
- break
- }
- default:
- mustSet := (isEmptyValue(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc)
- if mustSet {
- if dst.CanSet() {
- dst.Set(src)
- } else {
- dst = src
- }
- }
- }
-
- return
-}
-
-// Merge will fill any empty for value type attributes on the dst struct using corresponding
-// src attributes if they themselves are not empty. dst and src must be valid same-type structs
-// and dst must be a pointer to struct.
-// It won't merge unexported (private) fields and will do recursively any exported field.
-func Merge(dst, src interface{}, opts ...func(*Config)) error {
- return merge(dst, src, opts...)
-}
-
-// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by
-// non-empty src attribute values.
-// Deprecated: use Merge(…) with WithOverride
-func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
- return merge(dst, src, append(opts, WithOverride)...)
-}
-
-// WithTransformers adds transformers to merge, allowing to customize the merging of some types.
-func WithTransformers(transformers Transformers) func(*Config) {
- return func(config *Config) {
- config.Transformers = transformers
- }
-}
-
-// WithOverride will make merge override non-empty dst attributes with non-empty src attributes values.
-func WithOverride(config *Config) {
- config.Overwrite = true
-}
-
-// WithOverwriteWithEmptyValue will make merge override non empty dst attributes with empty src attributes values.
-func WithOverwriteWithEmptyValue(config *Config) {
- config.Overwrite = true
- config.overwriteWithEmptyValue = true
-}
-
-// WithOverrideEmptySlice will make merge override empty dst slice with empty src slice.
-func WithOverrideEmptySlice(config *Config) {
- config.overwriteSliceWithEmptyValue = true
-}
-
-// WithAppendSlice will make merge append slices instead of overwriting it.
-func WithAppendSlice(config *Config) {
- config.AppendSlice = true
-}
-
-// WithTypeCheck will make merge check types while overwriting it (must be used with WithOverride).
-func WithTypeCheck(config *Config) {
- config.TypeCheck = true
-}
-
-// WithSliceDeepCopy will merge slice element one by one with Overwrite flag.
-func WithSliceDeepCopy(config *Config) {
- config.sliceDeepCopy = true
- config.Overwrite = true
-}
-
-func merge(dst, src interface{}, opts ...func(*Config)) error {
- if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
- return ErrNonPointerAgument
- }
- var (
- vDst, vSrc reflect.Value
- err error
- )
-
- config := &Config{}
-
- for _, opt := range opts {
- opt(config)
- }
-
- if vDst, vSrc, err = resolveValues(dst, src); err != nil {
- return err
- }
- if vDst.Type() != vSrc.Type() {
- return ErrDifferentArgumentsTypes
- }
- return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
-}
-
-// IsReflectNil is the reflect value provided nil
-func isReflectNil(v reflect.Value) bool {
- k := v.Kind()
- switch k {
- case reflect.Interface, reflect.Slice, reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr:
- // Both interface and slice are nil if first word is 0.
- // Both are always bigger than a word; assume flagIndir.
- return v.IsNil()
- default:
- return false
- }
-}
diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go
deleted file mode 100644
index 9fe362d476a..00000000000
--- a/vendor/github.com/imdario/mergo/mergo.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2013 Dario Castañé. All rights reserved.
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Based on src/pkg/reflect/deepequal.go from official
-// golang's stdlib.
-
-package mergo
-
-import (
- "errors"
- "reflect"
-)
-
-// Errors reported by Mergo when it finds invalid arguments.
-var (
- ErrNilArguments = errors.New("src and dst must not be nil")
- ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type")
- ErrNotSupported = errors.New("only structs, maps, and slices are supported")
- ErrExpectedMapAsDestination = errors.New("dst was expected to be a map")
- ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
- ErrNonPointerAgument = errors.New("dst must be a pointer")
-)
-
-// During deepMerge, must keep track of checks that are
-// in progress. The comparison algorithm assumes that all
-// checks in progress are true when it reencounters them.
-// Visited are stored in a map indexed by 17 * a1 + a2;
-type visit struct {
- ptr uintptr
- typ reflect.Type
- next *visit
-}
-
-// From src/pkg/encoding/json/encode.go.
-func isEmptyValue(v reflect.Value) bool {
- switch v.Kind() {
- case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
- return v.Len() == 0
- case reflect.Bool:
- return !v.Bool()
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return v.Int() == 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return v.Uint() == 0
- case reflect.Float32, reflect.Float64:
- return v.Float() == 0
- case reflect.Interface, reflect.Ptr:
- if v.IsNil() {
- return true
- }
- return isEmptyValue(v.Elem())
- case reflect.Func:
- return v.IsNil()
- case reflect.Invalid:
- return true
- }
- return false
-}
-
-func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) {
- if dst == nil || src == nil {
- err = ErrNilArguments
- return
- }
- vDst = reflect.ValueOf(dst).Elem()
- if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map && vDst.Kind() != reflect.Slice {
- err = ErrNotSupported
- return
- }
- vSrc = reflect.ValueOf(src)
- // We check if vSrc is a pointer to dereference it.
- if vSrc.Kind() == reflect.Ptr {
- vSrc = vSrc.Elem()
- }
- return
-}
diff --git a/vendor/github.com/jbenet/go-context/LICENSE b/vendor/github.com/jbenet/go-context/LICENSE
deleted file mode 100644
index c7386b3c940..00000000000
--- a/vendor/github.com/jbenet/go-context/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2014 Juan Batiz-Benet
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/vendor/github.com/jbenet/go-context/io/ctxio.go b/vendor/github.com/jbenet/go-context/io/ctxio.go
deleted file mode 100644
index b4f2454235a..00000000000
--- a/vendor/github.com/jbenet/go-context/io/ctxio.go
+++ /dev/null
@@ -1,120 +0,0 @@
-// Package ctxio provides io.Reader and io.Writer wrappers that
-// respect context.Contexts. Use these at the interface between
-// your context code and your io.
-//
-// WARNING: read the code. see how writes and reads will continue
-// until you cancel the io. Maybe this package should provide
-// versions of io.ReadCloser and io.WriteCloser that automatically
-// call .Close when the context expires. But for now -- since in my
-// use cases I have long-lived connections with ephemeral io wrappers
-// -- this has yet to be a need.
-package ctxio
-
-import (
- "io"
-
- context "golang.org/x/net/context"
-)
-
-type ioret struct {
- n int
- err error
-}
-
-type Writer interface {
- io.Writer
-}
-
-type ctxWriter struct {
- w io.Writer
- ctx context.Context
-}
-
-// NewWriter wraps a writer to make it respect given Context.
-// If there is a blocking write, the returned Writer will return
-// whenever the context is cancelled (the return values are n=0
-// and err=ctx.Err().)
-//
-// Note well: this wrapper DOES NOT ACTUALLY cancel the underlying
-// write-- there is no way to do that with the standard go io
-// interface. So the read and write _will_ happen or hang. So, use
-// this sparingly, make sure to cancel the read or write as necesary
-// (e.g. closing a connection whose context is up, etc.)
-//
-// Furthermore, in order to protect your memory from being read
-// _after_ you've cancelled the context, this io.Writer will
-// first make a **copy** of the buffer.
-func NewWriter(ctx context.Context, w io.Writer) *ctxWriter {
- if ctx == nil {
- ctx = context.Background()
- }
- return &ctxWriter{ctx: ctx, w: w}
-}
-
-func (w *ctxWriter) Write(buf []byte) (int, error) {
- buf2 := make([]byte, len(buf))
- copy(buf2, buf)
-
- c := make(chan ioret, 1)
-
- go func() {
- n, err := w.w.Write(buf2)
- c <- ioret{n, err}
- close(c)
- }()
-
- select {
- case r := <-c:
- return r.n, r.err
- case <-w.ctx.Done():
- return 0, w.ctx.Err()
- }
-}
-
-type Reader interface {
- io.Reader
-}
-
-type ctxReader struct {
- r io.Reader
- ctx context.Context
-}
-
-// NewReader wraps a reader to make it respect given Context.
-// If there is a blocking read, the returned Reader will return
-// whenever the context is cancelled (the return values are n=0
-// and err=ctx.Err().)
-//
-// Note well: this wrapper DOES NOT ACTUALLY cancel the underlying
-// write-- there is no way to do that with the standard go io
-// interface. So the read and write _will_ happen or hang. So, use
-// this sparingly, make sure to cancel the read or write as necesary
-// (e.g. closing a connection whose context is up, etc.)
-//
-// Furthermore, in order to protect your memory from being read
-// _before_ you've cancelled the context, this io.Reader will
-// allocate a buffer of the same size, and **copy** into the client's
-// if the read succeeds in time.
-func NewReader(ctx context.Context, r io.Reader) *ctxReader {
- return &ctxReader{ctx: ctx, r: r}
-}
-
-func (r *ctxReader) Read(buf []byte) (int, error) {
- buf2 := make([]byte, len(buf))
-
- c := make(chan ioret, 1)
-
- go func() {
- n, err := r.r.Read(buf2)
- c <- ioret{n, err}
- close(c)
- }()
-
- select {
- case ret := <-c:
- copy(buf, buf2)
- return ret.n, ret.err
- case <-r.ctx.Done():
- return 0, r.ctx.Err()
- }
-}
diff --git a/vendor/github.com/kevinburke/ssh_config/.gitattributes b/vendor/github.com/kevinburke/ssh_config/.gitattributes
deleted file mode 100644
index 44db5818894..00000000000
--- a/vendor/github.com/kevinburke/ssh_config/.gitattributes
+++ /dev/null
@@ -1 +0,0 @@
-testdata/dos-lines eol=crlf
diff --git a/vendor/github.com/kevinburke/ssh_config/.gitignore b/vendor/github.com/kevinburke/ssh_config/.gitignore
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/vendor/github.com/kevinburke/ssh_config/.mailmap b/vendor/github.com/kevinburke/ssh_config/.mailmap
deleted file mode 100644
index 253406b1cc6..00000000000
--- a/vendor/github.com/kevinburke/ssh_config/.mailmap
+++ /dev/null
@@ -1 +0,0 @@
-Kevin Burke Kevin Burke
diff --git a/vendor/github.com/kevinburke/ssh_config/AUTHORS.txt b/vendor/github.com/kevinburke/ssh_config/AUTHORS.txt
deleted file mode 100644
index 311aeb1b4ff..00000000000
--- a/vendor/github.com/kevinburke/ssh_config/AUTHORS.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-Carlos A Becker
-Dustin Spicuzza
-Eugene Terentev
-Kevin Burke
-Mark Nevill
-Scott Lessans
-Sergey Lukjanov
-Wayne Ashley Berry
-santosh653 <70637961+santosh653@users.noreply.github.com>
diff --git a/vendor/github.com/kevinburke/ssh_config/CHANGELOG.md b/vendor/github.com/kevinburke/ssh_config/CHANGELOG.md
deleted file mode 100644
index d32a3f5106c..00000000000
--- a/vendor/github.com/kevinburke/ssh_config/CHANGELOG.md
+++ /dev/null
@@ -1,19 +0,0 @@
-# Changes
-
-## Version 1.2
-
-Previously, if a Host declaration or a value had trailing whitespace, that
-whitespace would have been included as part of the value. This led to unexpected
-consequences. For example:
-
-```
-Host example # A comment
- HostName example.com # Another comment
-```
-
-Prior to version 1.2, the value for Host would have been "example " and the
-value for HostName would have been "example.com ". Both of these are
-unintuitive.
-
-Instead, we strip the trailing whitespace in the configuration, which leads to
-more intuitive behavior.
diff --git a/vendor/github.com/kevinburke/ssh_config/LICENSE b/vendor/github.com/kevinburke/ssh_config/LICENSE
deleted file mode 100644
index b9a770ac2a9..00000000000
--- a/vendor/github.com/kevinburke/ssh_config/LICENSE
+++ /dev/null
@@ -1,49 +0,0 @@
-Copyright (c) 2017 Kevin Burke.
-
-Permission is hereby granted, free of charge, to any person
-obtaining a copy of this software and associated documentation
-files (the "Software"), to deal in the Software without
-restriction, including without limitation the rights to use,
-copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the
-Software is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-OTHER DEALINGS IN THE SOFTWARE.
-
-===================
-
-The lexer and parser borrow heavily from github.com/pelletier/go-toml. The
-license for that project is copied below.
-
-The MIT License (MIT)
-
-Copyright (c) 2013 - 2017 Thomas Pelletier, Eric Anderton
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/github.com/kevinburke/ssh_config/Makefile b/vendor/github.com/kevinburke/ssh_config/Makefile
deleted file mode 100644
index df7ee728be6..00000000000
--- a/vendor/github.com/kevinburke/ssh_config/Makefile
+++ /dev/null
@@ -1,33 +0,0 @@
-BUMP_VERSION := $(GOPATH)/bin/bump_version
-STATICCHECK := $(GOPATH)/bin/staticcheck
-WRITE_MAILMAP := $(GOPATH)/bin/write_mailmap
-
-$(STATICCHECK):
- go get honnef.co/go/tools/cmd/staticcheck
-
-lint: $(STATICCHECK)
- go vet ./...
- $(STATICCHECK)
-
-test: lint
- @# the timeout helps guard against infinite recursion
- go test -timeout=250ms ./...
-
-race-test: lint
- go test -timeout=500ms -race ./...
-
-$(BUMP_VERSION):
- go get -u github.com/kevinburke/bump_version
-
-$(WRITE_MAILMAP):
- go get -u github.com/kevinburke/write_mailmap
-
-release: test | $(BUMP_VERSION)
- $(BUMP_VERSION) --tag-prefix=v minor config.go
-
-force: ;
-
-AUTHORS.txt: force | $(WRITE_MAILMAP)
- $(WRITE_MAILMAP) > AUTHORS.txt
-
-authors: AUTHORS.txt
diff --git a/vendor/github.com/kevinburke/ssh_config/README.md b/vendor/github.com/kevinburke/ssh_config/README.md
deleted file mode 100644
index f14b2168f75..00000000000
--- a/vendor/github.com/kevinburke/ssh_config/README.md
+++ /dev/null
@@ -1,92 +0,0 @@
-# ssh_config
-
-This is a Go parser for `ssh_config` files. Importantly, this parser attempts
-to preserve comments in a given file, so you can manipulate a `ssh_config` file
-from a program, if your heart desires.
-
-It's designed to be used with the excellent
-[x/crypto/ssh](https://golang.org/x/crypto/ssh) package, which handles SSH
-negotiation but isn't very easy to configure.
-
-The `ssh_config` `Get()` and `GetStrict()` functions will attempt to read values
-from `$HOME/.ssh/config` and fall back to `/etc/ssh/ssh_config`. The first
-argument is the host name to match on, and the second argument is the key you
-want to retrieve.
-
-```go
-port := ssh_config.Get("myhost", "Port")
-```
-
-Certain directives can occur multiple times for a host (such as `IdentityFile`),
-so you should use the `GetAll` or `GetAllStrict` directive to retrieve those
-instead.
-
-```go
-files := ssh_config.GetAll("myhost", "IdentityFile")
-```
-
-You can also load a config file and read values from it.
-
-```go
-var config = `
-Host *.test
- Compression yes
-`
-
-cfg, err := ssh_config.Decode(strings.NewReader(config))
-fmt.Println(cfg.Get("example.test", "Port"))
-```
-
-Some SSH arguments have default values - for example, the default value for
-`KeyboardAuthentication` is `"yes"`. If you call Get(), and no value for the
-given Host/keyword pair exists in the config, we'll return a default for the
-keyword if one exists.
-
-### Manipulating SSH config files
-
-Here's how you can manipulate an SSH config file, and then write it back to
-disk.
-
-```go
-f, _ := os.Open(filepath.Join(os.Getenv("HOME"), ".ssh", "config"))
-cfg, _ := ssh_config.Decode(f)
-for _, host := range cfg.Hosts {
- fmt.Println("patterns:", host.Patterns)
- for _, node := range host.Nodes {
- // Manipulate the nodes as you see fit, or use a type switch to
- // distinguish between Empty, KV, and Include nodes.
- fmt.Println(node.String())
- }
-}
-
-// Print the config to stdout:
-fmt.Println(cfg.String())
-```
-
-## Spec compliance
-
-Wherever possible we try to implement the specification as documented in
-the `ssh_config` manpage. Unimplemented features should be present in the
-[issues][issues] list.
-
-Notably, the `Match` directive is currently unsupported.
-
-[issues]: https://github.com/kevinburke/ssh_config/issues
-
-## Errata
-
-This is the second [comment-preserving configuration parser][blog] I've written, after
-[an /etc/hosts parser][hostsfile]. Eventually, I will write one for every Linux
-file format.
-
-[blog]: https://kev.inburke.com/kevin/more-comment-preserving-configuration-parsers/
-[hostsfile]: https://github.com/kevinburke/hostsfile
-
-## Donating
-
-I don't get paid to maintain this project. Donations free up time to make
-improvements to the library, and respond to bug reports. You can send donations
-via Paypal's "Send Money" feature to kev@inburke.com. Donations are not tax
-deductible in the USA.
-
-You can also reach out about a consulting engagement: https://burke.services
diff --git a/vendor/github.com/kevinburke/ssh_config/config.go b/vendor/github.com/kevinburke/ssh_config/config.go
deleted file mode 100644
index 00d815c1a92..00000000000
--- a/vendor/github.com/kevinburke/ssh_config/config.go
+++ /dev/null
@@ -1,803 +0,0 @@
-// Package ssh_config provides tools for manipulating SSH config files.
-//
-// Importantly, this parser attempts to preserve comments in a given file, so
-// you can manipulate a `ssh_config` file from a program, if your heart desires.
-//
-// The Get() and GetStrict() functions will attempt to read values from
-// $HOME/.ssh/config, falling back to /etc/ssh/ssh_config. The first argument is
-// the host name to match on ("example.com"), and the second argument is the key
-// you want to retrieve ("Port"). The keywords are case insensitive.
-//
-// port := ssh_config.Get("myhost", "Port")
-//
-// You can also manipulate an SSH config file and then print it or write it back
-// to disk.
-//
-// f, _ := os.Open(filepath.Join(os.Getenv("HOME"), ".ssh", "config"))
-// cfg, _ := ssh_config.Decode(f)
-// for _, host := range cfg.Hosts {
-// fmt.Println("patterns:", host.Patterns)
-// for _, node := range host.Nodes {
-// fmt.Println(node.String())
-// }
-// }
-//
-// // Write the cfg back to disk:
-// fmt.Println(cfg.String())
-//
-// BUG: the Match directive is currently unsupported; parsing a config with
-// a Match directive will trigger an error.
-package ssh_config
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "os"
- osuser "os/user"
- "path/filepath"
- "regexp"
- "runtime"
- "strings"
- "sync"
-)
-
-const version = "1.2"
-
-var _ = version
-
-type configFinder func() string
-
-// UserSettings checks ~/.ssh and /etc/ssh for configuration files. The config
-// files are parsed and cached the first time Get() or GetStrict() is called.
-type UserSettings struct {
- IgnoreErrors bool
- systemConfig *Config
- systemConfigFinder configFinder
- userConfig *Config
- userConfigFinder configFinder
- loadConfigs sync.Once
- onceErr error
-}
-
-func homedir() string {
- user, err := osuser.Current()
- if err == nil {
- return user.HomeDir
- } else {
- return os.Getenv("HOME")
- }
-}
-
-func userConfigFinder() string {
- return filepath.Join(homedir(), ".ssh", "config")
-}
-
-// DefaultUserSettings is the default UserSettings and is used by Get and
-// GetStrict. It checks both $HOME/.ssh/config and /etc/ssh/ssh_config for keys,
-// and it will return parse errors (if any) instead of swallowing them.
-var DefaultUserSettings = &UserSettings{
- IgnoreErrors: false,
- systemConfigFinder: systemConfigFinder,
- userConfigFinder: userConfigFinder,
-}
-
-func systemConfigFinder() string {
- return filepath.Join("/", "etc", "ssh", "ssh_config")
-}
-
-func findVal(c *Config, alias, key string) (string, error) {
- if c == nil {
- return "", nil
- }
- val, err := c.Get(alias, key)
- if err != nil || val == "" {
- return "", err
- }
- if err := validate(key, val); err != nil {
- return "", err
- }
- return val, nil
-}
-
-func findAll(c *Config, alias, key string) ([]string, error) {
- if c == nil {
- return nil, nil
- }
- return c.GetAll(alias, key)
-}
-
-// Get finds the first value for key within a declaration that matches the
-// alias. Get returns the empty string if no value was found, or if IgnoreErrors
-// is false and we could not parse the configuration file. Use GetStrict to
-// disambiguate the latter cases.
-//
-// The match for key is case insensitive.
-//
-// Get is a wrapper around DefaultUserSettings.Get.
-func Get(alias, key string) string {
- return DefaultUserSettings.Get(alias, key)
-}
-
-// GetAll retrieves zero or more directives for key for the given alias. GetAll
-// returns nil if no value was found, or if IgnoreErrors is false and we could
-// not parse the configuration file. Use GetAllStrict to disambiguate the
-// latter cases.
-//
-// In most cases you want to use Get or GetStrict, which returns a single value.
-// However, a subset of ssh configuration values (IdentityFile, for example)
-// allow you to specify multiple directives.
-//
-// The match for key is case insensitive.
-//
-// GetAll is a wrapper around DefaultUserSettings.GetAll.
-func GetAll(alias, key string) []string {
- return DefaultUserSettings.GetAll(alias, key)
-}
-
-// GetStrict finds the first value for key within a declaration that matches the
-// alias. If key has a default value and no matching configuration is found, the
-// default will be returned. For more information on default values and the way
-// patterns are matched, see the manpage for ssh_config.
-//
-// The returned error will be non-nil if and only if a user's configuration file
-// or the system configuration file could not be parsed, and u.IgnoreErrors is
-// false.
-//
-// GetStrict is a wrapper around DefaultUserSettings.GetStrict.
-func GetStrict(alias, key string) (string, error) {
- return DefaultUserSettings.GetStrict(alias, key)
-}
-
-// GetAllStrict retrieves zero or more directives for key for the given alias.
-//
-// In most cases you want to use Get or GetStrict, which returns a single value.
-// However, a subset of ssh configuration values (IdentityFile, for example)
-// allow you to specify multiple directives.
-//
-// The returned error will be non-nil if and only if a user's configuration file
-// or the system configuration file could not be parsed, and u.IgnoreErrors is
-// false.
-//
-// GetAllStrict is a wrapper around DefaultUserSettings.GetAllStrict.
-func GetAllStrict(alias, key string) ([]string, error) {
- return DefaultUserSettings.GetAllStrict(alias, key)
-}
-
-// Get finds the first value for key within a declaration that matches the
-// alias. Get returns the empty string if no value was found, or if IgnoreErrors
-// is false and we could not parse the configuration file. Use GetStrict to
-// disambiguate the latter cases.
-//
-// The match for key is case insensitive.
-func (u *UserSettings) Get(alias, key string) string {
- val, err := u.GetStrict(alias, key)
- if err != nil {
- return ""
- }
- return val
-}
-
-// GetAll retrieves zero or more directives for key for the given alias. GetAll
-// returns nil if no value was found, or if IgnoreErrors is false and we could
-// not parse the configuration file. Use GetStrict to disambiguate the latter
-// cases.
-//
-// The match for key is case insensitive.
-func (u *UserSettings) GetAll(alias, key string) []string {
- val, _ := u.GetAllStrict(alias, key)
- return val
-}
-
-// GetStrict finds the first value for key within a declaration that matches the
-// alias. If key has a default value and no matching configuration is found, the
-// default will be returned. For more information on default values and the way
-// patterns are matched, see the manpage for ssh_config.
-//
-// error will be non-nil if and only if a user's configuration file or the
-// system configuration file could not be parsed, and u.IgnoreErrors is false.
-func (u *UserSettings) GetStrict(alias, key string) (string, error) {
- u.doLoadConfigs()
- //lint:ignore S1002 I prefer it this way
- if u.onceErr != nil && u.IgnoreErrors == false {
- return "", u.onceErr
- }
- val, err := findVal(u.userConfig, alias, key)
- if err != nil || val != "" {
- return val, err
- }
- val2, err2 := findVal(u.systemConfig, alias, key)
- if err2 != nil || val2 != "" {
- return val2, err2
- }
- return Default(key), nil
-}
-
-// GetAllStrict retrieves zero or more directives for key for the given alias.
-// If key has a default value and no matching configuration is found, the
-// default will be returned. For more information on default values and the way
-// patterns are matched, see the manpage for ssh_config.
-//
-// The returned error will be non-nil if and only if a user's configuration file
-// or the system configuration file could not be parsed, and u.IgnoreErrors is
-// false.
-func (u *UserSettings) GetAllStrict(alias, key string) ([]string, error) {
- u.doLoadConfigs()
- //lint:ignore S1002 I prefer it this way
- if u.onceErr != nil && u.IgnoreErrors == false {
- return nil, u.onceErr
- }
- val, err := findAll(u.userConfig, alias, key)
- if err != nil || val != nil {
- return val, err
- }
- val2, err2 := findAll(u.systemConfig, alias, key)
- if err2 != nil || val2 != nil {
- return val2, err2
- }
- // TODO: IdentityFile has multiple default values that we should return.
- if def := Default(key); def != "" {
- return []string{def}, nil
- }
- return []string{}, nil
-}
-
-func (u *UserSettings) doLoadConfigs() {
- u.loadConfigs.Do(func() {
- // can't parse user file, that's ok.
- var filename string
- if u.userConfigFinder == nil {
- filename = userConfigFinder()
- } else {
- filename = u.userConfigFinder()
- }
- var err error
- u.userConfig, err = parseFile(filename)
- //lint:ignore S1002 I prefer it this way
- if err != nil && os.IsNotExist(err) == false {
- u.onceErr = err
- return
- }
- if u.systemConfigFinder == nil {
- filename = systemConfigFinder()
- } else {
- filename = u.systemConfigFinder()
- }
- u.systemConfig, err = parseFile(filename)
- //lint:ignore S1002 I prefer it this way
- if err != nil && os.IsNotExist(err) == false {
- u.onceErr = err
- return
- }
- })
-}
-
-func parseFile(filename string) (*Config, error) {
- return parseWithDepth(filename, 0)
-}
-
-func parseWithDepth(filename string, depth uint8) (*Config, error) {
- b, err := os.ReadFile(filename)
- if err != nil {
- return nil, err
- }
- return decodeBytes(b, isSystem(filename), depth)
-}
-
-func isSystem(filename string) bool {
- // TODO: not sure this is the best way to detect a system repo
- return strings.HasPrefix(filepath.Clean(filename), "/etc/ssh")
-}
-
-// Decode reads r into a Config, or returns an error if r could not be parsed as
-// an SSH config file.
-func Decode(r io.Reader) (*Config, error) {
- b, err := io.ReadAll(r)
- if err != nil {
- return nil, err
- }
- return decodeBytes(b, false, 0)
-}
-
-// DecodeBytes reads b into a Config, or returns an error if r could not be
-// parsed as an SSH config file.
-func DecodeBytes(b []byte) (*Config, error) {
- return decodeBytes(b, false, 0)
-}
-
-func decodeBytes(b []byte, system bool, depth uint8) (c *Config, err error) {
- defer func() {
- if r := recover(); r != nil {
- if _, ok := r.(runtime.Error); ok {
- panic(r)
- }
- if e, ok := r.(error); ok && e == ErrDepthExceeded {
- err = e
- return
- }
- err = errors.New(r.(string))
- }
- }()
-
- c = parseSSH(lexSSH(b), system, depth)
- return c, err
-}
-
-// Config represents an SSH config file.
-type Config struct {
- // A list of hosts to match against. The file begins with an implicit
- // "Host *" declaration matching all hosts.
- Hosts []*Host
- depth uint8
- position Position
-}
-
-// Get finds the first value in the configuration that matches the alias and
-// contains key. Get returns the empty string if no value was found, or if the
-// Config contains an invalid conditional Include value.
-//
-// The match for key is case insensitive.
-func (c *Config) Get(alias, key string) (string, error) {
- lowerKey := strings.ToLower(key)
- for _, host := range c.Hosts {
- if !host.Matches(alias) {
- continue
- }
- for _, node := range host.Nodes {
- switch t := node.(type) {
- case *Empty:
- continue
- case *KV:
- // "keys are case insensitive" per the spec
- lkey := strings.ToLower(t.Key)
- if lkey == "match" {
- panic("can't handle Match directives")
- }
- if lkey == lowerKey {
- return t.Value, nil
- }
- case *Include:
- val := t.Get(alias, key)
- if val != "" {
- return val, nil
- }
- default:
- return "", fmt.Errorf("unknown Node type %v", t)
- }
- }
- }
- return "", nil
-}
-
-// GetAll returns all values in the configuration that match the alias and
-// contains key, or nil if none are present.
-func (c *Config) GetAll(alias, key string) ([]string, error) {
- lowerKey := strings.ToLower(key)
- all := []string(nil)
- for _, host := range c.Hosts {
- if !host.Matches(alias) {
- continue
- }
- for _, node := range host.Nodes {
- switch t := node.(type) {
- case *Empty:
- continue
- case *KV:
- // "keys are case insensitive" per the spec
- lkey := strings.ToLower(t.Key)
- if lkey == "match" {
- panic("can't handle Match directives")
- }
- if lkey == lowerKey {
- all = append(all, t.Value)
- }
- case *Include:
- val, _ := t.GetAll(alias, key)
- if len(val) > 0 {
- all = append(all, val...)
- }
- default:
- return nil, fmt.Errorf("unknown Node type %v", t)
- }
- }
- }
-
- return all, nil
-}
-
-// String returns a string representation of the Config file.
-func (c Config) String() string {
- return marshal(c).String()
-}
-
-func (c Config) MarshalText() ([]byte, error) {
- return marshal(c).Bytes(), nil
-}
-
-func marshal(c Config) *bytes.Buffer {
- var buf bytes.Buffer
- for i := range c.Hosts {
- buf.WriteString(c.Hosts[i].String())
- }
- return &buf
-}
-
-// Pattern is a pattern in a Host declaration. Patterns are read-only values;
-// create a new one with NewPattern().
-type Pattern struct {
- str string // Its appearance in the file, not the value that gets compiled.
- regex *regexp.Regexp
- not bool // True if this is a negated match
-}
-
-// String prints the string representation of the pattern.
-func (p Pattern) String() string {
- return p.str
-}
-
-// Copied from regexp.go with * and ? removed.
-var specialBytes = []byte(`\.+()|[]{}^$`)
-
-func special(b byte) bool {
- return bytes.IndexByte(specialBytes, b) >= 0
-}
-
-// NewPattern creates a new Pattern for matching hosts. NewPattern("*") creates
-// a Pattern that matches all hosts.
-//
-// From the manpage, a pattern consists of zero or more non-whitespace
-// characters, `*' (a wildcard that matches zero or more characters), or `?' (a
-// wildcard that matches exactly one character). For example, to specify a set
-// of declarations for any host in the ".co.uk" set of domains, the following
-// pattern could be used:
-//
-// Host *.co.uk
-//
-// The following pattern would match any host in the 192.168.0.[0-9] network range:
-//
-// Host 192.168.0.?
-func NewPattern(s string) (*Pattern, error) {
- if s == "" {
- return nil, errors.New("ssh_config: empty pattern")
- }
- negated := false
- if s[0] == '!' {
- negated = true
- s = s[1:]
- }
- var buf bytes.Buffer
- buf.WriteByte('^')
- for i := 0; i < len(s); i++ {
- // A byte loop is correct because all metacharacters are ASCII.
- switch b := s[i]; b {
- case '*':
- buf.WriteString(".*")
- case '?':
- buf.WriteString(".?")
- default:
- // borrowing from QuoteMeta here.
- if special(b) {
- buf.WriteByte('\\')
- }
- buf.WriteByte(b)
- }
- }
- buf.WriteByte('$')
- r, err := regexp.Compile(buf.String())
- if err != nil {
- return nil, err
- }
- return &Pattern{str: s, regex: r, not: negated}, nil
-}
-
-// Host describes a Host directive and the keywords that follow it.
-type Host struct {
- // A list of host patterns that should match this host.
- Patterns []*Pattern
- // A Node is either a key/value pair or a comment line.
- Nodes []Node
- // EOLComment is the comment (if any) terminating the Host line.
- EOLComment string
- // Whitespace if any between the Host declaration and a trailing comment.
- spaceBeforeComment string
-
- hasEquals bool
- leadingSpace int // TODO: handle spaces vs tabs here.
- // The file starts with an implicit "Host *" declaration.
- implicit bool
-}
-
-// Matches returns true if the Host matches for the given alias. For
-// a description of the rules that provide a match, see the manpage for
-// ssh_config.
-func (h *Host) Matches(alias string) bool {
- found := false
- for i := range h.Patterns {
- if h.Patterns[i].regex.MatchString(alias) {
- if h.Patterns[i].not {
- // Negated match. "A pattern entry may be negated by prefixing
- // it with an exclamation mark (`!'). If a negated entry is
- // matched, then the Host entry is ignored, regardless of
- // whether any other patterns on the line match. Negated matches
- // are therefore useful to provide exceptions for wildcard
- // matches."
- return false
- }
- found = true
- }
- }
- return found
-}
-
-// String prints h as it would appear in a config file. Minor tweaks may be
-// present in the whitespace in the printed file.
-func (h *Host) String() string {
- var buf strings.Builder
- //lint:ignore S1002 I prefer to write it this way
- if h.implicit == false {
- buf.WriteString(strings.Repeat(" ", int(h.leadingSpace)))
- buf.WriteString("Host")
- if h.hasEquals {
- buf.WriteString(" = ")
- } else {
- buf.WriteString(" ")
- }
- for i, pat := range h.Patterns {
- buf.WriteString(pat.String())
- if i < len(h.Patterns)-1 {
- buf.WriteString(" ")
- }
- }
- buf.WriteString(h.spaceBeforeComment)
- if h.EOLComment != "" {
- buf.WriteByte('#')
- buf.WriteString(h.EOLComment)
- }
- buf.WriteByte('\n')
- }
- for i := range h.Nodes {
- buf.WriteString(h.Nodes[i].String())
- buf.WriteByte('\n')
- }
- return buf.String()
-}
-
-// Node represents a line in a Config.
-type Node interface {
- Pos() Position
- String() string
-}
-
-// KV is a line in the config file that contains a key, a value, and possibly
-// a comment.
-type KV struct {
- Key string
- Value string
- // Whitespace after the value but before any comment
- spaceAfterValue string
- Comment string
- hasEquals bool
- leadingSpace int // Space before the key. TODO handle spaces vs tabs.
- position Position
-}
-
-// Pos returns k's Position.
-func (k *KV) Pos() Position {
- return k.position
-}
-
-// String prints k as it was parsed in the config file.
-func (k *KV) String() string {
- if k == nil {
- return ""
- }
- equals := " "
- if k.hasEquals {
- equals = " = "
- }
- line := strings.Repeat(" ", int(k.leadingSpace)) + k.Key + equals + k.Value + k.spaceAfterValue
- if k.Comment != "" {
- line += "#" + k.Comment
- }
- return line
-}
-
-// Empty is a line in the config file that contains only whitespace or comments.
-type Empty struct {
- Comment string
- leadingSpace int // TODO handle spaces vs tabs.
- position Position
-}
-
-// Pos returns e's Position.
-func (e *Empty) Pos() Position {
- return e.position
-}
-
-// String prints e as it was parsed in the config file.
-func (e *Empty) String() string {
- if e == nil {
- return ""
- }
- if e.Comment == "" {
- return ""
- }
- return fmt.Sprintf("%s#%s", strings.Repeat(" ", int(e.leadingSpace)), e.Comment)
-}
-
-// Include holds the result of an Include directive, including the config files
-// that have been parsed as part of that directive. At most 5 levels of Include
-// statements will be parsed.
-type Include struct {
- // Comment is the contents of any comment at the end of the Include
- // statement.
- Comment string
- // an include directive can include several different files, and wildcards
- directives []string
-
- mu sync.Mutex
- // 1:1 mapping between matches and keys in files array; matches preserves
- // ordering
- matches []string
- // actual filenames are listed here
- files map[string]*Config
- leadingSpace int
- position Position
- depth uint8
- hasEquals bool
-}
-
-const maxRecurseDepth = 5
-
-// ErrDepthExceeded is returned if too many Include directives are parsed.
-// Usually this indicates a recursive loop (an Include directive pointing to the
-// file it contains).
-var ErrDepthExceeded = errors.New("ssh_config: max recurse depth exceeded")
-
-func removeDups(arr []string) []string {
- // Use map to record duplicates as we find them.
- encountered := make(map[string]bool, len(arr))
- result := make([]string, 0)
-
- for v := range arr {
- //lint:ignore S1002 I prefer it this way
- if encountered[arr[v]] == false {
- encountered[arr[v]] = true
- result = append(result, arr[v])
- }
- }
- return result
-}
-
-// NewInclude creates a new Include with a list of file globs to include.
-// Configuration files are parsed greedily (e.g. as soon as this function runs).
-// Any error encountered while parsing nested configuration files will be
-// returned.
-func NewInclude(directives []string, hasEquals bool, pos Position, comment string, system bool, depth uint8) (*Include, error) {
- if depth > maxRecurseDepth {
- return nil, ErrDepthExceeded
- }
- inc := &Include{
- Comment: comment,
- directives: directives,
- files: make(map[string]*Config),
- position: pos,
- leadingSpace: pos.Col - 1,
- depth: depth,
- hasEquals: hasEquals,
- }
- // no need for inc.mu.Lock() since nothing else can access this inc
- matches := make([]string, 0)
- for i := range directives {
- var path string
- if filepath.IsAbs(directives[i]) {
- path = directives[i]
- } else if system {
- path = filepath.Join("/etc/ssh", directives[i])
- } else {
- path = filepath.Join(homedir(), ".ssh", directives[i])
- }
- theseMatches, err := filepath.Glob(path)
- if err != nil {
- return nil, err
- }
- matches = append(matches, theseMatches...)
- }
- matches = removeDups(matches)
- inc.matches = matches
- for i := range matches {
- config, err := parseWithDepth(matches[i], depth)
- if err != nil {
- return nil, err
- }
- inc.files[matches[i]] = config
- }
- return inc, nil
-}
-
-// Pos returns the position of the Include directive in the larger file.
-func (i *Include) Pos() Position {
- return i.position
-}
-
-// Get finds the first value in the Include statement matching the alias and the
-// given key.
-func (inc *Include) Get(alias, key string) string {
- inc.mu.Lock()
- defer inc.mu.Unlock()
- // TODO: we search files in any order which is not correct
- for i := range inc.matches {
- cfg := inc.files[inc.matches[i]]
- if cfg == nil {
- panic("nil cfg")
- }
- val, err := cfg.Get(alias, key)
- if err == nil && val != "" {
- return val
- }
- }
- return ""
-}
-
-// GetAll finds all values in the Include statement matching the alias and the
-// given key.
-func (inc *Include) GetAll(alias, key string) ([]string, error) {
- inc.mu.Lock()
- defer inc.mu.Unlock()
- var vals []string
-
- // TODO: we search files in any order which is not correct
- for i := range inc.matches {
- cfg := inc.files[inc.matches[i]]
- if cfg == nil {
- panic("nil cfg")
- }
- val, err := cfg.GetAll(alias, key)
- if err == nil && len(val) != 0 {
- // In theory if SupportsMultiple was false for this key we could
- // stop looking here. But the caller has asked us to find all
- // instances of the keyword (and could use Get() if they wanted) so
- // let's keep looking.
- vals = append(vals, val...)
- }
- }
- return vals, nil
-}
-
-// String prints out a string representation of this Include directive. Note
-// included Config files are not printed as part of this representation.
-func (inc *Include) String() string {
- equals := " "
- if inc.hasEquals {
- equals = " = "
- }
- line := fmt.Sprintf("%sInclude%s%s", strings.Repeat(" ", int(inc.leadingSpace)), equals, strings.Join(inc.directives, " "))
- if inc.Comment != "" {
- line += " #" + inc.Comment
- }
- return line
-}
-
-var matchAll *Pattern
-
-func init() {
- var err error
- matchAll, err = NewPattern("*")
- if err != nil {
- panic(err)
- }
-}
-
-func newConfig() *Config {
- return &Config{
- Hosts: []*Host{
- &Host{
- implicit: true,
- Patterns: []*Pattern{matchAll},
- Nodes: make([]Node, 0),
- },
- },
- depth: 0,
- }
-}
diff --git a/vendor/github.com/kevinburke/ssh_config/lexer.go b/vendor/github.com/kevinburke/ssh_config/lexer.go
deleted file mode 100644
index 11680b4c74d..00000000000
--- a/vendor/github.com/kevinburke/ssh_config/lexer.go
+++ /dev/null
@@ -1,240 +0,0 @@
-package ssh_config
-
-import (
- "bytes"
-)
-
-// Define state functions
-type sshLexStateFn func() sshLexStateFn
-
-type sshLexer struct {
- inputIdx int
- input []rune // Textual source
-
- buffer []rune // Runes composing the current token
- tokens chan token
- line int
- col int
- endbufferLine int
- endbufferCol int
-}
-
-func (s *sshLexer) lexComment(previousState sshLexStateFn) sshLexStateFn {
- return func() sshLexStateFn {
- growingString := ""
- for next := s.peek(); next != '\n' && next != eof; next = s.peek() {
- if next == '\r' && s.follow("\r\n") {
- break
- }
- growingString += string(next)
- s.next()
- }
- s.emitWithValue(tokenComment, growingString)
- s.skip()
- return previousState
- }
-}
-
-// lex the space after an equals sign in a function
-func (s *sshLexer) lexRspace() sshLexStateFn {
- for {
- next := s.peek()
- if !isSpace(next) {
- break
- }
- s.skip()
- }
- return s.lexRvalue
-}
-
-func (s *sshLexer) lexEquals() sshLexStateFn {
- for {
- next := s.peek()
- if next == '=' {
- s.emit(tokenEquals)
- s.skip()
- return s.lexRspace
- }
- // TODO error handling here; newline eof etc.
- if !isSpace(next) {
- break
- }
- s.skip()
- }
- return s.lexRvalue
-}
-
-func (s *sshLexer) lexKey() sshLexStateFn {
- growingString := ""
-
- for r := s.peek(); isKeyChar(r); r = s.peek() {
- // simplified a lot here
- if isSpace(r) || r == '=' {
- s.emitWithValue(tokenKey, growingString)
- s.skip()
- return s.lexEquals
- }
- growingString += string(r)
- s.next()
- }
- s.emitWithValue(tokenKey, growingString)
- return s.lexEquals
-}
-
-func (s *sshLexer) lexRvalue() sshLexStateFn {
- growingString := ""
- for {
- next := s.peek()
- switch next {
- case '\r':
- if s.follow("\r\n") {
- s.emitWithValue(tokenString, growingString)
- s.skip()
- return s.lexVoid
- }
- case '\n':
- s.emitWithValue(tokenString, growingString)
- s.skip()
- return s.lexVoid
- case '#':
- s.emitWithValue(tokenString, growingString)
- s.skip()
- return s.lexComment(s.lexVoid)
- case eof:
- s.next()
- }
- if next == eof {
- break
- }
- growingString += string(next)
- s.next()
- }
- s.emit(tokenEOF)
- return nil
-}
-
-func (s *sshLexer) read() rune {
- r := s.peek()
- if r == '\n' {
- s.endbufferLine++
- s.endbufferCol = 1
- } else {
- s.endbufferCol++
- }
- s.inputIdx++
- return r
-}
-
-func (s *sshLexer) next() rune {
- r := s.read()
-
- if r != eof {
- s.buffer = append(s.buffer, r)
- }
- return r
-}
-
-func (s *sshLexer) lexVoid() sshLexStateFn {
- for {
- next := s.peek()
- switch next {
- case '#':
- s.skip()
- return s.lexComment(s.lexVoid)
- case '\r':
- fallthrough
- case '\n':
- s.emit(tokenEmptyLine)
- s.skip()
- continue
- }
-
- if isSpace(next) {
- s.skip()
- }
-
- if isKeyStartChar(next) {
- return s.lexKey
- }
-
- // removed IsKeyStartChar and lexKey. probably will need to readd
-
- if next == eof {
- s.next()
- break
- }
- }
-
- s.emit(tokenEOF)
- return nil
-}
-
-func (s *sshLexer) ignore() {
- s.buffer = make([]rune, 0)
- s.line = s.endbufferLine
- s.col = s.endbufferCol
-}
-
-func (s *sshLexer) skip() {
- s.next()
- s.ignore()
-}
-
-func (s *sshLexer) emit(t tokenType) {
- s.emitWithValue(t, string(s.buffer))
-}
-
-func (s *sshLexer) emitWithValue(t tokenType, value string) {
- tok := token{
- Position: Position{s.line, s.col},
- typ: t,
- val: value,
- }
- s.tokens <- tok
- s.ignore()
-}
-
-func (s *sshLexer) peek() rune {
- if s.inputIdx >= len(s.input) {
- return eof
- }
-
- r := s.input[s.inputIdx]
- return r
-}
-
-func (s *sshLexer) follow(next string) bool {
- inputIdx := s.inputIdx
- for _, expectedRune := range next {
- if inputIdx >= len(s.input) {
- return false
- }
- r := s.input[inputIdx]
- inputIdx++
- if expectedRune != r {
- return false
- }
- }
- return true
-}
-
-func (s *sshLexer) run() {
- for state := s.lexVoid; state != nil; {
- state = state()
- }
- close(s.tokens)
-}
-
-func lexSSH(input []byte) chan token {
- runes := bytes.Runes(input)
- l := &sshLexer{
- input: runes,
- tokens: make(chan token),
- line: 1,
- col: 1,
- endbufferLine: 1,
- endbufferCol: 1,
- }
- go l.run()
- return l.tokens
-}
diff --git a/vendor/github.com/kevinburke/ssh_config/parser.go b/vendor/github.com/kevinburke/ssh_config/parser.go
deleted file mode 100644
index 2b1e718cb3b..00000000000
--- a/vendor/github.com/kevinburke/ssh_config/parser.go
+++ /dev/null
@@ -1,200 +0,0 @@
-package ssh_config
-
-import (
- "fmt"
- "strings"
- "unicode"
-)
-
-type sshParser struct {
- flow chan token
- config *Config
- tokensBuffer []token
- currentTable []string
- seenTableKeys []string
- // /etc/ssh parser or local parser - used to find the default for relative
- // filepaths in the Include directive
- system bool
- depth uint8
-}
-
-type sshParserStateFn func() sshParserStateFn
-
-// Formats and panics an error message based on a token
-func (p *sshParser) raiseErrorf(tok *token, msg string, args ...interface{}) {
- // TODO this format is ugly
- panic(tok.Position.String() + ": " + fmt.Sprintf(msg, args...))
-}
-
-func (p *sshParser) raiseError(tok *token, err error) {
- if err == ErrDepthExceeded {
- panic(err)
- }
- // TODO this format is ugly
- panic(tok.Position.String() + ": " + err.Error())
-}
-
-func (p *sshParser) run() {
- for state := p.parseStart; state != nil; {
- state = state()
- }
-}
-
-func (p *sshParser) peek() *token {
- if len(p.tokensBuffer) != 0 {
- return &(p.tokensBuffer[0])
- }
-
- tok, ok := <-p.flow
- if !ok {
- return nil
- }
- p.tokensBuffer = append(p.tokensBuffer, tok)
- return &tok
-}
-
-func (p *sshParser) getToken() *token {
- if len(p.tokensBuffer) != 0 {
- tok := p.tokensBuffer[0]
- p.tokensBuffer = p.tokensBuffer[1:]
- return &tok
- }
- tok, ok := <-p.flow
- if !ok {
- return nil
- }
- return &tok
-}
-
-func (p *sshParser) parseStart() sshParserStateFn {
- tok := p.peek()
-
- // end of stream, parsing is finished
- if tok == nil {
- return nil
- }
-
- switch tok.typ {
- case tokenComment, tokenEmptyLine:
- return p.parseComment
- case tokenKey:
- return p.parseKV
- case tokenEOF:
- return nil
- default:
- p.raiseErrorf(tok, fmt.Sprintf("unexpected token %q\n", tok))
- }
- return nil
-}
-
-func (p *sshParser) parseKV() sshParserStateFn {
- key := p.getToken()
- hasEquals := false
- val := p.getToken()
- if val.typ == tokenEquals {
- hasEquals = true
- val = p.getToken()
- }
- comment := ""
- tok := p.peek()
- if tok == nil {
- tok = &token{typ: tokenEOF}
- }
- if tok.typ == tokenComment && tok.Position.Line == val.Position.Line {
- tok = p.getToken()
- comment = tok.val
- }
- if strings.ToLower(key.val) == "match" {
- // https://github.com/kevinburke/ssh_config/issues/6
- p.raiseErrorf(val, "ssh_config: Match directive parsing is unsupported")
- return nil
- }
- if strings.ToLower(key.val) == "host" {
- strPatterns := strings.Split(val.val, " ")
- patterns := make([]*Pattern, 0)
- for i := range strPatterns {
- if strPatterns[i] == "" {
- continue
- }
- pat, err := NewPattern(strPatterns[i])
- if err != nil {
- p.raiseErrorf(val, "Invalid host pattern: %v", err)
- return nil
- }
- patterns = append(patterns, pat)
- }
- // val.val at this point could be e.g. "example.com "
- hostval := strings.TrimRightFunc(val.val, unicode.IsSpace)
- spaceBeforeComment := val.val[len(hostval):]
- val.val = hostval
- p.config.Hosts = append(p.config.Hosts, &Host{
- Patterns: patterns,
- Nodes: make([]Node, 0),
- EOLComment: comment,
- spaceBeforeComment: spaceBeforeComment,
- hasEquals: hasEquals,
- })
- return p.parseStart
- }
- lastHost := p.config.Hosts[len(p.config.Hosts)-1]
- if strings.ToLower(key.val) == "include" {
- inc, err := NewInclude(strings.Split(val.val, " "), hasEquals, key.Position, comment, p.system, p.depth+1)
- if err == ErrDepthExceeded {
- p.raiseError(val, err)
- return nil
- }
- if err != nil {
- p.raiseErrorf(val, "Error parsing Include directive: %v", err)
- return nil
- }
- lastHost.Nodes = append(lastHost.Nodes, inc)
- return p.parseStart
- }
- shortval := strings.TrimRightFunc(val.val, unicode.IsSpace)
- spaceAfterValue := val.val[len(shortval):]
- kv := &KV{
- Key: key.val,
- Value: shortval,
- spaceAfterValue: spaceAfterValue,
- Comment: comment,
- hasEquals: hasEquals,
- leadingSpace: key.Position.Col - 1,
- position: key.Position,
- }
- lastHost.Nodes = append(lastHost.Nodes, kv)
- return p.parseStart
-}
-
-func (p *sshParser) parseComment() sshParserStateFn {
- comment := p.getToken()
- lastHost := p.config.Hosts[len(p.config.Hosts)-1]
- lastHost.Nodes = append(lastHost.Nodes, &Empty{
- Comment: comment.val,
- // account for the "#" as well
- leadingSpace: comment.Position.Col - 2,
- position: comment.Position,
- })
- return p.parseStart
-}
-
-func parseSSH(flow chan token, system bool, depth uint8) *Config {
- // Ensure we consume tokens to completion even if parser exits early
- defer func() {
- for range flow {
- }
- }()
-
- result := newConfig()
- result.position = Position{1, 1}
- parser := &sshParser{
- flow: flow,
- config: result,
- tokensBuffer: make([]token, 0),
- currentTable: make([]string, 0),
- seenTableKeys: make([]string, 0),
- system: system,
- depth: depth,
- }
- parser.run()
- return result
-}
diff --git a/vendor/github.com/kevinburke/ssh_config/position.go b/vendor/github.com/kevinburke/ssh_config/position.go
deleted file mode 100644
index e0b5e3fb33c..00000000000
--- a/vendor/github.com/kevinburke/ssh_config/position.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package ssh_config
-
-import "fmt"
-
-// Position of a document element within a SSH document.
-//
-// Line and Col are both 1-indexed positions for the element's line number and
-// column number, respectively. Values of zero or less will cause Invalid(),
-// to return true.
-type Position struct {
- Line int // line within the document
- Col int // column within the line
-}
-
-// String representation of the position.
-// Displays 1-indexed line and column numbers.
-func (p Position) String() string {
- return fmt.Sprintf("(%d, %d)", p.Line, p.Col)
-}
-
-// Invalid returns whether or not the position is valid (i.e. with negative or
-// null values)
-func (p Position) Invalid() bool {
- return p.Line <= 0 || p.Col <= 0
-}
diff --git a/vendor/github.com/kevinburke/ssh_config/token.go b/vendor/github.com/kevinburke/ssh_config/token.go
deleted file mode 100644
index a0ecbb2bb7d..00000000000
--- a/vendor/github.com/kevinburke/ssh_config/token.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package ssh_config
-
-import "fmt"
-
-type token struct {
- Position
- typ tokenType
- val string
-}
-
-func (t token) String() string {
- switch t.typ {
- case tokenEOF:
- return "EOF"
- }
- return fmt.Sprintf("%q", t.val)
-}
-
-type tokenType int
-
-const (
- eof = -(iota + 1)
-)
-
-const (
- tokenError tokenType = iota
- tokenEOF
- tokenEmptyLine
- tokenComment
- tokenKey
- tokenEquals
- tokenString
-)
-
-func isSpace(r rune) bool {
- return r == ' ' || r == '\t'
-}
-
-func isKeyStartChar(r rune) bool {
- return !(isSpace(r) || r == '\r' || r == '\n' || r == eof)
-}
-
-// I'm not sure that this is correct
-func isKeyChar(r rune) bool {
- // Keys start with the first character that isn't whitespace or [ and end
- // with the last non-whitespace character before the equals sign. Keys
- // cannot contain a # character."
- return !(r == '\r' || r == '\n' || r == eof || r == '=')
-}
diff --git a/vendor/github.com/kevinburke/ssh_config/validators.go b/vendor/github.com/kevinburke/ssh_config/validators.go
deleted file mode 100644
index 5977f90960f..00000000000
--- a/vendor/github.com/kevinburke/ssh_config/validators.go
+++ /dev/null
@@ -1,186 +0,0 @@
-package ssh_config
-
-import (
- "fmt"
- "strconv"
- "strings"
-)
-
-// Default returns the default value for the given keyword, for example "22" if
-// the keyword is "Port". Default returns the empty string if the keyword has no
-// default, or if the keyword is unknown. Keyword matching is case-insensitive.
-//
-// Default values are provided by OpenSSH_7.4p1 on a Mac.
-func Default(keyword string) string {
- return defaults[strings.ToLower(keyword)]
-}
-
-// Arguments where the value must be "yes" or "no" and *only* yes or no.
-var yesnos = map[string]bool{
- strings.ToLower("BatchMode"): true,
- strings.ToLower("CanonicalizeFallbackLocal"): true,
- strings.ToLower("ChallengeResponseAuthentication"): true,
- strings.ToLower("CheckHostIP"): true,
- strings.ToLower("ClearAllForwardings"): true,
- strings.ToLower("Compression"): true,
- strings.ToLower("EnableSSHKeysign"): true,
- strings.ToLower("ExitOnForwardFailure"): true,
- strings.ToLower("ForwardAgent"): true,
- strings.ToLower("ForwardX11"): true,
- strings.ToLower("ForwardX11Trusted"): true,
- strings.ToLower("GatewayPorts"): true,
- strings.ToLower("GSSAPIAuthentication"): true,
- strings.ToLower("GSSAPIDelegateCredentials"): true,
- strings.ToLower("HostbasedAuthentication"): true,
- strings.ToLower("IdentitiesOnly"): true,
- strings.ToLower("KbdInteractiveAuthentication"): true,
- strings.ToLower("NoHostAuthenticationForLocalhost"): true,
- strings.ToLower("PasswordAuthentication"): true,
- strings.ToLower("PermitLocalCommand"): true,
- strings.ToLower("PubkeyAuthentication"): true,
- strings.ToLower("RhostsRSAAuthentication"): true,
- strings.ToLower("RSAAuthentication"): true,
- strings.ToLower("StreamLocalBindUnlink"): true,
- strings.ToLower("TCPKeepAlive"): true,
- strings.ToLower("UseKeychain"): true,
- strings.ToLower("UsePrivilegedPort"): true,
- strings.ToLower("VisualHostKey"): true,
-}
-
-var uints = map[string]bool{
- strings.ToLower("CanonicalizeMaxDots"): true,
- strings.ToLower("CompressionLevel"): true, // 1 to 9
- strings.ToLower("ConnectionAttempts"): true,
- strings.ToLower("ConnectTimeout"): true,
- strings.ToLower("NumberOfPasswordPrompts"): true,
- strings.ToLower("Port"): true,
- strings.ToLower("ServerAliveCountMax"): true,
- strings.ToLower("ServerAliveInterval"): true,
-}
-
-func mustBeYesOrNo(lkey string) bool {
- return yesnos[lkey]
-}
-
-func mustBeUint(lkey string) bool {
- return uints[lkey]
-}
-
-func validate(key, val string) error {
- lkey := strings.ToLower(key)
- if mustBeYesOrNo(lkey) && (val != "yes" && val != "no") {
- return fmt.Errorf("ssh_config: value for key %q must be 'yes' or 'no', got %q", key, val)
- }
- if mustBeUint(lkey) {
- _, err := strconv.ParseUint(val, 10, 64)
- if err != nil {
- return fmt.Errorf("ssh_config: %v", err)
- }
- }
- return nil
-}
-
-var defaults = map[string]string{
- strings.ToLower("AddKeysToAgent"): "no",
- strings.ToLower("AddressFamily"): "any",
- strings.ToLower("BatchMode"): "no",
- strings.ToLower("CanonicalizeFallbackLocal"): "yes",
- strings.ToLower("CanonicalizeHostname"): "no",
- strings.ToLower("CanonicalizeMaxDots"): "1",
- strings.ToLower("ChallengeResponseAuthentication"): "yes",
- strings.ToLower("CheckHostIP"): "yes",
- // TODO is this still the correct cipher
- strings.ToLower("Cipher"): "3des",
- strings.ToLower("Ciphers"): "chacha20-poly1305@openssh.com,aes128-ctr,aes192-ctr,aes256-ctr,aes128-gcm@openssh.com,aes256-gcm@openssh.com,aes128-cbc,aes192-cbc,aes256-cbc",
- strings.ToLower("ClearAllForwardings"): "no",
- strings.ToLower("Compression"): "no",
- strings.ToLower("CompressionLevel"): "6",
- strings.ToLower("ConnectionAttempts"): "1",
- strings.ToLower("ControlMaster"): "no",
- strings.ToLower("EnableSSHKeysign"): "no",
- strings.ToLower("EscapeChar"): "~",
- strings.ToLower("ExitOnForwardFailure"): "no",
- strings.ToLower("FingerprintHash"): "sha256",
- strings.ToLower("ForwardAgent"): "no",
- strings.ToLower("ForwardX11"): "no",
- strings.ToLower("ForwardX11Timeout"): "20m",
- strings.ToLower("ForwardX11Trusted"): "no",
- strings.ToLower("GatewayPorts"): "no",
- strings.ToLower("GlobalKnownHostsFile"): "/etc/ssh/ssh_known_hosts /etc/ssh/ssh_known_hosts2",
- strings.ToLower("GSSAPIAuthentication"): "no",
- strings.ToLower("GSSAPIDelegateCredentials"): "no",
- strings.ToLower("HashKnownHosts"): "no",
- strings.ToLower("HostbasedAuthentication"): "no",
-
- strings.ToLower("HostbasedKeyTypes"): "ecdsa-sha2-nistp256-cert-v01@openssh.com,ecdsa-sha2-nistp384-cert-v01@openssh.com,ecdsa-sha2-nistp521-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,ssh-rsa-cert-v01@openssh.com,ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,ssh-ed25519,ssh-rsa",
- strings.ToLower("HostKeyAlgorithms"): "ecdsa-sha2-nistp256-cert-v01@openssh.com,ecdsa-sha2-nistp384-cert-v01@openssh.com,ecdsa-sha2-nistp521-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,ssh-rsa-cert-v01@openssh.com,ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,ssh-ed25519,ssh-rsa",
- // HostName has a dynamic default (the value passed at the command line).
-
- strings.ToLower("IdentitiesOnly"): "no",
- strings.ToLower("IdentityFile"): "~/.ssh/identity",
-
- // IPQoS has a dynamic default based on interactive or non-interactive
- // sessions.
-
- strings.ToLower("KbdInteractiveAuthentication"): "yes",
-
- strings.ToLower("KexAlgorithms"): "curve25519-sha256,curve25519-sha256@libssh.org,ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256,diffie-hellman-group-exchange-sha1,diffie-hellman-group14-sha1",
- strings.ToLower("LogLevel"): "INFO",
- strings.ToLower("MACs"): "umac-64-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-sha2-512-etm@openssh.com,hmac-sha1-etm@openssh.com,umac-64@openssh.com,umac-128@openssh.com,hmac-sha2-256,hmac-sha2-512,hmac-sha1",
-
- strings.ToLower("NoHostAuthenticationForLocalhost"): "no",
- strings.ToLower("NumberOfPasswordPrompts"): "3",
- strings.ToLower("PasswordAuthentication"): "yes",
- strings.ToLower("PermitLocalCommand"): "no",
- strings.ToLower("Port"): "22",
-
- strings.ToLower("PreferredAuthentications"): "gssapi-with-mic,hostbased,publickey,keyboard-interactive,password",
- strings.ToLower("Protocol"): "2",
- strings.ToLower("ProxyUseFdpass"): "no",
- strings.ToLower("PubkeyAcceptedKeyTypes"): "ecdsa-sha2-nistp256-cert-v01@openssh.com,ecdsa-sha2-nistp384-cert-v01@openssh.com,ecdsa-sha2-nistp521-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,ssh-rsa-cert-v01@openssh.com,ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,ssh-ed25519,ssh-rsa",
- strings.ToLower("PubkeyAuthentication"): "yes",
- strings.ToLower("RekeyLimit"): "default none",
- strings.ToLower("RhostsRSAAuthentication"): "no",
- strings.ToLower("RSAAuthentication"): "yes",
-
- strings.ToLower("ServerAliveCountMax"): "3",
- strings.ToLower("ServerAliveInterval"): "0",
- strings.ToLower("StreamLocalBindMask"): "0177",
- strings.ToLower("StreamLocalBindUnlink"): "no",
- strings.ToLower("StrictHostKeyChecking"): "ask",
- strings.ToLower("TCPKeepAlive"): "yes",
- strings.ToLower("Tunnel"): "no",
- strings.ToLower("TunnelDevice"): "any:any",
- strings.ToLower("UpdateHostKeys"): "no",
- strings.ToLower("UseKeychain"): "no",
- strings.ToLower("UsePrivilegedPort"): "no",
-
- strings.ToLower("UserKnownHostsFile"): "~/.ssh/known_hosts ~/.ssh/known_hosts2",
- strings.ToLower("VerifyHostKeyDNS"): "no",
- strings.ToLower("VisualHostKey"): "no",
- strings.ToLower("XAuthLocation"): "/usr/X11R6/bin/xauth",
-}
-
-// these identities are used for SSH protocol 2
-var defaultProtocol2Identities = []string{
- "~/.ssh/id_dsa",
- "~/.ssh/id_ecdsa",
- "~/.ssh/id_ed25519",
- "~/.ssh/id_rsa",
-}
-
-// these directives support multiple items that can be collected
-// across multiple files
-var pluralDirectives = map[string]bool{
- "CertificateFile": true,
- "IdentityFile": true,
- "DynamicForward": true,
- "RemoteForward": true,
- "SendEnv": true,
- "SetEnv": true,
-}
-
-// SupportsMultiple reports whether a directive can be specified multiple times.
-func SupportsMultiple(key string) bool {
- return pluralDirectives[strings.ToLower(key)]
-}
diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml
index 4c28dff4655..a22953805c6 100644
--- a/vendor/github.com/klauspost/compress/.goreleaser.yml
+++ b/vendor/github.com/klauspost/compress/.goreleaser.yml
@@ -3,7 +3,6 @@
before:
hooks:
- ./gen.sh
- - go install mvdan.cc/garble@v0.10.1
builds:
-
@@ -32,7 +31,6 @@ builds:
- mips64le
goarm:
- 7
- gobinary: garble
-
id: "s2d"
binary: s2d
@@ -59,7 +57,6 @@ builds:
- mips64le
goarm:
- 7
- gobinary: garble
-
id: "s2sx"
binary: s2sx
@@ -87,7 +84,6 @@ builds:
- mips64le
goarm:
- 7
- gobinary: garble
archives:
-
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
index 7e83f583c00..05c7359e481 100644
--- a/vendor/github.com/klauspost/compress/README.md
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -16,6 +16,30 @@ This package provides various compression algorithms.
# changelog
+* Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6)
+ * zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923
+ * s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925
+
+* Jan 26th, 2024 - [v1.17.5](https://github.com/klauspost/compress/releases/tag/v1.17.5)
+ * flate: Fix reset with dictionary on custom window encodes https://github.com/klauspost/compress/pull/912
+ * zstd: Add Frame header encoding and stripping https://github.com/klauspost/compress/pull/908
+ * zstd: Limit better/best default window to 8MB https://github.com/klauspost/compress/pull/913
+ * zstd: Speed improvements by @greatroar in https://github.com/klauspost/compress/pull/896 https://github.com/klauspost/compress/pull/910
+ * s2: Fix callbacks for skippable blocks and disallow 0xfe (Padding) by @Jille in https://github.com/klauspost/compress/pull/916 https://github.com/klauspost/compress/pull/917
+https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/compress/pull/918
+
+* Dec 1st, 2023 - [v1.17.4](https://github.com/klauspost/compress/releases/tag/v1.17.4)
+ * huff0: Speed up symbol counting by @greatroar in https://github.com/klauspost/compress/pull/887
+ * huff0: Remove byteReader by @greatroar in https://github.com/klauspost/compress/pull/886
+ * gzhttp: Allow overriding decompression on transport https://github.com/klauspost/compress/pull/892
+ * gzhttp: Clamp compression level https://github.com/klauspost/compress/pull/890
+ * gzip: Error out if reserved bits are set https://github.com/klauspost/compress/pull/891
+
+* Nov 15th, 2023 - [v1.17.3](https://github.com/klauspost/compress/releases/tag/v1.17.3)
+ * fse: Fix max header size https://github.com/klauspost/compress/pull/881
+ * zstd: Improve better/best compression https://github.com/klauspost/compress/pull/877
+ * gzhttp: Fix missing content type on Close https://github.com/klauspost/compress/pull/883
+
* Oct 22nd, 2023 - [v1.17.2](https://github.com/klauspost/compress/releases/tag/v1.17.2)
* zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876
@@ -31,6 +55,10 @@ This package provides various compression algorithms.
* s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839
* flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837
* gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860
+
+
+ See changes to v1.16.x
+
* July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7)
* zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829
@@ -69,6 +97,7 @@ This package provides various compression algorithms.
* s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748
* s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747
* s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746
+
See changes to v1.15.x
@@ -536,6 +565,8 @@ the stateless compress described below.
For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing).
+To disable all assembly add `-tags=noasm`. This works across all packages.
+
# Stateless compression
This package offers stateless compression as a special option for gzip/deflate.
@@ -554,7 +585,7 @@ For direct deflate use, NewStatelessWriter and StatelessDeflate are available. S
A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer:
-```
+```go
// replace 'ioutil.Discard' with your output.
gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression)
if err != nil {
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
index 2aa6a95a028..2754bac6f16 100644
--- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
+++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
@@ -51,7 +51,7 @@ func emitCopy(dst []byte, offset, length int) int {
i := 0
// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
// threshold for this loop is a little higher (at 68 = 64 + 4), and the
- // length emitted down below is is a little lower (at 60 = 64 - 4), because
+ // length emitted down below is a little lower (at 60 = 64 - 4), because
// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod
index 2263853fcad..5a4412f9070 100644
--- a/vendor/github.com/klauspost/compress/s2sx.mod
+++ b/vendor/github.com/klauspost/compress/s2sx.mod
@@ -1,4 +1,4 @@
module github.com/klauspost/compress
-go 1.16
+go 1.19
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go
index 9f17ce601ff..03744fbc765 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go
@@ -554,6 +554,9 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
if debugDecoder {
printf("Compression modes: 0b%b", compMode)
}
+ if compMode&3 != 0 {
+ return errors.New("corrupt block: reserved bits not zero")
+ }
for i := uint(0); i < 3; i++ {
mode := seqCompMode((compMode >> (6 - i*2)) & 3)
if debugDecoder {
diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go
index 2cfe925ade5..32a7f401d5d 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockenc.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go
@@ -427,6 +427,16 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error {
return nil
}
+// encodeRLE will encode an RLE block.
+func (b *blockEnc) encodeRLE(val byte, length uint32) {
+ var bh blockHeader
+ bh.setLast(b.last)
+ bh.setSize(length)
+ bh.setType(blockTypeRLE)
+ b.output = bh.appendTo(b.output)
+ b.output = append(b.output, val)
+}
+
// fuzzFseEncoder can be used to fuzz the FSE encoder.
func fuzzFseEncoder(data []byte) int {
if len(data) > maxSequences || len(data) < 2 {
@@ -479,6 +489,16 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
if len(b.sequences) == 0 {
return b.encodeLits(b.literals, rawAllLits)
}
+ if len(b.sequences) == 1 && len(org) > 0 && len(b.literals) <= 1 {
+ // Check common RLE cases.
+ seq := b.sequences[0]
+ if seq.litLen == uint32(len(b.literals)) && seq.offset-3 == 1 {
+ // Offset == 1 and 0 or 1 literals.
+ b.encodeRLE(org[0], b.sequences[0].matchLen+zstdMinMatch+seq.litLen)
+ return nil
+ }
+ }
+
// We want some difference to at least account for the headers.
saved := b.size - len(b.literals) - (b.size >> 6)
if saved < 16 {
diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go
index f6a240970d4..6a5a2988b6f 100644
--- a/vendor/github.com/klauspost/compress/zstd/decodeheader.go
+++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go
@@ -95,42 +95,54 @@ type Header struct {
// If there isn't enough input, io.ErrUnexpectedEOF is returned.
// The FirstBlock.OK will indicate if enough information was available to decode the first block header.
func (h *Header) Decode(in []byte) error {
+ _, err := h.DecodeAndStrip(in)
+ return err
+}
+
+// DecodeAndStrip will decode the header from the beginning of the stream
+// and on success return the remaining bytes.
+// This will decode the frame header and the first block header if enough bytes are provided.
+// It is recommended to provide at least HeaderMaxSize bytes.
+// If the frame header cannot be read an error will be returned.
+// If there isn't enough input, io.ErrUnexpectedEOF is returned.
+// The FirstBlock.OK will indicate if enough information was available to decode the first block header.
+func (h *Header) DecodeAndStrip(in []byte) (remain []byte, err error) {
*h = Header{}
if len(in) < 4 {
- return io.ErrUnexpectedEOF
+ return nil, io.ErrUnexpectedEOF
}
h.HeaderSize += 4
b, in := in[:4], in[4:]
if string(b) != frameMagic {
if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 {
- return ErrMagicMismatch
+ return nil, ErrMagicMismatch
}
if len(in) < 4 {
- return io.ErrUnexpectedEOF
+ return nil, io.ErrUnexpectedEOF
}
h.HeaderSize += 4
h.Skippable = true
h.SkippableID = int(b[0] & 0xf)
h.SkippableSize = binary.LittleEndian.Uint32(in)
- return nil
+ return in[4:], nil
}
// Read Window_Descriptor
// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor
if len(in) < 1 {
- return io.ErrUnexpectedEOF
+ return nil, io.ErrUnexpectedEOF
}
fhd, in := in[0], in[1:]
h.HeaderSize++
h.SingleSegment = fhd&(1<<5) != 0
h.HasCheckSum = fhd&(1<<2) != 0
if fhd&(1<<3) != 0 {
- return errors.New("reserved bit set on frame header")
+ return nil, errors.New("reserved bit set on frame header")
}
if !h.SingleSegment {
if len(in) < 1 {
- return io.ErrUnexpectedEOF
+ return nil, io.ErrUnexpectedEOF
}
var wd byte
wd, in = in[0], in[1:]
@@ -148,7 +160,7 @@ func (h *Header) Decode(in []byte) error {
size = 4
}
if len(in) < int(size) {
- return io.ErrUnexpectedEOF
+ return nil, io.ErrUnexpectedEOF
}
b, in = in[:size], in[size:]
h.HeaderSize += int(size)
@@ -178,7 +190,7 @@ func (h *Header) Decode(in []byte) error {
if fcsSize > 0 {
h.HasFCS = true
if len(in) < fcsSize {
- return io.ErrUnexpectedEOF
+ return nil, io.ErrUnexpectedEOF
}
b, in = in[:fcsSize], in[fcsSize:]
h.HeaderSize += int(fcsSize)
@@ -199,7 +211,7 @@ func (h *Header) Decode(in []byte) error {
// Frame Header done, we will not fail from now on.
if len(in) < 3 {
- return nil
+ return in, nil
}
tmp := in[:3]
bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16)
@@ -209,7 +221,7 @@ func (h *Header) Decode(in []byte) error {
cSize := int(bh >> 3)
switch blockType {
case blockTypeReserved:
- return nil
+ return in, nil
case blockTypeRLE:
h.FirstBlock.Compressed = true
h.FirstBlock.DecompressedSize = cSize
@@ -225,5 +237,25 @@ func (h *Header) Decode(in []byte) error {
}
h.FirstBlock.OK = true
- return nil
+ return in, nil
+}
+
+// AppendTo will append the encoded header to the dst slice.
+// There is no error checking performed on the header values.
+func (h *Header) AppendTo(dst []byte) ([]byte, error) {
+ if h.Skippable {
+ magic := [4]byte{0x50, 0x2a, 0x4d, 0x18}
+ magic[0] |= byte(h.SkippableID & 0xf)
+ dst = append(dst, magic[:]...)
+ f := h.SkippableSize
+ return append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)), nil
+ }
+ f := frameHeader{
+ ContentSize: h.FrameContentSize,
+ WindowSize: uint32(h.WindowSize),
+ SingleSegment: h.SingleSegment,
+ Checksum: h.HasCheckSum,
+ DictID: h.DictionaryID,
+ }
+ return f.appendTo(dst), nil
}
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go
index f04aaa21eb8..bbca17234aa 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -82,7 +82,7 @@ var (
// can run multiple concurrent stateless decodes. It is even possible to
// use stateless decodes while a stream is being decoded.
//
-// The Reset function can be used to initiate a new stream, which is will considerably
+// The Reset function can be used to initiate a new stream, which will considerably
// reduce the allocations normally caused by NewReader.
func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
initPredefined()
diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go
index 8d5567fe64c..b7b83164bc7 100644
--- a/vendor/github.com/klauspost/compress/zstd/dict.go
+++ b/vendor/github.com/klauspost/compress/zstd/dict.go
@@ -273,6 +273,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) {
enc.Encode(&block, b)
addValues(&remain, block.literals)
litTotal += len(block.literals)
+ if len(block.sequences) == 0 {
+ continue
+ }
seqs += len(block.sequences)
block.genCodes()
addHist(&ll, block.coders.llEnc.Histogram())
@@ -286,6 +289,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) {
if offset == 0 {
continue
}
+ if int(offset) >= len(o.History) {
+ continue
+ }
if offset > 3 {
newOffsets[offset-3]++
} else {
@@ -336,6 +342,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) {
if seqs/nUsed < 512 {
// Use 512 as minimum.
nUsed = seqs / 512
+ if nUsed == 0 {
+ nUsed = 1
+ }
}
copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) {
hist := dst.Histogram()
@@ -358,6 +367,28 @@ func BuildDict(o BuildDictOptions) ([]byte, error) {
fakeLength += v
hist[i] = uint32(v)
}
+
+ // Ensure we aren't trying to represent RLE.
+ if maxCount == fakeLength {
+ for i := range hist {
+ if uint8(i) == maxSym {
+ fakeLength++
+ maxSym++
+ hist[i+1] = 1
+ if maxSym > 1 {
+ break
+ }
+ }
+ if hist[0] == 0 {
+ fakeLength++
+ hist[i] = 1
+ if maxSym > 1 {
+ break
+ }
+ }
+ }
+ }
+
dst.HistogramFinished(maxSym, maxCount)
dst.reUsed = false
dst.useRLE = false
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go
index c81a15357af..4613724e9d1 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_best.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go
@@ -135,8 +135,20 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
break
}
+ // Add block to history
s := e.addBlock(src)
blk.size = len(src)
+
+ // Check RLE first
+ if len(src) > zstdMinMatch {
+ ml := matchLen(src[1:], src)
+ if ml == len(src)-1 {
+ blk.literals = append(blk.literals, src[0])
+ blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3})
+ return
+ }
+ }
+
if len(src) < minNonLiteralBlockSize {
blk.extraLits = len(src)
blk.literals = blk.literals[:len(src)]
@@ -201,14 +213,6 @@ encodeLoop:
if delta >= e.maxMatchOff || delta <= 0 || load3232(src, offset) != first {
return
}
- if debugAsserts {
- if offset >= s {
- panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff))
- }
- if !bytes.Equal(src[s:s+4], src[offset:offset+4]) {
- panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first))
- }
- }
// Try to quick reject if we already have a long match.
if m.length > 16 {
left := len(src) - int(m.s+m.length)
@@ -227,8 +231,10 @@ encodeLoop:
}
}
l := 4 + e.matchlen(s+4, offset+4, src)
- if true {
+ if m.rep <= 0 {
// Extend candidate match backwards as far as possible.
+ // Do not extend repeats as we can assume they are optimal
+ // and offsets change if s == nextEmit.
tMin := s - e.maxMatchOff
if tMin < 0 {
tMin = 0
@@ -239,7 +245,14 @@ encodeLoop:
l++
}
}
-
+ if debugAsserts {
+ if offset >= s {
+ panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff))
+ }
+ if !bytes.Equal(src[s:s+l], src[offset:offset+l]) {
+ panic(fmt.Sprintf("second match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first))
+ }
+ }
cand := match{offset: offset, s: s, length: l, rep: rep}
cand.estBits(bitsPerByte)
if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 {
@@ -336,24 +349,31 @@ encodeLoop:
}
if debugAsserts {
+ if best.offset >= best.s {
+ panic(fmt.Sprintf("best.offset > s: %d >= %d", best.offset, best.s))
+ }
+ if best.s < nextEmit {
+ panic(fmt.Sprintf("s %d < nextEmit %d", best.s, nextEmit))
+ }
+ if best.offset < s-e.maxMatchOff {
+ panic(fmt.Sprintf("best.offset < s-e.maxMatchOff: %d < %d", best.offset, s-e.maxMatchOff))
+ }
if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) {
panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]))
}
}
// We have a match, we can store the forward value
+ s = best.s
if best.rep > 0 {
var seq seq
seq.matchLen = uint32(best.length - zstdMinMatch)
- if debugAsserts && s < nextEmit {
- panic("s < nextEmit")
- }
addLiterals(&seq, best.s)
// Repeat. If bit 4 is set, this is a non-lit repeat.
seq.offset = uint32(best.rep & 3)
if debugSequences {
- println("repeat sequence", seq, "next s:", s)
+ println("repeat sequence", seq, "next s:", best.s, "off:", best.s-best.offset)
}
blk.sequences = append(blk.sequences, seq)
@@ -396,7 +416,6 @@ encodeLoop:
// A 4-byte match has been found. Update recent offsets.
// We'll later see if more than 4 bytes.
- s = best.s
t := best.offset
offset1, offset2, offset3 = s-t, offset1, offset2
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go
index 20d25b0e052..a4f5bf91fc6 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_better.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go
@@ -102,9 +102,20 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) {
e.cur = e.maxMatchOff
break
}
-
+ // Add block to history
s := e.addBlock(src)
blk.size = len(src)
+
+ // Check RLE first
+ if len(src) > zstdMinMatch {
+ ml := matchLen(src[1:], src)
+ if ml == len(src)-1 {
+ blk.literals = append(blk.literals, src[0])
+ blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3})
+ return
+ }
+ }
+
if len(src) < minNonLiteralBlockSize {
blk.extraLits = len(src)
blk.literals = blk.literals[:len(src)]
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
index faaf81921cd..20671dcb91d 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
@@ -94,7 +94,7 @@ func WithEncoderConcurrency(n int) EOption {
// The value must be a power of two between MinWindowSize and MaxWindowSize.
// A larger value will enable better compression but allocate more memory and,
// for above-default values, take considerably longer.
-// The default value is determined by the compression level.
+// The default value is determined by the compression level and max 8MB.
func WithWindowSize(n int) EOption {
return func(o *encoderOptions) error {
switch {
@@ -232,9 +232,9 @@ func WithEncoderLevel(l EncoderLevel) EOption {
case SpeedDefault:
o.windowSize = 8 << 20
case SpeedBetterCompression:
- o.windowSize = 16 << 20
+ o.windowSize = 8 << 20
case SpeedBestCompression:
- o.windowSize = 32 << 20
+ o.windowSize = 8 << 20
}
}
if !o.customALEntropy {
diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go
index 2f5d5ed4546..667ca06794e 100644
--- a/vendor/github.com/klauspost/compress/zstd/frameenc.go
+++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go
@@ -76,7 +76,7 @@ func (f frameHeader) appendTo(dst []byte) []byte {
if f.SingleSegment {
dst = append(dst, uint8(f.ContentSize))
}
- // Unless SingleSegment is set, framessizes < 256 are nto stored.
+ // Unless SingleSegment is set, framessizes < 256 are not stored.
case 1:
f.ContentSize -= 256
dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8))
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go
index 332e51fe44f..8adfebb0297 100644
--- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go
@@ -20,10 +20,9 @@ func (s *fseDecoder) buildDtable() error {
if v == -1 {
s.dt[highThreshold].setAddBits(uint8(i))
highThreshold--
- symbolNext[i] = 1
- } else {
- symbolNext[i] = uint16(v)
+ v = 1
}
+ symbolNext[i] = uint16(v)
}
}
@@ -35,10 +34,12 @@ func (s *fseDecoder) buildDtable() error {
for ss, v := range s.norm[:s.symbolLen] {
for i := 0; i < int(v); i++ {
s.dt[position].setAddBits(uint8(ss))
- position = (position + step) & tableMask
- for position > highThreshold {
+ for {
// lowprob area
position = (position + step) & tableMask
+ if position <= highThreshold {
+ break
+ }
}
}
}
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
index 17901e08040..ae7d4d3295a 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
@@ -162,12 +162,12 @@ finalize:
MOVD h, ret+24(FP)
RET
-// func writeBlocks(d *Digest, b []byte) int
+// func writeBlocks(s *Digest, b []byte) int
TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
LDP ·primes+0(SB), (prime1, prime2)
// Load state. Assume v[1-4] are stored contiguously.
- MOVD d+0(FP), digest
+ MOVD s+0(FP), digest
LDP 0(digest), (v1, v2)
LDP 16(digest), (v3, v4)
diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s
index 9a7655c0f76..0782b86e3d1 100644
--- a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s
+++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s
@@ -5,7 +5,6 @@
#include "textflag.h"
// func matchLen(a []byte, b []byte) int
-// Requires: BMI
TEXT ·matchLen(SB), NOSPLIT, $0-56
MOVQ a_base+0(FP), AX
MOVQ b_base+24(FP), CX
@@ -17,17 +16,16 @@ TEXT ·matchLen(SB), NOSPLIT, $0-56
JB matchlen_match4_standalone
matchlen_loopback_standalone:
- MOVQ (AX)(SI*1), BX
- XORQ (CX)(SI*1), BX
- TESTQ BX, BX
- JZ matchlen_loop_standalone
+ MOVQ (AX)(SI*1), BX
+ XORQ (CX)(SI*1), BX
+ JZ matchlen_loop_standalone
#ifdef GOAMD64_v3
TZCNTQ BX, BX
#else
BSFQ BX, BX
#endif
- SARQ $0x03, BX
+ SHRL $0x03, BX
LEAL (SI)(BX*1), SI
JMP gen_match_len_end
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
index 974b99725fd..5b06174b898 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
@@ -157,8 +157,7 @@ sequenceDecs_decode_amd64_ll_update_zero:
// Update Literal Length State
MOVBQZX DI, R14
- SHRQ $0x10, DI
- MOVWQZX DI, DI
+ SHRL $0x10, DI
LEAQ (BX)(R14*1), CX
MOVQ DX, R15
MOVQ CX, BX
@@ -177,8 +176,7 @@ sequenceDecs_decode_amd64_ll_update_zero:
// Update Match Length State
MOVBQZX R8, R14
- SHRQ $0x10, R8
- MOVWQZX R8, R8
+ SHRL $0x10, R8
LEAQ (BX)(R14*1), CX
MOVQ DX, R15
MOVQ CX, BX
@@ -197,8 +195,7 @@ sequenceDecs_decode_amd64_ll_update_zero:
// Update Offset State
MOVBQZX R9, R14
- SHRQ $0x10, R9
- MOVWQZX R9, R9
+ SHRL $0x10, R9
LEAQ (BX)(R14*1), CX
MOVQ DX, R15
MOVQ CX, BX
@@ -459,8 +456,7 @@ sequenceDecs_decode_56_amd64_ll_update_zero:
// Update Literal Length State
MOVBQZX DI, R14
- SHRQ $0x10, DI
- MOVWQZX DI, DI
+ SHRL $0x10, DI
LEAQ (BX)(R14*1), CX
MOVQ DX, R15
MOVQ CX, BX
@@ -479,8 +475,7 @@ sequenceDecs_decode_56_amd64_ll_update_zero:
// Update Match Length State
MOVBQZX R8, R14
- SHRQ $0x10, R8
- MOVWQZX R8, R8
+ SHRL $0x10, R8
LEAQ (BX)(R14*1), CX
MOVQ DX, R15
MOVQ CX, BX
@@ -499,8 +494,7 @@ sequenceDecs_decode_56_amd64_ll_update_zero:
// Update Offset State
MOVBQZX R9, R14
- SHRQ $0x10, R9
- MOVWQZX R9, R9
+ SHRL $0x10, R9
LEAQ (BX)(R14*1), CX
MOVQ DX, R15
MOVQ CX, BX
@@ -772,11 +766,10 @@ sequenceDecs_decode_bmi2_fill_2_end:
BZHIQ R14, R15, R15
// Update Offset State
- BZHIQ R8, R15, CX
- SHRXQ R8, R15, R15
- MOVQ $0x00001010, R14
- BEXTRQ R14, R8, R8
- ADDQ CX, R8
+ BZHIQ R8, R15, CX
+ SHRXQ R8, R15, R15
+ SHRL $0x10, R8
+ ADDQ CX, R8
// Load ctx.ofTable
MOVQ ctx+16(FP), CX
@@ -784,11 +777,10 @@ sequenceDecs_decode_bmi2_fill_2_end:
MOVQ (CX)(R8*8), R8
// Update Match Length State
- BZHIQ DI, R15, CX
- SHRXQ DI, R15, R15
- MOVQ $0x00001010, R14
- BEXTRQ R14, DI, DI
- ADDQ CX, DI
+ BZHIQ DI, R15, CX
+ SHRXQ DI, R15, R15
+ SHRL $0x10, DI
+ ADDQ CX, DI
// Load ctx.mlTable
MOVQ ctx+16(FP), CX
@@ -796,10 +788,9 @@ sequenceDecs_decode_bmi2_fill_2_end:
MOVQ (CX)(DI*8), DI
// Update Literal Length State
- BZHIQ SI, R15, CX
- MOVQ $0x00001010, R14
- BEXTRQ R14, SI, SI
- ADDQ CX, SI
+ BZHIQ SI, R15, CX
+ SHRL $0x10, SI
+ ADDQ CX, SI
// Load ctx.llTable
MOVQ ctx+16(FP), CX
@@ -1032,11 +1023,10 @@ sequenceDecs_decode_56_bmi2_fill_end:
BZHIQ R14, R15, R15
// Update Offset State
- BZHIQ R8, R15, CX
- SHRXQ R8, R15, R15
- MOVQ $0x00001010, R14
- BEXTRQ R14, R8, R8
- ADDQ CX, R8
+ BZHIQ R8, R15, CX
+ SHRXQ R8, R15, R15
+ SHRL $0x10, R8
+ ADDQ CX, R8
// Load ctx.ofTable
MOVQ ctx+16(FP), CX
@@ -1044,11 +1034,10 @@ sequenceDecs_decode_56_bmi2_fill_end:
MOVQ (CX)(R8*8), R8
// Update Match Length State
- BZHIQ DI, R15, CX
- SHRXQ DI, R15, R15
- MOVQ $0x00001010, R14
- BEXTRQ R14, DI, DI
- ADDQ CX, DI
+ BZHIQ DI, R15, CX
+ SHRXQ DI, R15, R15
+ SHRL $0x10, DI
+ ADDQ CX, DI
// Load ctx.mlTable
MOVQ ctx+16(FP), CX
@@ -1056,10 +1045,9 @@ sequenceDecs_decode_56_bmi2_fill_end:
MOVQ (CX)(DI*8), DI
// Update Literal Length State
- BZHIQ SI, R15, CX
- MOVQ $0x00001010, R14
- BEXTRQ R14, SI, SI
- ADDQ CX, SI
+ BZHIQ SI, R15, CX
+ SHRL $0x10, SI
+ ADDQ CX, SI
// Load ctx.llTable
MOVQ ctx+16(FP), CX
@@ -1967,8 +1955,7 @@ sequenceDecs_decodeSync_amd64_ll_update_zero:
// Update Literal Length State
MOVBQZX DI, R13
- SHRQ $0x10, DI
- MOVWQZX DI, DI
+ SHRL $0x10, DI
LEAQ (BX)(R13*1), CX
MOVQ DX, R14
MOVQ CX, BX
@@ -1987,8 +1974,7 @@ sequenceDecs_decodeSync_amd64_ll_update_zero:
// Update Match Length State
MOVBQZX R8, R13
- SHRQ $0x10, R8
- MOVWQZX R8, R8
+ SHRL $0x10, R8
LEAQ (BX)(R13*1), CX
MOVQ DX, R14
MOVQ CX, BX
@@ -2007,8 +1993,7 @@ sequenceDecs_decodeSync_amd64_ll_update_zero:
// Update Offset State
MOVBQZX R9, R13
- SHRQ $0x10, R9
- MOVWQZX R9, R9
+ SHRL $0x10, R9
LEAQ (BX)(R13*1), CX
MOVQ DX, R14
MOVQ CX, BX
@@ -2514,11 +2499,10 @@ sequenceDecs_decodeSync_bmi2_fill_2_end:
BZHIQ R13, R14, R14
// Update Offset State
- BZHIQ R8, R14, CX
- SHRXQ R8, R14, R14
- MOVQ $0x00001010, R13
- BEXTRQ R13, R8, R8
- ADDQ CX, R8
+ BZHIQ R8, R14, CX
+ SHRXQ R8, R14, R14
+ SHRL $0x10, R8
+ ADDQ CX, R8
// Load ctx.ofTable
MOVQ ctx+16(FP), CX
@@ -2526,11 +2510,10 @@ sequenceDecs_decodeSync_bmi2_fill_2_end:
MOVQ (CX)(R8*8), R8
// Update Match Length State
- BZHIQ DI, R14, CX
- SHRXQ DI, R14, R14
- MOVQ $0x00001010, R13
- BEXTRQ R13, DI, DI
- ADDQ CX, DI
+ BZHIQ DI, R14, CX
+ SHRXQ DI, R14, R14
+ SHRL $0x10, DI
+ ADDQ CX, DI
// Load ctx.mlTable
MOVQ ctx+16(FP), CX
@@ -2538,10 +2521,9 @@ sequenceDecs_decodeSync_bmi2_fill_2_end:
MOVQ (CX)(DI*8), DI
// Update Literal Length State
- BZHIQ SI, R14, CX
- MOVQ $0x00001010, R13
- BEXTRQ R13, SI, SI
- ADDQ CX, SI
+ BZHIQ SI, R14, CX
+ SHRL $0x10, SI
+ ADDQ CX, SI
// Load ctx.llTable
MOVQ ctx+16(FP), CX
@@ -3055,8 +3037,7 @@ sequenceDecs_decodeSync_safe_amd64_ll_update_zero:
// Update Literal Length State
MOVBQZX DI, R13
- SHRQ $0x10, DI
- MOVWQZX DI, DI
+ SHRL $0x10, DI
LEAQ (BX)(R13*1), CX
MOVQ DX, R14
MOVQ CX, BX
@@ -3075,8 +3056,7 @@ sequenceDecs_decodeSync_safe_amd64_ll_update_zero:
// Update Match Length State
MOVBQZX R8, R13
- SHRQ $0x10, R8
- MOVWQZX R8, R8
+ SHRL $0x10, R8
LEAQ (BX)(R13*1), CX
MOVQ DX, R14
MOVQ CX, BX
@@ -3095,8 +3075,7 @@ sequenceDecs_decodeSync_safe_amd64_ll_update_zero:
// Update Offset State
MOVBQZX R9, R13
- SHRQ $0x10, R9
- MOVWQZX R9, R9
+ SHRL $0x10, R9
LEAQ (BX)(R13*1), CX
MOVQ DX, R14
MOVQ CX, BX
@@ -3704,11 +3683,10 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_end:
BZHIQ R13, R14, R14
// Update Offset State
- BZHIQ R8, R14, CX
- SHRXQ R8, R14, R14
- MOVQ $0x00001010, R13
- BEXTRQ R13, R8, R8
- ADDQ CX, R8
+ BZHIQ R8, R14, CX
+ SHRXQ R8, R14, R14
+ SHRL $0x10, R8
+ ADDQ CX, R8
// Load ctx.ofTable
MOVQ ctx+16(FP), CX
@@ -3716,11 +3694,10 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_end:
MOVQ (CX)(R8*8), R8
// Update Match Length State
- BZHIQ DI, R14, CX
- SHRXQ DI, R14, R14
- MOVQ $0x00001010, R13
- BEXTRQ R13, DI, DI
- ADDQ CX, DI
+ BZHIQ DI, R14, CX
+ SHRXQ DI, R14, R14
+ SHRL $0x10, DI
+ ADDQ CX, DI
// Load ctx.mlTable
MOVQ ctx+16(FP), CX
@@ -3728,10 +3705,9 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_end:
MOVQ (CX)(DI*8), DI
// Update Literal Length State
- BZHIQ SI, R14, CX
- MOVQ $0x00001010, R13
- BEXTRQ R13, SI, SI
- ADDQ CX, SI
+ BZHIQ SI, R14, CX
+ SHRL $0x10, SI
+ ADDQ CX, SI
// Load ctx.llTable
MOVQ ctx+16(FP), CX
diff --git a/vendor/github.com/mudler/entities/pkg/entities/shadow.go b/vendor/github.com/mudler/entities/pkg/entities/shadow.go
index a01531f47c4..0ac3a682b43 100644
--- a/vendor/github.com/mudler/entities/pkg/entities/shadow.go
+++ b/vendor/github.com/mudler/entities/pkg/entities/shadow.go
@@ -141,6 +141,11 @@ func (u Shadow) prepare() Shadow {
// POST: Set in last_changed the current days from 1970
now := time.Now()
days := now.Unix() / 24 / 60 / 60
+ // LastChanged field with value 0 has a special meaning, which is to change password on next login. We should never set it to zero.
+ // This avoids breaking ssh for example in systems that have no RTC clock or a broken one
+ if days == 0 {
+ days = 1
+ }
u.LastChanged = fmt.Sprintf("%d", days)
}
/*
diff --git a/vendor/github.com/pjbgf/sha1cd/Dockerfile.arm b/vendor/github.com/pjbgf/sha1cd/Dockerfile.arm
deleted file mode 100644
index 99761296fba..00000000000
--- a/vendor/github.com/pjbgf/sha1cd/Dockerfile.arm
+++ /dev/null
@@ -1,23 +0,0 @@
-FROM golang:1.20@sha256:2edf6aab2d57644f3fe7407132a0d1770846867465a39c2083770cf62734b05d
-
-ENV GOOS=linux
-ENV GOARCH=arm
-ENV CGO_ENABLED=1
-ENV CC=arm-linux-gnueabihf-gcc
-ENV PATH="/go/bin/${GOOS}_${GOARCH}:${PATH}"
-ENV PKG_CONFIG_PATH=/usr/lib/arm-linux-gnueabihf/pkgconfig
-
-RUN dpkg --add-architecture armhf \
- && apt update \
- && apt install -y --no-install-recommends \
- upx \
- gcc-arm-linux-gnueabihf \
- libc6-dev-armhf-cross \
- pkg-config \
- && rm -rf /var/lib/apt/lists/*
-
-COPY . /src/workdir
-
-WORKDIR /src/workdir
-
-RUN go build ./...
diff --git a/vendor/github.com/pjbgf/sha1cd/Dockerfile.arm64 b/vendor/github.com/pjbgf/sha1cd/Dockerfile.arm64
deleted file mode 100644
index 66bd09474fe..00000000000
--- a/vendor/github.com/pjbgf/sha1cd/Dockerfile.arm64
+++ /dev/null
@@ -1,23 +0,0 @@
-FROM golang:1.20@sha256:2edf6aab2d57644f3fe7407132a0d1770846867465a39c2083770cf62734b05d
-
-ENV GOOS=linux
-ENV GOARCH=arm64
-ENV CGO_ENABLED=1
-ENV CC=aarch64-linux-gnu-gcc
-ENV PATH="/go/bin/${GOOS}_${GOARCH}:${PATH}"
-ENV PKG_CONFIG_PATH=/usr/lib/aarch64-linux-gnu/pkgconfig
-
-# install build & runtime dependencies
-RUN dpkg --add-architecture arm64 \
- && apt update \
- && apt install -y --no-install-recommends \
- gcc-aarch64-linux-gnu \
- libc6-dev-arm64-cross \
- pkg-config \
- && rm -rf /var/lib/apt/lists/*
-
-COPY . /src/workdir
-
-WORKDIR /src/workdir
-
-RUN go build ./...
diff --git a/vendor/github.com/pjbgf/sha1cd/LICENSE b/vendor/github.com/pjbgf/sha1cd/LICENSE
deleted file mode 100644
index 261eeb9e9f8..00000000000
--- a/vendor/github.com/pjbgf/sha1cd/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/pjbgf/sha1cd/Makefile b/vendor/github.com/pjbgf/sha1cd/Makefile
deleted file mode 100644
index b24f2cbad61..00000000000
--- a/vendor/github.com/pjbgf/sha1cd/Makefile
+++ /dev/null
@@ -1,40 +0,0 @@
-FUZZ_TIME ?= 1m
-
-export CGO_ENABLED := 1
-
-.PHONY: test
-test:
- go test ./...
-
-.PHONY: bench
-bench:
- go test -benchmem -run=^$$ -bench ^Benchmark ./...
-
-.PHONY: fuzz
-fuzz:
- go test -tags gofuzz -fuzz=. -fuzztime=$(FUZZ_TIME) ./test/
-
-# Cross build project in arm/v7.
-build-arm:
- docker build -t sha1cd-arm -f Dockerfile.arm .
- docker run --rm sha1cd-arm
-
-# Cross build project in arm64.
-build-arm64:
- docker build -t sha1cd-arm64 -f Dockerfile.arm64 .
- docker run --rm sha1cd-arm64
-
-# Build with cgo disabled.
-build-nocgo:
- CGO_ENABLED=0 go build ./cgo
-
-# Run cross-compilation to assure supported architectures.
-cross-build: build-arm build-arm64 build-nocgo
-
-generate:
- go run sha1cdblock_amd64_asm.go -out sha1cdblock_amd64.s
- sed -i 's;&\samd64;&\n// +build !noasm,gc,amd64;g' sha1cdblock_amd64.s
-
-verify: generate
- git diff --exit-code
- go vet ./...
diff --git a/vendor/github.com/pjbgf/sha1cd/README.md b/vendor/github.com/pjbgf/sha1cd/README.md
deleted file mode 100644
index 378cf78cf7d..00000000000
--- a/vendor/github.com/pjbgf/sha1cd/README.md
+++ /dev/null
@@ -1,58 +0,0 @@
-# sha1cd
-
-A Go implementation of SHA1 with counter-cryptanalysis, which detects
-collision attacks.
-
-The `cgo/lib` code is a carbon copy of the [original code], based on
-the award winning [white paper] by Marc Stevens.
-
-The Go implementation is largely based off Go's generic sha1.
-At present no SIMD optimisations have been implemented.
-
-## Usage
-
-`sha1cd` can be used as a drop-in replacement for `crypto/sha1`:
-
-```golang
-import "github.com/pjbgf/sha1cd"
-
-func test(){
- data := []byte("data to be sha1 hashed")
- h := sha1cd.Sum(data)
- fmt.Printf("hash: %q\n", hex.EncodeToString(h))
-}
-```
-
-To obtain information as to whether a collision was found, use the
-func `CollisionResistantSum`.
-
-```golang
-import "github.com/pjbgf/sha1cd"
-
-func test(){
- data := []byte("data to be sha1 hashed")
- h, col := sha1cd.CollisionResistantSum(data)
- if col {
- fmt.Println("collision found!")
- }
- fmt.Printf("hash: %q", hex.EncodeToString(h))
-}
-```
-
-Note that the algorithm will automatically avoid collision, by
-extending the SHA1 to 240-steps, instead of 80 when a collision
-attempt is detected. Therefore, inputs that contains the unavoidable
-bit conditions will yield a different hash from `sha1cd`, when compared
-with results using `crypto/sha1`. Valid inputs will have matching the outputs.
-
-## References
-- https://shattered.io/
-- https://github.com/cr-marcstevens/sha1collisiondetection
-- https://csrc.nist.gov/Projects/Cryptographic-Algorithm-Validation-Program/Secure-Hashing#shavs
-
-## Use of the Original Implementation
-- https://github.com/git/git/commit/28dc98e343ca4eb370a29ceec4c19beac9b5c01e
-- https://github.com/libgit2/libgit2/pull/4136
-
-[original code]: https://github.com/cr-marcstevens/sha1collisiondetection
-[white paper]: https://marc-stevens.nl/research/papers/C13-S.pdf
diff --git a/vendor/github.com/pjbgf/sha1cd/detection.go b/vendor/github.com/pjbgf/sha1cd/detection.go
deleted file mode 100644
index a1458748c73..00000000000
--- a/vendor/github.com/pjbgf/sha1cd/detection.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package sha1cd
-
-import "hash"
-
-type CollisionResistantHash interface {
- // CollisionResistantSum extends on Sum by returning an additional boolean
- // which indicates whether a collision was found during the hashing process.
- CollisionResistantSum(b []byte) ([]byte, bool)
-
- hash.Hash
-}
diff --git a/vendor/github.com/pjbgf/sha1cd/internal/const.go b/vendor/github.com/pjbgf/sha1cd/internal/const.go
deleted file mode 100644
index 944a131d39f..00000000000
--- a/vendor/github.com/pjbgf/sha1cd/internal/const.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package shared
-
-const (
- // Constants for the SHA-1 hash function.
- K0 = 0x5A827999
- K1 = 0x6ED9EBA1
- K2 = 0x8F1BBCDC
- K3 = 0xCA62C1D6
-
- // Initial values for the buffer variables: h0, h1, h2, h3, h4.
- Init0 = 0x67452301
- Init1 = 0xEFCDAB89
- Init2 = 0x98BADCFE
- Init3 = 0x10325476
- Init4 = 0xC3D2E1F0
-
- // Initial values for the temporary variables (ihvtmp0, ihvtmp1, ihvtmp2, ihvtmp3, ihvtmp4) during the SHA recompression step.
- InitTmp0 = 0xD5
- InitTmp1 = 0x394
- InitTmp2 = 0x8152A8
- InitTmp3 = 0x0
- InitTmp4 = 0xA7ECE0
-
- // SHA1 contains 2 buffers, each based off 5 32-bit words.
- WordBuffers = 5
-
- // The output of SHA1 is 20 bytes (160 bits).
- Size = 20
-
- // Rounds represents the number of steps required to process each chunk.
- Rounds = 80
-
- // SHA1 processes the input data in chunks. Each chunk contains 64 bytes.
- Chunk = 64
-
- // The number of pre-step compression state to store.
- // Currently there are 3 pre-step compression states required: 0, 58, 65.
- PreStepState = 3
-
- Magic = "shacd\x01"
- MarshaledSize = len(Magic) + 5*4 + Chunk + 8
-)
diff --git a/vendor/github.com/pjbgf/sha1cd/sha1cd.go b/vendor/github.com/pjbgf/sha1cd/sha1cd.go
deleted file mode 100644
index a69e480ee97..00000000000
--- a/vendor/github.com/pjbgf/sha1cd/sha1cd.go
+++ /dev/null
@@ -1,227 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package sha1cd implements collision detection based on the whitepaper
-// Counter-cryptanalysis from Marc Stevens. The original ubc implementation
-// was done by Marc Stevens and Dan Shumow, and can be found at:
-// https://github.com/cr-marcstevens/sha1collisiondetection
-package sha1cd
-
-// This SHA1 implementation is based on Go's generic SHA1.
-// Original: https://github.com/golang/go/blob/master/src/crypto/sha1/sha1.go
-
-import (
- "crypto"
- "encoding/binary"
- "errors"
- "hash"
-
- shared "github.com/pjbgf/sha1cd/internal"
-)
-
-func init() {
- crypto.RegisterHash(crypto.SHA1, New)
-}
-
-// The size of a SHA-1 checksum in bytes.
-const Size = shared.Size
-
-// The blocksize of SHA-1 in bytes.
-const BlockSize = shared.Chunk
-
-// digest represents the partial evaluation of a checksum.
-type digest struct {
- h [shared.WordBuffers]uint32
- x [shared.Chunk]byte
- nx int
- len uint64
-
- // col defines whether a collision has been found.
- col bool
- blockFunc func(dig *digest, p []byte)
-}
-
-func (d *digest) MarshalBinary() ([]byte, error) {
- b := make([]byte, 0, shared.MarshaledSize)
- b = append(b, shared.Magic...)
- b = appendUint32(b, d.h[0])
- b = appendUint32(b, d.h[1])
- b = appendUint32(b, d.h[2])
- b = appendUint32(b, d.h[3])
- b = appendUint32(b, d.h[4])
- b = append(b, d.x[:d.nx]...)
- b = b[:len(b)+len(d.x)-d.nx] // already zero
- b = appendUint64(b, d.len)
- return b, nil
-}
-
-func appendUint32(b []byte, v uint32) []byte {
- return append(b,
- byte(v>>24),
- byte(v>>16),
- byte(v>>8),
- byte(v),
- )
-}
-
-func appendUint64(b []byte, v uint64) []byte {
- return append(b,
- byte(v>>56),
- byte(v>>48),
- byte(v>>40),
- byte(v>>32),
- byte(v>>24),
- byte(v>>16),
- byte(v>>8),
- byte(v),
- )
-}
-
-func (d *digest) UnmarshalBinary(b []byte) error {
- if len(b) < len(shared.Magic) || string(b[:len(shared.Magic)]) != shared.Magic {
- return errors.New("crypto/sha1: invalid hash state identifier")
- }
- if len(b) != shared.MarshaledSize {
- return errors.New("crypto/sha1: invalid hash state size")
- }
- b = b[len(shared.Magic):]
- b, d.h[0] = consumeUint32(b)
- b, d.h[1] = consumeUint32(b)
- b, d.h[2] = consumeUint32(b)
- b, d.h[3] = consumeUint32(b)
- b, d.h[4] = consumeUint32(b)
- b = b[copy(d.x[:], b):]
- b, d.len = consumeUint64(b)
- d.nx = int(d.len % shared.Chunk)
- return nil
-}
-
-func consumeUint64(b []byte) ([]byte, uint64) {
- _ = b[7]
- x := uint64(b[7]) | uint64(b[6])<<8 | uint64(b[shared.WordBuffers])<<16 | uint64(b[4])<<24 |
- uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
- return b[8:], x
-}
-
-func consumeUint32(b []byte) ([]byte, uint32) {
- _ = b[3]
- x := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
- return b[4:], x
-}
-
-func (d *digest) Reset() {
- d.h[0] = shared.Init0
- d.h[1] = shared.Init1
- d.h[2] = shared.Init2
- d.h[3] = shared.Init3
- d.h[4] = shared.Init4
- d.nx = 0
- d.len = 0
-
- d.col = false
-}
-
-// New returns a new hash.Hash computing the SHA1 checksum. The Hash also
-// implements encoding.BinaryMarshaler and encoding.BinaryUnmarshaler to
-// marshal and unmarshal the internal state of the hash.
-func New() hash.Hash {
- d := new(digest)
-
- d.blockFunc = block
- d.Reset()
- return d
-}
-
-// NewGeneric is equivalent to New but uses the Go generic implementation,
-// avoiding any processor-specific optimizations.
-func NewGeneric() hash.Hash {
- d := new(digest)
-
- d.blockFunc = blockGeneric
- d.Reset()
- return d
-}
-
-func (d *digest) Size() int { return Size }
-
-func (d *digest) BlockSize() int { return BlockSize }
-
-func (d *digest) Write(p []byte) (nn int, err error) {
- if len(p) == 0 {
- return
- }
-
- nn = len(p)
- d.len += uint64(nn)
- if d.nx > 0 {
- n := copy(d.x[d.nx:], p)
- d.nx += n
- if d.nx == shared.Chunk {
- d.blockFunc(d, d.x[:])
- d.nx = 0
- }
- p = p[n:]
- }
- if len(p) >= shared.Chunk {
- n := len(p) &^ (shared.Chunk - 1)
- d.blockFunc(d, p[:n])
- p = p[n:]
- }
- if len(p) > 0 {
- d.nx = copy(d.x[:], p)
- }
- return
-}
-
-func (d *digest) Sum(in []byte) []byte {
- // Make a copy of d so that caller can keep writing and summing.
- d0 := *d
- hash := d0.checkSum()
- return append(in, hash[:]...)
-}
-
-func (d *digest) checkSum() [Size]byte {
- len := d.len
- // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
- var tmp [64]byte
- tmp[0] = 0x80
- if len%64 < 56 {
- d.Write(tmp[0 : 56-len%64])
- } else {
- d.Write(tmp[0 : 64+56-len%64])
- }
-
- // Length in bits.
- len <<= 3
- binary.BigEndian.PutUint64(tmp[:], len)
- d.Write(tmp[0:8])
-
- if d.nx != 0 {
- panic("d.nx != 0")
- }
-
- var digest [Size]byte
-
- binary.BigEndian.PutUint32(digest[0:], d.h[0])
- binary.BigEndian.PutUint32(digest[4:], d.h[1])
- binary.BigEndian.PutUint32(digest[8:], d.h[2])
- binary.BigEndian.PutUint32(digest[12:], d.h[3])
- binary.BigEndian.PutUint32(digest[16:], d.h[4])
-
- return digest
-}
-
-// Sum returns the SHA-1 checksum of the data.
-func Sum(data []byte) ([Size]byte, bool) {
- d := New().(*digest)
- d.Write(data)
- return d.checkSum(), d.col
-}
-
-func (d *digest) CollisionResistantSum(in []byte) ([]byte, bool) {
- // Make a copy of d so that caller can keep writing and summing.
- d0 := *d
- hash := d0.checkSum()
- return append(in, hash[:]...), d0.col
-}
diff --git a/vendor/github.com/pjbgf/sha1cd/sha1cdblock_amd64.go b/vendor/github.com/pjbgf/sha1cd/sha1cdblock_amd64.go
deleted file mode 100644
index 95e08308420..00000000000
--- a/vendor/github.com/pjbgf/sha1cd/sha1cdblock_amd64.go
+++ /dev/null
@@ -1,50 +0,0 @@
-//go:build !noasm && gc && amd64
-// +build !noasm,gc,amd64
-
-package sha1cd
-
-import (
- "math"
- "unsafe"
-
- shared "github.com/pjbgf/sha1cd/internal"
-)
-
-type sliceHeader struct {
- base uintptr
- len int
- cap int
-}
-
-// blockAMD64 hashes the message p into the current state in dig.
-// Both m1 and cs are used to store intermediate results which are used by the collision detection logic.
-//
-//go:noescape
-func blockAMD64(dig *digest, p sliceHeader, m1 []uint32, cs [][5]uint32)
-
-func block(dig *digest, p []byte) {
- m1 := [shared.Rounds]uint32{}
- cs := [shared.PreStepState][shared.WordBuffers]uint32{}
-
- for len(p) >= shared.Chunk {
- // Only send a block to be processed, as the collission detection
- // works on a block by block basis.
- ips := sliceHeader{
- base: uintptr(unsafe.Pointer(&p[0])),
- len: int(math.Min(float64(len(p)), float64(shared.Chunk))),
- cap: shared.Chunk,
- }
-
- blockAMD64(dig, ips, m1[:], cs[:])
-
- col := checkCollision(m1, cs, dig.h)
- if col {
- dig.col = true
-
- blockAMD64(dig, ips, m1[:], cs[:])
- blockAMD64(dig, ips, m1[:], cs[:])
- }
-
- p = p[shared.Chunk:]
- }
-}
diff --git a/vendor/github.com/pjbgf/sha1cd/sha1cdblock_amd64.s b/vendor/github.com/pjbgf/sha1cd/sha1cdblock_amd64.s
deleted file mode 100644
index 86f9821caba..00000000000
--- a/vendor/github.com/pjbgf/sha1cd/sha1cdblock_amd64.s
+++ /dev/null
@@ -1,2274 +0,0 @@
-// Code generated by command: go run sha1cdblock_amd64_asm.go -out sha1cdblock_amd64.s. DO NOT EDIT.
-
-//go:build !noasm && gc && amd64
-// +build !noasm,gc,amd64
-
-#include "textflag.h"
-
-// func blockAMD64(dig *digest, p []byte, m1 []uint32, cs [][5]uint32)
-TEXT ·blockAMD64(SB), NOSPLIT, $64-80
- MOVQ dig+0(FP), R8
- MOVQ p_base+8(FP), DI
- MOVQ p_len+16(FP), DX
- SHRQ $+6, DX
- SHLQ $+6, DX
- LEAQ (DI)(DX*1), SI
-
- // Load h0, h1, h2, h3, h4.
- MOVL (R8), AX
- MOVL 4(R8), BX
- MOVL 8(R8), CX
- MOVL 12(R8), DX
- MOVL 16(R8), BP
-
- // len(p) >= chunk
- CMPQ DI, SI
- JEQ end
-
-loop:
- // Initialize registers a, b, c, d, e.
- MOVL AX, R10
- MOVL BX, R11
- MOVL CX, R12
- MOVL DX, R13
- MOVL BP, R14
-
- // ROUND1 (steps 0-15)
- // Load cs
- MOVQ cs_base+56(FP), R8
- MOVL R10, (R8)
- MOVL R11, 4(R8)
- MOVL R12, 8(R8)
- MOVL R13, 12(R8)
- MOVL R14, 16(R8)
-
- // ROUND1(0)
- // LOAD
- MOVL (DI), R9
- BSWAPL R9
- MOVL R9, (SP)
-
- // FUNC1
- MOVL R13, R15
- XORL R12, R15
- ANDL R11, R15
- XORL R13, R15
-
- // MIX
- ROLL $+30, R11
- ADDL R15, R14
- MOVL R10, R8
- ROLL $+5, R8
- LEAL 1518500249(R14)(R9*1), R14
- ADDL R8, R14
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL (SP), R9
- MOVL R9, (R8)
-
- // ROUND1(1)
- // LOAD
- MOVL 4(DI), R9
- BSWAPL R9
- MOVL R9, 4(SP)
-
- // FUNC1
- MOVL R12, R15
- XORL R11, R15
- ANDL R10, R15
- XORL R12, R15
-
- // MIX
- ROLL $+30, R10
- ADDL R15, R13
- MOVL R14, R8
- ROLL $+5, R8
- LEAL 1518500249(R13)(R9*1), R13
- ADDL R8, R13
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 4(SP), R9
- MOVL R9, 4(R8)
-
- // ROUND1(2)
- // LOAD
- MOVL 8(DI), R9
- BSWAPL R9
- MOVL R9, 8(SP)
-
- // FUNC1
- MOVL R11, R15
- XORL R10, R15
- ANDL R14, R15
- XORL R11, R15
-
- // MIX
- ROLL $+30, R14
- ADDL R15, R12
- MOVL R13, R8
- ROLL $+5, R8
- LEAL 1518500249(R12)(R9*1), R12
- ADDL R8, R12
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 8(SP), R9
- MOVL R9, 8(R8)
-
- // ROUND1(3)
- // LOAD
- MOVL 12(DI), R9
- BSWAPL R9
- MOVL R9, 12(SP)
-
- // FUNC1
- MOVL R10, R15
- XORL R14, R15
- ANDL R13, R15
- XORL R10, R15
-
- // MIX
- ROLL $+30, R13
- ADDL R15, R11
- MOVL R12, R8
- ROLL $+5, R8
- LEAL 1518500249(R11)(R9*1), R11
- ADDL R8, R11
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 12(SP), R9
- MOVL R9, 12(R8)
-
- // ROUND1(4)
- // LOAD
- MOVL 16(DI), R9
- BSWAPL R9
- MOVL R9, 16(SP)
-
- // FUNC1
- MOVL R14, R15
- XORL R13, R15
- ANDL R12, R15
- XORL R14, R15
-
- // MIX
- ROLL $+30, R12
- ADDL R15, R10
- MOVL R11, R8
- ROLL $+5, R8
- LEAL 1518500249(R10)(R9*1), R10
- ADDL R8, R10
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 16(SP), R9
- MOVL R9, 16(R8)
-
- // ROUND1(5)
- // LOAD
- MOVL 20(DI), R9
- BSWAPL R9
- MOVL R9, 20(SP)
-
- // FUNC1
- MOVL R13, R15
- XORL R12, R15
- ANDL R11, R15
- XORL R13, R15
-
- // MIX
- ROLL $+30, R11
- ADDL R15, R14
- MOVL R10, R8
- ROLL $+5, R8
- LEAL 1518500249(R14)(R9*1), R14
- ADDL R8, R14
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 20(SP), R9
- MOVL R9, 20(R8)
-
- // ROUND1(6)
- // LOAD
- MOVL 24(DI), R9
- BSWAPL R9
- MOVL R9, 24(SP)
-
- // FUNC1
- MOVL R12, R15
- XORL R11, R15
- ANDL R10, R15
- XORL R12, R15
-
- // MIX
- ROLL $+30, R10
- ADDL R15, R13
- MOVL R14, R8
- ROLL $+5, R8
- LEAL 1518500249(R13)(R9*1), R13
- ADDL R8, R13
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 24(SP), R9
- MOVL R9, 24(R8)
-
- // ROUND1(7)
- // LOAD
- MOVL 28(DI), R9
- BSWAPL R9
- MOVL R9, 28(SP)
-
- // FUNC1
- MOVL R11, R15
- XORL R10, R15
- ANDL R14, R15
- XORL R11, R15
-
- // MIX
- ROLL $+30, R14
- ADDL R15, R12
- MOVL R13, R8
- ROLL $+5, R8
- LEAL 1518500249(R12)(R9*1), R12
- ADDL R8, R12
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 28(SP), R9
- MOVL R9, 28(R8)
-
- // ROUND1(8)
- // LOAD
- MOVL 32(DI), R9
- BSWAPL R9
- MOVL R9, 32(SP)
-
- // FUNC1
- MOVL R10, R15
- XORL R14, R15
- ANDL R13, R15
- XORL R10, R15
-
- // MIX
- ROLL $+30, R13
- ADDL R15, R11
- MOVL R12, R8
- ROLL $+5, R8
- LEAL 1518500249(R11)(R9*1), R11
- ADDL R8, R11
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 32(SP), R9
- MOVL R9, 32(R8)
-
- // ROUND1(9)
- // LOAD
- MOVL 36(DI), R9
- BSWAPL R9
- MOVL R9, 36(SP)
-
- // FUNC1
- MOVL R14, R15
- XORL R13, R15
- ANDL R12, R15
- XORL R14, R15
-
- // MIX
- ROLL $+30, R12
- ADDL R15, R10
- MOVL R11, R8
- ROLL $+5, R8
- LEAL 1518500249(R10)(R9*1), R10
- ADDL R8, R10
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 36(SP), R9
- MOVL R9, 36(R8)
-
- // ROUND1(10)
- // LOAD
- MOVL 40(DI), R9
- BSWAPL R9
- MOVL R9, 40(SP)
-
- // FUNC1
- MOVL R13, R15
- XORL R12, R15
- ANDL R11, R15
- XORL R13, R15
-
- // MIX
- ROLL $+30, R11
- ADDL R15, R14
- MOVL R10, R8
- ROLL $+5, R8
- LEAL 1518500249(R14)(R9*1), R14
- ADDL R8, R14
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 40(SP), R9
- MOVL R9, 40(R8)
-
- // ROUND1(11)
- // LOAD
- MOVL 44(DI), R9
- BSWAPL R9
- MOVL R9, 44(SP)
-
- // FUNC1
- MOVL R12, R15
- XORL R11, R15
- ANDL R10, R15
- XORL R12, R15
-
- // MIX
- ROLL $+30, R10
- ADDL R15, R13
- MOVL R14, R8
- ROLL $+5, R8
- LEAL 1518500249(R13)(R9*1), R13
- ADDL R8, R13
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 44(SP), R9
- MOVL R9, 44(R8)
-
- // ROUND1(12)
- // LOAD
- MOVL 48(DI), R9
- BSWAPL R9
- MOVL R9, 48(SP)
-
- // FUNC1
- MOVL R11, R15
- XORL R10, R15
- ANDL R14, R15
- XORL R11, R15
-
- // MIX
- ROLL $+30, R14
- ADDL R15, R12
- MOVL R13, R8
- ROLL $+5, R8
- LEAL 1518500249(R12)(R9*1), R12
- ADDL R8, R12
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 48(SP), R9
- MOVL R9, 48(R8)
-
- // ROUND1(13)
- // LOAD
- MOVL 52(DI), R9
- BSWAPL R9
- MOVL R9, 52(SP)
-
- // FUNC1
- MOVL R10, R15
- XORL R14, R15
- ANDL R13, R15
- XORL R10, R15
-
- // MIX
- ROLL $+30, R13
- ADDL R15, R11
- MOVL R12, R8
- ROLL $+5, R8
- LEAL 1518500249(R11)(R9*1), R11
- ADDL R8, R11
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 52(SP), R9
- MOVL R9, 52(R8)
-
- // ROUND1(14)
- // LOAD
- MOVL 56(DI), R9
- BSWAPL R9
- MOVL R9, 56(SP)
-
- // FUNC1
- MOVL R14, R15
- XORL R13, R15
- ANDL R12, R15
- XORL R14, R15
-
- // MIX
- ROLL $+30, R12
- ADDL R15, R10
- MOVL R11, R8
- ROLL $+5, R8
- LEAL 1518500249(R10)(R9*1), R10
- ADDL R8, R10
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 56(SP), R9
- MOVL R9, 56(R8)
-
- // ROUND1(15)
- // LOAD
- MOVL 60(DI), R9
- BSWAPL R9
- MOVL R9, 60(SP)
-
- // FUNC1
- MOVL R13, R15
- XORL R12, R15
- ANDL R11, R15
- XORL R13, R15
-
- // MIX
- ROLL $+30, R11
- ADDL R15, R14
- MOVL R10, R8
- ROLL $+5, R8
- LEAL 1518500249(R14)(R9*1), R14
- ADDL R8, R14
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 60(SP), R9
- MOVL R9, 60(R8)
-
- // ROUND1x (steps 16-19) - same as ROUND1 but with no data load.
- // ROUND1x(16)
- // SHUFFLE
- MOVL (SP), R9
- XORL 52(SP), R9
- XORL 32(SP), R9
- XORL 8(SP), R9
- ROLL $+1, R9
- MOVL R9, (SP)
-
- // FUNC1
- MOVL R12, R15
- XORL R11, R15
- ANDL R10, R15
- XORL R12, R15
-
- // MIX
- ROLL $+30, R10
- ADDL R15, R13
- MOVL R14, R8
- ROLL $+5, R8
- LEAL 1518500249(R13)(R9*1), R13
- ADDL R8, R13
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL (SP), R9
- MOVL R9, 64(R8)
-
- // ROUND1x(17)
- // SHUFFLE
- MOVL 4(SP), R9
- XORL 56(SP), R9
- XORL 36(SP), R9
- XORL 12(SP), R9
- ROLL $+1, R9
- MOVL R9, 4(SP)
-
- // FUNC1
- MOVL R11, R15
- XORL R10, R15
- ANDL R14, R15
- XORL R11, R15
-
- // MIX
- ROLL $+30, R14
- ADDL R15, R12
- MOVL R13, R8
- ROLL $+5, R8
- LEAL 1518500249(R12)(R9*1), R12
- ADDL R8, R12
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 4(SP), R9
- MOVL R9, 68(R8)
-
- // ROUND1x(18)
- // SHUFFLE
- MOVL 8(SP), R9
- XORL 60(SP), R9
- XORL 40(SP), R9
- XORL 16(SP), R9
- ROLL $+1, R9
- MOVL R9, 8(SP)
-
- // FUNC1
- MOVL R10, R15
- XORL R14, R15
- ANDL R13, R15
- XORL R10, R15
-
- // MIX
- ROLL $+30, R13
- ADDL R15, R11
- MOVL R12, R8
- ROLL $+5, R8
- LEAL 1518500249(R11)(R9*1), R11
- ADDL R8, R11
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 8(SP), R9
- MOVL R9, 72(R8)
-
- // ROUND1x(19)
- // SHUFFLE
- MOVL 12(SP), R9
- XORL (SP), R9
- XORL 44(SP), R9
- XORL 20(SP), R9
- ROLL $+1, R9
- MOVL R9, 12(SP)
-
- // FUNC1
- MOVL R14, R15
- XORL R13, R15
- ANDL R12, R15
- XORL R14, R15
-
- // MIX
- ROLL $+30, R12
- ADDL R15, R10
- MOVL R11, R8
- ROLL $+5, R8
- LEAL 1518500249(R10)(R9*1), R10
- ADDL R8, R10
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 12(SP), R9
- MOVL R9, 76(R8)
-
- // ROUND2 (steps 20-39)
- // ROUND2(20)
- // SHUFFLE
- MOVL 16(SP), R9
- XORL 4(SP), R9
- XORL 48(SP), R9
- XORL 24(SP), R9
- ROLL $+1, R9
- MOVL R9, 16(SP)
-
- // FUNC2
- MOVL R11, R15
- XORL R12, R15
- XORL R13, R15
-
- // MIX
- ROLL $+30, R11
- ADDL R15, R14
- MOVL R10, R8
- ROLL $+5, R8
- LEAL 1859775393(R14)(R9*1), R14
- ADDL R8, R14
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 16(SP), R9
- MOVL R9, 80(R8)
-
- // ROUND2(21)
- // SHUFFLE
- MOVL 20(SP), R9
- XORL 8(SP), R9
- XORL 52(SP), R9
- XORL 28(SP), R9
- ROLL $+1, R9
- MOVL R9, 20(SP)
-
- // FUNC2
- MOVL R10, R15
- XORL R11, R15
- XORL R12, R15
-
- // MIX
- ROLL $+30, R10
- ADDL R15, R13
- MOVL R14, R8
- ROLL $+5, R8
- LEAL 1859775393(R13)(R9*1), R13
- ADDL R8, R13
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 20(SP), R9
- MOVL R9, 84(R8)
-
- // ROUND2(22)
- // SHUFFLE
- MOVL 24(SP), R9
- XORL 12(SP), R9
- XORL 56(SP), R9
- XORL 32(SP), R9
- ROLL $+1, R9
- MOVL R9, 24(SP)
-
- // FUNC2
- MOVL R14, R15
- XORL R10, R15
- XORL R11, R15
-
- // MIX
- ROLL $+30, R14
- ADDL R15, R12
- MOVL R13, R8
- ROLL $+5, R8
- LEAL 1859775393(R12)(R9*1), R12
- ADDL R8, R12
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 24(SP), R9
- MOVL R9, 88(R8)
-
- // ROUND2(23)
- // SHUFFLE
- MOVL 28(SP), R9
- XORL 16(SP), R9
- XORL 60(SP), R9
- XORL 36(SP), R9
- ROLL $+1, R9
- MOVL R9, 28(SP)
-
- // FUNC2
- MOVL R13, R15
- XORL R14, R15
- XORL R10, R15
-
- // MIX
- ROLL $+30, R13
- ADDL R15, R11
- MOVL R12, R8
- ROLL $+5, R8
- LEAL 1859775393(R11)(R9*1), R11
- ADDL R8, R11
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 28(SP), R9
- MOVL R9, 92(R8)
-
- // ROUND2(24)
- // SHUFFLE
- MOVL 32(SP), R9
- XORL 20(SP), R9
- XORL (SP), R9
- XORL 40(SP), R9
- ROLL $+1, R9
- MOVL R9, 32(SP)
-
- // FUNC2
- MOVL R12, R15
- XORL R13, R15
- XORL R14, R15
-
- // MIX
- ROLL $+30, R12
- ADDL R15, R10
- MOVL R11, R8
- ROLL $+5, R8
- LEAL 1859775393(R10)(R9*1), R10
- ADDL R8, R10
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 32(SP), R9
- MOVL R9, 96(R8)
-
- // ROUND2(25)
- // SHUFFLE
- MOVL 36(SP), R9
- XORL 24(SP), R9
- XORL 4(SP), R9
- XORL 44(SP), R9
- ROLL $+1, R9
- MOVL R9, 36(SP)
-
- // FUNC2
- MOVL R11, R15
- XORL R12, R15
- XORL R13, R15
-
- // MIX
- ROLL $+30, R11
- ADDL R15, R14
- MOVL R10, R8
- ROLL $+5, R8
- LEAL 1859775393(R14)(R9*1), R14
- ADDL R8, R14
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 36(SP), R9
- MOVL R9, 100(R8)
-
- // ROUND2(26)
- // SHUFFLE
- MOVL 40(SP), R9
- XORL 28(SP), R9
- XORL 8(SP), R9
- XORL 48(SP), R9
- ROLL $+1, R9
- MOVL R9, 40(SP)
-
- // FUNC2
- MOVL R10, R15
- XORL R11, R15
- XORL R12, R15
-
- // MIX
- ROLL $+30, R10
- ADDL R15, R13
- MOVL R14, R8
- ROLL $+5, R8
- LEAL 1859775393(R13)(R9*1), R13
- ADDL R8, R13
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 40(SP), R9
- MOVL R9, 104(R8)
-
- // ROUND2(27)
- // SHUFFLE
- MOVL 44(SP), R9
- XORL 32(SP), R9
- XORL 12(SP), R9
- XORL 52(SP), R9
- ROLL $+1, R9
- MOVL R9, 44(SP)
-
- // FUNC2
- MOVL R14, R15
- XORL R10, R15
- XORL R11, R15
-
- // MIX
- ROLL $+30, R14
- ADDL R15, R12
- MOVL R13, R8
- ROLL $+5, R8
- LEAL 1859775393(R12)(R9*1), R12
- ADDL R8, R12
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 44(SP), R9
- MOVL R9, 108(R8)
-
- // ROUND2(28)
- // SHUFFLE
- MOVL 48(SP), R9
- XORL 36(SP), R9
- XORL 16(SP), R9
- XORL 56(SP), R9
- ROLL $+1, R9
- MOVL R9, 48(SP)
-
- // FUNC2
- MOVL R13, R15
- XORL R14, R15
- XORL R10, R15
-
- // MIX
- ROLL $+30, R13
- ADDL R15, R11
- MOVL R12, R8
- ROLL $+5, R8
- LEAL 1859775393(R11)(R9*1), R11
- ADDL R8, R11
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 48(SP), R9
- MOVL R9, 112(R8)
-
- // ROUND2(29)
- // SHUFFLE
- MOVL 52(SP), R9
- XORL 40(SP), R9
- XORL 20(SP), R9
- XORL 60(SP), R9
- ROLL $+1, R9
- MOVL R9, 52(SP)
-
- // FUNC2
- MOVL R12, R15
- XORL R13, R15
- XORL R14, R15
-
- // MIX
- ROLL $+30, R12
- ADDL R15, R10
- MOVL R11, R8
- ROLL $+5, R8
- LEAL 1859775393(R10)(R9*1), R10
- ADDL R8, R10
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 52(SP), R9
- MOVL R9, 116(R8)
-
- // ROUND2(30)
- // SHUFFLE
- MOVL 56(SP), R9
- XORL 44(SP), R9
- XORL 24(SP), R9
- XORL (SP), R9
- ROLL $+1, R9
- MOVL R9, 56(SP)
-
- // FUNC2
- MOVL R11, R15
- XORL R12, R15
- XORL R13, R15
-
- // MIX
- ROLL $+30, R11
- ADDL R15, R14
- MOVL R10, R8
- ROLL $+5, R8
- LEAL 1859775393(R14)(R9*1), R14
- ADDL R8, R14
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 56(SP), R9
- MOVL R9, 120(R8)
-
- // ROUND2(31)
- // SHUFFLE
- MOVL 60(SP), R9
- XORL 48(SP), R9
- XORL 28(SP), R9
- XORL 4(SP), R9
- ROLL $+1, R9
- MOVL R9, 60(SP)
-
- // FUNC2
- MOVL R10, R15
- XORL R11, R15
- XORL R12, R15
-
- // MIX
- ROLL $+30, R10
- ADDL R15, R13
- MOVL R14, R8
- ROLL $+5, R8
- LEAL 1859775393(R13)(R9*1), R13
- ADDL R8, R13
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 60(SP), R9
- MOVL R9, 124(R8)
-
- // ROUND2(32)
- // SHUFFLE
- MOVL (SP), R9
- XORL 52(SP), R9
- XORL 32(SP), R9
- XORL 8(SP), R9
- ROLL $+1, R9
- MOVL R9, (SP)
-
- // FUNC2
- MOVL R14, R15
- XORL R10, R15
- XORL R11, R15
-
- // MIX
- ROLL $+30, R14
- ADDL R15, R12
- MOVL R13, R8
- ROLL $+5, R8
- LEAL 1859775393(R12)(R9*1), R12
- ADDL R8, R12
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL (SP), R9
- MOVL R9, 128(R8)
-
- // ROUND2(33)
- // SHUFFLE
- MOVL 4(SP), R9
- XORL 56(SP), R9
- XORL 36(SP), R9
- XORL 12(SP), R9
- ROLL $+1, R9
- MOVL R9, 4(SP)
-
- // FUNC2
- MOVL R13, R15
- XORL R14, R15
- XORL R10, R15
-
- // MIX
- ROLL $+30, R13
- ADDL R15, R11
- MOVL R12, R8
- ROLL $+5, R8
- LEAL 1859775393(R11)(R9*1), R11
- ADDL R8, R11
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 4(SP), R9
- MOVL R9, 132(R8)
-
- // ROUND2(34)
- // SHUFFLE
- MOVL 8(SP), R9
- XORL 60(SP), R9
- XORL 40(SP), R9
- XORL 16(SP), R9
- ROLL $+1, R9
- MOVL R9, 8(SP)
-
- // FUNC2
- MOVL R12, R15
- XORL R13, R15
- XORL R14, R15
-
- // MIX
- ROLL $+30, R12
- ADDL R15, R10
- MOVL R11, R8
- ROLL $+5, R8
- LEAL 1859775393(R10)(R9*1), R10
- ADDL R8, R10
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 8(SP), R9
- MOVL R9, 136(R8)
-
- // ROUND2(35)
- // SHUFFLE
- MOVL 12(SP), R9
- XORL (SP), R9
- XORL 44(SP), R9
- XORL 20(SP), R9
- ROLL $+1, R9
- MOVL R9, 12(SP)
-
- // FUNC2
- MOVL R11, R15
- XORL R12, R15
- XORL R13, R15
-
- // MIX
- ROLL $+30, R11
- ADDL R15, R14
- MOVL R10, R8
- ROLL $+5, R8
- LEAL 1859775393(R14)(R9*1), R14
- ADDL R8, R14
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 12(SP), R9
- MOVL R9, 140(R8)
-
- // ROUND2(36)
- // SHUFFLE
- MOVL 16(SP), R9
- XORL 4(SP), R9
- XORL 48(SP), R9
- XORL 24(SP), R9
- ROLL $+1, R9
- MOVL R9, 16(SP)
-
- // FUNC2
- MOVL R10, R15
- XORL R11, R15
- XORL R12, R15
-
- // MIX
- ROLL $+30, R10
- ADDL R15, R13
- MOVL R14, R8
- ROLL $+5, R8
- LEAL 1859775393(R13)(R9*1), R13
- ADDL R8, R13
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 16(SP), R9
- MOVL R9, 144(R8)
-
- // ROUND2(37)
- // SHUFFLE
- MOVL 20(SP), R9
- XORL 8(SP), R9
- XORL 52(SP), R9
- XORL 28(SP), R9
- ROLL $+1, R9
- MOVL R9, 20(SP)
-
- // FUNC2
- MOVL R14, R15
- XORL R10, R15
- XORL R11, R15
-
- // MIX
- ROLL $+30, R14
- ADDL R15, R12
- MOVL R13, R8
- ROLL $+5, R8
- LEAL 1859775393(R12)(R9*1), R12
- ADDL R8, R12
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 20(SP), R9
- MOVL R9, 148(R8)
-
- // ROUND2(38)
- // SHUFFLE
- MOVL 24(SP), R9
- XORL 12(SP), R9
- XORL 56(SP), R9
- XORL 32(SP), R9
- ROLL $+1, R9
- MOVL R9, 24(SP)
-
- // FUNC2
- MOVL R13, R15
- XORL R14, R15
- XORL R10, R15
-
- // MIX
- ROLL $+30, R13
- ADDL R15, R11
- MOVL R12, R8
- ROLL $+5, R8
- LEAL 1859775393(R11)(R9*1), R11
- ADDL R8, R11
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 24(SP), R9
- MOVL R9, 152(R8)
-
- // ROUND2(39)
- // SHUFFLE
- MOVL 28(SP), R9
- XORL 16(SP), R9
- XORL 60(SP), R9
- XORL 36(SP), R9
- ROLL $+1, R9
- MOVL R9, 28(SP)
-
- // FUNC2
- MOVL R12, R15
- XORL R13, R15
- XORL R14, R15
-
- // MIX
- ROLL $+30, R12
- ADDL R15, R10
- MOVL R11, R8
- ROLL $+5, R8
- LEAL 1859775393(R10)(R9*1), R10
- ADDL R8, R10
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 28(SP), R9
- MOVL R9, 156(R8)
-
- // ROUND3 (steps 40-59)
- // ROUND3(40)
- // SHUFFLE
- MOVL 32(SP), R9
- XORL 20(SP), R9
- XORL (SP), R9
- XORL 40(SP), R9
- ROLL $+1, R9
- MOVL R9, 32(SP)
-
- // FUNC3
- MOVL R11, R8
- ORL R12, R8
- ANDL R13, R8
- MOVL R11, R15
- ANDL R12, R15
- ORL R8, R15
-
- // MIX
- ROLL $+30, R11
- ADDL R15, R14
- MOVL R10, R8
- ROLL $+5, R8
- LEAL 2400959708(R14)(R9*1), R14
- ADDL R8, R14
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 32(SP), R9
- MOVL R9, 160(R8)
-
- // ROUND3(41)
- // SHUFFLE
- MOVL 36(SP), R9
- XORL 24(SP), R9
- XORL 4(SP), R9
- XORL 44(SP), R9
- ROLL $+1, R9
- MOVL R9, 36(SP)
-
- // FUNC3
- MOVL R10, R8
- ORL R11, R8
- ANDL R12, R8
- MOVL R10, R15
- ANDL R11, R15
- ORL R8, R15
-
- // MIX
- ROLL $+30, R10
- ADDL R15, R13
- MOVL R14, R8
- ROLL $+5, R8
- LEAL 2400959708(R13)(R9*1), R13
- ADDL R8, R13
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 36(SP), R9
- MOVL R9, 164(R8)
-
- // ROUND3(42)
- // SHUFFLE
- MOVL 40(SP), R9
- XORL 28(SP), R9
- XORL 8(SP), R9
- XORL 48(SP), R9
- ROLL $+1, R9
- MOVL R9, 40(SP)
-
- // FUNC3
- MOVL R14, R8
- ORL R10, R8
- ANDL R11, R8
- MOVL R14, R15
- ANDL R10, R15
- ORL R8, R15
-
- // MIX
- ROLL $+30, R14
- ADDL R15, R12
- MOVL R13, R8
- ROLL $+5, R8
- LEAL 2400959708(R12)(R9*1), R12
- ADDL R8, R12
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 40(SP), R9
- MOVL R9, 168(R8)
-
- // ROUND3(43)
- // SHUFFLE
- MOVL 44(SP), R9
- XORL 32(SP), R9
- XORL 12(SP), R9
- XORL 52(SP), R9
- ROLL $+1, R9
- MOVL R9, 44(SP)
-
- // FUNC3
- MOVL R13, R8
- ORL R14, R8
- ANDL R10, R8
- MOVL R13, R15
- ANDL R14, R15
- ORL R8, R15
-
- // MIX
- ROLL $+30, R13
- ADDL R15, R11
- MOVL R12, R8
- ROLL $+5, R8
- LEAL 2400959708(R11)(R9*1), R11
- ADDL R8, R11
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 44(SP), R9
- MOVL R9, 172(R8)
-
- // ROUND3(44)
- // SHUFFLE
- MOVL 48(SP), R9
- XORL 36(SP), R9
- XORL 16(SP), R9
- XORL 56(SP), R9
- ROLL $+1, R9
- MOVL R9, 48(SP)
-
- // FUNC3
- MOVL R12, R8
- ORL R13, R8
- ANDL R14, R8
- MOVL R12, R15
- ANDL R13, R15
- ORL R8, R15
-
- // MIX
- ROLL $+30, R12
- ADDL R15, R10
- MOVL R11, R8
- ROLL $+5, R8
- LEAL 2400959708(R10)(R9*1), R10
- ADDL R8, R10
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 48(SP), R9
- MOVL R9, 176(R8)
-
- // ROUND3(45)
- // SHUFFLE
- MOVL 52(SP), R9
- XORL 40(SP), R9
- XORL 20(SP), R9
- XORL 60(SP), R9
- ROLL $+1, R9
- MOVL R9, 52(SP)
-
- // FUNC3
- MOVL R11, R8
- ORL R12, R8
- ANDL R13, R8
- MOVL R11, R15
- ANDL R12, R15
- ORL R8, R15
-
- // MIX
- ROLL $+30, R11
- ADDL R15, R14
- MOVL R10, R8
- ROLL $+5, R8
- LEAL 2400959708(R14)(R9*1), R14
- ADDL R8, R14
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 52(SP), R9
- MOVL R9, 180(R8)
-
- // ROUND3(46)
- // SHUFFLE
- MOVL 56(SP), R9
- XORL 44(SP), R9
- XORL 24(SP), R9
- XORL (SP), R9
- ROLL $+1, R9
- MOVL R9, 56(SP)
-
- // FUNC3
- MOVL R10, R8
- ORL R11, R8
- ANDL R12, R8
- MOVL R10, R15
- ANDL R11, R15
- ORL R8, R15
-
- // MIX
- ROLL $+30, R10
- ADDL R15, R13
- MOVL R14, R8
- ROLL $+5, R8
- LEAL 2400959708(R13)(R9*1), R13
- ADDL R8, R13
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 56(SP), R9
- MOVL R9, 184(R8)
-
- // ROUND3(47)
- // SHUFFLE
- MOVL 60(SP), R9
- XORL 48(SP), R9
- XORL 28(SP), R9
- XORL 4(SP), R9
- ROLL $+1, R9
- MOVL R9, 60(SP)
-
- // FUNC3
- MOVL R14, R8
- ORL R10, R8
- ANDL R11, R8
- MOVL R14, R15
- ANDL R10, R15
- ORL R8, R15
-
- // MIX
- ROLL $+30, R14
- ADDL R15, R12
- MOVL R13, R8
- ROLL $+5, R8
- LEAL 2400959708(R12)(R9*1), R12
- ADDL R8, R12
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 60(SP), R9
- MOVL R9, 188(R8)
-
- // ROUND3(48)
- // SHUFFLE
- MOVL (SP), R9
- XORL 52(SP), R9
- XORL 32(SP), R9
- XORL 8(SP), R9
- ROLL $+1, R9
- MOVL R9, (SP)
-
- // FUNC3
- MOVL R13, R8
- ORL R14, R8
- ANDL R10, R8
- MOVL R13, R15
- ANDL R14, R15
- ORL R8, R15
-
- // MIX
- ROLL $+30, R13
- ADDL R15, R11
- MOVL R12, R8
- ROLL $+5, R8
- LEAL 2400959708(R11)(R9*1), R11
- ADDL R8, R11
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL (SP), R9
- MOVL R9, 192(R8)
-
- // ROUND3(49)
- // SHUFFLE
- MOVL 4(SP), R9
- XORL 56(SP), R9
- XORL 36(SP), R9
- XORL 12(SP), R9
- ROLL $+1, R9
- MOVL R9, 4(SP)
-
- // FUNC3
- MOVL R12, R8
- ORL R13, R8
- ANDL R14, R8
- MOVL R12, R15
- ANDL R13, R15
- ORL R8, R15
-
- // MIX
- ROLL $+30, R12
- ADDL R15, R10
- MOVL R11, R8
- ROLL $+5, R8
- LEAL 2400959708(R10)(R9*1), R10
- ADDL R8, R10
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 4(SP), R9
- MOVL R9, 196(R8)
-
- // ROUND3(50)
- // SHUFFLE
- MOVL 8(SP), R9
- XORL 60(SP), R9
- XORL 40(SP), R9
- XORL 16(SP), R9
- ROLL $+1, R9
- MOVL R9, 8(SP)
-
- // FUNC3
- MOVL R11, R8
- ORL R12, R8
- ANDL R13, R8
- MOVL R11, R15
- ANDL R12, R15
- ORL R8, R15
-
- // MIX
- ROLL $+30, R11
- ADDL R15, R14
- MOVL R10, R8
- ROLL $+5, R8
- LEAL 2400959708(R14)(R9*1), R14
- ADDL R8, R14
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 8(SP), R9
- MOVL R9, 200(R8)
-
- // ROUND3(51)
- // SHUFFLE
- MOVL 12(SP), R9
- XORL (SP), R9
- XORL 44(SP), R9
- XORL 20(SP), R9
- ROLL $+1, R9
- MOVL R9, 12(SP)
-
- // FUNC3
- MOVL R10, R8
- ORL R11, R8
- ANDL R12, R8
- MOVL R10, R15
- ANDL R11, R15
- ORL R8, R15
-
- // MIX
- ROLL $+30, R10
- ADDL R15, R13
- MOVL R14, R8
- ROLL $+5, R8
- LEAL 2400959708(R13)(R9*1), R13
- ADDL R8, R13
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 12(SP), R9
- MOVL R9, 204(R8)
-
- // ROUND3(52)
- // SHUFFLE
- MOVL 16(SP), R9
- XORL 4(SP), R9
- XORL 48(SP), R9
- XORL 24(SP), R9
- ROLL $+1, R9
- MOVL R9, 16(SP)
-
- // FUNC3
- MOVL R14, R8
- ORL R10, R8
- ANDL R11, R8
- MOVL R14, R15
- ANDL R10, R15
- ORL R8, R15
-
- // MIX
- ROLL $+30, R14
- ADDL R15, R12
- MOVL R13, R8
- ROLL $+5, R8
- LEAL 2400959708(R12)(R9*1), R12
- ADDL R8, R12
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 16(SP), R9
- MOVL R9, 208(R8)
-
- // ROUND3(53)
- // SHUFFLE
- MOVL 20(SP), R9
- XORL 8(SP), R9
- XORL 52(SP), R9
- XORL 28(SP), R9
- ROLL $+1, R9
- MOVL R9, 20(SP)
-
- // FUNC3
- MOVL R13, R8
- ORL R14, R8
- ANDL R10, R8
- MOVL R13, R15
- ANDL R14, R15
- ORL R8, R15
-
- // MIX
- ROLL $+30, R13
- ADDL R15, R11
- MOVL R12, R8
- ROLL $+5, R8
- LEAL 2400959708(R11)(R9*1), R11
- ADDL R8, R11
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 20(SP), R9
- MOVL R9, 212(R8)
-
- // ROUND3(54)
- // SHUFFLE
- MOVL 24(SP), R9
- XORL 12(SP), R9
- XORL 56(SP), R9
- XORL 32(SP), R9
- ROLL $+1, R9
- MOVL R9, 24(SP)
-
- // FUNC3
- MOVL R12, R8
- ORL R13, R8
- ANDL R14, R8
- MOVL R12, R15
- ANDL R13, R15
- ORL R8, R15
-
- // MIX
- ROLL $+30, R12
- ADDL R15, R10
- MOVL R11, R8
- ROLL $+5, R8
- LEAL 2400959708(R10)(R9*1), R10
- ADDL R8, R10
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 24(SP), R9
- MOVL R9, 216(R8)
-
- // ROUND3(55)
- // SHUFFLE
- MOVL 28(SP), R9
- XORL 16(SP), R9
- XORL 60(SP), R9
- XORL 36(SP), R9
- ROLL $+1, R9
- MOVL R9, 28(SP)
-
- // FUNC3
- MOVL R11, R8
- ORL R12, R8
- ANDL R13, R8
- MOVL R11, R15
- ANDL R12, R15
- ORL R8, R15
-
- // MIX
- ROLL $+30, R11
- ADDL R15, R14
- MOVL R10, R8
- ROLL $+5, R8
- LEAL 2400959708(R14)(R9*1), R14
- ADDL R8, R14
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 28(SP), R9
- MOVL R9, 220(R8)
-
- // ROUND3(56)
- // SHUFFLE
- MOVL 32(SP), R9
- XORL 20(SP), R9
- XORL (SP), R9
- XORL 40(SP), R9
- ROLL $+1, R9
- MOVL R9, 32(SP)
-
- // FUNC3
- MOVL R10, R8
- ORL R11, R8
- ANDL R12, R8
- MOVL R10, R15
- ANDL R11, R15
- ORL R8, R15
-
- // MIX
- ROLL $+30, R10
- ADDL R15, R13
- MOVL R14, R8
- ROLL $+5, R8
- LEAL 2400959708(R13)(R9*1), R13
- ADDL R8, R13
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 32(SP), R9
- MOVL R9, 224(R8)
-
- // ROUND3(57)
- // SHUFFLE
- MOVL 36(SP), R9
- XORL 24(SP), R9
- XORL 4(SP), R9
- XORL 44(SP), R9
- ROLL $+1, R9
- MOVL R9, 36(SP)
-
- // FUNC3
- MOVL R14, R8
- ORL R10, R8
- ANDL R11, R8
- MOVL R14, R15
- ANDL R10, R15
- ORL R8, R15
-
- // MIX
- ROLL $+30, R14
- ADDL R15, R12
- MOVL R13, R8
- ROLL $+5, R8
- LEAL 2400959708(R12)(R9*1), R12
- ADDL R8, R12
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 36(SP), R9
- MOVL R9, 228(R8)
-
- // Load cs
- MOVQ cs_base+56(FP), R8
- MOVL R12, 20(R8)
- MOVL R13, 24(R8)
- MOVL R14, 28(R8)
- MOVL R10, 32(R8)
- MOVL R11, 36(R8)
-
- // ROUND3(58)
- // SHUFFLE
- MOVL 40(SP), R9
- XORL 28(SP), R9
- XORL 8(SP), R9
- XORL 48(SP), R9
- ROLL $+1, R9
- MOVL R9, 40(SP)
-
- // FUNC3
- MOVL R13, R8
- ORL R14, R8
- ANDL R10, R8
- MOVL R13, R15
- ANDL R14, R15
- ORL R8, R15
-
- // MIX
- ROLL $+30, R13
- ADDL R15, R11
- MOVL R12, R8
- ROLL $+5, R8
- LEAL 2400959708(R11)(R9*1), R11
- ADDL R8, R11
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 40(SP), R9
- MOVL R9, 232(R8)
-
- // ROUND3(59)
- // SHUFFLE
- MOVL 44(SP), R9
- XORL 32(SP), R9
- XORL 12(SP), R9
- XORL 52(SP), R9
- ROLL $+1, R9
- MOVL R9, 44(SP)
-
- // FUNC3
- MOVL R12, R8
- ORL R13, R8
- ANDL R14, R8
- MOVL R12, R15
- ANDL R13, R15
- ORL R8, R15
-
- // MIX
- ROLL $+30, R12
- ADDL R15, R10
- MOVL R11, R8
- ROLL $+5, R8
- LEAL 2400959708(R10)(R9*1), R10
- ADDL R8, R10
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 44(SP), R9
- MOVL R9, 236(R8)
-
- // ROUND4 (steps 60-79)
- // ROUND4(60)
- // SHUFFLE
- MOVL 48(SP), R9
- XORL 36(SP), R9
- XORL 16(SP), R9
- XORL 56(SP), R9
- ROLL $+1, R9
- MOVL R9, 48(SP)
-
- // FUNC2
- MOVL R11, R15
- XORL R12, R15
- XORL R13, R15
-
- // MIX
- ROLL $+30, R11
- ADDL R15, R14
- MOVL R10, R8
- ROLL $+5, R8
- LEAL 3395469782(R14)(R9*1), R14
- ADDL R8, R14
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 48(SP), R9
- MOVL R9, 240(R8)
-
- // ROUND4(61)
- // SHUFFLE
- MOVL 52(SP), R9
- XORL 40(SP), R9
- XORL 20(SP), R9
- XORL 60(SP), R9
- ROLL $+1, R9
- MOVL R9, 52(SP)
-
- // FUNC2
- MOVL R10, R15
- XORL R11, R15
- XORL R12, R15
-
- // MIX
- ROLL $+30, R10
- ADDL R15, R13
- MOVL R14, R8
- ROLL $+5, R8
- LEAL 3395469782(R13)(R9*1), R13
- ADDL R8, R13
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 52(SP), R9
- MOVL R9, 244(R8)
-
- // ROUND4(62)
- // SHUFFLE
- MOVL 56(SP), R9
- XORL 44(SP), R9
- XORL 24(SP), R9
- XORL (SP), R9
- ROLL $+1, R9
- MOVL R9, 56(SP)
-
- // FUNC2
- MOVL R14, R15
- XORL R10, R15
- XORL R11, R15
-
- // MIX
- ROLL $+30, R14
- ADDL R15, R12
- MOVL R13, R8
- ROLL $+5, R8
- LEAL 3395469782(R12)(R9*1), R12
- ADDL R8, R12
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 56(SP), R9
- MOVL R9, 248(R8)
-
- // ROUND4(63)
- // SHUFFLE
- MOVL 60(SP), R9
- XORL 48(SP), R9
- XORL 28(SP), R9
- XORL 4(SP), R9
- ROLL $+1, R9
- MOVL R9, 60(SP)
-
- // FUNC2
- MOVL R13, R15
- XORL R14, R15
- XORL R10, R15
-
- // MIX
- ROLL $+30, R13
- ADDL R15, R11
- MOVL R12, R8
- ROLL $+5, R8
- LEAL 3395469782(R11)(R9*1), R11
- ADDL R8, R11
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 60(SP), R9
- MOVL R9, 252(R8)
-
- // ROUND4(64)
- // SHUFFLE
- MOVL (SP), R9
- XORL 52(SP), R9
- XORL 32(SP), R9
- XORL 8(SP), R9
- ROLL $+1, R9
- MOVL R9, (SP)
-
- // FUNC2
- MOVL R12, R15
- XORL R13, R15
- XORL R14, R15
-
- // MIX
- ROLL $+30, R12
- ADDL R15, R10
- MOVL R11, R8
- ROLL $+5, R8
- LEAL 3395469782(R10)(R9*1), R10
- ADDL R8, R10
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL (SP), R9
- MOVL R9, 256(R8)
-
- // Load cs
- MOVQ cs_base+56(FP), R8
- MOVL R10, 40(R8)
- MOVL R11, 44(R8)
- MOVL R12, 48(R8)
- MOVL R13, 52(R8)
- MOVL R14, 56(R8)
-
- // ROUND4(65)
- // SHUFFLE
- MOVL 4(SP), R9
- XORL 56(SP), R9
- XORL 36(SP), R9
- XORL 12(SP), R9
- ROLL $+1, R9
- MOVL R9, 4(SP)
-
- // FUNC2
- MOVL R11, R15
- XORL R12, R15
- XORL R13, R15
-
- // MIX
- ROLL $+30, R11
- ADDL R15, R14
- MOVL R10, R8
- ROLL $+5, R8
- LEAL 3395469782(R14)(R9*1), R14
- ADDL R8, R14
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 4(SP), R9
- MOVL R9, 260(R8)
-
- // ROUND4(66)
- // SHUFFLE
- MOVL 8(SP), R9
- XORL 60(SP), R9
- XORL 40(SP), R9
- XORL 16(SP), R9
- ROLL $+1, R9
- MOVL R9, 8(SP)
-
- // FUNC2
- MOVL R10, R15
- XORL R11, R15
- XORL R12, R15
-
- // MIX
- ROLL $+30, R10
- ADDL R15, R13
- MOVL R14, R8
- ROLL $+5, R8
- LEAL 3395469782(R13)(R9*1), R13
- ADDL R8, R13
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 8(SP), R9
- MOVL R9, 264(R8)
-
- // ROUND4(67)
- // SHUFFLE
- MOVL 12(SP), R9
- XORL (SP), R9
- XORL 44(SP), R9
- XORL 20(SP), R9
- ROLL $+1, R9
- MOVL R9, 12(SP)
-
- // FUNC2
- MOVL R14, R15
- XORL R10, R15
- XORL R11, R15
-
- // MIX
- ROLL $+30, R14
- ADDL R15, R12
- MOVL R13, R8
- ROLL $+5, R8
- LEAL 3395469782(R12)(R9*1), R12
- ADDL R8, R12
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 12(SP), R9
- MOVL R9, 268(R8)
-
- // ROUND4(68)
- // SHUFFLE
- MOVL 16(SP), R9
- XORL 4(SP), R9
- XORL 48(SP), R9
- XORL 24(SP), R9
- ROLL $+1, R9
- MOVL R9, 16(SP)
-
- // FUNC2
- MOVL R13, R15
- XORL R14, R15
- XORL R10, R15
-
- // MIX
- ROLL $+30, R13
- ADDL R15, R11
- MOVL R12, R8
- ROLL $+5, R8
- LEAL 3395469782(R11)(R9*1), R11
- ADDL R8, R11
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 16(SP), R9
- MOVL R9, 272(R8)
-
- // ROUND4(69)
- // SHUFFLE
- MOVL 20(SP), R9
- XORL 8(SP), R9
- XORL 52(SP), R9
- XORL 28(SP), R9
- ROLL $+1, R9
- MOVL R9, 20(SP)
-
- // FUNC2
- MOVL R12, R15
- XORL R13, R15
- XORL R14, R15
-
- // MIX
- ROLL $+30, R12
- ADDL R15, R10
- MOVL R11, R8
- ROLL $+5, R8
- LEAL 3395469782(R10)(R9*1), R10
- ADDL R8, R10
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 20(SP), R9
- MOVL R9, 276(R8)
-
- // ROUND4(70)
- // SHUFFLE
- MOVL 24(SP), R9
- XORL 12(SP), R9
- XORL 56(SP), R9
- XORL 32(SP), R9
- ROLL $+1, R9
- MOVL R9, 24(SP)
-
- // FUNC2
- MOVL R11, R15
- XORL R12, R15
- XORL R13, R15
-
- // MIX
- ROLL $+30, R11
- ADDL R15, R14
- MOVL R10, R8
- ROLL $+5, R8
- LEAL 3395469782(R14)(R9*1), R14
- ADDL R8, R14
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 24(SP), R9
- MOVL R9, 280(R8)
-
- // ROUND4(71)
- // SHUFFLE
- MOVL 28(SP), R9
- XORL 16(SP), R9
- XORL 60(SP), R9
- XORL 36(SP), R9
- ROLL $+1, R9
- MOVL R9, 28(SP)
-
- // FUNC2
- MOVL R10, R15
- XORL R11, R15
- XORL R12, R15
-
- // MIX
- ROLL $+30, R10
- ADDL R15, R13
- MOVL R14, R8
- ROLL $+5, R8
- LEAL 3395469782(R13)(R9*1), R13
- ADDL R8, R13
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 28(SP), R9
- MOVL R9, 284(R8)
-
- // ROUND4(72)
- // SHUFFLE
- MOVL 32(SP), R9
- XORL 20(SP), R9
- XORL (SP), R9
- XORL 40(SP), R9
- ROLL $+1, R9
- MOVL R9, 32(SP)
-
- // FUNC2
- MOVL R14, R15
- XORL R10, R15
- XORL R11, R15
-
- // MIX
- ROLL $+30, R14
- ADDL R15, R12
- MOVL R13, R8
- ROLL $+5, R8
- LEAL 3395469782(R12)(R9*1), R12
- ADDL R8, R12
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 32(SP), R9
- MOVL R9, 288(R8)
-
- // ROUND4(73)
- // SHUFFLE
- MOVL 36(SP), R9
- XORL 24(SP), R9
- XORL 4(SP), R9
- XORL 44(SP), R9
- ROLL $+1, R9
- MOVL R9, 36(SP)
-
- // FUNC2
- MOVL R13, R15
- XORL R14, R15
- XORL R10, R15
-
- // MIX
- ROLL $+30, R13
- ADDL R15, R11
- MOVL R12, R8
- ROLL $+5, R8
- LEAL 3395469782(R11)(R9*1), R11
- ADDL R8, R11
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 36(SP), R9
- MOVL R9, 292(R8)
-
- // ROUND4(74)
- // SHUFFLE
- MOVL 40(SP), R9
- XORL 28(SP), R9
- XORL 8(SP), R9
- XORL 48(SP), R9
- ROLL $+1, R9
- MOVL R9, 40(SP)
-
- // FUNC2
- MOVL R12, R15
- XORL R13, R15
- XORL R14, R15
-
- // MIX
- ROLL $+30, R12
- ADDL R15, R10
- MOVL R11, R8
- ROLL $+5, R8
- LEAL 3395469782(R10)(R9*1), R10
- ADDL R8, R10
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 40(SP), R9
- MOVL R9, 296(R8)
-
- // ROUND4(75)
- // SHUFFLE
- MOVL 44(SP), R9
- XORL 32(SP), R9
- XORL 12(SP), R9
- XORL 52(SP), R9
- ROLL $+1, R9
- MOVL R9, 44(SP)
-
- // FUNC2
- MOVL R11, R15
- XORL R12, R15
- XORL R13, R15
-
- // MIX
- ROLL $+30, R11
- ADDL R15, R14
- MOVL R10, R8
- ROLL $+5, R8
- LEAL 3395469782(R14)(R9*1), R14
- ADDL R8, R14
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 44(SP), R9
- MOVL R9, 300(R8)
-
- // ROUND4(76)
- // SHUFFLE
- MOVL 48(SP), R9
- XORL 36(SP), R9
- XORL 16(SP), R9
- XORL 56(SP), R9
- ROLL $+1, R9
- MOVL R9, 48(SP)
-
- // FUNC2
- MOVL R10, R15
- XORL R11, R15
- XORL R12, R15
-
- // MIX
- ROLL $+30, R10
- ADDL R15, R13
- MOVL R14, R8
- ROLL $+5, R8
- LEAL 3395469782(R13)(R9*1), R13
- ADDL R8, R13
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 48(SP), R9
- MOVL R9, 304(R8)
-
- // ROUND4(77)
- // SHUFFLE
- MOVL 52(SP), R9
- XORL 40(SP), R9
- XORL 20(SP), R9
- XORL 60(SP), R9
- ROLL $+1, R9
- MOVL R9, 52(SP)
-
- // FUNC2
- MOVL R14, R15
- XORL R10, R15
- XORL R11, R15
-
- // MIX
- ROLL $+30, R14
- ADDL R15, R12
- MOVL R13, R8
- ROLL $+5, R8
- LEAL 3395469782(R12)(R9*1), R12
- ADDL R8, R12
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 52(SP), R9
- MOVL R9, 308(R8)
-
- // ROUND4(78)
- // SHUFFLE
- MOVL 56(SP), R9
- XORL 44(SP), R9
- XORL 24(SP), R9
- XORL (SP), R9
- ROLL $+1, R9
- MOVL R9, 56(SP)
-
- // FUNC2
- MOVL R13, R15
- XORL R14, R15
- XORL R10, R15
-
- // MIX
- ROLL $+30, R13
- ADDL R15, R11
- MOVL R12, R8
- ROLL $+5, R8
- LEAL 3395469782(R11)(R9*1), R11
- ADDL R8, R11
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 56(SP), R9
- MOVL R9, 312(R8)
-
- // ROUND4(79)
- // SHUFFLE
- MOVL 60(SP), R9
- XORL 48(SP), R9
- XORL 28(SP), R9
- XORL 4(SP), R9
- ROLL $+1, R9
- MOVL R9, 60(SP)
-
- // FUNC2
- MOVL R12, R15
- XORL R13, R15
- XORL R14, R15
-
- // MIX
- ROLL $+30, R12
- ADDL R15, R10
- MOVL R11, R8
- ROLL $+5, R8
- LEAL 3395469782(R10)(R9*1), R10
- ADDL R8, R10
-
- // Load m1
- MOVQ m1_base+32(FP), R8
- MOVL 60(SP), R9
- MOVL R9, 316(R8)
-
- // Add registers to temp hash.
- ADDL R10, AX
- ADDL R11, BX
- ADDL R12, CX
- ADDL R13, DX
- ADDL R14, BP
- ADDQ $+64, DI
- CMPQ DI, SI
- JB loop
-
-end:
- MOVQ dig+0(FP), SI
- MOVL AX, (SI)
- MOVL BX, 4(SI)
- MOVL CX, 8(SI)
- MOVL DX, 12(SI)
- MOVL BP, 16(SI)
- RET
diff --git a/vendor/github.com/pjbgf/sha1cd/sha1cdblock_generic.go b/vendor/github.com/pjbgf/sha1cd/sha1cdblock_generic.go
deleted file mode 100644
index ba8b96e8794..00000000000
--- a/vendor/github.com/pjbgf/sha1cd/sha1cdblock_generic.go
+++ /dev/null
@@ -1,268 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Originally from: https://github.com/go/blob/master/src/crypto/sha1/sha1block.go
-// It has been modified to support collision detection.
-
-package sha1cd
-
-import (
- "fmt"
- "math/bits"
-
- shared "github.com/pjbgf/sha1cd/internal"
- "github.com/pjbgf/sha1cd/ubc"
-)
-
-// blockGeneric is a portable, pure Go version of the SHA-1 block step.
-// It's used by sha1block_generic.go and tests.
-func blockGeneric(dig *digest, p []byte) {
- var w [16]uint32
-
- // cs stores the pre-step compression state for only the steps required for the
- // collision detection, which are 0, 58 and 65.
- // Refer to ubc/const.go for more details.
- cs := [shared.PreStepState][shared.WordBuffers]uint32{}
-
- h0, h1, h2, h3, h4 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4]
- for len(p) >= shared.Chunk {
- m1 := [shared.Rounds]uint32{}
- hi := 1
-
- // Collision attacks are thwarted by hashing a detected near-collision block 3 times.
- // Think of it as extending SHA-1 from 80-steps to 240-steps for such blocks:
- // The best collision attacks against SHA-1 have complexity about 2^60,
- // thus for 240-steps an immediate lower-bound for the best cryptanalytic attacks would be 2^180.
- // An attacker would be better off using a generic birthday search of complexity 2^80.
- rehash:
- a, b, c, d, e := h0, h1, h2, h3, h4
-
- // Each of the four 20-iteration rounds
- // differs only in the computation of f and
- // the choice of K (K0, K1, etc).
- i := 0
-
- // Store pre-step compression state for the collision detection.
- cs[0] = [shared.WordBuffers]uint32{a, b, c, d, e}
-
- for ; i < 16; i++ {
- // load step
- j := i * 4
- w[i] = uint32(p[j])<<24 | uint32(p[j+1])<<16 | uint32(p[j+2])<<8 | uint32(p[j+3])
-
- f := b&c | (^b)&d
- t := bits.RotateLeft32(a, 5) + f + e + w[i&0xf] + shared.K0
- a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d
-
- // Store compression state for the collision detection.
- m1[i] = w[i&0xf]
- }
- for ; i < 20; i++ {
- tmp := w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf]
- w[i&0xf] = tmp<<1 | tmp>>(32-1)
-
- f := b&c | (^b)&d
- t := bits.RotateLeft32(a, 5) + f + e + w[i&0xf] + shared.K0
- a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d
-
- // Store compression state for the collision detection.
- m1[i] = w[i&0xf]
- }
- for ; i < 40; i++ {
- tmp := w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf]
- w[i&0xf] = tmp<<1 | tmp>>(32-1)
-
- f := b ^ c ^ d
- t := bits.RotateLeft32(a, 5) + f + e + w[i&0xf] + shared.K1
- a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d
-
- // Store compression state for the collision detection.
- m1[i] = w[i&0xf]
- }
- for ; i < 60; i++ {
- if i == 58 {
- // Store pre-step compression state for the collision detection.
- cs[1] = [shared.WordBuffers]uint32{a, b, c, d, e}
- }
-
- tmp := w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf]
- w[i&0xf] = tmp<<1 | tmp>>(32-1)
-
- f := ((b | c) & d) | (b & c)
- t := bits.RotateLeft32(a, 5) + f + e + w[i&0xf] + shared.K2
- a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d
-
- // Store compression state for the collision detection.
- m1[i] = w[i&0xf]
- }
- for ; i < 80; i++ {
- if i == 65 {
- // Store pre-step compression state for the collision detection.
- cs[2] = [shared.WordBuffers]uint32{a, b, c, d, e}
- }
-
- tmp := w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf]
- w[i&0xf] = tmp<<1 | tmp>>(32-1)
-
- f := b ^ c ^ d
- t := bits.RotateLeft32(a, 5) + f + e + w[i&0xf] + shared.K3
- a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d
-
- // Store compression state for the collision detection.
- m1[i] = w[i&0xf]
- }
-
- h0 += a
- h1 += b
- h2 += c
- h3 += d
- h4 += e
-
- if hi == 2 {
- hi++
- goto rehash
- }
-
- if hi == 1 {
- col := checkCollision(m1, cs, [shared.WordBuffers]uint32{h0, h1, h2, h3, h4})
- if col {
- dig.col = true
- hi++
- goto rehash
- }
- }
-
- p = p[shared.Chunk:]
- }
-
- dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4] = h0, h1, h2, h3, h4
-}
-
-func checkCollision(
- m1 [shared.Rounds]uint32,
- cs [shared.PreStepState][shared.WordBuffers]uint32,
- state [shared.WordBuffers]uint32) bool {
-
- if mask := ubc.CalculateDvMask(m1); mask != 0 {
- dvs := ubc.SHA1_dvs()
-
- for i := 0; dvs[i].DvType != 0; i++ {
- if (mask & ((uint32)(1) << uint32(dvs[i].MaskB))) != 0 {
- var csState [shared.WordBuffers]uint32
- switch dvs[i].TestT {
- case 58:
- csState = cs[1]
- case 65:
- csState = cs[2]
- case 0:
- csState = cs[0]
- default:
- panic(fmt.Sprintf("dvs data is trying to use a testT that isn't available: %d", dvs[i].TestT))
- }
-
- col := hasCollided(
- dvs[i].TestT, // testT is the step number
- // m2 is a secondary message created XORing with
- // ubc's DM prior to the SHA recompression step.
- m1, dvs[i].Dm,
- csState,
- state)
-
- if col {
- return true
- }
- }
- }
- }
- return false
-}
-
-func hasCollided(step uint32, m1, dm [shared.Rounds]uint32,
- state [shared.WordBuffers]uint32, h [shared.WordBuffers]uint32) bool {
- // Intermediary Hash Value.
- ihv := [shared.WordBuffers]uint32{}
-
- a, b, c, d, e := state[0], state[1], state[2], state[3], state[4]
-
- // Walk backwards from current step to undo previous compression.
- // The existing collision detection does not have dvs higher than 65,
- // start value of i accordingly.
- for i := uint32(64); i >= 60; i-- {
- a, b, c, d, e = b, c, d, e, a
- if step > i {
- b = bits.RotateLeft32(b, -30)
- f := b ^ c ^ d
- e -= bits.RotateLeft32(a, 5) + f + shared.K3 + (m1[i] ^ dm[i]) // m2 = m1 ^ dm.
- }
- }
- for i := uint32(59); i >= 40; i-- {
- a, b, c, d, e = b, c, d, e, a
- if step > i {
- b = bits.RotateLeft32(b, -30)
- f := ((b | c) & d) | (b & c)
- e -= bits.RotateLeft32(a, 5) + f + shared.K2 + (m1[i] ^ dm[i])
- }
- }
- for i := uint32(39); i >= 20; i-- {
- a, b, c, d, e = b, c, d, e, a
- if step > i {
- b = bits.RotateLeft32(b, -30)
- f := b ^ c ^ d
- e -= bits.RotateLeft32(a, 5) + f + shared.K1 + (m1[i] ^ dm[i])
- }
- }
- for i := uint32(20); i > 0; i-- {
- j := i - 1
- a, b, c, d, e = b, c, d, e, a
- if step > j {
- b = bits.RotateLeft32(b, -30) // undo the rotate left
- f := b&c | (^b)&d
- // subtract from e
- e -= bits.RotateLeft32(a, 5) + f + shared.K0 + (m1[j] ^ dm[j])
- }
- }
-
- ihv[0] = a
- ihv[1] = b
- ihv[2] = c
- ihv[3] = d
- ihv[4] = e
- a = state[0]
- b = state[1]
- c = state[2]
- d = state[3]
- e = state[4]
-
- // Recompress blocks based on the current step.
- // The existing collision detection does not have dvs below 58, so they have been removed
- // from the source code. If new dvs are added which target rounds below 40, that logic
- // will need to be readded here.
- for i := uint32(40); i < 60; i++ {
- if step <= i {
- f := ((b | c) & d) | (b & c)
- t := bits.RotateLeft32(a, 5) + f + e + shared.K2 + (m1[i] ^ dm[i])
- a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d
- }
- }
- for i := uint32(60); i < 80; i++ {
- if step <= i {
- f := b ^ c ^ d
- t := bits.RotateLeft32(a, 5) + f + e + shared.K3 + (m1[i] ^ dm[i])
- a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d
- }
- }
-
- ihv[0] += a
- ihv[1] += b
- ihv[2] += c
- ihv[3] += d
- ihv[4] += e
-
- if ((ihv[0] ^ h[0]) | (ihv[1] ^ h[1]) |
- (ihv[2] ^ h[2]) | (ihv[3] ^ h[3]) | (ihv[4] ^ h[4])) == 0 {
- return true
- }
-
- return false
-}
diff --git a/vendor/github.com/pjbgf/sha1cd/sha1cdblock_noasm.go b/vendor/github.com/pjbgf/sha1cd/sha1cdblock_noasm.go
deleted file mode 100644
index 15bae5a7e8e..00000000000
--- a/vendor/github.com/pjbgf/sha1cd/sha1cdblock_noasm.go
+++ /dev/null
@@ -1,8 +0,0 @@
-//go:build !amd64 || noasm || !gc
-// +build !amd64 noasm !gc
-
-package sha1cd
-
-func block(dig *digest, p []byte) {
- blockGeneric(dig, p)
-}
diff --git a/vendor/github.com/pjbgf/sha1cd/ubc/check.go b/vendor/github.com/pjbgf/sha1cd/ubc/check.go
deleted file mode 100644
index 167a5558fdd..00000000000
--- a/vendor/github.com/pjbgf/sha1cd/ubc/check.go
+++ /dev/null
@@ -1,368 +0,0 @@
-// Based on the C implementation from Marc Stevens and Dan Shumow.
-// https://github.com/cr-marcstevens/sha1collisiondetection
-
-package ubc
-
-type DvInfo struct {
- // DvType, DvK and DvB define the DV: I(K,B) or II(K,B) (see the paper).
- // https://marc-stevens.nl/research/papers/C13-S.pdf
- DvType uint32
- DvK uint32
- DvB uint32
-
- // TestT is the step to do the recompression from for collision detection.
- TestT uint32
-
- // MaskI and MaskB define the bit to check for each DV in the dvmask returned by ubc_check.
- MaskI uint32
- MaskB uint32
-
- // Dm is the expanded message block XOR-difference defined by the DV.
- Dm [80]uint32
-}
-
-// CalculateDvMask takes as input an expanded message block and verifies the unavoidable bitconditions
-// for all listed DVs. It returns a dvmask where each bit belonging to a DV is set if all
-// unavoidable bitconditions for that DV have been met.
-// Thus, one needs to do the recompression check for each DV that has its bit set.
-func CalculateDvMask(W [80]uint32) uint32 {
- mask := uint32(0xFFFFFFFF)
- mask &= (((((W[44] ^ W[45]) >> 29) & 1) - 1) | ^(DV_I_48_0_bit | DV_I_51_0_bit | DV_I_52_0_bit | DV_II_45_0_bit | DV_II_46_0_bit | DV_II_50_0_bit | DV_II_51_0_bit))
- mask &= (((((W[49] ^ W[50]) >> 29) & 1) - 1) | ^(DV_I_46_0_bit | DV_II_45_0_bit | DV_II_50_0_bit | DV_II_51_0_bit | DV_II_55_0_bit | DV_II_56_0_bit))
- mask &= (((((W[48] ^ W[49]) >> 29) & 1) - 1) | ^(DV_I_45_0_bit | DV_I_52_0_bit | DV_II_49_0_bit | DV_II_50_0_bit | DV_II_54_0_bit | DV_II_55_0_bit))
- mask &= ((((W[47] ^ (W[50] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_47_0_bit | DV_I_49_0_bit | DV_I_51_0_bit | DV_II_45_0_bit | DV_II_51_0_bit | DV_II_56_0_bit))
- mask &= (((((W[47] ^ W[48]) >> 29) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_51_0_bit | DV_II_48_0_bit | DV_II_49_0_bit | DV_II_53_0_bit | DV_II_54_0_bit))
- mask &= (((((W[46] >> 4) ^ (W[49] >> 29)) & 1) - 1) | ^(DV_I_46_0_bit | DV_I_48_0_bit | DV_I_50_0_bit | DV_I_52_0_bit | DV_II_50_0_bit | DV_II_55_0_bit))
- mask &= (((((W[46] ^ W[47]) >> 29) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_50_0_bit | DV_II_47_0_bit | DV_II_48_0_bit | DV_II_52_0_bit | DV_II_53_0_bit))
- mask &= (((((W[45] >> 4) ^ (W[48] >> 29)) & 1) - 1) | ^(DV_I_45_0_bit | DV_I_47_0_bit | DV_I_49_0_bit | DV_I_51_0_bit | DV_II_49_0_bit | DV_II_54_0_bit))
- mask &= (((((W[45] ^ W[46]) >> 29) & 1) - 1) | ^(DV_I_49_0_bit | DV_I_52_0_bit | DV_II_46_0_bit | DV_II_47_0_bit | DV_II_51_0_bit | DV_II_52_0_bit))
- mask &= (((((W[44] >> 4) ^ (W[47] >> 29)) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_I_48_0_bit | DV_I_50_0_bit | DV_II_48_0_bit | DV_II_53_0_bit))
- mask &= (((((W[43] >> 4) ^ (W[46] >> 29)) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_I_47_0_bit | DV_I_49_0_bit | DV_II_47_0_bit | DV_II_52_0_bit))
- mask &= (((((W[43] ^ W[44]) >> 29) & 1) - 1) | ^(DV_I_47_0_bit | DV_I_50_0_bit | DV_I_51_0_bit | DV_II_45_0_bit | DV_II_49_0_bit | DV_II_50_0_bit))
- mask &= (((((W[42] >> 4) ^ (W[45] >> 29)) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_I_48_0_bit | DV_I_52_0_bit | DV_II_46_0_bit | DV_II_51_0_bit))
- mask &= (((((W[41] >> 4) ^ (W[44] >> 29)) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_I_47_0_bit | DV_I_51_0_bit | DV_II_45_0_bit | DV_II_50_0_bit))
- mask &= (((((W[40] ^ W[41]) >> 29) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_47_0_bit | DV_I_48_0_bit | DV_II_46_0_bit | DV_II_47_0_bit | DV_II_56_0_bit))
- mask &= (((((W[54] ^ W[55]) >> 29) & 1) - 1) | ^(DV_I_51_0_bit | DV_II_47_0_bit | DV_II_50_0_bit | DV_II_55_0_bit | DV_II_56_0_bit))
- mask &= (((((W[53] ^ W[54]) >> 29) & 1) - 1) | ^(DV_I_50_0_bit | DV_II_46_0_bit | DV_II_49_0_bit | DV_II_54_0_bit | DV_II_55_0_bit))
- mask &= (((((W[52] ^ W[53]) >> 29) & 1) - 1) | ^(DV_I_49_0_bit | DV_II_45_0_bit | DV_II_48_0_bit | DV_II_53_0_bit | DV_II_54_0_bit))
- mask &= ((((W[50] ^ (W[53] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_50_0_bit | DV_I_52_0_bit | DV_II_46_0_bit | DV_II_48_0_bit | DV_II_54_0_bit))
- mask &= (((((W[50] ^ W[51]) >> 29) & 1) - 1) | ^(DV_I_47_0_bit | DV_II_46_0_bit | DV_II_51_0_bit | DV_II_52_0_bit | DV_II_56_0_bit))
- mask &= ((((W[49] ^ (W[52] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_49_0_bit | DV_I_51_0_bit | DV_II_45_0_bit | DV_II_47_0_bit | DV_II_53_0_bit))
- mask &= ((((W[48] ^ (W[51] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_48_0_bit | DV_I_50_0_bit | DV_I_52_0_bit | DV_II_46_0_bit | DV_II_52_0_bit))
- mask &= (((((W[42] ^ W[43]) >> 29) & 1) - 1) | ^(DV_I_46_0_bit | DV_I_49_0_bit | DV_I_50_0_bit | DV_II_48_0_bit | DV_II_49_0_bit))
- mask &= (((((W[41] ^ W[42]) >> 29) & 1) - 1) | ^(DV_I_45_0_bit | DV_I_48_0_bit | DV_I_49_0_bit | DV_II_47_0_bit | DV_II_48_0_bit))
- mask &= (((((W[40] >> 4) ^ (W[43] >> 29)) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_I_50_0_bit | DV_II_49_0_bit | DV_II_56_0_bit))
- mask &= (((((W[39] >> 4) ^ (W[42] >> 29)) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_I_49_0_bit | DV_II_48_0_bit | DV_II_55_0_bit))
-
- if (mask & (DV_I_44_0_bit | DV_I_48_0_bit | DV_II_47_0_bit | DV_II_54_0_bit | DV_II_56_0_bit)) != 0 {
- mask &= (((((W[38] >> 4) ^ (W[41] >> 29)) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_48_0_bit | DV_II_47_0_bit | DV_II_54_0_bit | DV_II_56_0_bit))
- }
- mask &= (((((W[37] >> 4) ^ (W[40] >> 29)) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_47_0_bit | DV_II_46_0_bit | DV_II_53_0_bit | DV_II_55_0_bit))
- if (mask & (DV_I_52_0_bit | DV_II_48_0_bit | DV_II_51_0_bit | DV_II_56_0_bit)) != 0 {
- mask &= (((((W[55] ^ W[56]) >> 29) & 1) - 1) | ^(DV_I_52_0_bit | DV_II_48_0_bit | DV_II_51_0_bit | DV_II_56_0_bit))
- }
- if (mask & (DV_I_52_0_bit | DV_II_48_0_bit | DV_II_50_0_bit | DV_II_56_0_bit)) != 0 {
- mask &= ((((W[52] ^ (W[55] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_52_0_bit | DV_II_48_0_bit | DV_II_50_0_bit | DV_II_56_0_bit))
- }
- if (mask & (DV_I_51_0_bit | DV_II_47_0_bit | DV_II_49_0_bit | DV_II_55_0_bit)) != 0 {
- mask &= ((((W[51] ^ (W[54] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_51_0_bit | DV_II_47_0_bit | DV_II_49_0_bit | DV_II_55_0_bit))
- }
- if (mask & (DV_I_48_0_bit | DV_II_47_0_bit | DV_II_52_0_bit | DV_II_53_0_bit)) != 0 {
- mask &= (((((W[51] ^ W[52]) >> 29) & 1) - 1) | ^(DV_I_48_0_bit | DV_II_47_0_bit | DV_II_52_0_bit | DV_II_53_0_bit))
- }
- if (mask & (DV_I_46_0_bit | DV_I_49_0_bit | DV_II_45_0_bit | DV_II_48_0_bit)) != 0 {
- mask &= (((((W[36] >> 4) ^ (W[40] >> 29)) & 1) - 1) | ^(DV_I_46_0_bit | DV_I_49_0_bit | DV_II_45_0_bit | DV_II_48_0_bit))
- }
- if (mask & (DV_I_52_0_bit | DV_II_48_0_bit | DV_II_49_0_bit)) != 0 {
- mask &= ((0 - (((W[53] ^ W[56]) >> 29) & 1)) | ^(DV_I_52_0_bit | DV_II_48_0_bit | DV_II_49_0_bit))
- }
- if (mask & (DV_I_50_0_bit | DV_II_46_0_bit | DV_II_47_0_bit)) != 0 {
- mask &= ((0 - (((W[51] ^ W[54]) >> 29) & 1)) | ^(DV_I_50_0_bit | DV_II_46_0_bit | DV_II_47_0_bit))
- }
- if (mask & (DV_I_49_0_bit | DV_I_51_0_bit | DV_II_45_0_bit)) != 0 {
- mask &= ((0 - (((W[50] ^ W[52]) >> 29) & 1)) | ^(DV_I_49_0_bit | DV_I_51_0_bit | DV_II_45_0_bit))
- }
- if (mask & (DV_I_48_0_bit | DV_I_50_0_bit | DV_I_52_0_bit)) != 0 {
- mask &= ((0 - (((W[49] ^ W[51]) >> 29) & 1)) | ^(DV_I_48_0_bit | DV_I_50_0_bit | DV_I_52_0_bit))
- }
- if (mask & (DV_I_47_0_bit | DV_I_49_0_bit | DV_I_51_0_bit)) != 0 {
- mask &= ((0 - (((W[48] ^ W[50]) >> 29) & 1)) | ^(DV_I_47_0_bit | DV_I_49_0_bit | DV_I_51_0_bit))
- }
- if (mask & (DV_I_46_0_bit | DV_I_48_0_bit | DV_I_50_0_bit)) != 0 {
- mask &= ((0 - (((W[47] ^ W[49]) >> 29) & 1)) | ^(DV_I_46_0_bit | DV_I_48_0_bit | DV_I_50_0_bit))
- }
- if (mask & (DV_I_45_0_bit | DV_I_47_0_bit | DV_I_49_0_bit)) != 0 {
- mask &= ((0 - (((W[46] ^ W[48]) >> 29) & 1)) | ^(DV_I_45_0_bit | DV_I_47_0_bit | DV_I_49_0_bit))
- }
- mask &= ((((W[45] ^ W[47]) & (1 << 6)) - (1 << 6)) | ^(DV_I_47_2_bit | DV_I_49_2_bit | DV_I_51_2_bit))
- if (mask & (DV_I_44_0_bit | DV_I_46_0_bit | DV_I_48_0_bit)) != 0 {
- mask &= ((0 - (((W[45] ^ W[47]) >> 29) & 1)) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_I_48_0_bit))
- }
- mask &= (((((W[44] ^ W[46]) >> 6) & 1) - 1) | ^(DV_I_46_2_bit | DV_I_48_2_bit | DV_I_50_2_bit))
- if (mask & (DV_I_43_0_bit | DV_I_45_0_bit | DV_I_47_0_bit)) != 0 {
- mask &= ((0 - (((W[44] ^ W[46]) >> 29) & 1)) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_I_47_0_bit))
- }
- mask &= ((0 - ((W[41] ^ (W[42] >> 5)) & (1 << 1))) | ^(DV_I_48_2_bit | DV_II_46_2_bit | DV_II_51_2_bit))
- mask &= ((0 - ((W[40] ^ (W[41] >> 5)) & (1 << 1))) | ^(DV_I_47_2_bit | DV_I_51_2_bit | DV_II_50_2_bit))
- if (mask & (DV_I_44_0_bit | DV_I_46_0_bit | DV_II_56_0_bit)) != 0 {
- mask &= ((0 - (((W[40] ^ W[42]) >> 4) & 1)) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_II_56_0_bit))
- }
- mask &= ((0 - ((W[39] ^ (W[40] >> 5)) & (1 << 1))) | ^(DV_I_46_2_bit | DV_I_50_2_bit | DV_II_49_2_bit))
- if (mask & (DV_I_43_0_bit | DV_I_45_0_bit | DV_II_55_0_bit)) != 0 {
- mask &= ((0 - (((W[39] ^ W[41]) >> 4) & 1)) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_II_55_0_bit))
- }
- if (mask & (DV_I_44_0_bit | DV_II_54_0_bit | DV_II_56_0_bit)) != 0 {
- mask &= ((0 - (((W[38] ^ W[40]) >> 4) & 1)) | ^(DV_I_44_0_bit | DV_II_54_0_bit | DV_II_56_0_bit))
- }
- if (mask & (DV_I_43_0_bit | DV_II_53_0_bit | DV_II_55_0_bit)) != 0 {
- mask &= ((0 - (((W[37] ^ W[39]) >> 4) & 1)) | ^(DV_I_43_0_bit | DV_II_53_0_bit | DV_II_55_0_bit))
- }
- mask &= ((0 - ((W[36] ^ (W[37] >> 5)) & (1 << 1))) | ^(DV_I_47_2_bit | DV_I_50_2_bit | DV_II_46_2_bit))
- if (mask & (DV_I_45_0_bit | DV_I_48_0_bit | DV_II_47_0_bit)) != 0 {
- mask &= (((((W[35] >> 4) ^ (W[39] >> 29)) & 1) - 1) | ^(DV_I_45_0_bit | DV_I_48_0_bit | DV_II_47_0_bit))
- }
- if (mask & (DV_I_48_0_bit | DV_II_48_0_bit)) != 0 {
- mask &= ((0 - ((W[63] ^ (W[64] >> 5)) & (1 << 0))) | ^(DV_I_48_0_bit | DV_II_48_0_bit))
- }
- if (mask & (DV_I_45_0_bit | DV_II_45_0_bit)) != 0 {
- mask &= ((0 - ((W[63] ^ (W[64] >> 5)) & (1 << 1))) | ^(DV_I_45_0_bit | DV_II_45_0_bit))
- }
- if (mask & (DV_I_47_0_bit | DV_II_47_0_bit)) != 0 {
- mask &= ((0 - ((W[62] ^ (W[63] >> 5)) & (1 << 0))) | ^(DV_I_47_0_bit | DV_II_47_0_bit))
- }
- if (mask & (DV_I_46_0_bit | DV_II_46_0_bit)) != 0 {
- mask &= ((0 - ((W[61] ^ (W[62] >> 5)) & (1 << 0))) | ^(DV_I_46_0_bit | DV_II_46_0_bit))
- }
- mask &= ((0 - ((W[61] ^ (W[62] >> 5)) & (1 << 2))) | ^(DV_I_46_2_bit | DV_II_46_2_bit))
- if (mask & (DV_I_45_0_bit | DV_II_45_0_bit)) != 0 {
- mask &= ((0 - ((W[60] ^ (W[61] >> 5)) & (1 << 0))) | ^(DV_I_45_0_bit | DV_II_45_0_bit))
- }
- if (mask & (DV_II_51_0_bit | DV_II_54_0_bit)) != 0 {
- mask &= (((((W[58] ^ W[59]) >> 29) & 1) - 1) | ^(DV_II_51_0_bit | DV_II_54_0_bit))
- }
- if (mask & (DV_II_50_0_bit | DV_II_53_0_bit)) != 0 {
- mask &= (((((W[57] ^ W[58]) >> 29) & 1) - 1) | ^(DV_II_50_0_bit | DV_II_53_0_bit))
- }
- if (mask & (DV_II_52_0_bit | DV_II_54_0_bit)) != 0 {
- mask &= ((((W[56] ^ (W[59] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_II_52_0_bit | DV_II_54_0_bit))
- }
- if (mask & (DV_II_51_0_bit | DV_II_52_0_bit)) != 0 {
- mask &= ((0 - (((W[56] ^ W[59]) >> 29) & 1)) | ^(DV_II_51_0_bit | DV_II_52_0_bit))
- }
- if (mask & (DV_II_49_0_bit | DV_II_52_0_bit)) != 0 {
- mask &= (((((W[56] ^ W[57]) >> 29) & 1) - 1) | ^(DV_II_49_0_bit | DV_II_52_0_bit))
- }
- if (mask & (DV_II_51_0_bit | DV_II_53_0_bit)) != 0 {
- mask &= ((((W[55] ^ (W[58] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_II_51_0_bit | DV_II_53_0_bit))
- }
- if (mask & (DV_II_50_0_bit | DV_II_52_0_bit)) != 0 {
- mask &= ((((W[54] ^ (W[57] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_II_50_0_bit | DV_II_52_0_bit))
- }
- if (mask & (DV_II_49_0_bit | DV_II_51_0_bit)) != 0 {
- mask &= ((((W[53] ^ (W[56] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_II_49_0_bit | DV_II_51_0_bit))
- }
- mask &= ((((W[51] ^ (W[50] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_50_2_bit | DV_II_46_2_bit))
- mask &= ((((W[48] ^ W[50]) & (1 << 6)) - (1 << 6)) | ^(DV_I_50_2_bit | DV_II_46_2_bit))
- if (mask & (DV_I_51_0_bit | DV_I_52_0_bit)) != 0 {
- mask &= ((0 - (((W[48] ^ W[55]) >> 29) & 1)) | ^(DV_I_51_0_bit | DV_I_52_0_bit))
- }
- mask &= ((((W[47] ^ W[49]) & (1 << 6)) - (1 << 6)) | ^(DV_I_49_2_bit | DV_I_51_2_bit))
- mask &= ((((W[48] ^ (W[47] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_47_2_bit | DV_II_51_2_bit))
- mask &= ((((W[46] ^ W[48]) & (1 << 6)) - (1 << 6)) | ^(DV_I_48_2_bit | DV_I_50_2_bit))
- mask &= ((((W[47] ^ (W[46] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_46_2_bit | DV_II_50_2_bit))
- mask &= ((0 - ((W[44] ^ (W[45] >> 5)) & (1 << 1))) | ^(DV_I_51_2_bit | DV_II_49_2_bit))
- mask &= ((((W[43] ^ W[45]) & (1 << 6)) - (1 << 6)) | ^(DV_I_47_2_bit | DV_I_49_2_bit))
- mask &= (((((W[42] ^ W[44]) >> 6) & 1) - 1) | ^(DV_I_46_2_bit | DV_I_48_2_bit))
- mask &= ((((W[43] ^ (W[42] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_II_46_2_bit | DV_II_51_2_bit))
- mask &= ((((W[42] ^ (W[41] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_51_2_bit | DV_II_50_2_bit))
- mask &= ((((W[41] ^ (W[40] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_50_2_bit | DV_II_49_2_bit))
- if (mask & (DV_I_52_0_bit | DV_II_51_0_bit)) != 0 {
- mask &= ((((W[39] ^ (W[43] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_52_0_bit | DV_II_51_0_bit))
- }
- if (mask & (DV_I_51_0_bit | DV_II_50_0_bit)) != 0 {
- mask &= ((((W[38] ^ (W[42] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_51_0_bit | DV_II_50_0_bit))
- }
- if (mask & (DV_I_48_2_bit | DV_I_51_2_bit)) != 0 {
- mask &= ((0 - ((W[37] ^ (W[38] >> 5)) & (1 << 1))) | ^(DV_I_48_2_bit | DV_I_51_2_bit))
- }
- if (mask & (DV_I_50_0_bit | DV_II_49_0_bit)) != 0 {
- mask &= ((((W[37] ^ (W[41] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_50_0_bit | DV_II_49_0_bit))
- }
- if (mask & (DV_II_52_0_bit | DV_II_54_0_bit)) != 0 {
- mask &= ((0 - ((W[36] ^ W[38]) & (1 << 4))) | ^(DV_II_52_0_bit | DV_II_54_0_bit))
- }
- mask &= ((0 - ((W[35] ^ (W[36] >> 5)) & (1 << 1))) | ^(DV_I_46_2_bit | DV_I_49_2_bit))
- if (mask & (DV_I_51_0_bit | DV_II_47_0_bit)) != 0 {
- mask &= ((((W[35] ^ (W[39] >> 25)) & (1 << 3)) - (1 << 3)) | ^(DV_I_51_0_bit | DV_II_47_0_bit))
- }
-
- if mask != 0 {
- if (mask & DV_I_43_0_bit) != 0 {
- if not((W[61]^(W[62]>>5))&(1<<1)) != 0 ||
- not(not((W[59]^(W[63]>>25))&(1<<5))) != 0 ||
- not((W[58]^(W[63]>>30))&(1<<0)) != 0 {
- mask &= ^DV_I_43_0_bit
- }
- }
- if (mask & DV_I_44_0_bit) != 0 {
- if not((W[62]^(W[63]>>5))&(1<<1)) != 0 ||
- not(not((W[60]^(W[64]>>25))&(1<<5))) != 0 ||
- not((W[59]^(W[64]>>30))&(1<<0)) != 0 {
- mask &= ^DV_I_44_0_bit
- }
- }
- if (mask & DV_I_46_2_bit) != 0 {
- mask &= ((^((W[40] ^ W[42]) >> 2)) | ^DV_I_46_2_bit)
- }
- if (mask & DV_I_47_2_bit) != 0 {
- if not((W[62]^(W[63]>>5))&(1<<2)) != 0 ||
- not(not((W[41]^W[43])&(1<<6))) != 0 {
- mask &= ^DV_I_47_2_bit
- }
- }
- if (mask & DV_I_48_2_bit) != 0 {
- if not((W[63]^(W[64]>>5))&(1<<2)) != 0 ||
- not(not((W[48]^(W[49]<<5))&(1<<6))) != 0 {
- mask &= ^DV_I_48_2_bit
- }
- }
- if (mask & DV_I_49_2_bit) != 0 {
- if not(not((W[49]^(W[50]<<5))&(1<<6))) != 0 ||
- not((W[42]^W[50])&(1<<1)) != 0 ||
- not(not((W[39]^(W[40]<<5))&(1<<6))) != 0 ||
- not((W[38]^W[40])&(1<<1)) != 0 {
- mask &= ^DV_I_49_2_bit
- }
- }
- if (mask & DV_I_50_0_bit) != 0 {
- mask &= (((W[36] ^ W[37]) << 7) | ^DV_I_50_0_bit)
- }
- if (mask & DV_I_50_2_bit) != 0 {
- mask &= (((W[43] ^ W[51]) << 11) | ^DV_I_50_2_bit)
- }
- if (mask & DV_I_51_0_bit) != 0 {
- mask &= (((W[37] ^ W[38]) << 9) | ^DV_I_51_0_bit)
- }
- if (mask & DV_I_51_2_bit) != 0 {
- if not(not((W[51]^(W[52]<<5))&(1<<6))) != 0 ||
- not(not((W[49]^W[51])&(1<<6))) != 0 ||
- not(not((W[37]^(W[37]>>5))&(1<<1))) != 0 ||
- not(not((W[35]^(W[39]>>25))&(1<<5))) != 0 {
- mask &= ^DV_I_51_2_bit
- }
- }
- if (mask & DV_I_52_0_bit) != 0 {
- mask &= (((W[38] ^ W[39]) << 11) | ^DV_I_52_0_bit)
- }
- if (mask & DV_II_46_2_bit) != 0 {
- mask &= (((W[47] ^ W[51]) << 17) | ^DV_II_46_2_bit)
- }
- if (mask & DV_II_48_0_bit) != 0 {
- if not(not((W[36]^(W[40]>>25))&(1<<3))) != 0 ||
- not((W[35]^(W[40]<<2))&(1<<30)) != 0 {
- mask &= ^DV_II_48_0_bit
- }
- }
- if (mask & DV_II_49_0_bit) != 0 {
- if not(not((W[37]^(W[41]>>25))&(1<<3))) != 0 ||
- not((W[36]^(W[41]<<2))&(1<<30)) != 0 {
- mask &= ^DV_II_49_0_bit
- }
- }
- if (mask & DV_II_49_2_bit) != 0 {
- if not(not((W[53]^(W[54]<<5))&(1<<6))) != 0 ||
- not(not((W[51]^W[53])&(1<<6))) != 0 ||
- not((W[50]^W[54])&(1<<1)) != 0 ||
- not(not((W[45]^(W[46]<<5))&(1<<6))) != 0 ||
- not(not((W[37]^(W[41]>>25))&(1<<5))) != 0 ||
- not((W[36]^(W[41]>>30))&(1<<0)) != 0 {
- mask &= ^DV_II_49_2_bit
- }
- }
- if (mask & DV_II_50_0_bit) != 0 {
- if not((W[55]^W[58])&(1<<29)) != 0 ||
- not(not((W[38]^(W[42]>>25))&(1<<3))) != 0 ||
- not((W[37]^(W[42]<<2))&(1<<30)) != 0 {
- mask &= ^DV_II_50_0_bit
- }
- }
- if (mask & DV_II_50_2_bit) != 0 {
- if not(not((W[54]^(W[55]<<5))&(1<<6))) != 0 ||
- not(not((W[52]^W[54])&(1<<6))) != 0 ||
- not((W[51]^W[55])&(1<<1)) != 0 ||
- not((W[45]^W[47])&(1<<1)) != 0 ||
- not(not((W[38]^(W[42]>>25))&(1<<5))) != 0 ||
- not((W[37]^(W[42]>>30))&(1<<0)) != 0 {
- mask &= ^DV_II_50_2_bit
- }
- }
- if (mask & DV_II_51_0_bit) != 0 {
- if not(not((W[39]^(W[43]>>25))&(1<<3))) != 0 ||
- not((W[38]^(W[43]<<2))&(1<<30)) != 0 {
- mask &= ^DV_II_51_0_bit
- }
- }
- if (mask & DV_II_51_2_bit) != 0 {
- if not(not((W[55]^(W[56]<<5))&(1<<6))) != 0 ||
- not(not((W[53]^W[55])&(1<<6))) != 0 ||
- not((W[52]^W[56])&(1<<1)) != 0 ||
- not((W[46]^W[48])&(1<<1)) != 0 ||
- not(not((W[39]^(W[43]>>25))&(1<<5))) != 0 ||
- not((W[38]^(W[43]>>30))&(1<<0)) != 0 {
- mask &= ^DV_II_51_2_bit
- }
- }
- if (mask & DV_II_52_0_bit) != 0 {
- if not(not((W[59]^W[60])&(1<<29))) != 0 ||
- not(not((W[40]^(W[44]>>25))&(1<<3))) != 0 ||
- not(not((W[40]^(W[44]>>25))&(1<<4))) != 0 ||
- not((W[39]^(W[44]<<2))&(1<<30)) != 0 {
- mask &= ^DV_II_52_0_bit
- }
- }
- if (mask & DV_II_53_0_bit) != 0 {
- if not((W[58]^W[61])&(1<<29)) != 0 ||
- not(not((W[57]^(W[61]>>25))&(1<<4))) != 0 ||
- not(not((W[41]^(W[45]>>25))&(1<<3))) != 0 ||
- not(not((W[41]^(W[45]>>25))&(1<<4))) != 0 {
- mask &= ^DV_II_53_0_bit
- }
- }
- if (mask & DV_II_54_0_bit) != 0 {
- if not(not((W[58]^(W[62]>>25))&(1<<4))) != 0 ||
- not(not((W[42]^(W[46]>>25))&(1<<3))) != 0 ||
- not(not((W[42]^(W[46]>>25))&(1<<4))) != 0 {
- mask &= ^DV_II_54_0_bit
- }
- }
- if (mask & DV_II_55_0_bit) != 0 {
- if not(not((W[59]^(W[63]>>25))&(1<<4))) != 0 ||
- not(not((W[57]^(W[59]>>25))&(1<<4))) != 0 ||
- not(not((W[43]^(W[47]>>25))&(1<<3))) != 0 ||
- not(not((W[43]^(W[47]>>25))&(1<<4))) != 0 {
- mask &= ^DV_II_55_0_bit
- }
- }
- if (mask & DV_II_56_0_bit) != 0 {
- if not(not((W[60]^(W[64]>>25))&(1<<4))) != 0 ||
- not(not((W[44]^(W[48]>>25))&(1<<3))) != 0 ||
- not(not((W[44]^(W[48]>>25))&(1<<4))) != 0 {
- mask &= ^DV_II_56_0_bit
- }
- }
- }
-
- return mask
-}
-
-func not(x uint32) uint32 {
- if x == 0 {
- return 1
- }
-
- return 0
-}
-
-func SHA1_dvs() []DvInfo {
- return sha1_dvs
-}
diff --git a/vendor/github.com/pjbgf/sha1cd/ubc/const.go b/vendor/github.com/pjbgf/sha1cd/ubc/const.go
deleted file mode 100644
index eac14f466ff..00000000000
--- a/vendor/github.com/pjbgf/sha1cd/ubc/const.go
+++ /dev/null
@@ -1,624 +0,0 @@
-// Based on the C implementation from Marc Stevens and Dan Shumow.
-// https://github.com/cr-marcstevens/sha1collisiondetection
-
-package ubc
-
-const (
- CheckSize = 80
-
- DV_I_43_0_bit = (uint32)(1 << 0)
- DV_I_44_0_bit = (uint32)(1 << 1)
- DV_I_45_0_bit = (uint32)(1 << 2)
- DV_I_46_0_bit = (uint32)(1 << 3)
- DV_I_46_2_bit = (uint32)(1 << 4)
- DV_I_47_0_bit = (uint32)(1 << 5)
- DV_I_47_2_bit = (uint32)(1 << 6)
- DV_I_48_0_bit = (uint32)(1 << 7)
- DV_I_48_2_bit = (uint32)(1 << 8)
- DV_I_49_0_bit = (uint32)(1 << 9)
- DV_I_49_2_bit = (uint32)(1 << 10)
- DV_I_50_0_bit = (uint32)(1 << 11)
- DV_I_50_2_bit = (uint32)(1 << 12)
- DV_I_51_0_bit = (uint32)(1 << 13)
- DV_I_51_2_bit = (uint32)(1 << 14)
- DV_I_52_0_bit = (uint32)(1 << 15)
- DV_II_45_0_bit = (uint32)(1 << 16)
- DV_II_46_0_bit = (uint32)(1 << 17)
- DV_II_46_2_bit = (uint32)(1 << 18)
- DV_II_47_0_bit = (uint32)(1 << 19)
- DV_II_48_0_bit = (uint32)(1 << 20)
- DV_II_49_0_bit = (uint32)(1 << 21)
- DV_II_49_2_bit = (uint32)(1 << 22)
- DV_II_50_0_bit = (uint32)(1 << 23)
- DV_II_50_2_bit = (uint32)(1 << 24)
- DV_II_51_0_bit = (uint32)(1 << 25)
- DV_II_51_2_bit = (uint32)(1 << 26)
- DV_II_52_0_bit = (uint32)(1 << 27)
- DV_II_53_0_bit = (uint32)(1 << 28)
- DV_II_54_0_bit = (uint32)(1 << 29)
- DV_II_55_0_bit = (uint32)(1 << 30)
- DV_II_56_0_bit = (uint32)(1 << 31)
-)
-
-// sha1_dvs contains a list of SHA-1 Disturbance Vectors (DV) which defines the
-// unavoidable bit conditions when a collision attack is in progress.
-var sha1_dvs = []DvInfo{
- {
- DvType: 1, DvK: 43, DvB: 0, TestT: 58, MaskI: 0, MaskB: 0,
- Dm: [CheckSize]uint32{
- 0x08000000, 0x9800000c, 0xd8000010, 0x08000010, 0xb8000010, 0x98000000, 0x60000000,
- 0x00000008, 0xc0000000, 0x90000014, 0x10000010, 0xb8000014, 0x28000000, 0x20000010,
- 0x48000000, 0x08000018, 0x60000000, 0x90000010, 0xf0000010, 0x90000008, 0xc0000000,
- 0x90000010, 0xf0000010, 0xb0000008, 0x40000000, 0x90000000, 0xf0000010, 0x90000018,
- 0x60000000, 0x90000010, 0x90000010, 0x90000000, 0x80000000, 0x00000010, 0xa0000000,
- 0x20000000, 0xa0000000, 0x20000010, 0x00000000, 0x20000010, 0x20000000, 0x00000010,
- 0x20000000, 0x00000010, 0xa0000000, 0x00000000, 0x20000000, 0x20000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000001, 0x00000020, 0x00000001, 0x40000002, 0x40000040,
- 0x40000002, 0x80000004, 0x80000080, 0x80000006, 0x00000049, 0x00000103, 0x80000009,
- 0x80000012, 0x80000202, 0x00000018, 0x00000164, 0x00000408, 0x800000e6, 0x8000004c,
- 0x00000803, 0x80000161, 0x80000599},
- }, {
- DvType: 1, DvK: 44, DvB: 0, TestT: 58, MaskI: 0, MaskB: 1,
- Dm: [CheckSize]uint32{
- 0xb4000008, 0x08000000, 0x9800000c, 0xd8000010, 0x08000010, 0xb8000010, 0x98000000,
- 0x60000000, 0x00000008, 0xc0000000, 0x90000014, 0x10000010, 0xb8000014, 0x28000000,
- 0x20000010, 0x48000000, 0x08000018, 0x60000000, 0x90000010, 0xf0000010, 0x90000008,
- 0xc0000000, 0x90000010, 0xf0000010, 0xb0000008, 0x40000000, 0x90000000, 0xf0000010,
- 0x90000018, 0x60000000, 0x90000010, 0x90000010, 0x90000000, 0x80000000, 0x00000010,
- 0xa0000000, 0x20000000, 0xa0000000, 0x20000010, 0x00000000, 0x20000010, 0x20000000,
- 0x00000010, 0x20000000, 0x00000010, 0xa0000000, 0x00000000, 0x20000000, 0x20000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000020, 0x00000001, 0x40000002,
- 0x40000040, 0x40000002, 0x80000004, 0x80000080, 0x80000006, 0x00000049, 0x00000103,
- 0x80000009, 0x80000012, 0x80000202, 0x00000018, 0x00000164, 0x00000408, 0x800000e6,
- 0x8000004c, 0x00000803, 0x80000161},
- },
- {
- DvType: 1, DvK: 45, DvB: 0, TestT: 58, MaskI: 0, MaskB: 2,
- Dm: [CheckSize]uint32{
- 0xf4000014, 0xb4000008, 0x08000000, 0x9800000c, 0xd8000010, 0x08000010, 0xb8000010,
- 0x98000000, 0x60000000, 0x00000008, 0xc0000000, 0x90000014, 0x10000010, 0xb8000014,
- 0x28000000, 0x20000010, 0x48000000, 0x08000018, 0x60000000, 0x90000010, 0xf0000010,
- 0x90000008, 0xc0000000, 0x90000010, 0xf0000010, 0xb0000008, 0x40000000, 0x90000000,
- 0xf0000010, 0x90000018, 0x60000000, 0x90000010, 0x90000010, 0x90000000, 0x80000000,
- 0x00000010, 0xa0000000, 0x20000000, 0xa0000000, 0x20000010, 0x00000000, 0x20000010,
- 0x20000000, 0x00000010, 0x20000000, 0x00000010, 0xa0000000, 0x00000000, 0x20000000,
- 0x20000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000020, 0x00000001,
- 0x40000002, 0x40000040, 0x40000002, 0x80000004, 0x80000080, 0x80000006, 0x00000049,
- 0x00000103, 0x80000009, 0x80000012, 0x80000202, 0x00000018, 0x00000164, 0x00000408,
- 0x800000e6, 0x8000004c, 0x00000803},
- },
- {
- DvType: 1, DvK: 46, DvB: 0, TestT: 58, MaskI: 0, MaskB: 3,
- Dm: [CheckSize]uint32{
- 0x2c000010, 0xf4000014, 0xb4000008, 0x08000000, 0x9800000c, 0xd8000010, 0x08000010,
- 0xb8000010, 0x98000000, 0x60000000, 0x00000008, 0xc0000000, 0x90000014, 0x10000010,
- 0xb8000014, 0x28000000, 0x20000010, 0x48000000, 0x08000018, 0x60000000, 0x90000010,
- 0xf0000010, 0x90000008, 0xc0000000, 0x90000010, 0xf0000010, 0xb0000008, 0x40000000,
- 0x90000000, 0xf0000010, 0x90000018, 0x60000000, 0x90000010, 0x90000010, 0x90000000,
- 0x80000000, 0x00000010, 0xa0000000, 0x20000000, 0xa0000000, 0x20000010, 0x00000000,
- 0x20000010, 0x20000000, 0x00000010, 0x20000000, 0x00000010, 0xa0000000, 0x00000000,
- 0x20000000, 0x20000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000020,
- 0x00000001, 0x40000002, 0x40000040, 0x40000002, 0x80000004, 0x80000080, 0x80000006,
- 0x00000049, 0x00000103, 0x80000009, 0x80000012, 0x80000202, 0x00000018, 0x00000164,
- 0x00000408, 0x800000e6, 0x8000004c},
- },
- {
- DvType: 1, DvK: 46, DvB: 2, TestT: 58, MaskI: 0, MaskB: 4,
- Dm: [CheckSize]uint32{
- 0xb0000040, 0xd0000053, 0xd0000022, 0x20000000, 0x60000032, 0x60000043,
- 0x20000040, 0xe0000042, 0x60000002, 0x80000001, 0x00000020, 0x00000003,
- 0x40000052, 0x40000040, 0xe0000052, 0xa0000000, 0x80000040, 0x20000001,
- 0x20000060, 0x80000001, 0x40000042, 0xc0000043, 0x40000022, 0x00000003,
- 0x40000042, 0xc0000043, 0xc0000022, 0x00000001, 0x40000002, 0xc0000043,
- 0x40000062, 0x80000001, 0x40000042, 0x40000042, 0x40000002, 0x00000002,
- 0x00000040, 0x80000002, 0x80000000, 0x80000002, 0x80000040, 0x00000000,
- 0x80000040, 0x80000000, 0x00000040, 0x80000000, 0x00000040, 0x80000002,
- 0x00000000, 0x80000000, 0x80000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000004, 0x00000080, 0x00000004, 0x00000009, 0x00000101,
- 0x00000009, 0x00000012, 0x00000202, 0x0000001a, 0x00000124, 0x0000040c,
- 0x00000026, 0x0000004a, 0x0000080a, 0x00000060, 0x00000590, 0x00001020,
- 0x0000039a, 0x00000132},
- },
- {
- DvType: 1, DvK: 47, DvB: 0, TestT: 58, MaskI: 0, MaskB: 5,
- Dm: [CheckSize]uint32{
- 0xc8000010, 0x2c000010, 0xf4000014, 0xb4000008, 0x08000000, 0x9800000c,
- 0xd8000010, 0x08000010, 0xb8000010, 0x98000000, 0x60000000, 0x00000008,
- 0xc0000000, 0x90000014, 0x10000010, 0xb8000014, 0x28000000, 0x20000010,
- 0x48000000, 0x08000018, 0x60000000, 0x90000010, 0xf0000010, 0x90000008,
- 0xc0000000, 0x90000010, 0xf0000010, 0xb0000008, 0x40000000, 0x90000000,
- 0xf0000010, 0x90000018, 0x60000000, 0x90000010, 0x90000010, 0x90000000,
- 0x80000000, 0x00000010, 0xa0000000, 0x20000000, 0xa0000000, 0x20000010,
- 0x00000000, 0x20000010, 0x20000000, 0x00000010, 0x20000000, 0x00000010,
- 0xa0000000, 0x00000000, 0x20000000, 0x20000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000001, 0x00000020, 0x00000001, 0x40000002,
- 0x40000040, 0x40000002, 0x80000004, 0x80000080, 0x80000006, 0x00000049,
- 0x00000103, 0x80000009, 0x80000012, 0x80000202, 0x00000018, 0x00000164,
- 0x00000408, 0x800000e6},
- },
- {
- DvType: 1, DvK: 47, DvB: 2, TestT: 58, MaskI: 0, MaskB: 6,
- Dm: [CheckSize]uint32{
- 0x20000043, 0xb0000040, 0xd0000053, 0xd0000022, 0x20000000, 0x60000032,
- 0x60000043, 0x20000040, 0xe0000042, 0x60000002, 0x80000001, 0x00000020,
- 0x00000003, 0x40000052, 0x40000040, 0xe0000052, 0xa0000000, 0x80000040,
- 0x20000001, 0x20000060, 0x80000001, 0x40000042, 0xc0000043, 0x40000022,
- 0x00000003, 0x40000042, 0xc0000043, 0xc0000022, 0x00000001, 0x40000002,
- 0xc0000043, 0x40000062, 0x80000001, 0x40000042, 0x40000042, 0x40000002,
- 0x00000002, 0x00000040, 0x80000002, 0x80000000, 0x80000002, 0x80000040,
- 0x00000000, 0x80000040, 0x80000000, 0x00000040, 0x80000000, 0x00000040,
- 0x80000002, 0x00000000, 0x80000000, 0x80000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000004, 0x00000080, 0x00000004, 0x00000009,
- 0x00000101, 0x00000009, 0x00000012, 0x00000202, 0x0000001a, 0x00000124,
- 0x0000040c, 0x00000026, 0x0000004a, 0x0000080a, 0x00000060, 0x00000590,
- 0x00001020, 0x0000039a,
- },
- },
- {
- DvType: 1, DvK: 48, DvB: 0, TestT: 58, MaskI: 0, MaskB: 7,
- Dm: [CheckSize]uint32{
- 0xb800000a, 0xc8000010, 0x2c000010, 0xf4000014, 0xb4000008, 0x08000000,
- 0x9800000c, 0xd8000010, 0x08000010, 0xb8000010, 0x98000000, 0x60000000,
- 0x00000008, 0xc0000000, 0x90000014, 0x10000010, 0xb8000014, 0x28000000,
- 0x20000010, 0x48000000, 0x08000018, 0x60000000, 0x90000010, 0xf0000010,
- 0x90000008, 0xc0000000, 0x90000010, 0xf0000010, 0xb0000008, 0x40000000,
- 0x90000000, 0xf0000010, 0x90000018, 0x60000000, 0x90000010, 0x90000010,
- 0x90000000, 0x80000000, 0x00000010, 0xa0000000, 0x20000000, 0xa0000000,
- 0x20000010, 0x00000000, 0x20000010, 0x20000000, 0x00000010, 0x20000000,
- 0x00000010, 0xa0000000, 0x00000000, 0x20000000, 0x20000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000020, 0x00000001,
- 0x40000002, 0x40000040, 0x40000002, 0x80000004, 0x80000080, 0x80000006,
- 0x00000049, 0x00000103, 0x80000009, 0x80000012, 0x80000202, 0x00000018,
- 0x00000164, 0x00000408,
- },
- },
- {
- DvType: 1, DvK: 48, DvB: 2, TestT: 58, MaskI: 0, MaskB: 8,
- Dm: [CheckSize]uint32{
- 0xe000002a, 0x20000043, 0xb0000040, 0xd0000053, 0xd0000022, 0x20000000,
- 0x60000032, 0x60000043, 0x20000040, 0xe0000042, 0x60000002, 0x80000001,
- 0x00000020, 0x00000003, 0x40000052, 0x40000040, 0xe0000052, 0xa0000000,
- 0x80000040, 0x20000001, 0x20000060, 0x80000001, 0x40000042, 0xc0000043,
- 0x40000022, 0x00000003, 0x40000042, 0xc0000043, 0xc0000022, 0x00000001,
- 0x40000002, 0xc0000043, 0x40000062, 0x80000001, 0x40000042, 0x40000042,
- 0x40000002, 0x00000002, 0x00000040, 0x80000002, 0x80000000, 0x80000002,
- 0x80000040, 0x00000000, 0x80000040, 0x80000000, 0x00000040, 0x80000000,
- 0x00000040, 0x80000002, 0x00000000, 0x80000000, 0x80000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000004, 0x00000080, 0x00000004,
- 0x00000009, 0x00000101, 0x00000009, 0x00000012, 0x00000202, 0x0000001a,
- 0x00000124, 0x0000040c, 0x00000026, 0x0000004a, 0x0000080a, 0x00000060,
- 0x00000590, 0x00001020},
- },
- {
- DvType: 1, DvK: 49, DvB: 0, TestT: 58, MaskI: 0, MaskB: 9,
- Dm: [CheckSize]uint32{
- 0x18000000, 0xb800000a, 0xc8000010, 0x2c000010, 0xf4000014, 0xb4000008,
- 0x08000000, 0x9800000c, 0xd8000010, 0x08000010, 0xb8000010, 0x98000000,
- 0x60000000, 0x00000008, 0xc0000000, 0x90000014, 0x10000010, 0xb8000014,
- 0x28000000, 0x20000010, 0x48000000, 0x08000018, 0x60000000, 0x90000010,
- 0xf0000010, 0x90000008, 0xc0000000, 0x90000010, 0xf0000010, 0xb0000008,
- 0x40000000, 0x90000000, 0xf0000010, 0x90000018, 0x60000000, 0x90000010,
- 0x90000010, 0x90000000, 0x80000000, 0x00000010, 0xa0000000, 0x20000000,
- 0xa0000000, 0x20000010, 0x00000000, 0x20000010, 0x20000000, 0x00000010,
- 0x20000000, 0x00000010, 0xa0000000, 0x00000000, 0x20000000, 0x20000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000020,
- 0x00000001, 0x40000002, 0x40000040, 0x40000002, 0x80000004, 0x80000080,
- 0x80000006, 0x00000049, 0x00000103, 0x80000009, 0x80000012, 0x80000202,
- 0x00000018, 0x00000164},
- },
- {
- DvType: 1, DvK: 49, DvB: 2, TestT: 58, MaskI: 0, MaskB: 10,
- Dm: [CheckSize]uint32{
- 0x60000000, 0xe000002a, 0x20000043, 0xb0000040, 0xd0000053, 0xd0000022,
- 0x20000000, 0x60000032, 0x60000043, 0x20000040, 0xe0000042, 0x60000002,
- 0x80000001, 0x00000020, 0x00000003, 0x40000052, 0x40000040, 0xe0000052,
- 0xa0000000, 0x80000040, 0x20000001, 0x20000060, 0x80000001, 0x40000042,
- 0xc0000043, 0x40000022, 0x00000003, 0x40000042, 0xc0000043, 0xc0000022,
- 0x00000001, 0x40000002, 0xc0000043, 0x40000062, 0x80000001, 0x40000042,
- 0x40000042, 0x40000002, 0x00000002, 0x00000040, 0x80000002, 0x80000000,
- 0x80000002, 0x80000040, 0x00000000, 0x80000040, 0x80000000, 0x00000040,
- 0x80000000, 0x00000040, 0x80000002, 0x00000000, 0x80000000, 0x80000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000004, 0x00000080,
- 0x00000004, 0x00000009, 0x00000101, 0x00000009, 0x00000012, 0x00000202,
- 0x0000001a, 0x00000124, 0x0000040c, 0x00000026, 0x0000004a, 0x0000080a,
- 0x00000060, 0x00000590},
- },
- {
- DvType: 1, DvK: 50, DvB: 0, TestT: 65, MaskI: 0, MaskB: 11,
- Dm: [CheckSize]uint32{
- 0x0800000c, 0x18000000, 0xb800000a, 0xc8000010, 0x2c000010, 0xf4000014,
- 0xb4000008, 0x08000000, 0x9800000c, 0xd8000010, 0x08000010, 0xb8000010,
- 0x98000000, 0x60000000, 0x00000008, 0xc0000000, 0x90000014, 0x10000010,
- 0xb8000014, 0x28000000, 0x20000010, 0x48000000, 0x08000018, 0x60000000,
- 0x90000010, 0xf0000010, 0x90000008, 0xc0000000, 0x90000010, 0xf0000010,
- 0xb0000008, 0x40000000, 0x90000000, 0xf0000010, 0x90000018, 0x60000000,
- 0x90000010, 0x90000010, 0x90000000, 0x80000000, 0x00000010, 0xa0000000,
- 0x20000000, 0xa0000000, 0x20000010, 0x00000000, 0x20000010, 0x20000000,
- 0x00000010, 0x20000000, 0x00000010, 0xa0000000, 0x00000000, 0x20000000,
- 0x20000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001,
- 0x00000020, 0x00000001, 0x40000002, 0x40000040, 0x40000002, 0x80000004,
- 0x80000080, 0x80000006, 0x00000049, 0x00000103, 0x80000009, 0x80000012,
- 0x80000202, 0x00000018,
- },
- },
- {
- DvType: 1, DvK: 50, DvB: 2, TestT: 65, MaskI: 0, MaskB: 12,
- Dm: [CheckSize]uint32{
- 0x20000030, 0x60000000, 0xe000002a, 0x20000043, 0xb0000040, 0xd0000053,
- 0xd0000022, 0x20000000, 0x60000032, 0x60000043, 0x20000040, 0xe0000042,
- 0x60000002, 0x80000001, 0x00000020, 0x00000003, 0x40000052, 0x40000040,
- 0xe0000052, 0xa0000000, 0x80000040, 0x20000001, 0x20000060, 0x80000001,
- 0x40000042, 0xc0000043, 0x40000022, 0x00000003, 0x40000042, 0xc0000043,
- 0xc0000022, 0x00000001, 0x40000002, 0xc0000043, 0x40000062, 0x80000001,
- 0x40000042, 0x40000042, 0x40000002, 0x00000002, 0x00000040, 0x80000002,
- 0x80000000, 0x80000002, 0x80000040, 0x00000000, 0x80000040, 0x80000000,
- 0x00000040, 0x80000000, 0x00000040, 0x80000002, 0x00000000, 0x80000000,
- 0x80000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000004,
- 0x00000080, 0x00000004, 0x00000009, 0x00000101, 0x00000009, 0x00000012,
- 0x00000202, 0x0000001a, 0x00000124, 0x0000040c, 0x00000026, 0x0000004a,
- 0x0000080a, 0x00000060},
- },
- {
- DvType: 1, DvK: 51, DvB: 0, TestT: 65, MaskI: 0, MaskB: 13,
- Dm: [CheckSize]uint32{
- 0xe8000000, 0x0800000c, 0x18000000, 0xb800000a, 0xc8000010, 0x2c000010,
- 0xf4000014, 0xb4000008, 0x08000000, 0x9800000c, 0xd8000010, 0x08000010,
- 0xb8000010, 0x98000000, 0x60000000, 0x00000008, 0xc0000000, 0x90000014,
- 0x10000010, 0xb8000014, 0x28000000, 0x20000010, 0x48000000, 0x08000018,
- 0x60000000, 0x90000010, 0xf0000010, 0x90000008, 0xc0000000, 0x90000010,
- 0xf0000010, 0xb0000008, 0x40000000, 0x90000000, 0xf0000010, 0x90000018,
- 0x60000000, 0x90000010, 0x90000010, 0x90000000, 0x80000000, 0x00000010,
- 0xa0000000, 0x20000000, 0xa0000000, 0x20000010, 0x00000000, 0x20000010,
- 0x20000000, 0x00000010, 0x20000000, 0x00000010, 0xa0000000, 0x00000000,
- 0x20000000, 0x20000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000001, 0x00000020, 0x00000001, 0x40000002, 0x40000040, 0x40000002,
- 0x80000004, 0x80000080, 0x80000006, 0x00000049, 0x00000103, 0x80000009,
- 0x80000012, 0x80000202},
- },
- {
- DvType: 1, DvK: 51, DvB: 2, TestT: 65, MaskI: 0, MaskB: 14,
- Dm: [CheckSize]uint32{
- 0xa0000003, 0x20000030, 0x60000000, 0xe000002a, 0x20000043, 0xb0000040,
- 0xd0000053, 0xd0000022, 0x20000000, 0x60000032, 0x60000043, 0x20000040,
- 0xe0000042, 0x60000002, 0x80000001, 0x00000020, 0x00000003, 0x40000052,
- 0x40000040, 0xe0000052, 0xa0000000, 0x80000040, 0x20000001, 0x20000060,
- 0x80000001, 0x40000042, 0xc0000043, 0x40000022, 0x00000003, 0x40000042,
- 0xc0000043, 0xc0000022, 0x00000001, 0x40000002, 0xc0000043, 0x40000062,
- 0x80000001, 0x40000042, 0x40000042, 0x40000002, 0x00000002, 0x00000040,
- 0x80000002, 0x80000000, 0x80000002, 0x80000040, 0x00000000, 0x80000040,
- 0x80000000, 0x00000040, 0x80000000, 0x00000040, 0x80000002, 0x00000000,
- 0x80000000, 0x80000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000004, 0x00000080, 0x00000004, 0x00000009, 0x00000101, 0x00000009,
- 0x00000012, 0x00000202, 0x0000001a, 0x00000124, 0x0000040c, 0x00000026,
- 0x0000004a, 0x0000080a},
- },
- {
- DvType: 1, DvK: 52, DvB: 0, TestT: 65, MaskI: 0, MaskB: 15,
- Dm: [CheckSize]uint32{
- 0x04000010, 0xe8000000, 0x0800000c, 0x18000000, 0xb800000a, 0xc8000010,
- 0x2c000010, 0xf4000014, 0xb4000008, 0x08000000, 0x9800000c, 0xd8000010,
- 0x08000010, 0xb8000010, 0x98000000, 0x60000000, 0x00000008, 0xc0000000,
- 0x90000014, 0x10000010, 0xb8000014, 0x28000000, 0x20000010, 0x48000000,
- 0x08000018, 0x60000000, 0x90000010, 0xf0000010, 0x90000008, 0xc0000000,
- 0x90000010, 0xf0000010, 0xb0000008, 0x40000000, 0x90000000, 0xf0000010,
- 0x90000018, 0x60000000, 0x90000010, 0x90000010, 0x90000000, 0x80000000,
- 0x00000010, 0xa0000000, 0x20000000, 0xa0000000, 0x20000010, 0x00000000,
- 0x20000010, 0x20000000, 0x00000010, 0x20000000, 0x00000010, 0xa0000000,
- 0x00000000, 0x20000000, 0x20000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000001, 0x00000020, 0x00000001, 0x40000002, 0x40000040,
- 0x40000002, 0x80000004, 0x80000080, 0x80000006, 0x00000049, 0x00000103,
- 0x80000009, 0x80000012},
- },
- {
- DvType: 2, DvK: 45, DvB: 0, TestT: 58, MaskI: 0, MaskB: 16,
- Dm: [CheckSize]uint32{
- 0xec000014, 0x0c000002, 0xc0000010, 0xb400001c, 0x2c000004, 0xbc000018,
- 0xb0000010, 0x0000000c, 0xb8000010, 0x08000018, 0x78000010, 0x08000014,
- 0x70000010, 0xb800001c, 0xe8000000, 0xb0000004, 0x58000010, 0xb000000c,
- 0x48000000, 0xb0000000, 0xb8000010, 0x98000010, 0xa0000000, 0x00000000,
- 0x00000000, 0x20000000, 0x80000000, 0x00000010, 0x00000000, 0x20000010,
- 0x20000000, 0x00000010, 0x60000000, 0x00000018, 0xe0000000, 0x90000000,
- 0x30000010, 0xb0000000, 0x20000000, 0x20000000, 0xa0000000, 0x00000010,
- 0x80000000, 0x20000000, 0x20000000, 0x20000000, 0x80000000, 0x00000010,
- 0x00000000, 0x20000010, 0xa0000000, 0x00000000, 0x20000000, 0x20000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000001, 0x00000020, 0x00000001, 0x40000002, 0x40000041, 0x40000022,
- 0x80000005, 0xc0000082, 0xc0000046, 0x4000004b, 0x80000107, 0x00000089,
- 0x00000014, 0x8000024b, 0x0000011b, 0x8000016d, 0x8000041a, 0x000002e4,
- 0x80000054, 0x00000967},
- },
- {
- DvType: 2, DvK: 46, DvB: 0, TestT: 58, MaskI: 0, MaskB: 17,
- Dm: [CheckSize]uint32{
- 0x2400001c, 0xec000014, 0x0c000002, 0xc0000010, 0xb400001c, 0x2c000004,
- 0xbc000018, 0xb0000010, 0x0000000c, 0xb8000010, 0x08000018, 0x78000010,
- 0x08000014, 0x70000010, 0xb800001c, 0xe8000000, 0xb0000004, 0x58000010,
- 0xb000000c, 0x48000000, 0xb0000000, 0xb8000010, 0x98000010, 0xa0000000,
- 0x00000000, 0x00000000, 0x20000000, 0x80000000, 0x00000010, 0x00000000,
- 0x20000010, 0x20000000, 0x00000010, 0x60000000, 0x00000018, 0xe0000000,
- 0x90000000, 0x30000010, 0xb0000000, 0x20000000, 0x20000000, 0xa0000000,
- 0x00000010, 0x80000000, 0x20000000, 0x20000000, 0x20000000, 0x80000000,
- 0x00000010, 0x00000000, 0x20000010, 0xa0000000, 0x00000000, 0x20000000,
- 0x20000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000001, 0x00000020, 0x00000001, 0x40000002, 0x40000041,
- 0x40000022, 0x80000005, 0xc0000082, 0xc0000046, 0x4000004b, 0x80000107,
- 0x00000089, 0x00000014, 0x8000024b, 0x0000011b, 0x8000016d, 0x8000041a,
- 0x000002e4, 0x80000054},
- },
- {
- DvType: 2, DvK: 46, DvB: 2, TestT: 58, MaskI: 0, MaskB: 18,
- Dm: [CheckSize]uint32{
- 0x90000070, 0xb0000053, 0x30000008, 0x00000043, 0xd0000072, 0xb0000010,
- 0xf0000062, 0xc0000042, 0x00000030, 0xe0000042, 0x20000060, 0xe0000041,
- 0x20000050, 0xc0000041, 0xe0000072, 0xa0000003, 0xc0000012, 0x60000041,
- 0xc0000032, 0x20000001, 0xc0000002, 0xe0000042, 0x60000042, 0x80000002,
- 0x00000000, 0x00000000, 0x80000000, 0x00000002, 0x00000040, 0x00000000,
- 0x80000040, 0x80000000, 0x00000040, 0x80000001, 0x00000060, 0x80000003,
- 0x40000002, 0xc0000040, 0xc0000002, 0x80000000, 0x80000000, 0x80000002,
- 0x00000040, 0x00000002, 0x80000000, 0x80000000, 0x80000000, 0x00000002,
- 0x00000040, 0x00000000, 0x80000040, 0x80000002, 0x00000000, 0x80000000,
- 0x80000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000004, 0x00000080, 0x00000004, 0x00000009, 0x00000105,
- 0x00000089, 0x00000016, 0x0000020b, 0x0000011b, 0x0000012d, 0x0000041e,
- 0x00000224, 0x00000050, 0x0000092e, 0x0000046c, 0x000005b6, 0x0000106a,
- 0x00000b90, 0x00000152},
- },
- {
- DvType: 2, DvK: 47, DvB: 0, TestT: 58, MaskI: 0, MaskB: 19,
- Dm: [CheckSize]uint32{
- 0x20000010, 0x2400001c, 0xec000014, 0x0c000002, 0xc0000010, 0xb400001c,
- 0x2c000004, 0xbc000018, 0xb0000010, 0x0000000c, 0xb8000010, 0x08000018,
- 0x78000010, 0x08000014, 0x70000010, 0xb800001c, 0xe8000000, 0xb0000004,
- 0x58000010, 0xb000000c, 0x48000000, 0xb0000000, 0xb8000010, 0x98000010,
- 0xa0000000, 0x00000000, 0x00000000, 0x20000000, 0x80000000, 0x00000010,
- 0x00000000, 0x20000010, 0x20000000, 0x00000010, 0x60000000, 0x00000018,
- 0xe0000000, 0x90000000, 0x30000010, 0xb0000000, 0x20000000, 0x20000000,
- 0xa0000000, 0x00000010, 0x80000000, 0x20000000, 0x20000000, 0x20000000,
- 0x80000000, 0x00000010, 0x00000000, 0x20000010, 0xa0000000, 0x00000000,
- 0x20000000, 0x20000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000001, 0x00000020, 0x00000001, 0x40000002,
- 0x40000041, 0x40000022, 0x80000005, 0xc0000082, 0xc0000046, 0x4000004b,
- 0x80000107, 0x00000089, 0x00000014, 0x8000024b, 0x0000011b, 0x8000016d,
- 0x8000041a, 0x000002e4},
- },
- {
- DvType: 2, DvK: 48, DvB: 0, TestT: 58, MaskI: 0, MaskB: 20,
- Dm: [CheckSize]uint32{
- 0xbc00001a, 0x20000010, 0x2400001c, 0xec000014, 0x0c000002, 0xc0000010,
- 0xb400001c, 0x2c000004, 0xbc000018, 0xb0000010, 0x0000000c, 0xb8000010,
- 0x08000018, 0x78000010, 0x08000014, 0x70000010, 0xb800001c, 0xe8000000,
- 0xb0000004, 0x58000010, 0xb000000c, 0x48000000, 0xb0000000, 0xb8000010,
- 0x98000010, 0xa0000000, 0x00000000, 0x00000000, 0x20000000, 0x80000000,
- 0x00000010, 0x00000000, 0x20000010, 0x20000000, 0x00000010, 0x60000000,
- 0x00000018, 0xe0000000, 0x90000000, 0x30000010, 0xb0000000, 0x20000000,
- 0x20000000, 0xa0000000, 0x00000010, 0x80000000, 0x20000000, 0x20000000,
- 0x20000000, 0x80000000, 0x00000010, 0x00000000, 0x20000010, 0xa0000000,
- 0x00000000, 0x20000000, 0x20000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000020, 0x00000001,
- 0x40000002, 0x40000041, 0x40000022, 0x80000005, 0xc0000082, 0xc0000046,
- 0x4000004b, 0x80000107, 0x00000089, 0x00000014, 0x8000024b, 0x0000011b,
- 0x8000016d, 0x8000041a},
- },
- {
- DvType: 2, DvK: 49, DvB: 0, TestT: 58, MaskI: 0, MaskB: 21,
- Dm: [CheckSize]uint32{
- 0x3c000004, 0xbc00001a, 0x20000010, 0x2400001c, 0xec000014, 0x0c000002,
- 0xc0000010, 0xb400001c, 0x2c000004, 0xbc000018, 0xb0000010, 0x0000000c,
- 0xb8000010, 0x08000018, 0x78000010, 0x08000014, 0x70000010, 0xb800001c,
- 0xe8000000, 0xb0000004, 0x58000010, 0xb000000c, 0x48000000, 0xb0000000,
- 0xb8000010, 0x98000010, 0xa0000000, 0x00000000, 0x00000000, 0x20000000,
- 0x80000000, 0x00000010, 0x00000000, 0x20000010, 0x20000000, 0x00000010,
- 0x60000000, 0x00000018, 0xe0000000, 0x90000000, 0x30000010, 0xb0000000,
- 0x20000000, 0x20000000, 0xa0000000, 0x00000010, 0x80000000, 0x20000000,
- 0x20000000, 0x20000000, 0x80000000, 0x00000010, 0x00000000, 0x20000010,
- 0xa0000000, 0x00000000, 0x20000000, 0x20000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000020,
- 0x00000001, 0x40000002, 0x40000041, 0x40000022, 0x80000005, 0xc0000082,
- 0xc0000046, 0x4000004b, 0x80000107, 0x00000089, 0x00000014, 0x8000024b,
- 0x0000011b, 0x8000016d},
- },
- {
- DvType: 2, DvK: 49, DvB: 2, TestT: 58, MaskI: 0, MaskB: 22,
- Dm: [CheckSize]uint32{
- 0xf0000010, 0xf000006a, 0x80000040, 0x90000070, 0xb0000053, 0x30000008,
- 0x00000043, 0xd0000072, 0xb0000010, 0xf0000062, 0xc0000042, 0x00000030,
- 0xe0000042, 0x20000060, 0xe0000041, 0x20000050, 0xc0000041, 0xe0000072,
- 0xa0000003, 0xc0000012, 0x60000041, 0xc0000032, 0x20000001, 0xc0000002,
- 0xe0000042, 0x60000042, 0x80000002, 0x00000000, 0x00000000, 0x80000000,
- 0x00000002, 0x00000040, 0x00000000, 0x80000040, 0x80000000, 0x00000040,
- 0x80000001, 0x00000060, 0x80000003, 0x40000002, 0xc0000040, 0xc0000002,
- 0x80000000, 0x80000000, 0x80000002, 0x00000040, 0x00000002, 0x80000000,
- 0x80000000, 0x80000000, 0x00000002, 0x00000040, 0x00000000, 0x80000040,
- 0x80000002, 0x00000000, 0x80000000, 0x80000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000004, 0x00000080,
- 0x00000004, 0x00000009, 0x00000105, 0x00000089, 0x00000016, 0x0000020b,
- 0x0000011b, 0x0000012d, 0x0000041e, 0x00000224, 0x00000050, 0x0000092e,
- 0x0000046c, 0x000005b6},
- },
- {
- DvType: 2, DvK: 50, DvB: 0, TestT: 65, MaskI: 0, MaskB: 23,
- Dm: [CheckSize]uint32{
- 0xb400001c, 0x3c000004, 0xbc00001a, 0x20000010, 0x2400001c, 0xec000014,
- 0x0c000002, 0xc0000010, 0xb400001c, 0x2c000004, 0xbc000018, 0xb0000010,
- 0x0000000c, 0xb8000010, 0x08000018, 0x78000010, 0x08000014, 0x70000010,
- 0xb800001c, 0xe8000000, 0xb0000004, 0x58000010, 0xb000000c, 0x48000000,
- 0xb0000000, 0xb8000010, 0x98000010, 0xa0000000, 0x00000000, 0x00000000,
- 0x20000000, 0x80000000, 0x00000010, 0x00000000, 0x20000010, 0x20000000,
- 0x00000010, 0x60000000, 0x00000018, 0xe0000000, 0x90000000, 0x30000010,
- 0xb0000000, 0x20000000, 0x20000000, 0xa0000000, 0x00000010, 0x80000000,
- 0x20000000, 0x20000000, 0x20000000, 0x80000000, 0x00000010, 0x00000000,
- 0x20000010, 0xa0000000, 0x00000000, 0x20000000, 0x20000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001,
- 0x00000020, 0x00000001, 0x40000002, 0x40000041, 0x40000022, 0x80000005,
- 0xc0000082, 0xc0000046, 0x4000004b, 0x80000107, 0x00000089, 0x00000014,
- 0x8000024b, 0x0000011b},
- },
- {
- DvType: 2, DvK: 50, DvB: 2, TestT: 65, MaskI: 0, MaskB: 24,
- Dm: [CheckSize]uint32{
- 0xd0000072, 0xf0000010, 0xf000006a, 0x80000040, 0x90000070, 0xb0000053,
- 0x30000008, 0x00000043, 0xd0000072, 0xb0000010, 0xf0000062, 0xc0000042,
- 0x00000030, 0xe0000042, 0x20000060, 0xe0000041, 0x20000050, 0xc0000041,
- 0xe0000072, 0xa0000003, 0xc0000012, 0x60000041, 0xc0000032, 0x20000001,
- 0xc0000002, 0xe0000042, 0x60000042, 0x80000002, 0x00000000, 0x00000000,
- 0x80000000, 0x00000002, 0x00000040, 0x00000000, 0x80000040, 0x80000000,
- 0x00000040, 0x80000001, 0x00000060, 0x80000003, 0x40000002, 0xc0000040,
- 0xc0000002, 0x80000000, 0x80000000, 0x80000002, 0x00000040, 0x00000002,
- 0x80000000, 0x80000000, 0x80000000, 0x00000002, 0x00000040, 0x00000000,
- 0x80000040, 0x80000002, 0x00000000, 0x80000000, 0x80000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000004,
- 0x00000080, 0x00000004, 0x00000009, 0x00000105, 0x00000089, 0x00000016,
- 0x0000020b, 0x0000011b, 0x0000012d, 0x0000041e, 0x00000224, 0x00000050,
- 0x0000092e, 0x0000046c},
- },
- {
- DvType: 2, DvK: 51, DvB: 0, TestT: 65, MaskI: 0, MaskB: 25,
- Dm: [CheckSize]uint32{
- 0xc0000010, 0xb400001c, 0x3c000004, 0xbc00001a, 0x20000010, 0x2400001c,
- 0xec000014, 0x0c000002, 0xc0000010, 0xb400001c, 0x2c000004, 0xbc000018,
- 0xb0000010, 0x0000000c, 0xb8000010, 0x08000018, 0x78000010, 0x08000014,
- 0x70000010, 0xb800001c, 0xe8000000, 0xb0000004, 0x58000010, 0xb000000c,
- 0x48000000, 0xb0000000, 0xb8000010, 0x98000010, 0xa0000000, 0x00000000,
- 0x00000000, 0x20000000, 0x80000000, 0x00000010, 0x00000000, 0x20000010,
- 0x20000000, 0x00000010, 0x60000000, 0x00000018, 0xe0000000, 0x90000000,
- 0x30000010, 0xb0000000, 0x20000000, 0x20000000, 0xa0000000, 0x00000010,
- 0x80000000, 0x20000000, 0x20000000, 0x20000000, 0x80000000, 0x00000010,
- 0x00000000, 0x20000010, 0xa0000000, 0x00000000, 0x20000000, 0x20000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000001, 0x00000020, 0x00000001, 0x40000002, 0x40000041, 0x40000022,
- 0x80000005, 0xc0000082, 0xc0000046, 0x4000004b, 0x80000107, 0x00000089,
- 0x00000014, 0x8000024b},
- },
- {
- DvType: 2, DvK: 51, DvB: 2, TestT: 65, MaskI: 0, MaskB: 26,
- Dm: [CheckSize]uint32{
- 0x00000043, 0xd0000072, 0xf0000010, 0xf000006a, 0x80000040, 0x90000070,
- 0xb0000053, 0x30000008, 0x00000043, 0xd0000072, 0xb0000010, 0xf0000062,
- 0xc0000042, 0x00000030, 0xe0000042, 0x20000060, 0xe0000041, 0x20000050,
- 0xc0000041, 0xe0000072, 0xa0000003, 0xc0000012, 0x60000041, 0xc0000032,
- 0x20000001, 0xc0000002, 0xe0000042, 0x60000042, 0x80000002, 0x00000000,
- 0x00000000, 0x80000000, 0x00000002, 0x00000040, 0x00000000, 0x80000040,
- 0x80000000, 0x00000040, 0x80000001, 0x00000060, 0x80000003, 0x40000002,
- 0xc0000040, 0xc0000002, 0x80000000, 0x80000000, 0x80000002, 0x00000040,
- 0x00000002, 0x80000000, 0x80000000, 0x80000000, 0x00000002, 0x00000040,
- 0x00000000, 0x80000040, 0x80000002, 0x00000000, 0x80000000, 0x80000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000004, 0x00000080, 0x00000004, 0x00000009, 0x00000105, 0x00000089,
- 0x00000016, 0x0000020b, 0x0000011b, 0x0000012d, 0x0000041e, 0x00000224,
- 0x00000050, 0x0000092e},
- },
- {
- DvType: 2, DvK: 52, DvB: 0, TestT: 65, MaskI: 0, MaskB: 27,
- Dm: [CheckSize]uint32{
- 0x0c000002, 0xc0000010, 0xb400001c, 0x3c000004, 0xbc00001a, 0x20000010,
- 0x2400001c, 0xec000014, 0x0c000002, 0xc0000010, 0xb400001c, 0x2c000004,
- 0xbc000018, 0xb0000010, 0x0000000c, 0xb8000010, 0x08000018, 0x78000010,
- 0x08000014, 0x70000010, 0xb800001c, 0xe8000000, 0xb0000004, 0x58000010,
- 0xb000000c, 0x48000000, 0xb0000000, 0xb8000010, 0x98000010, 0xa0000000,
- 0x00000000, 0x00000000, 0x20000000, 0x80000000, 0x00000010, 0x00000000,
- 0x20000010, 0x20000000, 0x00000010, 0x60000000, 0x00000018, 0xe0000000,
- 0x90000000, 0x30000010, 0xb0000000, 0x20000000, 0x20000000, 0xa0000000,
- 0x00000010, 0x80000000, 0x20000000, 0x20000000, 0x20000000, 0x80000000,
- 0x00000010, 0x00000000, 0x20000010, 0xa0000000, 0x00000000, 0x20000000,
- 0x20000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000001, 0x00000020, 0x00000001, 0x40000002, 0x40000041,
- 0x40000022, 0x80000005, 0xc0000082, 0xc0000046, 0x4000004b, 0x80000107,
- 0x00000089, 0x00000014},
- },
- {
- DvType: 2, DvK: 53, DvB: 0, TestT: 65, MaskI: 0, MaskB: 28,
- Dm: [CheckSize]uint32{
- 0xcc000014, 0x0c000002, 0xc0000010, 0xb400001c, 0x3c000004, 0xbc00001a,
- 0x20000010, 0x2400001c, 0xec000014, 0x0c000002, 0xc0000010, 0xb400001c,
- 0x2c000004, 0xbc000018, 0xb0000010, 0x0000000c, 0xb8000010, 0x08000018,
- 0x78000010, 0x08000014, 0x70000010, 0xb800001c, 0xe8000000, 0xb0000004,
- 0x58000010, 0xb000000c, 0x48000000, 0xb0000000, 0xb8000010, 0x98000010,
- 0xa0000000, 0x00000000, 0x00000000, 0x20000000, 0x80000000, 0x00000010,
- 0x00000000, 0x20000010, 0x20000000, 0x00000010, 0x60000000, 0x00000018,
- 0xe0000000, 0x90000000, 0x30000010, 0xb0000000, 0x20000000, 0x20000000,
- 0xa0000000, 0x00000010, 0x80000000, 0x20000000, 0x20000000, 0x20000000,
- 0x80000000, 0x00000010, 0x00000000, 0x20000010, 0xa0000000, 0x00000000,
- 0x20000000, 0x20000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000001, 0x00000020, 0x00000001, 0x40000002,
- 0x40000041, 0x40000022, 0x80000005, 0xc0000082, 0xc0000046, 0x4000004b,
- 0x80000107, 0x00000089},
- },
- {
- DvType: 2, DvK: 54, DvB: 0, TestT: 65, MaskI: 0, MaskB: 29,
- Dm: [CheckSize]uint32{
- 0x0400001c, 0xcc000014, 0x0c000002, 0xc0000010, 0xb400001c, 0x3c000004,
- 0xbc00001a, 0x20000010, 0x2400001c, 0xec000014, 0x0c000002, 0xc0000010,
- 0xb400001c, 0x2c000004, 0xbc000018, 0xb0000010, 0x0000000c, 0xb8000010,
- 0x08000018, 0x78000010, 0x08000014, 0x70000010, 0xb800001c, 0xe8000000,
- 0xb0000004, 0x58000010, 0xb000000c, 0x48000000, 0xb0000000, 0xb8000010,
- 0x98000010, 0xa0000000, 0x00000000, 0x00000000, 0x20000000, 0x80000000,
- 0x00000010, 0x00000000, 0x20000010, 0x20000000, 0x00000010, 0x60000000,
- 0x00000018, 0xe0000000, 0x90000000, 0x30000010, 0xb0000000, 0x20000000,
- 0x20000000, 0xa0000000, 0x00000010, 0x80000000, 0x20000000, 0x20000000,
- 0x20000000, 0x80000000, 0x00000010, 0x00000000, 0x20000010, 0xa0000000,
- 0x00000000, 0x20000000, 0x20000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000020, 0x00000001,
- 0x40000002, 0x40000041, 0x40000022, 0x80000005, 0xc0000082, 0xc0000046,
- 0x4000004b, 0x80000107},
- },
- {
- DvType: 2, DvK: 55, DvB: 0, TestT: 65, MaskI: 0, MaskB: 30,
- Dm: [CheckSize]uint32{
- 0x00000010, 0x0400001c, 0xcc000014, 0x0c000002, 0xc0000010, 0xb400001c,
- 0x3c000004, 0xbc00001a, 0x20000010, 0x2400001c, 0xec000014, 0x0c000002,
- 0xc0000010, 0xb400001c, 0x2c000004, 0xbc000018, 0xb0000010, 0x0000000c,
- 0xb8000010, 0x08000018, 0x78000010, 0x08000014, 0x70000010, 0xb800001c,
- 0xe8000000, 0xb0000004, 0x58000010, 0xb000000c, 0x48000000, 0xb0000000,
- 0xb8000010, 0x98000010, 0xa0000000, 0x00000000, 0x00000000, 0x20000000,
- 0x80000000, 0x00000010, 0x00000000, 0x20000010, 0x20000000, 0x00000010,
- 0x60000000, 0x00000018, 0xe0000000, 0x90000000, 0x30000010, 0xb0000000,
- 0x20000000, 0x20000000, 0xa0000000, 0x00000010, 0x80000000, 0x20000000,
- 0x20000000, 0x20000000, 0x80000000, 0x00000010, 0x00000000, 0x20000010,
- 0xa0000000, 0x00000000, 0x20000000, 0x20000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000020,
- 0x00000001, 0x40000002, 0x40000041, 0x40000022, 0x80000005, 0xc0000082,
- 0xc0000046, 0x4000004b},
- },
- {
- DvType: 2, DvK: 56, DvB: 0, TestT: 65, MaskI: 0, MaskB: 31,
- Dm: [CheckSize]uint32{
- 0x2600001a, 0x00000010, 0x0400001c, 0xcc000014, 0x0c000002, 0xc0000010,
- 0xb400001c, 0x3c000004, 0xbc00001a, 0x20000010, 0x2400001c, 0xec000014,
- 0x0c000002, 0xc0000010, 0xb400001c, 0x2c000004, 0xbc000018, 0xb0000010,
- 0x0000000c, 0xb8000010, 0x08000018, 0x78000010, 0x08000014, 0x70000010,
- 0xb800001c, 0xe8000000, 0xb0000004, 0x58000010, 0xb000000c, 0x48000000,
- 0xb0000000, 0xb8000010, 0x98000010, 0xa0000000, 0x00000000, 0x00000000,
- 0x20000000, 0x80000000, 0x00000010, 0x00000000, 0x20000010, 0x20000000,
- 0x00000010, 0x60000000, 0x00000018, 0xe0000000, 0x90000000, 0x30000010,
- 0xb0000000, 0x20000000, 0x20000000, 0xa0000000, 0x00000010, 0x80000000,
- 0x20000000, 0x20000000, 0x20000000, 0x80000000, 0x00000010, 0x00000000,
- 0x20000010, 0xa0000000, 0x00000000, 0x20000000, 0x20000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001,
- 0x00000020, 0x00000001, 0x40000002, 0x40000041, 0x40000022, 0x80000005,
- 0xc0000082, 0xc0000046},
- },
- {
- DvType: 0, DvK: 0, DvB: 0, TestT: 0, MaskI: 0, MaskB: 0,
- Dm: [CheckSize]uint32{
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0},
- },
-}
diff --git a/vendor/github.com/pjbgf/sha1cd/ubc/doc.go b/vendor/github.com/pjbgf/sha1cd/ubc/doc.go
deleted file mode 100644
index 0090e36b906..00000000000
--- a/vendor/github.com/pjbgf/sha1cd/ubc/doc.go
+++ /dev/null
@@ -1,3 +0,0 @@
-// ubc package provides ways for SHA1 blocks to be checked for
-// Unavoidable Bit Conditions that arise from crypto analysis attacks.
-package ubc
diff --git a/vendor/github.com/pkg/xattr/xattr.go b/vendor/github.com/pkg/xattr/xattr.go
index 8b2b5fe80e8..e34e274d513 100644
--- a/vendor/github.com/pkg/xattr/xattr.go
+++ b/vendor/github.com/pkg/xattr/xattr.go
@@ -87,8 +87,8 @@ func get(path string, name string, getxattrFunc getxattrFunc) ([]byte, error) {
initialBufSize = 1024
// The theoretical maximum xattr value size on MacOS is 64 MB. On Linux it's
- // much smaller at 64 KB. Unless the kernel is evil or buggy, we should never
- // hit the limit.
+ // much smaller: documented at 64 KB. However, at least on TrueNAS SCALE, a
+ // Debian-based Linux distro, it can be larger.
maxBufSize = 64 * 1024 * 1024
// Function name as reported in error messages
@@ -102,14 +102,15 @@ func get(path string, name string, getxattrFunc getxattrFunc) ([]byte, error) {
// If the buffer was too small to fit the value, Linux and MacOS react
// differently:
- // Linux: returns an ERANGE error and "-1" bytes.
+ // Linux: returns an ERANGE error and "-1" bytes. However, the TrueNAS
+ // SCALE distro sometimes returns E2BIG.
// MacOS: truncates the value and returns "size" bytes. If the value
// happens to be exactly as big as the buffer, we cannot know if it was
// truncated, and we retry with a bigger buffer. Contrary to documentation,
// MacOS never seems to return ERANGE!
// To keep the code simple, we always check both conditions, and sometimes
// double the buffer size without it being strictly necessary.
- if err == syscall.ERANGE || read == size {
+ if err == syscall.ERANGE || err == syscall.E2BIG || read == size {
// The buffer was too small. Try again.
size <<= 1
if size >= maxBufSize {
diff --git a/vendor/github.com/pkg/xattr/xattr_solaris.go b/vendor/github.com/pkg/xattr/xattr_solaris.go
index 8d65b8d8d69..7c98b4afbac 100644
--- a/vendor/github.com/pkg/xattr/xattr_solaris.go
+++ b/vendor/github.com/pkg/xattr/xattr_solaris.go
@@ -24,7 +24,7 @@ const (
)
func getxattr(path string, name string, data []byte) (int, error) {
- f, err := os.OpenFile(path, os.O_RDONLY, 0)
+ f, err := openNonblock(path)
if err != nil {
return 0, err
}
@@ -50,7 +50,7 @@ func fgetxattr(f *os.File, name string, data []byte) (int, error) {
}
func setxattr(path string, name string, data []byte, flags int) error {
- f, err := os.OpenFile(path, os.O_RDONLY, 0)
+ f, err := openNonblock(path)
if err != nil {
return err
}
@@ -87,7 +87,8 @@ func fsetxattr(f *os.File, name string, data []byte, flags int) error {
}
func removexattr(path string, name string) error {
- fd, err := unix.Open(path, unix.O_RDONLY|unix.O_XATTR, 0)
+ mode := unix.O_RDONLY | unix.O_XATTR | unix.O_NONBLOCK | unix.O_CLOEXEC
+ fd, err := unix.Open(path, mode, 0)
if err != nil {
return err
}
@@ -114,7 +115,7 @@ func fremovexattr(f *os.File, name string) error {
}
func listxattr(path string, data []byte) (int, error) {
- f, err := os.OpenFile(path, os.O_RDONLY, 0)
+ f, err := openNonblock(path)
if err != nil {
return 0, err
}
@@ -151,8 +152,17 @@ func flistxattr(f *os.File, data []byte) (int, error) {
return copy(data, buf), nil
}
+// Like os.Open, but passes O_NONBLOCK to the open(2) syscall.
+func openNonblock(path string) (*os.File, error) {
+ fd, err := unix.Open(path, unix.O_RDONLY|unix.O_CLOEXEC|unix.O_NONBLOCK, 0)
+ if err != nil {
+ return nil, err
+ }
+ return os.NewFile(uintptr(fd), path), err
+}
+
// stringsFromByteSlice converts a sequence of attributes to a []string.
-// On Darwin and Linux, each entry is a NULL-terminated string.
+// We simulate Linux/Darwin, where each entry is a NULL-terminated string.
func stringsFromByteSlice(buf []byte) (result []string) {
offset := 0
for index, b := range buf {
diff --git a/vendor/github.com/rancher/yip/pkg/executor/executor.go b/vendor/github.com/rancher/yip/pkg/executor/executor.go
index e2e274db611..2a0f542dc9d 100644
--- a/vendor/github.com/rancher/yip/pkg/executor/executor.go
+++ b/vendor/github.com/rancher/yip/pkg/executor/executor.go
@@ -74,7 +74,6 @@ func NewExecutor(opts ...Options) Executor {
plugins: []Plugin{
plugins.DNS,
plugins.Download,
- plugins.Git,
plugins.Entities,
plugins.EnsureDirectories,
plugins.EnsureFiles,
diff --git a/vendor/github.com/rancher/yip/pkg/plugins/git.go b/vendor/github.com/rancher/yip/pkg/plugins/git.go
deleted file mode 100644
index c53ebc7e811..00000000000
--- a/vendor/github.com/rancher/yip/pkg/plugins/git.go
+++ /dev/null
@@ -1,152 +0,0 @@
-// Copyright 2021 Ettore Di Giacinto
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package plugins
-
-import (
- "fmt"
- "path/filepath"
-
- git "github.com/go-git/go-git/v5"
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/transport"
- gith "github.com/go-git/go-git/v5/plumbing/transport/http"
- ssh2 "github.com/go-git/go-git/v5/plumbing/transport/ssh"
- "github.com/twpayne/go-vfs/v4"
- "golang.org/x/crypto/ssh"
-
- "github.com/rancher/yip/pkg/logger"
- "github.com/rancher/yip/pkg/schema"
- "github.com/rancher/yip/pkg/utils"
-)
-
-func Git(l logger.Interface, s schema.Stage, fs vfs.FS, console Console) error {
- if s.Git.URL == "" {
- return nil
- }
-
- branch := "master"
- if s.Git.Branch != "" {
- branch = s.Git.Branch
- }
-
- gitconfig := s.Git
- path, err := fs.RawPath(s.Git.Path)
- if err != nil {
- return err
- }
- l.Infof("Cloning git repository '%s'", s.Git.URL)
-
- if utils.Exists(filepath.Join(path, ".git")) {
- l.Info("Repository already exists, updating it")
- // is a git repo, update it
- // We instantiate a new repository targeting the given path (the .git folder)
- r, err := git.PlainOpen(path)
- if err != nil {
- return err
- }
-
- w, err := r.Worktree()
- if err != nil {
- return err
- }
-
- err = w.Pull(&git.PullOptions{
- Auth: authMethod(s),
- SingleBranch: s.Git.BranchOnly,
- Force: true,
- InsecureSkipTLS: s.Git.Auth.Insecure,
- })
- if err != nil && err != git.NoErrAlreadyUpToDate {
- return err
- }
-
- err = w.Reset(&git.ResetOptions{
- Commit: plumbing.NewHash(branch),
- Mode: git.HardReset,
- })
-
- if err != nil {
- return err
- }
- return nil
-
- }
-
- opts := &git.CloneOptions{
- URL: gitconfig.URL,
- SingleBranch: s.Git.BranchOnly,
- }
-
- applyOptions(s, opts)
-
- _, err = git.PlainClone(path, false, opts)
- if err != nil {
- return fmt.Errorf("Failed cloning repo: %s", err.Error())
- }
- return nil
-}
-
-func authMethod(s schema.Stage) transport.AuthMethod {
- var t transport.AuthMethod
-
- if s.Git.Auth.Username != "" {
- t = &gith.BasicAuth{Username: s.Git.Auth.Username, Password: s.Git.Auth.Password}
- }
-
- if s.Git.Auth.PrivateKey != "" {
- signer, err := ssh.ParsePrivateKey([]byte(s.Git.Auth.PrivateKey))
- if err != nil {
- return t
- }
-
- userName := "git"
- if s.Git.Auth.Username != "" {
- userName = s.Git.Auth.Username
- }
- sshAuth := &ssh2.PublicKeys{
- User: userName,
- Signer: signer,
- }
- if s.Git.Auth.Insecure {
- sshAuth.HostKeyCallbackHelper = ssh2.HostKeyCallbackHelper{
- HostKeyCallback: ssh.InsecureIgnoreHostKey(),
- }
- }
- if s.Git.Auth.PublicKey != "" {
- key, err := ssh.ParsePublicKey([]byte(s.Git.Auth.PublicKey))
- if err != nil {
- return t
- }
- sshAuth.HostKeyCallbackHelper = ssh2.HostKeyCallbackHelper{
- HostKeyCallback: ssh.FixedHostKey(key),
- }
- }
-
- t = sshAuth
- }
- return t
-}
-
-func applyOptions(s schema.Stage, g *git.CloneOptions) {
-
- g.Auth = authMethod(s)
-
- if s.Git.Branch != "" {
- g.ReferenceName = plumbing.NewBranchReferenceName(s.Git.Branch)
- }
- if s.Git.BranchOnly {
- g.SingleBranch = true
- }
-}
diff --git a/vendor/github.com/sergi/go-diff/AUTHORS b/vendor/github.com/sergi/go-diff/AUTHORS
deleted file mode 100644
index 2d7bb2bf572..00000000000
--- a/vendor/github.com/sergi/go-diff/AUTHORS
+++ /dev/null
@@ -1,25 +0,0 @@
-# This is the official list of go-diff authors for copyright purposes.
-# This file is distinct from the CONTRIBUTORS files.
-# See the latter for an explanation.
-
-# Names should be added to this file as
-# Name or Organization
-# The email address is not required for organizations.
-
-# Please keep the list sorted.
-
-Danny Yoo
-James Kolb
-Jonathan Amsterdam
-Markus Zimmermann
-Matt Kovars
-Örjan Persson
-Osman Masood
-Robert Carlsen
-Rory Flynn
-Sergi Mansilla
-Shatrugna Sadhu
-Shawn Smith
-Stas Maksimov
-Tor Arvid Lund
-Zac Bergquist
diff --git a/vendor/github.com/sergi/go-diff/CONTRIBUTORS b/vendor/github.com/sergi/go-diff/CONTRIBUTORS
deleted file mode 100644
index 369e3d55190..00000000000
--- a/vendor/github.com/sergi/go-diff/CONTRIBUTORS
+++ /dev/null
@@ -1,32 +0,0 @@
-# This is the official list of people who can contribute
-# (and typically have contributed) code to the go-diff
-# repository.
-#
-# The AUTHORS file lists the copyright holders; this file
-# lists people. For example, ACME Inc. employees would be listed here
-# but not in AUTHORS, because ACME Inc. would hold the copyright.
-#
-# When adding J Random Contributor's name to this file,
-# either J's name or J's organization's name should be
-# added to the AUTHORS file.
-#
-# Names should be added to this file like so:
-# Name
-#
-# Please keep the list sorted.
-
-Danny Yoo
-James Kolb
-Jonathan Amsterdam
-Markus Zimmermann
-Matt Kovars
-Örjan Persson
-Osman Masood
-Robert Carlsen
-Rory Flynn
-Sergi Mansilla
-Shatrugna Sadhu
-Shawn Smith
-Stas Maksimov
-Tor Arvid Lund
-Zac Bergquist
diff --git a/vendor/github.com/sergi/go-diff/LICENSE b/vendor/github.com/sergi/go-diff/LICENSE
deleted file mode 100644
index 937942c2b2c..00000000000
--- a/vendor/github.com/sergi/go-diff/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-Copyright (c) 2012-2016 The go-diff Authors. All rights reserved.
-
-Permission is hereby granted, free of charge, to any person obtaining a
-copy of this software and associated documentation files (the "Software"),
-to deal in the Software without restriction, including without limitation
-the rights to use, copy, modify, merge, publish, distribute, sublicense,
-and/or sell copies of the Software, and to permit persons to whom the
-Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included
-in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-DEALINGS IN THE SOFTWARE.
-
diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go
deleted file mode 100644
index 915d5090dde..00000000000
--- a/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go
+++ /dev/null
@@ -1,1347 +0,0 @@
-// Copyright (c) 2012-2016 The go-diff authors. All rights reserved.
-// https://github.com/sergi/go-diff
-// See the included LICENSE file for license details.
-//
-// go-diff is a Go implementation of Google's Diff, Match, and Patch library
-// Original library is Copyright (c) 2006 Google Inc.
-// http://code.google.com/p/google-diff-match-patch/
-
-package diffmatchpatch
-
-import (
- "bytes"
- "errors"
- "fmt"
- "html"
- "math"
- "net/url"
- "regexp"
- "strconv"
- "strings"
- "time"
- "unicode/utf8"
-)
-
-// Operation defines the operation of a diff item.
-type Operation int8
-
-//go:generate stringer -type=Operation -trimprefix=Diff
-
-const (
- // DiffDelete item represents a delete diff.
- DiffDelete Operation = -1
- // DiffInsert item represents an insert diff.
- DiffInsert Operation = 1
- // DiffEqual item represents an equal diff.
- DiffEqual Operation = 0
-)
-
-// Diff represents one diff operation
-type Diff struct {
- Type Operation
- Text string
-}
-
-// splice removes amount elements from slice at index index, replacing them with elements.
-func splice(slice []Diff, index int, amount int, elements ...Diff) []Diff {
- if len(elements) == amount {
- // Easy case: overwrite the relevant items.
- copy(slice[index:], elements)
- return slice
- }
- if len(elements) < amount {
- // Fewer new items than old.
- // Copy in the new items.
- copy(slice[index:], elements)
- // Shift the remaining items left.
- copy(slice[index+len(elements):], slice[index+amount:])
- // Calculate the new end of the slice.
- end := len(slice) - amount + len(elements)
- // Zero stranded elements at end so that they can be garbage collected.
- tail := slice[end:]
- for i := range tail {
- tail[i] = Diff{}
- }
- return slice[:end]
- }
- // More new items than old.
- // Make room in slice for new elements.
- // There's probably an even more efficient way to do this,
- // but this is simple and clear.
- need := len(slice) - amount + len(elements)
- for len(slice) < need {
- slice = append(slice, Diff{})
- }
- // Shift slice elements right to make room for new elements.
- copy(slice[index+len(elements):], slice[index+amount:])
- // Copy in new elements.
- copy(slice[index:], elements)
- return slice
-}
-
-// DiffMain finds the differences between two texts.
-// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character.
-func (dmp *DiffMatchPatch) DiffMain(text1, text2 string, checklines bool) []Diff {
- return dmp.DiffMainRunes([]rune(text1), []rune(text2), checklines)
-}
-
-// DiffMainRunes finds the differences between two rune sequences.
-// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character.
-func (dmp *DiffMatchPatch) DiffMainRunes(text1, text2 []rune, checklines bool) []Diff {
- var deadline time.Time
- if dmp.DiffTimeout > 0 {
- deadline = time.Now().Add(dmp.DiffTimeout)
- }
- return dmp.diffMainRunes(text1, text2, checklines, deadline)
-}
-
-func (dmp *DiffMatchPatch) diffMainRunes(text1, text2 []rune, checklines bool, deadline time.Time) []Diff {
- if runesEqual(text1, text2) {
- var diffs []Diff
- if len(text1) > 0 {
- diffs = append(diffs, Diff{DiffEqual, string(text1)})
- }
- return diffs
- }
- // Trim off common prefix (speedup).
- commonlength := commonPrefixLength(text1, text2)
- commonprefix := text1[:commonlength]
- text1 = text1[commonlength:]
- text2 = text2[commonlength:]
-
- // Trim off common suffix (speedup).
- commonlength = commonSuffixLength(text1, text2)
- commonsuffix := text1[len(text1)-commonlength:]
- text1 = text1[:len(text1)-commonlength]
- text2 = text2[:len(text2)-commonlength]
-
- // Compute the diff on the middle block.
- diffs := dmp.diffCompute(text1, text2, checklines, deadline)
-
- // Restore the prefix and suffix.
- if len(commonprefix) != 0 {
- diffs = append([]Diff{{DiffEqual, string(commonprefix)}}, diffs...)
- }
- if len(commonsuffix) != 0 {
- diffs = append(diffs, Diff{DiffEqual, string(commonsuffix)})
- }
-
- return dmp.DiffCleanupMerge(diffs)
-}
-
-// diffCompute finds the differences between two rune slices. Assumes that the texts do not have any common prefix or suffix.
-func (dmp *DiffMatchPatch) diffCompute(text1, text2 []rune, checklines bool, deadline time.Time) []Diff {
- diffs := []Diff{}
- if len(text1) == 0 {
- // Just add some text (speedup).
- return append(diffs, Diff{DiffInsert, string(text2)})
- } else if len(text2) == 0 {
- // Just delete some text (speedup).
- return append(diffs, Diff{DiffDelete, string(text1)})
- }
-
- var longtext, shorttext []rune
- if len(text1) > len(text2) {
- longtext = text1
- shorttext = text2
- } else {
- longtext = text2
- shorttext = text1
- }
-
- if i := runesIndex(longtext, shorttext); i != -1 {
- op := DiffInsert
- // Swap insertions for deletions if diff is reversed.
- if len(text1) > len(text2) {
- op = DiffDelete
- }
- // Shorter text is inside the longer text (speedup).
- return []Diff{
- Diff{op, string(longtext[:i])},
- Diff{DiffEqual, string(shorttext)},
- Diff{op, string(longtext[i+len(shorttext):])},
- }
- } else if len(shorttext) == 1 {
- // Single character string.
- // After the previous speedup, the character can't be an equality.
- return []Diff{
- {DiffDelete, string(text1)},
- {DiffInsert, string(text2)},
- }
- // Check to see if the problem can be split in two.
- } else if hm := dmp.diffHalfMatch(text1, text2); hm != nil {
- // A half-match was found, sort out the return data.
- text1A := hm[0]
- text1B := hm[1]
- text2A := hm[2]
- text2B := hm[3]
- midCommon := hm[4]
- // Send both pairs off for separate processing.
- diffsA := dmp.diffMainRunes(text1A, text2A, checklines, deadline)
- diffsB := dmp.diffMainRunes(text1B, text2B, checklines, deadline)
- // Merge the results.
- diffs := diffsA
- diffs = append(diffs, Diff{DiffEqual, string(midCommon)})
- diffs = append(diffs, diffsB...)
- return diffs
- } else if checklines && len(text1) > 100 && len(text2) > 100 {
- return dmp.diffLineMode(text1, text2, deadline)
- }
- return dmp.diffBisect(text1, text2, deadline)
-}
-
-// diffLineMode does a quick line-level diff on both []runes, then rediff the parts for greater accuracy. This speedup can produce non-minimal diffs.
-func (dmp *DiffMatchPatch) diffLineMode(text1, text2 []rune, deadline time.Time) []Diff {
- // Scan the text on a line-by-line basis first.
- text1, text2, linearray := dmp.DiffLinesToRunes(string(text1), string(text2))
-
- diffs := dmp.diffMainRunes(text1, text2, false, deadline)
-
- // Convert the diff back to original text.
- diffs = dmp.DiffCharsToLines(diffs, linearray)
- // Eliminate freak matches (e.g. blank lines)
- diffs = dmp.DiffCleanupSemantic(diffs)
-
- // Rediff any replacement blocks, this time character-by-character.
- // Add a dummy entry at the end.
- diffs = append(diffs, Diff{DiffEqual, ""})
-
- pointer := 0
- countDelete := 0
- countInsert := 0
-
- // NOTE: Rune slices are slower than using strings in this case.
- textDelete := ""
- textInsert := ""
-
- for pointer < len(diffs) {
- switch diffs[pointer].Type {
- case DiffInsert:
- countInsert++
- textInsert += diffs[pointer].Text
- case DiffDelete:
- countDelete++
- textDelete += diffs[pointer].Text
- case DiffEqual:
- // Upon reaching an equality, check for prior redundancies.
- if countDelete >= 1 && countInsert >= 1 {
- // Delete the offending records and add the merged ones.
- diffs = splice(diffs, pointer-countDelete-countInsert,
- countDelete+countInsert)
-
- pointer = pointer - countDelete - countInsert
- a := dmp.diffMainRunes([]rune(textDelete), []rune(textInsert), false, deadline)
- for j := len(a) - 1; j >= 0; j-- {
- diffs = splice(diffs, pointer, 0, a[j])
- }
- pointer = pointer + len(a)
- }
-
- countInsert = 0
- countDelete = 0
- textDelete = ""
- textInsert = ""
- }
- pointer++
- }
-
- return diffs[:len(diffs)-1] // Remove the dummy entry at the end.
-}
-
-// DiffBisect finds the 'middle snake' of a diff, split the problem in two and return the recursively constructed diff.
-// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character.
-// See Myers 1986 paper: An O(ND) Difference Algorithm and Its Variations.
-func (dmp *DiffMatchPatch) DiffBisect(text1, text2 string, deadline time.Time) []Diff {
- // Unused in this code, but retained for interface compatibility.
- return dmp.diffBisect([]rune(text1), []rune(text2), deadline)
-}
-
-// diffBisect finds the 'middle snake' of a diff, splits the problem in two and returns the recursively constructed diff.
-// See Myers's 1986 paper: An O(ND) Difference Algorithm and Its Variations.
-func (dmp *DiffMatchPatch) diffBisect(runes1, runes2 []rune, deadline time.Time) []Diff {
- // Cache the text lengths to prevent multiple calls.
- runes1Len, runes2Len := len(runes1), len(runes2)
-
- maxD := (runes1Len + runes2Len + 1) / 2
- vOffset := maxD
- vLength := 2 * maxD
-
- v1 := make([]int, vLength)
- v2 := make([]int, vLength)
- for i := range v1 {
- v1[i] = -1
- v2[i] = -1
- }
- v1[vOffset+1] = 0
- v2[vOffset+1] = 0
-
- delta := runes1Len - runes2Len
- // If the total number of characters is odd, then the front path will collide with the reverse path.
- front := (delta%2 != 0)
- // Offsets for start and end of k loop. Prevents mapping of space beyond the grid.
- k1start := 0
- k1end := 0
- k2start := 0
- k2end := 0
- for d := 0; d < maxD; d++ {
- // Bail out if deadline is reached.
- if !deadline.IsZero() && d%16 == 0 && time.Now().After(deadline) {
- break
- }
-
- // Walk the front path one step.
- for k1 := -d + k1start; k1 <= d-k1end; k1 += 2 {
- k1Offset := vOffset + k1
- var x1 int
-
- if k1 == -d || (k1 != d && v1[k1Offset-1] < v1[k1Offset+1]) {
- x1 = v1[k1Offset+1]
- } else {
- x1 = v1[k1Offset-1] + 1
- }
-
- y1 := x1 - k1
- for x1 < runes1Len && y1 < runes2Len {
- if runes1[x1] != runes2[y1] {
- break
- }
- x1++
- y1++
- }
- v1[k1Offset] = x1
- if x1 > runes1Len {
- // Ran off the right of the graph.
- k1end += 2
- } else if y1 > runes2Len {
- // Ran off the bottom of the graph.
- k1start += 2
- } else if front {
- k2Offset := vOffset + delta - k1
- if k2Offset >= 0 && k2Offset < vLength && v2[k2Offset] != -1 {
- // Mirror x2 onto top-left coordinate system.
- x2 := runes1Len - v2[k2Offset]
- if x1 >= x2 {
- // Overlap detected.
- return dmp.diffBisectSplit(runes1, runes2, x1, y1, deadline)
- }
- }
- }
- }
- // Walk the reverse path one step.
- for k2 := -d + k2start; k2 <= d-k2end; k2 += 2 {
- k2Offset := vOffset + k2
- var x2 int
- if k2 == -d || (k2 != d && v2[k2Offset-1] < v2[k2Offset+1]) {
- x2 = v2[k2Offset+1]
- } else {
- x2 = v2[k2Offset-1] + 1
- }
- var y2 = x2 - k2
- for x2 < runes1Len && y2 < runes2Len {
- if runes1[runes1Len-x2-1] != runes2[runes2Len-y2-1] {
- break
- }
- x2++
- y2++
- }
- v2[k2Offset] = x2
- if x2 > runes1Len {
- // Ran off the left of the graph.
- k2end += 2
- } else if y2 > runes2Len {
- // Ran off the top of the graph.
- k2start += 2
- } else if !front {
- k1Offset := vOffset + delta - k2
- if k1Offset >= 0 && k1Offset < vLength && v1[k1Offset] != -1 {
- x1 := v1[k1Offset]
- y1 := vOffset + x1 - k1Offset
- // Mirror x2 onto top-left coordinate system.
- x2 = runes1Len - x2
- if x1 >= x2 {
- // Overlap detected.
- return dmp.diffBisectSplit(runes1, runes2, x1, y1, deadline)
- }
- }
- }
- }
- }
- // Diff took too long and hit the deadline or number of diffs equals number of characters, no commonality at all.
- return []Diff{
- {DiffDelete, string(runes1)},
- {DiffInsert, string(runes2)},
- }
-}
-
-func (dmp *DiffMatchPatch) diffBisectSplit(runes1, runes2 []rune, x, y int,
- deadline time.Time) []Diff {
- runes1a := runes1[:x]
- runes2a := runes2[:y]
- runes1b := runes1[x:]
- runes2b := runes2[y:]
-
- // Compute both diffs serially.
- diffs := dmp.diffMainRunes(runes1a, runes2a, false, deadline)
- diffsb := dmp.diffMainRunes(runes1b, runes2b, false, deadline)
-
- return append(diffs, diffsb...)
-}
-
-// DiffLinesToChars splits two texts into a list of strings, and educes the texts to a string of hashes where each Unicode character represents one line.
-// It's slightly faster to call DiffLinesToRunes first, followed by DiffMainRunes.
-func (dmp *DiffMatchPatch) DiffLinesToChars(text1, text2 string) (string, string, []string) {
- chars1, chars2, lineArray := dmp.diffLinesToStrings(text1, text2)
- return chars1, chars2, lineArray
-}
-
-// DiffLinesToRunes splits two texts into a list of runes.
-func (dmp *DiffMatchPatch) DiffLinesToRunes(text1, text2 string) ([]rune, []rune, []string) {
- chars1, chars2, lineArray := dmp.diffLinesToStrings(text1, text2)
- return []rune(chars1), []rune(chars2), lineArray
-}
-
-// DiffCharsToLines rehydrates the text in a diff from a string of line hashes to real lines of text.
-func (dmp *DiffMatchPatch) DiffCharsToLines(diffs []Diff, lineArray []string) []Diff {
- hydrated := make([]Diff, 0, len(diffs))
- for _, aDiff := range diffs {
- runes := []rune(aDiff.Text)
- text := make([]string, len(runes))
-
- for i, r := range runes {
- text[i] = lineArray[runeToInt(r)]
- }
-
- aDiff.Text = strings.Join(text, "")
- hydrated = append(hydrated, aDiff)
- }
- return hydrated
-}
-
-// DiffCommonPrefix determines the common prefix length of two strings.
-func (dmp *DiffMatchPatch) DiffCommonPrefix(text1, text2 string) int {
- // Unused in this code, but retained for interface compatibility.
- return commonPrefixLength([]rune(text1), []rune(text2))
-}
-
-// DiffCommonSuffix determines the common suffix length of two strings.
-func (dmp *DiffMatchPatch) DiffCommonSuffix(text1, text2 string) int {
- // Unused in this code, but retained for interface compatibility.
- return commonSuffixLength([]rune(text1), []rune(text2))
-}
-
-// commonPrefixLength returns the length of the common prefix of two rune slices.
-func commonPrefixLength(text1, text2 []rune) int {
- // Linear search. See comment in commonSuffixLength.
- n := 0
- for ; n < len(text1) && n < len(text2); n++ {
- if text1[n] != text2[n] {
- return n
- }
- }
- return n
-}
-
-// commonSuffixLength returns the length of the common suffix of two rune slices.
-func commonSuffixLength(text1, text2 []rune) int {
- // Use linear search rather than the binary search discussed at https://neil.fraser.name/news/2007/10/09/.
- // See discussion at https://github.com/sergi/go-diff/issues/54.
- i1 := len(text1)
- i2 := len(text2)
- for n := 0; ; n++ {
- i1--
- i2--
- if i1 < 0 || i2 < 0 || text1[i1] != text2[i2] {
- return n
- }
- }
-}
-
-// DiffCommonOverlap determines if the suffix of one string is the prefix of another.
-func (dmp *DiffMatchPatch) DiffCommonOverlap(text1 string, text2 string) int {
- // Cache the text lengths to prevent multiple calls.
- text1Length := len(text1)
- text2Length := len(text2)
- // Eliminate the null case.
- if text1Length == 0 || text2Length == 0 {
- return 0
- }
- // Truncate the longer string.
- if text1Length > text2Length {
- text1 = text1[text1Length-text2Length:]
- } else if text1Length < text2Length {
- text2 = text2[0:text1Length]
- }
- textLength := int(math.Min(float64(text1Length), float64(text2Length)))
- // Quick check for the worst case.
- if text1 == text2 {
- return textLength
- }
-
- // Start by looking for a single character match and increase length until no match is found. Performance analysis: http://neil.fraser.name/news/2010/11/04/
- best := 0
- length := 1
- for {
- pattern := text1[textLength-length:]
- found := strings.Index(text2, pattern)
- if found == -1 {
- break
- }
- length += found
- if found == 0 || text1[textLength-length:] == text2[0:length] {
- best = length
- length++
- }
- }
-
- return best
-}
-
-// DiffHalfMatch checks whether the two texts share a substring which is at least half the length of the longer text. This speedup can produce non-minimal diffs.
-func (dmp *DiffMatchPatch) DiffHalfMatch(text1, text2 string) []string {
- // Unused in this code, but retained for interface compatibility.
- runeSlices := dmp.diffHalfMatch([]rune(text1), []rune(text2))
- if runeSlices == nil {
- return nil
- }
-
- result := make([]string, len(runeSlices))
- for i, r := range runeSlices {
- result[i] = string(r)
- }
- return result
-}
-
-func (dmp *DiffMatchPatch) diffHalfMatch(text1, text2 []rune) [][]rune {
- if dmp.DiffTimeout <= 0 {
- // Don't risk returning a non-optimal diff if we have unlimited time.
- return nil
- }
-
- var longtext, shorttext []rune
- if len(text1) > len(text2) {
- longtext = text1
- shorttext = text2
- } else {
- longtext = text2
- shorttext = text1
- }
-
- if len(longtext) < 4 || len(shorttext)*2 < len(longtext) {
- return nil // Pointless.
- }
-
- // First check if the second quarter is the seed for a half-match.
- hm1 := dmp.diffHalfMatchI(longtext, shorttext, int(float64(len(longtext)+3)/4))
-
- // Check again based on the third quarter.
- hm2 := dmp.diffHalfMatchI(longtext, shorttext, int(float64(len(longtext)+1)/2))
-
- hm := [][]rune{}
- if hm1 == nil && hm2 == nil {
- return nil
- } else if hm2 == nil {
- hm = hm1
- } else if hm1 == nil {
- hm = hm2
- } else {
- // Both matched. Select the longest.
- if len(hm1[4]) > len(hm2[4]) {
- hm = hm1
- } else {
- hm = hm2
- }
- }
-
- // A half-match was found, sort out the return data.
- if len(text1) > len(text2) {
- return hm
- }
-
- return [][]rune{hm[2], hm[3], hm[0], hm[1], hm[4]}
-}
-
-// diffHalfMatchI checks if a substring of shorttext exist within longtext such that the substring is at least half the length of longtext?
-// Returns a slice containing the prefix of longtext, the suffix of longtext, the prefix of shorttext, the suffix of shorttext and the common middle, or null if there was no match.
-func (dmp *DiffMatchPatch) diffHalfMatchI(l, s []rune, i int) [][]rune {
- var bestCommonA []rune
- var bestCommonB []rune
- var bestCommonLen int
- var bestLongtextA []rune
- var bestLongtextB []rune
- var bestShorttextA []rune
- var bestShorttextB []rune
-
- // Start with a 1/4 length substring at position i as a seed.
- seed := l[i : i+len(l)/4]
-
- for j := runesIndexOf(s, seed, 0); j != -1; j = runesIndexOf(s, seed, j+1) {
- prefixLength := commonPrefixLength(l[i:], s[j:])
- suffixLength := commonSuffixLength(l[:i], s[:j])
-
- if bestCommonLen < suffixLength+prefixLength {
- bestCommonA = s[j-suffixLength : j]
- bestCommonB = s[j : j+prefixLength]
- bestCommonLen = len(bestCommonA) + len(bestCommonB)
- bestLongtextA = l[:i-suffixLength]
- bestLongtextB = l[i+prefixLength:]
- bestShorttextA = s[:j-suffixLength]
- bestShorttextB = s[j+prefixLength:]
- }
- }
-
- if bestCommonLen*2 < len(l) {
- return nil
- }
-
- return [][]rune{
- bestLongtextA,
- bestLongtextB,
- bestShorttextA,
- bestShorttextB,
- append(bestCommonA, bestCommonB...),
- }
-}
-
-// DiffCleanupSemantic reduces the number of edits by eliminating semantically trivial equalities.
-func (dmp *DiffMatchPatch) DiffCleanupSemantic(diffs []Diff) []Diff {
- changes := false
- // Stack of indices where equalities are found.
- equalities := make([]int, 0, len(diffs))
-
- var lastequality string
- // Always equal to diffs[equalities[equalitiesLength - 1]][1]
- var pointer int // Index of current position.
- // Number of characters that changed prior to the equality.
- var lengthInsertions1, lengthDeletions1 int
- // Number of characters that changed after the equality.
- var lengthInsertions2, lengthDeletions2 int
-
- for pointer < len(diffs) {
- if diffs[pointer].Type == DiffEqual {
- // Equality found.
- equalities = append(equalities, pointer)
- lengthInsertions1 = lengthInsertions2
- lengthDeletions1 = lengthDeletions2
- lengthInsertions2 = 0
- lengthDeletions2 = 0
- lastequality = diffs[pointer].Text
- } else {
- // An insertion or deletion.
-
- if diffs[pointer].Type == DiffInsert {
- lengthInsertions2 += utf8.RuneCountInString(diffs[pointer].Text)
- } else {
- lengthDeletions2 += utf8.RuneCountInString(diffs[pointer].Text)
- }
- // Eliminate an equality that is smaller or equal to the edits on both sides of it.
- difference1 := int(math.Max(float64(lengthInsertions1), float64(lengthDeletions1)))
- difference2 := int(math.Max(float64(lengthInsertions2), float64(lengthDeletions2)))
- if utf8.RuneCountInString(lastequality) > 0 &&
- (utf8.RuneCountInString(lastequality) <= difference1) &&
- (utf8.RuneCountInString(lastequality) <= difference2) {
- // Duplicate record.
- insPoint := equalities[len(equalities)-1]
- diffs = splice(diffs, insPoint, 0, Diff{DiffDelete, lastequality})
-
- // Change second copy to insert.
- diffs[insPoint+1].Type = DiffInsert
- // Throw away the equality we just deleted.
- equalities = equalities[:len(equalities)-1]
-
- if len(equalities) > 0 {
- equalities = equalities[:len(equalities)-1]
- }
- pointer = -1
- if len(equalities) > 0 {
- pointer = equalities[len(equalities)-1]
- }
-
- lengthInsertions1 = 0 // Reset the counters.
- lengthDeletions1 = 0
- lengthInsertions2 = 0
- lengthDeletions2 = 0
- lastequality = ""
- changes = true
- }
- }
- pointer++
- }
-
- // Normalize the diff.
- if changes {
- diffs = dmp.DiffCleanupMerge(diffs)
- }
- diffs = dmp.DiffCleanupSemanticLossless(diffs)
- // Find any overlaps between deletions and insertions.
- // e.g: abcxxxxxxdef
- // -> abcxxxdef
- // e.g: xxxabcdefxxx
- // -> defxxxabc
- // Only extract an overlap if it is as big as the edit ahead or behind it.
- pointer = 1
- for pointer < len(diffs) {
- if diffs[pointer-1].Type == DiffDelete &&
- diffs[pointer].Type == DiffInsert {
- deletion := diffs[pointer-1].Text
- insertion := diffs[pointer].Text
- overlapLength1 := dmp.DiffCommonOverlap(deletion, insertion)
- overlapLength2 := dmp.DiffCommonOverlap(insertion, deletion)
- if overlapLength1 >= overlapLength2 {
- if float64(overlapLength1) >= float64(utf8.RuneCountInString(deletion))/2 ||
- float64(overlapLength1) >= float64(utf8.RuneCountInString(insertion))/2 {
-
- // Overlap found. Insert an equality and trim the surrounding edits.
- diffs = splice(diffs, pointer, 0, Diff{DiffEqual, insertion[:overlapLength1]})
- diffs[pointer-1].Text =
- deletion[0 : len(deletion)-overlapLength1]
- diffs[pointer+1].Text = insertion[overlapLength1:]
- pointer++
- }
- } else {
- if float64(overlapLength2) >= float64(utf8.RuneCountInString(deletion))/2 ||
- float64(overlapLength2) >= float64(utf8.RuneCountInString(insertion))/2 {
- // Reverse overlap found. Insert an equality and swap and trim the surrounding edits.
- overlap := Diff{DiffEqual, deletion[:overlapLength2]}
- diffs = splice(diffs, pointer, 0, overlap)
- diffs[pointer-1].Type = DiffInsert
- diffs[pointer-1].Text = insertion[0 : len(insertion)-overlapLength2]
- diffs[pointer+1].Type = DiffDelete
- diffs[pointer+1].Text = deletion[overlapLength2:]
- pointer++
- }
- }
- pointer++
- }
- pointer++
- }
-
- return diffs
-}
-
-// Define some regex patterns for matching boundaries.
-var (
- nonAlphaNumericRegex = regexp.MustCompile(`[^a-zA-Z0-9]`)
- whitespaceRegex = regexp.MustCompile(`\s`)
- linebreakRegex = regexp.MustCompile(`[\r\n]`)
- blanklineEndRegex = regexp.MustCompile(`\n\r?\n$`)
- blanklineStartRegex = regexp.MustCompile(`^\r?\n\r?\n`)
-)
-
-// diffCleanupSemanticScore computes a score representing whether the internal boundary falls on logical boundaries.
-// Scores range from 6 (best) to 0 (worst). Closure, but does not reference any external variables.
-func diffCleanupSemanticScore(one, two string) int {
- if len(one) == 0 || len(two) == 0 {
- // Edges are the best.
- return 6
- }
-
- // Each port of this function behaves slightly differently due to subtle differences in each language's definition of things like 'whitespace'. Since this function's purpose is largely cosmetic, the choice has been made to use each language's native features rather than force total conformity.
- rune1, _ := utf8.DecodeLastRuneInString(one)
- rune2, _ := utf8.DecodeRuneInString(two)
- char1 := string(rune1)
- char2 := string(rune2)
-
- nonAlphaNumeric1 := nonAlphaNumericRegex.MatchString(char1)
- nonAlphaNumeric2 := nonAlphaNumericRegex.MatchString(char2)
- whitespace1 := nonAlphaNumeric1 && whitespaceRegex.MatchString(char1)
- whitespace2 := nonAlphaNumeric2 && whitespaceRegex.MatchString(char2)
- lineBreak1 := whitespace1 && linebreakRegex.MatchString(char1)
- lineBreak2 := whitespace2 && linebreakRegex.MatchString(char2)
- blankLine1 := lineBreak1 && blanklineEndRegex.MatchString(one)
- blankLine2 := lineBreak2 && blanklineEndRegex.MatchString(two)
-
- if blankLine1 || blankLine2 {
- // Five points for blank lines.
- return 5
- } else if lineBreak1 || lineBreak2 {
- // Four points for line breaks.
- return 4
- } else if nonAlphaNumeric1 && !whitespace1 && whitespace2 {
- // Three points for end of sentences.
- return 3
- } else if whitespace1 || whitespace2 {
- // Two points for whitespace.
- return 2
- } else if nonAlphaNumeric1 || nonAlphaNumeric2 {
- // One point for non-alphanumeric.
- return 1
- }
- return 0
-}
-
-// DiffCleanupSemanticLossless looks for single edits surrounded on both sides by equalities which can be shifted sideways to align the edit to a word boundary.
-// E.g: The cat came. -> The cat came.
-func (dmp *DiffMatchPatch) DiffCleanupSemanticLossless(diffs []Diff) []Diff {
- pointer := 1
-
- // Intentionally ignore the first and last element (don't need checking).
- for pointer < len(diffs)-1 {
- if diffs[pointer-1].Type == DiffEqual &&
- diffs[pointer+1].Type == DiffEqual {
-
- // This is a single edit surrounded by equalities.
- equality1 := diffs[pointer-1].Text
- edit := diffs[pointer].Text
- equality2 := diffs[pointer+1].Text
-
- // First, shift the edit as far left as possible.
- commonOffset := dmp.DiffCommonSuffix(equality1, edit)
- if commonOffset > 0 {
- commonString := edit[len(edit)-commonOffset:]
- equality1 = equality1[0 : len(equality1)-commonOffset]
- edit = commonString + edit[:len(edit)-commonOffset]
- equality2 = commonString + equality2
- }
-
- // Second, step character by character right, looking for the best fit.
- bestEquality1 := equality1
- bestEdit := edit
- bestEquality2 := equality2
- bestScore := diffCleanupSemanticScore(equality1, edit) +
- diffCleanupSemanticScore(edit, equality2)
-
- for len(edit) != 0 && len(equality2) != 0 {
- _, sz := utf8.DecodeRuneInString(edit)
- if len(equality2) < sz || edit[:sz] != equality2[:sz] {
- break
- }
- equality1 += edit[:sz]
- edit = edit[sz:] + equality2[:sz]
- equality2 = equality2[sz:]
- score := diffCleanupSemanticScore(equality1, edit) +
- diffCleanupSemanticScore(edit, equality2)
- // The >= encourages trailing rather than leading whitespace on edits.
- if score >= bestScore {
- bestScore = score
- bestEquality1 = equality1
- bestEdit = edit
- bestEquality2 = equality2
- }
- }
-
- if diffs[pointer-1].Text != bestEquality1 {
- // We have an improvement, save it back to the diff.
- if len(bestEquality1) != 0 {
- diffs[pointer-1].Text = bestEquality1
- } else {
- diffs = splice(diffs, pointer-1, 1)
- pointer--
- }
-
- diffs[pointer].Text = bestEdit
- if len(bestEquality2) != 0 {
- diffs[pointer+1].Text = bestEquality2
- } else {
- diffs = append(diffs[:pointer+1], diffs[pointer+2:]...)
- pointer--
- }
- }
- }
- pointer++
- }
-
- return diffs
-}
-
-// DiffCleanupEfficiency reduces the number of edits by eliminating operationally trivial equalities.
-func (dmp *DiffMatchPatch) DiffCleanupEfficiency(diffs []Diff) []Diff {
- changes := false
- // Stack of indices where equalities are found.
- type equality struct {
- data int
- next *equality
- }
- var equalities *equality
- // Always equal to equalities[equalitiesLength-1][1]
- lastequality := ""
- pointer := 0 // Index of current position.
- // Is there an insertion operation before the last equality.
- preIns := false
- // Is there a deletion operation before the last equality.
- preDel := false
- // Is there an insertion operation after the last equality.
- postIns := false
- // Is there a deletion operation after the last equality.
- postDel := false
- for pointer < len(diffs) {
- if diffs[pointer].Type == DiffEqual { // Equality found.
- if len(diffs[pointer].Text) < dmp.DiffEditCost &&
- (postIns || postDel) {
- // Candidate found.
- equalities = &equality{
- data: pointer,
- next: equalities,
- }
- preIns = postIns
- preDel = postDel
- lastequality = diffs[pointer].Text
- } else {
- // Not a candidate, and can never become one.
- equalities = nil
- lastequality = ""
- }
- postIns = false
- postDel = false
- } else { // An insertion or deletion.
- if diffs[pointer].Type == DiffDelete {
- postDel = true
- } else {
- postIns = true
- }
-
- // Five types to be split:
- // ABXYCD
- // AXCD
- // ABXC
- // AXCD
- // ABXC
- var sumPres int
- if preIns {
- sumPres++
- }
- if preDel {
- sumPres++
- }
- if postIns {
- sumPres++
- }
- if postDel {
- sumPres++
- }
- if len(lastequality) > 0 &&
- ((preIns && preDel && postIns && postDel) ||
- ((len(lastequality) < dmp.DiffEditCost/2) && sumPres == 3)) {
-
- insPoint := equalities.data
-
- // Duplicate record.
- diffs = splice(diffs, insPoint, 0, Diff{DiffDelete, lastequality})
-
- // Change second copy to insert.
- diffs[insPoint+1].Type = DiffInsert
- // Throw away the equality we just deleted.
- equalities = equalities.next
- lastequality = ""
-
- if preIns && preDel {
- // No changes made which could affect previous entry, keep going.
- postIns = true
- postDel = true
- equalities = nil
- } else {
- if equalities != nil {
- equalities = equalities.next
- }
- if equalities != nil {
- pointer = equalities.data
- } else {
- pointer = -1
- }
- postIns = false
- postDel = false
- }
- changes = true
- }
- }
- pointer++
- }
-
- if changes {
- diffs = dmp.DiffCleanupMerge(diffs)
- }
-
- return diffs
-}
-
-// DiffCleanupMerge reorders and merges like edit sections. Merge equalities.
-// Any edit section can move as long as it doesn't cross an equality.
-func (dmp *DiffMatchPatch) DiffCleanupMerge(diffs []Diff) []Diff {
- // Add a dummy entry at the end.
- diffs = append(diffs, Diff{DiffEqual, ""})
- pointer := 0
- countDelete := 0
- countInsert := 0
- commonlength := 0
- textDelete := []rune(nil)
- textInsert := []rune(nil)
-
- for pointer < len(diffs) {
- switch diffs[pointer].Type {
- case DiffInsert:
- countInsert++
- textInsert = append(textInsert, []rune(diffs[pointer].Text)...)
- pointer++
- break
- case DiffDelete:
- countDelete++
- textDelete = append(textDelete, []rune(diffs[pointer].Text)...)
- pointer++
- break
- case DiffEqual:
- // Upon reaching an equality, check for prior redundancies.
- if countDelete+countInsert > 1 {
- if countDelete != 0 && countInsert != 0 {
- // Factor out any common prefixies.
- commonlength = commonPrefixLength(textInsert, textDelete)
- if commonlength != 0 {
- x := pointer - countDelete - countInsert
- if x > 0 && diffs[x-1].Type == DiffEqual {
- diffs[x-1].Text += string(textInsert[:commonlength])
- } else {
- diffs = append([]Diff{{DiffEqual, string(textInsert[:commonlength])}}, diffs...)
- pointer++
- }
- textInsert = textInsert[commonlength:]
- textDelete = textDelete[commonlength:]
- }
- // Factor out any common suffixies.
- commonlength = commonSuffixLength(textInsert, textDelete)
- if commonlength != 0 {
- insertIndex := len(textInsert) - commonlength
- deleteIndex := len(textDelete) - commonlength
- diffs[pointer].Text = string(textInsert[insertIndex:]) + diffs[pointer].Text
- textInsert = textInsert[:insertIndex]
- textDelete = textDelete[:deleteIndex]
- }
- }
- // Delete the offending records and add the merged ones.
- if countDelete == 0 {
- diffs = splice(diffs, pointer-countInsert,
- countDelete+countInsert,
- Diff{DiffInsert, string(textInsert)})
- } else if countInsert == 0 {
- diffs = splice(diffs, pointer-countDelete,
- countDelete+countInsert,
- Diff{DiffDelete, string(textDelete)})
- } else {
- diffs = splice(diffs, pointer-countDelete-countInsert,
- countDelete+countInsert,
- Diff{DiffDelete, string(textDelete)},
- Diff{DiffInsert, string(textInsert)})
- }
-
- pointer = pointer - countDelete - countInsert + 1
- if countDelete != 0 {
- pointer++
- }
- if countInsert != 0 {
- pointer++
- }
- } else if pointer != 0 && diffs[pointer-1].Type == DiffEqual {
- // Merge this equality with the previous one.
- diffs[pointer-1].Text += diffs[pointer].Text
- diffs = append(diffs[:pointer], diffs[pointer+1:]...)
- } else {
- pointer++
- }
- countInsert = 0
- countDelete = 0
- textDelete = nil
- textInsert = nil
- break
- }
- }
-
- if len(diffs[len(diffs)-1].Text) == 0 {
- diffs = diffs[0 : len(diffs)-1] // Remove the dummy entry at the end.
- }
-
- // Second pass: look for single edits surrounded on both sides by equalities which can be shifted sideways to eliminate an equality. E.g: ABAC -> ABAC
- changes := false
- pointer = 1
- // Intentionally ignore the first and last element (don't need checking).
- for pointer < (len(diffs) - 1) {
- if diffs[pointer-1].Type == DiffEqual &&
- diffs[pointer+1].Type == DiffEqual {
- // This is a single edit surrounded by equalities.
- if strings.HasSuffix(diffs[pointer].Text, diffs[pointer-1].Text) {
- // Shift the edit over the previous equality.
- diffs[pointer].Text = diffs[pointer-1].Text +
- diffs[pointer].Text[:len(diffs[pointer].Text)-len(diffs[pointer-1].Text)]
- diffs[pointer+1].Text = diffs[pointer-1].Text + diffs[pointer+1].Text
- diffs = splice(diffs, pointer-1, 1)
- changes = true
- } else if strings.HasPrefix(diffs[pointer].Text, diffs[pointer+1].Text) {
- // Shift the edit over the next equality.
- diffs[pointer-1].Text += diffs[pointer+1].Text
- diffs[pointer].Text =
- diffs[pointer].Text[len(diffs[pointer+1].Text):] + diffs[pointer+1].Text
- diffs = splice(diffs, pointer+1, 1)
- changes = true
- }
- }
- pointer++
- }
-
- // If shifts were made, the diff needs reordering and another shift sweep.
- if changes {
- diffs = dmp.DiffCleanupMerge(diffs)
- }
-
- return diffs
-}
-
-// DiffXIndex returns the equivalent location in s2.
-func (dmp *DiffMatchPatch) DiffXIndex(diffs []Diff, loc int) int {
- chars1 := 0
- chars2 := 0
- lastChars1 := 0
- lastChars2 := 0
- lastDiff := Diff{}
- for i := 0; i < len(diffs); i++ {
- aDiff := diffs[i]
- if aDiff.Type != DiffInsert {
- // Equality or deletion.
- chars1 += len(aDiff.Text)
- }
- if aDiff.Type != DiffDelete {
- // Equality or insertion.
- chars2 += len(aDiff.Text)
- }
- if chars1 > loc {
- // Overshot the location.
- lastDiff = aDiff
- break
- }
- lastChars1 = chars1
- lastChars2 = chars2
- }
- if lastDiff.Type == DiffDelete {
- // The location was deleted.
- return lastChars2
- }
- // Add the remaining character length.
- return lastChars2 + (loc - lastChars1)
-}
-
-// DiffPrettyHtml converts a []Diff into a pretty HTML report.
-// It is intended as an example from which to write one's own display functions.
-func (dmp *DiffMatchPatch) DiffPrettyHtml(diffs []Diff) string {
- var buff bytes.Buffer
- for _, diff := range diffs {
- text := strings.Replace(html.EscapeString(diff.Text), "\n", "¶
", -1)
- switch diff.Type {
- case DiffInsert:
- _, _ = buff.WriteString("")
- _, _ = buff.WriteString(text)
- _, _ = buff.WriteString("")
- case DiffDelete:
- _, _ = buff.WriteString("")
- _, _ = buff.WriteString(text)
- _, _ = buff.WriteString("")
- case DiffEqual:
- _, _ = buff.WriteString("")
- _, _ = buff.WriteString(text)
- _, _ = buff.WriteString("")
- }
- }
- return buff.String()
-}
-
-// DiffPrettyText converts a []Diff into a colored text report.
-func (dmp *DiffMatchPatch) DiffPrettyText(diffs []Diff) string {
- var buff bytes.Buffer
- for _, diff := range diffs {
- text := diff.Text
-
- switch diff.Type {
- case DiffInsert:
- _, _ = buff.WriteString("\x1b[32m")
- _, _ = buff.WriteString(text)
- _, _ = buff.WriteString("\x1b[0m")
- case DiffDelete:
- _, _ = buff.WriteString("\x1b[31m")
- _, _ = buff.WriteString(text)
- _, _ = buff.WriteString("\x1b[0m")
- case DiffEqual:
- _, _ = buff.WriteString(text)
- }
- }
-
- return buff.String()
-}
-
-// DiffText1 computes and returns the source text (all equalities and deletions).
-func (dmp *DiffMatchPatch) DiffText1(diffs []Diff) string {
- //StringBuilder text = new StringBuilder()
- var text bytes.Buffer
-
- for _, aDiff := range diffs {
- if aDiff.Type != DiffInsert {
- _, _ = text.WriteString(aDiff.Text)
- }
- }
- return text.String()
-}
-
-// DiffText2 computes and returns the destination text (all equalities and insertions).
-func (dmp *DiffMatchPatch) DiffText2(diffs []Diff) string {
- var text bytes.Buffer
-
- for _, aDiff := range diffs {
- if aDiff.Type != DiffDelete {
- _, _ = text.WriteString(aDiff.Text)
- }
- }
- return text.String()
-}
-
-// DiffLevenshtein computes the Levenshtein distance that is the number of inserted, deleted or substituted characters.
-func (dmp *DiffMatchPatch) DiffLevenshtein(diffs []Diff) int {
- levenshtein := 0
- insertions := 0
- deletions := 0
-
- for _, aDiff := range diffs {
- switch aDiff.Type {
- case DiffInsert:
- insertions += utf8.RuneCountInString(aDiff.Text)
- case DiffDelete:
- deletions += utf8.RuneCountInString(aDiff.Text)
- case DiffEqual:
- // A deletion and an insertion is one substitution.
- levenshtein += max(insertions, deletions)
- insertions = 0
- deletions = 0
- }
- }
-
- levenshtein += max(insertions, deletions)
- return levenshtein
-}
-
-// DiffToDelta crushes the diff into an encoded string which describes the operations required to transform text1 into text2.
-// E.g. =3\t-2\t+ing -> Keep 3 chars, delete 2 chars, insert 'ing'. Operations are tab-separated. Inserted text is escaped using %xx notation.
-func (dmp *DiffMatchPatch) DiffToDelta(diffs []Diff) string {
- var text bytes.Buffer
- for _, aDiff := range diffs {
- switch aDiff.Type {
- case DiffInsert:
- _, _ = text.WriteString("+")
- _, _ = text.WriteString(strings.Replace(url.QueryEscape(aDiff.Text), "+", " ", -1))
- _, _ = text.WriteString("\t")
- break
- case DiffDelete:
- _, _ = text.WriteString("-")
- _, _ = text.WriteString(strconv.Itoa(utf8.RuneCountInString(aDiff.Text)))
- _, _ = text.WriteString("\t")
- break
- case DiffEqual:
- _, _ = text.WriteString("=")
- _, _ = text.WriteString(strconv.Itoa(utf8.RuneCountInString(aDiff.Text)))
- _, _ = text.WriteString("\t")
- break
- }
- }
- delta := text.String()
- if len(delta) != 0 {
- // Strip off trailing tab character.
- delta = delta[0 : utf8.RuneCountInString(delta)-1]
- delta = unescaper.Replace(delta)
- }
- return delta
-}
-
-// DiffFromDelta given the original text1, and an encoded string which describes the operations required to transform text1 into text2, comAdde the full diff.
-func (dmp *DiffMatchPatch) DiffFromDelta(text1 string, delta string) (diffs []Diff, err error) {
- i := 0
- runes := []rune(text1)
-
- for _, token := range strings.Split(delta, "\t") {
- if len(token) == 0 {
- // Blank tokens are ok (from a trailing \t).
- continue
- }
-
- // Each token begins with a one character parameter which specifies the operation of this token (delete, insert, equality).
- param := token[1:]
-
- switch op := token[0]; op {
- case '+':
- // Decode would Diff all "+" to " "
- param = strings.Replace(param, "+", "%2b", -1)
- param, err = url.QueryUnescape(param)
- if err != nil {
- return nil, err
- }
- if !utf8.ValidString(param) {
- return nil, fmt.Errorf("invalid UTF-8 token: %q", param)
- }
-
- diffs = append(diffs, Diff{DiffInsert, param})
- case '=', '-':
- n, err := strconv.ParseInt(param, 10, 0)
- if err != nil {
- return nil, err
- } else if n < 0 {
- return nil, errors.New("Negative number in DiffFromDelta: " + param)
- }
-
- i += int(n)
- // Break out if we are out of bounds, go1.6 can't handle this very well
- if i > len(runes) {
- break
- }
- // Remember that string slicing is by byte - we want by rune here.
- text := string(runes[i-int(n) : i])
-
- if op == '=' {
- diffs = append(diffs, Diff{DiffEqual, text})
- } else {
- diffs = append(diffs, Diff{DiffDelete, text})
- }
- default:
- // Anything else is an error.
- return nil, errors.New("Invalid diff operation in DiffFromDelta: " + string(token[0]))
- }
- }
-
- if i != len(runes) {
- return nil, fmt.Errorf("Delta length (%v) is different from source text length (%v)", i, len(text1))
- }
-
- return diffs, nil
-}
-
-// diffLinesToStrings splits two texts into a list of strings. Each string represents one line.
-func (dmp *DiffMatchPatch) diffLinesToStrings(text1, text2 string) (string, string, []string) {
- // '\x00' is a valid character, but various debuggers don't like it. So we'll insert a junk entry to avoid generating a null character.
- lineArray := []string{""} // e.g. lineArray[4] == 'Hello\n'
-
- lineHash := make(map[string]int)
- //Each string has the index of lineArray which it points to
- strIndexArray1 := dmp.diffLinesToStringsMunge(text1, &lineArray, lineHash)
- strIndexArray2 := dmp.diffLinesToStringsMunge(text2, &lineArray, lineHash)
-
- return intArrayToString(strIndexArray1), intArrayToString(strIndexArray2), lineArray
-}
-
-// diffLinesToStringsMunge splits a text into an array of strings, and reduces the texts to a []string.
-func (dmp *DiffMatchPatch) diffLinesToStringsMunge(text string, lineArray *[]string, lineHash map[string]int) []uint32 {
- // Walk the text, pulling out a substring for each line. text.split('\n') would would temporarily double our memory footprint. Modifying text would create many large strings to garbage collect.
- lineStart := 0
- lineEnd := -1
- strs := []uint32{}
-
- for lineEnd < len(text)-1 {
- lineEnd = indexOf(text, "\n", lineStart)
-
- if lineEnd == -1 {
- lineEnd = len(text) - 1
- }
-
- line := text[lineStart : lineEnd+1]
- lineStart = lineEnd + 1
- lineValue, ok := lineHash[line]
-
- if ok {
- strs = append(strs, uint32(lineValue))
- } else {
- *lineArray = append(*lineArray, line)
- lineHash[line] = len(*lineArray) - 1
- strs = append(strs, uint32(len(*lineArray)-1))
- }
- }
-
- return strs
-}
diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go
deleted file mode 100644
index d3acc32ce13..00000000000
--- a/vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright (c) 2012-2016 The go-diff authors. All rights reserved.
-// https://github.com/sergi/go-diff
-// See the included LICENSE file for license details.
-//
-// go-diff is a Go implementation of Google's Diff, Match, and Patch library
-// Original library is Copyright (c) 2006 Google Inc.
-// http://code.google.com/p/google-diff-match-patch/
-
-// Package diffmatchpatch offers robust algorithms to perform the operations required for synchronizing plain text.
-package diffmatchpatch
-
-import (
- "time"
-)
-
-// DiffMatchPatch holds the configuration for diff-match-patch operations.
-type DiffMatchPatch struct {
- // Number of seconds to map a diff before giving up (0 for infinity).
- DiffTimeout time.Duration
- // Cost of an empty edit operation in terms of edit characters.
- DiffEditCost int
- // How far to search for a match (0 = exact location, 1000+ = broad match). A match this many characters away from the expected location will add 1.0 to the score (0.0 is a perfect match).
- MatchDistance int
- // When deleting a large block of text (over ~64 characters), how close do the contents have to be to match the expected contents. (0.0 = perfection, 1.0 = very loose). Note that MatchThreshold controls how closely the end points of a delete need to match.
- PatchDeleteThreshold float64
- // Chunk size for context length.
- PatchMargin int
- // The number of bits in an int.
- MatchMaxBits int
- // At what point is no match declared (0.0 = perfection, 1.0 = very loose).
- MatchThreshold float64
-}
-
-// New creates a new DiffMatchPatch object with default parameters.
-func New() *DiffMatchPatch {
- // Defaults.
- return &DiffMatchPatch{
- DiffTimeout: time.Second,
- DiffEditCost: 4,
- MatchThreshold: 0.5,
- MatchDistance: 1000,
- PatchDeleteThreshold: 0.5,
- PatchMargin: 4,
- MatchMaxBits: 32,
- }
-}
diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/match.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/match.go
deleted file mode 100644
index 17374e109fe..00000000000
--- a/vendor/github.com/sergi/go-diff/diffmatchpatch/match.go
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright (c) 2012-2016 The go-diff authors. All rights reserved.
-// https://github.com/sergi/go-diff
-// See the included LICENSE file for license details.
-//
-// go-diff is a Go implementation of Google's Diff, Match, and Patch library
-// Original library is Copyright (c) 2006 Google Inc.
-// http://code.google.com/p/google-diff-match-patch/
-
-package diffmatchpatch
-
-import (
- "math"
-)
-
-// MatchMain locates the best instance of 'pattern' in 'text' near 'loc'.
-// Returns -1 if no match found.
-func (dmp *DiffMatchPatch) MatchMain(text, pattern string, loc int) int {
- // Check for null inputs not needed since null can't be passed in C#.
-
- loc = int(math.Max(0, math.Min(float64(loc), float64(len(text)))))
- if text == pattern {
- // Shortcut (potentially not guaranteed by the algorithm)
- return 0
- } else if len(text) == 0 {
- // Nothing to match.
- return -1
- } else if loc+len(pattern) <= len(text) && text[loc:loc+len(pattern)] == pattern {
- // Perfect match at the perfect spot! (Includes case of null pattern)
- return loc
- }
- // Do a fuzzy compare.
- return dmp.MatchBitap(text, pattern, loc)
-}
-
-// MatchBitap locates the best instance of 'pattern' in 'text' near 'loc' using the Bitap algorithm.
-// Returns -1 if no match was found.
-func (dmp *DiffMatchPatch) MatchBitap(text, pattern string, loc int) int {
- // Initialise the alphabet.
- s := dmp.MatchAlphabet(pattern)
-
- // Highest score beyond which we give up.
- scoreThreshold := dmp.MatchThreshold
- // Is there a nearby exact match? (speedup)
- bestLoc := indexOf(text, pattern, loc)
- if bestLoc != -1 {
- scoreThreshold = math.Min(dmp.matchBitapScore(0, bestLoc, loc,
- pattern), scoreThreshold)
- // What about in the other direction? (speedup)
- bestLoc = lastIndexOf(text, pattern, loc+len(pattern))
- if bestLoc != -1 {
- scoreThreshold = math.Min(dmp.matchBitapScore(0, bestLoc, loc,
- pattern), scoreThreshold)
- }
- }
-
- // Initialise the bit arrays.
- matchmask := 1 << uint((len(pattern) - 1))
- bestLoc = -1
-
- var binMin, binMid int
- binMax := len(pattern) + len(text)
- lastRd := []int{}
- for d := 0; d < len(pattern); d++ {
- // Scan for the best match; each iteration allows for one more error. Run a binary search to determine how far from 'loc' we can stray at this error level.
- binMin = 0
- binMid = binMax
- for binMin < binMid {
- if dmp.matchBitapScore(d, loc+binMid, loc, pattern) <= scoreThreshold {
- binMin = binMid
- } else {
- binMax = binMid
- }
- binMid = (binMax-binMin)/2 + binMin
- }
- // Use the result from this iteration as the maximum for the next.
- binMax = binMid
- start := int(math.Max(1, float64(loc-binMid+1)))
- finish := int(math.Min(float64(loc+binMid), float64(len(text))) + float64(len(pattern)))
-
- rd := make([]int, finish+2)
- rd[finish+1] = (1 << uint(d)) - 1
-
- for j := finish; j >= start; j-- {
- var charMatch int
- if len(text) <= j-1 {
- // Out of range.
- charMatch = 0
- } else if _, ok := s[text[j-1]]; !ok {
- charMatch = 0
- } else {
- charMatch = s[text[j-1]]
- }
-
- if d == 0 {
- // First pass: exact match.
- rd[j] = ((rd[j+1] << 1) | 1) & charMatch
- } else {
- // Subsequent passes: fuzzy match.
- rd[j] = ((rd[j+1]<<1)|1)&charMatch | (((lastRd[j+1] | lastRd[j]) << 1) | 1) | lastRd[j+1]
- }
- if (rd[j] & matchmask) != 0 {
- score := dmp.matchBitapScore(d, j-1, loc, pattern)
- // This match will almost certainly be better than any existing match. But check anyway.
- if score <= scoreThreshold {
- // Told you so.
- scoreThreshold = score
- bestLoc = j - 1
- if bestLoc > loc {
- // When passing loc, don't exceed our current distance from loc.
- start = int(math.Max(1, float64(2*loc-bestLoc)))
- } else {
- // Already passed loc, downhill from here on in.
- break
- }
- }
- }
- }
- if dmp.matchBitapScore(d+1, loc, loc, pattern) > scoreThreshold {
- // No hope for a (better) match at greater error levels.
- break
- }
- lastRd = rd
- }
- return bestLoc
-}
-
-// matchBitapScore computes and returns the score for a match with e errors and x location.
-func (dmp *DiffMatchPatch) matchBitapScore(e, x, loc int, pattern string) float64 {
- accuracy := float64(e) / float64(len(pattern))
- proximity := math.Abs(float64(loc - x))
- if dmp.MatchDistance == 0 {
- // Dodge divide by zero error.
- if proximity == 0 {
- return accuracy
- }
-
- return 1.0
- }
- return accuracy + (proximity / float64(dmp.MatchDistance))
-}
-
-// MatchAlphabet initialises the alphabet for the Bitap algorithm.
-func (dmp *DiffMatchPatch) MatchAlphabet(pattern string) map[byte]int {
- s := map[byte]int{}
- charPattern := []byte(pattern)
- for _, c := range charPattern {
- _, ok := s[c]
- if !ok {
- s[c] = 0
- }
- }
- i := 0
-
- for _, c := range charPattern {
- value := s[c] | int(uint(1)< y {
- return x
- }
- return y
-}
diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/operation_string.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/operation_string.go
deleted file mode 100644
index 533ec0da7b3..00000000000
--- a/vendor/github.com/sergi/go-diff/diffmatchpatch/operation_string.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Code generated by "stringer -type=Operation -trimprefix=Diff"; DO NOT EDIT.
-
-package diffmatchpatch
-
-import "fmt"
-
-const _Operation_name = "DeleteEqualInsert"
-
-var _Operation_index = [...]uint8{0, 6, 11, 17}
-
-func (i Operation) String() string {
- i -= -1
- if i < 0 || i >= Operation(len(_Operation_index)-1) {
- return fmt.Sprintf("Operation(%d)", i+-1)
- }
- return _Operation_name[_Operation_index[i]:_Operation_index[i+1]]
-}
diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go
deleted file mode 100644
index 0dbe3bdd7de..00000000000
--- a/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go
+++ /dev/null
@@ -1,556 +0,0 @@
-// Copyright (c) 2012-2016 The go-diff authors. All rights reserved.
-// https://github.com/sergi/go-diff
-// See the included LICENSE file for license details.
-//
-// go-diff is a Go implementation of Google's Diff, Match, and Patch library
-// Original library is Copyright (c) 2006 Google Inc.
-// http://code.google.com/p/google-diff-match-patch/
-
-package diffmatchpatch
-
-import (
- "bytes"
- "errors"
- "math"
- "net/url"
- "regexp"
- "strconv"
- "strings"
-)
-
-// Patch represents one patch operation.
-type Patch struct {
- diffs []Diff
- Start1 int
- Start2 int
- Length1 int
- Length2 int
-}
-
-// String emulates GNU diff's format.
-// Header: @@ -382,8 +481,9 @@
-// Indices are printed as 1-based, not 0-based.
-func (p *Patch) String() string {
- var coords1, coords2 string
-
- if p.Length1 == 0 {
- coords1 = strconv.Itoa(p.Start1) + ",0"
- } else if p.Length1 == 1 {
- coords1 = strconv.Itoa(p.Start1 + 1)
- } else {
- coords1 = strconv.Itoa(p.Start1+1) + "," + strconv.Itoa(p.Length1)
- }
-
- if p.Length2 == 0 {
- coords2 = strconv.Itoa(p.Start2) + ",0"
- } else if p.Length2 == 1 {
- coords2 = strconv.Itoa(p.Start2 + 1)
- } else {
- coords2 = strconv.Itoa(p.Start2+1) + "," + strconv.Itoa(p.Length2)
- }
-
- var text bytes.Buffer
- _, _ = text.WriteString("@@ -" + coords1 + " +" + coords2 + " @@\n")
-
- // Escape the body of the patch with %xx notation.
- for _, aDiff := range p.diffs {
- switch aDiff.Type {
- case DiffInsert:
- _, _ = text.WriteString("+")
- case DiffDelete:
- _, _ = text.WriteString("-")
- case DiffEqual:
- _, _ = text.WriteString(" ")
- }
-
- _, _ = text.WriteString(strings.Replace(url.QueryEscape(aDiff.Text), "+", " ", -1))
- _, _ = text.WriteString("\n")
- }
-
- return unescaper.Replace(text.String())
-}
-
-// PatchAddContext increases the context until it is unique, but doesn't let the pattern expand beyond MatchMaxBits.
-func (dmp *DiffMatchPatch) PatchAddContext(patch Patch, text string) Patch {
- if len(text) == 0 {
- return patch
- }
-
- pattern := text[patch.Start2 : patch.Start2+patch.Length1]
- padding := 0
-
- // Look for the first and last matches of pattern in text. If two different matches are found, increase the pattern length.
- for strings.Index(text, pattern) != strings.LastIndex(text, pattern) &&
- len(pattern) < dmp.MatchMaxBits-2*dmp.PatchMargin {
- padding += dmp.PatchMargin
- maxStart := max(0, patch.Start2-padding)
- minEnd := min(len(text), patch.Start2+patch.Length1+padding)
- pattern = text[maxStart:minEnd]
- }
- // Add one chunk for good luck.
- padding += dmp.PatchMargin
-
- // Add the prefix.
- prefix := text[max(0, patch.Start2-padding):patch.Start2]
- if len(prefix) != 0 {
- patch.diffs = append([]Diff{Diff{DiffEqual, prefix}}, patch.diffs...)
- }
- // Add the suffix.
- suffix := text[patch.Start2+patch.Length1 : min(len(text), patch.Start2+patch.Length1+padding)]
- if len(suffix) != 0 {
- patch.diffs = append(patch.diffs, Diff{DiffEqual, suffix})
- }
-
- // Roll back the start points.
- patch.Start1 -= len(prefix)
- patch.Start2 -= len(prefix)
- // Extend the lengths.
- patch.Length1 += len(prefix) + len(suffix)
- patch.Length2 += len(prefix) + len(suffix)
-
- return patch
-}
-
-// PatchMake computes a list of patches.
-func (dmp *DiffMatchPatch) PatchMake(opt ...interface{}) []Patch {
- if len(opt) == 1 {
- diffs, _ := opt[0].([]Diff)
- text1 := dmp.DiffText1(diffs)
- return dmp.PatchMake(text1, diffs)
- } else if len(opt) == 2 {
- text1 := opt[0].(string)
- switch t := opt[1].(type) {
- case string:
- diffs := dmp.DiffMain(text1, t, true)
- if len(diffs) > 2 {
- diffs = dmp.DiffCleanupSemantic(diffs)
- diffs = dmp.DiffCleanupEfficiency(diffs)
- }
- return dmp.PatchMake(text1, diffs)
- case []Diff:
- return dmp.patchMake2(text1, t)
- }
- } else if len(opt) == 3 {
- return dmp.PatchMake(opt[0], opt[2])
- }
- return []Patch{}
-}
-
-// patchMake2 computes a list of patches to turn text1 into text2.
-// text2 is not provided, diffs are the delta between text1 and text2.
-func (dmp *DiffMatchPatch) patchMake2(text1 string, diffs []Diff) []Patch {
- // Check for null inputs not needed since null can't be passed in C#.
- patches := []Patch{}
- if len(diffs) == 0 {
- return patches // Get rid of the null case.
- }
-
- patch := Patch{}
- charCount1 := 0 // Number of characters into the text1 string.
- charCount2 := 0 // Number of characters into the text2 string.
- // Start with text1 (prepatchText) and apply the diffs until we arrive at text2 (postpatchText). We recreate the patches one by one to determine context info.
- prepatchText := text1
- postpatchText := text1
-
- for i, aDiff := range diffs {
- if len(patch.diffs) == 0 && aDiff.Type != DiffEqual {
- // A new patch starts here.
- patch.Start1 = charCount1
- patch.Start2 = charCount2
- }
-
- switch aDiff.Type {
- case DiffInsert:
- patch.diffs = append(patch.diffs, aDiff)
- patch.Length2 += len(aDiff.Text)
- postpatchText = postpatchText[:charCount2] +
- aDiff.Text + postpatchText[charCount2:]
- case DiffDelete:
- patch.Length1 += len(aDiff.Text)
- patch.diffs = append(patch.diffs, aDiff)
- postpatchText = postpatchText[:charCount2] + postpatchText[charCount2+len(aDiff.Text):]
- case DiffEqual:
- if len(aDiff.Text) <= 2*dmp.PatchMargin &&
- len(patch.diffs) != 0 && i != len(diffs)-1 {
- // Small equality inside a patch.
- patch.diffs = append(patch.diffs, aDiff)
- patch.Length1 += len(aDiff.Text)
- patch.Length2 += len(aDiff.Text)
- }
- if len(aDiff.Text) >= 2*dmp.PatchMargin {
- // Time for a new patch.
- if len(patch.diffs) != 0 {
- patch = dmp.PatchAddContext(patch, prepatchText)
- patches = append(patches, patch)
- patch = Patch{}
- // Unlike Unidiff, our patch lists have a rolling context. http://code.google.com/p/google-diff-match-patch/wiki/Unidiff Update prepatch text & pos to reflect the application of the just completed patch.
- prepatchText = postpatchText
- charCount1 = charCount2
- }
- }
- }
-
- // Update the current character count.
- if aDiff.Type != DiffInsert {
- charCount1 += len(aDiff.Text)
- }
- if aDiff.Type != DiffDelete {
- charCount2 += len(aDiff.Text)
- }
- }
-
- // Pick up the leftover patch if not empty.
- if len(patch.diffs) != 0 {
- patch = dmp.PatchAddContext(patch, prepatchText)
- patches = append(patches, patch)
- }
-
- return patches
-}
-
-// PatchDeepCopy returns an array that is identical to a given an array of patches.
-func (dmp *DiffMatchPatch) PatchDeepCopy(patches []Patch) []Patch {
- patchesCopy := []Patch{}
- for _, aPatch := range patches {
- patchCopy := Patch{}
- for _, aDiff := range aPatch.diffs {
- patchCopy.diffs = append(patchCopy.diffs, Diff{
- aDiff.Type,
- aDiff.Text,
- })
- }
- patchCopy.Start1 = aPatch.Start1
- patchCopy.Start2 = aPatch.Start2
- patchCopy.Length1 = aPatch.Length1
- patchCopy.Length2 = aPatch.Length2
- patchesCopy = append(patchesCopy, patchCopy)
- }
- return patchesCopy
-}
-
-// PatchApply merges a set of patches onto the text. Returns a patched text, as well as an array of true/false values indicating which patches were applied.
-func (dmp *DiffMatchPatch) PatchApply(patches []Patch, text string) (string, []bool) {
- if len(patches) == 0 {
- return text, []bool{}
- }
-
- // Deep copy the patches so that no changes are made to originals.
- patches = dmp.PatchDeepCopy(patches)
-
- nullPadding := dmp.PatchAddPadding(patches)
- text = nullPadding + text + nullPadding
- patches = dmp.PatchSplitMax(patches)
-
- x := 0
- // delta keeps track of the offset between the expected and actual location of the previous patch. If there are patches expected at positions 10 and 20, but the first patch was found at 12, delta is 2 and the second patch has an effective expected position of 22.
- delta := 0
- results := make([]bool, len(patches))
- for _, aPatch := range patches {
- expectedLoc := aPatch.Start2 + delta
- text1 := dmp.DiffText1(aPatch.diffs)
- var startLoc int
- endLoc := -1
- if len(text1) > dmp.MatchMaxBits {
- // PatchSplitMax will only provide an oversized pattern in the case of a monster delete.
- startLoc = dmp.MatchMain(text, text1[:dmp.MatchMaxBits], expectedLoc)
- if startLoc != -1 {
- endLoc = dmp.MatchMain(text,
- text1[len(text1)-dmp.MatchMaxBits:], expectedLoc+len(text1)-dmp.MatchMaxBits)
- if endLoc == -1 || startLoc >= endLoc {
- // Can't find valid trailing context. Drop this patch.
- startLoc = -1
- }
- }
- } else {
- startLoc = dmp.MatchMain(text, text1, expectedLoc)
- }
- if startLoc == -1 {
- // No match found. :(
- results[x] = false
- // Subtract the delta for this failed patch from subsequent patches.
- delta -= aPatch.Length2 - aPatch.Length1
- } else {
- // Found a match. :)
- results[x] = true
- delta = startLoc - expectedLoc
- var text2 string
- if endLoc == -1 {
- text2 = text[startLoc:int(math.Min(float64(startLoc+len(text1)), float64(len(text))))]
- } else {
- text2 = text[startLoc:int(math.Min(float64(endLoc+dmp.MatchMaxBits), float64(len(text))))]
- }
- if text1 == text2 {
- // Perfect match, just shove the Replacement text in.
- text = text[:startLoc] + dmp.DiffText2(aPatch.diffs) + text[startLoc+len(text1):]
- } else {
- // Imperfect match. Run a diff to get a framework of equivalent indices.
- diffs := dmp.DiffMain(text1, text2, false)
- if len(text1) > dmp.MatchMaxBits && float64(dmp.DiffLevenshtein(diffs))/float64(len(text1)) > dmp.PatchDeleteThreshold {
- // The end points match, but the content is unacceptably bad.
- results[x] = false
- } else {
- diffs = dmp.DiffCleanupSemanticLossless(diffs)
- index1 := 0
- for _, aDiff := range aPatch.diffs {
- if aDiff.Type != DiffEqual {
- index2 := dmp.DiffXIndex(diffs, index1)
- if aDiff.Type == DiffInsert {
- // Insertion
- text = text[:startLoc+index2] + aDiff.Text + text[startLoc+index2:]
- } else if aDiff.Type == DiffDelete {
- // Deletion
- startIndex := startLoc + index2
- text = text[:startIndex] +
- text[startIndex+dmp.DiffXIndex(diffs, index1+len(aDiff.Text))-index2:]
- }
- }
- if aDiff.Type != DiffDelete {
- index1 += len(aDiff.Text)
- }
- }
- }
- }
- }
- x++
- }
- // Strip the padding off.
- text = text[len(nullPadding) : len(nullPadding)+(len(text)-2*len(nullPadding))]
- return text, results
-}
-
-// PatchAddPadding adds some padding on text start and end so that edges can match something.
-// Intended to be called only from within patchApply.
-func (dmp *DiffMatchPatch) PatchAddPadding(patches []Patch) string {
- paddingLength := dmp.PatchMargin
- nullPadding := ""
- for x := 1; x <= paddingLength; x++ {
- nullPadding += string(rune(x))
- }
-
- // Bump all the patches forward.
- for i := range patches {
- patches[i].Start1 += paddingLength
- patches[i].Start2 += paddingLength
- }
-
- // Add some padding on start of first diff.
- if len(patches[0].diffs) == 0 || patches[0].diffs[0].Type != DiffEqual {
- // Add nullPadding equality.
- patches[0].diffs = append([]Diff{Diff{DiffEqual, nullPadding}}, patches[0].diffs...)
- patches[0].Start1 -= paddingLength // Should be 0.
- patches[0].Start2 -= paddingLength // Should be 0.
- patches[0].Length1 += paddingLength
- patches[0].Length2 += paddingLength
- } else if paddingLength > len(patches[0].diffs[0].Text) {
- // Grow first equality.
- extraLength := paddingLength - len(patches[0].diffs[0].Text)
- patches[0].diffs[0].Text = nullPadding[len(patches[0].diffs[0].Text):] + patches[0].diffs[0].Text
- patches[0].Start1 -= extraLength
- patches[0].Start2 -= extraLength
- patches[0].Length1 += extraLength
- patches[0].Length2 += extraLength
- }
-
- // Add some padding on end of last diff.
- last := len(patches) - 1
- if len(patches[last].diffs) == 0 || patches[last].diffs[len(patches[last].diffs)-1].Type != DiffEqual {
- // Add nullPadding equality.
- patches[last].diffs = append(patches[last].diffs, Diff{DiffEqual, nullPadding})
- patches[last].Length1 += paddingLength
- patches[last].Length2 += paddingLength
- } else if paddingLength > len(patches[last].diffs[len(patches[last].diffs)-1].Text) {
- // Grow last equality.
- lastDiff := patches[last].diffs[len(patches[last].diffs)-1]
- extraLength := paddingLength - len(lastDiff.Text)
- patches[last].diffs[len(patches[last].diffs)-1].Text += nullPadding[:extraLength]
- patches[last].Length1 += extraLength
- patches[last].Length2 += extraLength
- }
-
- return nullPadding
-}
-
-// PatchSplitMax looks through the patches and breaks up any which are longer than the maximum limit of the match algorithm.
-// Intended to be called only from within patchApply.
-func (dmp *DiffMatchPatch) PatchSplitMax(patches []Patch) []Patch {
- patchSize := dmp.MatchMaxBits
- for x := 0; x < len(patches); x++ {
- if patches[x].Length1 <= patchSize {
- continue
- }
- bigpatch := patches[x]
- // Remove the big old patch.
- patches = append(patches[:x], patches[x+1:]...)
- x--
-
- Start1 := bigpatch.Start1
- Start2 := bigpatch.Start2
- precontext := ""
- for len(bigpatch.diffs) != 0 {
- // Create one of several smaller patches.
- patch := Patch{}
- empty := true
- patch.Start1 = Start1 - len(precontext)
- patch.Start2 = Start2 - len(precontext)
- if len(precontext) != 0 {
- patch.Length1 = len(precontext)
- patch.Length2 = len(precontext)
- patch.diffs = append(patch.diffs, Diff{DiffEqual, precontext})
- }
- for len(bigpatch.diffs) != 0 && patch.Length1 < patchSize-dmp.PatchMargin {
- diffType := bigpatch.diffs[0].Type
- diffText := bigpatch.diffs[0].Text
- if diffType == DiffInsert {
- // Insertions are harmless.
- patch.Length2 += len(diffText)
- Start2 += len(diffText)
- patch.diffs = append(patch.diffs, bigpatch.diffs[0])
- bigpatch.diffs = bigpatch.diffs[1:]
- empty = false
- } else if diffType == DiffDelete && len(patch.diffs) == 1 && patch.diffs[0].Type == DiffEqual && len(diffText) > 2*patchSize {
- // This is a large deletion. Let it pass in one chunk.
- patch.Length1 += len(diffText)
- Start1 += len(diffText)
- empty = false
- patch.diffs = append(patch.diffs, Diff{diffType, diffText})
- bigpatch.diffs = bigpatch.diffs[1:]
- } else {
- // Deletion or equality. Only take as much as we can stomach.
- diffText = diffText[:min(len(diffText), patchSize-patch.Length1-dmp.PatchMargin)]
-
- patch.Length1 += len(diffText)
- Start1 += len(diffText)
- if diffType == DiffEqual {
- patch.Length2 += len(diffText)
- Start2 += len(diffText)
- } else {
- empty = false
- }
- patch.diffs = append(patch.diffs, Diff{diffType, diffText})
- if diffText == bigpatch.diffs[0].Text {
- bigpatch.diffs = bigpatch.diffs[1:]
- } else {
- bigpatch.diffs[0].Text =
- bigpatch.diffs[0].Text[len(diffText):]
- }
- }
- }
- // Compute the head context for the next patch.
- precontext = dmp.DiffText2(patch.diffs)
- precontext = precontext[max(0, len(precontext)-dmp.PatchMargin):]
-
- postcontext := ""
- // Append the end context for this patch.
- if len(dmp.DiffText1(bigpatch.diffs)) > dmp.PatchMargin {
- postcontext = dmp.DiffText1(bigpatch.diffs)[:dmp.PatchMargin]
- } else {
- postcontext = dmp.DiffText1(bigpatch.diffs)
- }
-
- if len(postcontext) != 0 {
- patch.Length1 += len(postcontext)
- patch.Length2 += len(postcontext)
- if len(patch.diffs) != 0 && patch.diffs[len(patch.diffs)-1].Type == DiffEqual {
- patch.diffs[len(patch.diffs)-1].Text += postcontext
- } else {
- patch.diffs = append(patch.diffs, Diff{DiffEqual, postcontext})
- }
- }
- if !empty {
- x++
- patches = append(patches[:x], append([]Patch{patch}, patches[x:]...)...)
- }
- }
- }
- return patches
-}
-
-// PatchToText takes a list of patches and returns a textual representation.
-func (dmp *DiffMatchPatch) PatchToText(patches []Patch) string {
- var text bytes.Buffer
- for _, aPatch := range patches {
- _, _ = text.WriteString(aPatch.String())
- }
- return text.String()
-}
-
-// PatchFromText parses a textual representation of patches and returns a List of Patch objects.
-func (dmp *DiffMatchPatch) PatchFromText(textline string) ([]Patch, error) {
- patches := []Patch{}
- if len(textline) == 0 {
- return patches, nil
- }
- text := strings.Split(textline, "\n")
- textPointer := 0
- patchHeader := regexp.MustCompile("^@@ -(\\d+),?(\\d*) \\+(\\d+),?(\\d*) @@$")
-
- var patch Patch
- var sign uint8
- var line string
- for textPointer < len(text) {
-
- if !patchHeader.MatchString(text[textPointer]) {
- return patches, errors.New("Invalid patch string: " + text[textPointer])
- }
-
- patch = Patch{}
- m := patchHeader.FindStringSubmatch(text[textPointer])
-
- patch.Start1, _ = strconv.Atoi(m[1])
- if len(m[2]) == 0 {
- patch.Start1--
- patch.Length1 = 1
- } else if m[2] == "0" {
- patch.Length1 = 0
- } else {
- patch.Start1--
- patch.Length1, _ = strconv.Atoi(m[2])
- }
-
- patch.Start2, _ = strconv.Atoi(m[3])
-
- if len(m[4]) == 0 {
- patch.Start2--
- patch.Length2 = 1
- } else if m[4] == "0" {
- patch.Length2 = 0
- } else {
- patch.Start2--
- patch.Length2, _ = strconv.Atoi(m[4])
- }
- textPointer++
-
- for textPointer < len(text) {
- if len(text[textPointer]) > 0 {
- sign = text[textPointer][0]
- } else {
- textPointer++
- continue
- }
-
- line = text[textPointer][1:]
- line = strings.Replace(line, "+", "%2b", -1)
- line, _ = url.QueryUnescape(line)
- if sign == '-' {
- // Deletion.
- patch.diffs = append(patch.diffs, Diff{DiffDelete, line})
- } else if sign == '+' {
- // Insertion.
- patch.diffs = append(patch.diffs, Diff{DiffInsert, line})
- } else if sign == ' ' {
- // Minor equality.
- patch.diffs = append(patch.diffs, Diff{DiffEqual, line})
- } else if sign == '@' {
- // Start of next patch.
- break
- } else {
- // WTF?
- return patches, errors.New("Invalid patch mode '" + string(sign) + "' in: " + string(line))
- }
- textPointer++
- }
-
- patches = append(patches, patch)
- }
- return patches, nil
-}
diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go
deleted file mode 100644
index eb727bb5948..00000000000
--- a/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go
+++ /dev/null
@@ -1,190 +0,0 @@
-// Copyright (c) 2012-2016 The go-diff authors. All rights reserved.
-// https://github.com/sergi/go-diff
-// See the included LICENSE file for license details.
-//
-// go-diff is a Go implementation of Google's Diff, Match, and Patch library
-// Original library is Copyright (c) 2006 Google Inc.
-// http://code.google.com/p/google-diff-match-patch/
-
-package diffmatchpatch
-
-import (
- "fmt"
- "strings"
- "unicode/utf8"
-)
-
-const UNICODE_INVALID_RANGE_START = 0xD800
-const UNICODE_INVALID_RANGE_END = 0xDFFF
-const UNICODE_INVALID_RANGE_DELTA = UNICODE_INVALID_RANGE_END - UNICODE_INVALID_RANGE_START + 1
-const UNICODE_RANGE_MAX = 0x10FFFF
-
-// unescaper unescapes selected chars for compatibility with JavaScript's encodeURI.
-// In speed critical applications this could be dropped since the receiving application will certainly decode these fine. Note that this function is case-sensitive. Thus "%3F" would not be unescaped. But this is ok because it is only called with the output of HttpUtility.UrlEncode which returns lowercase hex. Example: "%3f" -> "?", "%24" -> "$", etc.
-var unescaper = strings.NewReplacer(
- "%21", "!", "%7E", "~", "%27", "'",
- "%28", "(", "%29", ")", "%3B", ";",
- "%2F", "/", "%3F", "?", "%3A", ":",
- "%40", "@", "%26", "&", "%3D", "=",
- "%2B", "+", "%24", "$", "%2C", ",", "%23", "#", "%2A", "*")
-
-// indexOf returns the first index of pattern in str, starting at str[i].
-func indexOf(str string, pattern string, i int) int {
- if i > len(str)-1 {
- return -1
- }
- if i <= 0 {
- return strings.Index(str, pattern)
- }
- ind := strings.Index(str[i:], pattern)
- if ind == -1 {
- return -1
- }
- return ind + i
-}
-
-// lastIndexOf returns the last index of pattern in str, starting at str[i].
-func lastIndexOf(str string, pattern string, i int) int {
- if i < 0 {
- return -1
- }
- if i >= len(str) {
- return strings.LastIndex(str, pattern)
- }
- _, size := utf8.DecodeRuneInString(str[i:])
- return strings.LastIndex(str[:i+size], pattern)
-}
-
-// runesIndexOf returns the index of pattern in target, starting at target[i].
-func runesIndexOf(target, pattern []rune, i int) int {
- if i > len(target)-1 {
- return -1
- }
- if i <= 0 {
- return runesIndex(target, pattern)
- }
- ind := runesIndex(target[i:], pattern)
- if ind == -1 {
- return -1
- }
- return ind + i
-}
-
-func runesEqual(r1, r2 []rune) bool {
- if len(r1) != len(r2) {
- return false
- }
- for i, c := range r1 {
- if c != r2[i] {
- return false
- }
- }
- return true
-}
-
-// runesIndex is the equivalent of strings.Index for rune slices.
-func runesIndex(r1, r2 []rune) int {
- last := len(r1) - len(r2)
- for i := 0; i <= last; i++ {
- if runesEqual(r1[i:i+len(r2)], r2) {
- return i
- }
- }
- return -1
-}
-
-func intArrayToString(ns []uint32) string {
- if len(ns) == 0 {
- return ""
- }
-
- b := []rune{}
- for _, n := range ns {
- b = append(b, intToRune(n))
- }
- return string(b)
-}
-
-// These constants define the number of bits representable
-// in 1,2,3,4 byte utf8 sequences, respectively.
-const ONE_BYTE_BITS = 7
-const TWO_BYTE_BITS = 11
-const THREE_BYTE_BITS = 16
-const FOUR_BYTE_BITS = 21
-
-// Helper for getting a sequence of bits from an integer.
-func getBits(i uint32, cnt byte, from byte) byte {
- return byte((i >> from) & ((1 << cnt) - 1))
-}
-
-// Converts an integer in the range 0~1112060 into a rune.
-// Based on the ranges table in https://en.wikipedia.org/wiki/UTF-8
-func intToRune(i uint32) rune {
- if i < (1 << ONE_BYTE_BITS) {
- return rune(i)
- }
-
- if i < (1 << TWO_BYTE_BITS) {
- r, size := utf8.DecodeRune([]byte{0b11000000 | getBits(i, 5, 6), 0b10000000 | getBits(i, 6, 0)})
- if size != 2 || r == utf8.RuneError {
- panic(fmt.Sprintf("Error encoding an int %d with size 2, got rune %v and size %d", size, r, i))
- }
- return r
- }
-
- // Last -3 here needed because for some reason 3rd to last codepoint 65533 in this range
- // was returning utf8.RuneError during encoding.
- if i < ((1 << THREE_BYTE_BITS) - UNICODE_INVALID_RANGE_DELTA - 3) {
- if i >= UNICODE_INVALID_RANGE_START {
- i += UNICODE_INVALID_RANGE_DELTA
- }
-
- r, size := utf8.DecodeRune([]byte{0b11100000 | getBits(i, 4, 12), 0b10000000 | getBits(i, 6, 6), 0b10000000 | getBits(i, 6, 0)})
- if size != 3 || r == utf8.RuneError {
- panic(fmt.Sprintf("Error encoding an int %d with size 3, got rune %v and size %d", size, r, i))
- }
- return r
- }
-
- if i < (1<= UNICODE_INVALID_RANGE_END {
- return result - UNICODE_INVALID_RANGE_DELTA
- }
-
- return result
- }
-
- if size == 4 {
- result := uint32(bytes[0]&0b111)<<18 | uint32(bytes[1]&0b111111)<<12 | uint32(bytes[2]&0b111111)<<6 | uint32(bytes[3]&0b111111)
- return result - UNICODE_INVALID_RANGE_DELTA - 3
- }
-
- panic(fmt.Sprintf("Unexpected state decoding rune=%v size=%d", r, size))
-}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go
index 499789984d2..69956b425a1 100644
--- a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go
@@ -1,4 +1,4 @@
-// +build darwin dragonfly freebsd netbsd openbsd
+// +build darwin dragonfly freebsd netbsd openbsd hurd
// +build !js
package logrus
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
index 04748b8515f..c9aed267a4c 100644
--- a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
@@ -1,5 +1,7 @@
+//go:build (linux || aix || zos) && !js && !wasi
// +build linux aix zos
// +build !js
+// +build !wasi
package logrus
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_wasi.go b/vendor/github.com/sirupsen/logrus/terminal_check_wasi.go
new file mode 100644
index 00000000000..2822b212fbf
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_wasi.go
@@ -0,0 +1,8 @@
+//go:build wasi
+// +build wasi
+
+package logrus
+
+func isTerminal(fd int) bool {
+ return false
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_wasip1.go b/vendor/github.com/sirupsen/logrus/terminal_check_wasip1.go
new file mode 100644
index 00000000000..108a6be12b1
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_wasip1.go
@@ -0,0 +1,8 @@
+//go:build wasip1
+// +build wasip1
+
+package logrus
+
+func isTerminal(fd int) bool {
+ return false
+}
diff --git a/vendor/github.com/skeema/knownhosts/LICENSE b/vendor/github.com/skeema/knownhosts/LICENSE
deleted file mode 100644
index 8dada3edaf5..00000000000
--- a/vendor/github.com/skeema/knownhosts/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright {yyyy} {name of copyright owner}
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/skeema/knownhosts/NOTICE b/vendor/github.com/skeema/knownhosts/NOTICE
deleted file mode 100644
index a92cb34d674..00000000000
--- a/vendor/github.com/skeema/knownhosts/NOTICE
+++ /dev/null
@@ -1,13 +0,0 @@
-Copyright 2024 Skeema LLC and the Skeema Knownhosts authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
diff --git a/vendor/github.com/skeema/knownhosts/README.md b/vendor/github.com/skeema/knownhosts/README.md
deleted file mode 100644
index 36b847614ca..00000000000
--- a/vendor/github.com/skeema/knownhosts/README.md
+++ /dev/null
@@ -1,117 +0,0 @@
-# knownhosts: enhanced Golang SSH known_hosts management
-
-[](https://github.com/skeema/knownhosts/actions)
-[](https://pkg.go.dev/github.com/skeema/knownhosts)
-
-
-> This repo is brought to you by [Skeema](https://github.com/skeema/skeema), a
-> declarative pure-SQL schema management system for MySQL and MariaDB. Our
-> premium products include extensive [SSH tunnel](https://www.skeema.io/docs/options/#ssh)
-> functionality, which internally makes use of this package.
-
-Go provides excellent functionality for OpenSSH known_hosts files in its
-external package [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts).
-However, that package is somewhat low-level, making it difficult to implement full known_hosts management similar to command-line `ssh`'s behavior for `StrictHostKeyChecking=no` configuration.
-
-This repo ([github.com/skeema/knownhosts](https://github.com/skeema/knownhosts)) is a thin wrapper package around [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts), adding the following functionality:
-
-* Look up known_hosts public keys for any given host
-* Auto-populate ssh.ClientConfig.HostKeyAlgorithms easily based on known_hosts, providing a solution for [golang/go#29286](https://github.com/golang/go/issues/29286)
-* Write new known_hosts entries to an io.Writer
-* Properly format/normalize new known_hosts entries containing ipv6 addresses, providing a solution for [golang/go#53463](https://github.com/golang/go/issues/53463)
-* Determine if an ssh.HostKeyCallback's error corresponds to a host whose key has changed (indicating potential MitM attack) vs a host that just isn't known yet
-
-## How host key lookup works
-
-Although [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts) doesn't directly expose a way to query its known_host map, we use a subtle trick to do so: invoke the HostKeyCallback with a valid host but a bogus key. The resulting KeyError allows us to determine which public keys are actually present for that host.
-
-By using this technique, [github.com/skeema/knownhosts](https://github.com/skeema/knownhosts) doesn't need to duplicate or re-implement any of the actual known_hosts management from [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts).
-
-## Populating ssh.ClientConfig.HostKeyAlgorithms based on known_hosts
-
-Hosts often have multiple public keys, each of a different type (algorithm). This can be [problematic](https://github.com/golang/go/issues/29286) in [golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts): if a host's first public key is *not* in known_hosts, but a key of a different type *is*, the HostKeyCallback returns an error. The solution is to populate `ssh.ClientConfig.HostKeyAlgorithms` based on the algorithms of the known_hosts entries for that host, but
-[golang.org/x/crypto/ssh/knownhosts](https://pkg.go.dev/golang.org/x/crypto/ssh/knownhosts)
-does not provide an obvious way to do so.
-
-This package uses its host key lookup trick in order to make ssh.ClientConfig.HostKeyAlgorithms easy to populate:
-
-```golang
-import (
- "golang.org/x/crypto/ssh"
- "github.com/skeema/knownhosts"
-)
-
-func sshConfigForHost(hostWithPort string) (*ssh.ClientConfig, error) {
- kh, err := knownhosts.New("/home/myuser/.ssh/known_hosts")
- if err != nil {
- return nil, err
- }
- config := &ssh.ClientConfig{
- User: "myuser",
- Auth: []ssh.AuthMethod{ /* ... */ },
- HostKeyCallback: kh.HostKeyCallback(), // or, equivalently, use ssh.HostKeyCallback(kh)
- HostKeyAlgorithms: kh.HostKeyAlgorithms(hostWithPort),
- }
- return config, nil
-}
-```
-
-## Writing new known_hosts entries
-
-If you wish to mimic the behavior of OpenSSH's `StrictHostKeyChecking=no` or `StrictHostKeyChecking=ask`, this package provides a few functions to simplify this task. For example:
-
-```golang
-sshHost := "yourserver.com:22"
-khPath := "/home/myuser/.ssh/known_hosts"
-kh, err := knownhosts.New(khPath)
-if err != nil {
- log.Fatal("Failed to read known_hosts: ", err)
-}
-
-// Create a custom permissive hostkey callback which still errors on hosts
-// with changed keys, but allows unknown hosts and adds them to known_hosts
-cb := ssh.HostKeyCallback(func(hostname string, remote net.Addr, key ssh.PublicKey) error {
- err := kh(hostname, remote, key)
- if knownhosts.IsHostKeyChanged(err) {
- return fmt.Errorf("REMOTE HOST IDENTIFICATION HAS CHANGED for host %s! This may indicate a MitM attack.", hostname)
- } else if knownhosts.IsHostUnknown(err) {
- f, ferr := os.OpenFile(khPath, os.O_APPEND|os.O_WRONLY, 0600)
- if ferr == nil {
- defer f.Close()
- ferr = knownhosts.WriteKnownHost(f, hostname, remote, key)
- }
- if ferr == nil {
- log.Printf("Added host %s to known_hosts\n", hostname)
- } else {
- log.Printf("Failed to add host %s to known_hosts: %v\n", hostname, ferr)
- }
- return nil // permit previously-unknown hosts (warning: may be insecure)
- }
- return err
-})
-
-config := &ssh.ClientConfig{
- User: "myuser",
- Auth: []ssh.AuthMethod{ /* ... */ },
- HostKeyCallback: cb,
- HostKeyAlgorithms: kh.HostKeyAlgorithms(sshHost),
-}
-```
-
-## License
-
-**Source code copyright 2024 Skeema LLC and the Skeema Knownhosts authors**
-
-```text
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-```
diff --git a/vendor/github.com/skeema/knownhosts/knownhosts.go b/vendor/github.com/skeema/knownhosts/knownhosts.go
deleted file mode 100644
index 4dad7771b88..00000000000
--- a/vendor/github.com/skeema/knownhosts/knownhosts.go
+++ /dev/null
@@ -1,192 +0,0 @@
-// Package knownhosts is a thin wrapper around golang.org/x/crypto/ssh/knownhosts,
-// adding the ability to obtain the list of host key algorithms for a known host.
-package knownhosts
-
-import (
- "encoding/base64"
- "errors"
- "fmt"
- "io"
- "net"
- "sort"
- "strings"
-
- "golang.org/x/crypto/ssh"
- xknownhosts "golang.org/x/crypto/ssh/knownhosts"
-)
-
-// HostKeyCallback wraps ssh.HostKeyCallback with an additional method to
-// perform host key algorithm lookups from the known_hosts entries.
-type HostKeyCallback ssh.HostKeyCallback
-
-// New creates a host key callback from the given OpenSSH host key files. The
-// returned value may be used in ssh.ClientConfig.HostKeyCallback by casting it
-// to ssh.HostKeyCallback, or using its HostKeyCallback method. Otherwise, it
-// operates the same as the New function in golang.org/x/crypto/ssh/knownhosts.
-func New(files ...string) (HostKeyCallback, error) {
- cb, err := xknownhosts.New(files...)
- return HostKeyCallback(cb), err
-}
-
-// HostKeyCallback simply casts the receiver back to ssh.HostKeyCallback, for
-// use in ssh.ClientConfig.HostKeyCallback.
-func (hkcb HostKeyCallback) HostKeyCallback() ssh.HostKeyCallback {
- return ssh.HostKeyCallback(hkcb)
-}
-
-// HostKeys returns a slice of known host public keys for the supplied host:port
-// found in the known_hosts file(s), or an empty slice if the host is not
-// already known. For hosts that have multiple known_hosts entries (for
-// different key types), the result will be sorted by known_hosts filename and
-// line number.
-func (hkcb HostKeyCallback) HostKeys(hostWithPort string) (keys []ssh.PublicKey) {
- var keyErr *xknownhosts.KeyError
- placeholderAddr := &net.TCPAddr{IP: []byte{0, 0, 0, 0}}
- placeholderPubKey := &fakePublicKey{}
- var kkeys []xknownhosts.KnownKey
- if hkcbErr := hkcb(hostWithPort, placeholderAddr, placeholderPubKey); errors.As(hkcbErr, &keyErr) {
- kkeys = append(kkeys, keyErr.Want...)
- knownKeyLess := func(i, j int) bool {
- if kkeys[i].Filename < kkeys[j].Filename {
- return true
- }
- return (kkeys[i].Filename == kkeys[j].Filename && kkeys[i].Line < kkeys[j].Line)
- }
- sort.Slice(kkeys, knownKeyLess)
- keys = make([]ssh.PublicKey, len(kkeys))
- for n := range kkeys {
- keys[n] = kkeys[n].Key
- }
- }
- return keys
-}
-
-// HostKeyAlgorithms returns a slice of host key algorithms for the supplied
-// host:port found in the known_hosts file(s), or an empty slice if the host
-// is not already known. The result may be used in ssh.ClientConfig's
-// HostKeyAlgorithms field, either as-is or after filtering (if you wish to
-// ignore or prefer particular algorithms). For hosts that have multiple
-// known_hosts entries (for different key types), the result will be sorted by
-// known_hosts filename and line number.
-func (hkcb HostKeyCallback) HostKeyAlgorithms(hostWithPort string) (algos []string) {
- // We ensure that algos never contains duplicates. This is done for robustness
- // even though currently golang.org/x/crypto/ssh/knownhosts never exposes
- // multiple keys of the same type. This way our behavior here is unaffected
- // even if https://github.com/golang/go/issues/28870 is implemented, for
- // example by https://github.com/golang/crypto/pull/254.
- hostKeys := hkcb.HostKeys(hostWithPort)
- seen := make(map[string]struct{}, len(hostKeys))
- addAlgo := func(typ string) {
- if _, already := seen[typ]; !already {
- algos = append(algos, typ)
- seen[typ] = struct{}{}
- }
- }
- for _, key := range hostKeys {
- typ := key.Type()
- if typ == ssh.KeyAlgoRSA {
- // KeyAlgoRSASHA256 and KeyAlgoRSASHA512 are only public key algorithms,
- // not public key formats, so they can't appear as a PublicKey.Type.
- // The corresponding PublicKey.Type is KeyAlgoRSA. See RFC 8332, Section 2.
- addAlgo(ssh.KeyAlgoRSASHA512)
- addAlgo(ssh.KeyAlgoRSASHA256)
- }
- addAlgo(typ)
- }
- return algos
-}
-
-// HostKeyAlgorithms is a convenience function for performing host key algorithm
-// lookups on an ssh.HostKeyCallback directly. It is intended for use in code
-// paths that stay with the New method of golang.org/x/crypto/ssh/knownhosts
-// rather than this package's New method.
-func HostKeyAlgorithms(cb ssh.HostKeyCallback, hostWithPort string) []string {
- return HostKeyCallback(cb).HostKeyAlgorithms(hostWithPort)
-}
-
-// IsHostKeyChanged returns a boolean indicating whether the error indicates
-// the host key has changed. It is intended to be called on the error returned
-// from invoking a HostKeyCallback to check whether an SSH host is known.
-func IsHostKeyChanged(err error) bool {
- var keyErr *xknownhosts.KeyError
- return errors.As(err, &keyErr) && len(keyErr.Want) > 0
-}
-
-// IsHostUnknown returns a boolean indicating whether the error represents an
-// unknown host. It is intended to be called on the error returned from invoking
-// a HostKeyCallback to check whether an SSH host is known.
-func IsHostUnknown(err error) bool {
- var keyErr *xknownhosts.KeyError
- return errors.As(err, &keyErr) && len(keyErr.Want) == 0
-}
-
-// Normalize normalizes an address into the form used in known_hosts. This
-// implementation includes a fix for https://github.com/golang/go/issues/53463
-// and will omit brackets around ipv6 addresses on standard port 22.
-func Normalize(address string) string {
- host, port, err := net.SplitHostPort(address)
- if err != nil {
- host = address
- port = "22"
- }
- entry := host
- if port != "22" {
- entry = "[" + entry + "]:" + port
- } else if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") {
- entry = entry[1 : len(entry)-1]
- }
- return entry
-}
-
-// Line returns a line to append to the known_hosts files. This implementation
-// uses the local patched implementation of Normalize in order to solve
-// https://github.com/golang/go/issues/53463.
-func Line(addresses []string, key ssh.PublicKey) string {
- var trimmed []string
- for _, a := range addresses {
- trimmed = append(trimmed, Normalize(a))
- }
-
- return strings.Join([]string{
- strings.Join(trimmed, ","),
- key.Type(),
- base64.StdEncoding.EncodeToString(key.Marshal()),
- }, " ")
-}
-
-// WriteKnownHost writes a known_hosts line to writer for the supplied hostname,
-// remote, and key. This is useful when writing a custom hostkey callback which
-// wraps a callback obtained from knownhosts.New to provide additional
-// known_hosts management functionality. The hostname, remote, and key typically
-// correspond to the callback's args.
-func WriteKnownHost(w io.Writer, hostname string, remote net.Addr, key ssh.PublicKey) error {
- // Always include hostname; only also include remote if it isn't a zero value
- // and doesn't normalize to the same string as hostname.
- hostnameNormalized := Normalize(hostname)
- if strings.ContainsAny(hostnameNormalized, "\t ") {
- return fmt.Errorf("knownhosts: hostname '%s' contains spaces", hostnameNormalized)
- }
- addresses := []string{hostnameNormalized}
- remoteStrNormalized := Normalize(remote.String())
- if remoteStrNormalized != "[0.0.0.0]:0" && remoteStrNormalized != hostnameNormalized &&
- !strings.ContainsAny(remoteStrNormalized, "\t ") {
- addresses = append(addresses, remoteStrNormalized)
- }
- line := Line(addresses, key) + "\n"
- _, err := w.Write([]byte(line))
- return err
-}
-
-// fakePublicKey is used as part of the work-around for
-// https://github.com/golang/go/issues/29286
-type fakePublicKey struct{}
-
-func (fakePublicKey) Type() string {
- return "fake-public-key"
-}
-func (fakePublicKey) Marshal() []byte {
- return []byte("fake public key")
-}
-func (fakePublicKey) Verify(_ []byte, _ *ssh.Signature) error {
- return errors.New("Verify called on placeholder key")
-}
diff --git a/vendor/github.com/spf13/cast/caste.go b/vendor/github.com/spf13/cast/caste.go
index d49bbf83ecb..cd9c04885ac 100644
--- a/vendor/github.com/spf13/cast/caste.go
+++ b/vendor/github.com/spf13/cast/caste.go
@@ -18,6 +18,14 @@ import (
var errNegativeNotAllowed = errors.New("unable to cast negative value")
+type float64EProvider interface {
+ Float64() (float64, error)
+}
+
+type float64Provider interface {
+ Float64() float64
+}
+
// ToTimeE casts an interface to a time.Time type.
func ToTimeE(i interface{}) (tim time.Time, err error) {
return ToTimeInDefaultLocationE(i, time.UTC)
@@ -77,11 +85,14 @@ func ToDurationE(i interface{}) (d time.Duration, err error) {
d, err = time.ParseDuration(s + "ns")
}
return
- case json.Number:
+ case float64EProvider:
var v float64
v, err = s.Float64()
d = time.Duration(v)
return
+ case float64Provider:
+ d = time.Duration(s.Float64())
+ return
default:
err = fmt.Errorf("unable to cast %#v of type %T to Duration", i, i)
return
@@ -174,12 +185,14 @@ func ToFloat64E(i interface{}) (float64, error) {
return v, nil
}
return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i)
- case json.Number:
+ case float64EProvider:
v, err := s.Float64()
if err == nil {
return v, nil
}
return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i)
+ case float64Provider:
+ return s.Float64(), nil
case bool:
if s {
return 1, nil
@@ -230,12 +243,14 @@ func ToFloat32E(i interface{}) (float32, error) {
return float32(v), nil
}
return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i)
- case json.Number:
+ case float64EProvider:
v, err := s.Float64()
if err == nil {
return float32(v), nil
}
return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i)
+ case float64Provider:
+ return float32(s.Float64()), nil
case bool:
if s {
return 1, nil
@@ -917,8 +932,8 @@ func indirectToStringerOrError(a interface{}) interface{} {
return nil
}
- var errorType = reflect.TypeOf((*error)(nil)).Elem()
- var fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
+ errorType := reflect.TypeOf((*error)(nil)).Elem()
+ fmtStringerType := reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
v := reflect.ValueOf(a)
for !v.Type().Implements(fmtStringerType) && !v.Type().Implements(errorType) && v.Kind() == reflect.Ptr && !v.IsNil() {
@@ -987,7 +1002,7 @@ func ToStringE(i interface{}) (string, error) {
// ToStringMapStringE casts an interface to a map[string]string type.
func ToStringMapStringE(i interface{}) (map[string]string, error) {
- var m = map[string]string{}
+ m := map[string]string{}
switch v := i.(type) {
case map[string]string:
@@ -1017,7 +1032,7 @@ func ToStringMapStringE(i interface{}) (map[string]string, error) {
// ToStringMapStringSliceE casts an interface to a map[string][]string type.
func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) {
- var m = map[string][]string{}
+ m := map[string][]string{}
switch v := i.(type) {
case map[string][]string:
@@ -1081,7 +1096,7 @@ func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) {
// ToStringMapBoolE casts an interface to a map[string]bool type.
func ToStringMapBoolE(i interface{}) (map[string]bool, error) {
- var m = map[string]bool{}
+ m := map[string]bool{}
switch v := i.(type) {
case map[interface{}]interface{}:
@@ -1106,7 +1121,7 @@ func ToStringMapBoolE(i interface{}) (map[string]bool, error) {
// ToStringMapE casts an interface to a map[string]interface{} type.
func ToStringMapE(i interface{}) (map[string]interface{}, error) {
- var m = map[string]interface{}{}
+ m := map[string]interface{}{}
switch v := i.(type) {
case map[interface{}]interface{}:
@@ -1126,7 +1141,7 @@ func ToStringMapE(i interface{}) (map[string]interface{}, error) {
// ToStringMapIntE casts an interface to a map[string]int{} type.
func ToStringMapIntE(i interface{}) (map[string]int, error) {
- var m = map[string]int{}
+ m := map[string]int{}
if i == nil {
return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i)
}
@@ -1167,7 +1182,7 @@ func ToStringMapIntE(i interface{}) (map[string]int, error) {
// ToStringMapInt64E casts an interface to a map[string]int64{} type.
func ToStringMapInt64E(i interface{}) (map[string]int64, error) {
- var m = map[string]int64{}
+ m := map[string]int64{}
if i == nil {
return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i)
}
@@ -1404,38 +1419,35 @@ func (f timeFormat) hasTimezone() bool {
return f.typ >= timeFormatNumericTimezone && f.typ <= timeFormatNumericAndNamedTimezone
}
-var (
- timeFormats = []timeFormat{
- // Keep common formats at the top.
- {"2006-01-02", timeFormatNoTimezone},
- {time.RFC3339, timeFormatNumericTimezone},
- {"2006-01-02T15:04:05", timeFormatNoTimezone}, // iso8601 without timezone
- {time.RFC1123Z, timeFormatNumericTimezone},
- {time.RFC1123, timeFormatNamedTimezone},
- {time.RFC822Z, timeFormatNumericTimezone},
- {time.RFC822, timeFormatNamedTimezone},
- {time.RFC850, timeFormatNamedTimezone},
- {"2006-01-02 15:04:05.999999999 -0700 MST", timeFormatNumericAndNamedTimezone}, // Time.String()
- {"2006-01-02T15:04:05-0700", timeFormatNumericTimezone}, // RFC3339 without timezone hh:mm colon
- {"2006-01-02 15:04:05Z0700", timeFormatNumericTimezone}, // RFC3339 without T or timezone hh:mm colon
- {"2006-01-02 15:04:05", timeFormatNoTimezone},
- {time.ANSIC, timeFormatNoTimezone},
- {time.UnixDate, timeFormatNamedTimezone},
- {time.RubyDate, timeFormatNumericTimezone},
- {"2006-01-02 15:04:05Z07:00", timeFormatNumericTimezone},
- {"02 Jan 2006", timeFormatNoTimezone},
- {"2006-01-02 15:04:05 -07:00", timeFormatNumericTimezone},
- {"2006-01-02 15:04:05 -0700", timeFormatNumericTimezone},
- {time.Kitchen, timeFormatTimeOnly},
- {time.Stamp, timeFormatTimeOnly},
- {time.StampMilli, timeFormatTimeOnly},
- {time.StampMicro, timeFormatTimeOnly},
- {time.StampNano, timeFormatTimeOnly},
- }
-)
+var timeFormats = []timeFormat{
+ // Keep common formats at the top.
+ {"2006-01-02", timeFormatNoTimezone},
+ {time.RFC3339, timeFormatNumericTimezone},
+ {"2006-01-02T15:04:05", timeFormatNoTimezone}, // iso8601 without timezone
+ {time.RFC1123Z, timeFormatNumericTimezone},
+ {time.RFC1123, timeFormatNamedTimezone},
+ {time.RFC822Z, timeFormatNumericTimezone},
+ {time.RFC822, timeFormatNamedTimezone},
+ {time.RFC850, timeFormatNamedTimezone},
+ {"2006-01-02 15:04:05.999999999 -0700 MST", timeFormatNumericAndNamedTimezone}, // Time.String()
+ {"2006-01-02T15:04:05-0700", timeFormatNumericTimezone}, // RFC3339 without timezone hh:mm colon
+ {"2006-01-02 15:04:05Z0700", timeFormatNumericTimezone}, // RFC3339 without T or timezone hh:mm colon
+ {"2006-01-02 15:04:05", timeFormatNoTimezone},
+ {time.ANSIC, timeFormatNoTimezone},
+ {time.UnixDate, timeFormatNamedTimezone},
+ {time.RubyDate, timeFormatNumericTimezone},
+ {"2006-01-02 15:04:05Z07:00", timeFormatNumericTimezone},
+ {"02 Jan 2006", timeFormatNoTimezone},
+ {"2006-01-02 15:04:05 -07:00", timeFormatNumericTimezone},
+ {"2006-01-02 15:04:05 -0700", timeFormatNumericTimezone},
+ {time.Kitchen, timeFormatTimeOnly},
+ {time.Stamp, timeFormatTimeOnly},
+ {time.StampMilli, timeFormatTimeOnly},
+ {time.StampMicro, timeFormatTimeOnly},
+ {time.StampNano, timeFormatTimeOnly},
+}
func parseDateWith(s string, location *time.Location, formats []timeFormat) (d time.Time, e error) {
-
for _, format := range formats {
if d, e = time.Parse(format.format, s); e == nil {
diff --git a/vendor/github.com/vishvananda/netlink/.gitignore b/vendor/github.com/vishvananda/netlink/.gitignore
index 9f11b755a17..66f8fb50272 100644
--- a/vendor/github.com/vishvananda/netlink/.gitignore
+++ b/vendor/github.com/vishvananda/netlink/.gitignore
@@ -1 +1,2 @@
.idea/
+.vscode/
diff --git a/vendor/github.com/vishvananda/netlink/addr_linux.go b/vendor/github.com/vishvananda/netlink/addr_linux.go
index 72862ce1f44..218ab237965 100644
--- a/vendor/github.com/vishvananda/netlink/addr_linux.go
+++ b/vendor/github.com/vishvananda/netlink/addr_linux.go
@@ -74,17 +74,19 @@ func (h *Handle) AddrDel(link Link, addr *Addr) error {
}
func (h *Handle) addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error {
- base := link.Attrs()
- if addr.Label != "" && !strings.HasPrefix(addr.Label, base.Name) {
- return fmt.Errorf("label must begin with interface name")
- }
- h.ensureIndex(base)
-
family := nl.GetIPFamily(addr.IP)
-
msg := nl.NewIfAddrmsg(family)
- msg.Index = uint32(base.Index)
msg.Scope = uint8(addr.Scope)
+ if link == nil {
+ msg.Index = uint32(addr.LinkIndex)
+ } else {
+ base := link.Attrs()
+ if addr.Label != "" && !strings.HasPrefix(addr.Label, base.Name) {
+ return fmt.Errorf("label must begin with interface name")
+ }
+ h.ensureIndex(base)
+ msg.Index = uint32(base.Index)
+ }
mask := addr.Mask
if addr.Peer != nil {
mask = addr.Peer.Mask
@@ -296,23 +298,24 @@ type AddrUpdate struct {
// AddrSubscribe takes a chan down which notifications will be sent
// when addresses change. Close the 'done' chan to stop subscription.
func AddrSubscribe(ch chan<- AddrUpdate, done <-chan struct{}) error {
- return addrSubscribeAt(netns.None(), netns.None(), ch, done, nil, false, 0, nil)
+ return addrSubscribeAt(netns.None(), netns.None(), ch, done, nil, false, 0, nil, false)
}
// AddrSubscribeAt works like AddrSubscribe plus it allows the caller
// to choose the network namespace in which to subscribe (ns).
func AddrSubscribeAt(ns netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}) error {
- return addrSubscribeAt(ns, netns.None(), ch, done, nil, false, 0, nil)
+ return addrSubscribeAt(ns, netns.None(), ch, done, nil, false, 0, nil, false)
}
// AddrSubscribeOptions contains a set of options to use with
// AddrSubscribeWithOptions.
type AddrSubscribeOptions struct {
- Namespace *netns.NsHandle
- ErrorCallback func(error)
- ListExisting bool
- ReceiveBufferSize int
- ReceiveTimeout *unix.Timeval
+ Namespace *netns.NsHandle
+ ErrorCallback func(error)
+ ListExisting bool
+ ReceiveBufferSize int
+ ReceiveBufferForceSize bool
+ ReceiveTimeout *unix.Timeval
}
// AddrSubscribeWithOptions work like AddrSubscribe but enable to
@@ -323,10 +326,12 @@ func AddrSubscribeWithOptions(ch chan<- AddrUpdate, done <-chan struct{}, option
none := netns.None()
options.Namespace = &none
}
- return addrSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting, options.ReceiveBufferSize, options.ReceiveTimeout)
+ return addrSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting,
+ options.ReceiveBufferSize, options.ReceiveTimeout, options.ReceiveBufferForceSize)
}
-func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}, cberr func(error), listExisting bool, rcvbuf int, rcvTimeout *unix.Timeval) error {
+func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}, cberr func(error), listExisting bool,
+ rcvbuf int, rcvTimeout *unix.Timeval, rcvBufForce bool) error {
s, err := nl.SubscribeAt(newNs, curNs, unix.NETLINK_ROUTE, unix.RTNLGRP_IPV4_IFADDR, unix.RTNLGRP_IPV6_IFADDR)
if err != nil {
return err
@@ -336,19 +341,18 @@ func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-c
return err
}
}
-
+ if rcvbuf != 0 {
+ err = s.SetReceiveBufferSize(rcvbuf, rcvBufForce)
+ if err != nil {
+ return err
+ }
+ }
if done != nil {
go func() {
<-done
s.Close()
}()
}
- if rcvbuf != 0 {
- err = pkgHandle.SetSocketReceiveBufferSize(rcvbuf, false)
- if err != nil {
- return err
- }
- }
if listExisting {
req := pkgHandle.newNetlinkRequest(unix.RTM_GETADDR,
unix.NLM_F_DUMP)
diff --git a/vendor/github.com/vishvananda/netlink/bridge_linux.go b/vendor/github.com/vishvananda/netlink/bridge_linux.go
index 6e1224c47b8..6c340b0ce9a 100644
--- a/vendor/github.com/vishvananda/netlink/bridge_linux.go
+++ b/vendor/github.com/vishvananda/netlink/bridge_linux.go
@@ -63,7 +63,19 @@ func BridgeVlanAdd(link Link, vid uint16, pvid, untagged, self, master bool) err
// BridgeVlanAdd adds a new vlan filter entry
// Equivalent to: `bridge vlan add dev DEV vid VID [ pvid ] [ untagged ] [ self ] [ master ]`
func (h *Handle) BridgeVlanAdd(link Link, vid uint16, pvid, untagged, self, master bool) error {
- return h.bridgeVlanModify(unix.RTM_SETLINK, link, vid, pvid, untagged, self, master)
+ return h.bridgeVlanModify(unix.RTM_SETLINK, link, vid, 0, pvid, untagged, self, master)
+}
+
+// BridgeVlanAddRange adds a new vlan filter entry
+// Equivalent to: `bridge vlan add dev DEV vid VID-VIDEND [ pvid ] [ untagged ] [ self ] [ master ]`
+func BridgeVlanAddRange(link Link, vid, vidEnd uint16, pvid, untagged, self, master bool) error {
+ return pkgHandle.BridgeVlanAddRange(link, vid, vidEnd, pvid, untagged, self, master)
+}
+
+// BridgeVlanAddRange adds a new vlan filter entry
+// Equivalent to: `bridge vlan add dev DEV vid VID-VIDEND [ pvid ] [ untagged ] [ self ] [ master ]`
+func (h *Handle) BridgeVlanAddRange(link Link, vid, vidEnd uint16, pvid, untagged, self, master bool) error {
+ return h.bridgeVlanModify(unix.RTM_SETLINK, link, vid, vidEnd, pvid, untagged, self, master)
}
// BridgeVlanDel adds a new vlan filter entry
@@ -75,10 +87,22 @@ func BridgeVlanDel(link Link, vid uint16, pvid, untagged, self, master bool) err
// BridgeVlanDel adds a new vlan filter entry
// Equivalent to: `bridge vlan del dev DEV vid VID [ pvid ] [ untagged ] [ self ] [ master ]`
func (h *Handle) BridgeVlanDel(link Link, vid uint16, pvid, untagged, self, master bool) error {
- return h.bridgeVlanModify(unix.RTM_DELLINK, link, vid, pvid, untagged, self, master)
+ return h.bridgeVlanModify(unix.RTM_DELLINK, link, vid, 0, pvid, untagged, self, master)
}
-func (h *Handle) bridgeVlanModify(cmd int, link Link, vid uint16, pvid, untagged, self, master bool) error {
+// BridgeVlanDelRange adds a new vlan filter entry
+// Equivalent to: `bridge vlan del dev DEV vid VID-VIDEND [ pvid ] [ untagged ] [ self ] [ master ]`
+func BridgeVlanDelRange(link Link, vid, vidEnd uint16, pvid, untagged, self, master bool) error {
+ return pkgHandle.BridgeVlanDelRange(link, vid, vidEnd, pvid, untagged, self, master)
+}
+
+// BridgeVlanDelRange adds a new vlan filter entry
+// Equivalent to: `bridge vlan del dev DEV vid VID-VIDEND [ pvid ] [ untagged ] [ self ] [ master ]`
+func (h *Handle) BridgeVlanDelRange(link Link, vid, vidEnd uint16, pvid, untagged, self, master bool) error {
+ return h.bridgeVlanModify(unix.RTM_DELLINK, link, vid, vidEnd, pvid, untagged, self, master)
+}
+
+func (h *Handle) bridgeVlanModify(cmd int, link Link, vid, vidEnd uint16, pvid, untagged, self, master bool) error {
base := link.Attrs()
h.ensureIndex(base)
req := h.newNetlinkRequest(cmd, unix.NLM_F_ACK)
@@ -105,7 +129,20 @@ func (h *Handle) bridgeVlanModify(cmd int, link Link, vid uint16, pvid, untagged
if untagged {
vlanInfo.Flags |= nl.BRIDGE_VLAN_INFO_UNTAGGED
}
- br.AddRtAttr(nl.IFLA_BRIDGE_VLAN_INFO, vlanInfo.Serialize())
+
+ if vidEnd != 0 {
+ vlanEndInfo := &nl.BridgeVlanInfo{Vid: vidEnd}
+ vlanEndInfo.Flags = vlanInfo.Flags
+
+ vlanInfo.Flags |= nl.BRIDGE_VLAN_INFO_RANGE_BEGIN
+ br.AddRtAttr(nl.IFLA_BRIDGE_VLAN_INFO, vlanInfo.Serialize())
+
+ vlanEndInfo.Flags |= nl.BRIDGE_VLAN_INFO_RANGE_END
+ br.AddRtAttr(nl.IFLA_BRIDGE_VLAN_INFO, vlanEndInfo.Serialize())
+ } else {
+ br.AddRtAttr(nl.IFLA_BRIDGE_VLAN_INFO, vlanInfo.Serialize())
+ }
+
req.AddData(br)
_, err := req.Execute(unix.NETLINK_ROUTE, 0)
return err
diff --git a/vendor/github.com/vishvananda/netlink/chain.go b/vendor/github.com/vishvananda/netlink/chain.go
new file mode 100644
index 00000000000..1d1c144e95a
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/chain.go
@@ -0,0 +1,22 @@
+package netlink
+
+import (
+ "fmt"
+)
+
+// Chain contains the attributes of a Chain
+type Chain struct {
+ Parent uint32
+ Chain uint32
+}
+
+func (c Chain) String() string {
+ return fmt.Sprintf("{Parent: %d, Chain: %d}", c.Parent, c.Chain)
+}
+
+func NewChain(parent uint32, chain uint32) Chain {
+ return Chain{
+ Parent: parent,
+ Chain: chain,
+ }
+}
diff --git a/vendor/github.com/vishvananda/netlink/chain_linux.go b/vendor/github.com/vishvananda/netlink/chain_linux.go
new file mode 100644
index 00000000000..d9f441613cc
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/chain_linux.go
@@ -0,0 +1,112 @@
+package netlink
+
+import (
+ "github.com/vishvananda/netlink/nl"
+ "golang.org/x/sys/unix"
+)
+
+// ChainDel will delete a chain from the system.
+func ChainDel(link Link, chain Chain) error {
+ // Equivalent to: `tc chain del $chain`
+ return pkgHandle.ChainDel(link, chain)
+}
+
+// ChainDel will delete a chain from the system.
+// Equivalent to: `tc chain del $chain`
+func (h *Handle) ChainDel(link Link, chain Chain) error {
+ return h.chainModify(unix.RTM_DELCHAIN, 0, link, chain)
+}
+
+// ChainAdd will add a chain to the system.
+// Equivalent to: `tc chain add`
+func ChainAdd(link Link, chain Chain) error {
+ return pkgHandle.ChainAdd(link, chain)
+}
+
+// ChainAdd will add a chain to the system.
+// Equivalent to: `tc chain add`
+func (h *Handle) ChainAdd(link Link, chain Chain) error {
+ return h.chainModify(
+ unix.RTM_NEWCHAIN,
+ unix.NLM_F_CREATE|unix.NLM_F_EXCL,
+ link,
+ chain)
+}
+
+func (h *Handle) chainModify(cmd, flags int, link Link, chain Chain) error {
+ req := h.newNetlinkRequest(cmd, flags|unix.NLM_F_ACK)
+ index := int32(0)
+ if link != nil {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ index = int32(base.Index)
+ }
+ msg := &nl.TcMsg{
+ Family: nl.FAMILY_ALL,
+ Ifindex: index,
+ Parent: chain.Parent,
+ }
+ req.AddData(msg)
+ req.AddData(nl.NewRtAttr(nl.TCA_CHAIN, nl.Uint32Attr(chain.Chain)))
+
+ _, err := req.Execute(unix.NETLINK_ROUTE, 0)
+ return err
+}
+
+// ChainList gets a list of chains in the system.
+// Equivalent to: `tc chain list`.
+// The list can be filtered by link.
+func ChainList(link Link, parent uint32) ([]Chain, error) {
+ return pkgHandle.ChainList(link, parent)
+}
+
+// ChainList gets a list of chains in the system.
+// Equivalent to: `tc chain list`.
+// The list can be filtered by link.
+func (h *Handle) ChainList(link Link, parent uint32) ([]Chain, error) {
+ req := h.newNetlinkRequest(unix.RTM_GETCHAIN, unix.NLM_F_DUMP)
+ index := int32(0)
+ if link != nil {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ index = int32(base.Index)
+ }
+ msg := &nl.TcMsg{
+ Family: nl.FAMILY_ALL,
+ Ifindex: index,
+ Parent: parent,
+ }
+ req.AddData(msg)
+
+ msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWCHAIN)
+ if err != nil {
+ return nil, err
+ }
+
+ var res []Chain
+ for _, m := range msgs {
+ msg := nl.DeserializeTcMsg(m)
+
+ attrs, err := nl.ParseRouteAttr(m[msg.Len():])
+ if err != nil {
+ return nil, err
+ }
+
+ // skip chains from other interfaces
+ if link != nil && msg.Ifindex != index {
+ continue
+ }
+
+ var chain Chain
+ for _, attr := range attrs {
+ switch attr.Attr.Type {
+ case nl.TCA_CHAIN:
+ chain.Chain = native.Uint32(attr.Value)
+ chain.Parent = parent
+ }
+ }
+ res = append(res, chain)
+ }
+
+ return res, nil
+}
diff --git a/vendor/github.com/vishvananda/netlink/class.go b/vendor/github.com/vishvananda/netlink/class.go
index 10ceffed8bb..e686f674507 100644
--- a/vendor/github.com/vishvananda/netlink/class.go
+++ b/vendor/github.com/vishvananda/netlink/class.go
@@ -47,6 +47,7 @@ type ClassStatistics struct {
Basic *GnetStatsBasic
Queue *GnetStatsQueue
RateEst *GnetStatsRateEst
+ BasicHw *GnetStatsBasic // Hardward statistics added in kernel 4.20
}
// NewClassStatistics Construct a ClassStatistics struct which fields are all initialized by 0.
@@ -55,6 +56,7 @@ func NewClassStatistics() *ClassStatistics {
Basic: &GnetStatsBasic{},
Queue: &GnetStatsQueue{},
RateEst: &GnetStatsRateEst{},
+ BasicHw: &GnetStatsBasic{},
}
}
diff --git a/vendor/github.com/vishvananda/netlink/class_linux.go b/vendor/github.com/vishvananda/netlink/class_linux.go
index 6f542ba4e72..a82eb09de24 100644
--- a/vendor/github.com/vishvananda/netlink/class_linux.go
+++ b/vendor/github.com/vishvananda/netlink/class_linux.go
@@ -388,6 +388,11 @@ func parseTcStats2(data []byte) (*ClassStatistics, error) {
return nil, fmt.Errorf("Failed to parse ClassStatistics.RateEst with: %v\n%s",
err, hex.Dump(datum.Value))
}
+ case nl.TCA_STATS_BASIC_HW:
+ if err := parseGnetStats(datum.Value, stats.BasicHw); err != nil {
+ return nil, fmt.Errorf("Failed to parse ClassStatistics.BasicHw with: %v\n%s",
+ err, hex.Dump(datum.Value))
+ }
}
}
diff --git a/vendor/github.com/vishvananda/netlink/conntrack_linux.go b/vendor/github.com/vishvananda/netlink/conntrack_linux.go
index 03ea1b98fc2..ba022453b3b 100644
--- a/vendor/github.com/vishvananda/netlink/conntrack_linux.go
+++ b/vendor/github.com/vishvananda/netlink/conntrack_linux.go
@@ -55,10 +55,30 @@ func ConntrackTableFlush(table ConntrackTableType) error {
return pkgHandle.ConntrackTableFlush(table)
}
+// ConntrackCreate creates a new conntrack flow in the desired table
+// conntrack -I [table] Create a conntrack or expectation
+func ConntrackCreate(table ConntrackTableType, family InetFamily, flow *ConntrackFlow) error {
+ return pkgHandle.ConntrackCreate(table, family, flow)
+}
+
+// ConntrackUpdate updates an existing conntrack flow in the desired table using the handle
+// conntrack -U [table] Update a conntrack
+func ConntrackUpdate(table ConntrackTableType, family InetFamily, flow *ConntrackFlow) error {
+ return pkgHandle.ConntrackUpdate(table, family, flow)
+}
+
// ConntrackDeleteFilter deletes entries on the specified table on the base of the filter
// conntrack -D [table] parameters Delete conntrack or expectation
+//
+// Deprecated: use [ConntrackDeleteFilter] instead.
func ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter CustomConntrackFilter) (uint, error) {
- return pkgHandle.ConntrackDeleteFilter(table, family, filter)
+ return pkgHandle.ConntrackDeleteFilters(table, family, filter)
+}
+
+// ConntrackDeleteFilters deletes entries on the specified table matching any of the specified filters
+// conntrack -D [table] parameters Delete conntrack or expectation
+func ConntrackDeleteFilters(table ConntrackTableType, family InetFamily, filters ...CustomConntrackFilter) (uint, error) {
+ return pkgHandle.ConntrackDeleteFilters(table, family, filters...)
}
// ConntrackTableList returns the flow list of a table of a specific family using the netlink handle passed
@@ -87,9 +107,51 @@ func (h *Handle) ConntrackTableFlush(table ConntrackTableType) error {
return err
}
+// ConntrackCreate creates a new conntrack flow in the desired table using the handle
+// conntrack -I [table] Create a conntrack or expectation
+func (h *Handle) ConntrackCreate(table ConntrackTableType, family InetFamily, flow *ConntrackFlow) error {
+ req := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_NEW, unix.NLM_F_ACK|unix.NLM_F_CREATE)
+ attr, err := flow.toNlData()
+ if err != nil {
+ return err
+ }
+
+ for _, a := range attr {
+ req.AddData(a)
+ }
+
+ _, err = req.Execute(unix.NETLINK_NETFILTER, 0)
+ return err
+}
+
+// ConntrackUpdate updates an existing conntrack flow in the desired table using the handle
+// conntrack -U [table] Update a conntrack
+func (h *Handle) ConntrackUpdate(table ConntrackTableType, family InetFamily, flow *ConntrackFlow) error {
+ req := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_NEW, unix.NLM_F_ACK|unix.NLM_F_REPLACE)
+ attr, err := flow.toNlData()
+ if err != nil {
+ return err
+ }
+
+ for _, a := range attr {
+ req.AddData(a)
+ }
+
+ _, err = req.Execute(unix.NETLINK_NETFILTER, 0)
+ return err
+}
+
// ConntrackDeleteFilter deletes entries on the specified table on the base of the filter using the netlink handle passed
// conntrack -D [table] parameters Delete conntrack or expectation
+//
+// Deprecated: use [Handle.ConntrackDeleteFilters] instead.
func (h *Handle) ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter CustomConntrackFilter) (uint, error) {
+ return h.ConntrackDeleteFilters(table, family, filter)
+}
+
+// ConntrackDeleteFilters deletes entries on the specified table matching any of the specified filters using the netlink handle passed
+// conntrack -D [table] parameters Delete conntrack or expectation
+func (h *Handle) ConntrackDeleteFilters(table ConntrackTableType, family InetFamily, filters ...CustomConntrackFilter) (uint, error) {
res, err := h.dumpConntrackTable(table, family)
if err != nil {
return 0, err
@@ -98,12 +160,16 @@ func (h *Handle) ConntrackDeleteFilter(table ConntrackTableType, family InetFami
var matched uint
for _, dataRaw := range res {
flow := parseRawData(dataRaw)
- if match := filter.MatchConntrackFlow(flow); match {
- req2 := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_DELETE, unix.NLM_F_ACK)
- // skip the first 4 byte that are the netfilter header, the newConntrackRequest is adding it already
- req2.AddRawData(dataRaw[4:])
- req2.Execute(unix.NETLINK_NETFILTER, 0)
- matched++
+ for _, filter := range filters {
+ if match := filter.MatchConntrackFlow(flow); match {
+ req2 := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_DELETE, unix.NLM_F_ACK)
+ // skip the first 4 byte that are the netfilter header, the newConntrackRequest is adding it already
+ req2.AddRawData(dataRaw[4:])
+ req2.Execute(unix.NETLINK_NETFILTER, 0)
+ matched++
+ // flow is already deleted, no need to match on other filters and continue to the next flow.
+ break
+ }
}
}
@@ -128,10 +194,44 @@ func (h *Handle) dumpConntrackTable(table ConntrackTableType, family InetFamily)
return req.Execute(unix.NETLINK_NETFILTER, 0)
}
+// ProtoInfo wraps an L4-protocol structure - roughly corresponds to the
+// __nfct_protoinfo union found in libnetfilter_conntrack/include/internal/object.h.
+// Currently, only protocol names, and TCP state is supported.
+type ProtoInfo interface {
+ Protocol() string
+}
+
+// ProtoInfoTCP corresponds to the `tcp` struct of the __nfct_protoinfo union.
+// Only TCP state is currently supported.
+type ProtoInfoTCP struct {
+ State uint8
+}
+// Protocol returns "tcp".
+func (*ProtoInfoTCP) Protocol() string {return "tcp"}
+func (p *ProtoInfoTCP) toNlData() ([]*nl.RtAttr, error) {
+ ctProtoInfo := nl.NewRtAttr(unix.NLA_F_NESTED | nl.CTA_PROTOINFO, []byte{})
+ ctProtoInfoTCP := nl.NewRtAttr(unix.NLA_F_NESTED|nl.CTA_PROTOINFO_TCP, []byte{})
+ ctProtoInfoTCPState := nl.NewRtAttr(nl.CTA_PROTOINFO_TCP_STATE, nl.Uint8Attr(p.State))
+ ctProtoInfoTCP.AddChild(ctProtoInfoTCPState)
+ ctProtoInfo.AddChild(ctProtoInfoTCP)
+
+ return []*nl.RtAttr{ctProtoInfo}, nil
+}
+
+// ProtoInfoSCTP only supports the protocol name.
+type ProtoInfoSCTP struct {}
+// Protocol returns "sctp".
+func (*ProtoInfoSCTP) Protocol() string {return "sctp"}
+
+// ProtoInfoDCCP only supports the protocol name.
+type ProtoInfoDCCP struct {}
+// Protocol returns "dccp".
+func (*ProtoInfoDCCP) Protocol() string {return "dccp"}
+
// The full conntrack flow structure is very complicated and can be found in the file:
// http://git.netfilter.org/libnetfilter_conntrack/tree/include/internal/object.h
// For the time being, the structure below allows to parse and extract the base information of a flow
-type ipTuple struct {
+type IPTuple struct {
Bytes uint64
DstIP net.IP
DstPort uint16
@@ -141,28 +241,150 @@ type ipTuple struct {
SrcPort uint16
}
+// toNlData generates the inner fields of a nested tuple netlink datastructure
+// does not generate the "nested"-flagged outer message.
+func (t *IPTuple) toNlData(family uint8) ([]*nl.RtAttr, error) {
+
+ var srcIPsFlag, dstIPsFlag int
+ if family == nl.FAMILY_V4 {
+ srcIPsFlag = nl.CTA_IP_V4_SRC
+ dstIPsFlag = nl.CTA_IP_V4_DST
+ } else if family == nl.FAMILY_V6 {
+ srcIPsFlag = nl.CTA_IP_V6_SRC
+ dstIPsFlag = nl.CTA_IP_V6_DST
+ } else {
+ return []*nl.RtAttr{}, fmt.Errorf("couldn't generate netlink message for tuple due to unrecognized FamilyType '%d'", family)
+ }
+
+ ctTupleIP := nl.NewRtAttr(unix.NLA_F_NESTED|nl.CTA_TUPLE_IP, nil)
+ ctTupleIPSrc := nl.NewRtAttr(srcIPsFlag, t.SrcIP)
+ ctTupleIP.AddChild(ctTupleIPSrc)
+ ctTupleIPDst := nl.NewRtAttr(dstIPsFlag, t.DstIP)
+ ctTupleIP.AddChild(ctTupleIPDst)
+
+ ctTupleProto := nl.NewRtAttr(unix.NLA_F_NESTED|nl.CTA_TUPLE_PROTO, nil)
+ ctTupleProtoNum := nl.NewRtAttr(nl.CTA_PROTO_NUM, []byte{t.Protocol})
+ ctTupleProto.AddChild(ctTupleProtoNum)
+ ctTupleProtoSrcPort := nl.NewRtAttr(nl.CTA_PROTO_SRC_PORT, nl.BEUint16Attr(t.SrcPort))
+ ctTupleProto.AddChild(ctTupleProtoSrcPort)
+ ctTupleProtoDstPort := nl.NewRtAttr(nl.CTA_PROTO_DST_PORT, nl.BEUint16Attr(t.DstPort))
+ ctTupleProto.AddChild(ctTupleProtoDstPort, )
+
+ return []*nl.RtAttr{ctTupleIP, ctTupleProto}, nil
+}
+
type ConntrackFlow struct {
FamilyType uint8
- Forward ipTuple
- Reverse ipTuple
+ Forward IPTuple
+ Reverse IPTuple
Mark uint32
+ Zone uint16
TimeStart uint64
TimeStop uint64
TimeOut uint32
+ Labels []byte
+ ProtoInfo ProtoInfo
}
func (s *ConntrackFlow) String() string {
// conntrack cmd output:
- // udp 17 src=127.0.0.1 dst=127.0.0.1 sport=4001 dport=1234 packets=5 bytes=532 [UNREPLIED] src=127.0.0.1 dst=127.0.0.1 sport=1234 dport=4001 packets=10 bytes=1078 mark=0
+ // udp 17 src=127.0.0.1 dst=127.0.0.1 sport=4001 dport=1234 packets=5 bytes=532 [UNREPLIED] src=127.0.0.1 dst=127.0.0.1 sport=1234 dport=4001 packets=10 bytes=1078 mark=0 labels=0x00000000050012ac4202010000000000 zone=100
// start=2019-07-26 01:26:21.557800506 +0000 UTC stop=1970-01-01 00:00:00 +0000 UTC timeout=30(sec)
start := time.Unix(0, int64(s.TimeStart))
stop := time.Unix(0, int64(s.TimeStop))
timeout := int32(s.TimeOut)
- return fmt.Sprintf("%s\t%d src=%s dst=%s sport=%d dport=%d packets=%d bytes=%d\tsrc=%s dst=%s sport=%d dport=%d packets=%d bytes=%d mark=0x%x start=%v stop=%v timeout=%d(sec)",
+ res := fmt.Sprintf("%s\t%d src=%s dst=%s sport=%d dport=%d packets=%d bytes=%d\tsrc=%s dst=%s sport=%d dport=%d packets=%d bytes=%d mark=0x%x ",
nl.L4ProtoMap[s.Forward.Protocol], s.Forward.Protocol,
s.Forward.SrcIP.String(), s.Forward.DstIP.String(), s.Forward.SrcPort, s.Forward.DstPort, s.Forward.Packets, s.Forward.Bytes,
s.Reverse.SrcIP.String(), s.Reverse.DstIP.String(), s.Reverse.SrcPort, s.Reverse.DstPort, s.Reverse.Packets, s.Reverse.Bytes,
- s.Mark, start, stop, timeout)
+ s.Mark)
+ if len(s.Labels) > 0 {
+ res += fmt.Sprintf("labels=0x%x ", s.Labels)
+ }
+ if s.Zone != 0 {
+ res += fmt.Sprintf("zone=%d ", s.Zone)
+ }
+ res += fmt.Sprintf("start=%v stop=%v timeout=%d(sec)", start, stop, timeout)
+ return res
+}
+
+// toNlData generates netlink messages representing the flow.
+func (s *ConntrackFlow) toNlData() ([]*nl.RtAttr, error) {
+ var payload []*nl.RtAttr
+ // The message structure is built as follows:
+ //
+ //
+ //
+ //
+ //
+ //
+ //
+ //
+ //
+ //
+ //
+ //
+ //
+ //
+ //
+ //
+ //
+ //
+ //
+ //
+ //
+ //
+ //
+ //
+ //
+ //
+ //
+ //
+ //
+ //
+ //
+ //
+ //
+
+ // CTA_TUPLE_ORIG
+ ctTupleOrig := nl.NewRtAttr(unix.NLA_F_NESTED|nl.CTA_TUPLE_ORIG, nil)
+ forwardFlowAttrs, err := s.Forward.toNlData(s.FamilyType)
+ if err != nil {
+ return nil, fmt.Errorf("couldn't generate netlink data for conntrack forward flow: %w", err)
+ }
+ for _, a := range forwardFlowAttrs {
+ ctTupleOrig.AddChild(a)
+ }
+
+ // CTA_TUPLE_REPLY
+ ctTupleReply := nl.NewRtAttr(unix.NLA_F_NESTED|nl.CTA_TUPLE_REPLY, nil)
+ reverseFlowAttrs, err := s.Reverse.toNlData(s.FamilyType)
+ if err != nil {
+ return nil, fmt.Errorf("couldn't generate netlink data for conntrack reverse flow: %w", err)
+ }
+ for _, a := range reverseFlowAttrs {
+ ctTupleReply.AddChild(a)
+ }
+
+ ctMark := nl.NewRtAttr(nl.CTA_MARK, nl.BEUint32Attr(s.Mark))
+ ctTimeout := nl.NewRtAttr(nl.CTA_TIMEOUT, nl.BEUint32Attr(s.TimeOut))
+
+ payload = append(payload, ctTupleOrig, ctTupleReply, ctMark, ctTimeout)
+
+ if s.ProtoInfo != nil {
+ switch p := s.ProtoInfo.(type) {
+ case *ProtoInfoTCP:
+ attrs, err := p.toNlData()
+ if err != nil {
+ return nil, fmt.Errorf("couldn't generate netlink data for conntrack flow's TCP protoinfo: %w", err)
+ }
+ payload = append(payload, attrs...)
+ default:
+ return nil, errors.New("couldn't generate netlink data for conntrack: field 'ProtoInfo' only supports TCP or nil")
+ }
+ }
+
+ return payload, nil
}
// This method parse the ip tuple structure
@@ -172,7 +394,7 @@ func (s *ConntrackFlow) String() string {
//
//
//
-func parseIpTuple(reader *bytes.Reader, tpl *ipTuple) uint8 {
+func parseIpTuple(reader *bytes.Reader, tpl *IPTuple) uint8 {
for i := 0; i < 2; i++ {
_, t, _, v := parseNfAttrTLV(reader)
switch t {
@@ -191,7 +413,7 @@ func parseIpTuple(reader *bytes.Reader, tpl *ipTuple) uint8 {
tpl.Protocol = uint8(v[0])
}
// We only parse TCP & UDP headers. Skip the others.
- if tpl.Protocol != 6 && tpl.Protocol != 17 {
+ if tpl.Protocol != unix.IPPROTO_TCP && tpl.Protocol != unix.IPPROTO_UDP {
// skip the rest
bytesRemaining := protoInfoTotalLen - protoInfoBytesRead
reader.Seek(int64(bytesRemaining), seekCurrent)
@@ -240,9 +462,13 @@ func parseNfAttrTL(r *bytes.Reader) (isNested bool, attrType, len uint16) {
return isNested, attrType, len
}
-func skipNfAttrValue(r *bytes.Reader, len uint16) {
+// skipNfAttrValue seeks `r` past attr of length `len`.
+// Maintains buffer alignment.
+// Returns length of the seek performed.
+func skipNfAttrValue(r *bytes.Reader, len uint16) uint16 {
len = (len + nl.NLA_ALIGNTO - 1) & ^(nl.NLA_ALIGNTO - 1)
r.Seek(int64(len), seekCurrent)
+ return len
}
func parseBERaw16(r *bytes.Reader, v *uint16) {
@@ -257,6 +483,10 @@ func parseBERaw64(r *bytes.Reader, v *uint64) {
binary.Read(r, binary.BigEndian, v)
}
+func parseRaw32(r *bytes.Reader, v *uint32) {
+ binary.Read(r, nl.NativeEndian(), v)
+}
+
func parseByteAndPacketCounters(r *bytes.Reader) (bytes, packets uint64) {
for i := 0; i < 2; i++ {
switch _, t, _ := parseNfAttrTL(r); t {
@@ -296,6 +526,60 @@ func parseTimeStamp(r *bytes.Reader, readSize uint16) (tstart, tstop uint64) {
}
+func parseProtoInfoTCPState(r *bytes.Reader) (s uint8) {
+ binary.Read(r, binary.BigEndian, &s)
+ r.Seek(nl.SizeofNfattr - 1, seekCurrent)
+ return s
+}
+
+// parseProtoInfoTCP reads the entire nested protoinfo structure, but only parses the state attr.
+func parseProtoInfoTCP(r *bytes.Reader, attrLen uint16) (*ProtoInfoTCP) {
+ p := new(ProtoInfoTCP)
+ bytesRead := 0
+ for bytesRead < int(attrLen) {
+ _, t, l := parseNfAttrTL(r)
+ bytesRead += nl.SizeofNfattr
+
+ switch t {
+ case nl.CTA_PROTOINFO_TCP_STATE:
+ p.State = parseProtoInfoTCPState(r)
+ bytesRead += nl.SizeofNfattr
+ default:
+ bytesRead += int(skipNfAttrValue(r, l))
+ }
+ }
+
+ return p
+}
+
+func parseProtoInfo(r *bytes.Reader, attrLen uint16) (p ProtoInfo) {
+ bytesRead := 0
+ for bytesRead < int(attrLen) {
+ _, t, l := parseNfAttrTL(r)
+ bytesRead += nl.SizeofNfattr
+
+ switch t {
+ case nl.CTA_PROTOINFO_TCP:
+ p = parseProtoInfoTCP(r, l)
+ bytesRead += int(l)
+ // No inner fields of DCCP / SCTP currently supported.
+ case nl.CTA_PROTOINFO_DCCP:
+ p = new(ProtoInfoDCCP)
+ skipped := skipNfAttrValue(r, l)
+ bytesRead += int(skipped)
+ case nl.CTA_PROTOINFO_SCTP:
+ p = new(ProtoInfoSCTP)
+ skipped := skipNfAttrValue(r, l)
+ bytesRead += int(skipped)
+ default:
+ skipped := skipNfAttrValue(r, l)
+ bytesRead += int(skipped)
+ }
+ }
+
+ return p
+}
+
func parseTimeOut(r *bytes.Reader) (ttimeout uint32) {
parseBERaw32(r, &ttimeout)
return
@@ -306,6 +590,18 @@ func parseConnectionMark(r *bytes.Reader) (mark uint32) {
return
}
+func parseConnectionLabels(r *bytes.Reader) (label []byte) {
+ label = make([]byte, 16) // netfilter defines 128 bit labels value
+ binary.Read(r, nl.NativeEndian(), &label)
+ return
+}
+
+func parseConnectionZone(r *bytes.Reader) (zone uint16) {
+ parseBERaw16(r, &zone)
+ r.Seek(2, seekCurrent)
+ return
+}
+
func parseRawData(data []byte) *ConntrackFlow {
s := &ConntrackFlow{}
// First there is the Nfgenmsg header
@@ -343,7 +639,7 @@ func parseRawData(data []byte) *ConntrackFlow {
case nl.CTA_TIMESTAMP:
s.TimeStart, s.TimeStop = parseTimeStamp(reader, l)
case nl.CTA_PROTOINFO:
- skipNfAttrValue(reader, l)
+ s.ProtoInfo = parseProtoInfo(reader, l)
default:
skipNfAttrValue(reader, l)
}
@@ -351,10 +647,14 @@ func parseRawData(data []byte) *ConntrackFlow {
switch t {
case nl.CTA_MARK:
s.Mark = parseConnectionMark(reader)
+ case nl.CTA_LABELS:
+ s.Labels = parseConnectionLabels(reader)
case nl.CTA_TIMEOUT:
s.TimeOut = parseTimeOut(reader)
- case nl.CTA_STATUS, nl.CTA_USE, nl.CTA_ID:
+ case nl.CTA_ID, nl.CTA_STATUS, nl.CTA_USE:
skipNfAttrValue(reader, l)
+ case nl.CTA_ZONE:
+ s.Zone = parseConnectionZone(reader)
default:
skipNfAttrValue(reader, l)
}
@@ -399,16 +699,18 @@ func parseRawData(data []byte) *ConntrackFlow {
type ConntrackFilterType uint8
const (
- ConntrackOrigSrcIP = iota // -orig-src ip Source address from original direction
- ConntrackOrigDstIP // -orig-dst ip Destination address from original direction
- ConntrackReplySrcIP // --reply-src ip Reply Source IP
- ConntrackReplyDstIP // --reply-dst ip Reply Destination IP
- ConntrackReplyAnyIP // Match source or destination reply IP
- ConntrackOrigSrcPort // --orig-port-src port Source port in original direction
- ConntrackOrigDstPort // --orig-port-dst port Destination port in original direction
- ConntrackNatSrcIP = ConntrackReplySrcIP // deprecated use instead ConntrackReplySrcIP
- ConntrackNatDstIP = ConntrackReplyDstIP // deprecated use instead ConntrackReplyDstIP
- ConntrackNatAnyIP = ConntrackReplyAnyIP // deprecated use instead ConntrackReplyAnyIP
+ ConntrackOrigSrcIP = iota // -orig-src ip Source address from original direction
+ ConntrackOrigDstIP // -orig-dst ip Destination address from original direction
+ ConntrackReplySrcIP // --reply-src ip Reply Source IP
+ ConntrackReplyDstIP // --reply-dst ip Reply Destination IP
+ ConntrackReplyAnyIP // Match source or destination reply IP
+ ConntrackOrigSrcPort // --orig-port-src port Source port in original direction
+ ConntrackOrigDstPort // --orig-port-dst port Destination port in original direction
+ ConntrackMatchLabels // --label label1,label2 Labels used in entry
+ ConntrackUnmatchLabels // --label label1,label2 Labels not used in entry
+ ConntrackNatSrcIP = ConntrackReplySrcIP // deprecated use instead ConntrackReplySrcIP
+ ConntrackNatDstIP = ConntrackReplyDstIP // deprecated use instead ConntrackReplyDstIP
+ ConntrackNatAnyIP = ConntrackReplyAnyIP // deprecated use instead ConntrackReplyAnyIP
)
type CustomConntrackFilter interface {
@@ -421,6 +723,8 @@ type ConntrackFilter struct {
ipNetFilter map[ConntrackFilterType]*net.IPNet
portFilter map[ConntrackFilterType]uint16
protoFilter uint8
+ labelFilter map[ConntrackFilterType][][]byte
+ zoneFilter *uint16
}
// AddIPNet adds a IP subnet to the conntrack filter
@@ -474,10 +778,43 @@ func (f *ConntrackFilter) AddProtocol(proto uint8) error {
return nil
}
+// AddLabels adds the provided list (zero or more) of labels to the conntrack filter
+// ConntrackFilterType here can be either:
+// 1. ConntrackMatchLabels: This matches every flow that has a label value (len(flow.Labels) > 0)
+// against the list of provided labels. If `flow.Labels` contains ALL the provided labels
+// it is considered a match. This can be used when you want to match flows that contain
+// one or more labels.
+// 2. ConntrackUnmatchLabels: This matches every flow that has a label value (len(flow.Labels) > 0)
+// against the list of provided labels. If `flow.Labels` does NOT contain ALL the provided labels
+// it is considered a match. This can be used when you want to match flows that don't contain
+// one or more labels.
+func (f *ConntrackFilter) AddLabels(tp ConntrackFilterType, labels [][]byte) error {
+ if len(labels) == 0 {
+ return errors.New("Invalid length for provided labels")
+ }
+ if f.labelFilter == nil {
+ f.labelFilter = make(map[ConntrackFilterType][][]byte)
+ }
+ if _, ok := f.labelFilter[tp]; ok {
+ return errors.New("Filter attribute already present")
+ }
+ f.labelFilter[tp] = labels
+ return nil
+}
+
+// AddZone adds a zone to the conntrack filter
+func (f *ConntrackFilter) AddZone(zone uint16) error {
+ if f.zoneFilter != nil {
+ return errors.New("Filter attribute already present")
+ }
+ f.zoneFilter = &zone
+ return nil
+}
+
// MatchConntrackFlow applies the filter to the flow and returns true if the flow matches the filter
// false otherwise
func (f *ConntrackFilter) MatchConntrackFlow(flow *ConntrackFlow) bool {
- if len(f.ipNetFilter) == 0 && len(f.portFilter) == 0 && f.protoFilter == 0 {
+ if len(f.ipNetFilter) == 0 && len(f.portFilter) == 0 && f.protoFilter == 0 && len(f.labelFilter) == 0 && f.zoneFilter == nil {
// empty filter always not match
return false
}
@@ -488,6 +825,11 @@ func (f *ConntrackFilter) MatchConntrackFlow(flow *ConntrackFlow) bool {
return false
}
+ // Conntrack zone filter
+ if f.zoneFilter != nil && *f.zoneFilter != flow.Zone {
+ return false
+ }
+
match := true
// IP conntrack filter
@@ -531,6 +873,29 @@ func (f *ConntrackFilter) MatchConntrackFlow(flow *ConntrackFlow) bool {
}
}
+ // Label filter
+ if len(f.labelFilter) > 0 {
+ if len(flow.Labels) > 0 {
+ // --label label1,label2 in conn entry;
+ // every label passed should be contained in flow.Labels for a match to be true
+ if elem, found := f.labelFilter[ConntrackMatchLabels]; match && found {
+ for _, label := range elem {
+ match = match && (bytes.Contains(flow.Labels, label))
+ }
+ }
+ // --label label1,label2 in conn entry;
+ // every label passed should be not contained in flow.Labels for a match to be true
+ if elem, found := f.labelFilter[ConntrackUnmatchLabels]; match && found {
+ for _, label := range elem {
+ match = match && !(bytes.Contains(flow.Labels, label))
+ }
+ }
+ } else {
+ // flow doesn't contain labels, so it doesn't contain or notContain any provided matches
+ match = false
+ }
+ }
+
return match
}
diff --git a/vendor/github.com/vishvananda/netlink/conntrack_unspecified.go b/vendor/github.com/vishvananda/netlink/conntrack_unspecified.go
index af7af799e77..0bfdf422d1e 100644
--- a/vendor/github.com/vishvananda/netlink/conntrack_unspecified.go
+++ b/vendor/github.com/vishvananda/netlink/conntrack_unspecified.go
@@ -11,6 +11,9 @@ type InetFamily uint8
// ConntrackFlow placeholder
type ConntrackFlow struct{}
+// CustomConntrackFilter placeholder
+type CustomConntrackFilter struct{}
+
// ConntrackFilter placeholder
type ConntrackFilter struct{}
@@ -29,10 +32,18 @@ func ConntrackTableFlush(table ConntrackTableType) error {
// ConntrackDeleteFilter deletes entries on the specified table on the base of the filter
// conntrack -D [table] parameters Delete conntrack or expectation
+//
+// Deprecated: use [ConntrackDeleteFilter] instead.
func ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter *ConntrackFilter) (uint, error) {
return 0, ErrNotImplemented
}
+// ConntrackDeleteFilters deletes entries on the specified table matching any of the specified filters
+// conntrack -D [table] parameters Delete conntrack or expectation
+func ConntrackDeleteFilters(table ConntrackTableType, family InetFamily, filters ...CustomConntrackFilter) (uint, error) {
+ return 0, ErrNotImplemented
+}
+
// ConntrackTableList returns the flow list of a table of a specific family using the netlink handle passed
// conntrack -L [table] [options] List conntrack or expectation table
func (h *Handle) ConntrackTableList(table ConntrackTableType, family InetFamily) ([]*ConntrackFlow, error) {
@@ -48,6 +59,14 @@ func (h *Handle) ConntrackTableFlush(table ConntrackTableType) error {
// ConntrackDeleteFilter deletes entries on the specified table on the base of the filter using the netlink handle passed
// conntrack -D [table] parameters Delete conntrack or expectation
+//
+// Deprecated: use [Handle.ConntrackDeleteFilters] instead.
func (h *Handle) ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter *ConntrackFilter) (uint, error) {
return 0, ErrNotImplemented
}
+
+// ConntrackDeleteFilters deletes entries on the specified table matching any of the specified filters using the netlink handle passed
+// conntrack -D [table] parameters Delete conntrack or expectation
+func (h *Handle) ConntrackDeleteFilters(table ConntrackTableType, family InetFamily, filters ...CustomConntrackFilter) (uint, error) {
+ return 0, ErrNotImplemented
+}
diff --git a/vendor/github.com/vishvananda/netlink/devlink_linux.go b/vendor/github.com/vishvananda/netlink/devlink_linux.go
index 358b232c6c5..d98801dbbe5 100644
--- a/vendor/github.com/vishvananda/netlink/devlink_linux.go
+++ b/vendor/github.com/vishvananda/netlink/devlink_linux.go
@@ -84,6 +84,270 @@ type DevlinkDeviceInfo struct {
FwUndi string
}
+// DevlinkResource represents a device resource
+type DevlinkResource struct {
+ Name string
+ ID uint64
+ Size uint64
+ SizeNew uint64
+ SizeMin uint64
+ SizeMax uint64
+ SizeGranularity uint64
+ PendingChange bool
+ Unit uint8
+ SizeValid bool
+ OCCValid bool
+ OCCSize uint64
+ Parent *DevlinkResource
+ Children []DevlinkResource
+}
+
+// parseAttributes parses provided Netlink Attributes and populates DevlinkResource, returns error if occured
+func (dlr *DevlinkResource) parseAttributes(attrs map[uint16]syscall.NetlinkRouteAttr) error {
+ var attr syscall.NetlinkRouteAttr
+ var ok bool
+
+ // mandatory attributes
+ attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_ID]
+ if !ok {
+ return fmt.Errorf("missing resource id")
+ }
+ dlr.ID = native.Uint64(attr.Value)
+
+ attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_NAME]
+ if !ok {
+ return fmt.Errorf("missing resource name")
+ }
+ dlr.Name = nl.BytesToString(attr.Value)
+
+ attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_SIZE]
+ if !ok {
+ return fmt.Errorf("missing resource size")
+ }
+ dlr.Size = native.Uint64(attr.Value)
+
+ attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_SIZE_GRAN]
+ if !ok {
+ return fmt.Errorf("missing resource size granularity")
+ }
+ dlr.SizeGranularity = native.Uint64(attr.Value)
+
+ attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_UNIT]
+ if !ok {
+ return fmt.Errorf("missing resource unit")
+ }
+ dlr.Unit = uint8(attr.Value[0])
+
+ attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_SIZE_MIN]
+ if !ok {
+ return fmt.Errorf("missing resource size min")
+ }
+ dlr.SizeMin = native.Uint64(attr.Value)
+
+ attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_SIZE_MAX]
+ if !ok {
+ return fmt.Errorf("missing resource size max")
+ }
+ dlr.SizeMax = native.Uint64(attr.Value)
+
+ // optional attributes
+ attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_OCC]
+ if ok {
+ dlr.OCCSize = native.Uint64(attr.Value)
+ dlr.OCCValid = true
+ }
+
+ attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_SIZE_VALID]
+ if ok {
+ dlr.SizeValid = uint8(attr.Value[0]) != 0
+ }
+
+ dlr.SizeNew = dlr.Size
+ attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_SIZE_NEW]
+ if ok {
+ dlr.SizeNew = native.Uint64(attr.Value)
+ }
+
+ dlr.PendingChange = dlr.Size != dlr.SizeNew
+
+ attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_LIST]
+ if ok {
+ // handle nested resoruces recursively
+ subResources, err := nl.ParseRouteAttr(attr.Value)
+ if err != nil {
+ return err
+ }
+
+ for _, subresource := range subResources {
+ resource := DevlinkResource{Parent: dlr}
+ attrs, err := nl.ParseRouteAttrAsMap(subresource.Value)
+ if err != nil {
+ return err
+ }
+ err = resource.parseAttributes(attrs)
+ if err != nil {
+ return fmt.Errorf("failed to parse child resource, parent:%s. %w", dlr.Name, err)
+ }
+ dlr.Children = append(dlr.Children, resource)
+ }
+ }
+ return nil
+}
+
+// DevlinkResources represents all devlink resources of a devlink device
+type DevlinkResources struct {
+ Bus string
+ Device string
+ Resources []DevlinkResource
+}
+
+// parseAttributes parses provided Netlink Attributes and populates DevlinkResources, returns error if occured
+func (dlrs *DevlinkResources) parseAttributes(attrs map[uint16]syscall.NetlinkRouteAttr) error {
+ var attr syscall.NetlinkRouteAttr
+ var ok bool
+
+ // Bus
+ attr, ok = attrs[nl.DEVLINK_ATTR_BUS_NAME]
+ if !ok {
+ return fmt.Errorf("missing bus name")
+ }
+ dlrs.Bus = nl.BytesToString(attr.Value)
+
+ // Device
+ attr, ok = attrs[nl.DEVLINK_ATTR_DEV_NAME]
+ if !ok {
+ return fmt.Errorf("missing device name")
+ }
+ dlrs.Device = nl.BytesToString(attr.Value)
+
+ // Resource List
+ attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_LIST]
+ if !ok {
+ return fmt.Errorf("missing resource list")
+ }
+
+ resourceAttrs, err := nl.ParseRouteAttr(attr.Value)
+ if err != nil {
+ return err
+ }
+
+ for _, resourceAttr := range resourceAttrs {
+ resource := DevlinkResource{}
+ attrs, err := nl.ParseRouteAttrAsMap(resourceAttr.Value)
+ if err != nil {
+ return err
+ }
+ err = resource.parseAttributes(attrs)
+ if err != nil {
+ return fmt.Errorf("failed to parse root resoruces, %w", err)
+ }
+ dlrs.Resources = append(dlrs.Resources, resource)
+ }
+
+ return nil
+}
+
+// DevlinkParam represents parameter of the device
+type DevlinkParam struct {
+ Name string
+ IsGeneric bool
+ Type uint8 // possible values are in nl.DEVLINK_PARAM_TYPE_* constants
+ Values []DevlinkParamValue
+}
+
+// DevlinkParamValue contains values of the parameter
+// Data field contains specific type which can be casted by unsing info from the DevlinkParam.Type field
+type DevlinkParamValue struct {
+ rawData []byte
+ Data interface{}
+ CMODE uint8 // possible values are in nl.DEVLINK_PARAM_CMODE_* constants
+}
+
+// parseAttributes parses provided Netlink Attributes and populates DevlinkParam, returns error if occured
+func (dlp *DevlinkParam) parseAttributes(attrs []syscall.NetlinkRouteAttr) error {
+ var valuesList [][]syscall.NetlinkRouteAttr
+ for _, attr := range attrs {
+ switch attr.Attr.Type {
+ case nl.DEVLINK_ATTR_PARAM:
+ nattrs, err := nl.ParseRouteAttr(attr.Value)
+ if err != nil {
+ return err
+ }
+ for _, nattr := range nattrs {
+ switch nattr.Attr.Type {
+ case nl.DEVLINK_ATTR_PARAM_NAME:
+ dlp.Name = nl.BytesToString(nattr.Value)
+ case nl.DEVLINK_ATTR_PARAM_GENERIC:
+ dlp.IsGeneric = true
+ case nl.DEVLINK_ATTR_PARAM_TYPE:
+ if len(nattr.Value) == 1 {
+ dlp.Type = nattr.Value[0]
+ }
+ case nl.DEVLINK_ATTR_PARAM_VALUES_LIST:
+ nnattrs, err := nl.ParseRouteAttr(nattr.Value)
+ if err != nil {
+ return err
+ }
+ valuesList = append(valuesList, nnattrs)
+ }
+ }
+ }
+ }
+ for _, valAttr := range valuesList {
+ v := DevlinkParamValue{}
+ if err := v.parseAttributes(valAttr, dlp.Type); err != nil {
+ return err
+ }
+ dlp.Values = append(dlp.Values, v)
+ }
+ return nil
+}
+
+func (dlpv *DevlinkParamValue) parseAttributes(attrs []syscall.NetlinkRouteAttr, paramType uint8) error {
+ for _, attr := range attrs {
+ nattrs, err := nl.ParseRouteAttr(attr.Value)
+ if err != nil {
+ return err
+ }
+ var rawData []byte
+ for _, nattr := range nattrs {
+ switch nattr.Attr.Type {
+ case nl.DEVLINK_ATTR_PARAM_VALUE_DATA:
+ rawData = nattr.Value
+ case nl.DEVLINK_ATTR_PARAM_VALUE_CMODE:
+ if len(nattr.Value) == 1 {
+ dlpv.CMODE = nattr.Value[0]
+ }
+ }
+ }
+ switch paramType {
+ case nl.DEVLINK_PARAM_TYPE_U8:
+ dlpv.Data = uint8(0)
+ if rawData != nil && len(rawData) == 1 {
+ dlpv.Data = uint8(rawData[0])
+ }
+ case nl.DEVLINK_PARAM_TYPE_U16:
+ dlpv.Data = uint16(0)
+ if rawData != nil {
+ dlpv.Data = native.Uint16(rawData)
+ }
+ case nl.DEVLINK_PARAM_TYPE_U32:
+ dlpv.Data = uint32(0)
+ if rawData != nil {
+ dlpv.Data = native.Uint32(rawData)
+ }
+ case nl.DEVLINK_PARAM_TYPE_STRING:
+ dlpv.Data = ""
+ if rawData != nil {
+ dlpv.Data = nl.BytesToString(rawData)
+ }
+ case nl.DEVLINK_PARAM_TYPE_BOOL:
+ dlpv.Data = rawData != nil
+ }
+ }
+ return nil
+}
+
func parseDevLinkDeviceList(msgs [][]byte) ([]*DevlinkDevice, error) {
devices := make([]*DevlinkDevice, 0, len(msgs))
for _, m := range msgs {
@@ -443,6 +707,173 @@ func (h *Handle) DevLinkGetPortByIndex(Bus string, Device string, PortIndex uint
return port, err
}
+// DevlinkGetDeviceResources returns devlink device resources
+func DevlinkGetDeviceResources(bus string, device string) (*DevlinkResources, error) {
+ return pkgHandle.DevlinkGetDeviceResources(bus, device)
+}
+
+// DevlinkGetDeviceResources returns devlink device resources
+func (h *Handle) DevlinkGetDeviceResources(bus string, device string) (*DevlinkResources, error) {
+ _, req, err := h.createCmdReq(nl.DEVLINK_CMD_RESOURCE_DUMP, bus, device)
+ if err != nil {
+ return nil, err
+ }
+
+ respmsg, err := req.Execute(unix.NETLINK_GENERIC, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ var resources DevlinkResources
+ for _, m := range respmsg {
+ attrs, err := nl.ParseRouteAttrAsMap(m[nl.SizeofGenlmsg:])
+ if err != nil {
+ return nil, err
+ }
+ resources.parseAttributes(attrs)
+ }
+
+ return &resources, nil
+}
+
+// DevlinkGetDeviceParams returns parameters for devlink device
+// Equivalent to: `devlink dev param show /`
+func (h *Handle) DevlinkGetDeviceParams(bus string, device string) ([]*DevlinkParam, error) {
+ _, req, err := h.createCmdReq(nl.DEVLINK_CMD_PARAM_GET, bus, device)
+ if err != nil {
+ return nil, err
+ }
+ req.Flags |= unix.NLM_F_DUMP
+ respmsg, err := req.Execute(unix.NETLINK_GENERIC, 0)
+ if err != nil {
+ return nil, err
+ }
+ var params []*DevlinkParam
+ for _, m := range respmsg {
+ attrs, err := nl.ParseRouteAttr(m[nl.SizeofGenlmsg:])
+ if err != nil {
+ return nil, err
+ }
+ p := &DevlinkParam{}
+ if err := p.parseAttributes(attrs); err != nil {
+ return nil, err
+ }
+ params = append(params, p)
+ }
+
+ return params, nil
+}
+
+// DevlinkGetDeviceParams returns parameters for devlink device
+// Equivalent to: `devlink dev param show /`
+func DevlinkGetDeviceParams(bus string, device string) ([]*DevlinkParam, error) {
+ return pkgHandle.DevlinkGetDeviceParams(bus, device)
+}
+
+// DevlinkGetDeviceParamByName returns specific parameter for devlink device
+// Equivalent to: `devlink dev param show / name `
+func (h *Handle) DevlinkGetDeviceParamByName(bus string, device string, param string) (*DevlinkParam, error) {
+ _, req, err := h.createCmdReq(nl.DEVLINK_CMD_PARAM_GET, bus, device)
+ if err != nil {
+ return nil, err
+ }
+ req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PARAM_NAME, nl.ZeroTerminated(param)))
+ respmsg, err := req.Execute(unix.NETLINK_GENERIC, 0)
+ if err != nil {
+ return nil, err
+ }
+ if len(respmsg) == 0 {
+ return nil, fmt.Errorf("unexpected response")
+ }
+ attrs, err := nl.ParseRouteAttr(respmsg[0][nl.SizeofGenlmsg:])
+ if err != nil {
+ return nil, err
+ }
+ p := &DevlinkParam{}
+ if err := p.parseAttributes(attrs); err != nil {
+ return nil, err
+ }
+ return p, nil
+}
+
+// DevlinkGetDeviceParamByName returns specific parameter for devlink device
+// Equivalent to: `devlink dev param show / name `
+func DevlinkGetDeviceParamByName(bus string, device string, param string) (*DevlinkParam, error) {
+ return pkgHandle.DevlinkGetDeviceParamByName(bus, device, param)
+}
+
+// DevlinkSetDeviceParam set specific parameter for devlink device
+// Equivalent to: `devlink dev param set / name cmode value `
+// cmode argument should contain valid cmode value as uint8, modes are define in nl.DEVLINK_PARAM_CMODE_* constants
+// value argument should have one of the following types: uint8, uint16, uint32, string, bool
+func (h *Handle) DevlinkSetDeviceParam(bus string, device string, param string, cmode uint8, value interface{}) error {
+ // retrive the param type
+ p, err := h.DevlinkGetDeviceParamByName(bus, device, param)
+ if err != nil {
+ return fmt.Errorf("failed to get device param: %v", err)
+ }
+ paramType := p.Type
+
+ _, req, err := h.createCmdReq(nl.DEVLINK_CMD_PARAM_SET, bus, device)
+ if err != nil {
+ return err
+ }
+ req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PARAM_TYPE, nl.Uint8Attr(paramType)))
+ req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PARAM_NAME, nl.ZeroTerminated(param)))
+ req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PARAM_VALUE_CMODE, nl.Uint8Attr(cmode)))
+
+ var valueAsBytes []byte
+ switch paramType {
+ case nl.DEVLINK_PARAM_TYPE_U8:
+ v, ok := value.(uint8)
+ if !ok {
+ return fmt.Errorf("unepected value type required: uint8, actual: %T", value)
+ }
+ valueAsBytes = nl.Uint8Attr(v)
+ case nl.DEVLINK_PARAM_TYPE_U16:
+ v, ok := value.(uint16)
+ if !ok {
+ return fmt.Errorf("unepected value type required: uint16, actual: %T", value)
+ }
+ valueAsBytes = nl.Uint16Attr(v)
+ case nl.DEVLINK_PARAM_TYPE_U32:
+ v, ok := value.(uint32)
+ if !ok {
+ return fmt.Errorf("unepected value type required: uint32, actual: %T", value)
+ }
+ valueAsBytes = nl.Uint32Attr(v)
+ case nl.DEVLINK_PARAM_TYPE_STRING:
+ v, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("unepected value type required: string, actual: %T", value)
+ }
+ valueAsBytes = nl.ZeroTerminated(v)
+ case nl.DEVLINK_PARAM_TYPE_BOOL:
+ v, ok := value.(bool)
+ if !ok {
+ return fmt.Errorf("unepected value type required: bool, actual: %T", value)
+ }
+ if v {
+ valueAsBytes = []byte{}
+ }
+ default:
+ return fmt.Errorf("unsupported parameter type: %d", paramType)
+ }
+ if valueAsBytes != nil {
+ req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PARAM_VALUE_DATA, valueAsBytes))
+ }
+ _, err = req.Execute(unix.NETLINK_GENERIC, 0)
+ return err
+}
+
+// DevlinkSetDeviceParam set specific parameter for devlink device
+// Equivalent to: `devlink dev param set / name cmode value `
+// cmode argument should contain valid cmode value as uint8, modes are define in nl.DEVLINK_PARAM_CMODE_* constants
+// value argument should have one of the following types: uint8, uint16, uint32, string, bool
+func DevlinkSetDeviceParam(bus string, device string, param string, cmode uint8, value interface{}) error {
+ return pkgHandle.DevlinkSetDeviceParam(bus, device, param, cmode, value)
+}
+
// DevLinkGetPortByIndex provides a pointer to devlink portand nil error,
// otherwise returns an error code.
func DevLinkGetPortByIndex(Bus string, Device string, PortIndex uint32) (*DevlinkPort, error) {
diff --git a/vendor/github.com/vishvananda/netlink/filter.go b/vendor/github.com/vishvananda/netlink/filter.go
index 2d798b0fbbc..84e1ca7a49b 100644
--- a/vendor/github.com/vishvananda/netlink/filter.go
+++ b/vendor/github.com/vishvananda/netlink/filter.go
@@ -19,6 +19,7 @@ type FilterAttrs struct {
Parent uint32
Priority uint16 // lower is higher priority
Protocol uint16 // unix.ETH_P_*
+ Chain *uint32
}
func (q FilterAttrs) String() string {
@@ -27,6 +28,11 @@ func (q FilterAttrs) String() string {
type TcAct int32
+const (
+ TC_ACT_EXT_SHIFT = 28
+ TC_ACT_EXT_VAL_MASK = (1 << TC_ACT_EXT_SHIFT) - 1
+)
+
const (
TC_ACT_UNSPEC TcAct = -1
TC_ACT_OK TcAct = 0
@@ -40,6 +46,22 @@ const (
TC_ACT_JUMP TcAct = 0x10000000
)
+func getTcActExt(local int32) int32 {
+ return local << TC_ACT_EXT_SHIFT
+}
+
+func getTcActGotoChain() TcAct {
+ return TcAct(getTcActExt(2))
+}
+
+func getTcActExtOpcode(combined int32) int32 {
+ return combined & (^TC_ACT_EXT_VAL_MASK)
+}
+
+func TcActExtCmp(combined int32, opcode int32) bool {
+ return getTcActExtOpcode(combined) == opcode
+}
+
func (a TcAct) String() string {
switch a {
case TC_ACT_UNSPEC:
@@ -63,6 +85,9 @@ func (a TcAct) String() string {
case TC_ACT_JUMP:
return "jump"
}
+ if TcActExtCmp(int32(a), int32(getTcActGotoChain())) {
+ return "goto"
+ }
return fmt.Sprintf("0x%x", int32(a))
}
@@ -93,17 +118,32 @@ func (a TcPolAct) String() string {
}
type ActionAttrs struct {
- Index int
- Capab int
- Action TcAct
- Refcnt int
- Bindcnt int
+ Index int
+ Capab int
+ Action TcAct
+ Refcnt int
+ Bindcnt int
+ Statistics *ActionStatistic
+ Timestamp *ActionTimestamp
}
func (q ActionAttrs) String() string {
return fmt.Sprintf("{Index: %d, Capab: %x, Action: %s, Refcnt: %d, Bindcnt: %d}", q.Index, q.Capab, q.Action.String(), q.Refcnt, q.Bindcnt)
}
+type ActionTimestamp struct {
+ Installed uint64
+ LastUsed uint64
+ Expires uint64
+ FirstUsed uint64
+}
+
+func (t ActionTimestamp) String() string {
+ return fmt.Sprintf("Installed %d LastUsed %d Expires %d FirstUsed %d", t.Installed, t.LastUsed, t.Expires, t.FirstUsed)
+}
+
+type ActionStatistic ClassStatistics
+
// Action represents an action in any supported filter.
type Action interface {
Attrs() *ActionAttrs
@@ -112,6 +152,7 @@ type Action interface {
type GenericAction struct {
ActionAttrs
+ Chain int32
}
func (action *GenericAction) Type() string {
@@ -275,6 +316,7 @@ type SkbEditAction struct {
PType *uint16
Priority *uint32
Mark *uint32
+ Mask *uint32
}
func (action *SkbEditAction) Type() string {
@@ -348,6 +390,7 @@ type FwFilter struct {
InDev string
Mask uint32
Police *PoliceAction
+ Actions []Action
}
func (filter *FwFilter) Attrs() *FilterAttrs {
@@ -390,3 +433,30 @@ func (filter *GenericFilter) Attrs() *FilterAttrs {
func (filter *GenericFilter) Type() string {
return filter.FilterType
}
+
+type PeditAction struct {
+ ActionAttrs
+ Proto uint8
+ SrcMacAddr net.HardwareAddr
+ DstMacAddr net.HardwareAddr
+ SrcIP net.IP
+ DstIP net.IP
+ SrcPort uint16
+ DstPort uint16
+}
+
+func (p *PeditAction) Attrs() *ActionAttrs {
+ return &p.ActionAttrs
+}
+
+func (p *PeditAction) Type() string {
+ return "pedit"
+}
+
+func NewPeditAction() *PeditAction {
+ return &PeditAction{
+ ActionAttrs: ActionAttrs{
+ Action: TC_ACT_PIPE,
+ },
+ }
+}
diff --git a/vendor/github.com/vishvananda/netlink/filter_linux.go b/vendor/github.com/vishvananda/netlink/filter_linux.go
index 4c6d1cf7d7f..87cd18f8e41 100644
--- a/vendor/github.com/vishvananda/netlink/filter_linux.go
+++ b/vendor/github.com/vishvananda/netlink/filter_linux.go
@@ -41,6 +41,7 @@ type U32 struct {
RedirIndex int
Sel *TcU32Sel
Actions []Action
+ Police *PoliceAction
}
func (filter *U32) Attrs() *FilterAttrs {
@@ -64,6 +65,11 @@ type Flower struct {
EncSrcIPMask net.IPMask
EncDestPort uint16
EncKeyId uint32
+ SkipHw bool
+ SkipSw bool
+ IPProto *nl.IPProto
+ DestPort uint16
+ SrcPort uint16
Actions []Action
}
@@ -129,6 +135,39 @@ func (filter *Flower) encode(parent *nl.RtAttr) error {
if filter.EncKeyId != 0 {
parent.AddRtAttr(nl.TCA_FLOWER_KEY_ENC_KEY_ID, htonl(filter.EncKeyId))
}
+ if filter.IPProto != nil {
+ ipproto := *filter.IPProto
+ parent.AddRtAttr(nl.TCA_FLOWER_KEY_IP_PROTO, ipproto.Serialize())
+ if filter.SrcPort != 0 {
+ switch ipproto {
+ case nl.IPPROTO_TCP:
+ parent.AddRtAttr(nl.TCA_FLOWER_KEY_TCP_SRC, htons(filter.SrcPort))
+ case nl.IPPROTO_UDP:
+ parent.AddRtAttr(nl.TCA_FLOWER_KEY_UDP_SRC, htons(filter.SrcPort))
+ case nl.IPPROTO_SCTP:
+ parent.AddRtAttr(nl.TCA_FLOWER_KEY_SCTP_SRC, htons(filter.SrcPort))
+ }
+ }
+ if filter.DestPort != 0 {
+ switch ipproto {
+ case nl.IPPROTO_TCP:
+ parent.AddRtAttr(nl.TCA_FLOWER_KEY_TCP_DST, htons(filter.DestPort))
+ case nl.IPPROTO_UDP:
+ parent.AddRtAttr(nl.TCA_FLOWER_KEY_UDP_DST, htons(filter.DestPort))
+ case nl.IPPROTO_SCTP:
+ parent.AddRtAttr(nl.TCA_FLOWER_KEY_SCTP_DST, htons(filter.DestPort))
+ }
+ }
+ }
+
+ var flags uint32 = 0
+ if filter.SkipHw {
+ flags |= nl.TCA_CLS_FLAGS_SKIP_HW
+ }
+ if filter.SkipSw {
+ flags |= nl.TCA_CLS_FLAGS_SKIP_SW
+ }
+ parent.AddRtAttr(nl.TCA_FLOWER_FLAGS, htonl(flags))
actionsAttr := parent.AddRtAttr(nl.TCA_FLOWER_ACT, nil)
if err := EncodeActions(actionsAttr, filter.Actions); err != nil {
@@ -162,6 +201,14 @@ func (filter *Flower) decode(data []syscall.NetlinkRouteAttr) error {
filter.EncDestPort = ntohs(datum.Value)
case nl.TCA_FLOWER_KEY_ENC_KEY_ID:
filter.EncKeyId = ntohl(datum.Value)
+ case nl.TCA_FLOWER_KEY_IP_PROTO:
+ val := new(nl.IPProto)
+ *val = nl.IPProto(datum.Value[0])
+ filter.IPProto = val
+ case nl.TCA_FLOWER_KEY_TCP_SRC, nl.TCA_FLOWER_KEY_UDP_SRC, nl.TCA_FLOWER_KEY_SCTP_SRC:
+ filter.SrcPort = ntohs(datum.Value)
+ case nl.TCA_FLOWER_KEY_TCP_DST, nl.TCA_FLOWER_KEY_UDP_DST, nl.TCA_FLOWER_KEY_SCTP_DST:
+ filter.DestPort = ntohs(datum.Value)
case nl.TCA_FLOWER_ACT:
tables, err := nl.ParseRouteAttr(datum.Value)
if err != nil {
@@ -171,6 +218,16 @@ func (filter *Flower) decode(data []syscall.NetlinkRouteAttr) error {
if err != nil {
return err
}
+ case nl.TCA_FLOWER_FLAGS:
+ attr := nl.DeserializeUint32Bitfield(datum.Value)
+ skipSw := attr.Value & nl.TCA_CLS_FLAGS_SKIP_HW
+ skipHw := attr.Value & nl.TCA_CLS_FLAGS_SKIP_SW
+ if skipSw != 0 {
+ filter.SkipSw = true
+ }
+ if skipHw != 0 {
+ filter.SkipHw = true
+ }
}
}
return nil
@@ -185,19 +242,7 @@ func FilterDel(filter Filter) error {
// FilterDel will delete a filter from the system.
// Equivalent to: `tc filter del $filter`
func (h *Handle) FilterDel(filter Filter) error {
- req := h.newNetlinkRequest(unix.RTM_DELTFILTER, unix.NLM_F_ACK)
- base := filter.Attrs()
- msg := &nl.TcMsg{
- Family: nl.FAMILY_ALL,
- Ifindex: int32(base.LinkIndex),
- Handle: base.Handle,
- Parent: base.Parent,
- Info: MakeHandle(base.Priority, nl.Swap16(base.Protocol)),
- }
- req.AddData(msg)
-
- _, err := req.Execute(unix.NETLINK_ROUTE, 0)
- return err
+ return h.filterModify(filter, unix.RTM_DELTFILTER, 0)
}
// FilterAdd will add a filter to the system.
@@ -209,7 +254,7 @@ func FilterAdd(filter Filter) error {
// FilterAdd will add a filter to the system.
// Equivalent to: `tc filter add $filter`
func (h *Handle) FilterAdd(filter Filter) error {
- return h.filterModify(filter, unix.NLM_F_CREATE|unix.NLM_F_EXCL)
+ return h.filterModify(filter, unix.RTM_NEWTFILTER, unix.NLM_F_CREATE|unix.NLM_F_EXCL)
}
// FilterReplace will replace a filter.
@@ -221,11 +266,11 @@ func FilterReplace(filter Filter) error {
// FilterReplace will replace a filter.
// Equivalent to: `tc filter replace $filter`
func (h *Handle) FilterReplace(filter Filter) error {
- return h.filterModify(filter, unix.NLM_F_CREATE)
+ return h.filterModify(filter, unix.RTM_NEWTFILTER, unix.NLM_F_CREATE)
}
-func (h *Handle) filterModify(filter Filter, flags int) error {
- req := h.newNetlinkRequest(unix.RTM_NEWTFILTER, flags|unix.NLM_F_ACK)
+func (h *Handle) filterModify(filter Filter, proto, flags int) error {
+ req := h.newNetlinkRequest(proto, flags|unix.NLM_F_ACK)
base := filter.Attrs()
msg := &nl.TcMsg{
Family: nl.FAMILY_ALL,
@@ -235,6 +280,9 @@ func (h *Handle) filterModify(filter Filter, flags int) error {
Info: MakeHandle(base.Priority, nl.Swap16(base.Protocol)),
}
req.AddData(msg)
+ if filter.Attrs().Chain != nil {
+ req.AddData(nl.NewRtAttr(nl.TCA_CHAIN, nl.Uint32Attr(*filter.Attrs().Chain)))
+ }
req.AddData(nl.NewRtAttr(nl.TCA_KIND, nl.ZeroTerminated(filter.Type())))
options := nl.NewRtAttr(nl.TCA_OPTIONS, nil)
@@ -284,6 +332,12 @@ func (h *Handle) filterModify(filter Filter, flags int) error {
if filter.Link != 0 {
options.AddRtAttr(nl.TCA_U32_LINK, nl.Uint32Attr(filter.Link))
}
+ if filter.Police != nil {
+ police := options.AddRtAttr(nl.TCA_U32_POLICE, nil)
+ if err := encodePolice(police, filter.Police); err != nil {
+ return err
+ }
+ }
actionsAttr := options.AddRtAttr(nl.TCA_U32_ACT, nil)
// backwards compatibility
if filter.RedirIndex != 0 {
@@ -312,6 +366,10 @@ func (h *Handle) filterModify(filter Filter, flags int) error {
native.PutUint32(b, filter.ClassId)
options.AddRtAttr(nl.TCA_FW_CLASSID, b)
}
+ actionsAttr := options.AddRtAttr(nl.TCA_FW_ACT, nil)
+ if err := EncodeActions(actionsAttr, filter.Actions); err != nil {
+ return err
+ }
case *BpfFilter:
var bpfFlags uint32
if filter.ClassId != 0 {
@@ -340,7 +398,6 @@ func (h *Handle) filterModify(filter Filter, flags int) error {
return err
}
}
-
req.AddData(options)
_, err := req.Execute(unix.NETLINK_ROUTE, 0)
return err
@@ -446,6 +503,10 @@ func (h *Handle) FilterList(link Link, parent uint32) ([]Filter, error) {
default:
detailed = true
}
+ case nl.TCA_CHAIN:
+ val := new(uint32)
+ *val = native.Uint32(attr.Value)
+ base.Chain = val
}
}
// only return the detailed version of the filter
@@ -474,6 +535,14 @@ func toAttrs(tcgen *nl.TcGen, attrs *ActionAttrs) {
attrs.Bindcnt = int(tcgen.Bindcnt)
}
+func toTimeStamp(tcf *nl.Tcf) *ActionTimestamp {
+ return &ActionTimestamp{
+ Installed: tcf.Install,
+ LastUsed: tcf.LastUse,
+ Expires: tcf.Expires,
+ FirstUsed: tcf.FirstUse}
+}
+
func encodePolice(attr *nl.RtAttr, action *PoliceAction) error {
var rtab [256]uint32
var ptab [256]uint32
@@ -597,6 +666,9 @@ func EncodeActions(attr *nl.RtAttr, actions []Action) error {
if action.Mark != nil {
aopts.AddRtAttr(nl.TCA_SKBEDIT_MARK, nl.Uint32Attr(*action.Mark))
}
+ if action.Mask != nil {
+ aopts.AddRtAttr(nl.TCA_SKBEDIT_MASK, nl.Uint32Attr(*action.Mask))
+ }
case *ConnmarkAction:
table := attr.AddRtAttr(tabIndex, nil)
tabIndex++
@@ -635,6 +707,29 @@ func EncodeActions(attr *nl.RtAttr, actions []Action) error {
gen := nl.TcGen{}
toTcGen(action.Attrs(), &gen)
aopts.AddRtAttr(nl.TCA_GACT_PARMS, gen.Serialize())
+ case *PeditAction:
+ table := attr.AddRtAttr(tabIndex, nil)
+ tabIndex++
+ pedit := nl.TcPedit{}
+ if action.SrcMacAddr != nil {
+ pedit.SetEthSrc(action.SrcMacAddr)
+ }
+ if action.DstMacAddr != nil {
+ pedit.SetEthDst(action.DstMacAddr)
+ }
+ if action.SrcIP != nil {
+ pedit.SetSrcIP(action.SrcIP)
+ }
+ if action.DstIP != nil {
+ pedit.SetDstIP(action.DstIP)
+ }
+ if action.SrcPort != 0 {
+ pedit.SetSrcPort(action.SrcPort, action.Proto)
+ }
+ if action.DstPort != 0 {
+ pedit.SetDstPort(action.DstPort, action.Proto)
+ }
+ pedit.Encode(table)
}
}
return nil
@@ -668,6 +763,8 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) {
for _, table := range tables {
var action Action
var actionType string
+ var actionnStatistic *ActionStatistic
+ var actionTimestamp *ActionTimestamp
aattrs, err := nl.ParseRouteAttr(table.Value)
if err != nil {
return nil, err
@@ -695,6 +792,8 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) {
action = &SkbEditAction{}
case "police":
action = &PoliceAction{}
+ case "pedit":
+ action = &PeditAction{}
default:
break nextattr
}
@@ -713,7 +812,11 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) {
toAttrs(&mirred.TcGen, action.Attrs())
action.(*MirredAction).Ifindex = int(mirred.Ifindex)
action.(*MirredAction).MirredAction = MirredAct(mirred.Eaction)
+ case nl.TCA_MIRRED_TM:
+ tcTs := nl.DeserializeTcf(adatum.Value)
+ actionTimestamp = toTimeStamp(tcTs)
}
+
case "tunnel_key":
switch adatum.Attr.Type {
case nl.TCA_TUNNEL_KEY_PARMS:
@@ -729,6 +832,9 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) {
action.(*TunnelKeyAction).DstAddr = adatum.Value[:]
case nl.TCA_TUNNEL_KEY_ENC_DST_PORT:
action.(*TunnelKeyAction).DestPort = ntohs(adatum.Value)
+ case nl.TCA_TUNNEL_KEY_TM:
+ tcTs := nl.DeserializeTcf(adatum.Value)
+ actionTimestamp = toTimeStamp(tcTs)
}
case "skbedit":
switch adatum.Attr.Type {
@@ -739,6 +845,9 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) {
case nl.TCA_SKBEDIT_MARK:
mark := native.Uint32(adatum.Value[0:4])
action.(*SkbEditAction).Mark = &mark
+ case nl.TCA_SKBEDIT_MASK:
+ mask := native.Uint32(adatum.Value[0:4])
+ action.(*SkbEditAction).Mask = &mask
case nl.TCA_SKBEDIT_PRIORITY:
priority := native.Uint32(adatum.Value[0:4])
action.(*SkbEditAction).Priority = &priority
@@ -748,6 +857,9 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) {
case nl.TCA_SKBEDIT_QUEUE_MAPPING:
mapping := native.Uint16(adatum.Value[0:2])
action.(*SkbEditAction).QueueMapping = &mapping
+ case nl.TCA_SKBEDIT_TM:
+ tcTs := nl.DeserializeTcf(adatum.Value)
+ actionTimestamp = toTimeStamp(tcTs)
}
case "bpf":
switch adatum.Attr.Type {
@@ -758,6 +870,9 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) {
action.(*BpfAction).Fd = int(native.Uint32(adatum.Value[0:4]))
case nl.TCA_ACT_BPF_NAME:
action.(*BpfAction).Name = string(adatum.Value[:len(adatum.Value)-1])
+ case nl.TCA_ACT_BPF_TM:
+ tcTs := nl.DeserializeTcf(adatum.Value)
+ actionTimestamp = toTimeStamp(tcTs)
}
case "connmark":
switch adatum.Attr.Type {
@@ -766,6 +881,9 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) {
action.(*ConnmarkAction).ActionAttrs = ActionAttrs{}
toAttrs(&connmark.TcGen, action.Attrs())
action.(*ConnmarkAction).Zone = connmark.Zone
+ case nl.TCA_CONNMARK_TM:
+ tcTs := nl.DeserializeTcf(adatum.Value)
+ actionTimestamp = toTimeStamp(tcTs)
}
case "csum":
switch adatum.Attr.Type {
@@ -774,19 +892,36 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) {
action.(*CsumAction).ActionAttrs = ActionAttrs{}
toAttrs(&csum.TcGen, action.Attrs())
action.(*CsumAction).UpdateFlags = CsumUpdateFlags(csum.UpdateFlags)
+ case nl.TCA_CSUM_TM:
+ tcTs := nl.DeserializeTcf(adatum.Value)
+ actionTimestamp = toTimeStamp(tcTs)
}
case "gact":
switch adatum.Attr.Type {
case nl.TCA_GACT_PARMS:
gen := *nl.DeserializeTcGen(adatum.Value)
toAttrs(&gen, action.Attrs())
+ if action.Attrs().Action.String() == "goto" {
+ action.(*GenericAction).Chain = TC_ACT_EXT_VAL_MASK & gen.Action
+ }
+ case nl.TCA_GACT_TM:
+ tcTs := nl.DeserializeTcf(adatum.Value)
+ actionTimestamp = toTimeStamp(tcTs)
}
case "police":
parsePolice(adatum, action.(*PoliceAction))
}
}
+ case nl.TCA_ACT_STATS:
+ s, err := parseTcStats2(aattr.Value)
+ if err != nil {
+ return nil, err
+ }
+ actionnStatistic = (*ActionStatistic)(s)
}
}
+ action.Attrs().Statistics = actionnStatistic
+ action.Attrs().Timestamp = actionTimestamp
actions = append(actions, action)
}
return actions, nil
@@ -824,6 +959,13 @@ func parseU32Data(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error)
u32.RedirIndex = int(action.Ifindex)
}
}
+ case nl.TCA_U32_POLICE:
+ var police PoliceAction
+ adata, _ := nl.ParseRouteAttr(datum.Value)
+ for _, aattr := range adata {
+ parsePolice(aattr, &police)
+ }
+ u32.Police = &police
case nl.TCA_U32_CLASSID:
u32.ClassId = native.Uint32(datum.Value)
case nl.TCA_U32_DIVISOR:
@@ -855,6 +997,15 @@ func parseFwData(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) {
parsePolice(aattr, &police)
}
fw.Police = &police
+ case nl.TCA_FW_ACT:
+ tables, err := nl.ParseRouteAttr(datum.Value)
+ if err != nil {
+ return detailed, err
+ }
+ fw.Actions, err = parseActions(tables)
+ if err != nil {
+ return detailed, err
+ }
}
}
return detailed, nil
diff --git a/vendor/github.com/vishvananda/netlink/handle_unspecified.go b/vendor/github.com/vishvananda/netlink/handle_unspecified.go
index cc94a4e007e..3fe03642e5b 100644
--- a/vendor/github.com/vishvananda/netlink/handle_unspecified.go
+++ b/vendor/github.com/vishvananda/netlink/handle_unspecified.go
@@ -79,6 +79,10 @@ func (h *Handle) LinkSetVfVlanQos(link Link, vf, vlan, qos int) error {
return ErrNotImplemented
}
+func (h *Handle) LinkSetVfVlanQosProto(link Link, vf, vlan, qos, proto int) error {
+ return ErrNotImplemented
+}
+
func (h *Handle) LinkSetVfTxRate(link Link, vf, rate int) error {
return ErrNotImplemented
}
@@ -163,6 +167,22 @@ func (h *Handle) LinkSetGroup(link Link, group int) error {
return ErrNotImplemented
}
+func (h *Handle) LinkSetGSOMaxSize(link Link, maxSize int) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) LinkSetGROMaxSize(link Link, maxSize int) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) LinkSetGSOIPv4MaxSize(link Link, maxSize int) error {
+ return ErrNotImplemented
+}
+
+func (h *Handle) LinkSetGROIPv4MaxSize(link Link, maxSize int) error {
+ return ErrNotImplemented
+}
+
func (h *Handle) setProtinfoAttr(link Link, mode bool, attr int) error {
return ErrNotImplemented
}
@@ -243,6 +263,10 @@ func (h *Handle) RouteAppend(route *Route) error {
return ErrNotImplemented
}
+func (h *Handle) RouteChange(route *Route) error {
+ return ErrNotImplemented
+}
+
func (h *Handle) RouteDel(route *Route) error {
return ErrNotImplemented
}
diff --git a/vendor/github.com/vishvananda/netlink/inet_diag.go b/vendor/github.com/vishvananda/netlink/inet_diag.go
index bee391a8098..2904d964282 100644
--- a/vendor/github.com/vishvananda/netlink/inet_diag.go
+++ b/vendor/github.com/vishvananda/netlink/inet_diag.go
@@ -21,6 +21,10 @@ const (
INET_DIAG_BBRINFO
INET_DIAG_CLASS_ID
INET_DIAG_MD5SIG
+ INET_DIAG_ULP_INFO
+ INET_DIAG_SK_BPF_STORAGES
+ INET_DIAG_CGROUP_ID
+ INET_DIAG_SOCKOPT
INET_DIAG_MAX
)
@@ -29,3 +33,8 @@ type InetDiagTCPInfoResp struct {
TCPInfo *TCPInfo
TCPBBRInfo *TCPBBRInfo
}
+
+type InetDiagUDPInfoResp struct {
+ InetDiagMsg *Socket
+ Memory *MemInfo
+}
diff --git a/vendor/github.com/vishvananda/netlink/ipset_linux.go b/vendor/github.com/vishvananda/netlink/ipset_linux.go
index 1f4eae81c24..f4c05229fa5 100644
--- a/vendor/github.com/vishvananda/netlink/ipset_linux.go
+++ b/vendor/github.com/vishvananda/netlink/ipset_linux.go
@@ -67,11 +67,13 @@ type IpsetCreateOptions struct {
Comments bool
Skbinfo bool
- Revision uint8
- IPFrom net.IP
- IPTo net.IP
- PortFrom uint16
- PortTo uint16
+ Family uint8
+ Revision uint8
+ IPFrom net.IP
+ IPTo net.IP
+ PortFrom uint16
+ PortTo uint16
+ MaxElements uint32
}
// IpsetProtocol returns the ipset protocol version from the kernel
@@ -94,6 +96,11 @@ func IpsetFlush(setname string) error {
return pkgHandle.IpsetFlush(setname)
}
+// IpsetSwap swaps two ipsets.
+func IpsetSwap(setname, othersetname string) error {
+ return pkgHandle.IpsetSwap(setname, othersetname)
+}
+
// IpsetList dumps an specific ipset.
func IpsetList(setname string) (*IPSetResult, error) {
return pkgHandle.IpsetList(setname)
@@ -114,6 +121,11 @@ func IpsetDel(setname string, entry *IPSetEntry) error {
return pkgHandle.IpsetDel(setname, entry)
}
+// IpsetTest tests whether an entry is in a set or not.
+func IpsetTest(setname string, entry *IPSetEntry) (bool, error) {
+ return pkgHandle.IpsetTest(setname, entry)
+}
+
func (h *Handle) IpsetProtocol() (protocol uint8, minVersion uint8, err error) {
req := h.newIpsetRequest(nl.IPSET_CMD_PROTOCOL)
msgs, err := req.Execute(unix.NETLINK_NETFILTER, 0)
@@ -153,11 +165,18 @@ func (h *Handle) IpsetCreate(setname, typename string, options IpsetCreateOption
data.AddChild(nl.NewRtAttr(nl.IPSET_ATTR_PORT_FROM|int(nl.NLA_F_NET_BYTEORDER), buf[:2]))
data.AddChild(nl.NewRtAttr(nl.IPSET_ATTR_PORT_TO|int(nl.NLA_F_NET_BYTEORDER), buf[2:]))
default:
- family = unix.AF_INET
+ family = options.Family
+ if family == 0 {
+ family = unix.AF_INET
+ }
}
req.AddData(nl.NewRtAttr(nl.IPSET_ATTR_FAMILY, nl.Uint8Attr(family)))
+ if options.MaxElements != 0 {
+ data.AddChild(&nl.Uint32Attribute{Type: nl.IPSET_ATTR_MAXELEM | nl.NLA_F_NET_BYTEORDER, Value: options.MaxElements})
+ }
+
if timeout := options.Timeout; timeout != nil {
data.AddChild(&nl.Uint32Attribute{Type: nl.IPSET_ATTR_TIMEOUT | nl.NLA_F_NET_BYTEORDER, Value: *timeout})
}
@@ -197,6 +216,14 @@ func (h *Handle) IpsetFlush(setname string) error {
return err
}
+func (h *Handle) IpsetSwap(setname, othersetname string) error {
+ req := h.newIpsetRequest(nl.IPSET_CMD_SWAP)
+ req.AddData(nl.NewRtAttr(nl.IPSET_ATTR_SETNAME, nl.ZeroTerminated(setname)))
+ req.AddData(nl.NewRtAttr(nl.IPSET_ATTR_TYPENAME, nl.ZeroTerminated(othersetname)))
+ _, err := ipsetExecute(req)
+ return err
+}
+
func (h *Handle) IpsetList(name string) (*IPSetResult, error) {
req := h.newIpsetRequest(nl.IPSET_CMD_LIST)
req.AddData(nl.NewRtAttr(nl.IPSET_ATTR_SETNAME, nl.ZeroTerminated(name)))
@@ -236,18 +263,23 @@ func (h *Handle) IpsetDel(setname string, entry *IPSetEntry) error {
return h.ipsetAddDel(nl.IPSET_CMD_DEL, setname, entry)
}
-func (h *Handle) ipsetAddDel(nlCmd int, setname string, entry *IPSetEntry) error {
- req := h.newIpsetRequest(nlCmd)
- req.AddData(nl.NewRtAttr(nl.IPSET_ATTR_SETNAME, nl.ZeroTerminated(setname)))
-
- if entry.Comment != "" {
- req.AddData(nl.NewRtAttr(nl.IPSET_ATTR_COMMENT, nl.ZeroTerminated(entry.Comment)))
+func encodeIP(ip net.IP) (*nl.RtAttr, error) {
+ typ := int(nl.NLA_F_NET_BYTEORDER)
+ if ip4 := ip.To4(); ip4 != nil {
+ typ |= nl.IPSET_ATTR_IPADDR_IPV4
+ ip = ip4
+ } else {
+ typ |= nl.IPSET_ATTR_IPADDR_IPV6
}
+ return nl.NewRtAttr(typ, ip), nil
+}
+
+func buildEntryData(entry *IPSetEntry) (*nl.RtAttr, error) {
data := nl.NewRtAttr(nl.IPSET_ATTR_DATA|int(nl.NLA_F_NESTED), nil)
- if !entry.Replace {
- req.Flags |= unix.NLM_F_EXCL
+ if entry.Comment != "" {
+ data.AddChild(nl.NewRtAttr(nl.IPSET_ATTR_COMMENT, nl.ZeroTerminated(entry.Comment)))
}
if entry.Timeout != nil {
@@ -255,7 +287,10 @@ func (h *Handle) ipsetAddDel(nlCmd int, setname string, entry *IPSetEntry) error
}
if entry.IP != nil {
- nestedData := nl.NewRtAttr(nl.IPSET_ATTR_IP|int(nl.NLA_F_NET_BYTEORDER), entry.IP)
+ nestedData, err := encodeIP(entry.IP)
+ if err != nil {
+ return nil, err
+ }
data.AddChild(nl.NewRtAttr(nl.IPSET_ATTR_IP|int(nl.NLA_F_NESTED), nestedData.Serialize()))
}
@@ -268,7 +303,10 @@ func (h *Handle) ipsetAddDel(nlCmd int, setname string, entry *IPSetEntry) error
}
if entry.IP2 != nil {
- nestedData := nl.NewRtAttr(nl.IPSET_ATTR_IP|int(nl.NLA_F_NET_BYTEORDER), entry.IP2)
+ nestedData, err := encodeIP(entry.IP2)
+ if err != nil {
+ return nil, err
+ }
data.AddChild(nl.NewRtAttr(nl.IPSET_ATTR_IP2|int(nl.NLA_F_NESTED), nestedData.Serialize()))
}
@@ -295,14 +333,53 @@ func (h *Handle) ipsetAddDel(nlCmd int, setname string, entry *IPSetEntry) error
if entry.Mark != nil {
data.AddChild(&nl.Uint32Attribute{Type: nl.IPSET_ATTR_MARK | nl.NLA_F_NET_BYTEORDER, Value: *entry.Mark})
}
+ return data, nil
+}
+func (h *Handle) ipsetAddDel(nlCmd int, setname string, entry *IPSetEntry) error {
+ req := h.newIpsetRequest(nlCmd)
+ req.AddData(nl.NewRtAttr(nl.IPSET_ATTR_SETNAME, nl.ZeroTerminated(setname)))
+
+ if !entry.Replace {
+ req.Flags |= unix.NLM_F_EXCL
+ }
+
+ data, err := buildEntryData(entry)
+ if err != nil {
+ return err
+ }
data.AddChild(&nl.Uint32Attribute{Type: nl.IPSET_ATTR_LINENO | nl.NLA_F_NET_BYTEORDER, Value: 0})
req.AddData(data)
- _, err := ipsetExecute(req)
+ _, err = ipsetExecute(req)
return err
}
+func (h *Handle) IpsetTest(setname string, entry *IPSetEntry) (bool, error) {
+ req := h.newIpsetRequest(nl.IPSET_CMD_TEST)
+ req.AddData(nl.NewRtAttr(nl.IPSET_ATTR_SETNAME, nl.ZeroTerminated(setname)))
+
+ if !entry.Replace {
+ req.Flags |= unix.NLM_F_EXCL
+ }
+
+ data, err := buildEntryData(entry)
+ if err != nil {
+ return false, err
+ }
+ req.AddData(data)
+
+ _, err = ipsetExecute(req)
+ if err != nil {
+ if err == nl.IPSetError(nl.IPSET_ERR_EXIST) {
+ // not exist
+ return false, nil
+ }
+ return false, err
+ }
+ return true, nil
+}
+
func (h *Handle) newIpsetRequest(cmd int) *nl.NetlinkRequest {
req := h.newNetlinkRequest(cmd|(unix.NFNL_SUBSYS_IPSET<<8), nl.GetIpsetFlags(cmd))
@@ -466,7 +543,7 @@ func parseIPSetEntry(data []byte) (entry IPSetEntry) {
case nl.IPSET_ATTR_IP | nl.NLA_F_NESTED:
for attr := range nl.ParseAttributes(attr.Value) {
switch attr.Type {
- case nl.IPSET_ATTR_IP:
+ case nl.IPSET_ATTR_IPADDR_IPV4, nl.IPSET_ATTR_IPADDR_IPV6:
entry.IP = net.IP(attr.Value)
default:
log.Printf("unknown nested ADT attribute from kernel: %+v", attr)
@@ -475,7 +552,7 @@ func parseIPSetEntry(data []byte) (entry IPSetEntry) {
case nl.IPSET_ATTR_IP2 | nl.NLA_F_NESTED:
for attr := range nl.ParseAttributes(attr.Value) {
switch attr.Type {
- case nl.IPSET_ATTR_IP:
+ case nl.IPSET_ATTR_IPADDR_IPV4, nl.IPSET_ATTR_IPADDR_IPV6:
entry.IP2 = net.IP(attr.Value)
default:
log.Printf("unknown nested ADT attribute from kernel: %+v", attr)
diff --git a/vendor/github.com/vishvananda/netlink/link.go b/vendor/github.com/vishvananda/netlink/link.go
index 33c872336d6..f820cdb678d 100644
--- a/vendor/github.com/vishvananda/netlink/link.go
+++ b/vendor/github.com/vishvananda/netlink/link.go
@@ -22,34 +22,41 @@ type (
// LinkAttrs represents data shared by most link types
type LinkAttrs struct {
- Index int
- MTU int
- TxQLen int // Transmit Queue Length
- Name string
- HardwareAddr net.HardwareAddr
- Flags net.Flags
- RawFlags uint32
- ParentIndex int // index of the parent link device
- MasterIndex int // must be the index of a bridge
- Namespace interface{} // nil | NsPid | NsFd
- Alias string
- Statistics *LinkStatistics
- Promisc int
- Allmulti int
- Multi int
- Xdp *LinkXdp
- EncapType string
- Protinfo *Protinfo
- OperState LinkOperState
- PhysSwitchID int
- NetNsID int
- NumTxQueues int
- NumRxQueues int
- GSOMaxSize uint32
- GSOMaxSegs uint32
- Vfs []VfInfo // virtual functions available on link
- Group uint32
- Slave LinkSlave
+ Index int
+ MTU int
+ TxQLen int // Transmit Queue Length
+ Name string
+ HardwareAddr net.HardwareAddr
+ Flags net.Flags
+ RawFlags uint32
+ ParentIndex int // index of the parent link device
+ MasterIndex int // must be the index of a bridge
+ Namespace interface{} // nil | NsPid | NsFd
+ Alias string
+ AltNames []string
+ Statistics *LinkStatistics
+ Promisc int
+ Allmulti int
+ Multi int
+ Xdp *LinkXdp
+ EncapType string
+ Protinfo *Protinfo
+ OperState LinkOperState
+ PhysSwitchID int
+ NetNsID int
+ NumTxQueues int
+ NumRxQueues int
+ TSOMaxSegs uint32
+ TSOMaxSize uint32
+ GSOMaxSegs uint32
+ GSOMaxSize uint32
+ GROMaxSize uint32
+ GSOIPv4MaxSize uint32
+ GROIPv4MaxSize uint32
+ Vfs []VfInfo // virtual functions available on link
+ Group uint32
+ PermHWAddr net.HardwareAddr
+ Slave LinkSlave
}
// LinkSlave represents a slave device.
@@ -63,6 +70,7 @@ type VfInfo struct {
Mac net.HardwareAddr
Vlan int
Qos int
+ VlanProto int
TxRate int // IFLA_VF_TX_RATE Max TxRate
Spoofchk bool
LinkState uint32
@@ -265,6 +273,8 @@ type Bridge struct {
AgeingTime *uint32
HelloTime *uint32
VlanFiltering *bool
+ VlanDefaultPVID *uint16
+ GroupFwdMask *uint16
}
func (bridge *Bridge) Attrs() *LinkAttrs {
@@ -308,6 +318,9 @@ type Macvlan struct {
// MACAddrs is only populated for Macvlan SOURCE links
MACAddrs []net.HardwareAddr
+
+ BCQueueLen uint32
+ UsedBCQueueLen uint32
}
func (macvlan *Macvlan) Attrs() *LinkAttrs {
@@ -350,6 +363,46 @@ func (tuntap *Tuntap) Type() string {
return "tuntap"
}
+type NetkitMode uint32
+
+const (
+ NETKIT_MODE_L2 NetkitMode = iota
+ NETKIT_MODE_L3
+)
+
+type NetkitPolicy int
+
+const (
+ NETKIT_POLICY_FORWARD NetkitPolicy = 0
+ NETKIT_POLICY_BLACKHOLE NetkitPolicy = 2
+)
+
+func (n *Netkit) IsPrimary() bool {
+ return n.isPrimary
+}
+
+// SetPeerAttrs will not take effect if trying to modify an existing netkit device
+func (n *Netkit) SetPeerAttrs(Attrs *LinkAttrs) {
+ n.peerLinkAttrs = *Attrs
+}
+
+type Netkit struct {
+ LinkAttrs
+ Mode NetkitMode
+ Policy NetkitPolicy
+ PeerPolicy NetkitPolicy
+ isPrimary bool
+ peerLinkAttrs LinkAttrs
+}
+
+func (n *Netkit) Attrs() *LinkAttrs {
+ return &n.LinkAttrs
+}
+
+func (n *Netkit) Type() string {
+ return "netkit"
+}
+
// Veth devices must specify PeerName on create
type Veth struct {
LinkAttrs
@@ -703,6 +756,7 @@ const (
BOND_XMIT_HASH_POLICY_LAYER2_3
BOND_XMIT_HASH_POLICY_ENCAP2_3
BOND_XMIT_HASH_POLICY_ENCAP3_4
+ BOND_XMIT_HASH_POLICY_VLAN_SRCMAC
BOND_XMIT_HASH_POLICY_UNKNOWN
)
@@ -712,6 +766,7 @@ var bondXmitHashPolicyToString = map[BondXmitHashPolicy]string{
BOND_XMIT_HASH_POLICY_LAYER2_3: "layer2+3",
BOND_XMIT_HASH_POLICY_ENCAP2_3: "encap2+3",
BOND_XMIT_HASH_POLICY_ENCAP3_4: "encap3+4",
+ BOND_XMIT_HASH_POLICY_VLAN_SRCMAC: "vlan+srcmac",
}
var StringToBondXmitHashPolicyMap = map[string]BondXmitHashPolicy{
"layer2": BOND_XMIT_HASH_POLICY_LAYER2,
@@ -719,6 +774,7 @@ var StringToBondXmitHashPolicyMap = map[string]BondXmitHashPolicy{
"layer2+3": BOND_XMIT_HASH_POLICY_LAYER2_3,
"encap2+3": BOND_XMIT_HASH_POLICY_ENCAP2_3,
"encap3+4": BOND_XMIT_HASH_POLICY_ENCAP3_4,
+ "vlan+srcmac": BOND_XMIT_HASH_POLICY_VLAN_SRCMAC,
}
// BondLacpRate type
@@ -974,16 +1030,18 @@ func (v *VrfSlave) SlaveType() string {
// https://github.com/torvalds/linux/blob/47ec5303d73ea344e84f46660fff693c57641386/drivers/net/geneve.c#L1209-L1223
type Geneve struct {
LinkAttrs
- ID uint32 // vni
- Remote net.IP
- Ttl uint8
- Tos uint8
- Dport uint16
- UdpCsum uint8
- UdpZeroCsum6Tx uint8
- UdpZeroCsum6Rx uint8
- Link uint32
- FlowBased bool
+ ID uint32 // vni
+ Remote net.IP
+ Ttl uint8
+ Tos uint8
+ Dport uint16
+ UdpCsum uint8
+ UdpZeroCsum6Tx uint8
+ UdpZeroCsum6Rx uint8
+ Link uint32
+ FlowBased bool
+ InnerProtoInherit bool
+ Df GeneveDf
}
func (geneve *Geneve) Attrs() *LinkAttrs {
@@ -994,6 +1052,15 @@ func (geneve *Geneve) Type() string {
return "geneve"
}
+type GeneveDf uint8
+
+const (
+ GENEVE_DF_UNSET GeneveDf = iota
+ GENEVE_DF_SET
+ GENEVE_DF_INHERIT
+ GENEVE_DF_MAX
+)
+
// Gretap devices must specify LocalIP and RemoteIP on create
type Gretap struct {
LinkAttrs
@@ -1064,6 +1131,7 @@ type Ip6tnl struct {
EncapFlags uint16
EncapSport uint16
EncapDport uint16
+ FlowBased bool
}
func (ip6tnl *Ip6tnl) Attrs() *LinkAttrs {
@@ -1165,6 +1233,7 @@ type Gretun struct {
EncapFlags uint16
EncapSport uint16
EncapDport uint16
+ FlowBased bool
}
func (gretun *Gretun) Attrs() *LinkAttrs {
@@ -1208,6 +1277,7 @@ func (gtp *GTP) Type() string {
}
// Virtual XFRM Interfaces
+//
// Named "xfrmi" to prevent confusion with XFRM objects
type Xfrmi struct {
LinkAttrs
diff --git a/vendor/github.com/vishvananda/netlink/link_linux.go b/vendor/github.com/vishvananda/netlink/link_linux.go
index 276947a0069..d713612a907 100644
--- a/vendor/github.com/vishvananda/netlink/link_linux.go
+++ b/vendor/github.com/vishvananda/netlink/link_linux.go
@@ -345,6 +345,16 @@ func (h *Handle) BridgeSetVlanFiltering(link Link, on bool) error {
return h.linkModify(bridge, unix.NLM_F_ACK)
}
+func BridgeSetVlanDefaultPVID(link Link, pvid uint16) error {
+ return pkgHandle.BridgeSetVlanDefaultPVID(link, pvid)
+}
+
+func (h *Handle) BridgeSetVlanDefaultPVID(link Link, pvid uint16) error {
+ bridge := link.(*Bridge)
+ bridge.VlanDefaultPVID = &pvid
+ return h.linkModify(bridge, unix.NLM_F_ACK)
+}
+
func SetPromiscOn(link Link) error {
return pkgHandle.SetPromiscOn(link)
}
@@ -487,6 +497,58 @@ func (h *Handle) LinkSetAlias(link Link, name string) error {
return err
}
+// LinkAddAltName adds a new alternative name for the link device.
+// Equivalent to: `ip link property add $link altname $name`
+func LinkAddAltName(link Link, name string) error {
+ return pkgHandle.LinkAddAltName(link, name)
+}
+
+// LinkAddAltName adds a new alternative name for the link device.
+// Equivalent to: `ip link property add $link altname $name`
+func (h *Handle) LinkAddAltName(link Link, name string) error {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(unix.RTM_NEWLINKPROP, unix.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(unix.AF_UNSPEC)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ data := nl.NewRtAttr(unix.IFLA_PROP_LIST|unix.NLA_F_NESTED, nil)
+ data.AddRtAttr(unix.IFLA_ALT_IFNAME, []byte(name))
+
+ req.AddData(data)
+
+ _, err := req.Execute(unix.NETLINK_ROUTE, 0)
+ return err
+}
+
+// LinkDelAltName delete an alternative name for the link device.
+// Equivalent to: `ip link property del $link altname $name`
+func LinkDelAltName(link Link, name string) error {
+ return pkgHandle.LinkDelAltName(link, name)
+}
+
+// LinkDelAltName delete an alternative name for the link device.
+// Equivalent to: `ip link property del $link altname $name`
+func (h *Handle) LinkDelAltName(link Link, name string) error {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(unix.RTM_DELLINKPROP, unix.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(unix.AF_UNSPEC)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ data := nl.NewRtAttr(unix.IFLA_PROP_LIST|unix.NLA_F_NESTED, nil)
+ data.AddRtAttr(unix.IFLA_ALT_IFNAME, []byte(name))
+
+ req.AddData(data)
+
+ _, err := req.Execute(unix.NETLINK_ROUTE, 0)
+ return err
+}
+
// LinkSetHardwareAddr sets the hardware address of the link device.
// Equivalent to: `ip link set $link address $hwaddr`
func LinkSetHardwareAddr(link Link, hwaddr net.HardwareAddr) error {
@@ -602,6 +664,43 @@ func (h *Handle) LinkSetVfVlanQos(link Link, vf, vlan, qos int) error {
return err
}
+// LinkSetVfVlanQosProto sets the vlan, qos and protocol of a vf for the link.
+// Equivalent to: `ip link set $link vf $vf vlan $vlan qos $qos proto $proto`
+func LinkSetVfVlanQosProto(link Link, vf, vlan, qos, proto int) error {
+ return pkgHandle.LinkSetVfVlanQosProto(link, vf, vlan, qos, proto)
+}
+
+// LinkSetVfVlanQosProto sets the vlan, qos and protocol of a vf for the link.
+// Equivalent to: `ip link set $link vf $vf vlan $vlan qos $qos proto $proto`
+func (h *Handle) LinkSetVfVlanQosProto(link Link, vf, vlan, qos, proto int) error {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(unix.AF_UNSPEC)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil)
+ vfInfo := data.AddRtAttr(nl.IFLA_VF_INFO, nil)
+ vfVlanList := vfInfo.AddRtAttr(nl.IFLA_VF_VLAN_LIST, nil)
+
+ vfmsg := nl.VfVlanInfo{
+ VfVlan: nl.VfVlan{
+ Vf: uint32(vf),
+ Vlan: uint32(vlan),
+ Qos: uint32(qos),
+ },
+ VlanProto: (uint16(proto)>>8)&0xFF | (uint16(proto)&0xFF)<<8,
+ }
+
+ vfVlanList.AddRtAttr(nl.IFLA_VF_VLAN_INFO, vfmsg.Serialize())
+ req.AddData(data)
+
+ _, err := req.Execute(unix.NETLINK_ROUTE, 0)
+ return err
+}
+
// LinkSetVfTxRate sets the tx rate of a vf for the link.
// Equivalent to: `ip link set $link vf $vf rate $rate`
func LinkSetVfTxRate(link Link, vf, rate int) error {
@@ -946,6 +1045,141 @@ func LinkSetXdpFdWithFlags(link Link, fd, flags int) error {
return err
}
+// LinkSetGSOMaxSegs sets the GSO maximum segment count of the link device.
+// Equivalent to: `ip link set $link gso_max_segs $maxSegs`
+func LinkSetGSOMaxSegs(link Link, maxSegs int) error {
+ return pkgHandle.LinkSetGSOMaxSegs(link, maxSegs)
+}
+
+// LinkSetGSOMaxSegs sets the GSO maximum segment count of the link device.
+// Equivalent to: `ip link set $link gso_max_segs $maxSegs`
+func (h *Handle) LinkSetGSOMaxSegs(link Link, maxSize int) error {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(unix.AF_UNSPEC)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ b := make([]byte, 4)
+ native.PutUint32(b, uint32(maxSize))
+
+ data := nl.NewRtAttr(unix.IFLA_GSO_MAX_SEGS, b)
+ req.AddData(data)
+
+ _, err := req.Execute(unix.NETLINK_ROUTE, 0)
+ return err
+}
+
+// LinkSetGSOMaxSize sets the IPv6 GSO maximum size of the link device.
+// Equivalent to: `ip link set $link gso_max_size $maxSize`
+func LinkSetGSOMaxSize(link Link, maxSize int) error {
+ return pkgHandle.LinkSetGSOMaxSize(link, maxSize)
+}
+
+// LinkSetGSOMaxSize sets the IPv6 GSO maximum size of the link device.
+// Equivalent to: `ip link set $link gso_max_size $maxSize`
+func (h *Handle) LinkSetGSOMaxSize(link Link, maxSize int) error {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(unix.AF_UNSPEC)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ b := make([]byte, 4)
+ native.PutUint32(b, uint32(maxSize))
+
+ data := nl.NewRtAttr(unix.IFLA_GSO_MAX_SIZE, b)
+ req.AddData(data)
+
+ _, err := req.Execute(unix.NETLINK_ROUTE, 0)
+ return err
+}
+
+// LinkSetGROMaxSize sets the IPv6 GRO maximum size of the link device.
+// Equivalent to: `ip link set $link gro_max_size $maxSize`
+func LinkSetGROMaxSize(link Link, maxSize int) error {
+ return pkgHandle.LinkSetGROMaxSize(link, maxSize)
+}
+
+// LinkSetGROMaxSize sets the IPv6 GRO maximum size of the link device.
+// Equivalent to: `ip link set $link gro_max_size $maxSize`
+func (h *Handle) LinkSetGROMaxSize(link Link, maxSize int) error {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(unix.AF_UNSPEC)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ b := make([]byte, 4)
+ native.PutUint32(b, uint32(maxSize))
+
+ data := nl.NewRtAttr(unix.IFLA_GRO_MAX_SIZE, b)
+ req.AddData(data)
+
+ _, err := req.Execute(unix.NETLINK_ROUTE, 0)
+ return err
+}
+
+// LinkSetGSOIPv4MaxSize sets the IPv4 GSO maximum size of the link device.
+// Equivalent to: `ip link set $link gso_ipv4_max_size $maxSize`
+func LinkSetGSOIPv4MaxSize(link Link, maxSize int) error {
+ return pkgHandle.LinkSetGSOIPv4MaxSize(link, maxSize)
+}
+
+// LinkSetGSOIPv4MaxSize sets the IPv4 GSO maximum size of the link device.
+// Equivalent to: `ip link set $link gso_ipv4_max_size $maxSize`
+func (h *Handle) LinkSetGSOIPv4MaxSize(link Link, maxSize int) error {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(unix.AF_UNSPEC)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ b := make([]byte, 4)
+ native.PutUint32(b, uint32(maxSize))
+
+ data := nl.NewRtAttr(unix.IFLA_GSO_IPV4_MAX_SIZE, b)
+ req.AddData(data)
+
+ _, err := req.Execute(unix.NETLINK_ROUTE, 0)
+ return err
+}
+
+// LinkSetGROIPv4MaxSize sets the IPv4 GRO maximum size of the link device.
+// Equivalent to: `ip link set $link gro_ipv4_max_size $maxSize`
+func LinkSetGROIPv4MaxSize(link Link, maxSize int) error {
+ return pkgHandle.LinkSetGROIPv4MaxSize(link, maxSize)
+}
+
+// LinkSetGROIPv4MaxSize sets the IPv4 GRO maximum size of the link device.
+// Equivalent to: `ip link set $link gro_ipv4_max_size $maxSize`
+func (h *Handle) LinkSetGROIPv4MaxSize(link Link, maxSize int) error {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(unix.AF_UNSPEC)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ b := make([]byte, 4)
+ native.PutUint32(b, uint32(maxSize))
+
+ data := nl.NewRtAttr(unix.IFLA_GRO_IPV4_MAX_SIZE, b)
+ req.AddData(data)
+
+ _, err := req.Execute(unix.NETLINK_ROUTE, 0)
+ return err
+}
+
func boolAttr(val bool) []byte {
var v uint8
if val {
@@ -1401,6 +1635,21 @@ func (h *Handle) linkModify(link Link, flags int) error {
req.AddData(gsoAttr)
}
+ if base.GROMaxSize > 0 {
+ groAttr := nl.NewRtAttr(unix.IFLA_GRO_MAX_SIZE, nl.Uint32Attr(base.GROMaxSize))
+ req.AddData(groAttr)
+ }
+
+ if base.GSOIPv4MaxSize > 0 {
+ gsoAttr := nl.NewRtAttr(unix.IFLA_GSO_IPV4_MAX_SIZE, nl.Uint32Attr(base.GSOIPv4MaxSize))
+ req.AddData(gsoAttr)
+ }
+
+ if base.GROIPv4MaxSize > 0 {
+ groAttr := nl.NewRtAttr(unix.IFLA_GRO_IPV4_MAX_SIZE, nl.Uint32Attr(base.GROIPv4MaxSize))
+ req.AddData(groAttr)
+ }
+
if base.Group > 0 {
groupAttr := nl.NewRtAttr(unix.IFLA_GROUP, nl.Uint32Attr(base.Group))
req.AddData(groupAttr)
@@ -1437,6 +1686,10 @@ func (h *Handle) linkModify(link Link, flags int) error {
if link.VlanProtocol != VLAN_PROTOCOL_UNKNOWN {
data.AddRtAttr(nl.IFLA_VLAN_PROTOCOL, htons(uint16(link.VlanProtocol)))
}
+ case *Netkit:
+ if err := addNetkitAttrs(link, linkInfo, flags); err != nil {
+ return err
+ }
case *Veth:
data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil)
peer := data.AddRtAttr(nl.VETH_INFO_PEER, nil)
@@ -1480,15 +1733,9 @@ func (h *Handle) linkModify(link Link, flags int) error {
data.AddRtAttr(nl.IFLA_IPVLAN_MODE, nl.Uint16Attr(uint16(link.Mode)))
data.AddRtAttr(nl.IFLA_IPVLAN_FLAG, nl.Uint16Attr(uint16(link.Flag)))
case *Macvlan:
- if link.Mode != MACVLAN_MODE_DEFAULT {
- data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil)
- data.AddRtAttr(nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[link.Mode]))
- }
+ addMacvlanAttrs(link, linkInfo)
case *Macvtap:
- if link.Mode != MACVLAN_MODE_DEFAULT {
- data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil)
- data.AddRtAttr(nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[link.Mode]))
- }
+ addMacvtapAttrs(link, linkInfo)
case *Geneve:
addGeneveAttrs(link, linkInfo)
case *Gretap:
@@ -1569,6 +1816,13 @@ func (h *Handle) linkByNameDump(name string) (Link, error) {
if link.Attrs().Name == name {
return link, nil
}
+
+ // support finding interfaces also via altnames
+ for _, altName := range link.Attrs().AltNames {
+ if altName == name {
+ return link, nil
+ }
+ }
}
return nil, LinkNotFoundError{fmt.Errorf("Link %s not found", name)}
}
@@ -1607,6 +1861,9 @@ func (h *Handle) LinkByName(name string) (Link, error) {
req.AddData(attr)
nameData := nl.NewRtAttr(unix.IFLA_IFNAME, nl.ZeroTerminated(name))
+ if len(name) > 15 {
+ nameData = nl.NewRtAttr(unix.IFLA_ALT_IFNAME, nl.ZeroTerminated(name))
+ }
req.AddData(nameData)
link, err := execGetLink(req)
@@ -1712,9 +1969,6 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) {
base.Flags = linkFlags(msg.Flags)
base.EncapType = msg.EncapType()
base.NetNsID = -1
- if msg.Flags&unix.IFF_PROMISC != 0 {
- base.Promisc = 1
- }
if msg.Flags&unix.IFF_ALLMULTI != 0 {
base.Allmulti = 1
}
@@ -1750,6 +2004,8 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) {
link = &Bridge{}
case "vlan":
link = &Vlan{}
+ case "netkit":
+ link = &Netkit{}
case "veth":
link = &Veth{}
case "wireguard":
@@ -1807,6 +2063,8 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) {
return nil, err
}
switch linkType {
+ case "netkit":
+ parseNetkitData(link, data)
case "vlan":
parseVlanData(link, data)
case "vxlan":
@@ -1897,6 +2155,8 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) {
base.Name = string(attr.Value[:len(attr.Value)-1])
case unix.IFLA_MTU:
base.MTU = int(native.Uint32(attr.Value[0:4]))
+ case unix.IFLA_PROMISCUITY:
+ base.Promisc = int(native.Uint32(attr.Value[0:4]))
case unix.IFLA_LINK:
base.ParentIndex = int(native.Uint32(attr.Value[0:4]))
case unix.IFLA_MASTER:
@@ -1931,16 +2191,38 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) {
protinfo := parseProtinfo(attrs)
base.Protinfo = &protinfo
}
+ case unix.IFLA_PROP_LIST | unix.NLA_F_NESTED:
+ attrs, err := nl.ParseRouteAttr(attr.Value[:])
+ if err != nil {
+ return nil, err
+ }
+
+ base.AltNames = []string{}
+ for _, attr := range attrs {
+ if attr.Attr.Type == unix.IFLA_ALT_IFNAME {
+ base.AltNames = append(base.AltNames, nl.BytesToString(attr.Value))
+ }
+ }
case unix.IFLA_OPERSTATE:
base.OperState = LinkOperState(uint8(attr.Value[0]))
case unix.IFLA_PHYS_SWITCH_ID:
base.PhysSwitchID = int(native.Uint32(attr.Value[0:4]))
case unix.IFLA_LINK_NETNSID:
base.NetNsID = int(native.Uint32(attr.Value[0:4]))
- case unix.IFLA_GSO_MAX_SIZE:
- base.GSOMaxSize = native.Uint32(attr.Value[0:4])
+ case unix.IFLA_TSO_MAX_SEGS:
+ base.TSOMaxSegs = native.Uint32(attr.Value[0:4])
+ case unix.IFLA_TSO_MAX_SIZE:
+ base.TSOMaxSize = native.Uint32(attr.Value[0:4])
case unix.IFLA_GSO_MAX_SEGS:
base.GSOMaxSegs = native.Uint32(attr.Value[0:4])
+ case unix.IFLA_GSO_MAX_SIZE:
+ base.GSOMaxSize = native.Uint32(attr.Value[0:4])
+ case unix.IFLA_GRO_MAX_SIZE:
+ base.GROMaxSize = native.Uint32(attr.Value[0:4])
+ case unix.IFLA_GSO_IPV4_MAX_SIZE:
+ base.GSOIPv4MaxSize = native.Uint32(attr.Value[0:4])
+ case unix.IFLA_GRO_IPV4_MAX_SIZE:
+ base.GROIPv4MaxSize = native.Uint32(attr.Value[0:4])
case unix.IFLA_VFINFO_LIST:
data, err := nl.ParseRouteAttr(attr.Value)
if err != nil {
@@ -1957,6 +2239,13 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) {
base.NumRxQueues = int(native.Uint32(attr.Value[0:4]))
case unix.IFLA_GROUP:
base.Group = native.Uint32(attr.Value[0:4])
+ case unix.IFLA_PERM_ADDRESS:
+ for _, b := range attr.Value {
+ if b != 0 {
+ base.PermHWAddr = attr.Value[:]
+ break
+ }
+ }
}
}
@@ -2069,21 +2358,24 @@ type LinkUpdate struct {
// LinkSubscribe takes a chan down which notifications will be sent
// when links change. Close the 'done' chan to stop subscription.
func LinkSubscribe(ch chan<- LinkUpdate, done <-chan struct{}) error {
- return linkSubscribeAt(netns.None(), netns.None(), ch, done, nil, false)
+ return linkSubscribeAt(netns.None(), netns.None(), ch, done, nil, false, 0, nil, false)
}
// LinkSubscribeAt works like LinkSubscribe plus it allows the caller
// to choose the network namespace in which to subscribe (ns).
func LinkSubscribeAt(ns netns.NsHandle, ch chan<- LinkUpdate, done <-chan struct{}) error {
- return linkSubscribeAt(ns, netns.None(), ch, done, nil, false)
+ return linkSubscribeAt(ns, netns.None(), ch, done, nil, false, 0, nil, false)
}
// LinkSubscribeOptions contains a set of options to use with
// LinkSubscribeWithOptions.
type LinkSubscribeOptions struct {
- Namespace *netns.NsHandle
- ErrorCallback func(error)
- ListExisting bool
+ Namespace *netns.NsHandle
+ ErrorCallback func(error)
+ ListExisting bool
+ ReceiveBufferSize int
+ ReceiveBufferForceSize bool
+ ReceiveTimeout *unix.Timeval
}
// LinkSubscribeWithOptions work like LinkSubscribe but enable to
@@ -2094,14 +2386,27 @@ func LinkSubscribeWithOptions(ch chan<- LinkUpdate, done <-chan struct{}, option
none := netns.None()
options.Namespace = &none
}
- return linkSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting)
+ return linkSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting,
+ options.ReceiveBufferSize, options.ReceiveTimeout, options.ReceiveBufferForceSize)
}
-func linkSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- LinkUpdate, done <-chan struct{}, cberr func(error), listExisting bool) error {
+func linkSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- LinkUpdate, done <-chan struct{}, cberr func(error), listExisting bool,
+ rcvbuf int, rcvTimeout *unix.Timeval, rcvbufForce bool) error {
s, err := nl.SubscribeAt(newNs, curNs, unix.NETLINK_ROUTE, unix.RTNLGRP_LINK)
if err != nil {
return err
}
+ if rcvTimeout != nil {
+ if err := s.SetReceiveTimeout(rcvTimeout); err != nil {
+ return err
+ }
+ }
+ if rcvbuf != 0 {
+ err = s.SetReceiveBufferSize(rcvbuf, rcvbufForce)
+ if err != nil {
+ return err
+ }
+ }
if done != nil {
go func() {
<-done
@@ -2182,6 +2487,16 @@ func (h *Handle) LinkSetGuard(link Link, mode bool) error {
return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_GUARD)
}
+// LinkSetBRSlaveGroupFwdMask set the group_fwd_mask of a bridge slave interface
+func LinkSetBRSlaveGroupFwdMask(link Link, mask uint16) error {
+ return pkgHandle.LinkSetBRSlaveGroupFwdMask(link, mask)
+}
+
+// LinkSetBRSlaveGroupFwdMask set the group_fwd_mask of a bridge slave interface
+func (h *Handle) LinkSetBRSlaveGroupFwdMask(link Link, mask uint16) error {
+ return h.setProtinfoAttrRawVal(link, nl.Uint16Attr(mask), nl.IFLA_BRPORT_GROUP_FWD_MASK)
+}
+
func LinkSetFastLeave(link Link, mode bool) error {
return pkgHandle.LinkSetFastLeave(link, mode)
}
@@ -2214,6 +2529,14 @@ func (h *Handle) LinkSetFlood(link Link, mode bool) error {
return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_UNICAST_FLOOD)
}
+func LinkSetIsolated(link Link, mode bool) error {
+ return pkgHandle.LinkSetIsolated(link, mode)
+}
+
+func (h *Handle) LinkSetIsolated(link Link, mode bool) error {
+ return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_ISOLATED)
+}
+
func LinkSetBrProxyArp(link Link, mode bool) error {
return pkgHandle.LinkSetBrProxyArp(link, mode)
}
@@ -2230,7 +2553,15 @@ func (h *Handle) LinkSetBrProxyArpWiFi(link Link, mode bool) error {
return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_PROXYARP_WIFI)
}
-func (h *Handle) setProtinfoAttr(link Link, mode bool, attr int) error {
+func LinkSetBrNeighSuppress(link Link, mode bool) error {
+ return pkgHandle.LinkSetBrNeighSuppress(link, mode)
+}
+
+func (h *Handle) LinkSetBrNeighSuppress(link Link, mode bool) error {
+ return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_NEIGH_SUPPRESS)
+}
+
+func (h *Handle) setProtinfoAttrRawVal(link Link, val []byte, attr int) error {
base := link.Attrs()
h.ensureIndex(base)
req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK)
@@ -2240,7 +2571,7 @@ func (h *Handle) setProtinfoAttr(link Link, mode bool, attr int) error {
req.AddData(msg)
br := nl.NewRtAttr(unix.IFLA_PROTINFO|unix.NLA_F_NESTED, nil)
- br.AddRtAttr(attr, boolToByte(mode))
+ br.AddRtAttr(attr, val)
req.AddData(br)
_, err := req.Execute(unix.NETLINK_ROUTE, 0)
if err != nil {
@@ -2248,6 +2579,9 @@ func (h *Handle) setProtinfoAttr(link Link, mode bool, attr int) error {
}
return nil
}
+func (h *Handle) setProtinfoAttr(link Link, mode bool, attr int) error {
+ return h.setProtinfoAttrRawVal(link, boolToByte(mode), attr)
+}
// LinkSetTxQLen sets the transaction queue length for the link.
// Equivalent to: `ip link set $link txqlen $qlen`
@@ -2305,6 +2639,80 @@ func (h *Handle) LinkSetGroup(link Link, group int) error {
return err
}
+func addNetkitAttrs(nk *Netkit, linkInfo *nl.RtAttr, flag int) error {
+ if nk.peerLinkAttrs.HardwareAddr != nil || nk.HardwareAddr != nil {
+ return fmt.Errorf("netkit doesn't support setting Ethernet")
+ }
+
+ data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil)
+ // Kernel will return error if trying to change the mode of an existing netkit device
+ data.AddRtAttr(nl.IFLA_NETKIT_MODE, nl.Uint32Attr(uint32(nk.Mode)))
+ data.AddRtAttr(nl.IFLA_NETKIT_POLICY, nl.Uint32Attr(uint32(nk.Policy)))
+ data.AddRtAttr(nl.IFLA_NETKIT_PEER_POLICY, nl.Uint32Attr(uint32(nk.PeerPolicy)))
+
+ if (flag & unix.NLM_F_EXCL) == 0 {
+ // Modifying peer link attributes will not take effect
+ return nil
+ }
+
+ peer := data.AddRtAttr(nl.IFLA_NETKIT_PEER_INFO, nil)
+ msg := nl.NewIfInfomsg(unix.AF_UNSPEC)
+ if nk.peerLinkAttrs.Flags&net.FlagUp != 0 {
+ msg.Change = unix.IFF_UP
+ msg.Flags = unix.IFF_UP
+ }
+ if nk.peerLinkAttrs.Index != 0 {
+ msg.Index = int32(nk.peerLinkAttrs.Index)
+ }
+ peer.AddChild(msg)
+ if nk.peerLinkAttrs.Name != "" {
+ peer.AddRtAttr(unix.IFLA_IFNAME, nl.ZeroTerminated(nk.peerLinkAttrs.Name))
+ }
+ if nk.peerLinkAttrs.MTU > 0 {
+ peer.AddRtAttr(unix.IFLA_MTU, nl.Uint32Attr(uint32(nk.peerLinkAttrs.MTU)))
+ }
+ if nk.peerLinkAttrs.GSOMaxSegs > 0 {
+ peer.AddRtAttr(unix.IFLA_GSO_MAX_SEGS, nl.Uint32Attr(nk.peerLinkAttrs.GSOMaxSegs))
+ }
+ if nk.peerLinkAttrs.GSOMaxSize > 0 {
+ peer.AddRtAttr(unix.IFLA_GSO_MAX_SIZE, nl.Uint32Attr(nk.peerLinkAttrs.GSOMaxSize))
+ }
+ if nk.peerLinkAttrs.GSOIPv4MaxSize > 0 {
+ peer.AddRtAttr(unix.IFLA_GSO_IPV4_MAX_SIZE, nl.Uint32Attr(nk.peerLinkAttrs.GSOIPv4MaxSize))
+ }
+ if nk.peerLinkAttrs.GROIPv4MaxSize > 0 {
+ peer.AddRtAttr(unix.IFLA_GRO_IPV4_MAX_SIZE, nl.Uint32Attr(nk.peerLinkAttrs.GROIPv4MaxSize))
+ }
+ if nk.peerLinkAttrs.Namespace != nil {
+ switch ns := nk.peerLinkAttrs.Namespace.(type) {
+ case NsPid:
+ peer.AddRtAttr(unix.IFLA_NET_NS_PID, nl.Uint32Attr(uint32(ns)))
+ case NsFd:
+ peer.AddRtAttr(unix.IFLA_NET_NS_FD, nl.Uint32Attr(uint32(ns)))
+ }
+ }
+ return nil
+}
+
+func parseNetkitData(link Link, data []syscall.NetlinkRouteAttr) {
+ netkit := link.(*Netkit)
+ for _, datum := range data {
+ switch datum.Attr.Type {
+ case nl.IFLA_NETKIT_PRIMARY:
+ isPrimary := datum.Value[0:1][0]
+ if isPrimary != 0 {
+ netkit.isPrimary = true
+ }
+ case nl.IFLA_NETKIT_MODE:
+ netkit.Mode = NetkitMode(native.Uint32(datum.Value[0:4]))
+ case nl.IFLA_NETKIT_POLICY:
+ netkit.Policy = NetkitPolicy(native.Uint32(datum.Value[0:4]))
+ case nl.IFLA_NETKIT_PEER_POLICY:
+ netkit.PeerPolicy = NetkitPolicy(native.Uint32(datum.Value[0:4]))
+ }
+ }
+}
+
func parseVlanData(link Link, data []syscall.NetlinkRouteAttr) {
vlan := link.(*Vlan)
for _, datum := range data {
@@ -2539,11 +2947,30 @@ func parseIPVtapData(link Link, data []syscall.NetlinkRouteAttr) {
}
}
+func addMacvtapAttrs(macvtap *Macvtap, linkInfo *nl.RtAttr) {
+ addMacvlanAttrs(&macvtap.Macvlan, linkInfo)
+}
+
func parseMacvtapData(link Link, data []syscall.NetlinkRouteAttr) {
macv := link.(*Macvtap)
parseMacvlanData(&macv.Macvlan, data)
}
+func addMacvlanAttrs(macvlan *Macvlan, linkInfo *nl.RtAttr) {
+ var data *nl.RtAttr
+
+ if macvlan.Mode != MACVLAN_MODE_DEFAULT || macvlan.BCQueueLen > 0 {
+ data = linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil)
+ }
+
+ if macvlan.Mode != MACVLAN_MODE_DEFAULT {
+ data.AddRtAttr(nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[macvlan.Mode]))
+ }
+ if macvlan.BCQueueLen > 0 {
+ data.AddRtAttr(nl.IFLA_MACVLAN_BC_QUEUE_LEN, nl.Uint32Attr(macvlan.BCQueueLen))
+ }
+}
+
func parseMacvlanData(link Link, data []syscall.NetlinkRouteAttr) {
macv := link.(*Macvlan)
for _, datum := range data {
@@ -2571,6 +2998,10 @@ func parseMacvlanData(link Link, data []syscall.NetlinkRouteAttr) {
for _, macDatum := range macs {
macv.MACAddrs = append(macv.MACAddrs, net.HardwareAddr(macDatum.Value[0:6]))
}
+ case nl.IFLA_MACVLAN_BC_QUEUE_LEN:
+ macv.BCQueueLen = native.Uint32(datum.Value[0:4])
+ case nl.IFLA_MACVLAN_BC_QUEUE_LEN_USED:
+ macv.UsedBCQueueLen = native.Uint32(datum.Value[0:4])
}
}
}
@@ -2599,10 +3030,13 @@ func linkFlags(rawFlags uint32) net.Flags {
func addGeneveAttrs(geneve *Geneve, linkInfo *nl.RtAttr) {
data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil)
+ if geneve.InnerProtoInherit {
+ data.AddRtAttr(nl.IFLA_GENEVE_INNER_PROTO_INHERIT, []byte{})
+ }
+
if geneve.FlowBased {
- // In flow based mode, no other attributes need to be configured
- linkInfo.AddRtAttr(nl.IFLA_GENEVE_COLLECT_METADATA, boolAttr(geneve.FlowBased))
- return
+ geneve.ID = 0
+ data.AddRtAttr(nl.IFLA_GENEVE_COLLECT_METADATA, []byte{})
}
if ip := geneve.Remote; ip != nil {
@@ -2628,6 +3062,8 @@ func addGeneveAttrs(geneve *Geneve, linkInfo *nl.RtAttr) {
if geneve.Tos != 0 {
data.AddRtAttr(nl.IFLA_GENEVE_TOS, nl.Uint8Attr(geneve.Tos))
}
+
+ data.AddRtAttr(nl.IFLA_GENEVE_DF, nl.Uint8Attr(uint8(geneve.Df)))
}
func parseGeneveData(link Link, data []syscall.NetlinkRouteAttr) {
@@ -2644,6 +3080,10 @@ func parseGeneveData(link Link, data []syscall.NetlinkRouteAttr) {
geneve.Ttl = uint8(datum.Value[0])
case nl.IFLA_GENEVE_TOS:
geneve.Tos = uint8(datum.Value[0])
+ case nl.IFLA_GENEVE_COLLECT_METADATA:
+ geneve.FlowBased = true
+ case nl.IFLA_GENEVE_INNER_PROTO_INHERIT:
+ geneve.InnerProtoInherit = true
}
}
}
@@ -2653,7 +3093,7 @@ func addGretapAttrs(gretap *Gretap, linkInfo *nl.RtAttr) {
if gretap.FlowBased {
// In flow based mode, no other attributes need to be configured
- data.AddRtAttr(nl.IFLA_GRE_COLLECT_METADATA, boolAttr(gretap.FlowBased))
+ data.AddRtAttr(nl.IFLA_GRE_COLLECT_METADATA, []byte{})
return
}
@@ -2736,6 +3176,12 @@ func parseGretapData(link Link, data []syscall.NetlinkRouteAttr) {
func addGretunAttrs(gre *Gretun, linkInfo *nl.RtAttr) {
data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil)
+ if gre.FlowBased {
+ // In flow based mode, no other attributes need to be configured
+ data.AddRtAttr(nl.IFLA_GRE_COLLECT_METADATA, []byte{})
+ return
+ }
+
if ip := gre.Local; ip != nil {
if ip.To4() != nil {
ip = ip.To4()
@@ -2806,6 +3252,8 @@ func parseGretunData(link Link, data []syscall.NetlinkRouteAttr) {
gre.EncapSport = ntohs(datum.Value[0:2])
case nl.IFLA_GRE_ENCAP_DPORT:
gre.EncapDport = ntohs(datum.Value[0:2])
+ case nl.IFLA_GRE_COLLECT_METADATA:
+ gre.FlowBased = true
}
}
}
@@ -2846,14 +3294,14 @@ func parseLinkXdp(data []byte) (*LinkXdp, error) {
}
func addIptunAttrs(iptun *Iptun, linkInfo *nl.RtAttr) {
+ data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil)
+
if iptun.FlowBased {
// In flow based mode, no other attributes need to be configured
- linkInfo.AddRtAttr(nl.IFLA_IPTUN_COLLECT_METADATA, boolAttr(iptun.FlowBased))
+ data.AddRtAttr(nl.IFLA_IPTUN_COLLECT_METADATA, []byte{})
return
}
- data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil)
-
ip := iptun.Local.To4()
if ip != nil {
data.AddRtAttr(nl.IFLA_IPTUN_LOCAL, []byte(ip))
@@ -2880,10 +3328,6 @@ func addIptunAttrs(iptun *Iptun, linkInfo *nl.RtAttr) {
func parseIptunData(link Link, data []syscall.NetlinkRouteAttr) {
iptun := link.(*Iptun)
for _, datum := range data {
- // NOTE: same with vxlan, ip tunnel may also has null datum.Value
- if len(datum.Value) == 0 {
- continue
- }
switch datum.Attr.Type {
case nl.IFLA_IPTUN_LOCAL:
iptun.Local = net.IP(datum.Value[0:4])
@@ -2914,6 +3358,12 @@ func parseIptunData(link Link, data []syscall.NetlinkRouteAttr) {
func addIp6tnlAttrs(ip6tnl *Ip6tnl, linkInfo *nl.RtAttr) {
data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil)
+ if ip6tnl.FlowBased {
+ // In flow based mode, no other attributes need to be configured
+ data.AddRtAttr(nl.IFLA_IPTUN_COLLECT_METADATA, []byte{})
+ return
+ }
+
if ip6tnl.Link != 0 {
data.AddRtAttr(nl.IFLA_IPTUN_LINK, nl.Uint32Attr(ip6tnl.Link))
}
@@ -2968,6 +3418,8 @@ func parseIp6tnlData(link Link, data []syscall.NetlinkRouteAttr) {
ip6tnl.EncapSport = ntohs(datum.Value[0:2])
case nl.IFLA_IPTUN_ENCAP_DPORT:
ip6tnl.EncapDport = ntohs(datum.Value[0:2])
+ case nl.IFLA_IPTUN_COLLECT_METADATA:
+ ip6tnl.FlowBased = true
}
}
}
@@ -3115,6 +3567,12 @@ func addBridgeAttrs(bridge *Bridge, linkInfo *nl.RtAttr) {
if bridge.VlanFiltering != nil {
data.AddRtAttr(nl.IFLA_BR_VLAN_FILTERING, boolToByte(*bridge.VlanFiltering))
}
+ if bridge.VlanDefaultPVID != nil {
+ data.AddRtAttr(nl.IFLA_BR_VLAN_DEFAULT_PVID, nl.Uint16Attr(*bridge.VlanDefaultPVID))
+ }
+ if bridge.GroupFwdMask != nil {
+ data.AddRtAttr(nl.IFLA_BR_GROUP_FWD_MASK, nl.Uint16Attr(*bridge.GroupFwdMask))
+ }
}
func parseBridgeData(bridge Link, data []syscall.NetlinkRouteAttr) {
@@ -3133,6 +3591,12 @@ func parseBridgeData(bridge Link, data []syscall.NetlinkRouteAttr) {
case nl.IFLA_BR_VLAN_FILTERING:
vlanFiltering := datum.Value[0] == 1
br.VlanFiltering = &vlanFiltering
+ case nl.IFLA_BR_VLAN_DEFAULT_PVID:
+ vlanDefaultPVID := native.Uint16(datum.Value[0:2])
+ br.VlanDefaultPVID = &vlanDefaultPVID
+ case nl.IFLA_BR_GROUP_FWD_MASK:
+ mask := native.Uint16(datum.Value[0:2])
+ br.GroupFwdMask = &mask
}
}
}
@@ -3174,12 +3638,17 @@ func parseVfInfoList(data []syscall.NetlinkRouteAttr) ([]VfInfo, error) {
if err != nil {
return nil, err
}
- vfs = append(vfs, parseVfInfo(vfAttrs, i))
+
+ vf, err := parseVfInfo(vfAttrs, i)
+ if err != nil {
+ return nil, err
+ }
+ vfs = append(vfs, vf)
}
return vfs, nil
}
-func parseVfInfo(data []syscall.NetlinkRouteAttr, id int) VfInfo {
+func parseVfInfo(data []syscall.NetlinkRouteAttr, id int) (VfInfo, error) {
vf := VfInfo{ID: id}
for _, element := range data {
switch element.Attr.Type {
@@ -3190,6 +3659,12 @@ func parseVfInfo(data []syscall.NetlinkRouteAttr, id int) VfInfo {
vl := nl.DeserializeVfVlan(element.Value[:])
vf.Vlan = int(vl.Vlan)
vf.Qos = int(vl.Qos)
+ case nl.IFLA_VF_VLAN_LIST:
+ vfVlanInfoList, err := nl.DeserializeVfVlanList(element.Value[:])
+ if err != nil {
+ return vf, err
+ }
+ vf.VlanProto = int(vfVlanInfoList[0].VlanProto)
case nl.IFLA_VF_TX_RATE:
txr := nl.DeserializeVfTxRate(element.Value[:])
vf.TxRate = int(txr.Rate)
@@ -3223,7 +3698,7 @@ func parseVfInfo(data []syscall.NetlinkRouteAttr, id int) VfInfo {
vf.Trust = result.Setting
}
}
- return vf
+ return vf, nil
}
func addXfrmiAttrs(xfrmi *Xfrmi, linkInfo *nl.RtAttr) {
@@ -3246,8 +3721,7 @@ func parseXfrmiData(link Link, data []syscall.NetlinkRouteAttr) {
}
}
-// LinkSetBondSlave add slave to bond link via ioctl interface.
-func LinkSetBondSlave(link Link, master *Bond) error {
+func ioctlBondSlave(cmd uintptr, link Link, master *Bond) error {
fd, err := getSocketUDP()
if err != nil {
return err
@@ -3255,10 +3729,38 @@ func LinkSetBondSlave(link Link, master *Bond) error {
defer syscall.Close(fd)
ifreq := newIocltSlaveReq(link.Attrs().Name, master.Attrs().Name)
-
- _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), unix.SIOCBONDENSLAVE, uintptr(unsafe.Pointer(ifreq)))
+ _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), cmd, uintptr(unsafe.Pointer(ifreq)))
if errno != 0 {
- return fmt.Errorf("Failed to enslave %q to %q, errno=%v", link.Attrs().Name, master.Attrs().Name, errno)
+ return fmt.Errorf("errno=%v", errno)
+ }
+ return nil
+}
+
+// LinkSetBondSlaveActive sets specified slave to ACTIVE in an `active-backup` bond link via ioctl interface.
+//
+// Multiple calls keeps the status unchanged(shown in the unit test).
+func LinkSetBondSlaveActive(link Link, master *Bond) error {
+ err := ioctlBondSlave(unix.SIOCBONDCHANGEACTIVE, link, master)
+ if err != nil {
+ return fmt.Errorf("Failed to set slave %q active in %q, %v", link.Attrs().Name, master.Attrs().Name, err)
+ }
+ return nil
+}
+
+// LinkSetBondSlave add slave to bond link via ioctl interface.
+func LinkSetBondSlave(link Link, master *Bond) error {
+ err := ioctlBondSlave(unix.SIOCBONDENSLAVE, link, master)
+ if err != nil {
+ return fmt.Errorf("Failed to enslave %q to %q, %v", link.Attrs().Name, master.Attrs().Name, err)
+ }
+ return nil
+}
+
+// LinkSetBondSlave removes specified slave from bond link via ioctl interface.
+func LinkDelBondSlave(link Link, master *Bond) error {
+ err := ioctlBondSlave(unix.SIOCBONDRELEASE, link, master)
+ if err != nil {
+ return fmt.Errorf("Failed to del slave %q from %q, %v", link.Attrs().Name, master.Attrs().Name, err)
}
return nil
}
diff --git a/vendor/github.com/vishvananda/netlink/neigh_linux.go b/vendor/github.com/vishvananda/netlink/neigh_linux.go
index 4c1e766351c..2d93044a6ea 100644
--- a/vendor/github.com/vishvananda/netlink/neigh_linux.go
+++ b/vendor/github.com/vishvananda/netlink/neigh_linux.go
@@ -339,13 +339,13 @@ func NeighDeserialize(m []byte) (*Neigh, error) {
// NeighSubscribe takes a chan down which notifications will be sent
// when neighbors are added or deleted. Close the 'done' chan to stop subscription.
func NeighSubscribe(ch chan<- NeighUpdate, done <-chan struct{}) error {
- return neighSubscribeAt(netns.None(), netns.None(), ch, done, nil, false)
+ return neighSubscribeAt(netns.None(), netns.None(), ch, done, nil, false, 0, nil, false)
}
// NeighSubscribeAt works like NeighSubscribe plus it allows the caller
// to choose the network namespace in which to subscribe (ns).
func NeighSubscribeAt(ns netns.NsHandle, ch chan<- NeighUpdate, done <-chan struct{}) error {
- return neighSubscribeAt(ns, netns.None(), ch, done, nil, false)
+ return neighSubscribeAt(ns, netns.None(), ch, done, nil, false, 0, nil, false)
}
// NeighSubscribeOptions contains a set of options to use with
@@ -354,6 +354,11 @@ type NeighSubscribeOptions struct {
Namespace *netns.NsHandle
ErrorCallback func(error)
ListExisting bool
+
+ // max size is based on value of /proc/sys/net/core/rmem_max
+ ReceiveBufferSize int
+ ReceiveBufferForceSize bool
+ ReceiveTimeout *unix.Timeval
}
// NeighSubscribeWithOptions work like NeighSubscribe but enable to
@@ -364,16 +369,17 @@ func NeighSubscribeWithOptions(ch chan<- NeighUpdate, done <-chan struct{}, opti
none := netns.None()
options.Namespace = &none
}
- return neighSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting)
+ return neighSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting,
+ options.ReceiveBufferSize, options.ReceiveTimeout, options.ReceiveBufferForceSize)
}
-func neighSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- NeighUpdate, done <-chan struct{}, cberr func(error), listExisting bool) error {
+func neighSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- NeighUpdate, done <-chan struct{}, cberr func(error), listExisting bool,
+ rcvbuf int, rcvTimeout *unix.Timeval, rcvbufForce bool) error {
s, err := nl.SubscribeAt(newNs, curNs, unix.NETLINK_ROUTE, unix.RTNLGRP_NEIGH)
makeRequest := func(family int) error {
- req := pkgHandle.newNetlinkRequest(unix.RTM_GETNEIGH,
- unix.NLM_F_DUMP)
- infmsg := nl.NewIfInfomsg(family)
- req.AddData(infmsg)
+ req := pkgHandle.newNetlinkRequest(unix.RTM_GETNEIGH, unix.NLM_F_DUMP)
+ ndmsg := &Ndmsg{Family: uint8(family)}
+ req.AddData(ndmsg)
if err := s.Send(req); err != nil {
return err
}
@@ -382,6 +388,17 @@ func neighSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- NeighUpdate, done <
if err != nil {
return err
}
+ if rcvTimeout != nil {
+ if err := s.SetReceiveTimeout(rcvTimeout); err != nil {
+ return err
+ }
+ }
+ if rcvbuf != 0 {
+ err = s.SetReceiveBufferSize(rcvbuf, rcvbufForce)
+ if err != nil {
+ return err
+ }
+ }
if done != nil {
go func() {
<-done
@@ -427,12 +444,12 @@ func neighSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- NeighUpdate, done <
continue
}
if m.Header.Type == unix.NLMSG_ERROR {
- error := int32(native.Uint32(m.Data[0:4]))
- if error == 0 {
+ nError := int32(native.Uint32(m.Data[0:4]))
+ if nError == 0 {
continue
}
if cberr != nil {
- cberr(syscall.Errno(-error))
+ cberr(syscall.Errno(-nError))
}
return
}
diff --git a/vendor/github.com/vishvananda/netlink/netlink_unspecified.go b/vendor/github.com/vishvananda/netlink/netlink_unspecified.go
index 98d2c0dbf37..da12c42a560 100644
--- a/vendor/github.com/vishvananda/netlink/netlink_unspecified.go
+++ b/vendor/github.com/vishvananda/netlink/netlink_unspecified.go
@@ -52,6 +52,10 @@ func LinkSetVfVlanQos(link Link, vf, vlan, qos int) error {
return ErrNotImplemented
}
+func LinkSetVfVlanQosProto(link Link, vf, vlan, qos, proto int) error {
+ return ErrNotImplemented
+}
+
func LinkSetVfTxRate(link Link, vf, rate int) error {
return ErrNotImplemented
}
@@ -124,6 +128,22 @@ func LinkSetTxQLen(link Link, qlen int) error {
return ErrNotImplemented
}
+func LinkSetGSOMaxSize(link Link, maxSize int) error {
+ return ErrNotImplemented
+}
+
+func LinkSetGROMaxSize(link Link, maxSize int) error {
+ return ErrNotImplemented
+}
+
+func LinkSetGSOIPv4MaxSize(link Link, maxSize int) error {
+ return ErrNotImplemented
+}
+
+func LinkSetGROIPv4MaxSize(link Link, maxSize int) error {
+ return ErrNotImplemented
+}
+
func LinkAdd(link Link) error {
return ErrNotImplemented
}
@@ -184,6 +204,10 @@ func RouteAppend(route *Route) error {
return ErrNotImplemented
}
+func RouteChange(route *Route) error {
+ return ErrNotImplemented
+}
+
func RouteDel(route *Route) error {
return ErrNotImplemented
}
@@ -216,6 +240,10 @@ func XfrmPolicyList(family int) ([]XfrmPolicy, error) {
return nil, ErrNotImplemented
}
+func XfrmPolicyGet(policy *XfrmPolicy) (*XfrmPolicy, error) {
+ return nil, ErrNotImplemented
+}
+
func XfrmStateAdd(policy *XfrmState) error {
return ErrNotImplemented
}
@@ -255,3 +283,7 @@ func NeighDeserialize(m []byte) (*Neigh, error) {
func SocketGet(local, remote net.Addr) (*Socket, error) {
return nil, ErrNotImplemented
}
+
+func SocketDestroy(local, remote net.Addr) (*Socket, error) {
+ return nil, ErrNotImplemented
+}
diff --git a/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go b/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go
index 183601803b8..6989d1edc0b 100644
--- a/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go
+++ b/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go
@@ -15,6 +15,38 @@ var L4ProtoMap = map[uint8]string{
17: "udp",
}
+// From https://git.netfilter.org/libnetfilter_conntrack/tree/include/libnetfilter_conntrack/libnetfilter_conntrack_tcp.h
+// enum tcp_state {
+// TCP_CONNTRACK_NONE,
+// TCP_CONNTRACK_SYN_SENT,
+// TCP_CONNTRACK_SYN_RECV,
+// TCP_CONNTRACK_ESTABLISHED,
+// TCP_CONNTRACK_FIN_WAIT,
+// TCP_CONNTRACK_CLOSE_WAIT,
+// TCP_CONNTRACK_LAST_ACK,
+// TCP_CONNTRACK_TIME_WAIT,
+// TCP_CONNTRACK_CLOSE,
+// TCP_CONNTRACK_LISTEN, /* obsolete */
+// #define TCP_CONNTRACK_SYN_SENT2 TCP_CONNTRACK_LISTEN
+// TCP_CONNTRACK_MAX,
+// TCP_CONNTRACK_IGNORE
+// };
+const (
+ TCP_CONNTRACK_NONE = 0
+ TCP_CONNTRACK_SYN_SENT = 1
+ TCP_CONNTRACK_SYN_RECV = 2
+ TCP_CONNTRACK_ESTABLISHED = 3
+ TCP_CONNTRACK_FIN_WAIT = 4
+ TCP_CONNTRACK_CLOSE_WAIT = 5
+ TCP_CONNTRACK_LAST_ACK = 6
+ TCP_CONNTRACK_TIME_WAIT = 7
+ TCP_CONNTRACK_CLOSE = 8
+ TCP_CONNTRACK_LISTEN = 9
+ TCP_CONNTRACK_SYN_SENT2 = 9
+ TCP_CONNTRACK_MAX = 10
+ TCP_CONNTRACK_IGNORE = 11
+)
+
// All the following constants are coming from:
// https://github.com/torvalds/linux/blob/master/include/uapi/linux/netfilter/nfnetlink_conntrack.h
@@ -31,6 +63,7 @@ var L4ProtoMap = map[uint8]string{
// IPCTNL_MSG_MAX
// };
const (
+ IPCTNL_MSG_CT_NEW = 0
IPCTNL_MSG_CT_GET = 1
IPCTNL_MSG_CT_DELETE = 2
)
@@ -88,7 +121,10 @@ const (
CTA_COUNTERS_REPLY = 10
CTA_USE = 11
CTA_ID = 12
+ CTA_ZONE = 18
CTA_TIMESTAMP = 20
+ CTA_LABELS = 22
+ CTA_LABELS_MASK = 23
)
// enum ctattr_tuple {
@@ -149,7 +185,10 @@ const (
// };
// #define CTA_PROTOINFO_MAX (__CTA_PROTOINFO_MAX - 1)
const (
+ CTA_PROTOINFO_UNSPEC = 0
CTA_PROTOINFO_TCP = 1
+ CTA_PROTOINFO_DCCP = 2
+ CTA_PROTOINFO_SCTP = 3
)
// enum ctattr_protoinfo_tcp {
diff --git a/vendor/github.com/vishvananda/netlink/nl/devlink_linux.go b/vendor/github.com/vishvananda/netlink/nl/devlink_linux.go
index 2995da492f6..956367b2957 100644
--- a/vendor/github.com/vishvananda/netlink/nl/devlink_linux.go
+++ b/vendor/github.com/vishvananda/netlink/nl/devlink_linux.go
@@ -9,39 +9,56 @@ const (
)
const (
- DEVLINK_CMD_GET = 1
- DEVLINK_CMD_PORT_GET = 5
- DEVLINK_CMD_PORT_SET = 6
- DEVLINK_CMD_PORT_NEW = 7
- DEVLINK_CMD_PORT_DEL = 8
- DEVLINK_CMD_ESWITCH_GET = 29
- DEVLINK_CMD_ESWITCH_SET = 30
- DEVLINK_CMD_INFO_GET = 51
+ DEVLINK_CMD_GET = 1
+ DEVLINK_CMD_PORT_GET = 5
+ DEVLINK_CMD_PORT_SET = 6
+ DEVLINK_CMD_PORT_NEW = 7
+ DEVLINK_CMD_PORT_DEL = 8
+ DEVLINK_CMD_ESWITCH_GET = 29
+ DEVLINK_CMD_ESWITCH_SET = 30
+ DEVLINK_CMD_RESOURCE_DUMP = 36
+ DEVLINK_CMD_PARAM_GET = 38
+ DEVLINK_CMD_PARAM_SET = 39
+ DEVLINK_CMD_INFO_GET = 51
)
const (
- DEVLINK_ATTR_BUS_NAME = 1
- DEVLINK_ATTR_DEV_NAME = 2
- DEVLINK_ATTR_PORT_INDEX = 3
- DEVLINK_ATTR_PORT_TYPE = 4
- DEVLINK_ATTR_PORT_NETDEV_IFINDEX = 6
- DEVLINK_ATTR_PORT_NETDEV_NAME = 7
- DEVLINK_ATTR_PORT_IBDEV_NAME = 8
- DEVLINK_ATTR_ESWITCH_MODE = 25
- DEVLINK_ATTR_ESWITCH_INLINE_MODE = 26
- DEVLINK_ATTR_ESWITCH_ENCAP_MODE = 62
- DEVLINK_ATTR_PORT_FLAVOUR = 77
- DEVLINK_ATTR_INFO_DRIVER_NAME = 98
- DEVLINK_ATTR_INFO_SERIAL_NUMBER = 99
- DEVLINK_ATTR_INFO_VERSION_FIXED = 100
- DEVLINK_ATTR_INFO_VERSION_RUNNING = 101
- DEVLINK_ATTR_INFO_VERSION_STORED = 102
- DEVLINK_ATTR_INFO_VERSION_NAME = 103
- DEVLINK_ATTR_INFO_VERSION_VALUE = 104
- DEVLINK_ATTR_PORT_PCI_PF_NUMBER = 127
- DEVLINK_ATTR_PORT_FUNCTION = 145
- DEVLINK_ATTR_PORT_CONTROLLER_NUMBER = 150
- DEVLINK_ATTR_PORT_PCI_SF_NUMBER = 164
+ DEVLINK_ATTR_BUS_NAME = 1
+ DEVLINK_ATTR_DEV_NAME = 2
+ DEVLINK_ATTR_PORT_INDEX = 3
+ DEVLINK_ATTR_PORT_TYPE = 4
+ DEVLINK_ATTR_PORT_NETDEV_IFINDEX = 6
+ DEVLINK_ATTR_PORT_NETDEV_NAME = 7
+ DEVLINK_ATTR_PORT_IBDEV_NAME = 8
+ DEVLINK_ATTR_ESWITCH_MODE = 25
+ DEVLINK_ATTR_ESWITCH_INLINE_MODE = 26
+ DEVLINK_ATTR_ESWITCH_ENCAP_MODE = 62
+ DEVLINK_ATTR_RESOURCE_LIST = 63 /* nested */
+ DEVLINK_ATTR_RESOURCE = 64 /* nested */
+ DEVLINK_ATTR_RESOURCE_NAME = 65 /* string */
+ DEVLINK_ATTR_RESOURCE_ID = 66 /* u64 */
+ DEVLINK_ATTR_RESOURCE_SIZE = 67 /* u64 */
+ DEVLINK_ATTR_RESOURCE_SIZE_NEW = 68 /* u64 */
+ DEVLINK_ATTR_RESOURCE_SIZE_VALID = 69 /* u8 */
+ DEVLINK_ATTR_RESOURCE_SIZE_MIN = 70 /* u64 */
+ DEVLINK_ATTR_RESOURCE_SIZE_MAX = 71 /* u64 */
+ DEVLINK_ATTR_RESOURCE_SIZE_GRAN = 72 /* u64 */
+ DEVLINK_ATTR_RESOURCE_UNIT = 73 /* u8 */
+ DEVLINK_ATTR_RESOURCE_OCC = 74 /* u64 */
+ DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID = 75 /* u64 */
+ DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS = 76 /* u64 */
+ DEVLINK_ATTR_PORT_FLAVOUR = 77
+ DEVLINK_ATTR_INFO_DRIVER_NAME = 98
+ DEVLINK_ATTR_INFO_SERIAL_NUMBER = 99
+ DEVLINK_ATTR_INFO_VERSION_FIXED = 100
+ DEVLINK_ATTR_INFO_VERSION_RUNNING = 101
+ DEVLINK_ATTR_INFO_VERSION_STORED = 102
+ DEVLINK_ATTR_INFO_VERSION_NAME = 103
+ DEVLINK_ATTR_INFO_VERSION_VALUE = 104
+ DEVLINK_ATTR_PORT_PCI_PF_NUMBER = 127
+ DEVLINK_ATTR_PORT_FUNCTION = 145
+ DEVLINK_ATTR_PORT_CONTROLLER_NUMBER = 150
+ DEVLINK_ATTR_PORT_PCI_SF_NUMBER = 164
)
const (
@@ -94,3 +111,32 @@ const (
DEVLINK_PORT_FN_OPSTATE_DETACHED = 0
DEVLINK_PORT_FN_OPSTATE_ATTACHED = 1
)
+
+const (
+ DEVLINK_RESOURCE_UNIT_ENTRY uint8 = 0
+)
+
+const (
+ DEVLINK_ATTR_PARAM = iota + 80 /* nested */
+ DEVLINK_ATTR_PARAM_NAME /* string */
+ DEVLINK_ATTR_PARAM_GENERIC /* flag */
+ DEVLINK_ATTR_PARAM_TYPE /* u8 */
+ DEVLINK_ATTR_PARAM_VALUES_LIST /* nested */
+ DEVLINK_ATTR_PARAM_VALUE /* nested */
+ DEVLINK_ATTR_PARAM_VALUE_DATA /* dynamic */
+ DEVLINK_ATTR_PARAM_VALUE_CMODE /* u8 */
+)
+
+const (
+ DEVLINK_PARAM_TYPE_U8 = 1
+ DEVLINK_PARAM_TYPE_U16 = 2
+ DEVLINK_PARAM_TYPE_U32 = 3
+ DEVLINK_PARAM_TYPE_STRING = 5
+ DEVLINK_PARAM_TYPE_BOOL = 6
+)
+
+const (
+ DEVLINK_PARAM_CMODE_RUNTIME = iota
+ DEVLINK_PARAM_CMODE_DRIVERINIT
+ DEVLINK_PARAM_CMODE_PERMANENT
+)
diff --git a/vendor/github.com/vishvananda/netlink/nl/ip6tnl_linux.go b/vendor/github.com/vishvananda/netlink/nl/ip6tnl_linux.go
new file mode 100644
index 00000000000..d5dd69e0c40
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/nl/ip6tnl_linux.go
@@ -0,0 +1,21 @@
+package nl
+
+// id's of route attribute from https://elixir.bootlin.com/linux/v5.17.3/source/include/uapi/linux/lwtunnel.h#L38
+// the value's size are specified in https://elixir.bootlin.com/linux/v5.17.3/source/net/ipv4/ip_tunnel_core.c#L928
+
+const (
+ LWTUNNEL_IP6_UNSPEC = iota
+ LWTUNNEL_IP6_ID
+ LWTUNNEL_IP6_DST
+ LWTUNNEL_IP6_SRC
+ LWTUNNEL_IP6_HOPLIMIT
+ LWTUNNEL_IP6_TC
+ LWTUNNEL_IP6_FLAGS
+ LWTUNNEL_IP6_PAD // not implemented
+ LWTUNNEL_IP6_OPTS // not implemented
+ __LWTUNNEL_IP6_MAX
+)
+
+
+
+
diff --git a/vendor/github.com/vishvananda/netlink/nl/ipset_linux.go b/vendor/github.com/vishvananda/netlink/nl/ipset_linux.go
index a60b4b09d9b..89dd009df1f 100644
--- a/vendor/github.com/vishvananda/netlink/nl/ipset_linux.go
+++ b/vendor/github.com/vishvananda/netlink/nl/ipset_linux.go
@@ -88,6 +88,11 @@ const (
SET_ATTR_CREATE_MAX
)
+const (
+ IPSET_ATTR_IPADDR_IPV4 = 1
+ IPSET_ATTR_IPADDR_IPV6 = 2
+)
+
/* ADT specific attributes */
const (
IPSET_ATTR_ETHER = IPSET_ATTR_CADT_MAX + iota + 1
diff --git a/vendor/github.com/vishvananda/netlink/nl/link_linux.go b/vendor/github.com/vishvananda/netlink/nl/link_linux.go
index e10edbc09d8..0b5be470cb0 100644
--- a/vendor/github.com/vishvananda/netlink/nl/link_linux.go
+++ b/vendor/github.com/vishvananda/netlink/nl/link_linux.go
@@ -3,6 +3,7 @@ package nl
import (
"bytes"
"encoding/binary"
+ "fmt"
"unsafe"
)
@@ -30,6 +31,16 @@ const (
IFLA_VLAN_MAX = IFLA_VLAN_PROTOCOL
)
+const (
+ IFLA_NETKIT_UNSPEC = iota
+ IFLA_NETKIT_PEER_INFO
+ IFLA_NETKIT_PRIMARY
+ IFLA_NETKIT_POLICY
+ IFLA_NETKIT_PEER_POLICY
+ IFLA_NETKIT_MODE
+ IFLA_NETKIT_MAX = IFLA_NETKIT_MODE
+)
+
const (
VETH_INFO_UNSPEC = iota
VETH_INFO_PEER
@@ -85,7 +96,37 @@ const (
IFLA_BRPORT_PROXYARP
IFLA_BRPORT_LEARNING_SYNC
IFLA_BRPORT_PROXYARP_WIFI
- IFLA_BRPORT_MAX = IFLA_BRPORT_PROXYARP_WIFI
+ IFLA_BRPORT_ROOT_ID
+ IFLA_BRPORT_BRIDGE_ID
+ IFLA_BRPORT_DESIGNATED_PORT
+ IFLA_BRPORT_DESIGNATED_COST
+ IFLA_BRPORT_ID
+ IFLA_BRPORT_NO
+ IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
+ IFLA_BRPORT_CONFIG_PENDING
+ IFLA_BRPORT_MESSAGE_AGE_TIMER
+ IFLA_BRPORT_FORWARD_DELAY_TIMER
+ IFLA_BRPORT_HOLD_TIMER
+ IFLA_BRPORT_FLUSH
+ IFLA_BRPORT_MULTICAST_ROUTER
+ IFLA_BRPORT_PAD
+ IFLA_BRPORT_MCAST_FLOOD
+ IFLA_BRPORT_MCAST_TO_UCAST
+ IFLA_BRPORT_VLAN_TUNNEL
+ IFLA_BRPORT_BCAST_FLOOD
+ IFLA_BRPORT_GROUP_FWD_MASK
+ IFLA_BRPORT_NEIGH_SUPPRESS
+ IFLA_BRPORT_ISOLATED
+ IFLA_BRPORT_BACKUP_PORT
+ IFLA_BRPORT_MRP_RING_OPEN
+ IFLA_BRPORT_MRP_IN_OPEN
+ IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT
+ IFLA_BRPORT_MCAST_EHT_HOSTS_CNT
+ IFLA_BRPORT_LOCKED
+ IFLA_BRPORT_MAB
+ IFLA_BRPORT_MCAST_N_GROUPS
+ IFLA_BRPORT_MCAST_MAX_GROUPS
+ IFLA_BRPORT_MAX = IFLA_BRPORT_MCAST_MAX_GROUPS
)
const (
@@ -103,7 +144,9 @@ const (
IFLA_MACVLAN_MACADDR
IFLA_MACVLAN_MACADDR_DATA
IFLA_MACVLAN_MACADDR_COUNT
- IFLA_MACVLAN_MAX = IFLA_MACVLAN_FLAGS
+ IFLA_MACVLAN_BC_QUEUE_LEN
+ IFLA_MACVLAN_BC_QUEUE_LEN_USED
+ IFLA_MACVLAN_MAX = IFLA_MACVLAN_BC_QUEUE_LEN_USED
)
const (
@@ -186,7 +229,10 @@ const (
IFLA_GENEVE_UDP_ZERO_CSUM6_TX
IFLA_GENEVE_UDP_ZERO_CSUM6_RX
IFLA_GENEVE_LABEL
- IFLA_GENEVE_MAX = IFLA_GENEVE_LABEL
+ IFLA_GENEVE_TTL_INHERIT
+ IFLA_GENEVE_DF
+ IFLA_GENEVE_INNER_PROTO_INHERIT
+ IFLA_GENEVE_MAX = IFLA_GENEVE_INNER_PROTO_INHERIT
)
const (
@@ -244,7 +290,15 @@ const (
IFLA_VF_TRUST /* Trust state of VF */
IFLA_VF_IB_NODE_GUID /* VF Infiniband node GUID */
IFLA_VF_IB_PORT_GUID /* VF Infiniband port GUID */
- IFLA_VF_MAX = IFLA_VF_IB_PORT_GUID
+ IFLA_VF_VLAN_LIST /* nested list of vlans, option for QinQ */
+
+ IFLA_VF_MAX = IFLA_VF_IB_PORT_GUID
+)
+
+const (
+ IFLA_VF_VLAN_INFO_UNSPEC = iota
+ IFLA_VF_VLAN_INFO /* VLAN ID, QoS and VLAN protocol */
+ __IFLA_VF_VLAN_INFO_MAX
)
const (
@@ -269,6 +323,7 @@ const (
const (
SizeofVfMac = 0x24
SizeofVfVlan = 0x0c
+ SizeofVfVlanInfo = 0x10
SizeofVfTxRate = 0x08
SizeofVfRate = 0x0c
SizeofVfSpoofchk = 0x08
@@ -324,6 +379,49 @@ func (msg *VfVlan) Serialize() []byte {
return (*(*[SizeofVfVlan]byte)(unsafe.Pointer(msg)))[:]
}
+func DeserializeVfVlanList(b []byte) ([]*VfVlanInfo, error) {
+ var vfVlanInfoList []*VfVlanInfo
+ attrs, err := ParseRouteAttr(b)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, element := range attrs {
+ if element.Attr.Type == IFLA_VF_VLAN_INFO {
+ vfVlanInfoList = append(vfVlanInfoList, DeserializeVfVlanInfo(element.Value))
+ }
+ }
+
+ if len(vfVlanInfoList) == 0 {
+ return nil, fmt.Errorf("VF vlan list is defined but no vf vlan info elements were found")
+ }
+
+ return vfVlanInfoList, nil
+}
+
+// struct ifla_vf_vlan_info {
+// __u32 vf;
+// __u32 vlan; /* 0 - 4095, 0 disables VLAN filter */
+// __u32 qos;
+// __be16 vlan_proto; /* VLAN protocol either 802.1Q or 802.1ad */
+// };
+
+type VfVlanInfo struct {
+ VfVlan
+ VlanProto uint16
+}
+
+func DeserializeVfVlanInfo(b []byte) *VfVlanInfo {
+ return &VfVlanInfo{
+ *(*VfVlan)(unsafe.Pointer(&b[0:SizeofVfVlan][0])),
+ binary.BigEndian.Uint16(b[SizeofVfVlan:SizeofVfVlanInfo]),
+ }
+}
+
+func (msg *VfVlanInfo) Serialize() []byte {
+ return (*(*[SizeofVfVlanInfo]byte)(unsafe.Pointer(msg)))[:]
+}
+
// struct ifla_vf_tx_rate {
// __u32 vf;
// __u32 rate; /* Max TX bandwidth in Mbps, 0 disables throttling */
diff --git a/vendor/github.com/vishvananda/netlink/nl/nl_linux.go b/vendor/github.com/vishvananda/netlink/nl/nl_linux.go
index 600b942b178..6cecc4517a5 100644
--- a/vendor/github.com/vishvananda/netlink/nl/nl_linux.go
+++ b/vendor/github.com/vishvananda/netlink/nl/nl_linux.go
@@ -6,6 +6,7 @@ import (
"encoding/binary"
"fmt"
"net"
+ "os"
"runtime"
"sync"
"sync/atomic"
@@ -330,6 +331,19 @@ func NewIfInfomsgChild(parent *RtAttr, family int) *IfInfomsg {
return msg
}
+type Uint32Bitfield struct {
+ Value uint32
+ Selector uint32
+}
+
+func (a *Uint32Bitfield) Serialize() []byte {
+ return (*(*[SizeofUint32Bitfield]byte)(unsafe.Pointer(a)))[:]
+}
+
+func DeserializeUint32Bitfield(data []byte) *Uint32Bitfield {
+ return (*Uint32Bitfield)(unsafe.Pointer(&data[0:SizeofUint32Bitfield][0]))
+}
+
type Uint32Attribute struct {
Type uint16
Value uint32
@@ -475,10 +489,30 @@ func (req *NetlinkRequest) AddRawData(data []byte) {
req.RawData = append(req.RawData, data...)
}
-// Execute the request against a the given sockType.
+// Execute the request against the given sockType.
// Returns a list of netlink messages in serialized format, optionally filtered
// by resType.
func (req *NetlinkRequest) Execute(sockType int, resType uint16) ([][]byte, error) {
+ var res [][]byte
+ err := req.ExecuteIter(sockType, resType, func(msg []byte) bool {
+ res = append(res, msg)
+ return true
+ })
+ if err != nil {
+ return nil, err
+ }
+ return res, nil
+}
+
+// ExecuteIter executes the request against the given sockType.
+// Calls the provided callback func once for each netlink message.
+// If the callback returns false, it is not called again, but
+// the remaining messages are consumed/discarded.
+//
+// Thread safety: ExecuteIter holds a lock on the socket until
+// it finishes iteration so the callback must not call back into
+// the netlink API.
+func (req *NetlinkRequest) ExecuteIter(sockType int, resType uint16, f func(msg []byte) bool) error {
var (
s *NetlinkSocket
err error
@@ -495,18 +529,18 @@ func (req *NetlinkRequest) Execute(sockType int, resType uint16) ([][]byte, erro
if s == nil {
s, err = getNetlinkSocket(sockType)
if err != nil {
- return nil, err
+ return err
}
if err := s.SetSendTimeout(&SocketTimeoutTv); err != nil {
- return nil, err
+ return err
}
if err := s.SetReceiveTimeout(&SocketTimeoutTv); err != nil {
- return nil, err
+ return err
}
if EnableErrorMessageReporting {
if err := s.SetExtAck(true); err != nil {
- return nil, err
+ return err
}
}
@@ -517,36 +551,44 @@ func (req *NetlinkRequest) Execute(sockType int, resType uint16) ([][]byte, erro
}
if err := s.Send(req); err != nil {
- return nil, err
+ return err
}
pid, err := s.GetPid()
if err != nil {
- return nil, err
+ return err
}
- var res [][]byte
-
done:
for {
msgs, from, err := s.Receive()
if err != nil {
- return nil, err
+ return err
}
if from.Pid != PidKernel {
- return nil, fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, PidKernel)
+ return fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, PidKernel)
}
for _, m := range msgs {
if m.Header.Seq != req.Seq {
if sharedSocket {
continue
}
- return nil, fmt.Errorf("Wrong Seq nr %d, expected %d", m.Header.Seq, req.Seq)
+ return fmt.Errorf("Wrong Seq nr %d, expected %d", m.Header.Seq, req.Seq)
}
if m.Header.Pid != pid {
continue
}
+
+ if m.Header.Flags&unix.NLM_F_DUMP_INTR != 0 {
+ return syscall.Errno(unix.EINTR)
+ }
+
if m.Header.Type == unix.NLMSG_DONE || m.Header.Type == unix.NLMSG_ERROR {
+ // NLMSG_DONE might have no payload, if so assume no error.
+ if m.Header.Type == unix.NLMSG_DONE && len(m.Data) == 0 {
+ break done
+ }
+
native := NativeEndian()
errno := int32(native.Uint32(m.Data[0:4]))
if errno == 0 {
@@ -556,7 +598,7 @@ done:
err = syscall.Errno(-errno)
unreadData := m.Data[4:]
- if m.Header.Flags|unix.NLM_F_ACK_TLVS != 0 && len(unreadData) > syscall.SizeofNlMsghdr {
+ if m.Header.Flags&unix.NLM_F_ACK_TLVS != 0 && len(unreadData) > syscall.SizeofNlMsghdr {
// Skip the echoed request message.
echoReqH := (*syscall.NlMsghdr)(unsafe.Pointer(&unreadData[0]))
unreadData = unreadData[nlmAlignOf(int(echoReqH.Len)):]
@@ -568,8 +610,7 @@ done:
switch attr.Type {
case NLMSGERR_ATTR_MSG:
- err = fmt.Errorf("%w: %s", err, string(attrData))
-
+ err = fmt.Errorf("%w: %s", err, unix.ByteSliceToString(attrData))
default:
// TODO: handle other NLMSGERR_ATTR types
}
@@ -578,18 +619,26 @@ done:
}
}
- return nil, err
+ return err
}
if resType != 0 && m.Header.Type != resType {
continue
}
- res = append(res, m.Data)
+ if cont := f(m.Data); !cont {
+ // Drain the rest of the messages from the kernel but don't
+ // pass them to the iterator func.
+ f = dummyMsgIterFunc
+ }
if m.Header.Flags&unix.NLM_F_MULTI == 0 {
break done
}
}
}
- return res, nil
+ return nil
+}
+
+func dummyMsgIterFunc(msg []byte) bool {
+ return true
}
// Create a new netlink request from proto and flags
@@ -607,8 +656,9 @@ func NewNetlinkRequest(proto, flags int) *NetlinkRequest {
}
type NetlinkSocket struct {
- fd int32
- lsa unix.SockaddrNetlink
+ fd int32
+ file *os.File
+ lsa unix.SockaddrNetlink
sync.Mutex
}
@@ -617,8 +667,13 @@ func getNetlinkSocket(protocol int) (*NetlinkSocket, error) {
if err != nil {
return nil, err
}
+ err = unix.SetNonblock(fd, true)
+ if err != nil {
+ return nil, err
+ }
s := &NetlinkSocket{
- fd: int32(fd),
+ fd: int32(fd),
+ file: os.NewFile(uintptr(fd), "netlink"),
}
s.lsa.Family = unix.AF_NETLINK
if err := unix.Bind(fd, &s.lsa); err != nil {
@@ -649,12 +704,14 @@ func GetNetlinkSocketAt(newNs, curNs netns.NsHandle, protocol int) (*NetlinkSock
// In case of success, the caller is expected to execute the returned function
// at the end of the code that needs to be executed in the network namespace.
// Example:
-// func jobAt(...) error {
-// d, err := executeInNetns(...)
-// if err != nil { return err}
-// defer d()
-// < code which needs to be executed in specific netns>
-// }
+//
+// func jobAt(...) error {
+// d, err := executeInNetns(...)
+// if err != nil { return err}
+// defer d()
+// < code which needs to be executed in specific netns>
+// }
+//
// TODO: his function probably belongs to netns pkg.
func executeInNetns(newNs, curNs netns.NsHandle) (func(), error) {
var (
@@ -703,8 +760,13 @@ func Subscribe(protocol int, groups ...uint) (*NetlinkSocket, error) {
if err != nil {
return nil, err
}
+ err = unix.SetNonblock(fd, true)
+ if err != nil {
+ return nil, err
+ }
s := &NetlinkSocket{
- fd: int32(fd),
+ fd: int32(fd),
+ file: os.NewFile(uintptr(fd), "netlink"),
}
s.lsa.Family = unix.AF_NETLINK
@@ -733,33 +795,36 @@ func SubscribeAt(newNs, curNs netns.NsHandle, protocol int, groups ...uint) (*Ne
}
func (s *NetlinkSocket) Close() {
- fd := int(atomic.SwapInt32(&s.fd, -1))
- unix.Close(fd)
+ s.file.Close()
}
func (s *NetlinkSocket) GetFd() int {
- return int(atomic.LoadInt32(&s.fd))
+ return int(s.fd)
}
func (s *NetlinkSocket) Send(request *NetlinkRequest) error {
- fd := int(atomic.LoadInt32(&s.fd))
- if fd < 0 {
- return fmt.Errorf("Send called on a closed socket")
- }
- if err := unix.Sendto(fd, request.Serialize(), 0, &s.lsa); err != nil {
- return err
- }
- return nil
+ return unix.Sendto(int(s.fd), request.Serialize(), 0, &s.lsa)
}
func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, *unix.SockaddrNetlink, error) {
- fd := int(atomic.LoadInt32(&s.fd))
- if fd < 0 {
- return nil, nil, fmt.Errorf("Receive called on a closed socket")
+ rawConn, err := s.file.SyscallConn()
+ if err != nil {
+ return nil, nil, err
+ }
+ var (
+ fromAddr *unix.SockaddrNetlink
+ rb [RECEIVE_BUFFER_SIZE]byte
+ nr int
+ from unix.Sockaddr
+ innerErr error
+ )
+ err = rawConn.Read(func(fd uintptr) (done bool) {
+ nr, from, innerErr = unix.Recvfrom(int(fd), rb[:], 0)
+ return innerErr != unix.EWOULDBLOCK
+ })
+ if innerErr != nil {
+ err = innerErr
}
- var fromAddr *unix.SockaddrNetlink
- var rb [RECEIVE_BUFFER_SIZE]byte
- nr, from, err := unix.Recvfrom(fd, rb[:], 0)
if err != nil {
return nil, nil, err
}
@@ -770,8 +835,9 @@ func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, *unix.SockaddrNetli
if nr < unix.NLMSG_HDRLEN {
return nil, nil, fmt.Errorf("Got short response from netlink")
}
- rb2 := make([]byte, nr)
- copy(rb2, rb[:nr])
+ msgLen := nlmAlignOf(nr)
+ rb2 := make([]byte, msgLen)
+ copy(rb2, rb[:msgLen])
nl, err := syscall.ParseNetlinkMessage(rb2)
if err != nil {
return nil, nil, err
@@ -793,6 +859,15 @@ func (s *NetlinkSocket) SetReceiveTimeout(timeout *unix.Timeval) error {
return unix.SetsockoptTimeval(int(s.fd), unix.SOL_SOCKET, unix.SO_RCVTIMEO, timeout)
}
+// SetReceiveBufferSize allows to set a receive buffer size on the socket
+func (s *NetlinkSocket) SetReceiveBufferSize(size int, force bool) error {
+ opt := unix.SO_RCVBUF
+ if force {
+ opt = unix.SO_RCVBUFFORCE
+ }
+ return unix.SetsockoptInt(int(s.fd), unix.SOL_SOCKET, opt, size)
+}
+
// SetExtAck requests error messages to be reported on the socket
func (s *NetlinkSocket) SetExtAck(enable bool) error {
var enableN int
@@ -804,8 +879,7 @@ func (s *NetlinkSocket) SetExtAck(enable bool) error {
}
func (s *NetlinkSocket) GetPid() (uint32, error) {
- fd := int(atomic.LoadInt32(&s.fd))
- lsa, err := unix.Getsockname(fd)
+ lsa, err := unix.Getsockname(int(s.fd))
if err != nil {
return 0, err
}
@@ -849,6 +923,12 @@ func Uint16Attr(v uint16) []byte {
return bytes
}
+func BEUint16Attr(v uint16) []byte {
+ bytes := make([]byte, 2)
+ binary.BigEndian.PutUint16(bytes, v)
+ return bytes
+}
+
func Uint32Attr(v uint32) []byte {
native := NativeEndian()
bytes := make([]byte, 4)
@@ -856,6 +936,12 @@ func Uint32Attr(v uint32) []byte {
return bytes
}
+func BEUint32Attr(v uint32) []byte {
+ bytes := make([]byte, 4)
+ binary.BigEndian.PutUint32(bytes, v)
+ return bytes
+}
+
func Uint64Attr(v uint64) []byte {
native := NativeEndian()
bytes := make([]byte, 8)
@@ -863,6 +949,12 @@ func Uint64Attr(v uint64) []byte {
return bytes
}
+func BEUint64Attr(v uint64) []byte {
+ bytes := make([]byte, 8)
+ binary.BigEndian.PutUint64(bytes, v)
+ return bytes
+}
+
func ParseRouteAttr(b []byte) ([]syscall.NetlinkRouteAttr, error) {
var attrs []syscall.NetlinkRouteAttr
for len(b) >= unix.SizeofRtAttr {
@@ -877,6 +969,22 @@ func ParseRouteAttr(b []byte) ([]syscall.NetlinkRouteAttr, error) {
return attrs, nil
}
+// ParseRouteAttrAsMap parses provided buffer that contains raw RtAttrs and returns a map of parsed
+// atttributes indexed by attribute type or error if occured.
+func ParseRouteAttrAsMap(b []byte) (map[uint16]syscall.NetlinkRouteAttr, error) {
+ attrMap := make(map[uint16]syscall.NetlinkRouteAttr)
+
+ attrs, err := ParseRouteAttr(b)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, attr := range attrs {
+ attrMap[attr.Attr.Type] = attr
+ }
+ return attrMap, nil
+}
+
func netlinkRouteAttrAndValue(b []byte) (*unix.RtAttr, []byte, int, error) {
a := (*unix.RtAttr)(unsafe.Pointer(&b[0]))
if int(a.Len) < unix.SizeofRtAttr || int(a.Len) > len(b) {
diff --git a/vendor/github.com/vishvananda/netlink/nl/route_linux.go b/vendor/github.com/vishvananda/netlink/nl/route_linux.go
index 03c1900ffa8..c26f3bf91ae 100644
--- a/vendor/github.com/vishvananda/netlink/nl/route_linux.go
+++ b/vendor/github.com/vishvananda/netlink/nl/route_linux.go
@@ -48,7 +48,9 @@ type RtNexthop struct {
}
func DeserializeRtNexthop(b []byte) *RtNexthop {
- return (*RtNexthop)(unsafe.Pointer(&b[0:unix.SizeofRtNexthop][0]))
+ return &RtNexthop{
+ RtNexthop: *((*unix.RtNexthop)(unsafe.Pointer(&b[0:unix.SizeofRtNexthop][0]))),
+ }
}
func (msg *RtNexthop) Len() int {
diff --git a/vendor/github.com/vishvananda/netlink/nl/seg6local_linux.go b/vendor/github.com/vishvananda/netlink/nl/seg6local_linux.go
index 1500177267a..8172b8471f2 100644
--- a/vendor/github.com/vishvananda/netlink/nl/seg6local_linux.go
+++ b/vendor/github.com/vishvananda/netlink/nl/seg6local_linux.go
@@ -12,6 +12,7 @@ const (
SEG6_LOCAL_NH6
SEG6_LOCAL_IIF
SEG6_LOCAL_OIF
+ SEG6_LOCAL_BPF
__SEG6_LOCAL_MAX
)
const (
@@ -34,6 +35,7 @@ const (
SEG6_LOCAL_ACTION_END_S // 12
SEG6_LOCAL_ACTION_END_AS // 13
SEG6_LOCAL_ACTION_END_AM // 14
+ SEG6_LOCAL_ACTION_END_BPF // 15
__SEG6_LOCAL_ACTION_MAX
)
const (
@@ -71,6 +73,8 @@ func SEG6LocalActionString(action int) string {
return "End.AS"
case SEG6_LOCAL_ACTION_END_AM:
return "End.AM"
+ case SEG6_LOCAL_ACTION_END_BPF:
+ return "End.BPF"
}
return "unknown"
}
diff --git a/vendor/github.com/vishvananda/netlink/nl/syscall.go b/vendor/github.com/vishvananda/netlink/nl/syscall.go
index bdf6ba63957..b5ba039acb3 100644
--- a/vendor/github.com/vishvananda/netlink/nl/syscall.go
+++ b/vendor/github.com/vishvananda/netlink/nl/syscall.go
@@ -46,6 +46,7 @@ const (
// socket diags related
const (
SOCK_DIAG_BY_FAMILY = 20 /* linux.sock_diag.h */
+ SOCK_DESTROY = 21
TCPDIAG_NOCOOKIE = 0xFFFFFFFF /* TCPDIAG_NOCOOKIE in net/ipv4/tcp_diag.h*/
)
diff --git a/vendor/github.com/vishvananda/netlink/nl/tc_linux.go b/vendor/github.com/vishvananda/netlink/nl/tc_linux.go
index eb05ff1cd1c..0720729a900 100644
--- a/vendor/github.com/vishvananda/netlink/nl/tc_linux.go
+++ b/vendor/github.com/vishvananda/netlink/nl/tc_linux.go
@@ -1,8 +1,13 @@
package nl
import (
+ "bytes"
"encoding/binary"
+ "fmt"
+ "net"
"unsafe"
+
+ "golang.org/x/sys/unix"
)
// LinkLayer
@@ -42,7 +47,14 @@ const (
TCA_FCNT
TCA_STATS2
TCA_STAB
- TCA_MAX = TCA_STAB
+ TCA_PAD
+ TCA_DUMP_INVISIBLE
+ TCA_CHAIN
+ TCA_HW_OFFLOAD
+ TCA_INGRESS_BLOCK
+ TCA_EGRESS_BLOCK
+ TCA_DUMP_FLAGS
+ TCA_MAX = TCA_DUMP_FLAGS
)
const (
@@ -56,6 +68,12 @@ const (
TCA_ACT_OPTIONS
TCA_ACT_INDEX
TCA_ACT_STATS
+ TCA_ACT_PAD
+ TCA_ACT_COOKIE
+ TCA_ACT_FLAGS
+ TCA_ACT_HW_STATS
+ TCA_ACT_USED_HW_STATS
+ TCA_ACT_IN_HW_COUNT
TCA_ACT_MAX
)
@@ -71,7 +89,11 @@ const (
TCA_STATS_RATE_EST
TCA_STATS_QUEUE
TCA_STATS_APP
- TCA_STATS_MAX = TCA_STATS_APP
+ TCA_STATS_RATE_EST64
+ TCA_STATS_PAD
+ TCA_STATS_BASIC_HW
+ TCA_STATS_PKT64
+ TCA_STATS_MAX = TCA_STATS_PKT64
)
const (
@@ -83,12 +105,13 @@ const (
SizeofTcNetemCorr = 0x0c
SizeofTcNetemReorder = 0x08
SizeofTcNetemCorrupt = 0x08
+ SizeOfTcNetemRate = 0x10
SizeofTcTbfQopt = 2*SizeofTcRateSpec + 0x0c
SizeofTcHtbCopt = 2*SizeofTcRateSpec + 0x14
SizeofTcHtbGlob = 0x14
SizeofTcU32Key = 0x10
SizeofTcU32Sel = 0x10 // without keys
- SizeofTcGen = 0x14
+ SizeofTcGen = 0x16
SizeofTcConnmark = SizeofTcGen + 0x04
SizeofTcCsum = SizeofTcGen + 0x04
SizeofTcMirred = SizeofTcGen + 0x08
@@ -98,6 +121,7 @@ const (
SizeofTcSfqQopt = 0x0b
SizeofTcSfqRedStats = 0x18
SizeofTcSfqQoptV1 = SizeofTcSfqQopt + SizeofTcSfqRedStats + 0x1c
+ SizeofUint32Bitfield = 0x8
)
// struct tcmsg {
@@ -131,6 +155,18 @@ func (x *TcMsg) Serialize() []byte {
return (*(*[SizeofTcMsg]byte)(unsafe.Pointer(x)))[:]
}
+type Tcf struct {
+ Install uint64
+ LastUse uint64
+ Expires uint64
+ FirstUse uint64
+}
+
+func DeserializeTcf(b []byte) *Tcf {
+ const size = int(unsafe.Sizeof(Tcf{}))
+ return (*Tcf)(unsafe.Pointer(&b[0:size][0]))
+}
+
// struct tcamsg {
// unsigned char tca_family;
// unsigned char tca__pad1;
@@ -337,6 +373,26 @@ func (x *TcNetemCorrupt) Serialize() []byte {
return (*(*[SizeofTcNetemCorrupt]byte)(unsafe.Pointer(x)))[:]
}
+// TcNetemRate is a struct that represents the rate of a netem qdisc
+type TcNetemRate struct {
+ Rate uint32
+ PacketOverhead int32
+ CellSize uint32
+ CellOverhead int32
+}
+
+func (msg *TcNetemRate) Len() int {
+ return SizeofTcRateSpec
+}
+
+func DeserializeTcNetemRate(b []byte) *TcNetemRate {
+ return (*TcNetemRate)(unsafe.Pointer(&b[0:SizeofTcRateSpec][0]))
+}
+
+func (msg *TcNetemRate) Serialize() []byte {
+ return (*(*[SizeOfTcNetemRate]byte)(unsafe.Pointer(msg)))[:]
+}
+
// struct tc_tbf_qopt {
// struct tc_ratespec rate;
// struct tc_ratespec peakrate;
@@ -804,7 +860,8 @@ const (
TCA_SKBEDIT_MARK
TCA_SKBEDIT_PAD
TCA_SKBEDIT_PTYPE
- TCA_SKBEDIT_MAX = TCA_SKBEDIT_MARK
+ TCA_SKBEDIT_MASK
+ TCA_SKBEDIT_MAX
)
type TcSkbEdit struct {
@@ -891,6 +948,10 @@ const (
TCA_FQ_FLOW_REFILL_DELAY // flow credit refill delay in usec
TCA_FQ_ORPHAN_MASK // mask applied to orphaned skb hashes
TCA_FQ_LOW_RATE_THRESHOLD // per packet delay under this rate
+ TCA_FQ_CE_THRESHOLD // DCTCP-like CE-marking threshold
+ TCA_FQ_TIMER_SLACK // timer slack
+ TCA_FQ_HORIZON // time horizon in us
+ TCA_FQ_HORIZON_DROP // drop packets beyond horizon, or cap their EDT
)
const (
@@ -1018,6 +1079,9 @@ const (
__TCA_FLOWER_MAX
)
+const TCA_CLS_FLAGS_SKIP_HW = 1 << 0 /* don't offload filter to HW */
+const TCA_CLS_FLAGS_SKIP_SW = 1 << 1 /* don't use filter in SW */
+
// struct tc_sfq_qopt {
// unsigned quantum; /* Bytes per round allocated to flow */
// int perturb_period; /* Period of hash perturbation */
@@ -1046,14 +1110,14 @@ func (x *TcSfqQopt) Serialize() []byte {
return (*(*[SizeofTcSfqQopt]byte)(unsafe.Pointer(x)))[:]
}
-// struct tc_sfqred_stats {
-// __u32 prob_drop; /* Early drops, below max threshold */
-// __u32 forced_drop; /* Early drops, after max threshold */
-// __u32 prob_mark; /* Marked packets, below max threshold */
-// __u32 forced_mark; /* Marked packets, after max threshold */
-// __u32 prob_mark_head; /* Marked packets, below max threshold */
-// __u32 forced_mark_head;/* Marked packets, after max threshold */
-// };
+// struct tc_sfqred_stats {
+// __u32 prob_drop; /* Early drops, below max threshold */
+// __u32 forced_drop; /* Early drops, after max threshold */
+// __u32 prob_mark; /* Marked packets, below max threshold */
+// __u32 forced_mark; /* Marked packets, after max threshold */
+// __u32 prob_mark_head; /* Marked packets, below max threshold */
+// __u32 forced_mark_head;/* Marked packets, after max threshold */
+// };
type TcSfqRedStats struct {
ProbDrop uint32
ForcedDrop uint32
@@ -1075,22 +1139,26 @@ func (x *TcSfqRedStats) Serialize() []byte {
return (*(*[SizeofTcSfqRedStats]byte)(unsafe.Pointer(x)))[:]
}
-// struct tc_sfq_qopt_v1 {
-// struct tc_sfq_qopt v0;
-// unsigned int depth; /* max number of packets per flow */
-// unsigned int headdrop;
+// struct tc_sfq_qopt_v1 {
+// struct tc_sfq_qopt v0;
+// unsigned int depth; /* max number of packets per flow */
+// unsigned int headdrop;
+//
// /* SFQRED parameters */
-// __u32 limit; /* HARD maximal flow queue length (bytes) */
-// __u32 qth_min; /* Min average length threshold (bytes) */
-// __u32 qth_max; /* Max average length threshold (bytes) */
-// unsigned char Wlog; /* log(W) */
-// unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */
-// unsigned char Scell_log; /* cell size for idle damping */
-// unsigned char flags;
-// __u32 max_P; /* probability, high resolution */
+//
+// __u32 limit; /* HARD maximal flow queue length (bytes) */
+// __u32 qth_min; /* Min average length threshold (bytes) */
+// __u32 qth_max; /* Max average length threshold (bytes) */
+// unsigned char Wlog; /* log(W) */
+// unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */
+// unsigned char Scell_log; /* cell size for idle damping */
+// unsigned char flags;
+// __u32 max_P; /* probability, high resolution */
+//
// /* SFQRED stats */
-// struct tc_sfqred_stats stats;
-// };
+//
+// struct tc_sfqred_stats stats;
+// };
type TcSfqQoptV1 struct {
TcSfqQopt
Depth uint32
@@ -1117,3 +1185,427 @@ func DeserializeTcSfqQoptV1(b []byte) *TcSfqQoptV1 {
func (x *TcSfqQoptV1) Serialize() []byte {
return (*(*[SizeofTcSfqQoptV1]byte)(unsafe.Pointer(x)))[:]
}
+
+// IPProto represents Flower ip_proto attribute
+type IPProto uint8
+
+const (
+ IPPROTO_TCP IPProto = unix.IPPROTO_TCP
+ IPPROTO_UDP IPProto = unix.IPPROTO_UDP
+ IPPROTO_SCTP IPProto = unix.IPPROTO_SCTP
+ IPPROTO_ICMP IPProto = unix.IPPROTO_ICMP
+ IPPROTO_ICMPV6 IPProto = unix.IPPROTO_ICMPV6
+)
+
+func (i IPProto) Serialize() []byte {
+ arr := make([]byte, 1)
+ arr[0] = byte(i)
+ return arr
+}
+
+func (i IPProto) String() string {
+ switch i {
+ case IPPROTO_TCP:
+ return "tcp"
+ case IPPROTO_UDP:
+ return "udp"
+ case IPPROTO_SCTP:
+ return "sctp"
+ case IPPROTO_ICMP:
+ return "icmp"
+ case IPPROTO_ICMPV6:
+ return "icmpv6"
+ }
+ return fmt.Sprintf("%d", i)
+}
+
+const (
+ MaxOffs = 128
+ SizeOfPeditSel = 24
+ SizeOfPeditKey = 24
+
+ TCA_PEDIT_KEY_EX_HTYPE = 1
+ TCA_PEDIT_KEY_EX_CMD = 2
+)
+
+const (
+ TCA_PEDIT_UNSPEC = iota
+ TCA_PEDIT_TM
+ TCA_PEDIT_PARMS
+ TCA_PEDIT_PAD
+ TCA_PEDIT_PARMS_EX
+ TCA_PEDIT_KEYS_EX
+ TCA_PEDIT_KEY_EX
+)
+
+// /* TCA_PEDIT_KEY_EX_HDR_TYPE_NETWROK is a special case for legacy users. It
+// * means no specific header type - offset is relative to the network layer
+// */
+type PeditHeaderType uint16
+
+const (
+ TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK = iota
+ TCA_PEDIT_KEY_EX_HDR_TYPE_ETH
+ TCA_PEDIT_KEY_EX_HDR_TYPE_IP4
+ TCA_PEDIT_KEY_EX_HDR_TYPE_IP6
+ TCA_PEDIT_KEY_EX_HDR_TYPE_TCP
+ TCA_PEDIT_KEY_EX_HDR_TYPE_UDP
+ __PEDIT_HDR_TYPE_MAX
+)
+
+type PeditCmd uint16
+
+const (
+ TCA_PEDIT_KEY_EX_CMD_SET = 0
+ TCA_PEDIT_KEY_EX_CMD_ADD = 1
+)
+
+type TcPeditSel struct {
+ TcGen
+ NKeys uint8
+ Flags uint8
+}
+
+func DeserializeTcPeditKey(b []byte) *TcPeditKey {
+ return (*TcPeditKey)(unsafe.Pointer(&b[0:SizeOfPeditKey][0]))
+}
+
+func DeserializeTcPedit(b []byte) (*TcPeditSel, []TcPeditKey) {
+ x := &TcPeditSel{}
+ copy((*(*[SizeOfPeditSel]byte)(unsafe.Pointer(x)))[:SizeOfPeditSel], b)
+
+ var keys []TcPeditKey
+
+ next := SizeOfPeditKey
+ var i uint8
+ for i = 0; i < x.NKeys; i++ {
+ keys = append(keys, *DeserializeTcPeditKey(b[next:]))
+ next += SizeOfPeditKey
+ }
+
+ return x, keys
+}
+
+type TcPeditKey struct {
+ Mask uint32
+ Val uint32
+ Off uint32
+ At uint32
+ OffMask uint32
+ Shift uint32
+}
+
+type TcPeditKeyEx struct {
+ HeaderType PeditHeaderType
+ Cmd PeditCmd
+}
+
+type TcPedit struct {
+ Sel TcPeditSel
+ Keys []TcPeditKey
+ KeysEx []TcPeditKeyEx
+ Extend uint8
+}
+
+func (p *TcPedit) Encode(parent *RtAttr) {
+ parent.AddRtAttr(TCA_ACT_KIND, ZeroTerminated("pedit"))
+ actOpts := parent.AddRtAttr(TCA_ACT_OPTIONS, nil)
+
+ bbuf := bytes.NewBuffer(make([]byte, 0, int(unsafe.Sizeof(p.Sel)+unsafe.Sizeof(p.Keys))))
+
+ bbuf.Write((*(*[SizeOfPeditSel]byte)(unsafe.Pointer(&p.Sel)))[:])
+
+ for i := uint8(0); i < p.Sel.NKeys; i++ {
+ bbuf.Write((*(*[SizeOfPeditKey]byte)(unsafe.Pointer(&p.Keys[i])))[:])
+ }
+ actOpts.AddRtAttr(TCA_PEDIT_PARMS_EX, bbuf.Bytes())
+
+ exAttrs := actOpts.AddRtAttr(int(TCA_PEDIT_KEYS_EX|NLA_F_NESTED), nil)
+ for i := uint8(0); i < p.Sel.NKeys; i++ {
+ keyAttr := exAttrs.AddRtAttr(int(TCA_PEDIT_KEY_EX|NLA_F_NESTED), nil)
+
+ htypeBuf := make([]byte, 2)
+ cmdBuf := make([]byte, 2)
+
+ NativeEndian().PutUint16(htypeBuf, uint16(p.KeysEx[i].HeaderType))
+ NativeEndian().PutUint16(cmdBuf, uint16(p.KeysEx[i].Cmd))
+
+ keyAttr.AddRtAttr(TCA_PEDIT_KEY_EX_HTYPE, htypeBuf)
+ keyAttr.AddRtAttr(TCA_PEDIT_KEY_EX_CMD, cmdBuf)
+ }
+}
+
+func (p *TcPedit) SetEthDst(mac net.HardwareAddr) {
+ u32 := NativeEndian().Uint32(mac)
+ u16 := NativeEndian().Uint16(mac[4:])
+
+ tKey := TcPeditKey{}
+ tKeyEx := TcPeditKeyEx{}
+
+ tKey.Val = u32
+
+ tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_ETH
+ tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET
+
+ p.Keys = append(p.Keys, tKey)
+ p.KeysEx = append(p.KeysEx, tKeyEx)
+ p.Sel.NKeys++
+
+ tKey = TcPeditKey{}
+ tKeyEx = TcPeditKeyEx{}
+
+ tKey.Val = uint32(u16)
+ tKey.Mask = 0xffff0000
+ tKey.Off = 4
+ tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_ETH
+ tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET
+
+ p.Keys = append(p.Keys, tKey)
+ p.KeysEx = append(p.KeysEx, tKeyEx)
+
+ p.Sel.NKeys++
+}
+
+func (p *TcPedit) SetEthSrc(mac net.HardwareAddr) {
+ u16 := NativeEndian().Uint16(mac)
+ u32 := NativeEndian().Uint32(mac[2:])
+
+ tKey := TcPeditKey{}
+ tKeyEx := TcPeditKeyEx{}
+
+ tKey.Val = uint32(u16) << 16
+ tKey.Mask = 0x0000ffff
+ tKey.Off = 4
+
+ tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_ETH
+ tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET
+
+ p.Keys = append(p.Keys, tKey)
+ p.KeysEx = append(p.KeysEx, tKeyEx)
+ p.Sel.NKeys++
+
+ tKey = TcPeditKey{}
+ tKeyEx = TcPeditKeyEx{}
+
+ tKey.Val = u32
+ tKey.Mask = 0
+ tKey.Off = 8
+
+ tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_ETH
+ tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET
+
+ p.Keys = append(p.Keys, tKey)
+ p.KeysEx = append(p.KeysEx, tKeyEx)
+
+ p.Sel.NKeys++
+}
+
+func (p *TcPedit) SetIPv6Src(ip6 net.IP) {
+ u32 := NativeEndian().Uint32(ip6[:4])
+
+ tKey := TcPeditKey{}
+ tKeyEx := TcPeditKeyEx{}
+
+ tKey.Val = u32
+ tKey.Off = 8
+ tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6
+ tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET
+
+ p.Keys = append(p.Keys, tKey)
+ p.KeysEx = append(p.KeysEx, tKeyEx)
+ p.Sel.NKeys++
+
+ u32 = NativeEndian().Uint32(ip6[4:8])
+ tKey = TcPeditKey{}
+ tKeyEx = TcPeditKeyEx{}
+
+ tKey.Val = u32
+ tKey.Off = 12
+ tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6
+ tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET
+
+ p.Keys = append(p.Keys, tKey)
+ p.KeysEx = append(p.KeysEx, tKeyEx)
+
+ p.Sel.NKeys++
+
+ u32 = NativeEndian().Uint32(ip6[8:12])
+ tKey = TcPeditKey{}
+ tKeyEx = TcPeditKeyEx{}
+
+ tKey.Val = u32
+ tKey.Off = 16
+ tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6
+ tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET
+
+ p.Keys = append(p.Keys, tKey)
+ p.KeysEx = append(p.KeysEx, tKeyEx)
+
+ p.Sel.NKeys++
+
+ u32 = NativeEndian().Uint32(ip6[12:16])
+ tKey = TcPeditKey{}
+ tKeyEx = TcPeditKeyEx{}
+
+ tKey.Val = u32
+ tKey.Off = 20
+ tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6
+ tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET
+
+ p.Keys = append(p.Keys, tKey)
+ p.KeysEx = append(p.KeysEx, tKeyEx)
+
+ p.Sel.NKeys++
+}
+
+func (p *TcPedit) SetDstIP(ip net.IP) {
+ if ip.To4() != nil {
+ p.SetIPv4Dst(ip)
+ } else {
+ p.SetIPv6Dst(ip)
+ }
+}
+
+func (p *TcPedit) SetSrcIP(ip net.IP) {
+ if ip.To4() != nil {
+ p.SetIPv4Src(ip)
+ } else {
+ p.SetIPv6Src(ip)
+ }
+}
+
+func (p *TcPedit) SetIPv6Dst(ip6 net.IP) {
+ u32 := NativeEndian().Uint32(ip6[:4])
+
+ tKey := TcPeditKey{}
+ tKeyEx := TcPeditKeyEx{}
+
+ tKey.Val = u32
+ tKey.Off = 24
+ tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6
+ tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET
+
+ p.Keys = append(p.Keys, tKey)
+ p.KeysEx = append(p.KeysEx, tKeyEx)
+ p.Sel.NKeys++
+
+ u32 = NativeEndian().Uint32(ip6[4:8])
+ tKey = TcPeditKey{}
+ tKeyEx = TcPeditKeyEx{}
+
+ tKey.Val = u32
+ tKey.Off = 28
+ tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6
+ tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET
+
+ p.Keys = append(p.Keys, tKey)
+ p.KeysEx = append(p.KeysEx, tKeyEx)
+
+ p.Sel.NKeys++
+
+ u32 = NativeEndian().Uint32(ip6[8:12])
+ tKey = TcPeditKey{}
+ tKeyEx = TcPeditKeyEx{}
+
+ tKey.Val = u32
+ tKey.Off = 32
+ tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6
+ tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET
+
+ p.Keys = append(p.Keys, tKey)
+ p.KeysEx = append(p.KeysEx, tKeyEx)
+
+ p.Sel.NKeys++
+
+ u32 = NativeEndian().Uint32(ip6[12:16])
+ tKey = TcPeditKey{}
+ tKeyEx = TcPeditKeyEx{}
+
+ tKey.Val = u32
+ tKey.Off = 36
+ tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6
+ tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET
+
+ p.Keys = append(p.Keys, tKey)
+ p.KeysEx = append(p.KeysEx, tKeyEx)
+
+ p.Sel.NKeys++
+}
+
+func (p *TcPedit) SetIPv4Src(ip net.IP) {
+ u32 := NativeEndian().Uint32(ip[:4])
+
+ tKey := TcPeditKey{}
+ tKeyEx := TcPeditKeyEx{}
+
+ tKey.Val = u32
+ tKey.Off = 12
+ tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP4
+ tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET
+
+ p.Keys = append(p.Keys, tKey)
+ p.KeysEx = append(p.KeysEx, tKeyEx)
+ p.Sel.NKeys++
+}
+
+func (p *TcPedit) SetIPv4Dst(ip net.IP) {
+ u32 := NativeEndian().Uint32(ip[:4])
+
+ tKey := TcPeditKey{}
+ tKeyEx := TcPeditKeyEx{}
+
+ tKey.Val = u32
+ tKey.Off = 16
+ tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP4
+ tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET
+
+ p.Keys = append(p.Keys, tKey)
+ p.KeysEx = append(p.KeysEx, tKeyEx)
+ p.Sel.NKeys++
+}
+
+// SetDstPort only tcp and udp are supported to set port
+func (p *TcPedit) SetDstPort(dstPort uint16, protocol uint8) {
+ tKey := TcPeditKey{}
+ tKeyEx := TcPeditKeyEx{}
+
+ switch protocol {
+ case unix.IPPROTO_TCP:
+ tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_TCP
+ case unix.IPPROTO_UDP:
+ tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_UDP
+ default:
+ return
+ }
+
+ tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET
+
+ tKey.Val = uint32(Swap16(dstPort)) << 16
+ tKey.Mask = 0x0000ffff
+ p.Keys = append(p.Keys, tKey)
+ p.KeysEx = append(p.KeysEx, tKeyEx)
+ p.Sel.NKeys++
+}
+
+// SetSrcPort only tcp and udp are supported to set port
+func (p *TcPedit) SetSrcPort(srcPort uint16, protocol uint8) {
+ tKey := TcPeditKey{}
+ tKeyEx := TcPeditKeyEx{}
+
+ switch protocol {
+ case unix.IPPROTO_TCP:
+ tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_TCP
+ case unix.IPPROTO_UDP:
+ tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_UDP
+ default:
+ return
+ }
+
+ tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET
+
+ tKey.Val = uint32(Swap16(srcPort))
+ tKey.Mask = 0xffff0000
+ p.Keys = append(p.Keys, tKey)
+ p.KeysEx = append(p.KeysEx, tKeyEx)
+ p.Sel.NKeys++
+}
diff --git a/vendor/github.com/vishvananda/netlink/nl/vdpa_linux.go b/vendor/github.com/vishvananda/netlink/nl/vdpa_linux.go
new file mode 100644
index 00000000000..f209125df4a
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/nl/vdpa_linux.go
@@ -0,0 +1,41 @@
+package nl
+
+const (
+ VDPA_GENL_NAME = "vdpa"
+ VDPA_GENL_VERSION = 0x1
+)
+
+const (
+ VDPA_CMD_UNSPEC = iota
+ VDPA_CMD_MGMTDEV_NEW
+ VDPA_CMD_MGMTDEV_GET /* can dump */
+ VDPA_CMD_DEV_NEW
+ VDPA_CMD_DEV_DEL
+ VDPA_CMD_DEV_GET /* can dump */
+ VDPA_CMD_DEV_CONFIG_GET /* can dump */
+ VDPA_CMD_DEV_VSTATS_GET
+)
+
+const (
+ VDPA_ATTR_UNSPEC = iota
+ VDPA_ATTR_MGMTDEV_BUS_NAME
+ VDPA_ATTR_MGMTDEV_DEV_NAME
+ VDPA_ATTR_MGMTDEV_SUPPORTED_CLASSES
+ VDPA_ATTR_DEV_NAME
+ VDPA_ATTR_DEV_ID
+ VDPA_ATTR_DEV_VENDOR_ID
+ VDPA_ATTR_DEV_MAX_VQS
+ VDPA_ATTR_DEV_MAX_VQ_SIZE
+ VDPA_ATTR_DEV_MIN_VQ_SIZE
+ VDPA_ATTR_DEV_NET_CFG_MACADDR
+ VDPA_ATTR_DEV_NET_STATUS
+ VDPA_ATTR_DEV_NET_CFG_MAX_VQP
+ VDPA_ATTR_DEV_NET_CFG_MTU
+ VDPA_ATTR_DEV_NEGOTIATED_FEATURES
+ VDPA_ATTR_DEV_MGMTDEV_MAX_VQS
+ VDPA_ATTR_DEV_SUPPORTED_FEATURES
+ VDPA_ATTR_DEV_QUEUE_INDEX
+ VDPA_ATTR_DEV_VENDOR_ATTR_NAME
+ VDPA_ATTR_DEV_VENDOR_ATTR_VALUE
+ VDPA_ATTR_DEV_FEATURES
+)
diff --git a/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go b/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go
index dce9073f7b5..cdb318ba557 100644
--- a/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go
+++ b/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go
@@ -131,7 +131,15 @@ func (x *XfrmAddress) ToIP() net.IP {
return ip
}
-func (x *XfrmAddress) ToIPNet(prefixlen uint8) *net.IPNet {
+// family is only used when x and prefixlen are both 0
+func (x *XfrmAddress) ToIPNet(prefixlen uint8, family uint16) *net.IPNet {
+ empty := [SizeofXfrmAddress]byte{}
+ if bytes.Equal(x[:], empty[:]) && prefixlen == 0 {
+ if family == FAMILY_V6 {
+ return &net.IPNet{IP: net.ParseIP("::"), Mask: net.CIDRMask(int(prefixlen), 128)}
+ }
+ return &net.IPNet{IP: net.ParseIP("0.0.0.0"), Mask: net.CIDRMask(int(prefixlen), 32)}
+ }
ip := x.ToIP()
if GetIPFamily(ip) == FAMILY_V4 {
return &net.IPNet{IP: ip, Mask: net.CIDRMask(int(prefixlen), 32)}
diff --git a/vendor/github.com/vishvananda/netlink/nl/xfrm_state_linux.go b/vendor/github.com/vishvananda/netlink/nl/xfrm_state_linux.go
index 43a947f2294..e8920b9a69b 100644
--- a/vendor/github.com/vishvananda/netlink/nl/xfrm_state_linux.go
+++ b/vendor/github.com/vishvananda/netlink/nl/xfrm_state_linux.go
@@ -15,6 +15,7 @@ const (
SizeofXfrmEncapTmpl = 0x18
SizeofXfrmUsersaFlush = 0x1
SizeofXfrmReplayStateEsn = 0x18
+ SizeofXfrmReplayState = 0x0c
)
const (
@@ -28,6 +29,11 @@ const (
XFRM_STATE_ESN = 128
)
+const (
+ XFRM_SA_XFLAG_DONT_ENCAP_DSCP = 1
+ XFRM_SA_XFLAG_OSEQ_MAY_WRAP = 2
+)
+
// struct xfrm_usersa_id {
// xfrm_address_t daddr;
// __be32 spi;
@@ -103,6 +109,7 @@ func (msg *XfrmStats) Serialize() []byte {
// };
//
// #define XFRM_SA_XFLAG_DONT_ENCAP_DSCP 1
+// #define XFRM_SA_XFLAG_OSEQ_MAY_WRAP 2
//
type XfrmUsersaInfo struct {
@@ -332,3 +339,23 @@ func (msg *XfrmReplayStateEsn) Serialize() []byte {
// We deliberately do not pass Bmp, as it gets set by the kernel.
return (*(*[SizeofXfrmReplayStateEsn]byte)(unsafe.Pointer(msg)))[:]
}
+
+// struct xfrm_replay_state {
+// __u32 oseq;
+// __u32 seq;
+// __u32 bitmap;
+// };
+
+type XfrmReplayState struct {
+ OSeq uint32
+ Seq uint32
+ BitMap uint32
+}
+
+func DeserializeXfrmReplayState(b []byte) *XfrmReplayState {
+ return (*XfrmReplayState)(unsafe.Pointer(&b[0:SizeofXfrmReplayState][0]))
+}
+
+func (msg *XfrmReplayState) Serialize() []byte {
+ return (*(*[SizeofXfrmReplayState]byte)(unsafe.Pointer(msg)))[:]
+}
diff --git a/vendor/github.com/vishvananda/netlink/proc_event_linux.go b/vendor/github.com/vishvananda/netlink/proc_event_linux.go
index 53bc59a6ecf..ac8762bd82e 100644
--- a/vendor/github.com/vishvananda/netlink/proc_event_linux.go
+++ b/vendor/github.com/vishvananda/netlink/proc_event_linux.go
@@ -63,15 +63,6 @@ type ExitProcEvent struct {
ParentTgid uint32
}
-type ExitProcEvent2 struct {
- ProcessPid uint32
- ProcessTgid uint32
- ExitCode uint32
- ExitSignal uint32
- ParentPid uint32
- ParentTgid uint32
-}
-
func (e *ExitProcEvent) Pid() uint32 {
return e.ProcessPid
}
diff --git a/vendor/github.com/vishvananda/netlink/protinfo.go b/vendor/github.com/vishvananda/netlink/protinfo.go
index 60b23b3742c..0163cba3a8b 100644
--- a/vendor/github.com/vishvananda/netlink/protinfo.go
+++ b/vendor/github.com/vishvananda/netlink/protinfo.go
@@ -6,14 +6,16 @@ import (
// Protinfo represents bridge flags from netlink.
type Protinfo struct {
- Hairpin bool
- Guard bool
- FastLeave bool
- RootBlock bool
- Learning bool
- Flood bool
- ProxyArp bool
- ProxyArpWiFi bool
+ Hairpin bool
+ Guard bool
+ FastLeave bool
+ RootBlock bool
+ Learning bool
+ Flood bool
+ ProxyArp bool
+ ProxyArpWiFi bool
+ Isolated bool
+ NeighSuppress bool
}
// String returns a list of enabled flags
@@ -47,6 +49,12 @@ func (prot *Protinfo) String() string {
if prot.ProxyArpWiFi {
boolStrings = append(boolStrings, "ProxyArpWiFi")
}
+ if prot.Isolated {
+ boolStrings = append(boolStrings, "Isolated")
+ }
+ if prot.NeighSuppress {
+ boolStrings = append(boolStrings, "NeighSuppress")
+ }
return strings.Join(boolStrings, " ")
}
diff --git a/vendor/github.com/vishvananda/netlink/protinfo_linux.go b/vendor/github.com/vishvananda/netlink/protinfo_linux.go
index 15b65123cef..1ba25d3cd47 100644
--- a/vendor/github.com/vishvananda/netlink/protinfo_linux.go
+++ b/vendor/github.com/vishvananda/netlink/protinfo_linux.go
@@ -68,6 +68,10 @@ func parseProtinfo(infos []syscall.NetlinkRouteAttr) (pi Protinfo) {
pi.ProxyArp = byteToBool(info.Value[0])
case nl.IFLA_BRPORT_PROXYARP_WIFI:
pi.ProxyArpWiFi = byteToBool(info.Value[0])
+ case nl.IFLA_BRPORT_ISOLATED:
+ pi.Isolated = byteToBool(info.Value[0])
+ case nl.IFLA_BRPORT_NEIGH_SUPPRESS:
+ pi.NeighSuppress = byteToBool(info.Value[0])
}
}
return
diff --git a/vendor/github.com/vishvananda/netlink/qdisc.go b/vendor/github.com/vishvananda/netlink/qdisc.go
index f594c9c212d..067743d390d 100644
--- a/vendor/github.com/vishvananda/netlink/qdisc.go
+++ b/vendor/github.com/vishvananda/netlink/qdisc.go
@@ -17,19 +17,29 @@ const (
HANDLE_MIN_EGRESS = 0xFFFFFFF3
)
+const (
+ HORIZON_DROP_POLICY_CAP = 0
+ HORIZON_DROP_POLICY_DROP = 1
+ HORIZON_DROP_POLICY_DEFAULT = 255
+)
+
type Qdisc interface {
Attrs() *QdiscAttrs
Type() string
}
+type QdiscStatistics ClassStatistics
+
// QdiscAttrs represents a netlink qdisc. A qdisc is associated with a link,
// has a handle, a parent and a refcnt. The root qdisc of a device should
// have parent == HANDLE_ROOT.
type QdiscAttrs struct {
- LinkIndex int
- Handle uint32
- Parent uint32
- Refcnt uint32 // read only
+ LinkIndex int
+ Handle uint32
+ Parent uint32
+ Refcnt uint32 // read only
+ IngressBlock *uint32
+ Statistics *QdiscStatistics
}
func (q QdiscAttrs) String() string {
@@ -113,6 +123,7 @@ type Htb struct {
Defcls uint32
Debug uint32
DirectPkts uint32
+ DirectQlen *uint32
}
func NewHtb(attrs QdiscAttrs) *Htb {
@@ -123,6 +134,7 @@ func NewHtb(attrs QdiscAttrs) *Htb {
Rate2Quantum: 10,
Debug: 0,
DirectPkts: 0,
+ DirectQlen: nil,
}
}
@@ -150,6 +162,7 @@ type NetemQdiscAttrs struct {
ReorderCorr float32 // in %
CorruptProb float32 // in %
CorruptCorr float32 // in %
+ Rate64 uint64
}
func (q NetemQdiscAttrs) String() string {
@@ -174,6 +187,7 @@ type Netem struct {
ReorderCorr uint32
CorruptProb uint32
CorruptCorr uint32
+ Rate64 uint64
}
func (netem *Netem) String() string {
@@ -210,6 +224,19 @@ func (qdisc *Tbf) Type() string {
return "tbf"
}
+// Clsact is a qdisc for adding filters
+type Clsact struct {
+ QdiscAttrs
+}
+
+func (qdisc *Clsact) Attrs() *QdiscAttrs {
+ return &qdisc.QdiscAttrs
+}
+
+func (qdisc *Clsact) Type() string {
+ return "clsact"
+}
+
// Ingress is a qdisc for adding ingress filters
type Ingress struct {
QdiscAttrs
@@ -278,22 +305,25 @@ type Fq struct {
FlowDefaultRate uint32
FlowMaxRate uint32
// called BucketsLog under the hood
- Buckets uint32
- FlowRefillDelay uint32
- LowRateThreshold uint32
+ Buckets uint32
+ FlowRefillDelay uint32
+ LowRateThreshold uint32
+ Horizon uint32
+ HorizonDropPolicy uint8
}
func (fq *Fq) String() string {
return fmt.Sprintf(
- "{PacketLimit: %v, FlowPacketLimit: %v, Quantum: %v, InitialQuantum: %v, Pacing: %v, FlowDefaultRate: %v, FlowMaxRate: %v, Buckets: %v, FlowRefillDelay: %v, LowRateThreshold: %v}",
- fq.PacketLimit, fq.FlowPacketLimit, fq.Quantum, fq.InitialQuantum, fq.Pacing, fq.FlowDefaultRate, fq.FlowMaxRate, fq.Buckets, fq.FlowRefillDelay, fq.LowRateThreshold,
+ "{PacketLimit: %v, FlowPacketLimit: %v, Quantum: %v, InitialQuantum: %v, Pacing: %v, FlowDefaultRate: %v, FlowMaxRate: %v, Buckets: %v, FlowRefillDelay: %v, LowRateThreshold: %v, Horizon: %v, HorizonDropPolicy: %v}",
+ fq.PacketLimit, fq.FlowPacketLimit, fq.Quantum, fq.InitialQuantum, fq.Pacing, fq.FlowDefaultRate, fq.FlowMaxRate, fq.Buckets, fq.FlowRefillDelay, fq.LowRateThreshold, fq.Horizon, fq.HorizonDropPolicy,
)
}
func NewFq(attrs QdiscAttrs) *Fq {
return &Fq{
- QdiscAttrs: attrs,
- Pacing: 1,
+ QdiscAttrs: attrs,
+ Pacing: 1,
+ HorizonDropPolicy: HORIZON_DROP_POLICY_DEFAULT,
}
}
diff --git a/vendor/github.com/vishvananda/netlink/qdisc_linux.go b/vendor/github.com/vishvananda/netlink/qdisc_linux.go
index e182e1cfe67..e732ae3bd64 100644
--- a/vendor/github.com/vishvananda/netlink/qdisc_linux.go
+++ b/vendor/github.com/vishvananda/netlink/qdisc_linux.go
@@ -5,6 +5,7 @@ import (
"io/ioutil"
"strconv"
"strings"
+ "sync"
"syscall"
"github.com/vishvananda/netlink/nl"
@@ -17,6 +18,7 @@ func NewNetem(attrs QdiscAttrs, nattrs NetemQdiscAttrs) *Netem {
var lossCorr, delayCorr, duplicateCorr uint32
var reorderProb, reorderCorr uint32
var corruptProb, corruptCorr uint32
+ var rate64 uint64
latency := nattrs.Latency
loss := Percentage2u32(nattrs.Loss)
@@ -57,6 +59,7 @@ func NewNetem(attrs QdiscAttrs, nattrs NetemQdiscAttrs) *Netem {
corruptProb = Percentage2u32(nattrs.CorruptProb)
corruptCorr = Percentage2u32(nattrs.CorruptCorr)
+ rate64 = nattrs.Rate64
return &Netem{
QdiscAttrs: attrs,
@@ -73,6 +76,7 @@ func NewNetem(attrs QdiscAttrs, nattrs NetemQdiscAttrs) *Netem {
ReorderCorr: reorderCorr,
CorruptProb: corruptProb,
CorruptCorr: corruptCorr,
+ Rate64: rate64,
}
}
@@ -159,6 +163,9 @@ func (h *Handle) qdiscModify(cmd, flags int, qdisc Qdisc) error {
func qdiscPayload(req *nl.NetlinkRequest, qdisc Qdisc) error {
req.AddData(nl.NewRtAttr(nl.TCA_KIND, nl.ZeroTerminated(qdisc.Type())))
+ if qdisc.Attrs().IngressBlock != nil {
+ req.AddData(nl.NewRtAttr(nl.TCA_INGRESS_BLOCK, nl.Uint32Attr(*qdisc.Attrs().IngressBlock)))
+ }
options := nl.NewRtAttr(nl.TCA_OPTIONS, nil)
@@ -194,7 +201,9 @@ func qdiscPayload(req *nl.NetlinkRequest, qdisc Qdisc) error {
opt.Debug = qdisc.Debug
opt.DirectPkts = qdisc.DirectPkts
options.AddRtAttr(nl.TCA_HTB_INIT, opt.Serialize())
- // options.AddRtAttr(nl.TCA_HTB_DIRECT_QLEN, opt.Serialize())
+ if qdisc.DirectQlen != nil {
+ options.AddRtAttr(nl.TCA_HTB_DIRECT_QLEN, nl.Uint32Attr(*qdisc.DirectQlen))
+ }
case *Hfsc:
opt := nl.TcHfscOpt{}
opt.Defcls = qdisc.Defcls
@@ -231,6 +240,19 @@ func qdiscPayload(req *nl.NetlinkRequest, qdisc Qdisc) error {
if reorder.Probability > 0 {
options.AddRtAttr(nl.TCA_NETEM_REORDER, reorder.Serialize())
}
+ // Rate
+ if qdisc.Rate64 > 0 {
+ rate := nl.TcNetemRate{}
+ if qdisc.Rate64 >= uint64(1<<32) {
+ options.AddRtAttr(nl.TCA_NETEM_RATE64, nl.Uint64Attr(qdisc.Rate64))
+ rate.Rate = ^uint32(0)
+ } else {
+ rate.Rate = uint32(qdisc.Rate64)
+ }
+ options.AddRtAttr(nl.TCA_NETEM_RATE, rate.Serialize())
+ }
+ case *Clsact:
+ options = nil
case *Ingress:
// ingress filters must use the proper handle
if qdisc.Attrs().Parent != HANDLE_INGRESS {
@@ -265,6 +287,9 @@ func qdiscPayload(req *nl.NetlinkRequest, qdisc Qdisc) error {
if qdisc.Buckets > 0 {
options.AddRtAttr(nl.TCA_FQ_BUCKETS_LOG, nl.Uint32Attr((uint32(qdisc.Buckets))))
}
+ if qdisc.PacketLimit > 0 {
+ options.AddRtAttr(nl.TCA_FQ_PLIMIT, nl.Uint32Attr((uint32(qdisc.PacketLimit))))
+ }
if qdisc.LowRateThreshold > 0 {
options.AddRtAttr(nl.TCA_FQ_LOW_RATE_THRESHOLD, nl.Uint32Attr((uint32(qdisc.LowRateThreshold))))
}
@@ -286,6 +311,12 @@ func qdiscPayload(req *nl.NetlinkRequest, qdisc Qdisc) error {
if qdisc.FlowDefaultRate > 0 {
options.AddRtAttr(nl.TCA_FQ_FLOW_DEFAULT_RATE, nl.Uint32Attr((uint32(qdisc.FlowDefaultRate))))
}
+ if qdisc.Horizon > 0 {
+ options.AddRtAttr(nl.TCA_FQ_HORIZON, nl.Uint32Attr(qdisc.Horizon))
+ }
+ if qdisc.HorizonDropPolicy != HORIZON_DROP_POLICY_DEFAULT {
+ options.AddRtAttr(nl.TCA_FQ_HORIZON_DROP, nl.Uint8Attr(qdisc.HorizonDropPolicy))
+ }
case *Sfq:
opt := nl.TcSfqQoptV1{}
opt.TcSfqQopt.Quantum = qdisc.Quantum
@@ -380,6 +411,8 @@ func (h *Handle) QdiscList(link Link) ([]Qdisc, error) {
qdisc = &Netem{}
case "sfq":
qdisc = &Sfq{}
+ case "clsact":
+ qdisc = &Clsact{}
default:
qdisc = &GenericQdisc{QdiscType: qdiscType}
}
@@ -442,6 +475,22 @@ func (h *Handle) QdiscList(link Link) ([]Qdisc, error) {
// no options for ingress
}
+ case nl.TCA_INGRESS_BLOCK:
+ ingressBlock := new(uint32)
+ *ingressBlock = native.Uint32(attr.Value)
+ base.IngressBlock = ingressBlock
+ case nl.TCA_STATS:
+ s, err := parseTcStats(attr.Value)
+ if err != nil {
+ return nil, err
+ }
+ base.Statistics = (*QdiscStatistics)(s)
+ case nl.TCA_STATS2:
+ s, err := parseTcStats2(attr.Value)
+ if err != nil {
+ return nil, err
+ }
+ base.Statistics = (*QdiscStatistics)(s)
}
}
*qdisc.Attrs() = base
@@ -479,8 +528,8 @@ func parseHtbData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error {
htb.Debug = opt.Debug
htb.DirectPkts = opt.DirectPkts
case nl.TCA_HTB_DIRECT_QLEN:
- // TODO
- //htb.DirectQlen = native.uint32(datum.Value)
+ directQlen := native.Uint32(datum.Value)
+ htb.DirectQlen = &directQlen
}
}
return nil
@@ -546,6 +595,11 @@ func parseFqData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error {
fq.FlowMaxRate = native.Uint32(datum.Value)
case nl.TCA_FQ_FLOW_DEFAULT_RATE:
fq.FlowDefaultRate = native.Uint32(datum.Value)
+ case nl.TCA_FQ_HORIZON:
+ fq.Horizon = native.Uint32(datum.Value)
+ case nl.TCA_FQ_HORIZON_DROP:
+ fq.HorizonDropPolicy = datum.Value[0]
+
}
}
return nil
@@ -564,6 +618,8 @@ func parseNetemData(qdisc Qdisc, value []byte) error {
if err != nil {
return err
}
+ var rate *nl.TcNetemRate
+ var rate64 uint64
for _, datum := range data {
switch datum.Attr.Type {
case nl.TCA_NETEM_CORR:
@@ -579,8 +635,19 @@ func parseNetemData(qdisc Qdisc, value []byte) error {
opt := nl.DeserializeTcNetemReorder(datum.Value)
netem.ReorderProb = opt.Probability
netem.ReorderCorr = opt.Correlation
+ case nl.TCA_NETEM_RATE:
+ rate = nl.DeserializeTcNetemRate(datum.Value)
+ case nl.TCA_NETEM_RATE64:
+ rate64 = native.Uint64(datum.Value)
}
}
+ if rate != nil {
+ netem.Rate64 = uint64(rate.Rate)
+ if rate64 > 0 {
+ netem.Rate64 = rate64
+ }
+ }
+
return nil
}
@@ -624,6 +691,9 @@ var (
tickInUsec float64
clockFactor float64
hz float64
+
+ // Without this, the go race detector may report races.
+ initClockMutex sync.Mutex
)
func initClock() {
@@ -658,6 +728,8 @@ func initClock() {
}
func TickInUsec() float64 {
+ initClockMutex.Lock()
+ defer initClockMutex.Unlock()
if tickInUsec == 0.0 {
initClock()
}
@@ -665,6 +737,8 @@ func TickInUsec() float64 {
}
func ClockFactor() float64 {
+ initClockMutex.Lock()
+ defer initClockMutex.Unlock()
if clockFactor == 0.0 {
initClock()
}
@@ -672,6 +746,8 @@ func ClockFactor() float64 {
}
func Hz() float64 {
+ initClockMutex.Lock()
+ defer initClockMutex.Unlock()
if hz == 0.0 {
initClock()
}
diff --git a/vendor/github.com/vishvananda/netlink/route.go b/vendor/github.com/vishvananda/netlink/route.go
index 79cc218ec81..1b4555d5c51 100644
--- a/vendor/github.com/vishvananda/netlink/route.go
+++ b/vendor/github.com/vishvananda/netlink/route.go
@@ -154,8 +154,15 @@ type flagString struct {
}
// RouteUpdate is sent when a route changes - type is RTM_NEWROUTE or RTM_DELROUTE
+
+// NlFlags is only non-zero for RTM_NEWROUTE, the following flags can be set:
+// - unix.NLM_F_REPLACE - Replace existing matching config object with this request
+// - unix.NLM_F_EXCL - Don't replace the config object if it already exists
+// - unix.NLM_F_CREATE - Create config object if it doesn't already exist
+// - unix.NLM_F_APPEND - Add to the end of the object list
type RouteUpdate struct {
- Type uint16
+ Type uint16
+ NlFlags uint16
Route
}
diff --git a/vendor/github.com/vishvananda/netlink/route_linux.go b/vendor/github.com/vishvananda/netlink/route_linux.go
index 8da8866573c..0cd4f8363a7 100644
--- a/vendor/github.com/vishvananda/netlink/route_linux.go
+++ b/vendor/github.com/vishvananda/netlink/route_linux.go
@@ -41,7 +41,6 @@ func (s Scope) String() string {
}
}
-
const (
FLAG_ONLINK NextHopFlag = unix.RTNH_F_ONLINK
FLAG_PERVASIVE NextHopFlag = unix.RTNH_F_PERVASIVE
@@ -274,6 +273,16 @@ type SEG6LocalEncap struct {
In6Addr net.IP
Iif int
Oif int
+ bpf bpfObj
+}
+
+func (e *SEG6LocalEncap) SetProg(progFd int, progName string) error {
+ if progFd <= 0 {
+ return fmt.Errorf("seg6local bpf SetProg: invalid fd")
+ }
+ e.bpf.progFd = progFd
+ e.bpf.progName = progName
+ return nil
}
func (e *SEG6LocalEncap) Type() int {
@@ -307,6 +316,22 @@ func (e *SEG6LocalEncap) Decode(buf []byte) error {
case nl.SEG6_LOCAL_OIF:
e.Oif = int(native.Uint32(attr.Value[0:4]))
e.Flags[nl.SEG6_LOCAL_OIF] = true
+ case nl.SEG6_LOCAL_BPF:
+ var bpfAttrs []syscall.NetlinkRouteAttr
+ bpfAttrs, err = nl.ParseRouteAttr(attr.Value)
+ bpfobj := bpfObj{}
+ for _, bpfAttr := range bpfAttrs {
+ switch bpfAttr.Attr.Type {
+ case nl.LWT_BPF_PROG_FD:
+ bpfobj.progFd = int(native.Uint32(bpfAttr.Value))
+ case nl.LWT_BPF_PROG_NAME:
+ bpfobj.progName = string(bpfAttr.Value)
+ default:
+ err = fmt.Errorf("seg6local bpf decode: unknown attribute: Type %d", bpfAttr.Attr)
+ }
+ }
+ e.bpf = bpfobj
+ e.Flags[nl.SEG6_LOCAL_BPF] = true
}
}
return err
@@ -368,6 +393,16 @@ func (e *SEG6LocalEncap) Encode() ([]byte, error) {
native.PutUint32(attr[4:], uint32(e.Oif))
res = append(res, attr...)
}
+ if e.Flags[nl.SEG6_LOCAL_BPF] {
+ attr := nl.NewRtAttr(nl.SEG6_LOCAL_BPF, []byte{})
+ if e.bpf.progFd != 0 {
+ attr.AddRtAttr(nl.LWT_BPF_PROG_FD, nl.Uint32Attr(uint32(e.bpf.progFd)))
+ }
+ if e.bpf.progName != "" {
+ attr.AddRtAttr(nl.LWT_BPF_PROG_NAME, nl.ZeroTerminated(e.bpf.progName))
+ }
+ res = append(res, attr.Serialize()...)
+ }
return res, err
}
func (e *SEG6LocalEncap) String() string {
@@ -401,12 +436,15 @@ func (e *SEG6LocalEncap) String() string {
}
if e.Flags[nl.SEG6_LOCAL_SRH] {
segs := make([]string, 0, len(e.Segments))
- //append segment backwards (from n to 0) since seg#0 is the last segment.
+ // append segment backwards (from n to 0) since seg#0 is the last segment.
for i := len(e.Segments); i > 0; i-- {
segs = append(segs, e.Segments[i-1].String())
}
strs = append(strs, fmt.Sprintf("segs %d [ %s ]", len(e.Segments), strings.Join(segs, " ")))
}
+ if e.Flags[nl.SEG6_LOCAL_BPF] {
+ strs = append(strs, fmt.Sprintf("bpf %s[%d]", e.bpf.progName, e.bpf.progFd))
+ }
return strings.Join(strs, " ")
}
func (e *SEG6LocalEncap) Equal(x Encap) bool {
@@ -438,7 +476,7 @@ func (e *SEG6LocalEncap) Equal(x Encap) bool {
if !e.InAddr.Equal(o.InAddr) || !e.In6Addr.Equal(o.In6Addr) {
return false
}
- if e.Action != o.Action || e.Table != o.Table || e.Iif != o.Iif || e.Oif != o.Oif {
+ if e.Action != o.Action || e.Table != o.Table || e.Iif != o.Iif || e.Oif != o.Oif || e.bpf != o.bpf {
return false
}
return true
@@ -590,6 +628,109 @@ func (e *BpfEncap) Equal(x Encap) bool {
return true
}
+// IP6tnlEncap definition
+type IP6tnlEncap struct {
+ ID uint64
+ Dst net.IP
+ Src net.IP
+ Hoplimit uint8
+ TC uint8
+ Flags uint16
+}
+
+func (e *IP6tnlEncap) Type() int {
+ return nl.LWTUNNEL_ENCAP_IP6
+}
+
+func (e *IP6tnlEncap) Decode(buf []byte) error {
+ attrs, err := nl.ParseRouteAttr(buf)
+ if err != nil {
+ return err
+ }
+ for _, attr := range attrs {
+ switch attr.Attr.Type {
+ case nl.LWTUNNEL_IP6_ID:
+ e.ID = uint64(native.Uint64(attr.Value[0:4]))
+ case nl.LWTUNNEL_IP6_DST:
+ e.Dst = net.IP(attr.Value[:])
+ case nl.LWTUNNEL_IP6_SRC:
+ e.Src = net.IP(attr.Value[:])
+ case nl.LWTUNNEL_IP6_HOPLIMIT:
+ e.Hoplimit = attr.Value[0]
+ case nl.LWTUNNEL_IP6_TC:
+ // e.TC = attr.Value[0]
+ err = fmt.Errorf("decoding TC in IP6tnlEncap is not supported")
+ case nl.LWTUNNEL_IP6_FLAGS:
+ // e.Flags = uint16(native.Uint16(attr.Value[0:2]))
+ err = fmt.Errorf("decoding FLAG in IP6tnlEncap is not supported")
+ case nl.LWTUNNEL_IP6_PAD:
+ err = fmt.Errorf("decoding PAD in IP6tnlEncap is not supported")
+ case nl.LWTUNNEL_IP6_OPTS:
+ err = fmt.Errorf("decoding OPTS in IP6tnlEncap is not supported")
+ }
+ }
+ return err
+}
+
+func (e *IP6tnlEncap) Encode() ([]byte, error) {
+
+ final := []byte{}
+
+ resID := make([]byte, 12)
+ native.PutUint16(resID, 12) // 2+2+8
+ native.PutUint16(resID[2:], nl.LWTUNNEL_IP6_ID)
+ native.PutUint64(resID[4:], 0)
+ final = append(final, resID...)
+
+ resDst := make([]byte, 4)
+ native.PutUint16(resDst, 20) // 2+2+16
+ native.PutUint16(resDst[2:], nl.LWTUNNEL_IP6_DST)
+ resDst = append(resDst, e.Dst...)
+ final = append(final, resDst...)
+
+ resSrc := make([]byte, 4)
+ native.PutUint16(resSrc, 20)
+ native.PutUint16(resSrc[2:], nl.LWTUNNEL_IP6_SRC)
+ resSrc = append(resSrc, e.Src...)
+ final = append(final, resSrc...)
+
+ // resTc := make([]byte, 5)
+ // native.PutUint16(resTc, 5)
+ // native.PutUint16(resTc[2:], nl.LWTUNNEL_IP6_TC)
+ // resTc[4] = e.TC
+ // final = append(final,resTc...)
+
+ resHops := make([]byte, 5)
+ native.PutUint16(resHops, 5)
+ native.PutUint16(resHops[2:], nl.LWTUNNEL_IP6_HOPLIMIT)
+ resHops[4] = e.Hoplimit
+ final = append(final, resHops...)
+
+ // resFlags := make([]byte, 6)
+ // native.PutUint16(resFlags, 6)
+ // native.PutUint16(resFlags[2:], nl.LWTUNNEL_IP6_FLAGS)
+ // native.PutUint16(resFlags[4:], e.Flags)
+ // final = append(final,resFlags...)
+
+ return final, nil
+}
+
+func (e *IP6tnlEncap) String() string {
+ return fmt.Sprintf("id %d src %s dst %s hoplimit %d tc %d flags 0x%.4x", e.ID, e.Src, e.Dst, e.Hoplimit, e.TC, e.Flags)
+}
+
+func (e *IP6tnlEncap) Equal(x Encap) bool {
+ o, ok := x.(*IP6tnlEncap)
+ if !ok {
+ return false
+ }
+
+ if e.ID != o.ID || e.Flags != o.Flags || e.Hoplimit != o.Hoplimit || e.Src.Equal(o.Src) || e.Dst.Equal(o.Dst) || e.TC != o.TC {
+ return false
+ }
+ return true
+}
+
type Via struct {
AddrFamily int
Addr net.IP
@@ -656,7 +797,8 @@ func RouteAdd(route *Route) error {
func (h *Handle) RouteAdd(route *Route) error {
flags := unix.NLM_F_CREATE | unix.NLM_F_EXCL | unix.NLM_F_ACK
req := h.newNetlinkRequest(unix.RTM_NEWROUTE, flags)
- return h.routeHandle(route, req, nl.NewRtMsg())
+ _, err := h.routeHandle(route, req, nl.NewRtMsg())
+ return err
}
// RouteAppend will append a route to the system.
@@ -670,7 +812,8 @@ func RouteAppend(route *Route) error {
func (h *Handle) RouteAppend(route *Route) error {
flags := unix.NLM_F_CREATE | unix.NLM_F_APPEND | unix.NLM_F_ACK
req := h.newNetlinkRequest(unix.RTM_NEWROUTE, flags)
- return h.routeHandle(route, req, nl.NewRtMsg())
+ _, err := h.routeHandle(route, req, nl.NewRtMsg())
+ return err
}
// RouteAddEcmp will add a route to the system.
@@ -682,7 +825,23 @@ func RouteAddEcmp(route *Route) error {
func (h *Handle) RouteAddEcmp(route *Route) error {
flags := unix.NLM_F_CREATE | unix.NLM_F_ACK
req := h.newNetlinkRequest(unix.RTM_NEWROUTE, flags)
- return h.routeHandle(route, req, nl.NewRtMsg())
+ _, err := h.routeHandle(route, req, nl.NewRtMsg())
+ return err
+}
+
+// RouteChange will change an existing route in the system.
+// Equivalent to: `ip route change $route`
+func RouteChange(route *Route) error {
+ return pkgHandle.RouteChange(route)
+}
+
+// RouteChange will change an existing route in the system.
+// Equivalent to: `ip route change $route`
+func (h *Handle) RouteChange(route *Route) error {
+ flags := unix.NLM_F_REPLACE | unix.NLM_F_ACK
+ req := h.newNetlinkRequest(unix.RTM_NEWROUTE, flags)
+ _, err := h.routeHandle(route, req, nl.NewRtMsg())
+ return err
}
// RouteReplace will add a route to the system.
@@ -696,7 +855,8 @@ func RouteReplace(route *Route) error {
func (h *Handle) RouteReplace(route *Route) error {
flags := unix.NLM_F_CREATE | unix.NLM_F_REPLACE | unix.NLM_F_ACK
req := h.newNetlinkRequest(unix.RTM_NEWROUTE, flags)
- return h.routeHandle(route, req, nl.NewRtMsg())
+ _, err := h.routeHandle(route, req, nl.NewRtMsg())
+ return err
}
// RouteDel will delete a route from the system.
@@ -709,12 +869,27 @@ func RouteDel(route *Route) error {
// Equivalent to: `ip route del $route`
func (h *Handle) RouteDel(route *Route) error {
req := h.newNetlinkRequest(unix.RTM_DELROUTE, unix.NLM_F_ACK)
- return h.routeHandle(route, req, nl.NewRtDelMsg())
+ _, err := h.routeHandle(route, req, nl.NewRtDelMsg())
+ return err
}
-func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg) error {
- if (route.Dst == nil || route.Dst.IP == nil) && route.Src == nil && route.Gw == nil && route.MPLSDst == nil {
- return fmt.Errorf("one of Dst.IP, Src, or Gw must not be nil")
+func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg) ([][]byte, error) {
+ if err := h.prepareRouteReq(route, req, msg); err != nil {
+ return nil, err
+ }
+ return req.Execute(unix.NETLINK_ROUTE, 0)
+}
+
+func (h *Handle) routeHandleIter(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg, f func(msg []byte) bool) error {
+ if err := h.prepareRouteReq(route, req, msg); err != nil {
+ return err
+ }
+ return req.ExecuteIter(unix.NETLINK_ROUTE, 0, f)
+}
+
+func (h *Handle) prepareRouteReq(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg) error {
+ if req.NlMsghdr.Type != unix.RTM_GETROUTE && (route.Dst == nil || route.Dst.IP == nil) && route.Src == nil && route.Gw == nil && route.MPLSDst == nil {
+ return fmt.Errorf("either Dst.IP, Src.IP or Gw must be set")
}
family := -1
@@ -968,19 +1143,21 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg
msg.Flags = uint32(route.Flags)
msg.Scope = uint8(route.Scope)
- msg.Family = uint8(family)
+ // only overwrite family if it was not set in msg
+ if msg.Family == 0 {
+ msg.Family = uint8(family)
+ }
req.AddData(msg)
for _, attr := range rtAttrs {
req.AddData(attr)
}
- b := make([]byte, 4)
- native.PutUint32(b, uint32(route.LinkIndex))
-
- req.AddData(nl.NewRtAttr(unix.RTA_OIF, b))
-
- _, err := req.Execute(unix.NETLINK_ROUTE, 0)
- return err
+ if (req.NlMsghdr.Type != unix.RTM_GETROUTE) || (req.NlMsghdr.Type == unix.RTM_GETROUTE && route.LinkIndex > 0) {
+ b := make([]byte, 4)
+ native.PutUint32(b, uint32(route.LinkIndex))
+ req.AddData(nl.NewRtAttr(unix.RTA_OIF, b))
+ }
+ return nil
}
// RouteList gets a list of routes in the system.
@@ -994,13 +1171,13 @@ func RouteList(link Link, family int) ([]Route, error) {
// Equivalent to: `ip route show`.
// The list can be filtered by link and ip family.
func (h *Handle) RouteList(link Link, family int) ([]Route, error) {
- var routeFilter *Route
+ routeFilter := &Route{}
if link != nil {
- routeFilter = &Route{
- LinkIndex: link.Attrs().Index,
- }
+ routeFilter.LinkIndex = link.Attrs().Index
+
+ return h.RouteListFiltered(family, routeFilter, RT_FILTER_OIF)
}
- return h.RouteListFiltered(family, routeFilter, RT_FILTER_OIF)
+ return h.RouteListFiltered(family, routeFilter, 0)
}
// RouteListFiltered gets a list of routes in the system filtered with specified rules.
@@ -1012,68 +1189,94 @@ func RouteListFiltered(family int, filter *Route, filterMask uint64) ([]Route, e
// RouteListFiltered gets a list of routes in the system filtered with specified rules.
// All rules must be defined in RouteFilter struct
func (h *Handle) RouteListFiltered(family int, filter *Route, filterMask uint64) ([]Route, error) {
- req := h.newNetlinkRequest(unix.RTM_GETROUTE, unix.NLM_F_DUMP)
- rtmsg := nl.NewRtMsg()
- rtmsg.Family = uint8(family)
- req.AddData(rtmsg)
-
- msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWROUTE)
+ var res []Route
+ err := h.RouteListFilteredIter(family, filter, filterMask, func(route Route) (cont bool) {
+ res = append(res, route)
+ return true
+ })
if err != nil {
return nil, err
}
+ return res, nil
+}
- var res []Route
- for _, m := range msgs {
+// RouteListFilteredIter passes each route that matches the filter to the given iterator func. Iteration continues
+// until all routes are loaded or the func returns false.
+func RouteListFilteredIter(family int, filter *Route, filterMask uint64, f func(Route) (cont bool)) error {
+ return pkgHandle.RouteListFilteredIter(family, filter, filterMask, f)
+}
+
+func (h *Handle) RouteListFilteredIter(family int, filter *Route, filterMask uint64, f func(Route) (cont bool)) error {
+ req := h.newNetlinkRequest(unix.RTM_GETROUTE, unix.NLM_F_DUMP)
+ rtmsg := &nl.RtMsg{}
+ rtmsg.Family = uint8(family)
+
+ var parseErr error
+ err := h.routeHandleIter(filter, req, rtmsg, func(m []byte) bool {
msg := nl.DeserializeRtMsg(m)
+ if family != FAMILY_ALL && msg.Family != uint8(family) {
+ // Ignore routes not matching requested family
+ return true
+ }
if msg.Flags&unix.RTM_F_CLONED != 0 {
// Ignore cloned routes
- continue
+ return true
}
if msg.Table != unix.RT_TABLE_MAIN {
- if filter == nil || filter != nil && filterMask&RT_FILTER_TABLE == 0 {
+ if filter == nil || filterMask&RT_FILTER_TABLE == 0 {
// Ignore non-main tables
- continue
+ return true
}
}
route, err := deserializeRoute(m)
if err != nil {
- return nil, err
+ parseErr = err
+ return false
}
if filter != nil {
switch {
case filterMask&RT_FILTER_TABLE != 0 && filter.Table != unix.RT_TABLE_UNSPEC && route.Table != filter.Table:
- continue
+ return true
case filterMask&RT_FILTER_PROTOCOL != 0 && route.Protocol != filter.Protocol:
- continue
+ return true
case filterMask&RT_FILTER_SCOPE != 0 && route.Scope != filter.Scope:
- continue
+ return true
case filterMask&RT_FILTER_TYPE != 0 && route.Type != filter.Type:
- continue
+ return true
case filterMask&RT_FILTER_TOS != 0 && route.Tos != filter.Tos:
- continue
+ return true
case filterMask&RT_FILTER_REALM != 0 && route.Realm != filter.Realm:
- continue
+ return true
case filterMask&RT_FILTER_OIF != 0 && route.LinkIndex != filter.LinkIndex:
- continue
+ return true
case filterMask&RT_FILTER_IIF != 0 && route.ILinkIndex != filter.ILinkIndex:
- continue
+ return true
case filterMask&RT_FILTER_GW != 0 && !route.Gw.Equal(filter.Gw):
- continue
+ return true
case filterMask&RT_FILTER_SRC != 0 && !route.Src.Equal(filter.Src):
- continue
+ return true
case filterMask&RT_FILTER_DST != 0:
if filter.MPLSDst == nil || route.MPLSDst == nil || (*filter.MPLSDst) != (*route.MPLSDst) {
+ if filter.Dst == nil {
+ filter.Dst = genZeroIPNet(family)
+ }
if !ipNetEqual(route.Dst, filter.Dst) {
- continue
+ return true
}
}
case filterMask&RT_FILTER_HOPLIMIT != 0 && route.Hoplimit != filter.Hoplimit:
- continue
+ return true
}
}
- res = append(res, route)
+ return f(route)
+ })
+ if err != nil {
+ return err
}
- return res, nil
+ if parseErr != nil {
+ return parseErr
+ }
+ return nil
}
// deserializeRoute decodes a binary netlink message into a Route struct
@@ -1257,6 +1460,27 @@ func deserializeRoute(m []byte) (Route, error) {
}
}
+ // Same logic to generate "default" dst with iproute2 implementation
+ if route.Dst == nil {
+ var addLen int
+ var ip net.IP
+ switch msg.Family {
+ case FAMILY_V4:
+ addLen = net.IPv4len
+ ip = net.IPv4zero
+ case FAMILY_V6:
+ addLen = net.IPv6len
+ ip = net.IPv6zero
+ }
+
+ if addLen != 0 {
+ route.Dst = &net.IPNet{
+ IP: ip,
+ Mask: net.CIDRMask(int(msg.Dst_len), 8*addLen),
+ }
+ }
+ }
+
if len(encap.Value) != 0 && len(encapType.Value) != 0 {
typ := int(native.Uint16(encapType.Value[0:2]))
var e Encap
@@ -1291,10 +1515,14 @@ func deserializeRoute(m []byte) (Route, error) {
// RouteGetOptions contains a set of options to use with
// RouteGetWithOptions
type RouteGetOptions struct {
- Iif string
- Oif string
- VrfName string
- SrcAddr net.IP
+ Iif string
+ IifIndex int
+ Oif string
+ VrfName string
+ SrcAddr net.IP
+ UID *uint32
+ Mark uint32
+ FIBMatch bool
}
// RouteGetWithOptions gets a route to a specific destination from the host system.
@@ -1330,6 +1558,9 @@ func (h *Handle) RouteGetWithOptions(destination net.IP, options *RouteGetOption
msg.Src_len = bitlen
}
msg.Flags = unix.RTM_F_LOOKUP_TABLE
+ if options != nil && options.FIBMatch {
+ msg.Flags |= unix.RTM_F_FIB_MATCH
+ }
req.AddData(msg)
rtaDst := nl.NewRtAttr(unix.RTA_DST, destinationData)
@@ -1337,7 +1568,7 @@ func (h *Handle) RouteGetWithOptions(destination net.IP, options *RouteGetOption
if options != nil {
if options.VrfName != "" {
- link, err := LinkByName(options.VrfName)
+ link, err := h.LinkByName(options.VrfName)
if err != nil {
return nil, err
}
@@ -1347,20 +1578,27 @@ func (h *Handle) RouteGetWithOptions(destination net.IP, options *RouteGetOption
req.AddData(nl.NewRtAttr(unix.RTA_OIF, b))
}
+ iifIndex := 0
if len(options.Iif) > 0 {
- link, err := LinkByName(options.Iif)
+ link, err := h.LinkByName(options.Iif)
if err != nil {
return nil, err
}
+ iifIndex = link.Attrs().Index
+ } else if options.IifIndex > 0 {
+ iifIndex = options.IifIndex
+ }
+
+ if iifIndex > 0 {
b := make([]byte, 4)
- native.PutUint32(b, uint32(link.Attrs().Index))
+ native.PutUint32(b, uint32(iifIndex))
req.AddData(nl.NewRtAttr(unix.RTA_IIF, b))
}
if len(options.Oif) > 0 {
- link, err := LinkByName(options.Oif)
+ link, err := h.LinkByName(options.Oif)
if err != nil {
return nil, err
}
@@ -1381,6 +1619,21 @@ func (h *Handle) RouteGetWithOptions(destination net.IP, options *RouteGetOption
req.AddData(nl.NewRtAttr(unix.RTA_SRC, srcAddr))
}
+
+ if options.UID != nil {
+ uid := *options.UID
+ b := make([]byte, 4)
+ native.PutUint32(b, uid)
+
+ req.AddData(nl.NewRtAttr(unix.RTA_UID, b))
+ }
+
+ if options.Mark > 0 {
+ b := make([]byte, 4)
+ native.PutUint32(b, options.Mark)
+
+ req.AddData(nl.NewRtAttr(unix.RTA_MARK, b))
+ }
}
msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWROUTE)
@@ -1408,21 +1661,24 @@ func (h *Handle) RouteGet(destination net.IP) ([]Route, error) {
// RouteSubscribe takes a chan down which notifications will be sent
// when routes are added or deleted. Close the 'done' chan to stop subscription.
func RouteSubscribe(ch chan<- RouteUpdate, done <-chan struct{}) error {
- return routeSubscribeAt(netns.None(), netns.None(), ch, done, nil, false)
+ return routeSubscribeAt(netns.None(), netns.None(), ch, done, nil, false, 0, nil, false)
}
// RouteSubscribeAt works like RouteSubscribe plus it allows the caller
// to choose the network namespace in which to subscribe (ns).
func RouteSubscribeAt(ns netns.NsHandle, ch chan<- RouteUpdate, done <-chan struct{}) error {
- return routeSubscribeAt(ns, netns.None(), ch, done, nil, false)
+ return routeSubscribeAt(ns, netns.None(), ch, done, nil, false, 0, nil, false)
}
// RouteSubscribeOptions contains a set of options to use with
// RouteSubscribeWithOptions.
type RouteSubscribeOptions struct {
- Namespace *netns.NsHandle
- ErrorCallback func(error)
- ListExisting bool
+ Namespace *netns.NsHandle
+ ErrorCallback func(error)
+ ListExisting bool
+ ReceiveBufferSize int
+ ReceiveBufferForceSize bool
+ ReceiveTimeout *unix.Timeval
}
// RouteSubscribeWithOptions work like RouteSubscribe but enable to
@@ -1433,14 +1689,27 @@ func RouteSubscribeWithOptions(ch chan<- RouteUpdate, done <-chan struct{}, opti
none := netns.None()
options.Namespace = &none
}
- return routeSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting)
+ return routeSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting,
+ options.ReceiveBufferSize, options.ReceiveTimeout, options.ReceiveBufferForceSize)
}
-func routeSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- RouteUpdate, done <-chan struct{}, cberr func(error), listExisting bool) error {
+func routeSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- RouteUpdate, done <-chan struct{}, cberr func(error), listExisting bool,
+ rcvbuf int, rcvTimeout *unix.Timeval, rcvbufForce bool) error {
s, err := nl.SubscribeAt(newNs, curNs, unix.NETLINK_ROUTE, unix.RTNLGRP_IPV4_ROUTE, unix.RTNLGRP_IPV6_ROUTE)
if err != nil {
return err
}
+ if rcvTimeout != nil {
+ if err := s.SetReceiveTimeout(rcvTimeout); err != nil {
+ return err
+ }
+ }
+ if rcvbuf != 0 {
+ err = s.SetReceiveBufferSize(rcvbuf, rcvbufForce)
+ if err != nil {
+ return err
+ }
+ }
if done != nil {
go func() {
<-done
@@ -1495,7 +1764,11 @@ func routeSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- RouteUpdate, done <
}
continue
}
- ch <- RouteUpdate{Type: m.Header.Type, Route: route}
+ ch <- RouteUpdate{
+ Type: m.Header.Type,
+ NlFlags: m.Header.Flags & (unix.NLM_F_REPLACE | unix.NLM_F_EXCL | unix.NLM_F_CREATE | unix.NLM_F_APPEND),
+ Route: route,
+ }
}
}
}()
@@ -1523,7 +1796,7 @@ func (p RouteProtocol) String() string {
return "gated"
case unix.RTPROT_ISIS:
return "isis"
- //case unix.RTPROT_KEEPALIVED:
+ // case unix.RTPROT_KEEPALIVED:
// return "keepalived"
case unix.RTPROT_KERNEL:
return "kernel"
@@ -1553,3 +1826,24 @@ func (p RouteProtocol) String() string {
return strconv.Itoa(int(p))
}
}
+
+// genZeroIPNet returns 0.0.0.0/0 or ::/0 for IPv4 or IPv6, otherwise nil
+func genZeroIPNet(family int) *net.IPNet {
+ var addLen int
+ var ip net.IP
+ switch family {
+ case FAMILY_V4:
+ addLen = net.IPv4len
+ ip = net.IPv4zero
+ case FAMILY_V6:
+ addLen = net.IPv6len
+ ip = net.IPv6zero
+ }
+ if addLen != 0 {
+ return &net.IPNet{
+ IP: ip,
+ Mask: net.CIDRMask(0, 8*addLen),
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/vishvananda/netlink/rule.go b/vendor/github.com/vishvananda/netlink/rule.go
index 53cd3d4f6ac..9d74c7cd8a9 100644
--- a/vendor/github.com/vishvananda/netlink/rule.go
+++ b/vendor/github.com/vishvananda/netlink/rule.go
@@ -10,8 +10,8 @@ type Rule struct {
Priority int
Family int
Table int
- Mark int
- Mask int
+ Mark uint32
+ Mask *uint32
Tos uint
TunID uint
Goto int
@@ -26,6 +26,9 @@ type Rule struct {
Dport *RulePortRange
Sport *RulePortRange
IPProto int
+ UIDRange *RuleUIDRange
+ Protocol uint8
+ Type uint8
}
func (r Rule) String() string {
@@ -39,8 +42,8 @@ func (r Rule) String() string {
to = r.Dst.String()
}
- return fmt.Sprintf("ip rule %d: from %s to %s table %d",
- r.Priority, from, to, r.Table)
+ return fmt.Sprintf("ip rule %d: from %s to %s table %d %s",
+ r.Priority, from, to, r.Table, r.typeString())
}
// NewRule return empty rules.
@@ -49,8 +52,8 @@ func NewRule() *Rule {
SuppressIfgroup: -1,
SuppressPrefixlen: -1,
Priority: -1,
- Mark: -1,
- Mask: -1,
+ Mark: 0,
+ Mask: nil,
Goto: -1,
Flow: -1,
}
@@ -66,3 +69,14 @@ type RulePortRange struct {
Start uint16
End uint16
}
+
+// NewRuleUIDRange creates rule uid range.
+func NewRuleUIDRange(start, end uint32) *RuleUIDRange {
+ return &RuleUIDRange{Start: start, End: end}
+}
+
+// RuleUIDRange represents rule uid range.
+type RuleUIDRange struct {
+ Start uint32
+ End uint32
+}
diff --git a/vendor/github.com/vishvananda/netlink/rule_linux.go b/vendor/github.com/vishvananda/netlink/rule_linux.go
index 3ae2138808e..ddff99cfad2 100644
--- a/vendor/github.com/vishvananda/netlink/rule_linux.go
+++ b/vendor/github.com/vishvananda/netlink/rule_linux.go
@@ -43,8 +43,8 @@ func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error {
msg.Protocol = unix.RTPROT_BOOT
msg.Scope = unix.RT_SCOPE_UNIVERSE
msg.Table = unix.RT_TABLE_UNSPEC
- msg.Type = unix.RTN_UNSPEC
- if req.NlMsghdr.Flags&unix.NLM_F_CREATE > 0 {
+ msg.Type = rule.Type // usually 0, same as unix.RTN_UNSPEC
+ if msg.Type == 0 && req.NlMsghdr.Flags&unix.NLM_F_CREATE > 0 {
msg.Type = unix.RTN_UNICAST
}
if rule.Invert {
@@ -102,14 +102,14 @@ func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error {
native.PutUint32(b, uint32(rule.Priority))
req.AddData(nl.NewRtAttr(nl.FRA_PRIORITY, b))
}
- if rule.Mark >= 0 {
+ if rule.Mark != 0 || rule.Mask != nil {
b := make([]byte, 4)
- native.PutUint32(b, uint32(rule.Mark))
+ native.PutUint32(b, rule.Mark)
req.AddData(nl.NewRtAttr(nl.FRA_FWMARK, b))
}
- if rule.Mask >= 0 {
+ if rule.Mask != nil {
b := make([]byte, 4)
- native.PutUint32(b, uint32(rule.Mask))
+ native.PutUint32(b, *rule.Mask)
req.AddData(nl.NewRtAttr(nl.FRA_FWMASK, b))
}
if rule.Flow >= 0 {
@@ -168,6 +168,15 @@ func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error {
req.AddData(nl.NewRtAttr(nl.FRA_SPORT_RANGE, b))
}
+ if rule.UIDRange != nil {
+ b := rule.UIDRange.toRtAttrData()
+ req.AddData(nl.NewRtAttr(nl.FRA_UID_RANGE, b))
+ }
+
+ if rule.Protocol > 0 {
+ req.AddData(nl.NewRtAttr(nl.FRA_PROTOCOL, nl.Uint8Attr(rule.Protocol)))
+ }
+
_, err := req.Execute(unix.NETLINK_ROUTE, 0)
return err
}
@@ -212,8 +221,10 @@ func (h *Handle) RuleListFiltered(family int, filter *Rule, filterMask uint64) (
}
rule := NewRule()
+ rule.Priority = 0 // The default priority from kernel
rule.Invert = msg.Flags&FibRuleInvert > 0
+ rule.Family = int(msg.Family)
rule.Tos = uint(msg.Tos)
for j := range attrs {
@@ -231,9 +242,10 @@ func (h *Handle) RuleListFiltered(family int, filter *Rule, filterMask uint64) (
Mask: net.CIDRMask(int(msg.Dst_len), 8*len(attrs[j].Value)),
}
case nl.FRA_FWMARK:
- rule.Mark = int(native.Uint32(attrs[j].Value[0:4]))
+ rule.Mark = native.Uint32(attrs[j].Value[0:4])
case nl.FRA_FWMASK:
- rule.Mask = int(native.Uint32(attrs[j].Value[0:4]))
+ mask := native.Uint32(attrs[j].Value[0:4])
+ rule.Mask = &mask
case nl.FRA_TUN_ID:
rule.TunID = uint(native.Uint64(attrs[j].Value[0:8]))
case nl.FRA_IIFNAME:
@@ -262,6 +274,10 @@ func (h *Handle) RuleListFiltered(family int, filter *Rule, filterMask uint64) (
rule.Dport = NewRulePortRange(native.Uint16(attrs[j].Value[0:2]), native.Uint16(attrs[j].Value[2:4]))
case nl.FRA_SPORT_RANGE:
rule.Sport = NewRulePortRange(native.Uint16(attrs[j].Value[0:2]), native.Uint16(attrs[j].Value[2:4]))
+ case nl.FRA_UID_RANGE:
+ rule.UIDRange = NewRuleUIDRange(native.Uint32(attrs[j].Value[0:4]), native.Uint32(attrs[j].Value[4:8]))
+ case nl.FRA_PROTOCOL:
+ rule.Protocol = uint8(attrs[j].Value[0])
}
}
@@ -282,7 +298,7 @@ func (h *Handle) RuleListFiltered(family int, filter *Rule, filterMask uint64) (
continue
case filterMask&RT_FILTER_MARK != 0 && rule.Mark != filter.Mark:
continue
- case filterMask&RT_FILTER_MASK != 0 && rule.Mask != filter.Mask:
+ case filterMask&RT_FILTER_MASK != 0 && !ptrEqual(rule.Mask, filter.Mask):
continue
}
}
@@ -299,3 +315,51 @@ func (pr *RulePortRange) toRtAttrData() []byte {
native.PutUint16(b[1], pr.End)
return bytes.Join(b, []byte{})
}
+
+func (pr *RuleUIDRange) toRtAttrData() []byte {
+ b := [][]byte{make([]byte, 4), make([]byte, 4)}
+ native.PutUint32(b[0], pr.Start)
+ native.PutUint32(b[1], pr.End)
+ return bytes.Join(b, []byte{})
+}
+
+func ptrEqual(a, b *uint32) bool {
+ if a == b {
+ return true
+ }
+ if (a == nil) || (b == nil) {
+ return false
+ }
+ return *a == *b
+}
+
+func (r Rule) typeString() string {
+ switch r.Type {
+ case unix.RTN_UNSPEC: // zero
+ return ""
+ case unix.RTN_UNICAST:
+ return ""
+ case unix.RTN_LOCAL:
+ return "local"
+ case unix.RTN_BROADCAST:
+ return "broadcast"
+ case unix.RTN_ANYCAST:
+ return "anycast"
+ case unix.RTN_MULTICAST:
+ return "multicast"
+ case unix.RTN_BLACKHOLE:
+ return "blackhole"
+ case unix.RTN_UNREACHABLE:
+ return "unreachable"
+ case unix.RTN_PROHIBIT:
+ return "prohibit"
+ case unix.RTN_THROW:
+ return "throw"
+ case unix.RTN_NAT:
+ return "nat"
+ case unix.RTN_XRESOLVE:
+ return "xresolve"
+ default:
+ return fmt.Sprintf("type(0x%x)", r.Type)
+ }
+}
diff --git a/vendor/github.com/vishvananda/netlink/rule_nonlinux.go b/vendor/github.com/vishvananda/netlink/rule_nonlinux.go
new file mode 100644
index 00000000000..2b19aa64c7d
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/rule_nonlinux.go
@@ -0,0 +1,8 @@
+//go:build !linux
+// +build !linux
+
+package netlink
+
+func (r Rule) typeString() string {
+ return ""
+}
diff --git a/vendor/github.com/vishvananda/netlink/socket.go b/vendor/github.com/vishvananda/netlink/socket.go
index 41aa726245b..e65efb130f5 100644
--- a/vendor/github.com/vishvananda/netlink/socket.go
+++ b/vendor/github.com/vishvananda/netlink/socket.go
@@ -25,3 +25,80 @@ type Socket struct {
UID uint32
INode uint32
}
+
+// UnixSocket represents a netlink unix socket.
+type UnixSocket struct {
+ Type uint8
+ Family uint8
+ State uint8
+ pad uint8
+ INode uint32
+ Cookie [2]uint32
+}
+
+// XDPSocket represents an XDP socket (and the common diagnosis part in
+// particular). Please note that in contrast to [UnixSocket] the XDPSocket type
+// does not feature “State” information.
+type XDPSocket struct {
+ // xdp_diag_msg
+ // https://elixir.bootlin.com/linux/v6.2/source/include/uapi/linux/xdp_diag.h#L21
+ Family uint8
+ Type uint8
+ pad uint16
+ Ino uint32
+ Cookie [2]uint32
+}
+
+type XDPInfo struct {
+ // XDP_DIAG_INFO/xdp_diag_info
+ // https://elixir.bootlin.com/linux/v6.2/source/include/uapi/linux/xdp_diag.h#L51
+ Ifindex uint32
+ QueueID uint32
+
+ // XDP_DIAG_UID
+ UID uint32
+
+ // XDP_RX_RING
+ // https://elixir.bootlin.com/linux/v6.2/source/include/uapi/linux/xdp_diag.h#L56
+ RxRingEntries uint32
+ TxRingEntries uint32
+ UmemFillRingEntries uint32
+ UmemCompletionRingEntries uint32
+
+ // XDR_DIAG_UMEM
+ Umem *XDPDiagUmem
+
+ // XDR_DIAG_STATS
+ Stats *XDPDiagStats
+}
+
+const (
+ XDP_DU_F_ZEROCOPY = 1 << iota
+)
+
+// XDPDiagUmem describes the umem attached to an XDP socket.
+//
+// https://elixir.bootlin.com/linux/v6.2/source/include/uapi/linux/xdp_diag.h#L62
+type XDPDiagUmem struct {
+ Size uint64
+ ID uint32
+ NumPages uint32
+ ChunkSize uint32
+ Headroom uint32
+ Ifindex uint32
+ QueueID uint32
+ Flags uint32
+ Refs uint32
+}
+
+// XDPDiagStats contains ring statistics for an XDP socket.
+//
+// https://elixir.bootlin.com/linux/v6.2/source/include/uapi/linux/xdp_diag.h#L74
+type XDPDiagStats struct {
+ RxDropped uint64
+ RxInvalid uint64
+ RxFull uint64
+ FillRingEmpty uint64
+ TxInvalid uint64
+ TxRingEmpty uint64
+}
diff --git a/vendor/github.com/vishvananda/netlink/socket_linux.go b/vendor/github.com/vishvananda/netlink/socket_linux.go
index b881fe496dd..4eb4aeafbdf 100644
--- a/vendor/github.com/vishvananda/netlink/socket_linux.go
+++ b/vendor/github.com/vishvananda/netlink/socket_linux.go
@@ -11,9 +11,11 @@ import (
)
const (
- sizeofSocketID = 0x30
- sizeofSocketRequest = sizeofSocketID + 0x8
- sizeofSocket = sizeofSocketID + 0x18
+ sizeofSocketID = 0x30
+ sizeofSocketRequest = sizeofSocketID + 0x8
+ sizeofSocket = sizeofSocketID + 0x18
+ sizeofUnixSocketRequest = 0x18 // 24 byte
+ sizeofUnixSocket = 0x10 // 16 byte
)
type socketRequest struct {
@@ -54,10 +56,8 @@ func (r *socketRequest) Serialize() []byte {
copy(b.Next(16), r.ID.Source)
copy(b.Next(16), r.ID.Destination)
} else {
- copy(b.Next(4), r.ID.Source.To4())
- b.Next(12)
- copy(b.Next(4), r.ID.Destination.To4())
- b.Next(12)
+ copy(b.Next(16), r.ID.Source.To4())
+ copy(b.Next(16), r.ID.Destination.To4())
}
native.PutUint32(b.Next(4), r.ID.Interface)
native.PutUint32(b.Next(4), r.ID.Cookie[0])
@@ -67,6 +67,32 @@ func (r *socketRequest) Serialize() []byte {
func (r *socketRequest) Len() int { return sizeofSocketRequest }
+// According to linux/include/uapi/linux/unix_diag.h
+type unixSocketRequest struct {
+ Family uint8
+ Protocol uint8
+ pad uint16
+ States uint32
+ INode uint32
+ Show uint32
+ Cookie [2]uint32
+}
+
+func (r *unixSocketRequest) Serialize() []byte {
+ b := writeBuffer{Bytes: make([]byte, sizeofUnixSocketRequest)}
+ b.Write(r.Family)
+ b.Write(r.Protocol)
+ native.PutUint16(b.Next(2), r.pad)
+ native.PutUint32(b.Next(4), r.States)
+ native.PutUint32(b.Next(4), r.INode)
+ native.PutUint32(b.Next(4), r.Show)
+ native.PutUint32(b.Next(4), r.Cookie[0])
+ native.PutUint32(b.Next(4), r.Cookie[1])
+ return b.Bytes
+}
+
+func (r *unixSocketRequest) Len() int { return sizeofUnixSocketRequest }
+
type readBuffer struct {
Bytes []byte
pos int
@@ -115,31 +141,126 @@ func (s *Socket) deserialize(b []byte) error {
return nil
}
+func (u *UnixSocket) deserialize(b []byte) error {
+ if len(b) < sizeofUnixSocket {
+ return fmt.Errorf("unix diag data short read (%d); want %d", len(b), sizeofUnixSocket)
+ }
+ rb := readBuffer{Bytes: b}
+ u.Type = rb.Read()
+ u.Family = rb.Read()
+ u.State = rb.Read()
+ u.pad = rb.Read()
+ u.INode = native.Uint32(rb.Next(4))
+ u.Cookie[0] = native.Uint32(rb.Next(4))
+ u.Cookie[1] = native.Uint32(rb.Next(4))
+ return nil
+}
+
+// SocketGet returns the Socket identified by its local and remote addresses.
+func (h *Handle) SocketGet(local, remote net.Addr) (*Socket, error) {
+ var protocol uint8
+ var localIP, remoteIP net.IP
+ var localPort, remotePort uint16
+ switch l := local.(type) {
+ case *net.TCPAddr:
+ r, ok := remote.(*net.TCPAddr)
+ if !ok {
+ return nil, ErrNotImplemented
+ }
+ localIP = l.IP
+ localPort = uint16(l.Port)
+ remoteIP = r.IP
+ remotePort = uint16(r.Port)
+ protocol = unix.IPPROTO_TCP
+ case *net.UDPAddr:
+ r, ok := remote.(*net.UDPAddr)
+ if !ok {
+ return nil, ErrNotImplemented
+ }
+ localIP = l.IP
+ localPort = uint16(l.Port)
+ remoteIP = r.IP
+ remotePort = uint16(r.Port)
+ protocol = unix.IPPROTO_UDP
+ default:
+ return nil, ErrNotImplemented
+ }
+
+ var family uint8
+ if localIP.To4() != nil && remoteIP.To4() != nil {
+ family = unix.AF_INET
+ }
+
+ if family == 0 && localIP.To16() != nil && remoteIP.To16() != nil {
+ family = unix.AF_INET6
+ }
+
+ if family == 0 {
+ return nil, ErrNotImplemented
+ }
+
+ req := h.newNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP)
+ req.AddData(&socketRequest{
+ Family: family,
+ Protocol: protocol,
+ States: 0xffffffff,
+ ID: SocketID{
+ SourcePort: localPort,
+ DestinationPort: remotePort,
+ Source: localIP,
+ Destination: remoteIP,
+ Cookie: [2]uint32{nl.TCPDIAG_NOCOOKIE, nl.TCPDIAG_NOCOOKIE},
+ },
+ })
+
+ msgs, err := req.Execute(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY)
+ if err != nil {
+ return nil, err
+ }
+ if len(msgs) == 0 {
+ return nil, errors.New("no message nor error from netlink")
+ }
+ if len(msgs) > 2 {
+ return nil, fmt.Errorf("multiple (%d) matching sockets", len(msgs))
+ }
+
+ sock := &Socket{}
+ if err := sock.deserialize(msgs[0]); err != nil {
+ return nil, err
+ }
+ return sock, nil
+}
+
// SocketGet returns the Socket identified by its local and remote addresses.
func SocketGet(local, remote net.Addr) (*Socket, error) {
+ return pkgHandle.SocketGet(local, remote)
+}
+
+// SocketDestroy kills the Socket identified by its local and remote addresses.
+func (h *Handle) SocketDestroy(local, remote net.Addr) error {
localTCP, ok := local.(*net.TCPAddr)
if !ok {
- return nil, ErrNotImplemented
+ return ErrNotImplemented
}
remoteTCP, ok := remote.(*net.TCPAddr)
if !ok {
- return nil, ErrNotImplemented
+ return ErrNotImplemented
}
localIP := localTCP.IP.To4()
if localIP == nil {
- return nil, ErrNotImplemented
+ return ErrNotImplemented
}
remoteIP := remoteTCP.IP.To4()
if remoteIP == nil {
- return nil, ErrNotImplemented
+ return ErrNotImplemented
}
s, err := nl.Subscribe(unix.NETLINK_INET_DIAG)
if err != nil {
- return nil, err
+ return err
}
defer s.Close()
- req := nl.NewNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, 0)
+ req := h.newNetlinkRequest(nl.SOCK_DESTROY, unix.NLM_F_ACK)
req.AddData(&socketRequest{
Family: unix.AF_INET,
Protocol: unix.IPPROTO_TCP,
@@ -151,64 +272,81 @@ func SocketGet(local, remote net.Addr) (*Socket, error) {
Cookie: [2]uint32{nl.TCPDIAG_NOCOOKIE, nl.TCPDIAG_NOCOOKIE},
},
})
- s.Send(req)
- msgs, from, err := s.Receive()
- if err != nil {
- return nil, err
- }
- if from.Pid != nl.PidKernel {
- return nil, fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, nl.PidKernel)
- }
- if len(msgs) == 0 {
- return nil, errors.New("no message nor error from netlink")
- }
- if len(msgs) > 2 {
- return nil, fmt.Errorf("multiple (%d) matching sockets", len(msgs))
- }
- sock := &Socket{}
- if err := sock.deserialize(msgs[0].Data); err != nil {
- return nil, err
- }
- return sock, nil
+
+ _, err = req.Execute(unix.NETLINK_INET_DIAG, 0)
+ return err
+}
+
+// SocketDestroy kills the Socket identified by its local and remote addresses.
+func SocketDestroy(local, remote net.Addr) error {
+ return pkgHandle.SocketDestroy(local, remote)
}
// SocketDiagTCPInfo requests INET_DIAG_INFO for TCP protocol for specified family type and return with extension TCP info.
-func SocketDiagTCPInfo(family uint8) ([]*InetDiagTCPInfoResp, error) {
+func (h *Handle) SocketDiagTCPInfo(family uint8) ([]*InetDiagTCPInfoResp, error) {
+ // Construct the request
+ req := h.newNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP)
+ req.AddData(&socketRequest{
+ Family: family,
+ Protocol: unix.IPPROTO_TCP,
+ Ext: (1 << (INET_DIAG_VEGASINFO - 1)) | (1 << (INET_DIAG_INFO - 1)),
+ States: uint32(0xfff), // all states
+ })
+
+ // Do the query and parse the result
var result []*InetDiagTCPInfoResp
- err := socketDiagTCPExecutor(family, func(m syscall.NetlinkMessage) error {
+ var err error
+ err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool {
sockInfo := &Socket{}
- if err := sockInfo.deserialize(m.Data); err != nil {
- return err
+ if err = sockInfo.deserialize(msg); err != nil {
+ return false
}
- attrs, err := nl.ParseRouteAttr(m.Data[sizeofSocket:])
- if err != nil {
- return err
+ var attrs []syscall.NetlinkRouteAttr
+ if attrs, err = nl.ParseRouteAttr(msg[sizeofSocket:]); err != nil {
+ return false
}
- res, err := attrsToInetDiagTCPInfoResp(attrs, sockInfo)
- if err != nil {
- return err
+ var res *InetDiagTCPInfoResp
+ if res, err = attrsToInetDiagTCPInfoResp(attrs, sockInfo); err != nil {
+ return false
}
result = append(result, res)
- return nil
+ return true
})
+
if err != nil {
return nil, err
}
return result, nil
}
+// SocketDiagTCPInfo requests INET_DIAG_INFO for TCP protocol for specified family type and return with extension TCP info.
+func SocketDiagTCPInfo(family uint8) ([]*InetDiagTCPInfoResp, error) {
+ return pkgHandle.SocketDiagTCPInfo(family)
+}
+
// SocketDiagTCP requests INET_DIAG_INFO for TCP protocol for specified family type and return related socket.
-func SocketDiagTCP(family uint8) ([]*Socket, error) {
+func (h *Handle) SocketDiagTCP(family uint8) ([]*Socket, error) {
+ // Construct the request
+ req := h.newNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP)
+ req.AddData(&socketRequest{
+ Family: family,
+ Protocol: unix.IPPROTO_TCP,
+ Ext: (1 << (INET_DIAG_VEGASINFO - 1)) | (1 << (INET_DIAG_INFO - 1)),
+ States: uint32(0xfff), // all states
+ })
+
+ // Do the query and parse the result
var result []*Socket
- err := socketDiagTCPExecutor(family, func(m syscall.NetlinkMessage) error {
+ var err error
+ err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool {
sockInfo := &Socket{}
- if err := sockInfo.deserialize(m.Data); err != nil {
- return err
+ if err = sockInfo.deserialize(msg); err != nil {
+ return false
}
result = append(result, sockInfo)
- return nil
+ return true
})
if err != nil {
return nil, err
@@ -216,76 +354,237 @@ func SocketDiagTCP(family uint8) ([]*Socket, error) {
return result, nil
}
-// socketDiagTCPExecutor requests INET_DIAG_INFO for TCP protocol for specified family type.
-func socketDiagTCPExecutor(family uint8, receiver func(syscall.NetlinkMessage) error) error {
- s, err := nl.Subscribe(unix.NETLINK_INET_DIAG)
+// SocketDiagTCP requests INET_DIAG_INFO for TCP protocol for specified family type and return related socket.
+func SocketDiagTCP(family uint8) ([]*Socket, error) {
+ return pkgHandle.SocketDiagTCP(family)
+}
+
+// SocketDiagUDPInfo requests INET_DIAG_INFO for UDP protocol for specified family type and return with extension info.
+func (h *Handle) SocketDiagUDPInfo(family uint8) ([]*InetDiagUDPInfoResp, error) {
+ // Construct the request
+ var extensions uint8
+ extensions = 1 << (INET_DIAG_VEGASINFO - 1)
+ extensions |= 1 << (INET_DIAG_INFO - 1)
+ extensions |= 1 << (INET_DIAG_MEMINFO - 1)
+
+ req := h.newNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP)
+ req.AddData(&socketRequest{
+ Family: family,
+ Protocol: unix.IPPROTO_UDP,
+ Ext: extensions,
+ States: uint32(0xfff), // all states
+ })
+
+ // Do the query and parse the result
+ var result []*InetDiagUDPInfoResp
+ var err error
+ err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool {
+ sockInfo := &Socket{}
+ if err = sockInfo.deserialize(msg); err != nil {
+ return false
+ }
+
+ var attrs []syscall.NetlinkRouteAttr
+ if attrs, err = nl.ParseRouteAttr(msg[sizeofSocket:]); err != nil {
+ return false
+ }
+
+ var res *InetDiagUDPInfoResp
+ if res, err = attrsToInetDiagUDPInfoResp(attrs, sockInfo); err != nil {
+ return false
+ }
+
+ result = append(result, res)
+ return true
+ })
if err != nil {
- return err
+ return nil, err
}
- defer s.Close()
+ return result, nil
+}
- req := nl.NewNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP)
+// SocketDiagUDPInfo requests INET_DIAG_INFO for UDP protocol for specified family type and return with extension info.
+func SocketDiagUDPInfo(family uint8) ([]*InetDiagUDPInfoResp, error) {
+ return pkgHandle.SocketDiagUDPInfo(family)
+}
+
+// SocketDiagUDP requests INET_DIAG_INFO for UDP protocol for specified family type and return related socket.
+func (h *Handle) SocketDiagUDP(family uint8) ([]*Socket, error) {
+ // Construct the request
+ req := h.newNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP)
req.AddData(&socketRequest{
Family: family,
- Protocol: unix.IPPROTO_TCP,
+ Protocol: unix.IPPROTO_UDP,
Ext: (1 << (INET_DIAG_VEGASINFO - 1)) | (1 << (INET_DIAG_INFO - 1)),
- States: uint32(0xfff), // All TCP states
+ States: uint32(0xfff), // all states
})
- s.Send(req)
-loop:
- for {
- msgs, from, err := s.Receive()
- if err != nil {
- return err
+ // Do the query and parse the result
+ var result []*Socket
+ var err error
+ err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool {
+ sockInfo := &Socket{}
+ if err = sockInfo.deserialize(msg); err != nil {
+ return false
}
- if from.Pid != nl.PidKernel {
- return fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, nl.PidKernel)
+ result = append(result, sockInfo)
+ return true
+ })
+ if err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+
+// SocketDiagUDP requests INET_DIAG_INFO for UDP protocol for specified family type and return related socket.
+func SocketDiagUDP(family uint8) ([]*Socket, error) {
+ return pkgHandle.SocketDiagUDP(family)
+}
+
+// UnixSocketDiagInfo requests UNIX_DIAG_INFO for unix sockets and return with extension info.
+func (h *Handle) UnixSocketDiagInfo() ([]*UnixDiagInfoResp, error) {
+ // Construct the request
+ var extensions uint8
+ extensions = 1 << UNIX_DIAG_NAME
+ extensions |= 1 << UNIX_DIAG_PEER
+ extensions |= 1 << UNIX_DIAG_RQLEN
+ req := h.newNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP)
+ req.AddData(&unixSocketRequest{
+ Family: unix.AF_UNIX,
+ States: ^uint32(0), // all states
+ Show: uint32(extensions),
+ })
+
+ var result []*UnixDiagInfoResp
+ var err error
+ err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool {
+ sockInfo := &UnixSocket{}
+ if err = sockInfo.deserialize(msg); err != nil {
+ return false
}
- if len(msgs) == 0 {
- return errors.New("no message nor error from netlink")
+
+ // Diagnosis also delivers sockets with AF_INET family, filter those
+ if sockInfo.Family != unix.AF_UNIX {
+ return false
}
- for _, m := range msgs {
- switch m.Header.Type {
- case unix.NLMSG_DONE:
- break loop
- case unix.NLMSG_ERROR:
- error := int32(native.Uint32(m.Data[0:4]))
- return syscall.Errno(-error)
- }
- if err := receiver(m); err != nil {
- return err
- }
+ var attrs []syscall.NetlinkRouteAttr
+ if attrs, err = nl.ParseRouteAttr(msg[sizeofSocket:]); err != nil {
+ return false
}
+
+ var res *UnixDiagInfoResp
+ if res, err = attrsToUnixDiagInfoResp(attrs, sockInfo); err != nil {
+ return false
+ }
+ result = append(result, res)
+ return true
+ })
+ if err != nil {
+ return nil, err
}
- return nil
+ return result, nil
+}
+
+// UnixSocketDiagInfo requests UNIX_DIAG_INFO for unix sockets and return with extension info.
+func UnixSocketDiagInfo() ([]*UnixDiagInfoResp, error) {
+ return pkgHandle.UnixSocketDiagInfo()
+}
+
+// UnixSocketDiag requests UNIX_DIAG_INFO for unix sockets.
+func (h *Handle) UnixSocketDiag() ([]*UnixSocket, error) {
+ // Construct the request
+ req := h.newNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP)
+ req.AddData(&unixSocketRequest{
+ Family: unix.AF_UNIX,
+ States: ^uint32(0), // all states
+ })
+
+ var result []*UnixSocket
+ var err error
+ err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool {
+ sockInfo := &UnixSocket{}
+ if err = sockInfo.deserialize(msg); err != nil {
+ return false
+ }
+
+ // Diagnosis also delivers sockets with AF_INET family, filter those
+ if sockInfo.Family == unix.AF_UNIX {
+ result = append(result, sockInfo)
+ }
+ return true
+ })
+ if err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+
+// UnixSocketDiag requests UNIX_DIAG_INFO for unix sockets.
+func UnixSocketDiag() ([]*UnixSocket, error) {
+ return pkgHandle.UnixSocketDiag()
}
func attrsToInetDiagTCPInfoResp(attrs []syscall.NetlinkRouteAttr, sockInfo *Socket) (*InetDiagTCPInfoResp, error) {
- var tcpInfo *TCPInfo
- var tcpBBRInfo *TCPBBRInfo
+ info := &InetDiagTCPInfoResp{
+ InetDiagMsg: sockInfo,
+ }
for _, a := range attrs {
- if a.Attr.Type == INET_DIAG_INFO {
- tcpInfo = &TCPInfo{}
- if err := tcpInfo.deserialize(a.Value); err != nil {
+ switch a.Attr.Type {
+ case INET_DIAG_INFO:
+ info.TCPInfo = &TCPInfo{}
+ if err := info.TCPInfo.deserialize(a.Value); err != nil {
+ return nil, err
+ }
+ case INET_DIAG_BBRINFO:
+ info.TCPBBRInfo = &TCPBBRInfo{}
+ if err := info.TCPBBRInfo.deserialize(a.Value); err != nil {
return nil, err
}
- continue
}
+ }
- if a.Attr.Type == INET_DIAG_BBRINFO {
- tcpBBRInfo = &TCPBBRInfo{}
- if err := tcpBBRInfo.deserialize(a.Value); err != nil {
+ return info, nil
+}
+
+func attrsToInetDiagUDPInfoResp(attrs []syscall.NetlinkRouteAttr, sockInfo *Socket) (*InetDiagUDPInfoResp, error) {
+ info := &InetDiagUDPInfoResp{
+ InetDiagMsg: sockInfo,
+ }
+ for _, a := range attrs {
+ switch a.Attr.Type {
+ case INET_DIAG_MEMINFO:
+ info.Memory = &MemInfo{}
+ if err := info.Memory.deserialize(a.Value); err != nil {
return nil, err
}
- continue
}
}
- return &InetDiagTCPInfoResp{
- InetDiagMsg: sockInfo,
- TCPInfo: tcpInfo,
- TCPBBRInfo: tcpBBRInfo,
- }, nil
+ return info, nil
+}
+
+func attrsToUnixDiagInfoResp(attrs []syscall.NetlinkRouteAttr, sockInfo *UnixSocket) (*UnixDiagInfoResp, error) {
+ info := &UnixDiagInfoResp{
+ DiagMsg: sockInfo,
+ }
+ for _, a := range attrs {
+ switch a.Attr.Type {
+ case UNIX_DIAG_NAME:
+ name := string(a.Value[:a.Attr.Len])
+ info.Name = &name
+ case UNIX_DIAG_PEER:
+ peer := native.Uint32(a.Value)
+ info.Peer = &peer
+ case UNIX_DIAG_RQLEN:
+ info.Queue = &QueueInfo{
+ RQueue: native.Uint32(a.Value[:4]),
+ WQueue: native.Uint32(a.Value[4:]),
+ }
+ // default:
+ // fmt.Println("unknown unix attribute type", a.Attr.Type, "with data", a.Value)
+ }
+ }
+
+ return info, nil
}
diff --git a/vendor/github.com/vishvananda/netlink/socket_xdp_linux.go b/vendor/github.com/vishvananda/netlink/socket_xdp_linux.go
new file mode 100644
index 00000000000..20c82f9c766
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/socket_xdp_linux.go
@@ -0,0 +1,195 @@
+package netlink
+
+import (
+ "errors"
+ "fmt"
+ "syscall"
+
+ "github.com/vishvananda/netlink/nl"
+ "golang.org/x/sys/unix"
+)
+
+const (
+ sizeofXDPSocketRequest = 1 + 1 + 2 + 4 + 4 + 2*4
+ sizeofXDPSocket = 0x10
+)
+
+// https://elixir.bootlin.com/linux/v6.2/source/include/uapi/linux/xdp_diag.h#L12
+type xdpSocketRequest struct {
+ Family uint8
+ Protocol uint8
+ pad uint16
+ Ino uint32
+ Show uint32
+ Cookie [2]uint32
+}
+
+func (r *xdpSocketRequest) Serialize() []byte {
+ b := writeBuffer{Bytes: make([]byte, sizeofSocketRequest)}
+ b.Write(r.Family)
+ b.Write(r.Protocol)
+ native.PutUint16(b.Next(2), r.pad)
+ native.PutUint32(b.Next(4), r.Ino)
+ native.PutUint32(b.Next(4), r.Show)
+ native.PutUint32(b.Next(4), r.Cookie[0])
+ native.PutUint32(b.Next(4), r.Cookie[1])
+ return b.Bytes
+}
+
+func (r *xdpSocketRequest) Len() int { return sizeofXDPSocketRequest }
+
+func (s *XDPSocket) deserialize(b []byte) error {
+ if len(b) < sizeofXDPSocket {
+ return fmt.Errorf("XDP socket data short read (%d); want %d", len(b), sizeofXDPSocket)
+ }
+ rb := readBuffer{Bytes: b}
+ s.Family = rb.Read()
+ s.Type = rb.Read()
+ s.pad = native.Uint16(rb.Next(2))
+ s.Ino = native.Uint32(rb.Next(4))
+ s.Cookie[0] = native.Uint32(rb.Next(4))
+ s.Cookie[1] = native.Uint32(rb.Next(4))
+ return nil
+}
+
+// XDPSocketGet returns the XDP socket identified by its inode number and/or
+// socket cookie. Specify the cookie as SOCK_ANY_COOKIE if
+func SocketXDPGetInfo(ino uint32, cookie uint64) (*XDPDiagInfoResp, error) {
+ // We have a problem here: dumping AF_XDP sockets currently does not support
+ // filtering. We thus need to dump all XSKs and then only filter afterwards
+ // :(
+ xsks, err := SocketDiagXDP()
+ if err != nil {
+ return nil, err
+ }
+ checkCookie := cookie != SOCK_ANY_COOKIE && cookie != 0
+ crumblingCookie := [2]uint32{uint32(cookie), uint32(cookie >> 32)}
+ checkIno := ino != 0
+ var xskinfo *XDPDiagInfoResp
+ for _, xsk := range xsks {
+ if checkIno && xsk.XDPDiagMsg.Ino != ino {
+ continue
+ }
+ if checkCookie && xsk.XDPDiagMsg.Cookie != crumblingCookie {
+ continue
+ }
+ if xskinfo != nil {
+ return nil, errors.New("multiple matching XDP sockets")
+ }
+ xskinfo = xsk
+ }
+ if xskinfo == nil {
+ return nil, errors.New("no matching XDP socket")
+ }
+ return xskinfo, nil
+}
+
+// SocketDiagXDP requests XDP_DIAG_INFO for XDP family sockets.
+func SocketDiagXDP() ([]*XDPDiagInfoResp, error) {
+ var result []*XDPDiagInfoResp
+ err := socketDiagXDPExecutor(func(m syscall.NetlinkMessage) error {
+ sockInfo := &XDPSocket{}
+ if err := sockInfo.deserialize(m.Data); err != nil {
+ return err
+ }
+ attrs, err := nl.ParseRouteAttr(m.Data[sizeofXDPSocket:])
+ if err != nil {
+ return err
+ }
+
+ res, err := attrsToXDPDiagInfoResp(attrs, sockInfo)
+ if err != nil {
+ return err
+ }
+
+ result = append(result, res)
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+
+// socketDiagXDPExecutor requests XDP_DIAG_INFO for XDP family sockets.
+func socketDiagXDPExecutor(receiver func(syscall.NetlinkMessage) error) error {
+ s, err := nl.Subscribe(unix.NETLINK_INET_DIAG)
+ if err != nil {
+ return err
+ }
+ defer s.Close()
+
+ req := nl.NewNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP)
+ req.AddData(&xdpSocketRequest{
+ Family: unix.AF_XDP,
+ Show: XDP_SHOW_INFO | XDP_SHOW_RING_CFG | XDP_SHOW_UMEM | XDP_SHOW_STATS,
+ })
+ if err := s.Send(req); err != nil {
+ return err
+ }
+
+loop:
+ for {
+ msgs, from, err := s.Receive()
+ if err != nil {
+ return err
+ }
+ if from.Pid != nl.PidKernel {
+ return fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, nl.PidKernel)
+ }
+ if len(msgs) == 0 {
+ return errors.New("no message nor error from netlink")
+ }
+
+ for _, m := range msgs {
+ switch m.Header.Type {
+ case unix.NLMSG_DONE:
+ break loop
+ case unix.NLMSG_ERROR:
+ error := int32(native.Uint32(m.Data[0:4]))
+ return syscall.Errno(-error)
+ }
+ if err := receiver(m); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func attrsToXDPDiagInfoResp(attrs []syscall.NetlinkRouteAttr, sockInfo *XDPSocket) (*XDPDiagInfoResp, error) {
+ resp := &XDPDiagInfoResp{
+ XDPDiagMsg: sockInfo,
+ XDPInfo: &XDPInfo{},
+ }
+ for _, a := range attrs {
+ switch a.Attr.Type {
+ case XDP_DIAG_INFO:
+ resp.XDPInfo.Ifindex = native.Uint32(a.Value[0:4])
+ resp.XDPInfo.QueueID = native.Uint32(a.Value[4:8])
+ case XDP_DIAG_UID:
+ resp.XDPInfo.UID = native.Uint32(a.Value[0:4])
+ case XDP_DIAG_RX_RING:
+ resp.XDPInfo.RxRingEntries = native.Uint32(a.Value[0:4])
+ case XDP_DIAG_TX_RING:
+ resp.XDPInfo.TxRingEntries = native.Uint32(a.Value[0:4])
+ case XDP_DIAG_UMEM_FILL_RING:
+ resp.XDPInfo.UmemFillRingEntries = native.Uint32(a.Value[0:4])
+ case XDP_DIAG_UMEM_COMPLETION_RING:
+ resp.XDPInfo.UmemCompletionRingEntries = native.Uint32(a.Value[0:4])
+ case XDP_DIAG_UMEM:
+ umem := &XDPDiagUmem{}
+ if err := umem.deserialize(a.Value); err != nil {
+ return nil, err
+ }
+ resp.XDPInfo.Umem = umem
+ case XDP_DIAG_STATS:
+ stats := &XDPDiagStats{}
+ if err := stats.deserialize(a.Value); err != nil {
+ return nil, err
+ }
+ resp.XDPInfo.Stats = stats
+ }
+ }
+ return resp, nil
+}
diff --git a/vendor/github.com/vishvananda/netlink/tcp.go b/vendor/github.com/vishvananda/netlink/tcp.go
index 23ca014d43b..43f80a0fca7 100644
--- a/vendor/github.com/vishvananda/netlink/tcp.go
+++ b/vendor/github.com/vishvananda/netlink/tcp.go
@@ -82,3 +82,11 @@ type TCPBBRInfo struct {
BBRPacingGain uint32
BBRCwndGain uint32
}
+
+// According to https://man7.org/linux/man-pages/man7/sock_diag.7.html
+type MemInfo struct {
+ RMem uint32
+ WMem uint32
+ FMem uint32
+ TMem uint32
+}
diff --git a/vendor/github.com/vishvananda/netlink/tcp_linux.go b/vendor/github.com/vishvananda/netlink/tcp_linux.go
index 293858738d8..e98036da55b 100644
--- a/vendor/github.com/vishvananda/netlink/tcp_linux.go
+++ b/vendor/github.com/vishvananda/netlink/tcp_linux.go
@@ -8,6 +8,7 @@ import (
const (
tcpBBRInfoLen = 20
+ memInfoLen = 16
)
func checkDeserErr(err error) error {
@@ -351,3 +352,17 @@ func (t *TCPBBRInfo) deserialize(b []byte) error {
return nil
}
+
+func (m *MemInfo) deserialize(b []byte) error {
+ if len(b) != memInfoLen {
+ return errors.New("Invalid length")
+ }
+
+ rb := bytes.NewBuffer(b)
+ m.RMem = native.Uint32(rb.Next(4))
+ m.WMem = native.Uint32(rb.Next(4))
+ m.FMem = native.Uint32(rb.Next(4))
+ m.TMem = native.Uint32(rb.Next(4))
+
+ return nil
+}
diff --git a/vendor/github.com/vishvananda/netlink/unix_diag.go b/vendor/github.com/vishvananda/netlink/unix_diag.go
new file mode 100644
index 00000000000..d81776f36ef
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/unix_diag.go
@@ -0,0 +1,27 @@
+package netlink
+
+// According to linux/include/uapi/linux/unix_diag.h
+const (
+ UNIX_DIAG_NAME = iota
+ UNIX_DIAG_VFS
+ UNIX_DIAG_PEER
+ UNIX_DIAG_ICONS
+ UNIX_DIAG_RQLEN
+ UNIX_DIAG_MEMINFO
+ UNIX_DIAG_SHUTDOWN
+ UNIX_DIAG_UID
+ UNIX_DIAG_MAX
+)
+
+type UnixDiagInfoResp struct {
+ DiagMsg *UnixSocket
+ Name *string
+ Peer *uint32
+ Queue *QueueInfo
+ Shutdown *uint8
+}
+
+type QueueInfo struct {
+ RQueue uint32
+ WQueue uint32
+}
diff --git a/vendor/github.com/vishvananda/netlink/vdpa_linux.go b/vendor/github.com/vishvananda/netlink/vdpa_linux.go
new file mode 100644
index 00000000000..7c15986d0f9
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/vdpa_linux.go
@@ -0,0 +1,463 @@
+package netlink
+
+import (
+ "fmt"
+ "net"
+ "syscall"
+
+ "golang.org/x/sys/unix"
+
+ "github.com/vishvananda/netlink/nl"
+)
+
+type vdpaDevID struct {
+ Name string
+ ID uint32
+}
+
+// VDPADev contains info about VDPA device
+type VDPADev struct {
+ vdpaDevID
+ VendorID uint32
+ MaxVQS uint32
+ MaxVQSize uint16
+ MinVQSize uint16
+}
+
+// VDPADevConfig contains configuration of the VDPA device
+type VDPADevConfig struct {
+ vdpaDevID
+ Features uint64
+ NegotiatedFeatures uint64
+ Net VDPADevConfigNet
+}
+
+// VDPADevVStats conatins vStats for the VDPA device
+type VDPADevVStats struct {
+ vdpaDevID
+ QueueIndex uint32
+ Vendor []VDPADevVStatsVendor
+ NegotiatedFeatures uint64
+}
+
+// VDPADevVStatsVendor conatins name and value for vendor specific vstat option
+type VDPADevVStatsVendor struct {
+ Name string
+ Value uint64
+}
+
+// VDPADevConfigNet conatins status and net config for the VDPA device
+type VDPADevConfigNet struct {
+ Status VDPADevConfigNetStatus
+ Cfg VDPADevConfigNetCfg
+}
+
+// VDPADevConfigNetStatus contains info about net status
+type VDPADevConfigNetStatus struct {
+ LinkUp bool
+ Announce bool
+}
+
+// VDPADevConfigNetCfg contains net config for the VDPA device
+type VDPADevConfigNetCfg struct {
+ MACAddr net.HardwareAddr
+ MaxVQP uint16
+ MTU uint16
+}
+
+// VDPAMGMTDev conatins info about VDPA management device
+type VDPAMGMTDev struct {
+ BusName string
+ DevName string
+ SupportedClasses uint64
+ SupportedFeatures uint64
+ MaxVQS uint32
+}
+
+// VDPANewDevParams contains parameters for new VDPA device
+// use SetBits to configure requried features for the device
+// example:
+//
+// VDPANewDevParams{Features: SetBits(0, VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_MAC_ADDR)}
+type VDPANewDevParams struct {
+ MACAddr net.HardwareAddr
+ MaxVQP uint16
+ MTU uint16
+ Features uint64
+}
+
+// SetBits set provided bits in the uint64 input value
+// usage example:
+// features := SetBits(0, VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_MAC_ADDR)
+func SetBits(input uint64, pos ...int) uint64 {
+ for _, p := range pos {
+ input |= 1 << uint64(p)
+ }
+ return input
+}
+
+// IsBitSet check if specific bit is set in the uint64 input value
+// usage example:
+// hasNetClass := IsBitSet(mgmtDev, VIRTIO_ID_NET)
+func IsBitSet(input uint64, pos int) bool {
+ val := input & (1 << uint64(pos))
+ return val > 0
+}
+
+// VDPANewDev adds new VDPA device
+// Equivalent to: `vdpa dev add name mgmtdev /mgmtName [params]`
+func VDPANewDev(name, mgmtBus, mgmtName string, params VDPANewDevParams) error {
+ return pkgHandle.VDPANewDev(name, mgmtBus, mgmtName, params)
+}
+
+// VDPADelDev removes VDPA device
+// Equivalent to: `vdpa dev del `
+func VDPADelDev(name string) error {
+ return pkgHandle.VDPADelDev(name)
+}
+
+// VDPAGetDevList returns list of VDPA devices
+// Equivalent to: `vdpa dev show`
+func VDPAGetDevList() ([]*VDPADev, error) {
+ return pkgHandle.VDPAGetDevList()
+}
+
+// VDPAGetDevByName returns VDPA device selected by name
+// Equivalent to: `vdpa dev show `
+func VDPAGetDevByName(name string) (*VDPADev, error) {
+ return pkgHandle.VDPAGetDevByName(name)
+}
+
+// VDPAGetDevConfigList returns list of VDPA devices configurations
+// Equivalent to: `vdpa dev config show`
+func VDPAGetDevConfigList() ([]*VDPADevConfig, error) {
+ return pkgHandle.VDPAGetDevConfigList()
+}
+
+// VDPAGetDevConfigByName returns VDPA device configuration selected by name
+// Equivalent to: `vdpa dev config show `
+func VDPAGetDevConfigByName(name string) (*VDPADevConfig, error) {
+ return pkgHandle.VDPAGetDevConfigByName(name)
+}
+
+// VDPAGetDevVStats returns vstats for VDPA device
+// Equivalent to: `vdpa dev vstats show qidx `
+func VDPAGetDevVStats(name string, queueIndex uint32) (*VDPADevVStats, error) {
+ return pkgHandle.VDPAGetDevVStats(name, queueIndex)
+}
+
+// VDPAGetMGMTDevList returns list of mgmt devices
+// Equivalent to: `vdpa mgmtdev show`
+func VDPAGetMGMTDevList() ([]*VDPAMGMTDev, error) {
+ return pkgHandle.VDPAGetMGMTDevList()
+}
+
+// VDPAGetMGMTDevByBusAndName returns mgmt devices selected by bus and name
+// Equivalent to: `vdpa mgmtdev show /`
+func VDPAGetMGMTDevByBusAndName(bus, name string) (*VDPAMGMTDev, error) {
+ return pkgHandle.VDPAGetMGMTDevByBusAndName(bus, name)
+}
+
+type vdpaNetlinkMessage []syscall.NetlinkRouteAttr
+
+func (id *vdpaDevID) parseIDAttribute(attr syscall.NetlinkRouteAttr) {
+ switch attr.Attr.Type {
+ case nl.VDPA_ATTR_DEV_NAME:
+ id.Name = nl.BytesToString(attr.Value)
+ case nl.VDPA_ATTR_DEV_ID:
+ id.ID = native.Uint32(attr.Value)
+ }
+}
+
+func (netStatus *VDPADevConfigNetStatus) parseStatusAttribute(value []byte) {
+ a := native.Uint16(value)
+ netStatus.Announce = (a & VIRTIO_NET_S_ANNOUNCE) > 0
+ netStatus.LinkUp = (a & VIRTIO_NET_S_LINK_UP) > 0
+}
+
+func (d *VDPADev) parseAttributes(attrs vdpaNetlinkMessage) {
+ for _, a := range attrs {
+ d.parseIDAttribute(a)
+ switch a.Attr.Type {
+ case nl.VDPA_ATTR_DEV_VENDOR_ID:
+ d.VendorID = native.Uint32(a.Value)
+ case nl.VDPA_ATTR_DEV_MAX_VQS:
+ d.MaxVQS = native.Uint32(a.Value)
+ case nl.VDPA_ATTR_DEV_MAX_VQ_SIZE:
+ d.MaxVQSize = native.Uint16(a.Value)
+ case nl.VDPA_ATTR_DEV_MIN_VQ_SIZE:
+ d.MinVQSize = native.Uint16(a.Value)
+ }
+ }
+}
+
+func (c *VDPADevConfig) parseAttributes(attrs vdpaNetlinkMessage) {
+ for _, a := range attrs {
+ c.parseIDAttribute(a)
+ switch a.Attr.Type {
+ case nl.VDPA_ATTR_DEV_NET_CFG_MACADDR:
+ c.Net.Cfg.MACAddr = a.Value
+ case nl.VDPA_ATTR_DEV_NET_STATUS:
+ c.Net.Status.parseStatusAttribute(a.Value)
+ case nl.VDPA_ATTR_DEV_NET_CFG_MAX_VQP:
+ c.Net.Cfg.MaxVQP = native.Uint16(a.Value)
+ case nl.VDPA_ATTR_DEV_NET_CFG_MTU:
+ c.Net.Cfg.MTU = native.Uint16(a.Value)
+ case nl.VDPA_ATTR_DEV_FEATURES:
+ c.Features = native.Uint64(a.Value)
+ case nl.VDPA_ATTR_DEV_NEGOTIATED_FEATURES:
+ c.NegotiatedFeatures = native.Uint64(a.Value)
+ }
+ }
+}
+
+func (s *VDPADevVStats) parseAttributes(attrs vdpaNetlinkMessage) {
+ for _, a := range attrs {
+ s.parseIDAttribute(a)
+ switch a.Attr.Type {
+ case nl.VDPA_ATTR_DEV_QUEUE_INDEX:
+ s.QueueIndex = native.Uint32(a.Value)
+ case nl.VDPA_ATTR_DEV_VENDOR_ATTR_NAME:
+ s.Vendor = append(s.Vendor, VDPADevVStatsVendor{Name: nl.BytesToString(a.Value)})
+ case nl.VDPA_ATTR_DEV_VENDOR_ATTR_VALUE:
+ if len(s.Vendor) == 0 {
+ break
+ }
+ s.Vendor[len(s.Vendor)-1].Value = native.Uint64(a.Value)
+ case nl.VDPA_ATTR_DEV_NEGOTIATED_FEATURES:
+ s.NegotiatedFeatures = native.Uint64(a.Value)
+ }
+ }
+}
+
+func (d *VDPAMGMTDev) parseAttributes(attrs vdpaNetlinkMessage) {
+ for _, a := range attrs {
+ switch a.Attr.Type {
+ case nl.VDPA_ATTR_MGMTDEV_BUS_NAME:
+ d.BusName = nl.BytesToString(a.Value)
+ case nl.VDPA_ATTR_MGMTDEV_DEV_NAME:
+ d.DevName = nl.BytesToString(a.Value)
+ case nl.VDPA_ATTR_MGMTDEV_SUPPORTED_CLASSES:
+ d.SupportedClasses = native.Uint64(a.Value)
+ case nl.VDPA_ATTR_DEV_SUPPORTED_FEATURES:
+ d.SupportedFeatures = native.Uint64(a.Value)
+ case nl.VDPA_ATTR_DEV_MGMTDEV_MAX_VQS:
+ d.MaxVQS = native.Uint32(a.Value)
+ }
+ }
+}
+
+func (h *Handle) vdpaRequest(command uint8, extraFlags int, attrs []*nl.RtAttr) ([]vdpaNetlinkMessage, error) {
+ f, err := h.GenlFamilyGet(nl.VDPA_GENL_NAME)
+ if err != nil {
+ return nil, err
+ }
+ req := h.newNetlinkRequest(int(f.ID), unix.NLM_F_ACK|extraFlags)
+ req.AddData(&nl.Genlmsg{
+ Command: command,
+ Version: nl.VDPA_GENL_VERSION,
+ })
+ for _, a := range attrs {
+ req.AddData(a)
+ }
+
+ resp, err := req.Execute(unix.NETLINK_GENERIC, 0)
+ if err != nil {
+ return nil, err
+ }
+ messages := make([]vdpaNetlinkMessage, 0, len(resp))
+ for _, m := range resp {
+ attrs, err := nl.ParseRouteAttr(m[nl.SizeofGenlmsg:])
+ if err != nil {
+ return nil, err
+ }
+ messages = append(messages, attrs)
+ }
+ return messages, nil
+}
+
+// dump all devices if dev is nil
+func (h *Handle) vdpaDevGet(dev *string) ([]*VDPADev, error) {
+ var extraFlags int
+ var attrs []*nl.RtAttr
+ if dev != nil {
+ attrs = append(attrs, nl.NewRtAttr(nl.VDPA_ATTR_DEV_NAME, nl.ZeroTerminated(*dev)))
+ } else {
+ extraFlags = extraFlags | unix.NLM_F_DUMP
+ }
+ messages, err := h.vdpaRequest(nl.VDPA_CMD_DEV_GET, extraFlags, attrs)
+ if err != nil {
+ return nil, err
+ }
+ devs := make([]*VDPADev, 0, len(messages))
+ for _, m := range messages {
+ d := &VDPADev{}
+ d.parseAttributes(m)
+ devs = append(devs, d)
+ }
+ return devs, nil
+}
+
+// dump all devices if dev is nil
+func (h *Handle) vdpaDevConfigGet(dev *string) ([]*VDPADevConfig, error) {
+ var extraFlags int
+ var attrs []*nl.RtAttr
+ if dev != nil {
+ attrs = append(attrs, nl.NewRtAttr(nl.VDPA_ATTR_DEV_NAME, nl.ZeroTerminated(*dev)))
+ } else {
+ extraFlags = extraFlags | unix.NLM_F_DUMP
+ }
+ messages, err := h.vdpaRequest(nl.VDPA_CMD_DEV_CONFIG_GET, extraFlags, attrs)
+ if err != nil {
+ return nil, err
+ }
+ cfgs := make([]*VDPADevConfig, 0, len(messages))
+ for _, m := range messages {
+ cfg := &VDPADevConfig{}
+ cfg.parseAttributes(m)
+ cfgs = append(cfgs, cfg)
+ }
+ return cfgs, nil
+}
+
+// dump all devices if dev is nil
+func (h *Handle) vdpaMGMTDevGet(bus, dev *string) ([]*VDPAMGMTDev, error) {
+ var extraFlags int
+ var attrs []*nl.RtAttr
+ if dev != nil {
+ attrs = append(attrs,
+ nl.NewRtAttr(nl.VDPA_ATTR_MGMTDEV_DEV_NAME, nl.ZeroTerminated(*dev)),
+ )
+ if bus != nil {
+ attrs = append(attrs,
+ nl.NewRtAttr(nl.VDPA_ATTR_MGMTDEV_BUS_NAME, nl.ZeroTerminated(*bus)),
+ )
+ }
+ } else {
+ extraFlags = extraFlags | unix.NLM_F_DUMP
+ }
+ messages, err := h.vdpaRequest(nl.VDPA_CMD_MGMTDEV_GET, extraFlags, attrs)
+ if err != nil {
+ return nil, err
+ }
+ cfgs := make([]*VDPAMGMTDev, 0, len(messages))
+ for _, m := range messages {
+ cfg := &VDPAMGMTDev{}
+ cfg.parseAttributes(m)
+ cfgs = append(cfgs, cfg)
+ }
+ return cfgs, nil
+}
+
+// VDPANewDev adds new VDPA device
+// Equivalent to: `vdpa dev add name mgmtdev /mgmtName [params]`
+func (h *Handle) VDPANewDev(name, mgmtBus, mgmtName string, params VDPANewDevParams) error {
+ attrs := []*nl.RtAttr{
+ nl.NewRtAttr(nl.VDPA_ATTR_DEV_NAME, nl.ZeroTerminated(name)),
+ nl.NewRtAttr(nl.VDPA_ATTR_MGMTDEV_DEV_NAME, nl.ZeroTerminated(mgmtName)),
+ }
+ if mgmtBus != "" {
+ attrs = append(attrs, nl.NewRtAttr(nl.VDPA_ATTR_MGMTDEV_BUS_NAME, nl.ZeroTerminated(mgmtBus)))
+ }
+ if len(params.MACAddr) != 0 {
+ attrs = append(attrs, nl.NewRtAttr(nl.VDPA_ATTR_DEV_NET_CFG_MACADDR, params.MACAddr))
+ }
+ if params.MaxVQP > 0 {
+ attrs = append(attrs, nl.NewRtAttr(nl.VDPA_ATTR_DEV_NET_CFG_MAX_VQP, nl.Uint16Attr(params.MaxVQP)))
+ }
+ if params.MTU > 0 {
+ attrs = append(attrs, nl.NewRtAttr(nl.VDPA_ATTR_DEV_NET_CFG_MTU, nl.Uint16Attr(params.MTU)))
+ }
+ if params.Features > 0 {
+ attrs = append(attrs, nl.NewRtAttr(nl.VDPA_ATTR_DEV_FEATURES, nl.Uint64Attr(params.Features)))
+ }
+ _, err := h.vdpaRequest(nl.VDPA_CMD_DEV_NEW, 0, attrs)
+ return err
+}
+
+// VDPADelDev removes VDPA device
+// Equivalent to: `vdpa dev del `
+func (h *Handle) VDPADelDev(name string) error {
+ _, err := h.vdpaRequest(nl.VDPA_CMD_DEV_DEL, 0, []*nl.RtAttr{
+ nl.NewRtAttr(nl.VDPA_ATTR_DEV_NAME, nl.ZeroTerminated(name))})
+ return err
+}
+
+// VDPAGetDevList returns list of VDPA devices
+// Equivalent to: `vdpa dev show`
+func (h *Handle) VDPAGetDevList() ([]*VDPADev, error) {
+ return h.vdpaDevGet(nil)
+}
+
+// VDPAGetDevByName returns VDPA device selected by name
+// Equivalent to: `vdpa dev show `
+func (h *Handle) VDPAGetDevByName(name string) (*VDPADev, error) {
+ devs, err := h.vdpaDevGet(&name)
+ if err != nil {
+ return nil, err
+ }
+ if len(devs) == 0 {
+ return nil, fmt.Errorf("device not found")
+ }
+ return devs[0], nil
+}
+
+// VDPAGetDevConfigList returns list of VDPA devices configurations
+// Equivalent to: `vdpa dev config show`
+func (h *Handle) VDPAGetDevConfigList() ([]*VDPADevConfig, error) {
+ return h.vdpaDevConfigGet(nil)
+}
+
+// VDPAGetDevConfigByName returns VDPA device configuration selected by name
+// Equivalent to: `vdpa dev config show `
+func (h *Handle) VDPAGetDevConfigByName(name string) (*VDPADevConfig, error) {
+ cfgs, err := h.vdpaDevConfigGet(&name)
+ if err != nil {
+ return nil, err
+ }
+ if len(cfgs) == 0 {
+ return nil, fmt.Errorf("configuration not found")
+ }
+ return cfgs[0], nil
+}
+
+// VDPAGetDevVStats returns vstats for VDPA device
+// Equivalent to: `vdpa dev vstats show qidx `
+func (h *Handle) VDPAGetDevVStats(name string, queueIndex uint32) (*VDPADevVStats, error) {
+ messages, err := h.vdpaRequest(nl.VDPA_CMD_DEV_VSTATS_GET, 0, []*nl.RtAttr{
+ nl.NewRtAttr(nl.VDPA_ATTR_DEV_NAME, nl.ZeroTerminated(name)),
+ nl.NewRtAttr(nl.VDPA_ATTR_DEV_QUEUE_INDEX, nl.Uint32Attr(queueIndex)),
+ })
+ if err != nil {
+ return nil, err
+ }
+ if len(messages) == 0 {
+ return nil, fmt.Errorf("stats not found")
+ }
+ stats := &VDPADevVStats{}
+ stats.parseAttributes(messages[0])
+ return stats, nil
+}
+
+// VDPAGetMGMTDevList returns list of mgmt devices
+// Equivalent to: `vdpa mgmtdev show`
+func (h *Handle) VDPAGetMGMTDevList() ([]*VDPAMGMTDev, error) {
+ return h.vdpaMGMTDevGet(nil, nil)
+}
+
+// VDPAGetMGMTDevByBusAndName returns mgmt devices selected by bus and name
+// Equivalent to: `vdpa mgmtdev show /`
+func (h *Handle) VDPAGetMGMTDevByBusAndName(bus, name string) (*VDPAMGMTDev, error) {
+ var busPtr *string
+ if bus != "" {
+ busPtr = &bus
+ }
+ devs, err := h.vdpaMGMTDevGet(busPtr, &name)
+ if err != nil {
+ return nil, err
+ }
+ if len(devs) == 0 {
+ return nil, fmt.Errorf("mgmtdev not found")
+ }
+ return devs[0], nil
+}
diff --git a/vendor/github.com/vishvananda/netlink/virtio.go b/vendor/github.com/vishvananda/netlink/virtio.go
new file mode 100644
index 00000000000..78a497bbc3b
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/virtio.go
@@ -0,0 +1,132 @@
+package netlink
+
+// features for virtio net
+const (
+ VIRTIO_NET_F_CSUM = 0 // Host handles pkts w/ partial csum
+ VIRTIO_NET_F_GUEST_CSUM = 1 // Guest handles pkts w/ partial csum
+ VIRTIO_NET_F_CTRL_GUEST_OFFLOADS = 2 // Dynamic offload configuration.
+ VIRTIO_NET_F_MTU = 3 // Initial MTU advice
+ VIRTIO_NET_F_MAC = 5 // Host has given MAC address.
+ VIRTIO_NET_F_GUEST_TSO4 = 7 // Guest can handle TSOv4 in.
+ VIRTIO_NET_F_GUEST_TSO6 = 8 // Guest can handle TSOv6 in.
+ VIRTIO_NET_F_GUEST_ECN = 9 // Guest can handle TSO[6] w/ ECN in.
+ VIRTIO_NET_F_GUEST_UFO = 10 // Guest can handle UFO in.
+ VIRTIO_NET_F_HOST_TSO4 = 11 // Host can handle TSOv4 in.
+ VIRTIO_NET_F_HOST_TSO6 = 12 // Host can handle TSOv6 in.
+ VIRTIO_NET_F_HOST_ECN = 13 // Host can handle TSO[6] w/ ECN in.
+ VIRTIO_NET_F_HOST_UFO = 14 // Host can handle UFO in.
+ VIRTIO_NET_F_MRG_RXBUF = 15 // Host can merge receive buffers.
+ VIRTIO_NET_F_STATUS = 16 // virtio_net_config.status available
+ VIRTIO_NET_F_CTRL_VQ = 17 // Control channel available
+ VIRTIO_NET_F_CTRL_RX = 18 // Control channel RX mode support
+ VIRTIO_NET_F_CTRL_VLAN = 19 // Control channel VLAN filtering
+ VIRTIO_NET_F_CTRL_RX_EXTRA = 20 // Extra RX mode control support
+ VIRTIO_NET_F_GUEST_ANNOUNCE = 21 // Guest can announce device on the* network
+ VIRTIO_NET_F_MQ = 22 // Device supports Receive Flow Steering
+ VIRTIO_NET_F_CTRL_MAC_ADDR = 23 // Set MAC address
+ VIRTIO_NET_F_VQ_NOTF_COAL = 52 // Device supports virtqueue notification coalescing
+ VIRTIO_NET_F_NOTF_COAL = 53 // Device supports notifications coalescing
+ VIRTIO_NET_F_GUEST_USO4 = 54 // Guest can handle USOv4 in.
+ VIRTIO_NET_F_GUEST_USO6 = 55 // Guest can handle USOv6 in.
+ VIRTIO_NET_F_HOST_USO = 56 // Host can handle USO in.
+ VIRTIO_NET_F_HASH_REPORT = 57 // Supports hash report
+ VIRTIO_NET_F_GUEST_HDRLEN = 59 // Guest provides the exact hdr_len value.
+ VIRTIO_NET_F_RSS = 60 // Supports RSS RX steering
+ VIRTIO_NET_F_RSC_EXT = 61 // extended coalescing info
+ VIRTIO_NET_F_STANDBY = 62 // Act as standby for another device with the same MAC.
+ VIRTIO_NET_F_SPEED_DUPLEX = 63 // Device set linkspeed and duplex
+ VIRTIO_NET_F_GSO = 6 // Host handles pkts any GSO type
+)
+
+// virtio net status
+const (
+ VIRTIO_NET_S_LINK_UP = 1 // Link is up
+ VIRTIO_NET_S_ANNOUNCE = 2 // Announcement is needed
+)
+
+// virtio config
+const (
+ // Do we get callbacks when the ring is completely used, even if we've
+ // suppressed them?
+ VIRTIO_F_NOTIFY_ON_EMPTY = 24
+ // Can the device handle any descriptor layout?
+ VIRTIO_F_ANY_LAYOUT = 27
+ // v1.0 compliant
+ VIRTIO_F_VERSION_1 = 32
+ // If clear - device has the platform DMA (e.g. IOMMU) bypass quirk feature.
+ // If set - use platform DMA tools to access the memory.
+ // Note the reverse polarity (compared to most other features),
+ // this is for compatibility with legacy systems.
+ VIRTIO_F_ACCESS_PLATFORM = 33
+ // Legacy name for VIRTIO_F_ACCESS_PLATFORM (for compatibility with old userspace)
+ VIRTIO_F_IOMMU_PLATFORM = VIRTIO_F_ACCESS_PLATFORM
+ // This feature indicates support for the packed virtqueue layout.
+ VIRTIO_F_RING_PACKED = 34
+ // Inorder feature indicates that all buffers are used by the device
+ // in the same order in which they have been made available.
+ VIRTIO_F_IN_ORDER = 35
+ // This feature indicates that memory accesses by the driver and the
+ // device are ordered in a way described by the platform.
+ VIRTIO_F_ORDER_PLATFORM = 36
+ // Does the device support Single Root I/O Virtualization?
+ VIRTIO_F_SR_IOV = 37
+ // This feature indicates that the driver passes extra data (besides
+ // identifying the virtqueue) in its device notifications.
+ VIRTIO_F_NOTIFICATION_DATA = 38
+ // This feature indicates that the driver uses the data provided by the device
+ // as a virtqueue identifier in available buffer notifications.
+ VIRTIO_F_NOTIF_CONFIG_DATA = 39
+ // This feature indicates that the driver can reset a queue individually.
+ VIRTIO_F_RING_RESET = 40
+)
+
+// virtio device ids
+const (
+ VIRTIO_ID_NET = 1 // virtio net
+ VIRTIO_ID_BLOCK = 2 // virtio block
+ VIRTIO_ID_CONSOLE = 3 // virtio console
+ VIRTIO_ID_RNG = 4 // virtio rng
+ VIRTIO_ID_BALLOON = 5 // virtio balloon
+ VIRTIO_ID_IOMEM = 6 // virtio ioMemory
+ VIRTIO_ID_RPMSG = 7 // virtio remote processor messaging
+ VIRTIO_ID_SCSI = 8 // virtio scsi
+ VIRTIO_ID_9P = 9 // 9p virtio console
+ VIRTIO_ID_MAC80211_WLAN = 10 // virtio WLAN MAC
+ VIRTIO_ID_RPROC_SERIAL = 11 // virtio remoteproc serial link
+ VIRTIO_ID_CAIF = 12 // Virtio caif
+ VIRTIO_ID_MEMORY_BALLOON = 13 // virtio memory balloon
+ VIRTIO_ID_GPU = 16 // virtio GPU
+ VIRTIO_ID_CLOCK = 17 // virtio clock/timer
+ VIRTIO_ID_INPUT = 18 // virtio input
+ VIRTIO_ID_VSOCK = 19 // virtio vsock transport
+ VIRTIO_ID_CRYPTO = 20 // virtio crypto
+ VIRTIO_ID_SIGNAL_DIST = 21 // virtio signal distribution device
+ VIRTIO_ID_PSTORE = 22 // virtio pstore device
+ VIRTIO_ID_IOMMU = 23 // virtio IOMMU
+ VIRTIO_ID_MEM = 24 // virtio mem
+ VIRTIO_ID_SOUND = 25 // virtio sound
+ VIRTIO_ID_FS = 26 // virtio filesystem
+ VIRTIO_ID_PMEM = 27 // virtio pmem
+ VIRTIO_ID_RPMB = 28 // virtio rpmb
+ VIRTIO_ID_MAC80211_HWSIM = 29 // virtio mac80211-hwsim
+ VIRTIO_ID_VIDEO_ENCODER = 30 // virtio video encoder
+ VIRTIO_ID_VIDEO_DECODER = 31 // virtio video decoder
+ VIRTIO_ID_SCMI = 32 // virtio SCMI
+ VIRTIO_ID_NITRO_SEC_MOD = 33 // virtio nitro secure module
+ VIRTIO_ID_I2C_ADAPTER = 34 // virtio i2c adapter
+ VIRTIO_ID_WATCHDOG = 35 // virtio watchdog
+ VIRTIO_ID_CAN = 36 // virtio can
+ VIRTIO_ID_DMABUF = 37 // virtio dmabuf
+ VIRTIO_ID_PARAM_SERV = 38 // virtio parameter server
+ VIRTIO_ID_AUDIO_POLICY = 39 // virtio audio policy
+ VIRTIO_ID_BT = 40 // virtio bluetooth
+ VIRTIO_ID_GPIO = 41 // virtio gpio
+ // Virtio Transitional IDs
+ VIRTIO_TRANS_ID_NET = 0x1000 // transitional virtio net
+ VIRTIO_TRANS_ID_BLOCK = 0x1001 // transitional virtio block
+ VIRTIO_TRANS_ID_BALLOON = 0x1002 // transitional virtio balloon
+ VIRTIO_TRANS_ID_CONSOLE = 0x1003 // transitional virtio console
+ VIRTIO_TRANS_ID_SCSI = 0x1004 // transitional virtio SCSI
+ VIRTIO_TRANS_ID_RNG = 0x1005 // transitional virtio rng
+ VIRTIO_TRANS_ID_9P = 0x1009 // transitional virtio 9p console
+)
diff --git a/vendor/github.com/vishvananda/netlink/xdp_diag.go b/vendor/github.com/vishvananda/netlink/xdp_diag.go
new file mode 100644
index 00000000000..e88825bf55e
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/xdp_diag.go
@@ -0,0 +1,34 @@
+package netlink
+
+import "github.com/vishvananda/netlink/nl"
+
+const SOCK_ANY_COOKIE = uint64(nl.TCPDIAG_NOCOOKIE)<<32 + uint64(nl.TCPDIAG_NOCOOKIE)
+
+// XDP diagnosis show flag constants to request particular information elements.
+const (
+ XDP_SHOW_INFO = 1 << iota
+ XDP_SHOW_RING_CFG
+ XDP_SHOW_UMEM
+ XDP_SHOW_MEMINFO
+ XDP_SHOW_STATS
+)
+
+// XDP diag element constants
+const (
+ XDP_DIAG_NONE = iota
+ XDP_DIAG_INFO // when using XDP_SHOW_INFO
+ XDP_DIAG_UID // when using XDP_SHOW_INFO
+ XDP_DIAG_RX_RING // when using XDP_SHOW_RING_CFG
+ XDP_DIAG_TX_RING // when using XDP_SHOW_RING_CFG
+ XDP_DIAG_UMEM // when using XDP_SHOW_UMEM
+ XDP_DIAG_UMEM_FILL_RING // when using XDP_SHOW_UMEM
+ XDP_DIAG_UMEM_COMPLETION_RING // when using XDP_SHOW_UMEM
+ XDP_DIAG_MEMINFO // when using XDP_SHOW_MEMINFO
+ XDP_DIAG_STATS // when using XDP_SHOW_STATS
+)
+
+// https://elixir.bootlin.com/linux/v6.2/source/include/uapi/linux/xdp_diag.h#L21
+type XDPDiagInfoResp struct {
+ XDPDiagMsg *XDPSocket
+ XDPInfo *XDPInfo
+}
diff --git a/vendor/github.com/vishvananda/netlink/xdp_linux.go b/vendor/github.com/vishvananda/netlink/xdp_linux.go
new file mode 100644
index 00000000000..896a406deb4
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/xdp_linux.go
@@ -0,0 +1,46 @@
+package netlink
+
+import (
+ "bytes"
+ "fmt"
+)
+
+const (
+ xdrDiagUmemLen = 8 + 8*4
+ xdrDiagStatsLen = 6 * 8
+)
+
+func (x *XDPDiagUmem) deserialize(b []byte) error {
+ if len(b) < xdrDiagUmemLen {
+ return fmt.Errorf("XDP umem diagnosis data short read (%d); want %d", len(b), xdrDiagUmemLen)
+ }
+
+ rb := bytes.NewBuffer(b)
+ x.Size = native.Uint64(rb.Next(8))
+ x.ID = native.Uint32(rb.Next(4))
+ x.NumPages = native.Uint32(rb.Next(4))
+ x.ChunkSize = native.Uint32(rb.Next(4))
+ x.Headroom = native.Uint32(rb.Next(4))
+ x.Ifindex = native.Uint32(rb.Next(4))
+ x.QueueID = native.Uint32(rb.Next(4))
+ x.Flags = native.Uint32(rb.Next(4))
+ x.Refs = native.Uint32(rb.Next(4))
+
+ return nil
+}
+
+func (x *XDPDiagStats) deserialize(b []byte) error {
+ if len(b) < xdrDiagStatsLen {
+ return fmt.Errorf("XDP diagnosis statistics short read (%d); want %d", len(b), xdrDiagStatsLen)
+ }
+
+ rb := bytes.NewBuffer(b)
+ x.RxDropped = native.Uint64(rb.Next(8))
+ x.RxInvalid = native.Uint64(rb.Next(8))
+ x.RxFull = native.Uint64(rb.Next(8))
+ x.FillRingEmpty = native.Uint64(rb.Next(8))
+ x.TxInvalid = native.Uint64(rb.Next(8))
+ x.TxRingEmpty = native.Uint64(rb.Next(8))
+
+ return nil
+}
diff --git a/vendor/github.com/vishvananda/netlink/xfrm.go b/vendor/github.com/vishvananda/netlink/xfrm_linux.go
similarity index 95%
rename from vendor/github.com/vishvananda/netlink/xfrm.go
rename to vendor/github.com/vishvananda/netlink/xfrm_linux.go
index 02b41842e10..dd38ed8e082 100644
--- a/vendor/github.com/vishvananda/netlink/xfrm.go
+++ b/vendor/github.com/vishvananda/netlink/xfrm_linux.go
@@ -14,7 +14,7 @@ const (
XFRM_PROTO_ESP Proto = unix.IPPROTO_ESP
XFRM_PROTO_AH Proto = unix.IPPROTO_AH
XFRM_PROTO_HAO Proto = unix.IPPROTO_DSTOPTS
- XFRM_PROTO_COMP Proto = 0x6c // NOTE not defined on darwin
+ XFRM_PROTO_COMP Proto = unix.IPPROTO_COMP
XFRM_PROTO_IPSEC_ANY Proto = unix.IPPROTO_RAW
)
diff --git a/vendor/github.com/vishvananda/netlink/xfrm_policy.go b/vendor/github.com/vishvananda/netlink/xfrm_policy.go
deleted file mode 100644
index b7532b092cb..00000000000
--- a/vendor/github.com/vishvananda/netlink/xfrm_policy.go
+++ /dev/null
@@ -1,97 +0,0 @@
-package netlink
-
-import (
- "fmt"
- "net"
-)
-
-// Dir is an enum representing an ipsec template direction.
-type Dir uint8
-
-const (
- XFRM_DIR_IN Dir = iota
- XFRM_DIR_OUT
- XFRM_DIR_FWD
- XFRM_SOCKET_IN
- XFRM_SOCKET_OUT
- XFRM_SOCKET_FWD
-)
-
-func (d Dir) String() string {
- switch d {
- case XFRM_DIR_IN:
- return "dir in"
- case XFRM_DIR_OUT:
- return "dir out"
- case XFRM_DIR_FWD:
- return "dir fwd"
- case XFRM_SOCKET_IN:
- return "socket in"
- case XFRM_SOCKET_OUT:
- return "socket out"
- case XFRM_SOCKET_FWD:
- return "socket fwd"
- }
- return fmt.Sprintf("socket %d", d-XFRM_SOCKET_IN)
-}
-
-// PolicyAction is an enum representing an ipsec policy action.
-type PolicyAction uint8
-
-const (
- XFRM_POLICY_ALLOW PolicyAction = 0
- XFRM_POLICY_BLOCK PolicyAction = 1
-)
-
-func (a PolicyAction) String() string {
- switch a {
- case XFRM_POLICY_ALLOW:
- return "allow"
- case XFRM_POLICY_BLOCK:
- return "block"
- default:
- return fmt.Sprintf("action %d", a)
- }
-}
-
-// XfrmPolicyTmpl encapsulates a rule for the base addresses of an ipsec
-// policy. These rules are matched with XfrmState to determine encryption
-// and authentication algorithms.
-type XfrmPolicyTmpl struct {
- Dst net.IP
- Src net.IP
- Proto Proto
- Mode Mode
- Spi int
- Reqid int
- Optional int
-}
-
-func (t XfrmPolicyTmpl) String() string {
- return fmt.Sprintf("{Dst: %v, Src: %v, Proto: %s, Mode: %s, Spi: 0x%x, Reqid: 0x%x}",
- t.Dst, t.Src, t.Proto, t.Mode, t.Spi, t.Reqid)
-}
-
-// XfrmPolicy represents an ipsec policy. It represents the overlay network
-// and has a list of XfrmPolicyTmpls representing the base addresses of
-// the policy.
-type XfrmPolicy struct {
- Dst *net.IPNet
- Src *net.IPNet
- Proto Proto
- DstPort int
- SrcPort int
- Dir Dir
- Priority int
- Index int
- Action PolicyAction
- Ifindex int
- Ifid int
- Mark *XfrmMark
- Tmpls []XfrmPolicyTmpl
-}
-
-func (p XfrmPolicy) String() string {
- return fmt.Sprintf("{Dst: %v, Src: %v, Proto: %s, DstPort: %d, SrcPort: %d, Dir: %s, Priority: %d, Index: %d, Action: %s, Ifindex: %d, Ifid: %d, Mark: %s, Tmpls: %s}",
- p.Dst, p.Src, p.Proto, p.DstPort, p.SrcPort, p.Dir, p.Priority, p.Index, p.Action, p.Ifindex, p.Ifid, p.Mark, p.Tmpls)
-}
diff --git a/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go
index 35849680413..d526739cebf 100644
--- a/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go
+++ b/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go
@@ -1,10 +1,104 @@
package netlink
import (
+ "fmt"
+ "net"
+
"github.com/vishvananda/netlink/nl"
"golang.org/x/sys/unix"
)
+// Dir is an enum representing an ipsec template direction.
+type Dir uint8
+
+const (
+ XFRM_DIR_IN Dir = iota
+ XFRM_DIR_OUT
+ XFRM_DIR_FWD
+ XFRM_SOCKET_IN
+ XFRM_SOCKET_OUT
+ XFRM_SOCKET_FWD
+)
+
+func (d Dir) String() string {
+ switch d {
+ case XFRM_DIR_IN:
+ return "dir in"
+ case XFRM_DIR_OUT:
+ return "dir out"
+ case XFRM_DIR_FWD:
+ return "dir fwd"
+ case XFRM_SOCKET_IN:
+ return "socket in"
+ case XFRM_SOCKET_OUT:
+ return "socket out"
+ case XFRM_SOCKET_FWD:
+ return "socket fwd"
+ }
+ return fmt.Sprintf("socket %d", d-XFRM_SOCKET_IN)
+}
+
+// PolicyAction is an enum representing an ipsec policy action.
+type PolicyAction uint8
+
+const (
+ XFRM_POLICY_ALLOW PolicyAction = 0
+ XFRM_POLICY_BLOCK PolicyAction = 1
+)
+
+func (a PolicyAction) String() string {
+ switch a {
+ case XFRM_POLICY_ALLOW:
+ return "allow"
+ case XFRM_POLICY_BLOCK:
+ return "block"
+ default:
+ return fmt.Sprintf("action %d", a)
+ }
+}
+
+// XfrmPolicyTmpl encapsulates a rule for the base addresses of an ipsec
+// policy. These rules are matched with XfrmState to determine encryption
+// and authentication algorithms.
+type XfrmPolicyTmpl struct {
+ Dst net.IP
+ Src net.IP
+ Proto Proto
+ Mode Mode
+ Spi int
+ Reqid int
+ Optional int
+}
+
+func (t XfrmPolicyTmpl) String() string {
+ return fmt.Sprintf("{Dst: %v, Src: %v, Proto: %s, Mode: %s, Spi: 0x%x, Reqid: 0x%x}",
+ t.Dst, t.Src, t.Proto, t.Mode, t.Spi, t.Reqid)
+}
+
+// XfrmPolicy represents an ipsec policy. It represents the overlay network
+// and has a list of XfrmPolicyTmpls representing the base addresses of
+// the policy.
+type XfrmPolicy struct {
+ Dst *net.IPNet
+ Src *net.IPNet
+ Proto Proto
+ DstPort int
+ SrcPort int
+ Dir Dir
+ Priority int
+ Index int
+ Action PolicyAction
+ Ifindex int
+ Ifid int
+ Mark *XfrmMark
+ Tmpls []XfrmPolicyTmpl
+}
+
+func (p XfrmPolicy) String() string {
+ return fmt.Sprintf("{Dst: %v, Src: %v, Proto: %s, DstPort: %d, SrcPort: %d, Dir: %s, Priority: %d, Index: %d, Action: %s, Ifindex: %d, Ifid: %d, Mark: %s, Tmpls: %s}",
+ p.Dst, p.Src, p.Proto, p.DstPort, p.SrcPort, p.Dir, p.Priority, p.Index, p.Action, p.Ifindex, p.Ifid, p.Mark, p.Tmpls)
+}
+
func selFromPolicy(sel *nl.XfrmSelector, policy *XfrmPolicy) {
sel.Family = uint16(nl.FAMILY_V4)
if policy.Dst != nil {
@@ -75,6 +169,7 @@ func (h *Handle) xfrmPolicyAddOrUpdate(policy *XfrmPolicy, nlProto int) error {
userTmpl := nl.DeserializeXfrmUserTmpl(tmplData[start : start+nl.SizeofXfrmUserTmpl])
userTmpl.XfrmId.Daddr.FromIP(tmpl.Dst)
userTmpl.Saddr.FromIP(tmpl.Src)
+ userTmpl.Family = uint16(nl.GetIPFamily(tmpl.Dst))
userTmpl.XfrmId.Proto = uint8(tmpl.Proto)
userTmpl.XfrmId.Spi = nl.Swap32(uint32(tmpl.Spi))
userTmpl.Mode = uint8(tmpl.Mode)
@@ -223,8 +318,8 @@ func parseXfrmPolicy(m []byte, family int) (*XfrmPolicy, error) {
var policy XfrmPolicy
- policy.Dst = msg.Sel.Daddr.ToIPNet(msg.Sel.PrefixlenD)
- policy.Src = msg.Sel.Saddr.ToIPNet(msg.Sel.PrefixlenS)
+ policy.Dst = msg.Sel.Daddr.ToIPNet(msg.Sel.PrefixlenD, uint16(family))
+ policy.Src = msg.Sel.Saddr.ToIPNet(msg.Sel.PrefixlenS, uint16(family))
policy.Proto = Proto(msg.Sel.Proto)
policy.DstPort = int(nl.Swap16(msg.Sel.Dport))
policy.SrcPort = int(nl.Swap16(msg.Sel.Sport))
diff --git a/vendor/github.com/vishvananda/netlink/xfrm_state.go b/vendor/github.com/vishvananda/netlink/xfrm_state.go
deleted file mode 100644
index 19df82c7632..00000000000
--- a/vendor/github.com/vishvananda/netlink/xfrm_state.go
+++ /dev/null
@@ -1,131 +0,0 @@
-package netlink
-
-import (
- "fmt"
- "net"
- "time"
-)
-
-// XfrmStateAlgo represents the algorithm to use for the ipsec encryption.
-type XfrmStateAlgo struct {
- Name string
- Key []byte
- TruncateLen int // Auth only
- ICVLen int // AEAD only
-}
-
-func (a XfrmStateAlgo) String() string {
- base := fmt.Sprintf("{Name: %s, Key: 0x%x", a.Name, a.Key)
- if a.TruncateLen != 0 {
- base = fmt.Sprintf("%s, Truncate length: %d", base, a.TruncateLen)
- }
- if a.ICVLen != 0 {
- base = fmt.Sprintf("%s, ICV length: %d", base, a.ICVLen)
- }
- return fmt.Sprintf("%s}", base)
-}
-
-// EncapType is an enum representing the optional packet encapsulation.
-type EncapType uint8
-
-const (
- XFRM_ENCAP_ESPINUDP_NONIKE EncapType = iota + 1
- XFRM_ENCAP_ESPINUDP
-)
-
-func (e EncapType) String() string {
- switch e {
- case XFRM_ENCAP_ESPINUDP_NONIKE:
- return "espinudp-non-ike"
- case XFRM_ENCAP_ESPINUDP:
- return "espinudp"
- }
- return "unknown"
-}
-
-// XfrmStateEncap represents the encapsulation to use for the ipsec encryption.
-type XfrmStateEncap struct {
- Type EncapType
- SrcPort int
- DstPort int
- OriginalAddress net.IP
-}
-
-func (e XfrmStateEncap) String() string {
- return fmt.Sprintf("{Type: %s, Srcport: %d, DstPort: %d, OriginalAddress: %v}",
- e.Type, e.SrcPort, e.DstPort, e.OriginalAddress)
-}
-
-// XfrmStateLimits represents the configured limits for the state.
-type XfrmStateLimits struct {
- ByteSoft uint64
- ByteHard uint64
- PacketSoft uint64
- PacketHard uint64
- TimeSoft uint64
- TimeHard uint64
- TimeUseSoft uint64
- TimeUseHard uint64
-}
-
-// XfrmStateStats represents the current number of bytes/packets
-// processed by this State, the State's installation and first use
-// time and the replay window counters.
-type XfrmStateStats struct {
- ReplayWindow uint32
- Replay uint32
- Failed uint32
- Bytes uint64
- Packets uint64
- AddTime uint64
- UseTime uint64
-}
-
-// XfrmState represents the state of an ipsec policy. It optionally
-// contains an XfrmStateAlgo for encryption and one for authentication.
-type XfrmState struct {
- Dst net.IP
- Src net.IP
- Proto Proto
- Mode Mode
- Spi int
- Reqid int
- ReplayWindow int
- Limits XfrmStateLimits
- Statistics XfrmStateStats
- Mark *XfrmMark
- OutputMark *XfrmMark
- Ifid int
- Auth *XfrmStateAlgo
- Crypt *XfrmStateAlgo
- Aead *XfrmStateAlgo
- Encap *XfrmStateEncap
- ESN bool
-}
-
-func (sa XfrmState) String() string {
- return fmt.Sprintf("Dst: %v, Src: %v, Proto: %s, Mode: %s, SPI: 0x%x, ReqID: 0x%x, ReplayWindow: %d, Mark: %v, OutputMark: %v, Ifid: %d, Auth: %v, Crypt: %v, Aead: %v, Encap: %v, ESN: %t",
- sa.Dst, sa.Src, sa.Proto, sa.Mode, sa.Spi, sa.Reqid, sa.ReplayWindow, sa.Mark, sa.OutputMark, sa.Ifid, sa.Auth, sa.Crypt, sa.Aead, sa.Encap, sa.ESN)
-}
-func (sa XfrmState) Print(stats bool) string {
- if !stats {
- return sa.String()
- }
- at := time.Unix(int64(sa.Statistics.AddTime), 0).Format(time.UnixDate)
- ut := "-"
- if sa.Statistics.UseTime > 0 {
- ut = time.Unix(int64(sa.Statistics.UseTime), 0).Format(time.UnixDate)
- }
- return fmt.Sprintf("%s, ByteSoft: %s, ByteHard: %s, PacketSoft: %s, PacketHard: %s, TimeSoft: %d, TimeHard: %d, TimeUseSoft: %d, TimeUseHard: %d, Bytes: %d, Packets: %d, "+
- "AddTime: %s, UseTime: %s, ReplayWindow: %d, Replay: %d, Failed: %d",
- sa.String(), printLimit(sa.Limits.ByteSoft), printLimit(sa.Limits.ByteHard), printLimit(sa.Limits.PacketSoft), printLimit(sa.Limits.PacketHard),
- sa.Limits.TimeSoft, sa.Limits.TimeHard, sa.Limits.TimeUseSoft, sa.Limits.TimeUseHard, sa.Statistics.Bytes, sa.Statistics.Packets, at, ut,
- sa.Statistics.ReplayWindow, sa.Statistics.Replay, sa.Statistics.Failed)
-}
-
-func printLimit(lmt uint64) string {
- if lmt == ^uint64(0) {
- return "(INF)"
- }
- return fmt.Sprintf("%d", lmt)
-}
diff --git a/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go
index 61a2d2dea28..554f2498c2c 100644
--- a/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go
+++ b/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go
@@ -2,12 +2,154 @@ package netlink
import (
"fmt"
+ "net"
+ "time"
"unsafe"
"github.com/vishvananda/netlink/nl"
"golang.org/x/sys/unix"
)
+// XfrmStateAlgo represents the algorithm to use for the ipsec encryption.
+type XfrmStateAlgo struct {
+ Name string
+ Key []byte
+ TruncateLen int // Auth only
+ ICVLen int // AEAD only
+}
+
+func (a XfrmStateAlgo) String() string {
+ base := fmt.Sprintf("{Name: %s, Key: 0x%x", a.Name, a.Key)
+ if a.TruncateLen != 0 {
+ base = fmt.Sprintf("%s, Truncate length: %d", base, a.TruncateLen)
+ }
+ if a.ICVLen != 0 {
+ base = fmt.Sprintf("%s, ICV length: %d", base, a.ICVLen)
+ }
+ return fmt.Sprintf("%s}", base)
+}
+
+// EncapType is an enum representing the optional packet encapsulation.
+type EncapType uint8
+
+const (
+ XFRM_ENCAP_ESPINUDP_NONIKE EncapType = iota + 1
+ XFRM_ENCAP_ESPINUDP
+)
+
+func (e EncapType) String() string {
+ switch e {
+ case XFRM_ENCAP_ESPINUDP_NONIKE:
+ return "espinudp-non-ike"
+ case XFRM_ENCAP_ESPINUDP:
+ return "espinudp"
+ }
+ return "unknown"
+}
+
+// XfrmStateEncap represents the encapsulation to use for the ipsec encryption.
+type XfrmStateEncap struct {
+ Type EncapType
+ SrcPort int
+ DstPort int
+ OriginalAddress net.IP
+}
+
+func (e XfrmStateEncap) String() string {
+ return fmt.Sprintf("{Type: %s, Srcport: %d, DstPort: %d, OriginalAddress: %v}",
+ e.Type, e.SrcPort, e.DstPort, e.OriginalAddress)
+}
+
+// XfrmStateLimits represents the configured limits for the state.
+type XfrmStateLimits struct {
+ ByteSoft uint64
+ ByteHard uint64
+ PacketSoft uint64
+ PacketHard uint64
+ TimeSoft uint64
+ TimeHard uint64
+ TimeUseSoft uint64
+ TimeUseHard uint64
+}
+
+// XfrmStateStats represents the current number of bytes/packets
+// processed by this State, the State's installation and first use
+// time and the replay window counters.
+type XfrmStateStats struct {
+ ReplayWindow uint32
+ Replay uint32
+ Failed uint32
+ Bytes uint64
+ Packets uint64
+ AddTime uint64
+ UseTime uint64
+}
+
+// XfrmReplayState represents the sequence number states for
+// "legacy" anti-replay mode.
+type XfrmReplayState struct {
+ OSeq uint32
+ Seq uint32
+ BitMap uint32
+}
+
+func (r XfrmReplayState) String() string {
+ return fmt.Sprintf("{OSeq: 0x%x, Seq: 0x%x, BitMap: 0x%x}",
+ r.OSeq, r.Seq, r.BitMap)
+}
+
+// XfrmState represents the state of an ipsec policy. It optionally
+// contains an XfrmStateAlgo for encryption and one for authentication.
+type XfrmState struct {
+ Dst net.IP
+ Src net.IP
+ Proto Proto
+ Mode Mode
+ Spi int
+ Reqid int
+ ReplayWindow int
+ Limits XfrmStateLimits
+ Statistics XfrmStateStats
+ Mark *XfrmMark
+ OutputMark *XfrmMark
+ Ifid int
+ Auth *XfrmStateAlgo
+ Crypt *XfrmStateAlgo
+ Aead *XfrmStateAlgo
+ Encap *XfrmStateEncap
+ ESN bool
+ DontEncapDSCP bool
+ OSeqMayWrap bool
+ Replay *XfrmReplayState
+ Selector *XfrmPolicy
+}
+
+func (sa XfrmState) String() string {
+ return fmt.Sprintf("Dst: %v, Src: %v, Proto: %s, Mode: %s, SPI: 0x%x, ReqID: 0x%x, ReplayWindow: %d, Mark: %v, OutputMark: %v, Ifid: %d, Auth: %v, Crypt: %v, Aead: %v, Encap: %v, ESN: %t, DontEncapDSCP: %t, OSeqMayWrap: %t, Replay: %v",
+ sa.Dst, sa.Src, sa.Proto, sa.Mode, sa.Spi, sa.Reqid, sa.ReplayWindow, sa.Mark, sa.OutputMark, sa.Ifid, sa.Auth, sa.Crypt, sa.Aead, sa.Encap, sa.ESN, sa.DontEncapDSCP, sa.OSeqMayWrap, sa.Replay)
+}
+func (sa XfrmState) Print(stats bool) string {
+ if !stats {
+ return sa.String()
+ }
+ at := time.Unix(int64(sa.Statistics.AddTime), 0).Format(time.UnixDate)
+ ut := "-"
+ if sa.Statistics.UseTime > 0 {
+ ut = time.Unix(int64(sa.Statistics.UseTime), 0).Format(time.UnixDate)
+ }
+ return fmt.Sprintf("%s, ByteSoft: %s, ByteHard: %s, PacketSoft: %s, PacketHard: %s, TimeSoft: %d, TimeHard: %d, TimeUseSoft: %d, TimeUseHard: %d, Bytes: %d, Packets: %d, "+
+ "AddTime: %s, UseTime: %s, ReplayWindow: %d, Replay: %d, Failed: %d",
+ sa.String(), printLimit(sa.Limits.ByteSoft), printLimit(sa.Limits.ByteHard), printLimit(sa.Limits.PacketSoft), printLimit(sa.Limits.PacketHard),
+ sa.Limits.TimeSoft, sa.Limits.TimeHard, sa.Limits.TimeUseSoft, sa.Limits.TimeUseHard, sa.Statistics.Bytes, sa.Statistics.Packets, at, ut,
+ sa.Statistics.ReplayWindow, sa.Statistics.Replay, sa.Statistics.Failed)
+}
+
+func printLimit(lmt uint64) string {
+ if lmt == ^uint64(0) {
+ return "(INF)"
+ }
+ return fmt.Sprintf("%d", lmt)
+}
func writeStateAlgo(a *XfrmStateAlgo) []byte {
algo := nl.XfrmAlgo{
AlgKeyLen: uint32(len(a.Key) * 8),
@@ -77,6 +219,14 @@ func writeReplayEsn(replayWindow int) []byte {
return replayEsn.Serialize()
}
+func writeReplay(r *XfrmReplayState) []byte {
+ return (&nl.XfrmReplayState{
+ OSeq: r.OSeq,
+ Seq: r.Seq,
+ BitMap: r.BitMap,
+ }).Serialize()
+}
+
// XfrmStateAdd will add an xfrm state to the system.
// Equivalent to: `ip xfrm state add $state`
func XfrmStateAdd(state *XfrmState) error {
@@ -166,6 +316,21 @@ func (h *Handle) xfrmStateAddOrUpdate(state *XfrmState, nlProto int) error {
req.AddData(out)
}
}
+ if state.OSeqMayWrap || state.DontEncapDSCP {
+ var flags uint32
+ if state.DontEncapDSCP {
+ flags |= nl.XFRM_SA_XFLAG_DONT_ENCAP_DSCP
+ }
+ if state.OSeqMayWrap {
+ flags |= nl.XFRM_SA_XFLAG_OSEQ_MAY_WRAP
+ }
+ out := nl.NewRtAttr(nl.XFRMA_SA_EXTRA_FLAGS, nl.Uint32Attr(flags))
+ req.AddData(out)
+ }
+ if state.Replay != nil {
+ out := nl.NewRtAttr(nl.XFRMA_REPLAY_VAL, writeReplay(state.Replay))
+ req.AddData(out)
+ }
if state.Ifid != 0 {
ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(state.Ifid)))
@@ -186,7 +351,6 @@ func (h *Handle) xfrmStateAllocSpi(state *XfrmState) (*XfrmState, error) {
msg.Min = 0x100
msg.Max = 0xffffffff
req.AddData(msg)
-
if state.Mark != nil {
out := nl.NewRtAttr(nl.XFRMA_MARK, writeMark(state.Mark))
req.AddData(out)
@@ -314,7 +478,6 @@ var familyError = fmt.Errorf("family error")
func xfrmStateFromXfrmUsersaInfo(msg *nl.XfrmUsersaInfo) *XfrmState {
var state XfrmState
-
state.Dst = msg.Id.Daddr.ToIP()
state.Src = msg.Saddr.ToIP()
state.Proto = Proto(msg.Id.Proto)
@@ -324,20 +487,25 @@ func xfrmStateFromXfrmUsersaInfo(msg *nl.XfrmUsersaInfo) *XfrmState {
state.ReplayWindow = int(msg.ReplayWindow)
lftToLimits(&msg.Lft, &state.Limits)
curToStats(&msg.Curlft, &msg.Stats, &state.Statistics)
+ state.Selector = &XfrmPolicy{
+ Dst: msg.Sel.Daddr.ToIPNet(msg.Sel.PrefixlenD, msg.Sel.Family),
+ Src: msg.Sel.Saddr.ToIPNet(msg.Sel.PrefixlenS, msg.Sel.Family),
+ Proto: Proto(msg.Sel.Proto),
+ DstPort: int(nl.Swap16(msg.Sel.Dport)),
+ SrcPort: int(nl.Swap16(msg.Sel.Sport)),
+ Ifindex: int(msg.Sel.Ifindex),
+ }
return &state
}
func parseXfrmState(m []byte, family int) (*XfrmState, error) {
msg := nl.DeserializeXfrmUsersaInfo(m)
-
// This is mainly for the state dump
if family != FAMILY_ALL && family != int(msg.Family) {
return nil, familyError
}
-
state := xfrmStateFromXfrmUsersaInfo(msg)
-
attrs, err := nl.ParseRouteAttr(m[nl.SizeofXfrmUsersaInfo:])
if err != nil {
return nil, err
@@ -385,6 +553,14 @@ func parseXfrmState(m []byte, family int) (*XfrmState, error) {
state.Mark = new(XfrmMark)
state.Mark.Value = mark.Value
state.Mark.Mask = mark.Mask
+ case nl.XFRMA_SA_EXTRA_FLAGS:
+ flags := native.Uint32(attr.Value)
+ if (flags & nl.XFRM_SA_XFLAG_DONT_ENCAP_DSCP) != 0 {
+ state.DontEncapDSCP = true
+ }
+ if (flags & nl.XFRM_SA_XFLAG_OSEQ_MAY_WRAP) != 0 {
+ state.OSeqMayWrap = true
+ }
case nl.XFRMA_SET_MARK:
if state.OutputMark == nil {
state.OutputMark = new(XfrmMark)
@@ -400,6 +576,14 @@ func parseXfrmState(m []byte, family int) (*XfrmState, error) {
}
case nl.XFRMA_IF_ID:
state.Ifid = int(native.Uint32(attr.Value))
+ case nl.XFRMA_REPLAY_VAL:
+ if state.Replay == nil {
+ state.Replay = new(XfrmReplayState)
+ }
+ replay := nl.DeserializeXfrmReplayState(attr.Value[:])
+ state.Replay.OSeq = replay.OSeq
+ state.Replay.Seq = replay.Seq
+ state.Replay.BitMap = replay.BitMap
}
}
@@ -476,6 +660,9 @@ func xfrmUsersaInfoFromXfrmState(state *XfrmState) *nl.XfrmUsersaInfo {
msg.Id.Spi = nl.Swap32(uint32(state.Spi))
msg.Reqid = uint32(state.Reqid)
msg.ReplayWindow = uint8(state.ReplayWindow)
-
+ msg.Sel = nl.XfrmSelector{}
+ if state.Selector != nil {
+ selFromPolicy(&msg.Sel, state.Selector)
+ }
return msg
}
diff --git a/vendor/github.com/vishvananda/netlink/xfrm_unspecified.go b/vendor/github.com/vishvananda/netlink/xfrm_unspecified.go
new file mode 100644
index 00000000000..12fdd26d79a
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/xfrm_unspecified.go
@@ -0,0 +1,7 @@
+//go:build !linux
+// +build !linux
+
+package netlink
+
+type XfrmPolicy struct{}
+type XfrmState struct{}
diff --git a/vendor/github.com/xanzy/ssh-agent/.gitignore b/vendor/github.com/xanzy/ssh-agent/.gitignore
deleted file mode 100644
index daf913b1b34..00000000000
--- a/vendor/github.com/xanzy/ssh-agent/.gitignore
+++ /dev/null
@@ -1,24 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-*.test
-*.prof
diff --git a/vendor/github.com/xanzy/ssh-agent/LICENSE b/vendor/github.com/xanzy/ssh-agent/LICENSE
deleted file mode 100644
index 8f71f43fee3..00000000000
--- a/vendor/github.com/xanzy/ssh-agent/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright {yyyy} {name of copyright owner}
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
diff --git a/vendor/github.com/xanzy/ssh-agent/README.md b/vendor/github.com/xanzy/ssh-agent/README.md
deleted file mode 100644
index e2dfcedca9b..00000000000
--- a/vendor/github.com/xanzy/ssh-agent/README.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# ssh-agent
-
-Create a new [agent.Agent](https://godoc.org/golang.org/x/crypto/ssh/agent#Agent) on any type of OS (so including Windows) from any [Go](https://golang.org) application.
-
-## Limitations
-
-When compiled for Windows, it will only support [Pageant](http://the.earth.li/~sgtatham/putty/0.66/htmldoc/Chapter9.html#pageant) as the SSH authentication agent.
-
-## Credits
-
-Big thanks to [Давид Мзареулян (David Mzareulyan)](https://github.com/davidmz) for creating the [go-pageant](https://github.com/davidmz/go-pageant) package!
-
-## Issues
-
-If you have an issue: report it on the [issue tracker](https://github.com/xanzy/ssh-agent/issues)
-
-## Author
-
-Sander van Harmelen ()
-
-## License
-
-The files `pageant_windows.go` and `sshagent_windows.go` have their own license (see file headers). The rest of this package is licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
diff --git a/vendor/github.com/xanzy/ssh-agent/pageant_windows.go b/vendor/github.com/xanzy/ssh-agent/pageant_windows.go
deleted file mode 100644
index 1608e54cc2f..00000000000
--- a/vendor/github.com/xanzy/ssh-agent/pageant_windows.go
+++ /dev/null
@@ -1,149 +0,0 @@
-//
-// Copyright (c) 2014 David Mzareulyan
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy of this software
-// and associated documentation files (the "Software"), to deal in the Software without restriction,
-// including without limitation the rights to use, copy, modify, merge, publish, distribute,
-// sublicense, and/or sell copies of the Software, and to permit persons to whom the Software
-// is furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all copies or substantial
-// portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
-// BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
-// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-//
-
-//go:build windows
-// +build windows
-
-package sshagent
-
-// see https://github.com/Yasushi/putty/blob/master/windows/winpgntc.c#L155
-// see https://github.com/paramiko/paramiko/blob/master/paramiko/win_pageant.py
-
-import (
- "encoding/binary"
- "errors"
- "fmt"
- "sync"
- "syscall"
- "unsafe"
-
- "golang.org/x/sys/windows"
-)
-
-// Maximum size of message can be sent to pageant
-const MaxMessageLen = 8192
-
-var (
- ErrPageantNotFound = errors.New("pageant process not found")
- ErrSendMessage = errors.New("error sending message")
-
- ErrMessageTooLong = errors.New("message too long")
- ErrInvalidMessageFormat = errors.New("invalid message format")
- ErrResponseTooLong = errors.New("response too long")
-)
-
-const (
- agentCopydataID = 0x804e50ba
- wmCopydata = 74
-)
-
-type copyData struct {
- dwData uintptr
- cbData uint32
- lpData unsafe.Pointer
-}
-
-var (
- lock sync.Mutex
-
- user32dll = windows.NewLazySystemDLL("user32.dll")
- winFindWindow = winAPI(user32dll, "FindWindowW")
- winSendMessage = winAPI(user32dll, "SendMessageW")
-
- kernel32dll = windows.NewLazySystemDLL("kernel32.dll")
- winGetCurrentThreadID = winAPI(kernel32dll, "GetCurrentThreadId")
-)
-
-func winAPI(dll *windows.LazyDLL, funcName string) func(...uintptr) (uintptr, uintptr, error) {
- proc := dll.NewProc(funcName)
- return func(a ...uintptr) (uintptr, uintptr, error) { return proc.Call(a...) }
-}
-
-// Query sends message msg to Pageant and returns response or error.
-// 'msg' is raw agent request with length prefix
-// Response is raw agent response with length prefix
-func query(msg []byte) ([]byte, error) {
- if len(msg) > MaxMessageLen {
- return nil, ErrMessageTooLong
- }
-
- msgLen := binary.BigEndian.Uint32(msg[:4])
- if len(msg) != int(msgLen)+4 {
- return nil, ErrInvalidMessageFormat
- }
-
- lock.Lock()
- defer lock.Unlock()
-
- paWin := pageantWindow()
-
- if paWin == 0 {
- return nil, ErrPageantNotFound
- }
-
- thID, _, _ := winGetCurrentThreadID()
- mapName := fmt.Sprintf("PageantRequest%08x", thID)
- pMapName, _ := syscall.UTF16PtrFromString(mapName)
-
- mmap, err := syscall.CreateFileMapping(syscall.InvalidHandle, nil, syscall.PAGE_READWRITE, 0, MaxMessageLen+4, pMapName)
- if err != nil {
- return nil, err
- }
- defer syscall.CloseHandle(mmap)
-
- ptr, err := syscall.MapViewOfFile(mmap, syscall.FILE_MAP_WRITE, 0, 0, 0)
- if err != nil {
- return nil, err
- }
- defer syscall.UnmapViewOfFile(ptr)
-
- mmSlice := (*(*[MaxMessageLen]byte)(unsafe.Pointer(ptr)))[:]
-
- copy(mmSlice, msg)
-
- mapNameBytesZ := append([]byte(mapName), 0)
-
- cds := copyData{
- dwData: agentCopydataID,
- cbData: uint32(len(mapNameBytesZ)),
- lpData: unsafe.Pointer(&(mapNameBytesZ[0])),
- }
-
- resp, _, _ := winSendMessage(paWin, wmCopydata, 0, uintptr(unsafe.Pointer(&cds)))
-
- if resp == 0 {
- return nil, ErrSendMessage
- }
-
- respLen := binary.BigEndian.Uint32(mmSlice[:4])
- if respLen > MaxMessageLen-4 {
- return nil, ErrResponseTooLong
- }
-
- respData := make([]byte, respLen+4)
- copy(respData, mmSlice)
-
- return respData, nil
-}
-
-func pageantWindow() uintptr {
- nameP, _ := syscall.UTF16PtrFromString("Pageant")
- h, _, _ := winFindWindow(uintptr(unsafe.Pointer(nameP)), uintptr(unsafe.Pointer(nameP)))
- return h
-}
diff --git a/vendor/github.com/xanzy/ssh-agent/sshagent.go b/vendor/github.com/xanzy/ssh-agent/sshagent.go
deleted file mode 100644
index 4a4ee30c932..00000000000
--- a/vendor/github.com/xanzy/ssh-agent/sshagent.go
+++ /dev/null
@@ -1,50 +0,0 @@
-//
-// Copyright 2015, Sander van Harmelen
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-//go:build !windows
-// +build !windows
-
-package sshagent
-
-import (
- "errors"
- "fmt"
- "net"
- "os"
-
- "golang.org/x/crypto/ssh/agent"
-)
-
-// New returns a new agent.Agent that uses a unix socket
-func New() (agent.Agent, net.Conn, error) {
- if !Available() {
- return nil, nil, errors.New("SSH agent requested but SSH_AUTH_SOCK not-specified")
- }
-
- sshAuthSock := os.Getenv("SSH_AUTH_SOCK")
-
- conn, err := net.Dial("unix", sshAuthSock)
- if err != nil {
- return nil, nil, fmt.Errorf("Error connecting to SSH_AUTH_SOCK: %v", err)
- }
-
- return agent.NewClient(conn), conn, nil
-}
-
-// Available returns true is a auth socket is defined
-func Available() bool {
- return os.Getenv("SSH_AUTH_SOCK") != ""
-}
diff --git a/vendor/github.com/xanzy/ssh-agent/sshagent_windows.go b/vendor/github.com/xanzy/ssh-agent/sshagent_windows.go
deleted file mode 100644
index 175d1619d89..00000000000
--- a/vendor/github.com/xanzy/ssh-agent/sshagent_windows.go
+++ /dev/null
@@ -1,104 +0,0 @@
-//
-// Copyright (c) 2014 David Mzareulyan
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy of this software
-// and associated documentation files (the "Software"), to deal in the Software without restriction,
-// including without limitation the rights to use, copy, modify, merge, publish, distribute,
-// sublicense, and/or sell copies of the Software, and to permit persons to whom the Software
-// is furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all copies or substantial
-// portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
-// BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
-// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-//
-
-//go:build windows
-// +build windows
-
-package sshagent
-
-import (
- "errors"
- "io"
- "net"
- "sync"
-
- "github.com/Microsoft/go-winio"
- "golang.org/x/crypto/ssh/agent"
-)
-
-const (
- sshAgentPipe = `\\.\pipe\openssh-ssh-agent`
-)
-
-// Available returns true if Pageant is running
-func Available() bool {
- if pageantWindow() != 0 {
- return true
- }
- conn, err := winio.DialPipe(sshAgentPipe, nil)
- if err != nil {
- return false
- }
- conn.Close()
- return true
-}
-
-// New returns a new agent.Agent and the (custom) connection it uses
-// to communicate with a running pagent.exe instance (see README.md)
-func New() (agent.Agent, net.Conn, error) {
- if pageantWindow() != 0 {
- return agent.NewClient(&conn{}), nil, nil
- }
- conn, err := winio.DialPipe(sshAgentPipe, nil)
- if err != nil {
- return nil, nil, errors.New(
- "SSH agent requested, but could not detect Pageant or Windows native SSH agent",
- )
- }
- return agent.NewClient(conn), nil, nil
-}
-
-type conn struct {
- sync.Mutex
- buf []byte
-}
-
-func (c *conn) Close() {
- c.Lock()
- defer c.Unlock()
- c.buf = nil
-}
-
-func (c *conn) Write(p []byte) (int, error) {
- c.Lock()
- defer c.Unlock()
-
- resp, err := query(p)
- if err != nil {
- return 0, err
- }
-
- c.buf = append(c.buf, resp...)
-
- return len(p), nil
-}
-
-func (c *conn) Read(p []byte) (int, error) {
- c.Lock()
- defer c.Unlock()
-
- if len(c.buf) == 0 {
- return 0, io.EOF
- }
-
- n := copy(p, c.buf)
- c.buf = c.buf[n:]
-
- return n, nil
-}
diff --git a/vendor/github.com/zcalusic/sysinfo/cpu.go b/vendor/github.com/zcalusic/sysinfo/cpu.go
index cdd3a7c03fc..11fe17b9125 100644
--- a/vendor/github.com/zcalusic/sysinfo/cpu.go
+++ b/vendor/github.com/zcalusic/sysinfo/cpu.go
@@ -80,12 +80,6 @@ func (si *SysInfo) getCPUInfo() {
return
}
- // getNodeInfo() must have run first, to detect if we're dealing with a virtualized CPU! Detecting number of
- // physical processors and/or cores is totally unreliable in virtualized environments, so let's not do it.
- if si.Node.Hostname == "" || si.Node.Hypervisor != "" {
- return
- }
-
si.CPU.Cpus = uint(len(cpu))
si.CPU.Cores = uint(len(core))
}
diff --git a/vendor/github.com/zcalusic/sysinfo/memory.go b/vendor/github.com/zcalusic/sysinfo/memory.go
index 20735bc0440..f0529258eb9 100644
--- a/vendor/github.com/zcalusic/sysinfo/memory.go
+++ b/vendor/github.com/zcalusic/sysinfo/memory.go
@@ -7,7 +7,7 @@ package sysinfo
import (
"bytes"
"encoding/binary"
- "io/ioutil"
+ "os"
"strconv"
)
@@ -31,7 +31,7 @@ func qword(data []byte, index int) uint64 {
}
func (si *SysInfo) getMemoryInfo() {
- dmi, err := ioutil.ReadFile("/sys/firmware/dmi/tables/DMI")
+ dmi, err := os.ReadFile("/sys/firmware/dmi/tables/DMI")
if err != nil {
// Xen hypervisor
if targetKB := slurpFile("/sys/devices/system/xen_memory/xen_memory0/target_kb"); targetKB != "" {
diff --git a/vendor/github.com/zcalusic/sysinfo/network.go b/vendor/github.com/zcalusic/sysinfo/network.go
index a2565c8c626..fa24dc6da64 100644
--- a/vendor/github.com/zcalusic/sysinfo/network.go
+++ b/vendor/github.com/zcalusic/sysinfo/network.go
@@ -5,7 +5,6 @@
package sysinfo
import (
- "io/ioutil"
"os"
"path"
"strings"
@@ -112,7 +111,7 @@ func getSupported(name string) uint32 {
func (si *SysInfo) getNetworkInfo() {
sysClassNet := "/sys/class/net"
- devices, err := ioutil.ReadDir(sysClassNet)
+ devices, err := os.ReadDir(sysClassNet)
if err != nil {
return
}
diff --git a/vendor/github.com/zcalusic/sysinfo/node.go b/vendor/github.com/zcalusic/sysinfo/node.go
index 480ac0d0919..216ee3aea34 100644
--- a/vendor/github.com/zcalusic/sysinfo/node.go
+++ b/vendor/github.com/zcalusic/sysinfo/node.go
@@ -6,11 +6,8 @@ package sysinfo
import (
"bufio"
- "crypto/rand"
- "fmt"
"os"
"strings"
- "time"
)
// Node information.
@@ -58,19 +55,6 @@ func (si *SysInfo) getSetMachineID() {
si.Node.MachineID = systemdMachineID
return
}
-
- // Generate and write fresh new machine ID to both locations, conforming to the DBUS specification:
- // https://dbus.freedesktop.org/doc/dbus-specification.html#uuids
-
- random := make([]byte, 12)
- if _, err := rand.Read(random); err != nil {
- return
- }
- newMachineID := fmt.Sprintf("%x%x", random, time.Now().Unix())
-
- spewFile(pathSystemdMachineID, newMachineID, 0444)
- spewFile(pathDbusMachineID, newMachineID, 0444)
- si.Node.MachineID = newMachineID
}
func (si *SysInfo) getTimezone() {
diff --git a/vendor/github.com/zcalusic/sysinfo/storage.go b/vendor/github.com/zcalusic/sysinfo/storage.go
index 4ce10f2dec9..eaee7a7614e 100644
--- a/vendor/github.com/zcalusic/sysinfo/storage.go
+++ b/vendor/github.com/zcalusic/sysinfo/storage.go
@@ -6,7 +6,6 @@ package sysinfo
import (
"bufio"
- "io/ioutil"
"os"
"path"
"strconv"
@@ -60,7 +59,7 @@ scan:
func (si *SysInfo) getStorageInfo() {
sysBlock := "/sys/block"
- devices, err := ioutil.ReadDir(sysBlock)
+ devices, err := os.ReadDir(sysBlock)
if err != nil {
return
}
diff --git a/vendor/github.com/zcalusic/sysinfo/util.go b/vendor/github.com/zcalusic/sysinfo/util.go
index cd499a471d4..f6c39b35d42 100644
--- a/vendor/github.com/zcalusic/sysinfo/util.go
+++ b/vendor/github.com/zcalusic/sysinfo/util.go
@@ -5,14 +5,13 @@
package sysinfo
import (
- "io/ioutil"
"os"
"strings"
)
// Read one-liner text files, strip newline.
func slurpFile(path string) string {
- data, err := ioutil.ReadFile(path)
+ data, err := os.ReadFile(path)
if err != nil {
return ""
}
@@ -22,5 +21,5 @@ func slurpFile(path string) string {
// Write one-liner text files, add newline, ignore errors (best effort).
func spewFile(path string, data string, perm os.FileMode) {
- _ = ioutil.WriteFile(path, []byte(data+"\n"), perm)
+ _ = os.WriteFile(path, []byte(data+"\n"), perm)
}
diff --git a/vendor/github.com/zcalusic/sysinfo/version.go b/vendor/github.com/zcalusic/sysinfo/version.go
index e20684c9809..67a0767dbb3 100644
--- a/vendor/github.com/zcalusic/sysinfo/version.go
+++ b/vendor/github.com/zcalusic/sysinfo/version.go
@@ -5,4 +5,4 @@
package sysinfo
// Version of the sysinfo library.
-const Version = "1.0.2"
+const Version = "1.1.0"
diff --git a/vendor/golang.org/x/crypto/argon2/argon2.go b/vendor/golang.org/x/crypto/argon2/argon2.go
deleted file mode 100644
index 29f0a2de451..00000000000
--- a/vendor/golang.org/x/crypto/argon2/argon2.go
+++ /dev/null
@@ -1,283 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package argon2 implements the key derivation function Argon2.
-// Argon2 was selected as the winner of the Password Hashing Competition and can
-// be used to derive cryptographic keys from passwords.
-//
-// For a detailed specification of Argon2 see [1].
-//
-// If you aren't sure which function you need, use Argon2id (IDKey) and
-// the parameter recommendations for your scenario.
-//
-// # Argon2i
-//
-// Argon2i (implemented by Key) is the side-channel resistant version of Argon2.
-// It uses data-independent memory access, which is preferred for password
-// hashing and password-based key derivation. Argon2i requires more passes over
-// memory than Argon2id to protect from trade-off attacks. The recommended
-// parameters (taken from [2]) for non-interactive operations are time=3 and to
-// use the maximum available memory.
-//
-// # Argon2id
-//
-// Argon2id (implemented by IDKey) is a hybrid version of Argon2 combining
-// Argon2i and Argon2d. It uses data-independent memory access for the first
-// half of the first iteration over the memory and data-dependent memory access
-// for the rest. Argon2id is side-channel resistant and provides better brute-
-// force cost savings due to time-memory tradeoffs than Argon2i. The recommended
-// parameters for non-interactive operations (taken from [2]) are time=1 and to
-// use the maximum available memory.
-//
-// [1] https://github.com/P-H-C/phc-winner-argon2/blob/master/argon2-specs.pdf
-// [2] https://tools.ietf.org/html/draft-irtf-cfrg-argon2-03#section-9.3
-package argon2
-
-import (
- "encoding/binary"
- "sync"
-
- "golang.org/x/crypto/blake2b"
-)
-
-// The Argon2 version implemented by this package.
-const Version = 0x13
-
-const (
- argon2d = iota
- argon2i
- argon2id
-)
-
-// Key derives a key from the password, salt, and cost parameters using Argon2i
-// returning a byte slice of length keyLen that can be used as cryptographic
-// key. The CPU cost and parallelism degree must be greater than zero.
-//
-// For example, you can get a derived key for e.g. AES-256 (which needs a
-// 32-byte key) by doing:
-//
-// key := argon2.Key([]byte("some password"), salt, 3, 32*1024, 4, 32)
-//
-// The draft RFC recommends[2] time=3, and memory=32*1024 is a sensible number.
-// If using that amount of memory (32 MB) is not possible in some contexts then
-// the time parameter can be increased to compensate.
-//
-// The time parameter specifies the number of passes over the memory and the
-// memory parameter specifies the size of the memory in KiB. For example
-// memory=32*1024 sets the memory cost to ~32 MB. The number of threads can be
-// adjusted to the number of available CPUs. The cost parameters should be
-// increased as memory latency and CPU parallelism increases. Remember to get a
-// good random salt.
-func Key(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte {
- return deriveKey(argon2i, password, salt, nil, nil, time, memory, threads, keyLen)
-}
-
-// IDKey derives a key from the password, salt, and cost parameters using
-// Argon2id returning a byte slice of length keyLen that can be used as
-// cryptographic key. The CPU cost and parallelism degree must be greater than
-// zero.
-//
-// For example, you can get a derived key for e.g. AES-256 (which needs a
-// 32-byte key) by doing:
-//
-// key := argon2.IDKey([]byte("some password"), salt, 1, 64*1024, 4, 32)
-//
-// The draft RFC recommends[2] time=1, and memory=64*1024 is a sensible number.
-// If using that amount of memory (64 MB) is not possible in some contexts then
-// the time parameter can be increased to compensate.
-//
-// The time parameter specifies the number of passes over the memory and the
-// memory parameter specifies the size of the memory in KiB. For example
-// memory=64*1024 sets the memory cost to ~64 MB. The number of threads can be
-// adjusted to the numbers of available CPUs. The cost parameters should be
-// increased as memory latency and CPU parallelism increases. Remember to get a
-// good random salt.
-func IDKey(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte {
- return deriveKey(argon2id, password, salt, nil, nil, time, memory, threads, keyLen)
-}
-
-func deriveKey(mode int, password, salt, secret, data []byte, time, memory uint32, threads uint8, keyLen uint32) []byte {
- if time < 1 {
- panic("argon2: number of rounds too small")
- }
- if threads < 1 {
- panic("argon2: parallelism degree too low")
- }
- h0 := initHash(password, salt, secret, data, time, memory, uint32(threads), keyLen, mode)
-
- memory = memory / (syncPoints * uint32(threads)) * (syncPoints * uint32(threads))
- if memory < 2*syncPoints*uint32(threads) {
- memory = 2 * syncPoints * uint32(threads)
- }
- B := initBlocks(&h0, memory, uint32(threads))
- processBlocks(B, time, memory, uint32(threads), mode)
- return extractKey(B, memory, uint32(threads), keyLen)
-}
-
-const (
- blockLength = 128
- syncPoints = 4
-)
-
-type block [blockLength]uint64
-
-func initHash(password, salt, key, data []byte, time, memory, threads, keyLen uint32, mode int) [blake2b.Size + 8]byte {
- var (
- h0 [blake2b.Size + 8]byte
- params [24]byte
- tmp [4]byte
- )
-
- b2, _ := blake2b.New512(nil)
- binary.LittleEndian.PutUint32(params[0:4], threads)
- binary.LittleEndian.PutUint32(params[4:8], keyLen)
- binary.LittleEndian.PutUint32(params[8:12], memory)
- binary.LittleEndian.PutUint32(params[12:16], time)
- binary.LittleEndian.PutUint32(params[16:20], uint32(Version))
- binary.LittleEndian.PutUint32(params[20:24], uint32(mode))
- b2.Write(params[:])
- binary.LittleEndian.PutUint32(tmp[:], uint32(len(password)))
- b2.Write(tmp[:])
- b2.Write(password)
- binary.LittleEndian.PutUint32(tmp[:], uint32(len(salt)))
- b2.Write(tmp[:])
- b2.Write(salt)
- binary.LittleEndian.PutUint32(tmp[:], uint32(len(key)))
- b2.Write(tmp[:])
- b2.Write(key)
- binary.LittleEndian.PutUint32(tmp[:], uint32(len(data)))
- b2.Write(tmp[:])
- b2.Write(data)
- b2.Sum(h0[:0])
- return h0
-}
-
-func initBlocks(h0 *[blake2b.Size + 8]byte, memory, threads uint32) []block {
- var block0 [1024]byte
- B := make([]block, memory)
- for lane := uint32(0); lane < threads; lane++ {
- j := lane * (memory / threads)
- binary.LittleEndian.PutUint32(h0[blake2b.Size+4:], lane)
-
- binary.LittleEndian.PutUint32(h0[blake2b.Size:], 0)
- blake2bHash(block0[:], h0[:])
- for i := range B[j+0] {
- B[j+0][i] = binary.LittleEndian.Uint64(block0[i*8:])
- }
-
- binary.LittleEndian.PutUint32(h0[blake2b.Size:], 1)
- blake2bHash(block0[:], h0[:])
- for i := range B[j+1] {
- B[j+1][i] = binary.LittleEndian.Uint64(block0[i*8:])
- }
- }
- return B
-}
-
-func processBlocks(B []block, time, memory, threads uint32, mode int) {
- lanes := memory / threads
- segments := lanes / syncPoints
-
- processSegment := func(n, slice, lane uint32, wg *sync.WaitGroup) {
- var addresses, in, zero block
- if mode == argon2i || (mode == argon2id && n == 0 && slice < syncPoints/2) {
- in[0] = uint64(n)
- in[1] = uint64(lane)
- in[2] = uint64(slice)
- in[3] = uint64(memory)
- in[4] = uint64(time)
- in[5] = uint64(mode)
- }
-
- index := uint32(0)
- if n == 0 && slice == 0 {
- index = 2 // we have already generated the first two blocks
- if mode == argon2i || mode == argon2id {
- in[6]++
- processBlock(&addresses, &in, &zero)
- processBlock(&addresses, &addresses, &zero)
- }
- }
-
- offset := lane*lanes + slice*segments + index
- var random uint64
- for index < segments {
- prev := offset - 1
- if index == 0 && slice == 0 {
- prev += lanes // last block in lane
- }
- if mode == argon2i || (mode == argon2id && n == 0 && slice < syncPoints/2) {
- if index%blockLength == 0 {
- in[6]++
- processBlock(&addresses, &in, &zero)
- processBlock(&addresses, &addresses, &zero)
- }
- random = addresses[index%blockLength]
- } else {
- random = B[prev][0]
- }
- newOffset := indexAlpha(random, lanes, segments, threads, n, slice, lane, index)
- processBlockXOR(&B[offset], &B[prev], &B[newOffset])
- index, offset = index+1, offset+1
- }
- wg.Done()
- }
-
- for n := uint32(0); n < time; n++ {
- for slice := uint32(0); slice < syncPoints; slice++ {
- var wg sync.WaitGroup
- for lane := uint32(0); lane < threads; lane++ {
- wg.Add(1)
- go processSegment(n, slice, lane, &wg)
- }
- wg.Wait()
- }
- }
-
-}
-
-func extractKey(B []block, memory, threads, keyLen uint32) []byte {
- lanes := memory / threads
- for lane := uint32(0); lane < threads-1; lane++ {
- for i, v := range B[(lane*lanes)+lanes-1] {
- B[memory-1][i] ^= v
- }
- }
-
- var block [1024]byte
- for i, v := range B[memory-1] {
- binary.LittleEndian.PutUint64(block[i*8:], v)
- }
- key := make([]byte, keyLen)
- blake2bHash(key, block[:])
- return key
-}
-
-func indexAlpha(rand uint64, lanes, segments, threads, n, slice, lane, index uint32) uint32 {
- refLane := uint32(rand>>32) % threads
- if n == 0 && slice == 0 {
- refLane = lane
- }
- m, s := 3*segments, ((slice+1)%syncPoints)*segments
- if lane == refLane {
- m += index
- }
- if n == 0 {
- m, s = slice*segments, 0
- if slice == 0 || lane == refLane {
- m += index
- }
- }
- if index == 0 || lane == refLane {
- m--
- }
- return phi(rand, uint64(m), uint64(s), refLane, lanes)
-}
-
-func phi(rand, m, s uint64, lane, lanes uint32) uint32 {
- p := rand & 0xFFFFFFFF
- p = (p * p) >> 32
- p = (p * m) >> 32
- return lane*lanes + uint32((s+m-(p+1))%uint64(lanes))
-}
diff --git a/vendor/golang.org/x/crypto/argon2/blake2b.go b/vendor/golang.org/x/crypto/argon2/blake2b.go
deleted file mode 100644
index 10f46948dc1..00000000000
--- a/vendor/golang.org/x/crypto/argon2/blake2b.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package argon2
-
-import (
- "encoding/binary"
- "hash"
-
- "golang.org/x/crypto/blake2b"
-)
-
-// blake2bHash computes an arbitrary long hash value of in
-// and writes the hash to out.
-func blake2bHash(out []byte, in []byte) {
- var b2 hash.Hash
- if n := len(out); n < blake2b.Size {
- b2, _ = blake2b.New(n, nil)
- } else {
- b2, _ = blake2b.New512(nil)
- }
-
- var buffer [blake2b.Size]byte
- binary.LittleEndian.PutUint32(buffer[:4], uint32(len(out)))
- b2.Write(buffer[:4])
- b2.Write(in)
-
- if len(out) <= blake2b.Size {
- b2.Sum(out[:0])
- return
- }
-
- outLen := len(out)
- b2.Sum(buffer[:0])
- b2.Reset()
- copy(out, buffer[:32])
- out = out[32:]
- for len(out) > blake2b.Size {
- b2.Write(buffer[:])
- b2.Sum(buffer[:0])
- copy(out, buffer[:32])
- out = out[32:]
- b2.Reset()
- }
-
- if outLen%blake2b.Size > 0 { // outLen > 64
- r := ((outLen + 31) / 32) - 2 // ⌈τ /32⌉-2
- b2, _ = blake2b.New(outLen-32*r, nil)
- }
- b2.Write(buffer[:])
- b2.Sum(out[:0])
-}
diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.go b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go
deleted file mode 100644
index 063e7784f86..00000000000
--- a/vendor/golang.org/x/crypto/argon2/blamka_amd64.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build amd64 && gc && !purego
-
-package argon2
-
-import "golang.org/x/sys/cpu"
-
-func init() {
- useSSE4 = cpu.X86.HasSSE41
-}
-
-//go:noescape
-func mixBlocksSSE2(out, a, b, c *block)
-
-//go:noescape
-func xorBlocksSSE2(out, a, b, c *block)
-
-//go:noescape
-func blamkaSSE4(b *block)
-
-func processBlockSSE(out, in1, in2 *block, xor bool) {
- var t block
- mixBlocksSSE2(&t, in1, in2, &t)
- if useSSE4 {
- blamkaSSE4(&t)
- } else {
- for i := 0; i < blockLength; i += 16 {
- blamkaGeneric(
- &t[i+0], &t[i+1], &t[i+2], &t[i+3],
- &t[i+4], &t[i+5], &t[i+6], &t[i+7],
- &t[i+8], &t[i+9], &t[i+10], &t[i+11],
- &t[i+12], &t[i+13], &t[i+14], &t[i+15],
- )
- }
- for i := 0; i < blockLength/8; i += 2 {
- blamkaGeneric(
- &t[i], &t[i+1], &t[16+i], &t[16+i+1],
- &t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1],
- &t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1],
- &t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1],
- )
- }
- }
- if xor {
- xorBlocksSSE2(out, in1, in2, &t)
- } else {
- mixBlocksSSE2(out, in1, in2, &t)
- }
-}
-
-func processBlock(out, in1, in2 *block) {
- processBlockSSE(out, in1, in2, false)
-}
-
-func processBlockXOR(out, in1, in2 *block) {
- processBlockSSE(out, in1, in2, true)
-}
diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.s b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s
deleted file mode 100644
index 6713accac09..00000000000
--- a/vendor/golang.org/x/crypto/argon2/blamka_amd64.s
+++ /dev/null
@@ -1,243 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build amd64 && gc && !purego
-
-#include "textflag.h"
-
-DATA ·c40<>+0x00(SB)/8, $0x0201000706050403
-DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
-GLOBL ·c40<>(SB), (NOPTR+RODATA), $16
-
-DATA ·c48<>+0x00(SB)/8, $0x0100070605040302
-DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
-GLOBL ·c48<>(SB), (NOPTR+RODATA), $16
-
-#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \
- MOVO v4, t1; \
- MOVO v5, v4; \
- MOVO t1, v5; \
- MOVO v6, t1; \
- PUNPCKLQDQ v6, t2; \
- PUNPCKHQDQ v7, v6; \
- PUNPCKHQDQ t2, v6; \
- PUNPCKLQDQ v7, t2; \
- MOVO t1, v7; \
- MOVO v2, t1; \
- PUNPCKHQDQ t2, v7; \
- PUNPCKLQDQ v3, t2; \
- PUNPCKHQDQ t2, v2; \
- PUNPCKLQDQ t1, t2; \
- PUNPCKHQDQ t2, v3
-
-#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \
- MOVO v4, t1; \
- MOVO v5, v4; \
- MOVO t1, v5; \
- MOVO v2, t1; \
- PUNPCKLQDQ v2, t2; \
- PUNPCKHQDQ v3, v2; \
- PUNPCKHQDQ t2, v2; \
- PUNPCKLQDQ v3, t2; \
- MOVO t1, v3; \
- MOVO v6, t1; \
- PUNPCKHQDQ t2, v3; \
- PUNPCKLQDQ v7, t2; \
- PUNPCKHQDQ t2, v6; \
- PUNPCKLQDQ t1, t2; \
- PUNPCKHQDQ t2, v7
-
-#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, t0, c40, c48) \
- MOVO v0, t0; \
- PMULULQ v2, t0; \
- PADDQ v2, v0; \
- PADDQ t0, v0; \
- PADDQ t0, v0; \
- PXOR v0, v6; \
- PSHUFD $0xB1, v6, v6; \
- MOVO v4, t0; \
- PMULULQ v6, t0; \
- PADDQ v6, v4; \
- PADDQ t0, v4; \
- PADDQ t0, v4; \
- PXOR v4, v2; \
- PSHUFB c40, v2; \
- MOVO v0, t0; \
- PMULULQ v2, t0; \
- PADDQ v2, v0; \
- PADDQ t0, v0; \
- PADDQ t0, v0; \
- PXOR v0, v6; \
- PSHUFB c48, v6; \
- MOVO v4, t0; \
- PMULULQ v6, t0; \
- PADDQ v6, v4; \
- PADDQ t0, v4; \
- PADDQ t0, v4; \
- PXOR v4, v2; \
- MOVO v2, t0; \
- PADDQ v2, t0; \
- PSRLQ $63, v2; \
- PXOR t0, v2; \
- MOVO v1, t0; \
- PMULULQ v3, t0; \
- PADDQ v3, v1; \
- PADDQ t0, v1; \
- PADDQ t0, v1; \
- PXOR v1, v7; \
- PSHUFD $0xB1, v7, v7; \
- MOVO v5, t0; \
- PMULULQ v7, t0; \
- PADDQ v7, v5; \
- PADDQ t0, v5; \
- PADDQ t0, v5; \
- PXOR v5, v3; \
- PSHUFB c40, v3; \
- MOVO v1, t0; \
- PMULULQ v3, t0; \
- PADDQ v3, v1; \
- PADDQ t0, v1; \
- PADDQ t0, v1; \
- PXOR v1, v7; \
- PSHUFB c48, v7; \
- MOVO v5, t0; \
- PMULULQ v7, t0; \
- PADDQ v7, v5; \
- PADDQ t0, v5; \
- PADDQ t0, v5; \
- PXOR v5, v3; \
- MOVO v3, t0; \
- PADDQ v3, t0; \
- PSRLQ $63, v3; \
- PXOR t0, v3
-
-#define LOAD_MSG_0(block, off) \
- MOVOU 8*(off+0)(block), X0; \
- MOVOU 8*(off+2)(block), X1; \
- MOVOU 8*(off+4)(block), X2; \
- MOVOU 8*(off+6)(block), X3; \
- MOVOU 8*(off+8)(block), X4; \
- MOVOU 8*(off+10)(block), X5; \
- MOVOU 8*(off+12)(block), X6; \
- MOVOU 8*(off+14)(block), X7
-
-#define STORE_MSG_0(block, off) \
- MOVOU X0, 8*(off+0)(block); \
- MOVOU X1, 8*(off+2)(block); \
- MOVOU X2, 8*(off+4)(block); \
- MOVOU X3, 8*(off+6)(block); \
- MOVOU X4, 8*(off+8)(block); \
- MOVOU X5, 8*(off+10)(block); \
- MOVOU X6, 8*(off+12)(block); \
- MOVOU X7, 8*(off+14)(block)
-
-#define LOAD_MSG_1(block, off) \
- MOVOU 8*off+0*8(block), X0; \
- MOVOU 8*off+16*8(block), X1; \
- MOVOU 8*off+32*8(block), X2; \
- MOVOU 8*off+48*8(block), X3; \
- MOVOU 8*off+64*8(block), X4; \
- MOVOU 8*off+80*8(block), X5; \
- MOVOU 8*off+96*8(block), X6; \
- MOVOU 8*off+112*8(block), X7
-
-#define STORE_MSG_1(block, off) \
- MOVOU X0, 8*off+0*8(block); \
- MOVOU X1, 8*off+16*8(block); \
- MOVOU X2, 8*off+32*8(block); \
- MOVOU X3, 8*off+48*8(block); \
- MOVOU X4, 8*off+64*8(block); \
- MOVOU X5, 8*off+80*8(block); \
- MOVOU X6, 8*off+96*8(block); \
- MOVOU X7, 8*off+112*8(block)
-
-#define BLAMKA_ROUND_0(block, off, t0, t1, c40, c48) \
- LOAD_MSG_0(block, off); \
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \
- SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \
- SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \
- STORE_MSG_0(block, off)
-
-#define BLAMKA_ROUND_1(block, off, t0, t1, c40, c48) \
- LOAD_MSG_1(block, off); \
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \
- SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \
- SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \
- STORE_MSG_1(block, off)
-
-// func blamkaSSE4(b *block)
-TEXT ·blamkaSSE4(SB), 4, $0-8
- MOVQ b+0(FP), AX
-
- MOVOU ·c40<>(SB), X10
- MOVOU ·c48<>(SB), X11
-
- BLAMKA_ROUND_0(AX, 0, X8, X9, X10, X11)
- BLAMKA_ROUND_0(AX, 16, X8, X9, X10, X11)
- BLAMKA_ROUND_0(AX, 32, X8, X9, X10, X11)
- BLAMKA_ROUND_0(AX, 48, X8, X9, X10, X11)
- BLAMKA_ROUND_0(AX, 64, X8, X9, X10, X11)
- BLAMKA_ROUND_0(AX, 80, X8, X9, X10, X11)
- BLAMKA_ROUND_0(AX, 96, X8, X9, X10, X11)
- BLAMKA_ROUND_0(AX, 112, X8, X9, X10, X11)
-
- BLAMKA_ROUND_1(AX, 0, X8, X9, X10, X11)
- BLAMKA_ROUND_1(AX, 2, X8, X9, X10, X11)
- BLAMKA_ROUND_1(AX, 4, X8, X9, X10, X11)
- BLAMKA_ROUND_1(AX, 6, X8, X9, X10, X11)
- BLAMKA_ROUND_1(AX, 8, X8, X9, X10, X11)
- BLAMKA_ROUND_1(AX, 10, X8, X9, X10, X11)
- BLAMKA_ROUND_1(AX, 12, X8, X9, X10, X11)
- BLAMKA_ROUND_1(AX, 14, X8, X9, X10, X11)
- RET
-
-// func mixBlocksSSE2(out, a, b, c *block)
-TEXT ·mixBlocksSSE2(SB), 4, $0-32
- MOVQ out+0(FP), DX
- MOVQ a+8(FP), AX
- MOVQ b+16(FP), BX
- MOVQ c+24(FP), CX
- MOVQ $128, DI
-
-loop:
- MOVOU 0(AX), X0
- MOVOU 0(BX), X1
- MOVOU 0(CX), X2
- PXOR X1, X0
- PXOR X2, X0
- MOVOU X0, 0(DX)
- ADDQ $16, AX
- ADDQ $16, BX
- ADDQ $16, CX
- ADDQ $16, DX
- SUBQ $2, DI
- JA loop
- RET
-
-// func xorBlocksSSE2(out, a, b, c *block)
-TEXT ·xorBlocksSSE2(SB), 4, $0-32
- MOVQ out+0(FP), DX
- MOVQ a+8(FP), AX
- MOVQ b+16(FP), BX
- MOVQ c+24(FP), CX
- MOVQ $128, DI
-
-loop:
- MOVOU 0(AX), X0
- MOVOU 0(BX), X1
- MOVOU 0(CX), X2
- MOVOU 0(DX), X3
- PXOR X1, X0
- PXOR X2, X0
- PXOR X3, X0
- MOVOU X0, 0(DX)
- ADDQ $16, AX
- ADDQ $16, BX
- ADDQ $16, CX
- ADDQ $16, DX
- SUBQ $2, DI
- JA loop
- RET
diff --git a/vendor/golang.org/x/crypto/argon2/blamka_generic.go b/vendor/golang.org/x/crypto/argon2/blamka_generic.go
deleted file mode 100644
index a481b2243f8..00000000000
--- a/vendor/golang.org/x/crypto/argon2/blamka_generic.go
+++ /dev/null
@@ -1,163 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package argon2
-
-var useSSE4 bool
-
-func processBlockGeneric(out, in1, in2 *block, xor bool) {
- var t block
- for i := range t {
- t[i] = in1[i] ^ in2[i]
- }
- for i := 0; i < blockLength; i += 16 {
- blamkaGeneric(
- &t[i+0], &t[i+1], &t[i+2], &t[i+3],
- &t[i+4], &t[i+5], &t[i+6], &t[i+7],
- &t[i+8], &t[i+9], &t[i+10], &t[i+11],
- &t[i+12], &t[i+13], &t[i+14], &t[i+15],
- )
- }
- for i := 0; i < blockLength/8; i += 2 {
- blamkaGeneric(
- &t[i], &t[i+1], &t[16+i], &t[16+i+1],
- &t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1],
- &t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1],
- &t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1],
- )
- }
- if xor {
- for i := range t {
- out[i] ^= in1[i] ^ in2[i] ^ t[i]
- }
- } else {
- for i := range t {
- out[i] = in1[i] ^ in2[i] ^ t[i]
- }
- }
-}
-
-func blamkaGeneric(t00, t01, t02, t03, t04, t05, t06, t07, t08, t09, t10, t11, t12, t13, t14, t15 *uint64) {
- v00, v01, v02, v03 := *t00, *t01, *t02, *t03
- v04, v05, v06, v07 := *t04, *t05, *t06, *t07
- v08, v09, v10, v11 := *t08, *t09, *t10, *t11
- v12, v13, v14, v15 := *t12, *t13, *t14, *t15
-
- v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04))
- v12 ^= v00
- v12 = v12>>32 | v12<<32
- v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12))
- v04 ^= v08
- v04 = v04>>24 | v04<<40
-
- v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04))
- v12 ^= v00
- v12 = v12>>16 | v12<<48
- v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12))
- v04 ^= v08
- v04 = v04>>63 | v04<<1
-
- v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05))
- v13 ^= v01
- v13 = v13>>32 | v13<<32
- v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13))
- v05 ^= v09
- v05 = v05>>24 | v05<<40
-
- v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05))
- v13 ^= v01
- v13 = v13>>16 | v13<<48
- v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13))
- v05 ^= v09
- v05 = v05>>63 | v05<<1
-
- v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06))
- v14 ^= v02
- v14 = v14>>32 | v14<<32
- v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14))
- v06 ^= v10
- v06 = v06>>24 | v06<<40
-
- v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06))
- v14 ^= v02
- v14 = v14>>16 | v14<<48
- v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14))
- v06 ^= v10
- v06 = v06>>63 | v06<<1
-
- v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07))
- v15 ^= v03
- v15 = v15>>32 | v15<<32
- v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15))
- v07 ^= v11
- v07 = v07>>24 | v07<<40
-
- v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07))
- v15 ^= v03
- v15 = v15>>16 | v15<<48
- v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15))
- v07 ^= v11
- v07 = v07>>63 | v07<<1
-
- v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05))
- v15 ^= v00
- v15 = v15>>32 | v15<<32
- v10 += v15 + 2*uint64(uint32(v10))*uint64(uint32(v15))
- v05 ^= v10
- v05 = v05>>24 | v05<<40
-
- v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05))
- v15 ^= v00
- v15 = v15>>16 | v15<<48
- v10 += v15 + 2*uint64(uint32(v10))*uint64(uint32(v15))
- v05 ^= v10
- v05 = v05>>63 | v05<<1
-
- v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06))
- v12 ^= v01
- v12 = v12>>32 | v12<<32
- v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12))
- v06 ^= v11
- v06 = v06>>24 | v06<<40
-
- v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06))
- v12 ^= v01
- v12 = v12>>16 | v12<<48
- v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12))
- v06 ^= v11
- v06 = v06>>63 | v06<<1
-
- v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07))
- v13 ^= v02
- v13 = v13>>32 | v13<<32
- v08 += v13 + 2*uint64(uint32(v08))*uint64(uint32(v13))
- v07 ^= v08
- v07 = v07>>24 | v07<<40
-
- v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07))
- v13 ^= v02
- v13 = v13>>16 | v13<<48
- v08 += v13 + 2*uint64(uint32(v08))*uint64(uint32(v13))
- v07 ^= v08
- v07 = v07>>63 | v07<<1
-
- v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04))
- v14 ^= v03
- v14 = v14>>32 | v14<<32
- v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14))
- v04 ^= v09
- v04 = v04>>24 | v04<<40
-
- v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04))
- v14 ^= v03
- v14 = v14>>16 | v14<<48
- v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14))
- v04 ^= v09
- v04 = v04>>63 | v04<<1
-
- *t00, *t01, *t02, *t03 = v00, v01, v02, v03
- *t04, *t05, *t06, *t07 = v04, v05, v06, v07
- *t08, *t09, *t10, *t11 = v08, v09, v10, v11
- *t12, *t13, *t14, *t15 = v12, v13, v14, v15
-}
diff --git a/vendor/golang.org/x/crypto/argon2/blamka_ref.go b/vendor/golang.org/x/crypto/argon2/blamka_ref.go
deleted file mode 100644
index 16d58c650eb..00000000000
--- a/vendor/golang.org/x/crypto/argon2/blamka_ref.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !amd64 || purego || !gc
-
-package argon2
-
-func processBlock(out, in1, in2 *block) {
- processBlockGeneric(out, in1, in2, false)
-}
-
-func processBlockXOR(out, in1, in2 *block) {
- processBlockGeneric(out, in1, in2, true)
-}
diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b.go b/vendor/golang.org/x/crypto/blake2b/blake2b.go
deleted file mode 100644
index d2e98d4295b..00000000000
--- a/vendor/golang.org/x/crypto/blake2b/blake2b.go
+++ /dev/null
@@ -1,291 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package blake2b implements the BLAKE2b hash algorithm defined by RFC 7693
-// and the extendable output function (XOF) BLAKE2Xb.
-//
-// BLAKE2b is optimized for 64-bit platforms—including NEON-enabled ARMs—and
-// produces digests of any size between 1 and 64 bytes.
-// For a detailed specification of BLAKE2b see https://blake2.net/blake2.pdf
-// and for BLAKE2Xb see https://blake2.net/blake2x.pdf
-//
-// If you aren't sure which function you need, use BLAKE2b (Sum512 or New512).
-// If you need a secret-key MAC (message authentication code), use the New512
-// function with a non-nil key.
-//
-// BLAKE2X is a construction to compute hash values larger than 64 bytes. It
-// can produce hash values between 0 and 4 GiB.
-package blake2b
-
-import (
- "encoding/binary"
- "errors"
- "hash"
-)
-
-const (
- // The blocksize of BLAKE2b in bytes.
- BlockSize = 128
- // The hash size of BLAKE2b-512 in bytes.
- Size = 64
- // The hash size of BLAKE2b-384 in bytes.
- Size384 = 48
- // The hash size of BLAKE2b-256 in bytes.
- Size256 = 32
-)
-
-var (
- useAVX2 bool
- useAVX bool
- useSSE4 bool
-)
-
-var (
- errKeySize = errors.New("blake2b: invalid key size")
- errHashSize = errors.New("blake2b: invalid hash size")
-)
-
-var iv = [8]uint64{
- 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1,
- 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179,
-}
-
-// Sum512 returns the BLAKE2b-512 checksum of the data.
-func Sum512(data []byte) [Size]byte {
- var sum [Size]byte
- checkSum(&sum, Size, data)
- return sum
-}
-
-// Sum384 returns the BLAKE2b-384 checksum of the data.
-func Sum384(data []byte) [Size384]byte {
- var sum [Size]byte
- var sum384 [Size384]byte
- checkSum(&sum, Size384, data)
- copy(sum384[:], sum[:Size384])
- return sum384
-}
-
-// Sum256 returns the BLAKE2b-256 checksum of the data.
-func Sum256(data []byte) [Size256]byte {
- var sum [Size]byte
- var sum256 [Size256]byte
- checkSum(&sum, Size256, data)
- copy(sum256[:], sum[:Size256])
- return sum256
-}
-
-// New512 returns a new hash.Hash computing the BLAKE2b-512 checksum. A non-nil
-// key turns the hash into a MAC. The key must be between zero and 64 bytes long.
-func New512(key []byte) (hash.Hash, error) { return newDigest(Size, key) }
-
-// New384 returns a new hash.Hash computing the BLAKE2b-384 checksum. A non-nil
-// key turns the hash into a MAC. The key must be between zero and 64 bytes long.
-func New384(key []byte) (hash.Hash, error) { return newDigest(Size384, key) }
-
-// New256 returns a new hash.Hash computing the BLAKE2b-256 checksum. A non-nil
-// key turns the hash into a MAC. The key must be between zero and 64 bytes long.
-func New256(key []byte) (hash.Hash, error) { return newDigest(Size256, key) }
-
-// New returns a new hash.Hash computing the BLAKE2b checksum with a custom length.
-// A non-nil key turns the hash into a MAC. The key must be between zero and 64 bytes long.
-// The hash size can be a value between 1 and 64 but it is highly recommended to use
-// values equal or greater than:
-// - 32 if BLAKE2b is used as a hash function (The key is zero bytes long).
-// - 16 if BLAKE2b is used as a MAC function (The key is at least 16 bytes long).
-// When the key is nil, the returned hash.Hash implements BinaryMarshaler
-// and BinaryUnmarshaler for state (de)serialization as documented by hash.Hash.
-func New(size int, key []byte) (hash.Hash, error) { return newDigest(size, key) }
-
-func newDigest(hashSize int, key []byte) (*digest, error) {
- if hashSize < 1 || hashSize > Size {
- return nil, errHashSize
- }
- if len(key) > Size {
- return nil, errKeySize
- }
- d := &digest{
- size: hashSize,
- keyLen: len(key),
- }
- copy(d.key[:], key)
- d.Reset()
- return d, nil
-}
-
-func checkSum(sum *[Size]byte, hashSize int, data []byte) {
- h := iv
- h[0] ^= uint64(hashSize) | (1 << 16) | (1 << 24)
- var c [2]uint64
-
- if length := len(data); length > BlockSize {
- n := length &^ (BlockSize - 1)
- if length == n {
- n -= BlockSize
- }
- hashBlocks(&h, &c, 0, data[:n])
- data = data[n:]
- }
-
- var block [BlockSize]byte
- offset := copy(block[:], data)
- remaining := uint64(BlockSize - offset)
- if c[0] < remaining {
- c[1]--
- }
- c[0] -= remaining
-
- hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:])
-
- for i, v := range h[:(hashSize+7)/8] {
- binary.LittleEndian.PutUint64(sum[8*i:], v)
- }
-}
-
-type digest struct {
- h [8]uint64
- c [2]uint64
- size int
- block [BlockSize]byte
- offset int
-
- key [BlockSize]byte
- keyLen int
-}
-
-const (
- magic = "b2b"
- marshaledSize = len(magic) + 8*8 + 2*8 + 1 + BlockSize + 1
-)
-
-func (d *digest) MarshalBinary() ([]byte, error) {
- if d.keyLen != 0 {
- return nil, errors.New("crypto/blake2b: cannot marshal MACs")
- }
- b := make([]byte, 0, marshaledSize)
- b = append(b, magic...)
- for i := 0; i < 8; i++ {
- b = appendUint64(b, d.h[i])
- }
- b = appendUint64(b, d.c[0])
- b = appendUint64(b, d.c[1])
- // Maximum value for size is 64
- b = append(b, byte(d.size))
- b = append(b, d.block[:]...)
- b = append(b, byte(d.offset))
- return b, nil
-}
-
-func (d *digest) UnmarshalBinary(b []byte) error {
- if len(b) < len(magic) || string(b[:len(magic)]) != magic {
- return errors.New("crypto/blake2b: invalid hash state identifier")
- }
- if len(b) != marshaledSize {
- return errors.New("crypto/blake2b: invalid hash state size")
- }
- b = b[len(magic):]
- for i := 0; i < 8; i++ {
- b, d.h[i] = consumeUint64(b)
- }
- b, d.c[0] = consumeUint64(b)
- b, d.c[1] = consumeUint64(b)
- d.size = int(b[0])
- b = b[1:]
- copy(d.block[:], b[:BlockSize])
- b = b[BlockSize:]
- d.offset = int(b[0])
- return nil
-}
-
-func (d *digest) BlockSize() int { return BlockSize }
-
-func (d *digest) Size() int { return d.size }
-
-func (d *digest) Reset() {
- d.h = iv
- d.h[0] ^= uint64(d.size) | (uint64(d.keyLen) << 8) | (1 << 16) | (1 << 24)
- d.offset, d.c[0], d.c[1] = 0, 0, 0
- if d.keyLen > 0 {
- d.block = d.key
- d.offset = BlockSize
- }
-}
-
-func (d *digest) Write(p []byte) (n int, err error) {
- n = len(p)
-
- if d.offset > 0 {
- remaining := BlockSize - d.offset
- if n <= remaining {
- d.offset += copy(d.block[d.offset:], p)
- return
- }
- copy(d.block[d.offset:], p[:remaining])
- hashBlocks(&d.h, &d.c, 0, d.block[:])
- d.offset = 0
- p = p[remaining:]
- }
-
- if length := len(p); length > BlockSize {
- nn := length &^ (BlockSize - 1)
- if length == nn {
- nn -= BlockSize
- }
- hashBlocks(&d.h, &d.c, 0, p[:nn])
- p = p[nn:]
- }
-
- if len(p) > 0 {
- d.offset += copy(d.block[:], p)
- }
-
- return
-}
-
-func (d *digest) Sum(sum []byte) []byte {
- var hash [Size]byte
- d.finalize(&hash)
- return append(sum, hash[:d.size]...)
-}
-
-func (d *digest) finalize(hash *[Size]byte) {
- var block [BlockSize]byte
- copy(block[:], d.block[:d.offset])
- remaining := uint64(BlockSize - d.offset)
-
- c := d.c
- if c[0] < remaining {
- c[1]--
- }
- c[0] -= remaining
-
- h := d.h
- hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:])
-
- for i, v := range h {
- binary.LittleEndian.PutUint64(hash[8*i:], v)
- }
-}
-
-func appendUint64(b []byte, x uint64) []byte {
- var a [8]byte
- binary.BigEndian.PutUint64(a[:], x)
- return append(b, a[:]...)
-}
-
-func appendUint32(b []byte, x uint32) []byte {
- var a [4]byte
- binary.BigEndian.PutUint32(a[:], x)
- return append(b, a[:]...)
-}
-
-func consumeUint64(b []byte) ([]byte, uint64) {
- x := binary.BigEndian.Uint64(b)
- return b[8:], x
-}
-
-func consumeUint32(b []byte) ([]byte, uint32) {
- x := binary.BigEndian.Uint32(b)
- return b[4:], x
-}
diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go
deleted file mode 100644
index 199c21d27aa..00000000000
--- a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build amd64 && gc && !purego
-
-package blake2b
-
-import "golang.org/x/sys/cpu"
-
-func init() {
- useAVX2 = cpu.X86.HasAVX2
- useAVX = cpu.X86.HasAVX
- useSSE4 = cpu.X86.HasSSE41
-}
-
-//go:noescape
-func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
-
-//go:noescape
-func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
-
-//go:noescape
-func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
-
-func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) {
- switch {
- case useAVX2:
- hashBlocksAVX2(h, c, flag, blocks)
- case useAVX:
- hashBlocksAVX(h, c, flag, blocks)
- case useSSE4:
- hashBlocksSSE4(h, c, flag, blocks)
- default:
- hashBlocksGeneric(h, c, flag, blocks)
- }
-}
diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s
deleted file mode 100644
index 9ae8206c201..00000000000
--- a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s
+++ /dev/null
@@ -1,744 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build amd64 && gc && !purego
-
-#include "textflag.h"
-
-DATA ·AVX2_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
-DATA ·AVX2_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
-DATA ·AVX2_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b
-DATA ·AVX2_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1
-GLOBL ·AVX2_iv0<>(SB), (NOPTR+RODATA), $32
-
-DATA ·AVX2_iv1<>+0x00(SB)/8, $0x510e527fade682d1
-DATA ·AVX2_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
-DATA ·AVX2_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b
-DATA ·AVX2_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179
-GLOBL ·AVX2_iv1<>(SB), (NOPTR+RODATA), $32
-
-DATA ·AVX2_c40<>+0x00(SB)/8, $0x0201000706050403
-DATA ·AVX2_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
-DATA ·AVX2_c40<>+0x10(SB)/8, $0x0201000706050403
-DATA ·AVX2_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b
-GLOBL ·AVX2_c40<>(SB), (NOPTR+RODATA), $32
-
-DATA ·AVX2_c48<>+0x00(SB)/8, $0x0100070605040302
-DATA ·AVX2_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
-DATA ·AVX2_c48<>+0x10(SB)/8, $0x0100070605040302
-DATA ·AVX2_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a
-GLOBL ·AVX2_c48<>(SB), (NOPTR+RODATA), $32
-
-DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
-DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
-GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $16
-
-DATA ·AVX_iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b
-DATA ·AVX_iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1
-GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $16
-
-DATA ·AVX_iv2<>+0x00(SB)/8, $0x510e527fade682d1
-DATA ·AVX_iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
-GLOBL ·AVX_iv2<>(SB), (NOPTR+RODATA), $16
-
-DATA ·AVX_iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b
-DATA ·AVX_iv3<>+0x08(SB)/8, $0x5be0cd19137e2179
-GLOBL ·AVX_iv3<>(SB), (NOPTR+RODATA), $16
-
-DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403
-DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
-GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $16
-
-DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302
-DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
-GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16
-
-#define VPERMQ_0x39_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39
-#define VPERMQ_0x93_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93
-#define VPERMQ_0x4E_Y2_Y2 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e
-#define VPERMQ_0x93_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93
-#define VPERMQ_0x39_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39
-
-#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \
- VPADDQ m0, Y0, Y0; \
- VPADDQ Y1, Y0, Y0; \
- VPXOR Y0, Y3, Y3; \
- VPSHUFD $-79, Y3, Y3; \
- VPADDQ Y3, Y2, Y2; \
- VPXOR Y2, Y1, Y1; \
- VPSHUFB c40, Y1, Y1; \
- VPADDQ m1, Y0, Y0; \
- VPADDQ Y1, Y0, Y0; \
- VPXOR Y0, Y3, Y3; \
- VPSHUFB c48, Y3, Y3; \
- VPADDQ Y3, Y2, Y2; \
- VPXOR Y2, Y1, Y1; \
- VPADDQ Y1, Y1, t; \
- VPSRLQ $63, Y1, Y1; \
- VPXOR t, Y1, Y1; \
- VPERMQ_0x39_Y1_Y1; \
- VPERMQ_0x4E_Y2_Y2; \
- VPERMQ_0x93_Y3_Y3; \
- VPADDQ m2, Y0, Y0; \
- VPADDQ Y1, Y0, Y0; \
- VPXOR Y0, Y3, Y3; \
- VPSHUFD $-79, Y3, Y3; \
- VPADDQ Y3, Y2, Y2; \
- VPXOR Y2, Y1, Y1; \
- VPSHUFB c40, Y1, Y1; \
- VPADDQ m3, Y0, Y0; \
- VPADDQ Y1, Y0, Y0; \
- VPXOR Y0, Y3, Y3; \
- VPSHUFB c48, Y3, Y3; \
- VPADDQ Y3, Y2, Y2; \
- VPXOR Y2, Y1, Y1; \
- VPADDQ Y1, Y1, t; \
- VPSRLQ $63, Y1, Y1; \
- VPXOR t, Y1, Y1; \
- VPERMQ_0x39_Y3_Y3; \
- VPERMQ_0x4E_Y2_Y2; \
- VPERMQ_0x93_Y1_Y1
-
-#define VMOVQ_SI_X11_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x1E
-#define VMOVQ_SI_X12_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x26
-#define VMOVQ_SI_X13_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x2E
-#define VMOVQ_SI_X14_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x36
-#define VMOVQ_SI_X15_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x3E
-
-#define VMOVQ_SI_X11(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x5E; BYTE $n
-#define VMOVQ_SI_X12(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x66; BYTE $n
-#define VMOVQ_SI_X13(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x6E; BYTE $n
-#define VMOVQ_SI_X14(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x76; BYTE $n
-#define VMOVQ_SI_X15(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x7E; BYTE $n
-
-#define VPINSRQ_1_SI_X11_0 BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x1E; BYTE $0x01
-#define VPINSRQ_1_SI_X12_0 BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x26; BYTE $0x01
-#define VPINSRQ_1_SI_X13_0 BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x2E; BYTE $0x01
-#define VPINSRQ_1_SI_X14_0 BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x36; BYTE $0x01
-#define VPINSRQ_1_SI_X15_0 BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x3E; BYTE $0x01
-
-#define VPINSRQ_1_SI_X11(n) BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x5E; BYTE $n; BYTE $0x01
-#define VPINSRQ_1_SI_X12(n) BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x66; BYTE $n; BYTE $0x01
-#define VPINSRQ_1_SI_X13(n) BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x6E; BYTE $n; BYTE $0x01
-#define VPINSRQ_1_SI_X14(n) BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x76; BYTE $n; BYTE $0x01
-#define VPINSRQ_1_SI_X15(n) BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x7E; BYTE $n; BYTE $0x01
-
-#define VMOVQ_R8_X15 BYTE $0xC4; BYTE $0x41; BYTE $0xF9; BYTE $0x6E; BYTE $0xF8
-#define VPINSRQ_1_R9_X15 BYTE $0xC4; BYTE $0x43; BYTE $0x81; BYTE $0x22; BYTE $0xF9; BYTE $0x01
-
-// load msg: Y12 = (i0, i1, i2, i3)
-// i0, i1, i2, i3 must not be 0
-#define LOAD_MSG_AVX2_Y12(i0, i1, i2, i3) \
- VMOVQ_SI_X12(i0*8); \
- VMOVQ_SI_X11(i2*8); \
- VPINSRQ_1_SI_X12(i1*8); \
- VPINSRQ_1_SI_X11(i3*8); \
- VINSERTI128 $1, X11, Y12, Y12
-
-// load msg: Y13 = (i0, i1, i2, i3)
-// i0, i1, i2, i3 must not be 0
-#define LOAD_MSG_AVX2_Y13(i0, i1, i2, i3) \
- VMOVQ_SI_X13(i0*8); \
- VMOVQ_SI_X11(i2*8); \
- VPINSRQ_1_SI_X13(i1*8); \
- VPINSRQ_1_SI_X11(i3*8); \
- VINSERTI128 $1, X11, Y13, Y13
-
-// load msg: Y14 = (i0, i1, i2, i3)
-// i0, i1, i2, i3 must not be 0
-#define LOAD_MSG_AVX2_Y14(i0, i1, i2, i3) \
- VMOVQ_SI_X14(i0*8); \
- VMOVQ_SI_X11(i2*8); \
- VPINSRQ_1_SI_X14(i1*8); \
- VPINSRQ_1_SI_X11(i3*8); \
- VINSERTI128 $1, X11, Y14, Y14
-
-// load msg: Y15 = (i0, i1, i2, i3)
-// i0, i1, i2, i3 must not be 0
-#define LOAD_MSG_AVX2_Y15(i0, i1, i2, i3) \
- VMOVQ_SI_X15(i0*8); \
- VMOVQ_SI_X11(i2*8); \
- VPINSRQ_1_SI_X15(i1*8); \
- VPINSRQ_1_SI_X11(i3*8); \
- VINSERTI128 $1, X11, Y15, Y15
-
-#define LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() \
- VMOVQ_SI_X12_0; \
- VMOVQ_SI_X11(4*8); \
- VPINSRQ_1_SI_X12(2*8); \
- VPINSRQ_1_SI_X11(6*8); \
- VINSERTI128 $1, X11, Y12, Y12; \
- LOAD_MSG_AVX2_Y13(1, 3, 5, 7); \
- LOAD_MSG_AVX2_Y14(8, 10, 12, 14); \
- LOAD_MSG_AVX2_Y15(9, 11, 13, 15)
-
-#define LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() \
- LOAD_MSG_AVX2_Y12(14, 4, 9, 13); \
- LOAD_MSG_AVX2_Y13(10, 8, 15, 6); \
- VMOVQ_SI_X11(11*8); \
- VPSHUFD $0x4E, 0*8(SI), X14; \
- VPINSRQ_1_SI_X11(5*8); \
- VINSERTI128 $1, X11, Y14, Y14; \
- LOAD_MSG_AVX2_Y15(12, 2, 7, 3)
-
-#define LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() \
- VMOVQ_SI_X11(5*8); \
- VMOVDQU 11*8(SI), X12; \
- VPINSRQ_1_SI_X11(15*8); \
- VINSERTI128 $1, X11, Y12, Y12; \
- VMOVQ_SI_X13(8*8); \
- VMOVQ_SI_X11(2*8); \
- VPINSRQ_1_SI_X13_0; \
- VPINSRQ_1_SI_X11(13*8); \
- VINSERTI128 $1, X11, Y13, Y13; \
- LOAD_MSG_AVX2_Y14(10, 3, 7, 9); \
- LOAD_MSG_AVX2_Y15(14, 6, 1, 4)
-
-#define LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() \
- LOAD_MSG_AVX2_Y12(7, 3, 13, 11); \
- LOAD_MSG_AVX2_Y13(9, 1, 12, 14); \
- LOAD_MSG_AVX2_Y14(2, 5, 4, 15); \
- VMOVQ_SI_X15(6*8); \
- VMOVQ_SI_X11_0; \
- VPINSRQ_1_SI_X15(10*8); \
- VPINSRQ_1_SI_X11(8*8); \
- VINSERTI128 $1, X11, Y15, Y15
-
-#define LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() \
- LOAD_MSG_AVX2_Y12(9, 5, 2, 10); \
- VMOVQ_SI_X13_0; \
- VMOVQ_SI_X11(4*8); \
- VPINSRQ_1_SI_X13(7*8); \
- VPINSRQ_1_SI_X11(15*8); \
- VINSERTI128 $1, X11, Y13, Y13; \
- LOAD_MSG_AVX2_Y14(14, 11, 6, 3); \
- LOAD_MSG_AVX2_Y15(1, 12, 8, 13)
-
-#define LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() \
- VMOVQ_SI_X12(2*8); \
- VMOVQ_SI_X11_0; \
- VPINSRQ_1_SI_X12(6*8); \
- VPINSRQ_1_SI_X11(8*8); \
- VINSERTI128 $1, X11, Y12, Y12; \
- LOAD_MSG_AVX2_Y13(12, 10, 11, 3); \
- LOAD_MSG_AVX2_Y14(4, 7, 15, 1); \
- LOAD_MSG_AVX2_Y15(13, 5, 14, 9)
-
-#define LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() \
- LOAD_MSG_AVX2_Y12(12, 1, 14, 4); \
- LOAD_MSG_AVX2_Y13(5, 15, 13, 10); \
- VMOVQ_SI_X14_0; \
- VPSHUFD $0x4E, 8*8(SI), X11; \
- VPINSRQ_1_SI_X14(6*8); \
- VINSERTI128 $1, X11, Y14, Y14; \
- LOAD_MSG_AVX2_Y15(7, 3, 2, 11)
-
-#define LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() \
- LOAD_MSG_AVX2_Y12(13, 7, 12, 3); \
- LOAD_MSG_AVX2_Y13(11, 14, 1, 9); \
- LOAD_MSG_AVX2_Y14(5, 15, 8, 2); \
- VMOVQ_SI_X15_0; \
- VMOVQ_SI_X11(6*8); \
- VPINSRQ_1_SI_X15(4*8); \
- VPINSRQ_1_SI_X11(10*8); \
- VINSERTI128 $1, X11, Y15, Y15
-
-#define LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() \
- VMOVQ_SI_X12(6*8); \
- VMOVQ_SI_X11(11*8); \
- VPINSRQ_1_SI_X12(14*8); \
- VPINSRQ_1_SI_X11_0; \
- VINSERTI128 $1, X11, Y12, Y12; \
- LOAD_MSG_AVX2_Y13(15, 9, 3, 8); \
- VMOVQ_SI_X11(1*8); \
- VMOVDQU 12*8(SI), X14; \
- VPINSRQ_1_SI_X11(10*8); \
- VINSERTI128 $1, X11, Y14, Y14; \
- VMOVQ_SI_X15(2*8); \
- VMOVDQU 4*8(SI), X11; \
- VPINSRQ_1_SI_X15(7*8); \
- VINSERTI128 $1, X11, Y15, Y15
-
-#define LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() \
- LOAD_MSG_AVX2_Y12(10, 8, 7, 1); \
- VMOVQ_SI_X13(2*8); \
- VPSHUFD $0x4E, 5*8(SI), X11; \
- VPINSRQ_1_SI_X13(4*8); \
- VINSERTI128 $1, X11, Y13, Y13; \
- LOAD_MSG_AVX2_Y14(15, 9, 3, 13); \
- VMOVQ_SI_X15(11*8); \
- VMOVQ_SI_X11(12*8); \
- VPINSRQ_1_SI_X15(14*8); \
- VPINSRQ_1_SI_X11_0; \
- VINSERTI128 $1, X11, Y15, Y15
-
-// func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
-TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment
- MOVQ h+0(FP), AX
- MOVQ c+8(FP), BX
- MOVQ flag+16(FP), CX
- MOVQ blocks_base+24(FP), SI
- MOVQ blocks_len+32(FP), DI
-
- MOVQ SP, DX
- ADDQ $31, DX
- ANDQ $~31, DX
-
- MOVQ CX, 16(DX)
- XORQ CX, CX
- MOVQ CX, 24(DX)
-
- VMOVDQU ·AVX2_c40<>(SB), Y4
- VMOVDQU ·AVX2_c48<>(SB), Y5
-
- VMOVDQU 0(AX), Y8
- VMOVDQU 32(AX), Y9
- VMOVDQU ·AVX2_iv0<>(SB), Y6
- VMOVDQU ·AVX2_iv1<>(SB), Y7
-
- MOVQ 0(BX), R8
- MOVQ 8(BX), R9
- MOVQ R9, 8(DX)
-
-loop:
- ADDQ $128, R8
- MOVQ R8, 0(DX)
- CMPQ R8, $128
- JGE noinc
- INCQ R9
- MOVQ R9, 8(DX)
-
-noinc:
- VMOVDQA Y8, Y0
- VMOVDQA Y9, Y1
- VMOVDQA Y6, Y2
- VPXOR 0(DX), Y7, Y3
-
- LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15()
- VMOVDQA Y12, 32(DX)
- VMOVDQA Y13, 64(DX)
- VMOVDQA Y14, 96(DX)
- VMOVDQA Y15, 128(DX)
- ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
- LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3()
- VMOVDQA Y12, 160(DX)
- VMOVDQA Y13, 192(DX)
- VMOVDQA Y14, 224(DX)
- VMOVDQA Y15, 256(DX)
-
- ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
- LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4()
- ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
- LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8()
- ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
- LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13()
- ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
- LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9()
- ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
- LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11()
- ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
- LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10()
- ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
- LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5()
- ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
- LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0()
- ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
-
- ROUND_AVX2(32(DX), 64(DX), 96(DX), 128(DX), Y10, Y4, Y5)
- ROUND_AVX2(160(DX), 192(DX), 224(DX), 256(DX), Y10, Y4, Y5)
-
- VPXOR Y0, Y8, Y8
- VPXOR Y1, Y9, Y9
- VPXOR Y2, Y8, Y8
- VPXOR Y3, Y9, Y9
-
- LEAQ 128(SI), SI
- SUBQ $128, DI
- JNE loop
-
- MOVQ R8, 0(BX)
- MOVQ R9, 8(BX)
-
- VMOVDQU Y8, 0(AX)
- VMOVDQU Y9, 32(AX)
- VZEROUPPER
-
- RET
-
-#define VPUNPCKLQDQ_X2_X2_X15 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xFA
-#define VPUNPCKLQDQ_X3_X3_X15 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xFB
-#define VPUNPCKLQDQ_X7_X7_X15 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xFF
-#define VPUNPCKLQDQ_X13_X13_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x11; BYTE $0x6C; BYTE $0xFD
-#define VPUNPCKLQDQ_X14_X14_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x09; BYTE $0x6C; BYTE $0xFE
-
-#define VPUNPCKHQDQ_X15_X2_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD7
-#define VPUNPCKHQDQ_X15_X3_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDF
-#define VPUNPCKHQDQ_X15_X6_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE $0xF7
-#define VPUNPCKHQDQ_X15_X7_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFF
-#define VPUNPCKHQDQ_X15_X3_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD7
-#define VPUNPCKHQDQ_X15_X7_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xF7
-#define VPUNPCKHQDQ_X15_X13_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xDF
-#define VPUNPCKHQDQ_X15_X13_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xFF
-
-#define SHUFFLE_AVX() \
- VMOVDQA X6, X13; \
- VMOVDQA X2, X14; \
- VMOVDQA X4, X6; \
- VPUNPCKLQDQ_X13_X13_X15; \
- VMOVDQA X5, X4; \
- VMOVDQA X6, X5; \
- VPUNPCKHQDQ_X15_X7_X6; \
- VPUNPCKLQDQ_X7_X7_X15; \
- VPUNPCKHQDQ_X15_X13_X7; \
- VPUNPCKLQDQ_X3_X3_X15; \
- VPUNPCKHQDQ_X15_X2_X2; \
- VPUNPCKLQDQ_X14_X14_X15; \
- VPUNPCKHQDQ_X15_X3_X3; \
-
-#define SHUFFLE_AVX_INV() \
- VMOVDQA X2, X13; \
- VMOVDQA X4, X14; \
- VPUNPCKLQDQ_X2_X2_X15; \
- VMOVDQA X5, X4; \
- VPUNPCKHQDQ_X15_X3_X2; \
- VMOVDQA X14, X5; \
- VPUNPCKLQDQ_X3_X3_X15; \
- VMOVDQA X6, X14; \
- VPUNPCKHQDQ_X15_X13_X3; \
- VPUNPCKLQDQ_X7_X7_X15; \
- VPUNPCKHQDQ_X15_X6_X6; \
- VPUNPCKLQDQ_X14_X14_X15; \
- VPUNPCKHQDQ_X15_X7_X7; \
-
-#define HALF_ROUND_AVX(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \
- VPADDQ m0, v0, v0; \
- VPADDQ v2, v0, v0; \
- VPADDQ m1, v1, v1; \
- VPADDQ v3, v1, v1; \
- VPXOR v0, v6, v6; \
- VPXOR v1, v7, v7; \
- VPSHUFD $-79, v6, v6; \
- VPSHUFD $-79, v7, v7; \
- VPADDQ v6, v4, v4; \
- VPADDQ v7, v5, v5; \
- VPXOR v4, v2, v2; \
- VPXOR v5, v3, v3; \
- VPSHUFB c40, v2, v2; \
- VPSHUFB c40, v3, v3; \
- VPADDQ m2, v0, v0; \
- VPADDQ v2, v0, v0; \
- VPADDQ m3, v1, v1; \
- VPADDQ v3, v1, v1; \
- VPXOR v0, v6, v6; \
- VPXOR v1, v7, v7; \
- VPSHUFB c48, v6, v6; \
- VPSHUFB c48, v7, v7; \
- VPADDQ v6, v4, v4; \
- VPADDQ v7, v5, v5; \
- VPXOR v4, v2, v2; \
- VPXOR v5, v3, v3; \
- VPADDQ v2, v2, t0; \
- VPSRLQ $63, v2, v2; \
- VPXOR t0, v2, v2; \
- VPADDQ v3, v3, t0; \
- VPSRLQ $63, v3, v3; \
- VPXOR t0, v3, v3
-
-// load msg: X12 = (i0, i1), X13 = (i2, i3), X14 = (i4, i5), X15 = (i6, i7)
-// i0, i1, i2, i3, i4, i5, i6, i7 must not be 0
-#define LOAD_MSG_AVX(i0, i1, i2, i3, i4, i5, i6, i7) \
- VMOVQ_SI_X12(i0*8); \
- VMOVQ_SI_X13(i2*8); \
- VMOVQ_SI_X14(i4*8); \
- VMOVQ_SI_X15(i6*8); \
- VPINSRQ_1_SI_X12(i1*8); \
- VPINSRQ_1_SI_X13(i3*8); \
- VPINSRQ_1_SI_X14(i5*8); \
- VPINSRQ_1_SI_X15(i7*8)
-
-// load msg: X12 = (0, 2), X13 = (4, 6), X14 = (1, 3), X15 = (5, 7)
-#define LOAD_MSG_AVX_0_2_4_6_1_3_5_7() \
- VMOVQ_SI_X12_0; \
- VMOVQ_SI_X13(4*8); \
- VMOVQ_SI_X14(1*8); \
- VMOVQ_SI_X15(5*8); \
- VPINSRQ_1_SI_X12(2*8); \
- VPINSRQ_1_SI_X13(6*8); \
- VPINSRQ_1_SI_X14(3*8); \
- VPINSRQ_1_SI_X15(7*8)
-
-// load msg: X12 = (1, 0), X13 = (11, 5), X14 = (12, 2), X15 = (7, 3)
-#define LOAD_MSG_AVX_1_0_11_5_12_2_7_3() \
- VPSHUFD $0x4E, 0*8(SI), X12; \
- VMOVQ_SI_X13(11*8); \
- VMOVQ_SI_X14(12*8); \
- VMOVQ_SI_X15(7*8); \
- VPINSRQ_1_SI_X13(5*8); \
- VPINSRQ_1_SI_X14(2*8); \
- VPINSRQ_1_SI_X15(3*8)
-
-// load msg: X12 = (11, 12), X13 = (5, 15), X14 = (8, 0), X15 = (2, 13)
-#define LOAD_MSG_AVX_11_12_5_15_8_0_2_13() \
- VMOVDQU 11*8(SI), X12; \
- VMOVQ_SI_X13(5*8); \
- VMOVQ_SI_X14(8*8); \
- VMOVQ_SI_X15(2*8); \
- VPINSRQ_1_SI_X13(15*8); \
- VPINSRQ_1_SI_X14_0; \
- VPINSRQ_1_SI_X15(13*8)
-
-// load msg: X12 = (2, 5), X13 = (4, 15), X14 = (6, 10), X15 = (0, 8)
-#define LOAD_MSG_AVX_2_5_4_15_6_10_0_8() \
- VMOVQ_SI_X12(2*8); \
- VMOVQ_SI_X13(4*8); \
- VMOVQ_SI_X14(6*8); \
- VMOVQ_SI_X15_0; \
- VPINSRQ_1_SI_X12(5*8); \
- VPINSRQ_1_SI_X13(15*8); \
- VPINSRQ_1_SI_X14(10*8); \
- VPINSRQ_1_SI_X15(8*8)
-
-// load msg: X12 = (9, 5), X13 = (2, 10), X14 = (0, 7), X15 = (4, 15)
-#define LOAD_MSG_AVX_9_5_2_10_0_7_4_15() \
- VMOVQ_SI_X12(9*8); \
- VMOVQ_SI_X13(2*8); \
- VMOVQ_SI_X14_0; \
- VMOVQ_SI_X15(4*8); \
- VPINSRQ_1_SI_X12(5*8); \
- VPINSRQ_1_SI_X13(10*8); \
- VPINSRQ_1_SI_X14(7*8); \
- VPINSRQ_1_SI_X15(15*8)
-
-// load msg: X12 = (2, 6), X13 = (0, 8), X14 = (12, 10), X15 = (11, 3)
-#define LOAD_MSG_AVX_2_6_0_8_12_10_11_3() \
- VMOVQ_SI_X12(2*8); \
- VMOVQ_SI_X13_0; \
- VMOVQ_SI_X14(12*8); \
- VMOVQ_SI_X15(11*8); \
- VPINSRQ_1_SI_X12(6*8); \
- VPINSRQ_1_SI_X13(8*8); \
- VPINSRQ_1_SI_X14(10*8); \
- VPINSRQ_1_SI_X15(3*8)
-
-// load msg: X12 = (0, 6), X13 = (9, 8), X14 = (7, 3), X15 = (2, 11)
-#define LOAD_MSG_AVX_0_6_9_8_7_3_2_11() \
- MOVQ 0*8(SI), X12; \
- VPSHUFD $0x4E, 8*8(SI), X13; \
- MOVQ 7*8(SI), X14; \
- MOVQ 2*8(SI), X15; \
- VPINSRQ_1_SI_X12(6*8); \
- VPINSRQ_1_SI_X14(3*8); \
- VPINSRQ_1_SI_X15(11*8)
-
-// load msg: X12 = (6, 14), X13 = (11, 0), X14 = (15, 9), X15 = (3, 8)
-#define LOAD_MSG_AVX_6_14_11_0_15_9_3_8() \
- MOVQ 6*8(SI), X12; \
- MOVQ 11*8(SI), X13; \
- MOVQ 15*8(SI), X14; \
- MOVQ 3*8(SI), X15; \
- VPINSRQ_1_SI_X12(14*8); \
- VPINSRQ_1_SI_X13_0; \
- VPINSRQ_1_SI_X14(9*8); \
- VPINSRQ_1_SI_X15(8*8)
-
-// load msg: X12 = (5, 15), X13 = (8, 2), X14 = (0, 4), X15 = (6, 10)
-#define LOAD_MSG_AVX_5_15_8_2_0_4_6_10() \
- MOVQ 5*8(SI), X12; \
- MOVQ 8*8(SI), X13; \
- MOVQ 0*8(SI), X14; \
- MOVQ 6*8(SI), X15; \
- VPINSRQ_1_SI_X12(15*8); \
- VPINSRQ_1_SI_X13(2*8); \
- VPINSRQ_1_SI_X14(4*8); \
- VPINSRQ_1_SI_X15(10*8)
-
-// load msg: X12 = (12, 13), X13 = (1, 10), X14 = (2, 7), X15 = (4, 5)
-#define LOAD_MSG_AVX_12_13_1_10_2_7_4_5() \
- VMOVDQU 12*8(SI), X12; \
- MOVQ 1*8(SI), X13; \
- MOVQ 2*8(SI), X14; \
- VPINSRQ_1_SI_X13(10*8); \
- VPINSRQ_1_SI_X14(7*8); \
- VMOVDQU 4*8(SI), X15
-
-// load msg: X12 = (15, 9), X13 = (3, 13), X14 = (11, 14), X15 = (12, 0)
-#define LOAD_MSG_AVX_15_9_3_13_11_14_12_0() \
- MOVQ 15*8(SI), X12; \
- MOVQ 3*8(SI), X13; \
- MOVQ 11*8(SI), X14; \
- MOVQ 12*8(SI), X15; \
- VPINSRQ_1_SI_X12(9*8); \
- VPINSRQ_1_SI_X13(13*8); \
- VPINSRQ_1_SI_X14(14*8); \
- VPINSRQ_1_SI_X15_0
-
-// func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
-TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment
- MOVQ h+0(FP), AX
- MOVQ c+8(FP), BX
- MOVQ flag+16(FP), CX
- MOVQ blocks_base+24(FP), SI
- MOVQ blocks_len+32(FP), DI
-
- MOVQ SP, R10
- ADDQ $15, R10
- ANDQ $~15, R10
-
- VMOVDQU ·AVX_c40<>(SB), X0
- VMOVDQU ·AVX_c48<>(SB), X1
- VMOVDQA X0, X8
- VMOVDQA X1, X9
-
- VMOVDQU ·AVX_iv3<>(SB), X0
- VMOVDQA X0, 0(R10)
- XORQ CX, 0(R10) // 0(R10) = ·AVX_iv3 ^ (CX || 0)
-
- VMOVDQU 0(AX), X10
- VMOVDQU 16(AX), X11
- VMOVDQU 32(AX), X2
- VMOVDQU 48(AX), X3
-
- MOVQ 0(BX), R8
- MOVQ 8(BX), R9
-
-loop:
- ADDQ $128, R8
- CMPQ R8, $128
- JGE noinc
- INCQ R9
-
-noinc:
- VMOVQ_R8_X15
- VPINSRQ_1_R9_X15
-
- VMOVDQA X10, X0
- VMOVDQA X11, X1
- VMOVDQU ·AVX_iv0<>(SB), X4
- VMOVDQU ·AVX_iv1<>(SB), X5
- VMOVDQU ·AVX_iv2<>(SB), X6
-
- VPXOR X15, X6, X6
- VMOVDQA 0(R10), X7
-
- LOAD_MSG_AVX_0_2_4_6_1_3_5_7()
- VMOVDQA X12, 16(R10)
- VMOVDQA X13, 32(R10)
- VMOVDQA X14, 48(R10)
- VMOVDQA X15, 64(R10)
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX()
- LOAD_MSG_AVX(8, 10, 12, 14, 9, 11, 13, 15)
- VMOVDQA X12, 80(R10)
- VMOVDQA X13, 96(R10)
- VMOVDQA X14, 112(R10)
- VMOVDQA X15, 128(R10)
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX_INV()
-
- LOAD_MSG_AVX(14, 4, 9, 13, 10, 8, 15, 6)
- VMOVDQA X12, 144(R10)
- VMOVDQA X13, 160(R10)
- VMOVDQA X14, 176(R10)
- VMOVDQA X15, 192(R10)
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX()
- LOAD_MSG_AVX_1_0_11_5_12_2_7_3()
- VMOVDQA X12, 208(R10)
- VMOVDQA X13, 224(R10)
- VMOVDQA X14, 240(R10)
- VMOVDQA X15, 256(R10)
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX_INV()
-
- LOAD_MSG_AVX_11_12_5_15_8_0_2_13()
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX()
- LOAD_MSG_AVX(10, 3, 7, 9, 14, 6, 1, 4)
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX_INV()
-
- LOAD_MSG_AVX(7, 3, 13, 11, 9, 1, 12, 14)
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX()
- LOAD_MSG_AVX_2_5_4_15_6_10_0_8()
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX_INV()
-
- LOAD_MSG_AVX_9_5_2_10_0_7_4_15()
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX()
- LOAD_MSG_AVX(14, 11, 6, 3, 1, 12, 8, 13)
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX_INV()
-
- LOAD_MSG_AVX_2_6_0_8_12_10_11_3()
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX()
- LOAD_MSG_AVX(4, 7, 15, 1, 13, 5, 14, 9)
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX_INV()
-
- LOAD_MSG_AVX(12, 1, 14, 4, 5, 15, 13, 10)
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX()
- LOAD_MSG_AVX_0_6_9_8_7_3_2_11()
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX_INV()
-
- LOAD_MSG_AVX(13, 7, 12, 3, 11, 14, 1, 9)
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX()
- LOAD_MSG_AVX_5_15_8_2_0_4_6_10()
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX_INV()
-
- LOAD_MSG_AVX_6_14_11_0_15_9_3_8()
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX()
- LOAD_MSG_AVX_12_13_1_10_2_7_4_5()
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX_INV()
-
- LOAD_MSG_AVX(10, 8, 7, 1, 2, 4, 6, 5)
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX()
- LOAD_MSG_AVX_15_9_3_13_11_14_12_0()
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX_INV()
-
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X15, X8, X9)
- SHUFFLE_AVX()
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X15, X8, X9)
- SHUFFLE_AVX_INV()
-
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X15, X8, X9)
- SHUFFLE_AVX()
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X15, X8, X9)
- SHUFFLE_AVX_INV()
-
- VMOVDQU 32(AX), X14
- VMOVDQU 48(AX), X15
- VPXOR X0, X10, X10
- VPXOR X1, X11, X11
- VPXOR X2, X14, X14
- VPXOR X3, X15, X15
- VPXOR X4, X10, X10
- VPXOR X5, X11, X11
- VPXOR X6, X14, X2
- VPXOR X7, X15, X3
- VMOVDQU X2, 32(AX)
- VMOVDQU X3, 48(AX)
-
- LEAQ 128(SI), SI
- SUBQ $128, DI
- JNE loop
-
- VMOVDQU X10, 0(AX)
- VMOVDQU X11, 16(AX)
-
- MOVQ R8, 0(BX)
- MOVQ R9, 8(BX)
- VZEROUPPER
-
- RET
diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s
deleted file mode 100644
index adfac00c15c..00000000000
--- a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s
+++ /dev/null
@@ -1,278 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build amd64 && gc && !purego
-
-#include "textflag.h"
-
-DATA ·iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
-DATA ·iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
-GLOBL ·iv0<>(SB), (NOPTR+RODATA), $16
-
-DATA ·iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b
-DATA ·iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1
-GLOBL ·iv1<>(SB), (NOPTR+RODATA), $16
-
-DATA ·iv2<>+0x00(SB)/8, $0x510e527fade682d1
-DATA ·iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
-GLOBL ·iv2<>(SB), (NOPTR+RODATA), $16
-
-DATA ·iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b
-DATA ·iv3<>+0x08(SB)/8, $0x5be0cd19137e2179
-GLOBL ·iv3<>(SB), (NOPTR+RODATA), $16
-
-DATA ·c40<>+0x00(SB)/8, $0x0201000706050403
-DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
-GLOBL ·c40<>(SB), (NOPTR+RODATA), $16
-
-DATA ·c48<>+0x00(SB)/8, $0x0100070605040302
-DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
-GLOBL ·c48<>(SB), (NOPTR+RODATA), $16
-
-#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \
- MOVO v4, t1; \
- MOVO v5, v4; \
- MOVO t1, v5; \
- MOVO v6, t1; \
- PUNPCKLQDQ v6, t2; \
- PUNPCKHQDQ v7, v6; \
- PUNPCKHQDQ t2, v6; \
- PUNPCKLQDQ v7, t2; \
- MOVO t1, v7; \
- MOVO v2, t1; \
- PUNPCKHQDQ t2, v7; \
- PUNPCKLQDQ v3, t2; \
- PUNPCKHQDQ t2, v2; \
- PUNPCKLQDQ t1, t2; \
- PUNPCKHQDQ t2, v3
-
-#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \
- MOVO v4, t1; \
- MOVO v5, v4; \
- MOVO t1, v5; \
- MOVO v2, t1; \
- PUNPCKLQDQ v2, t2; \
- PUNPCKHQDQ v3, v2; \
- PUNPCKHQDQ t2, v2; \
- PUNPCKLQDQ v3, t2; \
- MOVO t1, v3; \
- MOVO v6, t1; \
- PUNPCKHQDQ t2, v3; \
- PUNPCKLQDQ v7, t2; \
- PUNPCKHQDQ t2, v6; \
- PUNPCKLQDQ t1, t2; \
- PUNPCKHQDQ t2, v7
-
-#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \
- PADDQ m0, v0; \
- PADDQ m1, v1; \
- PADDQ v2, v0; \
- PADDQ v3, v1; \
- PXOR v0, v6; \
- PXOR v1, v7; \
- PSHUFD $0xB1, v6, v6; \
- PSHUFD $0xB1, v7, v7; \
- PADDQ v6, v4; \
- PADDQ v7, v5; \
- PXOR v4, v2; \
- PXOR v5, v3; \
- PSHUFB c40, v2; \
- PSHUFB c40, v3; \
- PADDQ m2, v0; \
- PADDQ m3, v1; \
- PADDQ v2, v0; \
- PADDQ v3, v1; \
- PXOR v0, v6; \
- PXOR v1, v7; \
- PSHUFB c48, v6; \
- PSHUFB c48, v7; \
- PADDQ v6, v4; \
- PADDQ v7, v5; \
- PXOR v4, v2; \
- PXOR v5, v3; \
- MOVOU v2, t0; \
- PADDQ v2, t0; \
- PSRLQ $63, v2; \
- PXOR t0, v2; \
- MOVOU v3, t0; \
- PADDQ v3, t0; \
- PSRLQ $63, v3; \
- PXOR t0, v3
-
-#define LOAD_MSG(m0, m1, m2, m3, src, i0, i1, i2, i3, i4, i5, i6, i7) \
- MOVQ i0*8(src), m0; \
- PINSRQ $1, i1*8(src), m0; \
- MOVQ i2*8(src), m1; \
- PINSRQ $1, i3*8(src), m1; \
- MOVQ i4*8(src), m2; \
- PINSRQ $1, i5*8(src), m2; \
- MOVQ i6*8(src), m3; \
- PINSRQ $1, i7*8(src), m3
-
-// func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
-TEXT ·hashBlocksSSE4(SB), 4, $288-48 // frame size = 272 + 16 byte alignment
- MOVQ h+0(FP), AX
- MOVQ c+8(FP), BX
- MOVQ flag+16(FP), CX
- MOVQ blocks_base+24(FP), SI
- MOVQ blocks_len+32(FP), DI
-
- MOVQ SP, R10
- ADDQ $15, R10
- ANDQ $~15, R10
-
- MOVOU ·iv3<>(SB), X0
- MOVO X0, 0(R10)
- XORQ CX, 0(R10) // 0(R10) = ·iv3 ^ (CX || 0)
-
- MOVOU ·c40<>(SB), X13
- MOVOU ·c48<>(SB), X14
-
- MOVOU 0(AX), X12
- MOVOU 16(AX), X15
-
- MOVQ 0(BX), R8
- MOVQ 8(BX), R9
-
-loop:
- ADDQ $128, R8
- CMPQ R8, $128
- JGE noinc
- INCQ R9
-
-noinc:
- MOVQ R8, X8
- PINSRQ $1, R9, X8
-
- MOVO X12, X0
- MOVO X15, X1
- MOVOU 32(AX), X2
- MOVOU 48(AX), X3
- MOVOU ·iv0<>(SB), X4
- MOVOU ·iv1<>(SB), X5
- MOVOU ·iv2<>(SB), X6
-
- PXOR X8, X6
- MOVO 0(R10), X7
-
- LOAD_MSG(X8, X9, X10, X11, SI, 0, 2, 4, 6, 1, 3, 5, 7)
- MOVO X8, 16(R10)
- MOVO X9, 32(R10)
- MOVO X10, 48(R10)
- MOVO X11, 64(R10)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
- LOAD_MSG(X8, X9, X10, X11, SI, 8, 10, 12, 14, 9, 11, 13, 15)
- MOVO X8, 80(R10)
- MOVO X9, 96(R10)
- MOVO X10, 112(R10)
- MOVO X11, 128(R10)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
- LOAD_MSG(X8, X9, X10, X11, SI, 14, 4, 9, 13, 10, 8, 15, 6)
- MOVO X8, 144(R10)
- MOVO X9, 160(R10)
- MOVO X10, 176(R10)
- MOVO X11, 192(R10)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
- LOAD_MSG(X8, X9, X10, X11, SI, 1, 0, 11, 5, 12, 2, 7, 3)
- MOVO X8, 208(R10)
- MOVO X9, 224(R10)
- MOVO X10, 240(R10)
- MOVO X11, 256(R10)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
- LOAD_MSG(X8, X9, X10, X11, SI, 11, 12, 5, 15, 8, 0, 2, 13)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
- LOAD_MSG(X8, X9, X10, X11, SI, 10, 3, 7, 9, 14, 6, 1, 4)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
- LOAD_MSG(X8, X9, X10, X11, SI, 7, 3, 13, 11, 9, 1, 12, 14)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
- LOAD_MSG(X8, X9, X10, X11, SI, 2, 5, 4, 15, 6, 10, 0, 8)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
- LOAD_MSG(X8, X9, X10, X11, SI, 9, 5, 2, 10, 0, 7, 4, 15)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
- LOAD_MSG(X8, X9, X10, X11, SI, 14, 11, 6, 3, 1, 12, 8, 13)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
- LOAD_MSG(X8, X9, X10, X11, SI, 2, 6, 0, 8, 12, 10, 11, 3)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
- LOAD_MSG(X8, X9, X10, X11, SI, 4, 7, 15, 1, 13, 5, 14, 9)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
- LOAD_MSG(X8, X9, X10, X11, SI, 12, 1, 14, 4, 5, 15, 13, 10)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
- LOAD_MSG(X8, X9, X10, X11, SI, 0, 6, 9, 8, 7, 3, 2, 11)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
- LOAD_MSG(X8, X9, X10, X11, SI, 13, 7, 12, 3, 11, 14, 1, 9)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
- LOAD_MSG(X8, X9, X10, X11, SI, 5, 15, 8, 2, 0, 4, 6, 10)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
- LOAD_MSG(X8, X9, X10, X11, SI, 6, 14, 11, 0, 15, 9, 3, 8)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
- LOAD_MSG(X8, X9, X10, X11, SI, 12, 13, 1, 10, 2, 7, 4, 5)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
- LOAD_MSG(X8, X9, X10, X11, SI, 10, 8, 7, 1, 2, 4, 6, 5)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
- LOAD_MSG(X8, X9, X10, X11, SI, 15, 9, 3, 13, 11, 14, 12, 0)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X11, X13, X14)
- SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X11, X13, X14)
- SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X11, X13, X14)
- SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X11, X13, X14)
- SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
- MOVOU 32(AX), X10
- MOVOU 48(AX), X11
- PXOR X0, X12
- PXOR X1, X15
- PXOR X2, X10
- PXOR X3, X11
- PXOR X4, X12
- PXOR X5, X15
- PXOR X6, X10
- PXOR X7, X11
- MOVOU X10, 32(AX)
- MOVOU X11, 48(AX)
-
- LEAQ 128(SI), SI
- SUBQ $128, DI
- JNE loop
-
- MOVOU X12, 0(AX)
- MOVOU X15, 16(AX)
-
- MOVQ R8, 0(BX)
- MOVQ R9, 8(BX)
-
- RET
diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go
deleted file mode 100644
index 3168a8aa3c8..00000000000
--- a/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package blake2b
-
-import (
- "encoding/binary"
- "math/bits"
-)
-
-// the precomputed values for BLAKE2b
-// there are 12 16-byte arrays - one for each round
-// the entries are calculated from the sigma constants.
-var precomputed = [12][16]byte{
- {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15},
- {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3},
- {11, 12, 5, 15, 8, 0, 2, 13, 10, 3, 7, 9, 14, 6, 1, 4},
- {7, 3, 13, 11, 9, 1, 12, 14, 2, 5, 4, 15, 6, 10, 0, 8},
- {9, 5, 2, 10, 0, 7, 4, 15, 14, 11, 6, 3, 1, 12, 8, 13},
- {2, 6, 0, 8, 12, 10, 11, 3, 4, 7, 15, 1, 13, 5, 14, 9},
- {12, 1, 14, 4, 5, 15, 13, 10, 0, 6, 9, 8, 7, 3, 2, 11},
- {13, 7, 12, 3, 11, 14, 1, 9, 5, 15, 8, 2, 0, 4, 6, 10},
- {6, 14, 11, 0, 15, 9, 3, 8, 12, 13, 1, 10, 2, 7, 4, 5},
- {10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0},
- {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, // equal to the first
- {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, // equal to the second
-}
-
-func hashBlocksGeneric(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) {
- var m [16]uint64
- c0, c1 := c[0], c[1]
-
- for i := 0; i < len(blocks); {
- c0 += BlockSize
- if c0 < BlockSize {
- c1++
- }
-
- v0, v1, v2, v3, v4, v5, v6, v7 := h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7]
- v8, v9, v10, v11, v12, v13, v14, v15 := iv[0], iv[1], iv[2], iv[3], iv[4], iv[5], iv[6], iv[7]
- v12 ^= c0
- v13 ^= c1
- v14 ^= flag
-
- for j := range m {
- m[j] = binary.LittleEndian.Uint64(blocks[i:])
- i += 8
- }
-
- for j := range precomputed {
- s := &(precomputed[j])
-
- v0 += m[s[0]]
- v0 += v4
- v12 ^= v0
- v12 = bits.RotateLeft64(v12, -32)
- v8 += v12
- v4 ^= v8
- v4 = bits.RotateLeft64(v4, -24)
- v1 += m[s[1]]
- v1 += v5
- v13 ^= v1
- v13 = bits.RotateLeft64(v13, -32)
- v9 += v13
- v5 ^= v9
- v5 = bits.RotateLeft64(v5, -24)
- v2 += m[s[2]]
- v2 += v6
- v14 ^= v2
- v14 = bits.RotateLeft64(v14, -32)
- v10 += v14
- v6 ^= v10
- v6 = bits.RotateLeft64(v6, -24)
- v3 += m[s[3]]
- v3 += v7
- v15 ^= v3
- v15 = bits.RotateLeft64(v15, -32)
- v11 += v15
- v7 ^= v11
- v7 = bits.RotateLeft64(v7, -24)
-
- v0 += m[s[4]]
- v0 += v4
- v12 ^= v0
- v12 = bits.RotateLeft64(v12, -16)
- v8 += v12
- v4 ^= v8
- v4 = bits.RotateLeft64(v4, -63)
- v1 += m[s[5]]
- v1 += v5
- v13 ^= v1
- v13 = bits.RotateLeft64(v13, -16)
- v9 += v13
- v5 ^= v9
- v5 = bits.RotateLeft64(v5, -63)
- v2 += m[s[6]]
- v2 += v6
- v14 ^= v2
- v14 = bits.RotateLeft64(v14, -16)
- v10 += v14
- v6 ^= v10
- v6 = bits.RotateLeft64(v6, -63)
- v3 += m[s[7]]
- v3 += v7
- v15 ^= v3
- v15 = bits.RotateLeft64(v15, -16)
- v11 += v15
- v7 ^= v11
- v7 = bits.RotateLeft64(v7, -63)
-
- v0 += m[s[8]]
- v0 += v5
- v15 ^= v0
- v15 = bits.RotateLeft64(v15, -32)
- v10 += v15
- v5 ^= v10
- v5 = bits.RotateLeft64(v5, -24)
- v1 += m[s[9]]
- v1 += v6
- v12 ^= v1
- v12 = bits.RotateLeft64(v12, -32)
- v11 += v12
- v6 ^= v11
- v6 = bits.RotateLeft64(v6, -24)
- v2 += m[s[10]]
- v2 += v7
- v13 ^= v2
- v13 = bits.RotateLeft64(v13, -32)
- v8 += v13
- v7 ^= v8
- v7 = bits.RotateLeft64(v7, -24)
- v3 += m[s[11]]
- v3 += v4
- v14 ^= v3
- v14 = bits.RotateLeft64(v14, -32)
- v9 += v14
- v4 ^= v9
- v4 = bits.RotateLeft64(v4, -24)
-
- v0 += m[s[12]]
- v0 += v5
- v15 ^= v0
- v15 = bits.RotateLeft64(v15, -16)
- v10 += v15
- v5 ^= v10
- v5 = bits.RotateLeft64(v5, -63)
- v1 += m[s[13]]
- v1 += v6
- v12 ^= v1
- v12 = bits.RotateLeft64(v12, -16)
- v11 += v12
- v6 ^= v11
- v6 = bits.RotateLeft64(v6, -63)
- v2 += m[s[14]]
- v2 += v7
- v13 ^= v2
- v13 = bits.RotateLeft64(v13, -16)
- v8 += v13
- v7 ^= v8
- v7 = bits.RotateLeft64(v7, -63)
- v3 += m[s[15]]
- v3 += v4
- v14 ^= v3
- v14 = bits.RotateLeft64(v14, -16)
- v9 += v14
- v4 ^= v9
- v4 = bits.RotateLeft64(v4, -63)
-
- }
-
- h[0] ^= v0 ^ v8
- h[1] ^= v1 ^ v9
- h[2] ^= v2 ^ v10
- h[3] ^= v3 ^ v11
- h[4] ^= v4 ^ v12
- h[5] ^= v5 ^ v13
- h[6] ^= v6 ^ v14
- h[7] ^= v7 ^ v15
- }
- c[0], c[1] = c0, c1
-}
diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go
deleted file mode 100644
index 6e28668cd19..00000000000
--- a/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !amd64 || purego || !gc
-
-package blake2b
-
-func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) {
- hashBlocksGeneric(h, c, flag, blocks)
-}
diff --git a/vendor/golang.org/x/crypto/blake2b/blake2x.go b/vendor/golang.org/x/crypto/blake2b/blake2x.go
deleted file mode 100644
index 52c414db0e6..00000000000
--- a/vendor/golang.org/x/crypto/blake2b/blake2x.go
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package blake2b
-
-import (
- "encoding/binary"
- "errors"
- "io"
-)
-
-// XOF defines the interface to hash functions that
-// support arbitrary-length output.
-type XOF interface {
- // Write absorbs more data into the hash's state. It panics if called
- // after Read.
- io.Writer
-
- // Read reads more output from the hash. It returns io.EOF if the limit
- // has been reached.
- io.Reader
-
- // Clone returns a copy of the XOF in its current state.
- Clone() XOF
-
- // Reset resets the XOF to its initial state.
- Reset()
-}
-
-// OutputLengthUnknown can be used as the size argument to NewXOF to indicate
-// the length of the output is not known in advance.
-const OutputLengthUnknown = 0
-
-// magicUnknownOutputLength is a magic value for the output size that indicates
-// an unknown number of output bytes.
-const magicUnknownOutputLength = (1 << 32) - 1
-
-// maxOutputLength is the absolute maximum number of bytes to produce when the
-// number of output bytes is unknown.
-const maxOutputLength = (1 << 32) * 64
-
-// NewXOF creates a new variable-output-length hash. The hash either produce a
-// known number of bytes (1 <= size < 2**32-1), or an unknown number of bytes
-// (size == OutputLengthUnknown). In the latter case, an absolute limit of
-// 256GiB applies.
-//
-// A non-nil key turns the hash into a MAC. The key must between
-// zero and 32 bytes long.
-func NewXOF(size uint32, key []byte) (XOF, error) {
- if len(key) > Size {
- return nil, errKeySize
- }
- if size == magicUnknownOutputLength {
- // 2^32-1 indicates an unknown number of bytes and thus isn't a
- // valid length.
- return nil, errors.New("blake2b: XOF length too large")
- }
- if size == OutputLengthUnknown {
- size = magicUnknownOutputLength
- }
- x := &xof{
- d: digest{
- size: Size,
- keyLen: len(key),
- },
- length: size,
- }
- copy(x.d.key[:], key)
- x.Reset()
- return x, nil
-}
-
-type xof struct {
- d digest
- length uint32
- remaining uint64
- cfg, root, block [Size]byte
- offset int
- nodeOffset uint32
- readMode bool
-}
-
-func (x *xof) Write(p []byte) (n int, err error) {
- if x.readMode {
- panic("blake2b: write to XOF after read")
- }
- return x.d.Write(p)
-}
-
-func (x *xof) Clone() XOF {
- clone := *x
- return &clone
-}
-
-func (x *xof) Reset() {
- x.cfg[0] = byte(Size)
- binary.LittleEndian.PutUint32(x.cfg[4:], uint32(Size)) // leaf length
- binary.LittleEndian.PutUint32(x.cfg[12:], x.length) // XOF length
- x.cfg[17] = byte(Size) // inner hash size
-
- x.d.Reset()
- x.d.h[1] ^= uint64(x.length) << 32
-
- x.remaining = uint64(x.length)
- if x.remaining == magicUnknownOutputLength {
- x.remaining = maxOutputLength
- }
- x.offset, x.nodeOffset = 0, 0
- x.readMode = false
-}
-
-func (x *xof) Read(p []byte) (n int, err error) {
- if !x.readMode {
- x.d.finalize(&x.root)
- x.readMode = true
- }
-
- if x.remaining == 0 {
- return 0, io.EOF
- }
-
- n = len(p)
- if uint64(n) > x.remaining {
- n = int(x.remaining)
- p = p[:n]
- }
-
- if x.offset > 0 {
- blockRemaining := Size - x.offset
- if n < blockRemaining {
- x.offset += copy(p, x.block[x.offset:])
- x.remaining -= uint64(n)
- return
- }
- copy(p, x.block[x.offset:])
- p = p[blockRemaining:]
- x.offset = 0
- x.remaining -= uint64(blockRemaining)
- }
-
- for len(p) >= Size {
- binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset)
- x.nodeOffset++
-
- x.d.initConfig(&x.cfg)
- x.d.Write(x.root[:])
- x.d.finalize(&x.block)
-
- copy(p, x.block[:])
- p = p[Size:]
- x.remaining -= uint64(Size)
- }
-
- if todo := len(p); todo > 0 {
- if x.remaining < uint64(Size) {
- x.cfg[0] = byte(x.remaining)
- }
- binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset)
- x.nodeOffset++
-
- x.d.initConfig(&x.cfg)
- x.d.Write(x.root[:])
- x.d.finalize(&x.block)
-
- x.offset = copy(p, x.block[:todo])
- x.remaining -= uint64(todo)
- }
- return
-}
-
-func (d *digest) initConfig(cfg *[Size]byte) {
- d.offset, d.c[0], d.c[1] = 0, 0, 0
- for i := range d.h {
- d.h[i] = iv[i] ^ binary.LittleEndian.Uint64(cfg[i*8:])
- }
-}
diff --git a/vendor/golang.org/x/crypto/blake2b/register.go b/vendor/golang.org/x/crypto/blake2b/register.go
deleted file mode 100644
index 54e446e1d2c..00000000000
--- a/vendor/golang.org/x/crypto/blake2b/register.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package blake2b
-
-import (
- "crypto"
- "hash"
-)
-
-func init() {
- newHash256 := func() hash.Hash {
- h, _ := New256(nil)
- return h
- }
- newHash384 := func() hash.Hash {
- h, _ := New384(nil)
- return h
- }
-
- newHash512 := func() hash.Hash {
- h, _ := New512(nil)
- return h
- }
-
- crypto.RegisterHash(crypto.BLAKE2b_256, newHash256)
- crypto.RegisterHash(crypto.BLAKE2b_384, newHash384)
- crypto.RegisterHash(crypto.BLAKE2b_512, newHash512)
-}
diff --git a/vendor/golang.org/x/crypto/cast5/cast5.go b/vendor/golang.org/x/crypto/cast5/cast5.go
deleted file mode 100644
index 016e90215cd..00000000000
--- a/vendor/golang.org/x/crypto/cast5/cast5.go
+++ /dev/null
@@ -1,536 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package cast5 implements CAST5, as defined in RFC 2144.
-//
-// CAST5 is a legacy cipher and its short block size makes it vulnerable to
-// birthday bound attacks (see https://sweet32.info). It should only be used
-// where compatibility with legacy systems, not security, is the goal.
-//
-// Deprecated: any new system should use AES (from crypto/aes, if necessary in
-// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from
-// golang.org/x/crypto/chacha20poly1305).
-package cast5
-
-import (
- "errors"
- "math/bits"
-)
-
-const BlockSize = 8
-const KeySize = 16
-
-type Cipher struct {
- masking [16]uint32
- rotate [16]uint8
-}
-
-func NewCipher(key []byte) (c *Cipher, err error) {
- if len(key) != KeySize {
- return nil, errors.New("CAST5: keys must be 16 bytes")
- }
-
- c = new(Cipher)
- c.keySchedule(key)
- return
-}
-
-func (c *Cipher) BlockSize() int {
- return BlockSize
-}
-
-func (c *Cipher) Encrypt(dst, src []byte) {
- l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
- r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
-
- l, r = r, l^f1(r, c.masking[0], c.rotate[0])
- l, r = r, l^f2(r, c.masking[1], c.rotate[1])
- l, r = r, l^f3(r, c.masking[2], c.rotate[2])
- l, r = r, l^f1(r, c.masking[3], c.rotate[3])
-
- l, r = r, l^f2(r, c.masking[4], c.rotate[4])
- l, r = r, l^f3(r, c.masking[5], c.rotate[5])
- l, r = r, l^f1(r, c.masking[6], c.rotate[6])
- l, r = r, l^f2(r, c.masking[7], c.rotate[7])
-
- l, r = r, l^f3(r, c.masking[8], c.rotate[8])
- l, r = r, l^f1(r, c.masking[9], c.rotate[9])
- l, r = r, l^f2(r, c.masking[10], c.rotate[10])
- l, r = r, l^f3(r, c.masking[11], c.rotate[11])
-
- l, r = r, l^f1(r, c.masking[12], c.rotate[12])
- l, r = r, l^f2(r, c.masking[13], c.rotate[13])
- l, r = r, l^f3(r, c.masking[14], c.rotate[14])
- l, r = r, l^f1(r, c.masking[15], c.rotate[15])
-
- dst[0] = uint8(r >> 24)
- dst[1] = uint8(r >> 16)
- dst[2] = uint8(r >> 8)
- dst[3] = uint8(r)
- dst[4] = uint8(l >> 24)
- dst[5] = uint8(l >> 16)
- dst[6] = uint8(l >> 8)
- dst[7] = uint8(l)
-}
-
-func (c *Cipher) Decrypt(dst, src []byte) {
- l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
- r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
-
- l, r = r, l^f1(r, c.masking[15], c.rotate[15])
- l, r = r, l^f3(r, c.masking[14], c.rotate[14])
- l, r = r, l^f2(r, c.masking[13], c.rotate[13])
- l, r = r, l^f1(r, c.masking[12], c.rotate[12])
-
- l, r = r, l^f3(r, c.masking[11], c.rotate[11])
- l, r = r, l^f2(r, c.masking[10], c.rotate[10])
- l, r = r, l^f1(r, c.masking[9], c.rotate[9])
- l, r = r, l^f3(r, c.masking[8], c.rotate[8])
-
- l, r = r, l^f2(r, c.masking[7], c.rotate[7])
- l, r = r, l^f1(r, c.masking[6], c.rotate[6])
- l, r = r, l^f3(r, c.masking[5], c.rotate[5])
- l, r = r, l^f2(r, c.masking[4], c.rotate[4])
-
- l, r = r, l^f1(r, c.masking[3], c.rotate[3])
- l, r = r, l^f3(r, c.masking[2], c.rotate[2])
- l, r = r, l^f2(r, c.masking[1], c.rotate[1])
- l, r = r, l^f1(r, c.masking[0], c.rotate[0])
-
- dst[0] = uint8(r >> 24)
- dst[1] = uint8(r >> 16)
- dst[2] = uint8(r >> 8)
- dst[3] = uint8(r)
- dst[4] = uint8(l >> 24)
- dst[5] = uint8(l >> 16)
- dst[6] = uint8(l >> 8)
- dst[7] = uint8(l)
-}
-
-type keyScheduleA [4][7]uint8
-type keyScheduleB [4][5]uint8
-
-// keyScheduleRound contains the magic values for a round of the key schedule.
-// The keyScheduleA deals with the lines like:
-// z0z1z2z3 = x0x1x2x3 ^ S5[xD] ^ S6[xF] ^ S7[xC] ^ S8[xE] ^ S7[x8]
-// Conceptually, both x and z are in the same array, x first. The first
-// element describes which word of this array gets written to and the
-// second, which word gets read. So, for the line above, it's "4, 0", because
-// it's writing to the first word of z, which, being after x, is word 4, and
-// reading from the first word of x: word 0.
-//
-// Next are the indexes into the S-boxes. Now the array is treated as bytes. So
-// "xD" is 0xd. The first byte of z is written as "16 + 0", just to be clear
-// that it's z that we're indexing.
-//
-// keyScheduleB deals with lines like:
-// K1 = S5[z8] ^ S6[z9] ^ S7[z7] ^ S8[z6] ^ S5[z2]
-// "K1" is ignored because key words are always written in order. So the five
-// elements are the S-box indexes. They use the same form as in keyScheduleA,
-// above.
-
-type keyScheduleRound struct{}
-type keySchedule []keyScheduleRound
-
-var schedule = []struct {
- a keyScheduleA
- b keyScheduleB
-}{
- {
- keyScheduleA{
- {4, 0, 0xd, 0xf, 0xc, 0xe, 0x8},
- {5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa},
- {6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9},
- {7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb},
- },
- keyScheduleB{
- {16 + 8, 16 + 9, 16 + 7, 16 + 6, 16 + 2},
- {16 + 0xa, 16 + 0xb, 16 + 5, 16 + 4, 16 + 6},
- {16 + 0xc, 16 + 0xd, 16 + 3, 16 + 2, 16 + 9},
- {16 + 0xe, 16 + 0xf, 16 + 1, 16 + 0, 16 + 0xc},
- },
- },
- {
- keyScheduleA{
- {0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0},
- {1, 4, 0, 2, 1, 3, 16 + 2},
- {2, 5, 7, 6, 5, 4, 16 + 1},
- {3, 7, 0xa, 9, 0xb, 8, 16 + 3},
- },
- keyScheduleB{
- {3, 2, 0xc, 0xd, 8},
- {1, 0, 0xe, 0xf, 0xd},
- {7, 6, 8, 9, 3},
- {5, 4, 0xa, 0xb, 7},
- },
- },
- {
- keyScheduleA{
- {4, 0, 0xd, 0xf, 0xc, 0xe, 8},
- {5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa},
- {6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9},
- {7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb},
- },
- keyScheduleB{
- {16 + 3, 16 + 2, 16 + 0xc, 16 + 0xd, 16 + 9},
- {16 + 1, 16 + 0, 16 + 0xe, 16 + 0xf, 16 + 0xc},
- {16 + 7, 16 + 6, 16 + 8, 16 + 9, 16 + 2},
- {16 + 5, 16 + 4, 16 + 0xa, 16 + 0xb, 16 + 6},
- },
- },
- {
- keyScheduleA{
- {0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0},
- {1, 4, 0, 2, 1, 3, 16 + 2},
- {2, 5, 7, 6, 5, 4, 16 + 1},
- {3, 7, 0xa, 9, 0xb, 8, 16 + 3},
- },
- keyScheduleB{
- {8, 9, 7, 6, 3},
- {0xa, 0xb, 5, 4, 7},
- {0xc, 0xd, 3, 2, 8},
- {0xe, 0xf, 1, 0, 0xd},
- },
- },
-}
-
-func (c *Cipher) keySchedule(in []byte) {
- var t [8]uint32
- var k [32]uint32
-
- for i := 0; i < 4; i++ {
- j := i * 4
- t[i] = uint32(in[j])<<24 | uint32(in[j+1])<<16 | uint32(in[j+2])<<8 | uint32(in[j+3])
- }
-
- x := []byte{6, 7, 4, 5}
- ki := 0
-
- for half := 0; half < 2; half++ {
- for _, round := range schedule {
- for j := 0; j < 4; j++ {
- var a [7]uint8
- copy(a[:], round.a[j][:])
- w := t[a[1]]
- w ^= sBox[4][(t[a[2]>>2]>>(24-8*(a[2]&3)))&0xff]
- w ^= sBox[5][(t[a[3]>>2]>>(24-8*(a[3]&3)))&0xff]
- w ^= sBox[6][(t[a[4]>>2]>>(24-8*(a[4]&3)))&0xff]
- w ^= sBox[7][(t[a[5]>>2]>>(24-8*(a[5]&3)))&0xff]
- w ^= sBox[x[j]][(t[a[6]>>2]>>(24-8*(a[6]&3)))&0xff]
- t[a[0]] = w
- }
-
- for j := 0; j < 4; j++ {
- var b [5]uint8
- copy(b[:], round.b[j][:])
- w := sBox[4][(t[b[0]>>2]>>(24-8*(b[0]&3)))&0xff]
- w ^= sBox[5][(t[b[1]>>2]>>(24-8*(b[1]&3)))&0xff]
- w ^= sBox[6][(t[b[2]>>2]>>(24-8*(b[2]&3)))&0xff]
- w ^= sBox[7][(t[b[3]>>2]>>(24-8*(b[3]&3)))&0xff]
- w ^= sBox[4+j][(t[b[4]>>2]>>(24-8*(b[4]&3)))&0xff]
- k[ki] = w
- ki++
- }
- }
- }
-
- for i := 0; i < 16; i++ {
- c.masking[i] = k[i]
- c.rotate[i] = uint8(k[16+i] & 0x1f)
- }
-}
-
-// These are the three 'f' functions. See RFC 2144, section 2.2.
-func f1(d, m uint32, r uint8) uint32 {
- t := m + d
- I := bits.RotateLeft32(t, int(r))
- return ((sBox[0][I>>24] ^ sBox[1][(I>>16)&0xff]) - sBox[2][(I>>8)&0xff]) + sBox[3][I&0xff]
-}
-
-func f2(d, m uint32, r uint8) uint32 {
- t := m ^ d
- I := bits.RotateLeft32(t, int(r))
- return ((sBox[0][I>>24] - sBox[1][(I>>16)&0xff]) + sBox[2][(I>>8)&0xff]) ^ sBox[3][I&0xff]
-}
-
-func f3(d, m uint32, r uint8) uint32 {
- t := m - d
- I := bits.RotateLeft32(t, int(r))
- return ((sBox[0][I>>24] + sBox[1][(I>>16)&0xff]) ^ sBox[2][(I>>8)&0xff]) - sBox[3][I&0xff]
-}
-
-var sBox = [8][256]uint32{
- {
- 0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f, 0x9c004dd3, 0x6003e540, 0xcf9fc949,
- 0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, 0x6e63a0e0, 0x15c361d2, 0xc2e7661d, 0x22d4ff8e,
- 0x28683b6f, 0xc07fd059, 0xff2379c8, 0x775f50e2, 0x43c340d3, 0xdf2f8656, 0x887ca41a, 0xa2d2bd2d,
- 0xa1c9e0d6, 0x346c4819, 0x61b76d87, 0x22540f2f, 0x2abe32e1, 0xaa54166b, 0x22568e3a, 0xa2d341d0,
- 0x66db40c8, 0xa784392f, 0x004dff2f, 0x2db9d2de, 0x97943fac, 0x4a97c1d8, 0x527644b7, 0xb5f437a7,
- 0xb82cbaef, 0xd751d159, 0x6ff7f0ed, 0x5a097a1f, 0x827b68d0, 0x90ecf52e, 0x22b0c054, 0xbc8e5935,
- 0x4b6d2f7f, 0x50bb64a2, 0xd2664910, 0xbee5812d, 0xb7332290, 0xe93b159f, 0xb48ee411, 0x4bff345d,
- 0xfd45c240, 0xad31973f, 0xc4f6d02e, 0x55fc8165, 0xd5b1caad, 0xa1ac2dae, 0xa2d4b76d, 0xc19b0c50,
- 0x882240f2, 0x0c6e4f38, 0xa4e4bfd7, 0x4f5ba272, 0x564c1d2f, 0xc59c5319, 0xb949e354, 0xb04669fe,
- 0xb1b6ab8a, 0xc71358dd, 0x6385c545, 0x110f935d, 0x57538ad5, 0x6a390493, 0xe63d37e0, 0x2a54f6b3,
- 0x3a787d5f, 0x6276a0b5, 0x19a6fcdf, 0x7a42206a, 0x29f9d4d5, 0xf61b1891, 0xbb72275e, 0xaa508167,
- 0x38901091, 0xc6b505eb, 0x84c7cb8c, 0x2ad75a0f, 0x874a1427, 0xa2d1936b, 0x2ad286af, 0xaa56d291,
- 0xd7894360, 0x425c750d, 0x93b39e26, 0x187184c9, 0x6c00b32d, 0x73e2bb14, 0xa0bebc3c, 0x54623779,
- 0x64459eab, 0x3f328b82, 0x7718cf82, 0x59a2cea6, 0x04ee002e, 0x89fe78e6, 0x3fab0950, 0x325ff6c2,
- 0x81383f05, 0x6963c5c8, 0x76cb5ad6, 0xd49974c9, 0xca180dcf, 0x380782d5, 0xc7fa5cf6, 0x8ac31511,
- 0x35e79e13, 0x47da91d0, 0xf40f9086, 0xa7e2419e, 0x31366241, 0x051ef495, 0xaa573b04, 0x4a805d8d,
- 0x548300d0, 0x00322a3c, 0xbf64cddf, 0xba57a68e, 0x75c6372b, 0x50afd341, 0xa7c13275, 0x915a0bf5,
- 0x6b54bfab, 0x2b0b1426, 0xab4cc9d7, 0x449ccd82, 0xf7fbf265, 0xab85c5f3, 0x1b55db94, 0xaad4e324,
- 0xcfa4bd3f, 0x2deaa3e2, 0x9e204d02, 0xc8bd25ac, 0xeadf55b3, 0xd5bd9e98, 0xe31231b2, 0x2ad5ad6c,
- 0x954329de, 0xadbe4528, 0xd8710f69, 0xaa51c90f, 0xaa786bf6, 0x22513f1e, 0xaa51a79b, 0x2ad344cc,
- 0x7b5a41f0, 0xd37cfbad, 0x1b069505, 0x41ece491, 0xb4c332e6, 0x032268d4, 0xc9600acc, 0xce387e6d,
- 0xbf6bb16c, 0x6a70fb78, 0x0d03d9c9, 0xd4df39de, 0xe01063da, 0x4736f464, 0x5ad328d8, 0xb347cc96,
- 0x75bb0fc3, 0x98511bfb, 0x4ffbcc35, 0xb58bcf6a, 0xe11f0abc, 0xbfc5fe4a, 0xa70aec10, 0xac39570a,
- 0x3f04442f, 0x6188b153, 0xe0397a2e, 0x5727cb79, 0x9ceb418f, 0x1cacd68d, 0x2ad37c96, 0x0175cb9d,
- 0xc69dff09, 0xc75b65f0, 0xd9db40d8, 0xec0e7779, 0x4744ead4, 0xb11c3274, 0xdd24cb9e, 0x7e1c54bd,
- 0xf01144f9, 0xd2240eb1, 0x9675b3fd, 0xa3ac3755, 0xd47c27af, 0x51c85f4d, 0x56907596, 0xa5bb15e6,
- 0x580304f0, 0xca042cf1, 0x011a37ea, 0x8dbfaadb, 0x35ba3e4a, 0x3526ffa0, 0xc37b4d09, 0xbc306ed9,
- 0x98a52666, 0x5648f725, 0xff5e569d, 0x0ced63d0, 0x7c63b2cf, 0x700b45e1, 0xd5ea50f1, 0x85a92872,
- 0xaf1fbda7, 0xd4234870, 0xa7870bf3, 0x2d3b4d79, 0x42e04198, 0x0cd0ede7, 0x26470db8, 0xf881814c,
- 0x474d6ad7, 0x7c0c5e5c, 0xd1231959, 0x381b7298, 0xf5d2f4db, 0xab838653, 0x6e2f1e23, 0x83719c9e,
- 0xbd91e046, 0x9a56456e, 0xdc39200c, 0x20c8c571, 0x962bda1c, 0xe1e696ff, 0xb141ab08, 0x7cca89b9,
- 0x1a69e783, 0x02cc4843, 0xa2f7c579, 0x429ef47d, 0x427b169c, 0x5ac9f049, 0xdd8f0f00, 0x5c8165bf,
- },
- {
- 0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, 0xfe61cf7a, 0xeec5207a, 0x55889c94, 0x72fc0651,
- 0xada7ef79, 0x4e1d7235, 0xd55a63ce, 0xde0436ba, 0x99c430ef, 0x5f0c0794, 0x18dcdb7d, 0xa1d6eff3,
- 0xa0b52f7b, 0x59e83605, 0xee15b094, 0xe9ffd909, 0xdc440086, 0xef944459, 0xba83ccb3, 0xe0c3cdfb,
- 0xd1da4181, 0x3b092ab1, 0xf997f1c1, 0xa5e6cf7b, 0x01420ddb, 0xe4e7ef5b, 0x25a1ff41, 0xe180f806,
- 0x1fc41080, 0x179bee7a, 0xd37ac6a9, 0xfe5830a4, 0x98de8b7f, 0x77e83f4e, 0x79929269, 0x24fa9f7b,
- 0xe113c85b, 0xacc40083, 0xd7503525, 0xf7ea615f, 0x62143154, 0x0d554b63, 0x5d681121, 0xc866c359,
- 0x3d63cf73, 0xcee234c0, 0xd4d87e87, 0x5c672b21, 0x071f6181, 0x39f7627f, 0x361e3084, 0xe4eb573b,
- 0x602f64a4, 0xd63acd9c, 0x1bbc4635, 0x9e81032d, 0x2701f50c, 0x99847ab4, 0xa0e3df79, 0xba6cf38c,
- 0x10843094, 0x2537a95e, 0xf46f6ffe, 0xa1ff3b1f, 0x208cfb6a, 0x8f458c74, 0xd9e0a227, 0x4ec73a34,
- 0xfc884f69, 0x3e4de8df, 0xef0e0088, 0x3559648d, 0x8a45388c, 0x1d804366, 0x721d9bfd, 0xa58684bb,
- 0xe8256333, 0x844e8212, 0x128d8098, 0xfed33fb4, 0xce280ae1, 0x27e19ba5, 0xd5a6c252, 0xe49754bd,
- 0xc5d655dd, 0xeb667064, 0x77840b4d, 0xa1b6a801, 0x84db26a9, 0xe0b56714, 0x21f043b7, 0xe5d05860,
- 0x54f03084, 0x066ff472, 0xa31aa153, 0xdadc4755, 0xb5625dbf, 0x68561be6, 0x83ca6b94, 0x2d6ed23b,
- 0xeccf01db, 0xa6d3d0ba, 0xb6803d5c, 0xaf77a709, 0x33b4a34c, 0x397bc8d6, 0x5ee22b95, 0x5f0e5304,
- 0x81ed6f61, 0x20e74364, 0xb45e1378, 0xde18639b, 0x881ca122, 0xb96726d1, 0x8049a7e8, 0x22b7da7b,
- 0x5e552d25, 0x5272d237, 0x79d2951c, 0xc60d894c, 0x488cb402, 0x1ba4fe5b, 0xa4b09f6b, 0x1ca815cf,
- 0xa20c3005, 0x8871df63, 0xb9de2fcb, 0x0cc6c9e9, 0x0beeff53, 0xe3214517, 0xb4542835, 0x9f63293c,
- 0xee41e729, 0x6e1d2d7c, 0x50045286, 0x1e6685f3, 0xf33401c6, 0x30a22c95, 0x31a70850, 0x60930f13,
- 0x73f98417, 0xa1269859, 0xec645c44, 0x52c877a9, 0xcdff33a6, 0xa02b1741, 0x7cbad9a2, 0x2180036f,
- 0x50d99c08, 0xcb3f4861, 0xc26bd765, 0x64a3f6ab, 0x80342676, 0x25a75e7b, 0xe4e6d1fc, 0x20c710e6,
- 0xcdf0b680, 0x17844d3b, 0x31eef84d, 0x7e0824e4, 0x2ccb49eb, 0x846a3bae, 0x8ff77888, 0xee5d60f6,
- 0x7af75673, 0x2fdd5cdb, 0xa11631c1, 0x30f66f43, 0xb3faec54, 0x157fd7fa, 0xef8579cc, 0xd152de58,
- 0xdb2ffd5e, 0x8f32ce19, 0x306af97a, 0x02f03ef8, 0x99319ad5, 0xc242fa0f, 0xa7e3ebb0, 0xc68e4906,
- 0xb8da230c, 0x80823028, 0xdcdef3c8, 0xd35fb171, 0x088a1bc8, 0xbec0c560, 0x61a3c9e8, 0xbca8f54d,
- 0xc72feffa, 0x22822e99, 0x82c570b4, 0xd8d94e89, 0x8b1c34bc, 0x301e16e6, 0x273be979, 0xb0ffeaa6,
- 0x61d9b8c6, 0x00b24869, 0xb7ffce3f, 0x08dc283b, 0x43daf65a, 0xf7e19798, 0x7619b72f, 0x8f1c9ba4,
- 0xdc8637a0, 0x16a7d3b1, 0x9fc393b7, 0xa7136eeb, 0xc6bcc63e, 0x1a513742, 0xef6828bc, 0x520365d6,
- 0x2d6a77ab, 0x3527ed4b, 0x821fd216, 0x095c6e2e, 0xdb92f2fb, 0x5eea29cb, 0x145892f5, 0x91584f7f,
- 0x5483697b, 0x2667a8cc, 0x85196048, 0x8c4bacea, 0x833860d4, 0x0d23e0f9, 0x6c387e8a, 0x0ae6d249,
- 0xb284600c, 0xd835731d, 0xdcb1c647, 0xac4c56ea, 0x3ebd81b3, 0x230eabb0, 0x6438bc87, 0xf0b5b1fa,
- 0x8f5ea2b3, 0xfc184642, 0x0a036b7a, 0x4fb089bd, 0x649da589, 0xa345415e, 0x5c038323, 0x3e5d3bb9,
- 0x43d79572, 0x7e6dd07c, 0x06dfdf1e, 0x6c6cc4ef, 0x7160a539, 0x73bfbe70, 0x83877605, 0x4523ecf1,
- },
- {
- 0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, 0x47607fff, 0x369fe44b, 0x8c1fc644, 0xaececa90,
- 0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, 0x920e8806, 0xf0ad0548, 0xe13c8d83, 0x927010d5,
- 0x11107d9f, 0x07647db9, 0xb2e3e4d4, 0x3d4f285e, 0xb9afa820, 0xfade82e0, 0xa067268b, 0x8272792e,
- 0x553fb2c0, 0x489ae22b, 0xd4ef9794, 0x125e3fbc, 0x21fffcee, 0x825b1bfd, 0x9255c5ed, 0x1257a240,
- 0x4e1a8302, 0xbae07fff, 0x528246e7, 0x8e57140e, 0x3373f7bf, 0x8c9f8188, 0xa6fc4ee8, 0xc982b5a5,
- 0xa8c01db7, 0x579fc264, 0x67094f31, 0xf2bd3f5f, 0x40fff7c1, 0x1fb78dfc, 0x8e6bd2c1, 0x437be59b,
- 0x99b03dbf, 0xb5dbc64b, 0x638dc0e6, 0x55819d99, 0xa197c81c, 0x4a012d6e, 0xc5884a28, 0xccc36f71,
- 0xb843c213, 0x6c0743f1, 0x8309893c, 0x0feddd5f, 0x2f7fe850, 0xd7c07f7e, 0x02507fbf, 0x5afb9a04,
- 0xa747d2d0, 0x1651192e, 0xaf70bf3e, 0x58c31380, 0x5f98302e, 0x727cc3c4, 0x0a0fb402, 0x0f7fef82,
- 0x8c96fdad, 0x5d2c2aae, 0x8ee99a49, 0x50da88b8, 0x8427f4a0, 0x1eac5790, 0x796fb449, 0x8252dc15,
- 0xefbd7d9b, 0xa672597d, 0xada840d8, 0x45f54504, 0xfa5d7403, 0xe83ec305, 0x4f91751a, 0x925669c2,
- 0x23efe941, 0xa903f12e, 0x60270df2, 0x0276e4b6, 0x94fd6574, 0x927985b2, 0x8276dbcb, 0x02778176,
- 0xf8af918d, 0x4e48f79e, 0x8f616ddf, 0xe29d840e, 0x842f7d83, 0x340ce5c8, 0x96bbb682, 0x93b4b148,
- 0xef303cab, 0x984faf28, 0x779faf9b, 0x92dc560d, 0x224d1e20, 0x8437aa88, 0x7d29dc96, 0x2756d3dc,
- 0x8b907cee, 0xb51fd240, 0xe7c07ce3, 0xe566b4a1, 0xc3e9615e, 0x3cf8209d, 0x6094d1e3, 0xcd9ca341,
- 0x5c76460e, 0x00ea983b, 0xd4d67881, 0xfd47572c, 0xf76cedd9, 0xbda8229c, 0x127dadaa, 0x438a074e,
- 0x1f97c090, 0x081bdb8a, 0x93a07ebe, 0xb938ca15, 0x97b03cff, 0x3dc2c0f8, 0x8d1ab2ec, 0x64380e51,
- 0x68cc7bfb, 0xd90f2788, 0x12490181, 0x5de5ffd4, 0xdd7ef86a, 0x76a2e214, 0xb9a40368, 0x925d958f,
- 0x4b39fffa, 0xba39aee9, 0xa4ffd30b, 0xfaf7933b, 0x6d498623, 0x193cbcfa, 0x27627545, 0x825cf47a,
- 0x61bd8ba0, 0xd11e42d1, 0xcead04f4, 0x127ea392, 0x10428db7, 0x8272a972, 0x9270c4a8, 0x127de50b,
- 0x285ba1c8, 0x3c62f44f, 0x35c0eaa5, 0xe805d231, 0x428929fb, 0xb4fcdf82, 0x4fb66a53, 0x0e7dc15b,
- 0x1f081fab, 0x108618ae, 0xfcfd086d, 0xf9ff2889, 0x694bcc11, 0x236a5cae, 0x12deca4d, 0x2c3f8cc5,
- 0xd2d02dfe, 0xf8ef5896, 0xe4cf52da, 0x95155b67, 0x494a488c, 0xb9b6a80c, 0x5c8f82bc, 0x89d36b45,
- 0x3a609437, 0xec00c9a9, 0x44715253, 0x0a874b49, 0xd773bc40, 0x7c34671c, 0x02717ef6, 0x4feb5536,
- 0xa2d02fff, 0xd2bf60c4, 0xd43f03c0, 0x50b4ef6d, 0x07478cd1, 0x006e1888, 0xa2e53f55, 0xb9e6d4bc,
- 0xa2048016, 0x97573833, 0xd7207d67, 0xde0f8f3d, 0x72f87b33, 0xabcc4f33, 0x7688c55d, 0x7b00a6b0,
- 0x947b0001, 0x570075d2, 0xf9bb88f8, 0x8942019e, 0x4264a5ff, 0x856302e0, 0x72dbd92b, 0xee971b69,
- 0x6ea22fde, 0x5f08ae2b, 0xaf7a616d, 0xe5c98767, 0xcf1febd2, 0x61efc8c2, 0xf1ac2571, 0xcc8239c2,
- 0x67214cb8, 0xb1e583d1, 0xb7dc3e62, 0x7f10bdce, 0xf90a5c38, 0x0ff0443d, 0x606e6dc6, 0x60543a49,
- 0x5727c148, 0x2be98a1d, 0x8ab41738, 0x20e1be24, 0xaf96da0f, 0x68458425, 0x99833be5, 0x600d457d,
- 0x282f9350, 0x8334b362, 0xd91d1120, 0x2b6d8da0, 0x642b1e31, 0x9c305a00, 0x52bce688, 0x1b03588a,
- 0xf7baefd5, 0x4142ed9c, 0xa4315c11, 0x83323ec5, 0xdfef4636, 0xa133c501, 0xe9d3531c, 0xee353783,
- },
- {
- 0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, 0x4a4f7bdb, 0x64ad8c57, 0x85510443, 0xfa020ed1,
- 0x7e287aff, 0xe60fb663, 0x095f35a1, 0x79ebf120, 0xfd059d43, 0x6497b7b1, 0xf3641f63, 0x241e4adf,
- 0x28147f5f, 0x4fa2b8cd, 0xc9430040, 0x0cc32220, 0xfdd30b30, 0xc0a5374f, 0x1d2d00d9, 0x24147b15,
- 0xee4d111a, 0x0fca5167, 0x71ff904c, 0x2d195ffe, 0x1a05645f, 0x0c13fefe, 0x081b08ca, 0x05170121,
- 0x80530100, 0xe83e5efe, 0xac9af4f8, 0x7fe72701, 0xd2b8ee5f, 0x06df4261, 0xbb9e9b8a, 0x7293ea25,
- 0xce84ffdf, 0xf5718801, 0x3dd64b04, 0xa26f263b, 0x7ed48400, 0x547eebe6, 0x446d4ca0, 0x6cf3d6f5,
- 0x2649abdf, 0xaea0c7f5, 0x36338cc1, 0x503f7e93, 0xd3772061, 0x11b638e1, 0x72500e03, 0xf80eb2bb,
- 0xabe0502e, 0xec8d77de, 0x57971e81, 0xe14f6746, 0xc9335400, 0x6920318f, 0x081dbb99, 0xffc304a5,
- 0x4d351805, 0x7f3d5ce3, 0xa6c866c6, 0x5d5bcca9, 0xdaec6fea, 0x9f926f91, 0x9f46222f, 0x3991467d,
- 0xa5bf6d8e, 0x1143c44f, 0x43958302, 0xd0214eeb, 0x022083b8, 0x3fb6180c, 0x18f8931e, 0x281658e6,
- 0x26486e3e, 0x8bd78a70, 0x7477e4c1, 0xb506e07c, 0xf32d0a25, 0x79098b02, 0xe4eabb81, 0x28123b23,
- 0x69dead38, 0x1574ca16, 0xdf871b62, 0x211c40b7, 0xa51a9ef9, 0x0014377b, 0x041e8ac8, 0x09114003,
- 0xbd59e4d2, 0xe3d156d5, 0x4fe876d5, 0x2f91a340, 0x557be8de, 0x00eae4a7, 0x0ce5c2ec, 0x4db4bba6,
- 0xe756bdff, 0xdd3369ac, 0xec17b035, 0x06572327, 0x99afc8b0, 0x56c8c391, 0x6b65811c, 0x5e146119,
- 0x6e85cb75, 0xbe07c002, 0xc2325577, 0x893ff4ec, 0x5bbfc92d, 0xd0ec3b25, 0xb7801ab7, 0x8d6d3b24,
- 0x20c763ef, 0xc366a5fc, 0x9c382880, 0x0ace3205, 0xaac9548a, 0xeca1d7c7, 0x041afa32, 0x1d16625a,
- 0x6701902c, 0x9b757a54, 0x31d477f7, 0x9126b031, 0x36cc6fdb, 0xc70b8b46, 0xd9e66a48, 0x56e55a79,
- 0x026a4ceb, 0x52437eff, 0x2f8f76b4, 0x0df980a5, 0x8674cde3, 0xedda04eb, 0x17a9be04, 0x2c18f4df,
- 0xb7747f9d, 0xab2af7b4, 0xefc34d20, 0x2e096b7c, 0x1741a254, 0xe5b6a035, 0x213d42f6, 0x2c1c7c26,
- 0x61c2f50f, 0x6552daf9, 0xd2c231f8, 0x25130f69, 0xd8167fa2, 0x0418f2c8, 0x001a96a6, 0x0d1526ab,
- 0x63315c21, 0x5e0a72ec, 0x49bafefd, 0x187908d9, 0x8d0dbd86, 0x311170a7, 0x3e9b640c, 0xcc3e10d7,
- 0xd5cad3b6, 0x0caec388, 0xf73001e1, 0x6c728aff, 0x71eae2a1, 0x1f9af36e, 0xcfcbd12f, 0xc1de8417,
- 0xac07be6b, 0xcb44a1d8, 0x8b9b0f56, 0x013988c3, 0xb1c52fca, 0xb4be31cd, 0xd8782806, 0x12a3a4e2,
- 0x6f7de532, 0x58fd7eb6, 0xd01ee900, 0x24adffc2, 0xf4990fc5, 0x9711aac5, 0x001d7b95, 0x82e5e7d2,
- 0x109873f6, 0x00613096, 0xc32d9521, 0xada121ff, 0x29908415, 0x7fbb977f, 0xaf9eb3db, 0x29c9ed2a,
- 0x5ce2a465, 0xa730f32c, 0xd0aa3fe8, 0x8a5cc091, 0xd49e2ce7, 0x0ce454a9, 0xd60acd86, 0x015f1919,
- 0x77079103, 0xdea03af6, 0x78a8565e, 0xdee356df, 0x21f05cbe, 0x8b75e387, 0xb3c50651, 0xb8a5c3ef,
- 0xd8eeb6d2, 0xe523be77, 0xc2154529, 0x2f69efdf, 0xafe67afb, 0xf470c4b2, 0xf3e0eb5b, 0xd6cc9876,
- 0x39e4460c, 0x1fda8538, 0x1987832f, 0xca007367, 0xa99144f8, 0x296b299e, 0x492fc295, 0x9266beab,
- 0xb5676e69, 0x9bd3ddda, 0xdf7e052f, 0xdb25701c, 0x1b5e51ee, 0xf65324e6, 0x6afce36c, 0x0316cc04,
- 0x8644213e, 0xb7dc59d0, 0x7965291f, 0xccd6fd43, 0x41823979, 0x932bcdf6, 0xb657c34d, 0x4edfd282,
- 0x7ae5290c, 0x3cb9536b, 0x851e20fe, 0x9833557e, 0x13ecf0b0, 0xd3ffb372, 0x3f85c5c1, 0x0aef7ed2,
- },
- {
- 0x7ec90c04, 0x2c6e74b9, 0x9b0e66df, 0xa6337911, 0xb86a7fff, 0x1dd358f5, 0x44dd9d44, 0x1731167f,
- 0x08fbf1fa, 0xe7f511cc, 0xd2051b00, 0x735aba00, 0x2ab722d8, 0x386381cb, 0xacf6243a, 0x69befd7a,
- 0xe6a2e77f, 0xf0c720cd, 0xc4494816, 0xccf5c180, 0x38851640, 0x15b0a848, 0xe68b18cb, 0x4caadeff,
- 0x5f480a01, 0x0412b2aa, 0x259814fc, 0x41d0efe2, 0x4e40b48d, 0x248eb6fb, 0x8dba1cfe, 0x41a99b02,
- 0x1a550a04, 0xba8f65cb, 0x7251f4e7, 0x95a51725, 0xc106ecd7, 0x97a5980a, 0xc539b9aa, 0x4d79fe6a,
- 0xf2f3f763, 0x68af8040, 0xed0c9e56, 0x11b4958b, 0xe1eb5a88, 0x8709e6b0, 0xd7e07156, 0x4e29fea7,
- 0x6366e52d, 0x02d1c000, 0xc4ac8e05, 0x9377f571, 0x0c05372a, 0x578535f2, 0x2261be02, 0xd642a0c9,
- 0xdf13a280, 0x74b55bd2, 0x682199c0, 0xd421e5ec, 0x53fb3ce8, 0xc8adedb3, 0x28a87fc9, 0x3d959981,
- 0x5c1ff900, 0xfe38d399, 0x0c4eff0b, 0x062407ea, 0xaa2f4fb1, 0x4fb96976, 0x90c79505, 0xb0a8a774,
- 0xef55a1ff, 0xe59ca2c2, 0xa6b62d27, 0xe66a4263, 0xdf65001f, 0x0ec50966, 0xdfdd55bc, 0x29de0655,
- 0x911e739a, 0x17af8975, 0x32c7911c, 0x89f89468, 0x0d01e980, 0x524755f4, 0x03b63cc9, 0x0cc844b2,
- 0xbcf3f0aa, 0x87ac36e9, 0xe53a7426, 0x01b3d82b, 0x1a9e7449, 0x64ee2d7e, 0xcddbb1da, 0x01c94910,
- 0xb868bf80, 0x0d26f3fd, 0x9342ede7, 0x04a5c284, 0x636737b6, 0x50f5b616, 0xf24766e3, 0x8eca36c1,
- 0x136e05db, 0xfef18391, 0xfb887a37, 0xd6e7f7d4, 0xc7fb7dc9, 0x3063fcdf, 0xb6f589de, 0xec2941da,
- 0x26e46695, 0xb7566419, 0xf654efc5, 0xd08d58b7, 0x48925401, 0xc1bacb7f, 0xe5ff550f, 0xb6083049,
- 0x5bb5d0e8, 0x87d72e5a, 0xab6a6ee1, 0x223a66ce, 0xc62bf3cd, 0x9e0885f9, 0x68cb3e47, 0x086c010f,
- 0xa21de820, 0xd18b69de, 0xf3f65777, 0xfa02c3f6, 0x407edac3, 0xcbb3d550, 0x1793084d, 0xb0d70eba,
- 0x0ab378d5, 0xd951fb0c, 0xded7da56, 0x4124bbe4, 0x94ca0b56, 0x0f5755d1, 0xe0e1e56e, 0x6184b5be,
- 0x580a249f, 0x94f74bc0, 0xe327888e, 0x9f7b5561, 0xc3dc0280, 0x05687715, 0x646c6bd7, 0x44904db3,
- 0x66b4f0a3, 0xc0f1648a, 0x697ed5af, 0x49e92ff6, 0x309e374f, 0x2cb6356a, 0x85808573, 0x4991f840,
- 0x76f0ae02, 0x083be84d, 0x28421c9a, 0x44489406, 0x736e4cb8, 0xc1092910, 0x8bc95fc6, 0x7d869cf4,
- 0x134f616f, 0x2e77118d, 0xb31b2be1, 0xaa90b472, 0x3ca5d717, 0x7d161bba, 0x9cad9010, 0xaf462ba2,
- 0x9fe459d2, 0x45d34559, 0xd9f2da13, 0xdbc65487, 0xf3e4f94e, 0x176d486f, 0x097c13ea, 0x631da5c7,
- 0x445f7382, 0x175683f4, 0xcdc66a97, 0x70be0288, 0xb3cdcf72, 0x6e5dd2f3, 0x20936079, 0x459b80a5,
- 0xbe60e2db, 0xa9c23101, 0xeba5315c, 0x224e42f2, 0x1c5c1572, 0xf6721b2c, 0x1ad2fff3, 0x8c25404e,
- 0x324ed72f, 0x4067b7fd, 0x0523138e, 0x5ca3bc78, 0xdc0fd66e, 0x75922283, 0x784d6b17, 0x58ebb16e,
- 0x44094f85, 0x3f481d87, 0xfcfeae7b, 0x77b5ff76, 0x8c2302bf, 0xaaf47556, 0x5f46b02a, 0x2b092801,
- 0x3d38f5f7, 0x0ca81f36, 0x52af4a8a, 0x66d5e7c0, 0xdf3b0874, 0x95055110, 0x1b5ad7a8, 0xf61ed5ad,
- 0x6cf6e479, 0x20758184, 0xd0cefa65, 0x88f7be58, 0x4a046826, 0x0ff6f8f3, 0xa09c7f70, 0x5346aba0,
- 0x5ce96c28, 0xe176eda3, 0x6bac307f, 0x376829d2, 0x85360fa9, 0x17e3fe2a, 0x24b79767, 0xf5a96b20,
- 0xd6cd2595, 0x68ff1ebf, 0x7555442c, 0xf19f06be, 0xf9e0659a, 0xeeb9491d, 0x34010718, 0xbb30cab8,
- 0xe822fe15, 0x88570983, 0x750e6249, 0xda627e55, 0x5e76ffa8, 0xb1534546, 0x6d47de08, 0xefe9e7d4,
- },
- {
- 0xf6fa8f9d, 0x2cac6ce1, 0x4ca34867, 0xe2337f7c, 0x95db08e7, 0x016843b4, 0xeced5cbc, 0x325553ac,
- 0xbf9f0960, 0xdfa1e2ed, 0x83f0579d, 0x63ed86b9, 0x1ab6a6b8, 0xde5ebe39, 0xf38ff732, 0x8989b138,
- 0x33f14961, 0xc01937bd, 0xf506c6da, 0xe4625e7e, 0xa308ea99, 0x4e23e33c, 0x79cbd7cc, 0x48a14367,
- 0xa3149619, 0xfec94bd5, 0xa114174a, 0xeaa01866, 0xa084db2d, 0x09a8486f, 0xa888614a, 0x2900af98,
- 0x01665991, 0xe1992863, 0xc8f30c60, 0x2e78ef3c, 0xd0d51932, 0xcf0fec14, 0xf7ca07d2, 0xd0a82072,
- 0xfd41197e, 0x9305a6b0, 0xe86be3da, 0x74bed3cd, 0x372da53c, 0x4c7f4448, 0xdab5d440, 0x6dba0ec3,
- 0x083919a7, 0x9fbaeed9, 0x49dbcfb0, 0x4e670c53, 0x5c3d9c01, 0x64bdb941, 0x2c0e636a, 0xba7dd9cd,
- 0xea6f7388, 0xe70bc762, 0x35f29adb, 0x5c4cdd8d, 0xf0d48d8c, 0xb88153e2, 0x08a19866, 0x1ae2eac8,
- 0x284caf89, 0xaa928223, 0x9334be53, 0x3b3a21bf, 0x16434be3, 0x9aea3906, 0xefe8c36e, 0xf890cdd9,
- 0x80226dae, 0xc340a4a3, 0xdf7e9c09, 0xa694a807, 0x5b7c5ecc, 0x221db3a6, 0x9a69a02f, 0x68818a54,
- 0xceb2296f, 0x53c0843a, 0xfe893655, 0x25bfe68a, 0xb4628abc, 0xcf222ebf, 0x25ac6f48, 0xa9a99387,
- 0x53bddb65, 0xe76ffbe7, 0xe967fd78, 0x0ba93563, 0x8e342bc1, 0xe8a11be9, 0x4980740d, 0xc8087dfc,
- 0x8de4bf99, 0xa11101a0, 0x7fd37975, 0xda5a26c0, 0xe81f994f, 0x9528cd89, 0xfd339fed, 0xb87834bf,
- 0x5f04456d, 0x22258698, 0xc9c4c83b, 0x2dc156be, 0x4f628daa, 0x57f55ec5, 0xe2220abe, 0xd2916ebf,
- 0x4ec75b95, 0x24f2c3c0, 0x42d15d99, 0xcd0d7fa0, 0x7b6e27ff, 0xa8dc8af0, 0x7345c106, 0xf41e232f,
- 0x35162386, 0xe6ea8926, 0x3333b094, 0x157ec6f2, 0x372b74af, 0x692573e4, 0xe9a9d848, 0xf3160289,
- 0x3a62ef1d, 0xa787e238, 0xf3a5f676, 0x74364853, 0x20951063, 0x4576698d, 0xb6fad407, 0x592af950,
- 0x36f73523, 0x4cfb6e87, 0x7da4cec0, 0x6c152daa, 0xcb0396a8, 0xc50dfe5d, 0xfcd707ab, 0x0921c42f,
- 0x89dff0bb, 0x5fe2be78, 0x448f4f33, 0x754613c9, 0x2b05d08d, 0x48b9d585, 0xdc049441, 0xc8098f9b,
- 0x7dede786, 0xc39a3373, 0x42410005, 0x6a091751, 0x0ef3c8a6, 0x890072d6, 0x28207682, 0xa9a9f7be,
- 0xbf32679d, 0xd45b5b75, 0xb353fd00, 0xcbb0e358, 0x830f220a, 0x1f8fb214, 0xd372cf08, 0xcc3c4a13,
- 0x8cf63166, 0x061c87be, 0x88c98f88, 0x6062e397, 0x47cf8e7a, 0xb6c85283, 0x3cc2acfb, 0x3fc06976,
- 0x4e8f0252, 0x64d8314d, 0xda3870e3, 0x1e665459, 0xc10908f0, 0x513021a5, 0x6c5b68b7, 0x822f8aa0,
- 0x3007cd3e, 0x74719eef, 0xdc872681, 0x073340d4, 0x7e432fd9, 0x0c5ec241, 0x8809286c, 0xf592d891,
- 0x08a930f6, 0x957ef305, 0xb7fbffbd, 0xc266e96f, 0x6fe4ac98, 0xb173ecc0, 0xbc60b42a, 0x953498da,
- 0xfba1ae12, 0x2d4bd736, 0x0f25faab, 0xa4f3fceb, 0xe2969123, 0x257f0c3d, 0x9348af49, 0x361400bc,
- 0xe8816f4a, 0x3814f200, 0xa3f94043, 0x9c7a54c2, 0xbc704f57, 0xda41e7f9, 0xc25ad33a, 0x54f4a084,
- 0xb17f5505, 0x59357cbe, 0xedbd15c8, 0x7f97c5ab, 0xba5ac7b5, 0xb6f6deaf, 0x3a479c3a, 0x5302da25,
- 0x653d7e6a, 0x54268d49, 0x51a477ea, 0x5017d55b, 0xd7d25d88, 0x44136c76, 0x0404a8c8, 0xb8e5a121,
- 0xb81a928a, 0x60ed5869, 0x97c55b96, 0xeaec991b, 0x29935913, 0x01fdb7f1, 0x088e8dfa, 0x9ab6f6f5,
- 0x3b4cbf9f, 0x4a5de3ab, 0xe6051d35, 0xa0e1d855, 0xd36b4cf1, 0xf544edeb, 0xb0e93524, 0xbebb8fbd,
- 0xa2d762cf, 0x49c92f54, 0x38b5f331, 0x7128a454, 0x48392905, 0xa65b1db8, 0x851c97bd, 0xd675cf2f,
- },
- {
- 0x85e04019, 0x332bf567, 0x662dbfff, 0xcfc65693, 0x2a8d7f6f, 0xab9bc912, 0xde6008a1, 0x2028da1f,
- 0x0227bce7, 0x4d642916, 0x18fac300, 0x50f18b82, 0x2cb2cb11, 0xb232e75c, 0x4b3695f2, 0xb28707de,
- 0xa05fbcf6, 0xcd4181e9, 0xe150210c, 0xe24ef1bd, 0xb168c381, 0xfde4e789, 0x5c79b0d8, 0x1e8bfd43,
- 0x4d495001, 0x38be4341, 0x913cee1d, 0x92a79c3f, 0x089766be, 0xbaeeadf4, 0x1286becf, 0xb6eacb19,
- 0x2660c200, 0x7565bde4, 0x64241f7a, 0x8248dca9, 0xc3b3ad66, 0x28136086, 0x0bd8dfa8, 0x356d1cf2,
- 0x107789be, 0xb3b2e9ce, 0x0502aa8f, 0x0bc0351e, 0x166bf52a, 0xeb12ff82, 0xe3486911, 0xd34d7516,
- 0x4e7b3aff, 0x5f43671b, 0x9cf6e037, 0x4981ac83, 0x334266ce, 0x8c9341b7, 0xd0d854c0, 0xcb3a6c88,
- 0x47bc2829, 0x4725ba37, 0xa66ad22b, 0x7ad61f1e, 0x0c5cbafa, 0x4437f107, 0xb6e79962, 0x42d2d816,
- 0x0a961288, 0xe1a5c06e, 0x13749e67, 0x72fc081a, 0xb1d139f7, 0xf9583745, 0xcf19df58, 0xbec3f756,
- 0xc06eba30, 0x07211b24, 0x45c28829, 0xc95e317f, 0xbc8ec511, 0x38bc46e9, 0xc6e6fa14, 0xbae8584a,
- 0xad4ebc46, 0x468f508b, 0x7829435f, 0xf124183b, 0x821dba9f, 0xaff60ff4, 0xea2c4e6d, 0x16e39264,
- 0x92544a8b, 0x009b4fc3, 0xaba68ced, 0x9ac96f78, 0x06a5b79a, 0xb2856e6e, 0x1aec3ca9, 0xbe838688,
- 0x0e0804e9, 0x55f1be56, 0xe7e5363b, 0xb3a1f25d, 0xf7debb85, 0x61fe033c, 0x16746233, 0x3c034c28,
- 0xda6d0c74, 0x79aac56c, 0x3ce4e1ad, 0x51f0c802, 0x98f8f35a, 0x1626a49f, 0xeed82b29, 0x1d382fe3,
- 0x0c4fb99a, 0xbb325778, 0x3ec6d97b, 0x6e77a6a9, 0xcb658b5c, 0xd45230c7, 0x2bd1408b, 0x60c03eb7,
- 0xb9068d78, 0xa33754f4, 0xf430c87d, 0xc8a71302, 0xb96d8c32, 0xebd4e7be, 0xbe8b9d2d, 0x7979fb06,
- 0xe7225308, 0x8b75cf77, 0x11ef8da4, 0xe083c858, 0x8d6b786f, 0x5a6317a6, 0xfa5cf7a0, 0x5dda0033,
- 0xf28ebfb0, 0xf5b9c310, 0xa0eac280, 0x08b9767a, 0xa3d9d2b0, 0x79d34217, 0x021a718d, 0x9ac6336a,
- 0x2711fd60, 0x438050e3, 0x069908a8, 0x3d7fedc4, 0x826d2bef, 0x4eeb8476, 0x488dcf25, 0x36c9d566,
- 0x28e74e41, 0xc2610aca, 0x3d49a9cf, 0xbae3b9df, 0xb65f8de6, 0x92aeaf64, 0x3ac7d5e6, 0x9ea80509,
- 0xf22b017d, 0xa4173f70, 0xdd1e16c3, 0x15e0d7f9, 0x50b1b887, 0x2b9f4fd5, 0x625aba82, 0x6a017962,
- 0x2ec01b9c, 0x15488aa9, 0xd716e740, 0x40055a2c, 0x93d29a22, 0xe32dbf9a, 0x058745b9, 0x3453dc1e,
- 0xd699296e, 0x496cff6f, 0x1c9f4986, 0xdfe2ed07, 0xb87242d1, 0x19de7eae, 0x053e561a, 0x15ad6f8c,
- 0x66626c1c, 0x7154c24c, 0xea082b2a, 0x93eb2939, 0x17dcb0f0, 0x58d4f2ae, 0x9ea294fb, 0x52cf564c,
- 0x9883fe66, 0x2ec40581, 0x763953c3, 0x01d6692e, 0xd3a0c108, 0xa1e7160e, 0xe4f2dfa6, 0x693ed285,
- 0x74904698, 0x4c2b0edd, 0x4f757656, 0x5d393378, 0xa132234f, 0x3d321c5d, 0xc3f5e194, 0x4b269301,
- 0xc79f022f, 0x3c997e7e, 0x5e4f9504, 0x3ffafbbd, 0x76f7ad0e, 0x296693f4, 0x3d1fce6f, 0xc61e45be,
- 0xd3b5ab34, 0xf72bf9b7, 0x1b0434c0, 0x4e72b567, 0x5592a33d, 0xb5229301, 0xcfd2a87f, 0x60aeb767,
- 0x1814386b, 0x30bcc33d, 0x38a0c07d, 0xfd1606f2, 0xc363519b, 0x589dd390, 0x5479f8e6, 0x1cb8d647,
- 0x97fd61a9, 0xea7759f4, 0x2d57539d, 0x569a58cf, 0xe84e63ad, 0x462e1b78, 0x6580f87e, 0xf3817914,
- 0x91da55f4, 0x40a230f3, 0xd1988f35, 0xb6e318d2, 0x3ffa50bc, 0x3d40f021, 0xc3c0bdae, 0x4958c24c,
- 0x518f36b2, 0x84b1d370, 0x0fedce83, 0x878ddada, 0xf2a279c7, 0x94e01be8, 0x90716f4b, 0x954b8aa3,
- },
- {
- 0xe216300d, 0xbbddfffc, 0xa7ebdabd, 0x35648095, 0x7789f8b7, 0xe6c1121b, 0x0e241600, 0x052ce8b5,
- 0x11a9cfb0, 0xe5952f11, 0xece7990a, 0x9386d174, 0x2a42931c, 0x76e38111, 0xb12def3a, 0x37ddddfc,
- 0xde9adeb1, 0x0a0cc32c, 0xbe197029, 0x84a00940, 0xbb243a0f, 0xb4d137cf, 0xb44e79f0, 0x049eedfd,
- 0x0b15a15d, 0x480d3168, 0x8bbbde5a, 0x669ded42, 0xc7ece831, 0x3f8f95e7, 0x72df191b, 0x7580330d,
- 0x94074251, 0x5c7dcdfa, 0xabbe6d63, 0xaa402164, 0xb301d40a, 0x02e7d1ca, 0x53571dae, 0x7a3182a2,
- 0x12a8ddec, 0xfdaa335d, 0x176f43e8, 0x71fb46d4, 0x38129022, 0xce949ad4, 0xb84769ad, 0x965bd862,
- 0x82f3d055, 0x66fb9767, 0x15b80b4e, 0x1d5b47a0, 0x4cfde06f, 0xc28ec4b8, 0x57e8726e, 0x647a78fc,
- 0x99865d44, 0x608bd593, 0x6c200e03, 0x39dc5ff6, 0x5d0b00a3, 0xae63aff2, 0x7e8bd632, 0x70108c0c,
- 0xbbd35049, 0x2998df04, 0x980cf42a, 0x9b6df491, 0x9e7edd53, 0x06918548, 0x58cb7e07, 0x3b74ef2e,
- 0x522fffb1, 0xd24708cc, 0x1c7e27cd, 0xa4eb215b, 0x3cf1d2e2, 0x19b47a38, 0x424f7618, 0x35856039,
- 0x9d17dee7, 0x27eb35e6, 0xc9aff67b, 0x36baf5b8, 0x09c467cd, 0xc18910b1, 0xe11dbf7b, 0x06cd1af8,
- 0x7170c608, 0x2d5e3354, 0xd4de495a, 0x64c6d006, 0xbcc0c62c, 0x3dd00db3, 0x708f8f34, 0x77d51b42,
- 0x264f620f, 0x24b8d2bf, 0x15c1b79e, 0x46a52564, 0xf8d7e54e, 0x3e378160, 0x7895cda5, 0x859c15a5,
- 0xe6459788, 0xc37bc75f, 0xdb07ba0c, 0x0676a3ab, 0x7f229b1e, 0x31842e7b, 0x24259fd7, 0xf8bef472,
- 0x835ffcb8, 0x6df4c1f2, 0x96f5b195, 0xfd0af0fc, 0xb0fe134c, 0xe2506d3d, 0x4f9b12ea, 0xf215f225,
- 0xa223736f, 0x9fb4c428, 0x25d04979, 0x34c713f8, 0xc4618187, 0xea7a6e98, 0x7cd16efc, 0x1436876c,
- 0xf1544107, 0xbedeee14, 0x56e9af27, 0xa04aa441, 0x3cf7c899, 0x92ecbae6, 0xdd67016d, 0x151682eb,
- 0xa842eedf, 0xfdba60b4, 0xf1907b75, 0x20e3030f, 0x24d8c29e, 0xe139673b, 0xefa63fb8, 0x71873054,
- 0xb6f2cf3b, 0x9f326442, 0xcb15a4cc, 0xb01a4504, 0xf1e47d8d, 0x844a1be5, 0xbae7dfdc, 0x42cbda70,
- 0xcd7dae0a, 0x57e85b7a, 0xd53f5af6, 0x20cf4d8c, 0xcea4d428, 0x79d130a4, 0x3486ebfb, 0x33d3cddc,
- 0x77853b53, 0x37effcb5, 0xc5068778, 0xe580b3e6, 0x4e68b8f4, 0xc5c8b37e, 0x0d809ea2, 0x398feb7c,
- 0x132a4f94, 0x43b7950e, 0x2fee7d1c, 0x223613bd, 0xdd06caa2, 0x37df932b, 0xc4248289, 0xacf3ebc3,
- 0x5715f6b7, 0xef3478dd, 0xf267616f, 0xc148cbe4, 0x9052815e, 0x5e410fab, 0xb48a2465, 0x2eda7fa4,
- 0xe87b40e4, 0xe98ea084, 0x5889e9e1, 0xefd390fc, 0xdd07d35b, 0xdb485694, 0x38d7e5b2, 0x57720101,
- 0x730edebc, 0x5b643113, 0x94917e4f, 0x503c2fba, 0x646f1282, 0x7523d24a, 0xe0779695, 0xf9c17a8f,
- 0x7a5b2121, 0xd187b896, 0x29263a4d, 0xba510cdf, 0x81f47c9f, 0xad1163ed, 0xea7b5965, 0x1a00726e,
- 0x11403092, 0x00da6d77, 0x4a0cdd61, 0xad1f4603, 0x605bdfb0, 0x9eedc364, 0x22ebe6a8, 0xcee7d28a,
- 0xa0e736a0, 0x5564a6b9, 0x10853209, 0xc7eb8f37, 0x2de705ca, 0x8951570f, 0xdf09822b, 0xbd691a6c,
- 0xaa12e4f2, 0x87451c0f, 0xe0f6a27a, 0x3ada4819, 0x4cf1764f, 0x0d771c2b, 0x67cdb156, 0x350d8384,
- 0x5938fa0f, 0x42399ef3, 0x36997b07, 0x0e84093d, 0x4aa93e61, 0x8360d87b, 0x1fa98b0c, 0x1149382c,
- 0xe97625a5, 0x0614d1b7, 0x0e25244b, 0x0c768347, 0x589e8d82, 0x0d2059d1, 0xa466bb1e, 0xf8da0a82,
- 0x04f19130, 0xba6e4ec0, 0x99265164, 0x1ee7230d, 0x50b2ad80, 0xeaee6801, 0x8db2a283, 0xea8bf59e,
- },
-}
diff --git a/vendor/golang.org/x/crypto/hkdf/hkdf.go b/vendor/golang.org/x/crypto/hkdf/hkdf.go
deleted file mode 100644
index 3bee66294ec..00000000000
--- a/vendor/golang.org/x/crypto/hkdf/hkdf.go
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package hkdf implements the HMAC-based Extract-and-Expand Key Derivation
-// Function (HKDF) as defined in RFC 5869.
-//
-// HKDF is a cryptographic key derivation function (KDF) with the goal of
-// expanding limited input keying material into one or more cryptographically
-// strong secret keys.
-package hkdf
-
-import (
- "crypto/hmac"
- "errors"
- "hash"
- "io"
-)
-
-// Extract generates a pseudorandom key for use with Expand from an input secret
-// and an optional independent salt.
-//
-// Only use this function if you need to reuse the extracted key with multiple
-// Expand invocations and different context values. Most common scenarios,
-// including the generation of multiple keys, should use New instead.
-func Extract(hash func() hash.Hash, secret, salt []byte) []byte {
- if salt == nil {
- salt = make([]byte, hash().Size())
- }
- extractor := hmac.New(hash, salt)
- extractor.Write(secret)
- return extractor.Sum(nil)
-}
-
-type hkdf struct {
- expander hash.Hash
- size int
-
- info []byte
- counter byte
-
- prev []byte
- buf []byte
-}
-
-func (f *hkdf) Read(p []byte) (int, error) {
- // Check whether enough data can be generated
- need := len(p)
- remains := len(f.buf) + int(255-f.counter+1)*f.size
- if remains < need {
- return 0, errors.New("hkdf: entropy limit reached")
- }
- // Read any leftover from the buffer
- n := copy(p, f.buf)
- p = p[n:]
-
- // Fill the rest of the buffer
- for len(p) > 0 {
- if f.counter > 1 {
- f.expander.Reset()
- }
- f.expander.Write(f.prev)
- f.expander.Write(f.info)
- f.expander.Write([]byte{f.counter})
- f.prev = f.expander.Sum(f.prev[:0])
- f.counter++
-
- // Copy the new batch into p
- f.buf = f.prev
- n = copy(p, f.buf)
- p = p[n:]
- }
- // Save leftovers for next run
- f.buf = f.buf[n:]
-
- return need, nil
-}
-
-// Expand returns a Reader, from which keys can be read, using the given
-// pseudorandom key and optional context info, skipping the extraction step.
-//
-// The pseudorandomKey should have been generated by Extract, or be a uniformly
-// random or pseudorandom cryptographically strong key. See RFC 5869, Section
-// 3.3. Most common scenarios will want to use New instead.
-func Expand(hash func() hash.Hash, pseudorandomKey, info []byte) io.Reader {
- expander := hmac.New(hash, pseudorandomKey)
- return &hkdf{expander, expander.Size(), info, 1, nil, nil}
-}
-
-// New returns a Reader, from which keys can be read, using the given hash,
-// secret, salt and context info. Salt and info can be nil.
-func New(hash func() hash.Hash, secret, salt, info []byte) io.Reader {
- prk := Extract(hash, secret, salt)
- return Expand(hash, prk, info)
-}
diff --git a/vendor/golang.org/x/crypto/sha3/doc.go b/vendor/golang.org/x/crypto/sha3/doc.go
deleted file mode 100644
index 7e023090707..00000000000
--- a/vendor/golang.org/x/crypto/sha3/doc.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package sha3 implements the SHA-3 fixed-output-length hash functions and
-// the SHAKE variable-output-length hash functions defined by FIPS-202.
-//
-// Both types of hash function use the "sponge" construction and the Keccak
-// permutation. For a detailed specification see http://keccak.noekeon.org/
-//
-// # Guidance
-//
-// If you aren't sure what function you need, use SHAKE256 with at least 64
-// bytes of output. The SHAKE instances are faster than the SHA3 instances;
-// the latter have to allocate memory to conform to the hash.Hash interface.
-//
-// If you need a secret-key MAC (message authentication code), prepend the
-// secret key to the input, hash with SHAKE256 and read at least 32 bytes of
-// output.
-//
-// # Security strengths
-//
-// The SHA3-x (x equals 224, 256, 384, or 512) functions have a security
-// strength against preimage attacks of x bits. Since they only produce "x"
-// bits of output, their collision-resistance is only "x/2" bits.
-//
-// The SHAKE-256 and -128 functions have a generic security strength of 256 and
-// 128 bits against all attacks, provided that at least 2x bits of their output
-// is used. Requesting more than 64 or 32 bytes of output, respectively, does
-// not increase the collision-resistance of the SHAKE functions.
-//
-// # The sponge construction
-//
-// A sponge builds a pseudo-random function from a public pseudo-random
-// permutation, by applying the permutation to a state of "rate + capacity"
-// bytes, but hiding "capacity" of the bytes.
-//
-// A sponge starts out with a zero state. To hash an input using a sponge, up
-// to "rate" bytes of the input are XORed into the sponge's state. The sponge
-// is then "full" and the permutation is applied to "empty" it. This process is
-// repeated until all the input has been "absorbed". The input is then padded.
-// The digest is "squeezed" from the sponge in the same way, except that output
-// is copied out instead of input being XORed in.
-//
-// A sponge is parameterized by its generic security strength, which is equal
-// to half its capacity; capacity + rate is equal to the permutation's width.
-// Since the KeccakF-1600 permutation is 1600 bits (200 bytes) wide, this means
-// that the security strength of a sponge instance is equal to (1600 - bitrate) / 2.
-//
-// # Recommendations
-//
-// The SHAKE functions are recommended for most new uses. They can produce
-// output of arbitrary length. SHAKE256, with an output length of at least
-// 64 bytes, provides 256-bit security against all attacks. The Keccak team
-// recommends it for most applications upgrading from SHA2-512. (NIST chose a
-// much stronger, but much slower, sponge instance for SHA3-512.)
-//
-// The SHA-3 functions are "drop-in" replacements for the SHA-2 functions.
-// They produce output of the same length, with the same security strengths
-// against all attacks. This means, in particular, that SHA3-256 only has
-// 128-bit collision resistance, because its output length is 32 bytes.
-package sha3
diff --git a/vendor/golang.org/x/crypto/sha3/hashes.go b/vendor/golang.org/x/crypto/sha3/hashes.go
deleted file mode 100644
index c544b29e5f2..00000000000
--- a/vendor/golang.org/x/crypto/sha3/hashes.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sha3
-
-// This file provides functions for creating instances of the SHA-3
-// and SHAKE hash functions, as well as utility functions for hashing
-// bytes.
-
-import (
- "crypto"
- "hash"
-)
-
-// New224 creates a new SHA3-224 hash.
-// Its generic security strength is 224 bits against preimage attacks,
-// and 112 bits against collision attacks.
-func New224() hash.Hash {
- return new224()
-}
-
-// New256 creates a new SHA3-256 hash.
-// Its generic security strength is 256 bits against preimage attacks,
-// and 128 bits against collision attacks.
-func New256() hash.Hash {
- return new256()
-}
-
-// New384 creates a new SHA3-384 hash.
-// Its generic security strength is 384 bits against preimage attacks,
-// and 192 bits against collision attacks.
-func New384() hash.Hash {
- return new384()
-}
-
-// New512 creates a new SHA3-512 hash.
-// Its generic security strength is 512 bits against preimage attacks,
-// and 256 bits against collision attacks.
-func New512() hash.Hash {
- return new512()
-}
-
-func init() {
- crypto.RegisterHash(crypto.SHA3_224, New224)
- crypto.RegisterHash(crypto.SHA3_256, New256)
- crypto.RegisterHash(crypto.SHA3_384, New384)
- crypto.RegisterHash(crypto.SHA3_512, New512)
-}
-
-func new224Generic() *state {
- return &state{rate: 144, outputLen: 28, dsbyte: 0x06}
-}
-
-func new256Generic() *state {
- return &state{rate: 136, outputLen: 32, dsbyte: 0x06}
-}
-
-func new384Generic() *state {
- return &state{rate: 104, outputLen: 48, dsbyte: 0x06}
-}
-
-func new512Generic() *state {
- return &state{rate: 72, outputLen: 64, dsbyte: 0x06}
-}
-
-// NewLegacyKeccak256 creates a new Keccak-256 hash.
-//
-// Only use this function if you require compatibility with an existing cryptosystem
-// that uses non-standard padding. All other users should use New256 instead.
-func NewLegacyKeccak256() hash.Hash { return &state{rate: 136, outputLen: 32, dsbyte: 0x01} }
-
-// NewLegacyKeccak512 creates a new Keccak-512 hash.
-//
-// Only use this function if you require compatibility with an existing cryptosystem
-// that uses non-standard padding. All other users should use New512 instead.
-func NewLegacyKeccak512() hash.Hash { return &state{rate: 72, outputLen: 64, dsbyte: 0x01} }
-
-// Sum224 returns the SHA3-224 digest of the data.
-func Sum224(data []byte) (digest [28]byte) {
- h := New224()
- h.Write(data)
- h.Sum(digest[:0])
- return
-}
-
-// Sum256 returns the SHA3-256 digest of the data.
-func Sum256(data []byte) (digest [32]byte) {
- h := New256()
- h.Write(data)
- h.Sum(digest[:0])
- return
-}
-
-// Sum384 returns the SHA3-384 digest of the data.
-func Sum384(data []byte) (digest [48]byte) {
- h := New384()
- h.Write(data)
- h.Sum(digest[:0])
- return
-}
-
-// Sum512 returns the SHA3-512 digest of the data.
-func Sum512(data []byte) (digest [64]byte) {
- h := New512()
- h.Write(data)
- h.Sum(digest[:0])
- return
-}
diff --git a/vendor/golang.org/x/crypto/sha3/hashes_noasm.go b/vendor/golang.org/x/crypto/sha3/hashes_noasm.go
deleted file mode 100644
index 9d85fb62144..00000000000
--- a/vendor/golang.org/x/crypto/sha3/hashes_noasm.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !gc || purego || !s390x
-
-package sha3
-
-func new224() *state {
- return new224Generic()
-}
-
-func new256() *state {
- return new256Generic()
-}
-
-func new384() *state {
- return new384Generic()
-}
-
-func new512() *state {
- return new512Generic()
-}
diff --git a/vendor/golang.org/x/crypto/sha3/keccakf.go b/vendor/golang.org/x/crypto/sha3/keccakf.go
deleted file mode 100644
index ce48b1dd3ed..00000000000
--- a/vendor/golang.org/x/crypto/sha3/keccakf.go
+++ /dev/null
@@ -1,414 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !amd64 || purego || !gc
-
-package sha3
-
-import "math/bits"
-
-// rc stores the round constants for use in the ι step.
-var rc = [24]uint64{
- 0x0000000000000001,
- 0x0000000000008082,
- 0x800000000000808A,
- 0x8000000080008000,
- 0x000000000000808B,
- 0x0000000080000001,
- 0x8000000080008081,
- 0x8000000000008009,
- 0x000000000000008A,
- 0x0000000000000088,
- 0x0000000080008009,
- 0x000000008000000A,
- 0x000000008000808B,
- 0x800000000000008B,
- 0x8000000000008089,
- 0x8000000000008003,
- 0x8000000000008002,
- 0x8000000000000080,
- 0x000000000000800A,
- 0x800000008000000A,
- 0x8000000080008081,
- 0x8000000000008080,
- 0x0000000080000001,
- 0x8000000080008008,
-}
-
-// keccakF1600 applies the Keccak permutation to a 1600b-wide
-// state represented as a slice of 25 uint64s.
-func keccakF1600(a *[25]uint64) {
- // Implementation translated from Keccak-inplace.c
- // in the keccak reference code.
- var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64
-
- for i := 0; i < 24; i += 4 {
- // Combines the 5 steps in each round into 2 steps.
- // Unrolls 4 rounds per loop and spreads some steps across rounds.
-
- // Round 1
- bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
- bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
- bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
- bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
- bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
- d0 = bc4 ^ (bc1<<1 | bc1>>63)
- d1 = bc0 ^ (bc2<<1 | bc2>>63)
- d2 = bc1 ^ (bc3<<1 | bc3>>63)
- d3 = bc2 ^ (bc4<<1 | bc4>>63)
- d4 = bc3 ^ (bc0<<1 | bc0>>63)
-
- bc0 = a[0] ^ d0
- t = a[6] ^ d1
- bc1 = bits.RotateLeft64(t, 44)
- t = a[12] ^ d2
- bc2 = bits.RotateLeft64(t, 43)
- t = a[18] ^ d3
- bc3 = bits.RotateLeft64(t, 21)
- t = a[24] ^ d4
- bc4 = bits.RotateLeft64(t, 14)
- a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i]
- a[6] = bc1 ^ (bc3 &^ bc2)
- a[12] = bc2 ^ (bc4 &^ bc3)
- a[18] = bc3 ^ (bc0 &^ bc4)
- a[24] = bc4 ^ (bc1 &^ bc0)
-
- t = a[10] ^ d0
- bc2 = bits.RotateLeft64(t, 3)
- t = a[16] ^ d1
- bc3 = bits.RotateLeft64(t, 45)
- t = a[22] ^ d2
- bc4 = bits.RotateLeft64(t, 61)
- t = a[3] ^ d3
- bc0 = bits.RotateLeft64(t, 28)
- t = a[9] ^ d4
- bc1 = bits.RotateLeft64(t, 20)
- a[10] = bc0 ^ (bc2 &^ bc1)
- a[16] = bc1 ^ (bc3 &^ bc2)
- a[22] = bc2 ^ (bc4 &^ bc3)
- a[3] = bc3 ^ (bc0 &^ bc4)
- a[9] = bc4 ^ (bc1 &^ bc0)
-
- t = a[20] ^ d0
- bc4 = bits.RotateLeft64(t, 18)
- t = a[1] ^ d1
- bc0 = bits.RotateLeft64(t, 1)
- t = a[7] ^ d2
- bc1 = bits.RotateLeft64(t, 6)
- t = a[13] ^ d3
- bc2 = bits.RotateLeft64(t, 25)
- t = a[19] ^ d4
- bc3 = bits.RotateLeft64(t, 8)
- a[20] = bc0 ^ (bc2 &^ bc1)
- a[1] = bc1 ^ (bc3 &^ bc2)
- a[7] = bc2 ^ (bc4 &^ bc3)
- a[13] = bc3 ^ (bc0 &^ bc4)
- a[19] = bc4 ^ (bc1 &^ bc0)
-
- t = a[5] ^ d0
- bc1 = bits.RotateLeft64(t, 36)
- t = a[11] ^ d1
- bc2 = bits.RotateLeft64(t, 10)
- t = a[17] ^ d2
- bc3 = bits.RotateLeft64(t, 15)
- t = a[23] ^ d3
- bc4 = bits.RotateLeft64(t, 56)
- t = a[4] ^ d4
- bc0 = bits.RotateLeft64(t, 27)
- a[5] = bc0 ^ (bc2 &^ bc1)
- a[11] = bc1 ^ (bc3 &^ bc2)
- a[17] = bc2 ^ (bc4 &^ bc3)
- a[23] = bc3 ^ (bc0 &^ bc4)
- a[4] = bc4 ^ (bc1 &^ bc0)
-
- t = a[15] ^ d0
- bc3 = bits.RotateLeft64(t, 41)
- t = a[21] ^ d1
- bc4 = bits.RotateLeft64(t, 2)
- t = a[2] ^ d2
- bc0 = bits.RotateLeft64(t, 62)
- t = a[8] ^ d3
- bc1 = bits.RotateLeft64(t, 55)
- t = a[14] ^ d4
- bc2 = bits.RotateLeft64(t, 39)
- a[15] = bc0 ^ (bc2 &^ bc1)
- a[21] = bc1 ^ (bc3 &^ bc2)
- a[2] = bc2 ^ (bc4 &^ bc3)
- a[8] = bc3 ^ (bc0 &^ bc4)
- a[14] = bc4 ^ (bc1 &^ bc0)
-
- // Round 2
- bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
- bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
- bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
- bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
- bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
- d0 = bc4 ^ (bc1<<1 | bc1>>63)
- d1 = bc0 ^ (bc2<<1 | bc2>>63)
- d2 = bc1 ^ (bc3<<1 | bc3>>63)
- d3 = bc2 ^ (bc4<<1 | bc4>>63)
- d4 = bc3 ^ (bc0<<1 | bc0>>63)
-
- bc0 = a[0] ^ d0
- t = a[16] ^ d1
- bc1 = bits.RotateLeft64(t, 44)
- t = a[7] ^ d2
- bc2 = bits.RotateLeft64(t, 43)
- t = a[23] ^ d3
- bc3 = bits.RotateLeft64(t, 21)
- t = a[14] ^ d4
- bc4 = bits.RotateLeft64(t, 14)
- a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+1]
- a[16] = bc1 ^ (bc3 &^ bc2)
- a[7] = bc2 ^ (bc4 &^ bc3)
- a[23] = bc3 ^ (bc0 &^ bc4)
- a[14] = bc4 ^ (bc1 &^ bc0)
-
- t = a[20] ^ d0
- bc2 = bits.RotateLeft64(t, 3)
- t = a[11] ^ d1
- bc3 = bits.RotateLeft64(t, 45)
- t = a[2] ^ d2
- bc4 = bits.RotateLeft64(t, 61)
- t = a[18] ^ d3
- bc0 = bits.RotateLeft64(t, 28)
- t = a[9] ^ d4
- bc1 = bits.RotateLeft64(t, 20)
- a[20] = bc0 ^ (bc2 &^ bc1)
- a[11] = bc1 ^ (bc3 &^ bc2)
- a[2] = bc2 ^ (bc4 &^ bc3)
- a[18] = bc3 ^ (bc0 &^ bc4)
- a[9] = bc4 ^ (bc1 &^ bc0)
-
- t = a[15] ^ d0
- bc4 = bits.RotateLeft64(t, 18)
- t = a[6] ^ d1
- bc0 = bits.RotateLeft64(t, 1)
- t = a[22] ^ d2
- bc1 = bits.RotateLeft64(t, 6)
- t = a[13] ^ d3
- bc2 = bits.RotateLeft64(t, 25)
- t = a[4] ^ d4
- bc3 = bits.RotateLeft64(t, 8)
- a[15] = bc0 ^ (bc2 &^ bc1)
- a[6] = bc1 ^ (bc3 &^ bc2)
- a[22] = bc2 ^ (bc4 &^ bc3)
- a[13] = bc3 ^ (bc0 &^ bc4)
- a[4] = bc4 ^ (bc1 &^ bc0)
-
- t = a[10] ^ d0
- bc1 = bits.RotateLeft64(t, 36)
- t = a[1] ^ d1
- bc2 = bits.RotateLeft64(t, 10)
- t = a[17] ^ d2
- bc3 = bits.RotateLeft64(t, 15)
- t = a[8] ^ d3
- bc4 = bits.RotateLeft64(t, 56)
- t = a[24] ^ d4
- bc0 = bits.RotateLeft64(t, 27)
- a[10] = bc0 ^ (bc2 &^ bc1)
- a[1] = bc1 ^ (bc3 &^ bc2)
- a[17] = bc2 ^ (bc4 &^ bc3)
- a[8] = bc3 ^ (bc0 &^ bc4)
- a[24] = bc4 ^ (bc1 &^ bc0)
-
- t = a[5] ^ d0
- bc3 = bits.RotateLeft64(t, 41)
- t = a[21] ^ d1
- bc4 = bits.RotateLeft64(t, 2)
- t = a[12] ^ d2
- bc0 = bits.RotateLeft64(t, 62)
- t = a[3] ^ d3
- bc1 = bits.RotateLeft64(t, 55)
- t = a[19] ^ d4
- bc2 = bits.RotateLeft64(t, 39)
- a[5] = bc0 ^ (bc2 &^ bc1)
- a[21] = bc1 ^ (bc3 &^ bc2)
- a[12] = bc2 ^ (bc4 &^ bc3)
- a[3] = bc3 ^ (bc0 &^ bc4)
- a[19] = bc4 ^ (bc1 &^ bc0)
-
- // Round 3
- bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
- bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
- bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
- bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
- bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
- d0 = bc4 ^ (bc1<<1 | bc1>>63)
- d1 = bc0 ^ (bc2<<1 | bc2>>63)
- d2 = bc1 ^ (bc3<<1 | bc3>>63)
- d3 = bc2 ^ (bc4<<1 | bc4>>63)
- d4 = bc3 ^ (bc0<<1 | bc0>>63)
-
- bc0 = a[0] ^ d0
- t = a[11] ^ d1
- bc1 = bits.RotateLeft64(t, 44)
- t = a[22] ^ d2
- bc2 = bits.RotateLeft64(t, 43)
- t = a[8] ^ d3
- bc3 = bits.RotateLeft64(t, 21)
- t = a[19] ^ d4
- bc4 = bits.RotateLeft64(t, 14)
- a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+2]
- a[11] = bc1 ^ (bc3 &^ bc2)
- a[22] = bc2 ^ (bc4 &^ bc3)
- a[8] = bc3 ^ (bc0 &^ bc4)
- a[19] = bc4 ^ (bc1 &^ bc0)
-
- t = a[15] ^ d0
- bc2 = bits.RotateLeft64(t, 3)
- t = a[1] ^ d1
- bc3 = bits.RotateLeft64(t, 45)
- t = a[12] ^ d2
- bc4 = bits.RotateLeft64(t, 61)
- t = a[23] ^ d3
- bc0 = bits.RotateLeft64(t, 28)
- t = a[9] ^ d4
- bc1 = bits.RotateLeft64(t, 20)
- a[15] = bc0 ^ (bc2 &^ bc1)
- a[1] = bc1 ^ (bc3 &^ bc2)
- a[12] = bc2 ^ (bc4 &^ bc3)
- a[23] = bc3 ^ (bc0 &^ bc4)
- a[9] = bc4 ^ (bc1 &^ bc0)
-
- t = a[5] ^ d0
- bc4 = bits.RotateLeft64(t, 18)
- t = a[16] ^ d1
- bc0 = bits.RotateLeft64(t, 1)
- t = a[2] ^ d2
- bc1 = bits.RotateLeft64(t, 6)
- t = a[13] ^ d3
- bc2 = bits.RotateLeft64(t, 25)
- t = a[24] ^ d4
- bc3 = bits.RotateLeft64(t, 8)
- a[5] = bc0 ^ (bc2 &^ bc1)
- a[16] = bc1 ^ (bc3 &^ bc2)
- a[2] = bc2 ^ (bc4 &^ bc3)
- a[13] = bc3 ^ (bc0 &^ bc4)
- a[24] = bc4 ^ (bc1 &^ bc0)
-
- t = a[20] ^ d0
- bc1 = bits.RotateLeft64(t, 36)
- t = a[6] ^ d1
- bc2 = bits.RotateLeft64(t, 10)
- t = a[17] ^ d2
- bc3 = bits.RotateLeft64(t, 15)
- t = a[3] ^ d3
- bc4 = bits.RotateLeft64(t, 56)
- t = a[14] ^ d4
- bc0 = bits.RotateLeft64(t, 27)
- a[20] = bc0 ^ (bc2 &^ bc1)
- a[6] = bc1 ^ (bc3 &^ bc2)
- a[17] = bc2 ^ (bc4 &^ bc3)
- a[3] = bc3 ^ (bc0 &^ bc4)
- a[14] = bc4 ^ (bc1 &^ bc0)
-
- t = a[10] ^ d0
- bc3 = bits.RotateLeft64(t, 41)
- t = a[21] ^ d1
- bc4 = bits.RotateLeft64(t, 2)
- t = a[7] ^ d2
- bc0 = bits.RotateLeft64(t, 62)
- t = a[18] ^ d3
- bc1 = bits.RotateLeft64(t, 55)
- t = a[4] ^ d4
- bc2 = bits.RotateLeft64(t, 39)
- a[10] = bc0 ^ (bc2 &^ bc1)
- a[21] = bc1 ^ (bc3 &^ bc2)
- a[7] = bc2 ^ (bc4 &^ bc3)
- a[18] = bc3 ^ (bc0 &^ bc4)
- a[4] = bc4 ^ (bc1 &^ bc0)
-
- // Round 4
- bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
- bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
- bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
- bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
- bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
- d0 = bc4 ^ (bc1<<1 | bc1>>63)
- d1 = bc0 ^ (bc2<<1 | bc2>>63)
- d2 = bc1 ^ (bc3<<1 | bc3>>63)
- d3 = bc2 ^ (bc4<<1 | bc4>>63)
- d4 = bc3 ^ (bc0<<1 | bc0>>63)
-
- bc0 = a[0] ^ d0
- t = a[1] ^ d1
- bc1 = bits.RotateLeft64(t, 44)
- t = a[2] ^ d2
- bc2 = bits.RotateLeft64(t, 43)
- t = a[3] ^ d3
- bc3 = bits.RotateLeft64(t, 21)
- t = a[4] ^ d4
- bc4 = bits.RotateLeft64(t, 14)
- a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+3]
- a[1] = bc1 ^ (bc3 &^ bc2)
- a[2] = bc2 ^ (bc4 &^ bc3)
- a[3] = bc3 ^ (bc0 &^ bc4)
- a[4] = bc4 ^ (bc1 &^ bc0)
-
- t = a[5] ^ d0
- bc2 = bits.RotateLeft64(t, 3)
- t = a[6] ^ d1
- bc3 = bits.RotateLeft64(t, 45)
- t = a[7] ^ d2
- bc4 = bits.RotateLeft64(t, 61)
- t = a[8] ^ d3
- bc0 = bits.RotateLeft64(t, 28)
- t = a[9] ^ d4
- bc1 = bits.RotateLeft64(t, 20)
- a[5] = bc0 ^ (bc2 &^ bc1)
- a[6] = bc1 ^ (bc3 &^ bc2)
- a[7] = bc2 ^ (bc4 &^ bc3)
- a[8] = bc3 ^ (bc0 &^ bc4)
- a[9] = bc4 ^ (bc1 &^ bc0)
-
- t = a[10] ^ d0
- bc4 = bits.RotateLeft64(t, 18)
- t = a[11] ^ d1
- bc0 = bits.RotateLeft64(t, 1)
- t = a[12] ^ d2
- bc1 = bits.RotateLeft64(t, 6)
- t = a[13] ^ d3
- bc2 = bits.RotateLeft64(t, 25)
- t = a[14] ^ d4
- bc3 = bits.RotateLeft64(t, 8)
- a[10] = bc0 ^ (bc2 &^ bc1)
- a[11] = bc1 ^ (bc3 &^ bc2)
- a[12] = bc2 ^ (bc4 &^ bc3)
- a[13] = bc3 ^ (bc0 &^ bc4)
- a[14] = bc4 ^ (bc1 &^ bc0)
-
- t = a[15] ^ d0
- bc1 = bits.RotateLeft64(t, 36)
- t = a[16] ^ d1
- bc2 = bits.RotateLeft64(t, 10)
- t = a[17] ^ d2
- bc3 = bits.RotateLeft64(t, 15)
- t = a[18] ^ d3
- bc4 = bits.RotateLeft64(t, 56)
- t = a[19] ^ d4
- bc0 = bits.RotateLeft64(t, 27)
- a[15] = bc0 ^ (bc2 &^ bc1)
- a[16] = bc1 ^ (bc3 &^ bc2)
- a[17] = bc2 ^ (bc4 &^ bc3)
- a[18] = bc3 ^ (bc0 &^ bc4)
- a[19] = bc4 ^ (bc1 &^ bc0)
-
- t = a[20] ^ d0
- bc3 = bits.RotateLeft64(t, 41)
- t = a[21] ^ d1
- bc4 = bits.RotateLeft64(t, 2)
- t = a[22] ^ d2
- bc0 = bits.RotateLeft64(t, 62)
- t = a[23] ^ d3
- bc1 = bits.RotateLeft64(t, 55)
- t = a[24] ^ d4
- bc2 = bits.RotateLeft64(t, 39)
- a[20] = bc0 ^ (bc2 &^ bc1)
- a[21] = bc1 ^ (bc3 &^ bc2)
- a[22] = bc2 ^ (bc4 &^ bc3)
- a[23] = bc3 ^ (bc0 &^ bc4)
- a[24] = bc4 ^ (bc1 &^ bc0)
- }
-}
diff --git a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go
deleted file mode 100644
index b908696be58..00000000000
--- a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build amd64 && !purego && gc
-
-package sha3
-
-// This function is implemented in keccakf_amd64.s.
-
-//go:noescape
-
-func keccakF1600(a *[25]uint64)
diff --git a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s
deleted file mode 100644
index 99e2f16e971..00000000000
--- a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s
+++ /dev/null
@@ -1,5419 +0,0 @@
-// Code generated by command: go run keccakf_amd64_asm.go -out ../keccakf_amd64.s -pkg sha3. DO NOT EDIT.
-
-//go:build amd64 && !purego && gc
-
-// func keccakF1600(a *[25]uint64)
-TEXT ·keccakF1600(SB), $200-8
- MOVQ a+0(FP), DI
-
- // Convert the user state into an internal state
- NOTQ 8(DI)
- NOTQ 16(DI)
- NOTQ 64(DI)
- NOTQ 96(DI)
- NOTQ 136(DI)
- NOTQ 160(DI)
-
- // Execute the KeccakF permutation
- MOVQ (DI), SI
- MOVQ 8(DI), BP
- MOVQ 32(DI), R15
- XORQ 40(DI), SI
- XORQ 48(DI), BP
- XORQ 72(DI), R15
- XORQ 80(DI), SI
- XORQ 88(DI), BP
- XORQ 112(DI), R15
- XORQ 120(DI), SI
- XORQ 128(DI), BP
- XORQ 152(DI), R15
- XORQ 160(DI), SI
- XORQ 168(DI), BP
- MOVQ 176(DI), DX
- MOVQ 184(DI), R8
- XORQ 192(DI), R15
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(DI), R12
- XORQ 56(DI), DX
- XORQ R15, BX
- XORQ 96(DI), R12
- XORQ 136(DI), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(DI), R13
- XORQ 64(DI), R8
- XORQ SI, CX
- XORQ 104(DI), R13
- XORQ 144(DI), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (DI), R10
- MOVQ 48(DI), R11
- XORQ R13, R9
- MOVQ 96(DI), R12
- MOVQ 144(DI), R13
- MOVQ 192(DI), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x0000000000000001, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (SP)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(SP)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(SP)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(SP)
- MOVQ R12, 8(SP)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(DI), R11
- XORQ R9, R11
- MOVQ 80(DI), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(DI), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(DI), R13
- MOVQ 176(DI), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(SP)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(SP)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(SP)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(SP)
-
- // Result k
- MOVQ 8(DI), R10
- MOVQ 56(DI), R11
- MOVQ 104(DI), R12
- MOVQ 152(DI), R13
- MOVQ 160(DI), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(SP)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(SP)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(SP)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(SP)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(DI), R11
- XORQ BX, R11
- MOVQ 88(DI), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(DI), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(DI), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(DI), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(SP)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(SP)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(SP)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(SP)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(SP)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(DI), R10
- MOVQ 64(DI), R11
- MOVQ 112(DI), R12
- XORQ DX, R10
- MOVQ 120(DI), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(DI), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(SP)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(SP)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(SP)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(SP)
- MOVQ R8, 184(SP)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(SP), R12
- XORQ 56(SP), DX
- XORQ R15, BX
- XORQ 96(SP), R12
- XORQ 136(SP), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(SP), R13
- XORQ 64(SP), R8
- XORQ SI, CX
- XORQ 104(SP), R13
- XORQ 144(SP), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (SP), R10
- MOVQ 48(SP), R11
- XORQ R13, R9
- MOVQ 96(SP), R12
- MOVQ 144(SP), R13
- MOVQ 192(SP), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x0000000000008082, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (DI)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(DI)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(DI)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(DI)
- MOVQ R12, 8(DI)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(SP), R11
- XORQ R9, R11
- MOVQ 80(SP), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(SP), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(SP), R13
- MOVQ 176(SP), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(DI)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(DI)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(DI)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(DI)
-
- // Result k
- MOVQ 8(SP), R10
- MOVQ 56(SP), R11
- MOVQ 104(SP), R12
- MOVQ 152(SP), R13
- MOVQ 160(SP), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(DI)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(DI)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(DI)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(DI)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(SP), R11
- XORQ BX, R11
- MOVQ 88(SP), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(SP), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(SP), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(SP), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(DI)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(DI)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(DI)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(DI)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(DI)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(SP), R10
- MOVQ 64(SP), R11
- MOVQ 112(SP), R12
- XORQ DX, R10
- MOVQ 120(SP), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(SP), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(DI)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(DI)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(DI)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(DI)
- MOVQ R8, 184(DI)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(DI), R12
- XORQ 56(DI), DX
- XORQ R15, BX
- XORQ 96(DI), R12
- XORQ 136(DI), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(DI), R13
- XORQ 64(DI), R8
- XORQ SI, CX
- XORQ 104(DI), R13
- XORQ 144(DI), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (DI), R10
- MOVQ 48(DI), R11
- XORQ R13, R9
- MOVQ 96(DI), R12
- MOVQ 144(DI), R13
- MOVQ 192(DI), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x800000000000808a, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (SP)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(SP)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(SP)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(SP)
- MOVQ R12, 8(SP)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(DI), R11
- XORQ R9, R11
- MOVQ 80(DI), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(DI), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(DI), R13
- MOVQ 176(DI), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(SP)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(SP)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(SP)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(SP)
-
- // Result k
- MOVQ 8(DI), R10
- MOVQ 56(DI), R11
- MOVQ 104(DI), R12
- MOVQ 152(DI), R13
- MOVQ 160(DI), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(SP)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(SP)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(SP)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(SP)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(DI), R11
- XORQ BX, R11
- MOVQ 88(DI), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(DI), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(DI), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(DI), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(SP)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(SP)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(SP)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(SP)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(SP)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(DI), R10
- MOVQ 64(DI), R11
- MOVQ 112(DI), R12
- XORQ DX, R10
- MOVQ 120(DI), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(DI), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(SP)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(SP)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(SP)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(SP)
- MOVQ R8, 184(SP)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(SP), R12
- XORQ 56(SP), DX
- XORQ R15, BX
- XORQ 96(SP), R12
- XORQ 136(SP), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(SP), R13
- XORQ 64(SP), R8
- XORQ SI, CX
- XORQ 104(SP), R13
- XORQ 144(SP), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (SP), R10
- MOVQ 48(SP), R11
- XORQ R13, R9
- MOVQ 96(SP), R12
- MOVQ 144(SP), R13
- MOVQ 192(SP), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x8000000080008000, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (DI)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(DI)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(DI)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(DI)
- MOVQ R12, 8(DI)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(SP), R11
- XORQ R9, R11
- MOVQ 80(SP), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(SP), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(SP), R13
- MOVQ 176(SP), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(DI)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(DI)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(DI)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(DI)
-
- // Result k
- MOVQ 8(SP), R10
- MOVQ 56(SP), R11
- MOVQ 104(SP), R12
- MOVQ 152(SP), R13
- MOVQ 160(SP), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(DI)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(DI)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(DI)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(DI)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(SP), R11
- XORQ BX, R11
- MOVQ 88(SP), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(SP), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(SP), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(SP), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(DI)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(DI)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(DI)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(DI)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(DI)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(SP), R10
- MOVQ 64(SP), R11
- MOVQ 112(SP), R12
- XORQ DX, R10
- MOVQ 120(SP), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(SP), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(DI)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(DI)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(DI)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(DI)
- MOVQ R8, 184(DI)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(DI), R12
- XORQ 56(DI), DX
- XORQ R15, BX
- XORQ 96(DI), R12
- XORQ 136(DI), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(DI), R13
- XORQ 64(DI), R8
- XORQ SI, CX
- XORQ 104(DI), R13
- XORQ 144(DI), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (DI), R10
- MOVQ 48(DI), R11
- XORQ R13, R9
- MOVQ 96(DI), R12
- MOVQ 144(DI), R13
- MOVQ 192(DI), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x000000000000808b, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (SP)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(SP)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(SP)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(SP)
- MOVQ R12, 8(SP)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(DI), R11
- XORQ R9, R11
- MOVQ 80(DI), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(DI), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(DI), R13
- MOVQ 176(DI), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(SP)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(SP)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(SP)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(SP)
-
- // Result k
- MOVQ 8(DI), R10
- MOVQ 56(DI), R11
- MOVQ 104(DI), R12
- MOVQ 152(DI), R13
- MOVQ 160(DI), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(SP)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(SP)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(SP)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(SP)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(DI), R11
- XORQ BX, R11
- MOVQ 88(DI), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(DI), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(DI), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(DI), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(SP)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(SP)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(SP)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(SP)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(SP)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(DI), R10
- MOVQ 64(DI), R11
- MOVQ 112(DI), R12
- XORQ DX, R10
- MOVQ 120(DI), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(DI), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(SP)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(SP)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(SP)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(SP)
- MOVQ R8, 184(SP)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(SP), R12
- XORQ 56(SP), DX
- XORQ R15, BX
- XORQ 96(SP), R12
- XORQ 136(SP), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(SP), R13
- XORQ 64(SP), R8
- XORQ SI, CX
- XORQ 104(SP), R13
- XORQ 144(SP), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (SP), R10
- MOVQ 48(SP), R11
- XORQ R13, R9
- MOVQ 96(SP), R12
- MOVQ 144(SP), R13
- MOVQ 192(SP), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x0000000080000001, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (DI)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(DI)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(DI)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(DI)
- MOVQ R12, 8(DI)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(SP), R11
- XORQ R9, R11
- MOVQ 80(SP), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(SP), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(SP), R13
- MOVQ 176(SP), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(DI)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(DI)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(DI)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(DI)
-
- // Result k
- MOVQ 8(SP), R10
- MOVQ 56(SP), R11
- MOVQ 104(SP), R12
- MOVQ 152(SP), R13
- MOVQ 160(SP), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(DI)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(DI)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(DI)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(DI)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(SP), R11
- XORQ BX, R11
- MOVQ 88(SP), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(SP), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(SP), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(SP), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(DI)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(DI)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(DI)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(DI)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(DI)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(SP), R10
- MOVQ 64(SP), R11
- MOVQ 112(SP), R12
- XORQ DX, R10
- MOVQ 120(SP), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(SP), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(DI)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(DI)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(DI)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(DI)
- MOVQ R8, 184(DI)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(DI), R12
- XORQ 56(DI), DX
- XORQ R15, BX
- XORQ 96(DI), R12
- XORQ 136(DI), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(DI), R13
- XORQ 64(DI), R8
- XORQ SI, CX
- XORQ 104(DI), R13
- XORQ 144(DI), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (DI), R10
- MOVQ 48(DI), R11
- XORQ R13, R9
- MOVQ 96(DI), R12
- MOVQ 144(DI), R13
- MOVQ 192(DI), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x8000000080008081, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (SP)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(SP)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(SP)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(SP)
- MOVQ R12, 8(SP)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(DI), R11
- XORQ R9, R11
- MOVQ 80(DI), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(DI), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(DI), R13
- MOVQ 176(DI), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(SP)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(SP)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(SP)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(SP)
-
- // Result k
- MOVQ 8(DI), R10
- MOVQ 56(DI), R11
- MOVQ 104(DI), R12
- MOVQ 152(DI), R13
- MOVQ 160(DI), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(SP)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(SP)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(SP)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(SP)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(DI), R11
- XORQ BX, R11
- MOVQ 88(DI), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(DI), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(DI), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(DI), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(SP)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(SP)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(SP)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(SP)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(SP)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(DI), R10
- MOVQ 64(DI), R11
- MOVQ 112(DI), R12
- XORQ DX, R10
- MOVQ 120(DI), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(DI), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(SP)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(SP)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(SP)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(SP)
- MOVQ R8, 184(SP)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(SP), R12
- XORQ 56(SP), DX
- XORQ R15, BX
- XORQ 96(SP), R12
- XORQ 136(SP), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(SP), R13
- XORQ 64(SP), R8
- XORQ SI, CX
- XORQ 104(SP), R13
- XORQ 144(SP), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (SP), R10
- MOVQ 48(SP), R11
- XORQ R13, R9
- MOVQ 96(SP), R12
- MOVQ 144(SP), R13
- MOVQ 192(SP), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x8000000000008009, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (DI)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(DI)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(DI)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(DI)
- MOVQ R12, 8(DI)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(SP), R11
- XORQ R9, R11
- MOVQ 80(SP), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(SP), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(SP), R13
- MOVQ 176(SP), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(DI)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(DI)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(DI)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(DI)
-
- // Result k
- MOVQ 8(SP), R10
- MOVQ 56(SP), R11
- MOVQ 104(SP), R12
- MOVQ 152(SP), R13
- MOVQ 160(SP), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(DI)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(DI)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(DI)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(DI)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(SP), R11
- XORQ BX, R11
- MOVQ 88(SP), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(SP), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(SP), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(SP), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(DI)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(DI)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(DI)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(DI)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(DI)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(SP), R10
- MOVQ 64(SP), R11
- MOVQ 112(SP), R12
- XORQ DX, R10
- MOVQ 120(SP), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(SP), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(DI)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(DI)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(DI)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(DI)
- MOVQ R8, 184(DI)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(DI), R12
- XORQ 56(DI), DX
- XORQ R15, BX
- XORQ 96(DI), R12
- XORQ 136(DI), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(DI), R13
- XORQ 64(DI), R8
- XORQ SI, CX
- XORQ 104(DI), R13
- XORQ 144(DI), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (DI), R10
- MOVQ 48(DI), R11
- XORQ R13, R9
- MOVQ 96(DI), R12
- MOVQ 144(DI), R13
- MOVQ 192(DI), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x000000000000008a, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (SP)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(SP)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(SP)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(SP)
- MOVQ R12, 8(SP)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(DI), R11
- XORQ R9, R11
- MOVQ 80(DI), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(DI), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(DI), R13
- MOVQ 176(DI), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(SP)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(SP)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(SP)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(SP)
-
- // Result k
- MOVQ 8(DI), R10
- MOVQ 56(DI), R11
- MOVQ 104(DI), R12
- MOVQ 152(DI), R13
- MOVQ 160(DI), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(SP)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(SP)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(SP)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(SP)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(DI), R11
- XORQ BX, R11
- MOVQ 88(DI), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(DI), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(DI), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(DI), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(SP)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(SP)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(SP)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(SP)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(SP)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(DI), R10
- MOVQ 64(DI), R11
- MOVQ 112(DI), R12
- XORQ DX, R10
- MOVQ 120(DI), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(DI), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(SP)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(SP)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(SP)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(SP)
- MOVQ R8, 184(SP)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(SP), R12
- XORQ 56(SP), DX
- XORQ R15, BX
- XORQ 96(SP), R12
- XORQ 136(SP), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(SP), R13
- XORQ 64(SP), R8
- XORQ SI, CX
- XORQ 104(SP), R13
- XORQ 144(SP), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (SP), R10
- MOVQ 48(SP), R11
- XORQ R13, R9
- MOVQ 96(SP), R12
- MOVQ 144(SP), R13
- MOVQ 192(SP), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x0000000000000088, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (DI)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(DI)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(DI)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(DI)
- MOVQ R12, 8(DI)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(SP), R11
- XORQ R9, R11
- MOVQ 80(SP), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(SP), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(SP), R13
- MOVQ 176(SP), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(DI)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(DI)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(DI)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(DI)
-
- // Result k
- MOVQ 8(SP), R10
- MOVQ 56(SP), R11
- MOVQ 104(SP), R12
- MOVQ 152(SP), R13
- MOVQ 160(SP), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(DI)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(DI)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(DI)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(DI)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(SP), R11
- XORQ BX, R11
- MOVQ 88(SP), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(SP), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(SP), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(SP), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(DI)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(DI)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(DI)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(DI)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(DI)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(SP), R10
- MOVQ 64(SP), R11
- MOVQ 112(SP), R12
- XORQ DX, R10
- MOVQ 120(SP), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(SP), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(DI)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(DI)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(DI)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(DI)
- MOVQ R8, 184(DI)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(DI), R12
- XORQ 56(DI), DX
- XORQ R15, BX
- XORQ 96(DI), R12
- XORQ 136(DI), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(DI), R13
- XORQ 64(DI), R8
- XORQ SI, CX
- XORQ 104(DI), R13
- XORQ 144(DI), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (DI), R10
- MOVQ 48(DI), R11
- XORQ R13, R9
- MOVQ 96(DI), R12
- MOVQ 144(DI), R13
- MOVQ 192(DI), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x0000000080008009, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (SP)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(SP)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(SP)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(SP)
- MOVQ R12, 8(SP)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(DI), R11
- XORQ R9, R11
- MOVQ 80(DI), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(DI), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(DI), R13
- MOVQ 176(DI), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(SP)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(SP)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(SP)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(SP)
-
- // Result k
- MOVQ 8(DI), R10
- MOVQ 56(DI), R11
- MOVQ 104(DI), R12
- MOVQ 152(DI), R13
- MOVQ 160(DI), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(SP)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(SP)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(SP)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(SP)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(DI), R11
- XORQ BX, R11
- MOVQ 88(DI), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(DI), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(DI), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(DI), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(SP)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(SP)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(SP)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(SP)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(SP)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(DI), R10
- MOVQ 64(DI), R11
- MOVQ 112(DI), R12
- XORQ DX, R10
- MOVQ 120(DI), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(DI), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(SP)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(SP)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(SP)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(SP)
- MOVQ R8, 184(SP)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(SP), R12
- XORQ 56(SP), DX
- XORQ R15, BX
- XORQ 96(SP), R12
- XORQ 136(SP), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(SP), R13
- XORQ 64(SP), R8
- XORQ SI, CX
- XORQ 104(SP), R13
- XORQ 144(SP), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (SP), R10
- MOVQ 48(SP), R11
- XORQ R13, R9
- MOVQ 96(SP), R12
- MOVQ 144(SP), R13
- MOVQ 192(SP), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x000000008000000a, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (DI)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(DI)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(DI)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(DI)
- MOVQ R12, 8(DI)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(SP), R11
- XORQ R9, R11
- MOVQ 80(SP), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(SP), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(SP), R13
- MOVQ 176(SP), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(DI)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(DI)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(DI)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(DI)
-
- // Result k
- MOVQ 8(SP), R10
- MOVQ 56(SP), R11
- MOVQ 104(SP), R12
- MOVQ 152(SP), R13
- MOVQ 160(SP), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(DI)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(DI)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(DI)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(DI)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(SP), R11
- XORQ BX, R11
- MOVQ 88(SP), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(SP), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(SP), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(SP), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(DI)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(DI)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(DI)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(DI)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(DI)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(SP), R10
- MOVQ 64(SP), R11
- MOVQ 112(SP), R12
- XORQ DX, R10
- MOVQ 120(SP), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(SP), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(DI)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(DI)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(DI)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(DI)
- MOVQ R8, 184(DI)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(DI), R12
- XORQ 56(DI), DX
- XORQ R15, BX
- XORQ 96(DI), R12
- XORQ 136(DI), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(DI), R13
- XORQ 64(DI), R8
- XORQ SI, CX
- XORQ 104(DI), R13
- XORQ 144(DI), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (DI), R10
- MOVQ 48(DI), R11
- XORQ R13, R9
- MOVQ 96(DI), R12
- MOVQ 144(DI), R13
- MOVQ 192(DI), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x000000008000808b, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (SP)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(SP)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(SP)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(SP)
- MOVQ R12, 8(SP)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(DI), R11
- XORQ R9, R11
- MOVQ 80(DI), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(DI), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(DI), R13
- MOVQ 176(DI), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(SP)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(SP)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(SP)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(SP)
-
- // Result k
- MOVQ 8(DI), R10
- MOVQ 56(DI), R11
- MOVQ 104(DI), R12
- MOVQ 152(DI), R13
- MOVQ 160(DI), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(SP)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(SP)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(SP)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(SP)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(DI), R11
- XORQ BX, R11
- MOVQ 88(DI), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(DI), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(DI), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(DI), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(SP)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(SP)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(SP)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(SP)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(SP)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(DI), R10
- MOVQ 64(DI), R11
- MOVQ 112(DI), R12
- XORQ DX, R10
- MOVQ 120(DI), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(DI), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(SP)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(SP)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(SP)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(SP)
- MOVQ R8, 184(SP)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(SP), R12
- XORQ 56(SP), DX
- XORQ R15, BX
- XORQ 96(SP), R12
- XORQ 136(SP), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(SP), R13
- XORQ 64(SP), R8
- XORQ SI, CX
- XORQ 104(SP), R13
- XORQ 144(SP), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (SP), R10
- MOVQ 48(SP), R11
- XORQ R13, R9
- MOVQ 96(SP), R12
- MOVQ 144(SP), R13
- MOVQ 192(SP), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x800000000000008b, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (DI)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(DI)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(DI)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(DI)
- MOVQ R12, 8(DI)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(SP), R11
- XORQ R9, R11
- MOVQ 80(SP), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(SP), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(SP), R13
- MOVQ 176(SP), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(DI)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(DI)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(DI)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(DI)
-
- // Result k
- MOVQ 8(SP), R10
- MOVQ 56(SP), R11
- MOVQ 104(SP), R12
- MOVQ 152(SP), R13
- MOVQ 160(SP), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(DI)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(DI)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(DI)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(DI)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(SP), R11
- XORQ BX, R11
- MOVQ 88(SP), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(SP), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(SP), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(SP), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(DI)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(DI)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(DI)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(DI)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(DI)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(SP), R10
- MOVQ 64(SP), R11
- MOVQ 112(SP), R12
- XORQ DX, R10
- MOVQ 120(SP), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(SP), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(DI)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(DI)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(DI)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(DI)
- MOVQ R8, 184(DI)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(DI), R12
- XORQ 56(DI), DX
- XORQ R15, BX
- XORQ 96(DI), R12
- XORQ 136(DI), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(DI), R13
- XORQ 64(DI), R8
- XORQ SI, CX
- XORQ 104(DI), R13
- XORQ 144(DI), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (DI), R10
- MOVQ 48(DI), R11
- XORQ R13, R9
- MOVQ 96(DI), R12
- MOVQ 144(DI), R13
- MOVQ 192(DI), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x8000000000008089, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (SP)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(SP)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(SP)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(SP)
- MOVQ R12, 8(SP)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(DI), R11
- XORQ R9, R11
- MOVQ 80(DI), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(DI), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(DI), R13
- MOVQ 176(DI), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(SP)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(SP)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(SP)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(SP)
-
- // Result k
- MOVQ 8(DI), R10
- MOVQ 56(DI), R11
- MOVQ 104(DI), R12
- MOVQ 152(DI), R13
- MOVQ 160(DI), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(SP)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(SP)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(SP)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(SP)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(DI), R11
- XORQ BX, R11
- MOVQ 88(DI), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(DI), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(DI), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(DI), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(SP)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(SP)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(SP)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(SP)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(SP)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(DI), R10
- MOVQ 64(DI), R11
- MOVQ 112(DI), R12
- XORQ DX, R10
- MOVQ 120(DI), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(DI), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(SP)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(SP)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(SP)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(SP)
- MOVQ R8, 184(SP)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(SP), R12
- XORQ 56(SP), DX
- XORQ R15, BX
- XORQ 96(SP), R12
- XORQ 136(SP), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(SP), R13
- XORQ 64(SP), R8
- XORQ SI, CX
- XORQ 104(SP), R13
- XORQ 144(SP), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (SP), R10
- MOVQ 48(SP), R11
- XORQ R13, R9
- MOVQ 96(SP), R12
- MOVQ 144(SP), R13
- MOVQ 192(SP), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x8000000000008003, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (DI)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(DI)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(DI)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(DI)
- MOVQ R12, 8(DI)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(SP), R11
- XORQ R9, R11
- MOVQ 80(SP), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(SP), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(SP), R13
- MOVQ 176(SP), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(DI)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(DI)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(DI)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(DI)
-
- // Result k
- MOVQ 8(SP), R10
- MOVQ 56(SP), R11
- MOVQ 104(SP), R12
- MOVQ 152(SP), R13
- MOVQ 160(SP), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(DI)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(DI)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(DI)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(DI)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(SP), R11
- XORQ BX, R11
- MOVQ 88(SP), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(SP), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(SP), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(SP), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(DI)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(DI)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(DI)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(DI)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(DI)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(SP), R10
- MOVQ 64(SP), R11
- MOVQ 112(SP), R12
- XORQ DX, R10
- MOVQ 120(SP), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(SP), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(DI)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(DI)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(DI)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(DI)
- MOVQ R8, 184(DI)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(DI), R12
- XORQ 56(DI), DX
- XORQ R15, BX
- XORQ 96(DI), R12
- XORQ 136(DI), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(DI), R13
- XORQ 64(DI), R8
- XORQ SI, CX
- XORQ 104(DI), R13
- XORQ 144(DI), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (DI), R10
- MOVQ 48(DI), R11
- XORQ R13, R9
- MOVQ 96(DI), R12
- MOVQ 144(DI), R13
- MOVQ 192(DI), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x8000000000008002, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (SP)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(SP)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(SP)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(SP)
- MOVQ R12, 8(SP)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(DI), R11
- XORQ R9, R11
- MOVQ 80(DI), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(DI), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(DI), R13
- MOVQ 176(DI), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(SP)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(SP)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(SP)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(SP)
-
- // Result k
- MOVQ 8(DI), R10
- MOVQ 56(DI), R11
- MOVQ 104(DI), R12
- MOVQ 152(DI), R13
- MOVQ 160(DI), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(SP)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(SP)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(SP)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(SP)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(DI), R11
- XORQ BX, R11
- MOVQ 88(DI), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(DI), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(DI), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(DI), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(SP)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(SP)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(SP)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(SP)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(SP)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(DI), R10
- MOVQ 64(DI), R11
- MOVQ 112(DI), R12
- XORQ DX, R10
- MOVQ 120(DI), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(DI), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(SP)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(SP)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(SP)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(SP)
- MOVQ R8, 184(SP)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(SP), R12
- XORQ 56(SP), DX
- XORQ R15, BX
- XORQ 96(SP), R12
- XORQ 136(SP), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(SP), R13
- XORQ 64(SP), R8
- XORQ SI, CX
- XORQ 104(SP), R13
- XORQ 144(SP), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (SP), R10
- MOVQ 48(SP), R11
- XORQ R13, R9
- MOVQ 96(SP), R12
- MOVQ 144(SP), R13
- MOVQ 192(SP), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x8000000000000080, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (DI)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(DI)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(DI)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(DI)
- MOVQ R12, 8(DI)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(SP), R11
- XORQ R9, R11
- MOVQ 80(SP), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(SP), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(SP), R13
- MOVQ 176(SP), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(DI)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(DI)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(DI)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(DI)
-
- // Result k
- MOVQ 8(SP), R10
- MOVQ 56(SP), R11
- MOVQ 104(SP), R12
- MOVQ 152(SP), R13
- MOVQ 160(SP), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(DI)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(DI)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(DI)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(DI)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(SP), R11
- XORQ BX, R11
- MOVQ 88(SP), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(SP), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(SP), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(SP), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(DI)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(DI)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(DI)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(DI)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(DI)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(SP), R10
- MOVQ 64(SP), R11
- MOVQ 112(SP), R12
- XORQ DX, R10
- MOVQ 120(SP), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(SP), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(DI)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(DI)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(DI)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(DI)
- MOVQ R8, 184(DI)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(DI), R12
- XORQ 56(DI), DX
- XORQ R15, BX
- XORQ 96(DI), R12
- XORQ 136(DI), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(DI), R13
- XORQ 64(DI), R8
- XORQ SI, CX
- XORQ 104(DI), R13
- XORQ 144(DI), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (DI), R10
- MOVQ 48(DI), R11
- XORQ R13, R9
- MOVQ 96(DI), R12
- MOVQ 144(DI), R13
- MOVQ 192(DI), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x000000000000800a, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (SP)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(SP)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(SP)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(SP)
- MOVQ R12, 8(SP)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(DI), R11
- XORQ R9, R11
- MOVQ 80(DI), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(DI), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(DI), R13
- MOVQ 176(DI), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(SP)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(SP)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(SP)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(SP)
-
- // Result k
- MOVQ 8(DI), R10
- MOVQ 56(DI), R11
- MOVQ 104(DI), R12
- MOVQ 152(DI), R13
- MOVQ 160(DI), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(SP)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(SP)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(SP)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(SP)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(DI), R11
- XORQ BX, R11
- MOVQ 88(DI), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(DI), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(DI), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(DI), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(SP)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(SP)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(SP)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(SP)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(SP)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(DI), R10
- MOVQ 64(DI), R11
- MOVQ 112(DI), R12
- XORQ DX, R10
- MOVQ 120(DI), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(DI), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(SP)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(SP)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(SP)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(SP)
- MOVQ R8, 184(SP)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(SP), R12
- XORQ 56(SP), DX
- XORQ R15, BX
- XORQ 96(SP), R12
- XORQ 136(SP), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(SP), R13
- XORQ 64(SP), R8
- XORQ SI, CX
- XORQ 104(SP), R13
- XORQ 144(SP), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (SP), R10
- MOVQ 48(SP), R11
- XORQ R13, R9
- MOVQ 96(SP), R12
- MOVQ 144(SP), R13
- MOVQ 192(SP), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x800000008000000a, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (DI)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(DI)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(DI)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(DI)
- MOVQ R12, 8(DI)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(SP), R11
- XORQ R9, R11
- MOVQ 80(SP), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(SP), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(SP), R13
- MOVQ 176(SP), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(DI)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(DI)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(DI)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(DI)
-
- // Result k
- MOVQ 8(SP), R10
- MOVQ 56(SP), R11
- MOVQ 104(SP), R12
- MOVQ 152(SP), R13
- MOVQ 160(SP), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(DI)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(DI)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(DI)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(DI)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(SP), R11
- XORQ BX, R11
- MOVQ 88(SP), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(SP), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(SP), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(SP), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(DI)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(DI)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(DI)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(DI)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(DI)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(SP), R10
- MOVQ 64(SP), R11
- MOVQ 112(SP), R12
- XORQ DX, R10
- MOVQ 120(SP), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(SP), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(DI)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(DI)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(DI)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(DI)
- MOVQ R8, 184(DI)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(DI), R12
- XORQ 56(DI), DX
- XORQ R15, BX
- XORQ 96(DI), R12
- XORQ 136(DI), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(DI), R13
- XORQ 64(DI), R8
- XORQ SI, CX
- XORQ 104(DI), R13
- XORQ 144(DI), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (DI), R10
- MOVQ 48(DI), R11
- XORQ R13, R9
- MOVQ 96(DI), R12
- MOVQ 144(DI), R13
- MOVQ 192(DI), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x8000000080008081, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (SP)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(SP)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(SP)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(SP)
- MOVQ R12, 8(SP)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(DI), R11
- XORQ R9, R11
- MOVQ 80(DI), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(DI), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(DI), R13
- MOVQ 176(DI), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(SP)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(SP)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(SP)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(SP)
-
- // Result k
- MOVQ 8(DI), R10
- MOVQ 56(DI), R11
- MOVQ 104(DI), R12
- MOVQ 152(DI), R13
- MOVQ 160(DI), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(SP)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(SP)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(SP)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(SP)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(DI), R11
- XORQ BX, R11
- MOVQ 88(DI), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(DI), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(DI), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(DI), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(SP)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(SP)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(SP)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(SP)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(SP)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(DI), R10
- MOVQ 64(DI), R11
- MOVQ 112(DI), R12
- XORQ DX, R10
- MOVQ 120(DI), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(DI), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(SP)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(SP)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(SP)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(SP)
- MOVQ R8, 184(SP)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(SP), R12
- XORQ 56(SP), DX
- XORQ R15, BX
- XORQ 96(SP), R12
- XORQ 136(SP), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(SP), R13
- XORQ 64(SP), R8
- XORQ SI, CX
- XORQ 104(SP), R13
- XORQ 144(SP), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (SP), R10
- MOVQ 48(SP), R11
- XORQ R13, R9
- MOVQ 96(SP), R12
- MOVQ 144(SP), R13
- MOVQ 192(SP), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x8000000000008080, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (DI)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(DI)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(DI)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(DI)
- MOVQ R12, 8(DI)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(SP), R11
- XORQ R9, R11
- MOVQ 80(SP), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(SP), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(SP), R13
- MOVQ 176(SP), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(DI)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(DI)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(DI)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(DI)
-
- // Result k
- MOVQ 8(SP), R10
- MOVQ 56(SP), R11
- MOVQ 104(SP), R12
- MOVQ 152(SP), R13
- MOVQ 160(SP), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(DI)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(DI)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(DI)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(DI)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(SP), R11
- XORQ BX, R11
- MOVQ 88(SP), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(SP), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(SP), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(SP), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(DI)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(DI)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(DI)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(DI)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(DI)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(SP), R10
- MOVQ 64(SP), R11
- MOVQ 112(SP), R12
- XORQ DX, R10
- MOVQ 120(SP), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(SP), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(DI)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(DI)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(DI)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(DI)
- MOVQ R8, 184(DI)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(DI), R12
- XORQ 56(DI), DX
- XORQ R15, BX
- XORQ 96(DI), R12
- XORQ 136(DI), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(DI), R13
- XORQ 64(DI), R8
- XORQ SI, CX
- XORQ 104(DI), R13
- XORQ 144(DI), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (DI), R10
- MOVQ 48(DI), R11
- XORQ R13, R9
- MOVQ 96(DI), R12
- MOVQ 144(DI), R13
- MOVQ 192(DI), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x0000000080000001, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (SP)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(SP)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(SP)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(SP)
- MOVQ R12, 8(SP)
- MOVQ R12, BP
-
- // Result g
- MOVQ 72(DI), R11
- XORQ R9, R11
- MOVQ 80(DI), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(DI), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(DI), R13
- MOVQ 176(DI), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(SP)
- XORQ AX, SI
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(SP)
- XORQ AX, BP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(SP)
- NOTQ R14
- XORQ R10, R15
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(SP)
-
- // Result k
- MOVQ 8(DI), R10
- MOVQ 56(DI), R11
- MOVQ 104(DI), R12
- MOVQ 152(DI), R13
- MOVQ 160(DI), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(SP)
- XORQ AX, SI
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(SP)
- XORQ AX, BP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(SP)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(SP)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(SP)
- XORQ R10, R15
-
- // Result m
- MOVQ 40(DI), R11
- XORQ BX, R11
- MOVQ 88(DI), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(DI), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(DI), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(DI), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(SP)
- XORQ AX, SI
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(SP)
- XORQ AX, BP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(SP)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(SP)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(SP)
- XORQ R11, R15
-
- // Result s
- MOVQ 16(DI), R10
- MOVQ 64(DI), R11
- MOVQ 112(DI), R12
- XORQ DX, R10
- MOVQ 120(DI), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(DI), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(SP)
- ROLQ $0x27, R12
- XORQ R9, R15
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(SP)
- XORQ BX, SI
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(SP)
- XORQ CX, BP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(SP)
- MOVQ R8, 184(SP)
-
- // Prepare round
- MOVQ BP, BX
- ROLQ $0x01, BX
- MOVQ 16(SP), R12
- XORQ 56(SP), DX
- XORQ R15, BX
- XORQ 96(SP), R12
- XORQ 136(SP), DX
- XORQ DX, R12
- MOVQ R12, CX
- ROLQ $0x01, CX
- MOVQ 24(SP), R13
- XORQ 64(SP), R8
- XORQ SI, CX
- XORQ 104(SP), R13
- XORQ 144(SP), R8
- XORQ R8, R13
- MOVQ R13, DX
- ROLQ $0x01, DX
- MOVQ R15, R8
- XORQ BP, DX
- ROLQ $0x01, R8
- MOVQ SI, R9
- XORQ R12, R8
- ROLQ $0x01, R9
-
- // Result b
- MOVQ (SP), R10
- MOVQ 48(SP), R11
- XORQ R13, R9
- MOVQ 96(SP), R12
- MOVQ 144(SP), R13
- MOVQ 192(SP), R14
- XORQ CX, R11
- ROLQ $0x2c, R11
- XORQ DX, R12
- XORQ BX, R10
- ROLQ $0x2b, R12
- MOVQ R11, SI
- MOVQ $0x8000000080008008, AX
- ORQ R12, SI
- XORQ R10, AX
- XORQ AX, SI
- MOVQ SI, (DI)
- XORQ R9, R14
- ROLQ $0x0e, R14
- MOVQ R10, R15
- ANDQ R11, R15
- XORQ R14, R15
- MOVQ R15, 32(DI)
- XORQ R8, R13
- ROLQ $0x15, R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 16(DI)
- NOTQ R12
- ORQ R10, R14
- ORQ R13, R12
- XORQ R13, R14
- XORQ R11, R12
- MOVQ R14, 24(DI)
- MOVQ R12, 8(DI)
- NOP
-
- // Result g
- MOVQ 72(SP), R11
- XORQ R9, R11
- MOVQ 80(SP), R12
- ROLQ $0x14, R11
- XORQ BX, R12
- ROLQ $0x03, R12
- MOVQ 24(SP), R10
- MOVQ R11, AX
- ORQ R12, AX
- XORQ R8, R10
- MOVQ 128(SP), R13
- MOVQ 176(SP), R14
- ROLQ $0x1c, R10
- XORQ R10, AX
- MOVQ AX, 40(DI)
- NOP
- XORQ CX, R13
- ROLQ $0x2d, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 48(DI)
- NOP
- XORQ DX, R14
- ROLQ $0x3d, R14
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 64(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 72(DI)
- NOTQ R14
- NOP
- ORQ R14, R13
- XORQ R12, R13
- MOVQ R13, 56(DI)
-
- // Result k
- MOVQ 8(SP), R10
- MOVQ 56(SP), R11
- MOVQ 104(SP), R12
- MOVQ 152(SP), R13
- MOVQ 160(SP), R14
- XORQ DX, R11
- ROLQ $0x06, R11
- XORQ R8, R12
- ROLQ $0x19, R12
- MOVQ R11, AX
- ORQ R12, AX
- XORQ CX, R10
- ROLQ $0x01, R10
- XORQ R10, AX
- MOVQ AX, 80(DI)
- NOP
- XORQ R9, R13
- ROLQ $0x08, R13
- MOVQ R12, AX
- ANDQ R13, AX
- XORQ R11, AX
- MOVQ AX, 88(DI)
- NOP
- XORQ BX, R14
- ROLQ $0x12, R14
- NOTQ R13
- MOVQ R13, AX
- ANDQ R14, AX
- XORQ R12, AX
- MOVQ AX, 96(DI)
- MOVQ R14, AX
- ORQ R10, AX
- XORQ R13, AX
- MOVQ AX, 104(DI)
- ANDQ R11, R10
- XORQ R14, R10
- MOVQ R10, 112(DI)
- NOP
-
- // Result m
- MOVQ 40(SP), R11
- XORQ BX, R11
- MOVQ 88(SP), R12
- ROLQ $0x24, R11
- XORQ CX, R12
- MOVQ 32(SP), R10
- ROLQ $0x0a, R12
- MOVQ R11, AX
- MOVQ 136(SP), R13
- ANDQ R12, AX
- XORQ R9, R10
- MOVQ 184(SP), R14
- ROLQ $0x1b, R10
- XORQ R10, AX
- MOVQ AX, 120(DI)
- NOP
- XORQ DX, R13
- ROLQ $0x0f, R13
- MOVQ R12, AX
- ORQ R13, AX
- XORQ R11, AX
- MOVQ AX, 128(DI)
- NOP
- XORQ R8, R14
- ROLQ $0x38, R14
- NOTQ R13
- MOVQ R13, AX
- ORQ R14, AX
- XORQ R12, AX
- MOVQ AX, 136(DI)
- ORQ R10, R11
- XORQ R14, R11
- MOVQ R11, 152(DI)
- ANDQ R10, R14
- XORQ R13, R14
- MOVQ R14, 144(DI)
- NOP
-
- // Result s
- MOVQ 16(SP), R10
- MOVQ 64(SP), R11
- MOVQ 112(SP), R12
- XORQ DX, R10
- MOVQ 120(SP), R13
- ROLQ $0x3e, R10
- XORQ R8, R11
- MOVQ 168(SP), R14
- ROLQ $0x37, R11
- XORQ R9, R12
- MOVQ R10, R9
- XORQ CX, R14
- ROLQ $0x02, R14
- ANDQ R11, R9
- XORQ R14, R9
- MOVQ R9, 192(DI)
- ROLQ $0x27, R12
- NOP
- NOTQ R11
- XORQ BX, R13
- MOVQ R11, BX
- ANDQ R12, BX
- XORQ R10, BX
- MOVQ BX, 160(DI)
- NOP
- ROLQ $0x29, R13
- MOVQ R12, CX
- ORQ R13, CX
- XORQ R11, CX
- MOVQ CX, 168(DI)
- NOP
- MOVQ R13, DX
- MOVQ R14, R8
- ANDQ R14, DX
- ORQ R10, R8
- XORQ R12, DX
- XORQ R13, R8
- MOVQ DX, 176(DI)
- MOVQ R8, 184(DI)
-
- // Revert the internal state to the user state
- NOTQ 8(DI)
- NOTQ 16(DI)
- NOTQ 64(DI)
- NOTQ 96(DI)
- NOTQ 136(DI)
- NOTQ 160(DI)
- RET
diff --git a/vendor/golang.org/x/crypto/sha3/sha3.go b/vendor/golang.org/x/crypto/sha3/sha3.go
deleted file mode 100644
index afedde5abf1..00000000000
--- a/vendor/golang.org/x/crypto/sha3/sha3.go
+++ /dev/null
@@ -1,185 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sha3
-
-// spongeDirection indicates the direction bytes are flowing through the sponge.
-type spongeDirection int
-
-const (
- // spongeAbsorbing indicates that the sponge is absorbing input.
- spongeAbsorbing spongeDirection = iota
- // spongeSqueezing indicates that the sponge is being squeezed.
- spongeSqueezing
-)
-
-const (
- // maxRate is the maximum size of the internal buffer. SHAKE-256
- // currently needs the largest buffer.
- maxRate = 168
-)
-
-type state struct {
- // Generic sponge components.
- a [25]uint64 // main state of the hash
- rate int // the number of bytes of state to use
-
- // dsbyte contains the "domain separation" bits and the first bit of
- // the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the
- // SHA-3 and SHAKE functions by appending bitstrings to the message.
- // Using a little-endian bit-ordering convention, these are "01" for SHA-3
- // and "1111" for SHAKE, or 00000010b and 00001111b, respectively. Then the
- // padding rule from section 5.1 is applied to pad the message to a multiple
- // of the rate, which involves adding a "1" bit, zero or more "0" bits, and
- // a final "1" bit. We merge the first "1" bit from the padding into dsbyte,
- // giving 00000110b (0x06) and 00011111b (0x1f).
- // [1] http://csrc.nist.gov/publications/drafts/fips-202/fips_202_draft.pdf
- // "Draft FIPS 202: SHA-3 Standard: Permutation-Based Hash and
- // Extendable-Output Functions (May 2014)"
- dsbyte byte
-
- i, n int // storage[i:n] is the buffer, i is only used while squeezing
- storage [maxRate]byte
-
- // Specific to SHA-3 and SHAKE.
- outputLen int // the default output size in bytes
- state spongeDirection // whether the sponge is absorbing or squeezing
-}
-
-// BlockSize returns the rate of sponge underlying this hash function.
-func (d *state) BlockSize() int { return d.rate }
-
-// Size returns the output size of the hash function in bytes.
-func (d *state) Size() int { return d.outputLen }
-
-// Reset clears the internal state by zeroing the sponge state and
-// the buffer indexes, and setting Sponge.state to absorbing.
-func (d *state) Reset() {
- // Zero the permutation's state.
- for i := range d.a {
- d.a[i] = 0
- }
- d.state = spongeAbsorbing
- d.i, d.n = 0, 0
-}
-
-func (d *state) clone() *state {
- ret := *d
- return &ret
-}
-
-// permute applies the KeccakF-1600 permutation. It handles
-// any input-output buffering.
-func (d *state) permute() {
- switch d.state {
- case spongeAbsorbing:
- // If we're absorbing, we need to xor the input into the state
- // before applying the permutation.
- xorIn(d, d.storage[:d.rate])
- d.n = 0
- keccakF1600(&d.a)
- case spongeSqueezing:
- // If we're squeezing, we need to apply the permutation before
- // copying more output.
- keccakF1600(&d.a)
- d.i = 0
- copyOut(d, d.storage[:d.rate])
- }
-}
-
-// pads appends the domain separation bits in dsbyte, applies
-// the multi-bitrate 10..1 padding rule, and permutes the state.
-func (d *state) padAndPermute() {
- // Pad with this instance's domain-separator bits. We know that there's
- // at least one byte of space in d.buf because, if it were full,
- // permute would have been called to empty it. dsbyte also contains the
- // first one bit for the padding. See the comment in the state struct.
- d.storage[d.n] = d.dsbyte
- d.n++
- for d.n < d.rate {
- d.storage[d.n] = 0
- d.n++
- }
- // This adds the final one bit for the padding. Because of the way that
- // bits are numbered from the LSB upwards, the final bit is the MSB of
- // the last byte.
- d.storage[d.rate-1] ^= 0x80
- // Apply the permutation
- d.permute()
- d.state = spongeSqueezing
- d.n = d.rate
- copyOut(d, d.storage[:d.rate])
-}
-
-// Write absorbs more data into the hash's state. It panics if any
-// output has already been read.
-func (d *state) Write(p []byte) (written int, err error) {
- if d.state != spongeAbsorbing {
- panic("sha3: Write after Read")
- }
- written = len(p)
-
- for len(p) > 0 {
- if d.n == 0 && len(p) >= d.rate {
- // The fast path; absorb a full "rate" bytes of input and apply the permutation.
- xorIn(d, p[:d.rate])
- p = p[d.rate:]
- keccakF1600(&d.a)
- } else {
- // The slow path; buffer the input until we can fill the sponge, and then xor it in.
- todo := d.rate - d.n
- if todo > len(p) {
- todo = len(p)
- }
- d.n += copy(d.storage[d.n:], p[:todo])
- p = p[todo:]
-
- // If the sponge is full, apply the permutation.
- if d.n == d.rate {
- d.permute()
- }
- }
- }
-
- return
-}
-
-// Read squeezes an arbitrary number of bytes from the sponge.
-func (d *state) Read(out []byte) (n int, err error) {
- // If we're still absorbing, pad and apply the permutation.
- if d.state == spongeAbsorbing {
- d.padAndPermute()
- }
-
- n = len(out)
-
- // Now, do the squeezing.
- for len(out) > 0 {
- n := copy(out, d.storage[d.i:d.n])
- d.i += n
- out = out[n:]
-
- // Apply the permutation if we've squeezed the sponge dry.
- if d.i == d.rate {
- d.permute()
- }
- }
-
- return
-}
-
-// Sum applies padding to the hash state and then squeezes out the desired
-// number of output bytes. It panics if any output has already been read.
-func (d *state) Sum(in []byte) []byte {
- if d.state != spongeAbsorbing {
- panic("sha3: Sum after Read")
- }
-
- // Make a copy of the original hash so that caller can keep writing
- // and summing.
- dup := d.clone()
- hash := make([]byte, dup.outputLen, 64) // explicit cap to allow stack allocation
- dup.Read(hash)
- return append(in, hash...)
-}
diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.go b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go
deleted file mode 100644
index 00d8034ae62..00000000000
--- a/vendor/golang.org/x/crypto/sha3/sha3_s390x.go
+++ /dev/null
@@ -1,303 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gc && !purego
-
-package sha3
-
-// This file contains code for using the 'compute intermediate
-// message digest' (KIMD) and 'compute last message digest' (KLMD)
-// instructions to compute SHA-3 and SHAKE hashes on IBM Z.
-
-import (
- "hash"
-
- "golang.org/x/sys/cpu"
-)
-
-// codes represent 7-bit KIMD/KLMD function codes as defined in
-// the Principles of Operation.
-type code uint64
-
-const (
- // function codes for KIMD/KLMD
- sha3_224 code = 32
- sha3_256 = 33
- sha3_384 = 34
- sha3_512 = 35
- shake_128 = 36
- shake_256 = 37
- nopad = 0x100
-)
-
-// kimd is a wrapper for the 'compute intermediate message digest' instruction.
-// src must be a multiple of the rate for the given function code.
-//
-//go:noescape
-func kimd(function code, chain *[200]byte, src []byte)
-
-// klmd is a wrapper for the 'compute last message digest' instruction.
-// src padding is handled by the instruction.
-//
-//go:noescape
-func klmd(function code, chain *[200]byte, dst, src []byte)
-
-type asmState struct {
- a [200]byte // 1600 bit state
- buf []byte // care must be taken to ensure cap(buf) is a multiple of rate
- rate int // equivalent to block size
- storage [3072]byte // underlying storage for buf
- outputLen int // output length for full security
- function code // KIMD/KLMD function code
- state spongeDirection // whether the sponge is absorbing or squeezing
-}
-
-func newAsmState(function code) *asmState {
- var s asmState
- s.function = function
- switch function {
- case sha3_224:
- s.rate = 144
- s.outputLen = 28
- case sha3_256:
- s.rate = 136
- s.outputLen = 32
- case sha3_384:
- s.rate = 104
- s.outputLen = 48
- case sha3_512:
- s.rate = 72
- s.outputLen = 64
- case shake_128:
- s.rate = 168
- s.outputLen = 32
- case shake_256:
- s.rate = 136
- s.outputLen = 64
- default:
- panic("sha3: unrecognized function code")
- }
-
- // limit s.buf size to a multiple of s.rate
- s.resetBuf()
- return &s
-}
-
-func (s *asmState) clone() *asmState {
- c := *s
- c.buf = c.storage[:len(s.buf):cap(s.buf)]
- return &c
-}
-
-// copyIntoBuf copies b into buf. It will panic if there is not enough space to
-// store all of b.
-func (s *asmState) copyIntoBuf(b []byte) {
- bufLen := len(s.buf)
- s.buf = s.buf[:len(s.buf)+len(b)]
- copy(s.buf[bufLen:], b)
-}
-
-// resetBuf points buf at storage, sets the length to 0 and sets cap to be a
-// multiple of the rate.
-func (s *asmState) resetBuf() {
- max := (cap(s.storage) / s.rate) * s.rate
- s.buf = s.storage[:0:max]
-}
-
-// Write (via the embedded io.Writer interface) adds more data to the running hash.
-// It never returns an error.
-func (s *asmState) Write(b []byte) (int, error) {
- if s.state != spongeAbsorbing {
- panic("sha3: Write after Read")
- }
- length := len(b)
- for len(b) > 0 {
- if len(s.buf) == 0 && len(b) >= cap(s.buf) {
- // Hash the data directly and push any remaining bytes
- // into the buffer.
- remainder := len(b) % s.rate
- kimd(s.function, &s.a, b[:len(b)-remainder])
- if remainder != 0 {
- s.copyIntoBuf(b[len(b)-remainder:])
- }
- return length, nil
- }
-
- if len(s.buf) == cap(s.buf) {
- // flush the buffer
- kimd(s.function, &s.a, s.buf)
- s.buf = s.buf[:0]
- }
-
- // copy as much as we can into the buffer
- n := len(b)
- if len(b) > cap(s.buf)-len(s.buf) {
- n = cap(s.buf) - len(s.buf)
- }
- s.copyIntoBuf(b[:n])
- b = b[n:]
- }
- return length, nil
-}
-
-// Read squeezes an arbitrary number of bytes from the sponge.
-func (s *asmState) Read(out []byte) (n int, err error) {
- // The 'compute last message digest' instruction only stores the digest
- // at the first operand (dst) for SHAKE functions.
- if s.function != shake_128 && s.function != shake_256 {
- panic("sha3: can only call Read for SHAKE functions")
- }
-
- n = len(out)
-
- // need to pad if we were absorbing
- if s.state == spongeAbsorbing {
- s.state = spongeSqueezing
-
- // write hash directly into out if possible
- if len(out)%s.rate == 0 {
- klmd(s.function, &s.a, out, s.buf) // len(out) may be 0
- s.buf = s.buf[:0]
- return
- }
-
- // write hash into buffer
- max := cap(s.buf)
- if max > len(out) {
- max = (len(out)/s.rate)*s.rate + s.rate
- }
- klmd(s.function, &s.a, s.buf[:max], s.buf)
- s.buf = s.buf[:max]
- }
-
- for len(out) > 0 {
- // flush the buffer
- if len(s.buf) != 0 {
- c := copy(out, s.buf)
- out = out[c:]
- s.buf = s.buf[c:]
- continue
- }
-
- // write hash directly into out if possible
- if len(out)%s.rate == 0 {
- klmd(s.function|nopad, &s.a, out, nil)
- return
- }
-
- // write hash into buffer
- s.resetBuf()
- if cap(s.buf) > len(out) {
- s.buf = s.buf[:(len(out)/s.rate)*s.rate+s.rate]
- }
- klmd(s.function|nopad, &s.a, s.buf, nil)
- }
- return
-}
-
-// Sum appends the current hash to b and returns the resulting slice.
-// It does not change the underlying hash state.
-func (s *asmState) Sum(b []byte) []byte {
- if s.state != spongeAbsorbing {
- panic("sha3: Sum after Read")
- }
-
- // Copy the state to preserve the original.
- a := s.a
-
- // Hash the buffer. Note that we don't clear it because we
- // aren't updating the state.
- switch s.function {
- case sha3_224, sha3_256, sha3_384, sha3_512:
- klmd(s.function, &a, nil, s.buf)
- return append(b, a[:s.outputLen]...)
- case shake_128, shake_256:
- d := make([]byte, s.outputLen, 64)
- klmd(s.function, &a, d, s.buf)
- return append(b, d[:s.outputLen]...)
- default:
- panic("sha3: unknown function")
- }
-}
-
-// Reset resets the Hash to its initial state.
-func (s *asmState) Reset() {
- for i := range s.a {
- s.a[i] = 0
- }
- s.resetBuf()
- s.state = spongeAbsorbing
-}
-
-// Size returns the number of bytes Sum will return.
-func (s *asmState) Size() int {
- return s.outputLen
-}
-
-// BlockSize returns the hash's underlying block size.
-// The Write method must be able to accept any amount
-// of data, but it may operate more efficiently if all writes
-// are a multiple of the block size.
-func (s *asmState) BlockSize() int {
- return s.rate
-}
-
-// Clone returns a copy of the ShakeHash in its current state.
-func (s *asmState) Clone() ShakeHash {
- return s.clone()
-}
-
-// new224 returns an assembly implementation of SHA3-224 if available,
-// otherwise it returns a generic implementation.
-func new224() hash.Hash {
- if cpu.S390X.HasSHA3 {
- return newAsmState(sha3_224)
- }
- return new224Generic()
-}
-
-// new256 returns an assembly implementation of SHA3-256 if available,
-// otherwise it returns a generic implementation.
-func new256() hash.Hash {
- if cpu.S390X.HasSHA3 {
- return newAsmState(sha3_256)
- }
- return new256Generic()
-}
-
-// new384 returns an assembly implementation of SHA3-384 if available,
-// otherwise it returns a generic implementation.
-func new384() hash.Hash {
- if cpu.S390X.HasSHA3 {
- return newAsmState(sha3_384)
- }
- return new384Generic()
-}
-
-// new512 returns an assembly implementation of SHA3-512 if available,
-// otherwise it returns a generic implementation.
-func new512() hash.Hash {
- if cpu.S390X.HasSHA3 {
- return newAsmState(sha3_512)
- }
- return new512Generic()
-}
-
-// newShake128 returns an assembly implementation of SHAKE-128 if available,
-// otherwise it returns a generic implementation.
-func newShake128() ShakeHash {
- if cpu.S390X.HasSHA3 {
- return newAsmState(shake_128)
- }
- return newShake128Generic()
-}
-
-// newShake256 returns an assembly implementation of SHAKE-256 if available,
-// otherwise it returns a generic implementation.
-func newShake256() ShakeHash {
- if cpu.S390X.HasSHA3 {
- return newAsmState(shake_256)
- }
- return newShake256Generic()
-}
diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.s b/vendor/golang.org/x/crypto/sha3/sha3_s390x.s
deleted file mode 100644
index 826b862c779..00000000000
--- a/vendor/golang.org/x/crypto/sha3/sha3_s390x.s
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gc && !purego
-
-#include "textflag.h"
-
-// func kimd(function code, chain *[200]byte, src []byte)
-TEXT ·kimd(SB), NOFRAME|NOSPLIT, $0-40
- MOVD function+0(FP), R0
- MOVD chain+8(FP), R1
- LMG src+16(FP), R2, R3 // R2=base, R3=len
-
-continue:
- WORD $0xB93E0002 // KIMD --, R2
- BVS continue // continue if interrupted
- MOVD $0, R0 // reset R0 for pre-go1.8 compilers
- RET
-
-// func klmd(function code, chain *[200]byte, dst, src []byte)
-TEXT ·klmd(SB), NOFRAME|NOSPLIT, $0-64
- // TODO: SHAKE support
- MOVD function+0(FP), R0
- MOVD chain+8(FP), R1
- LMG dst+16(FP), R2, R3 // R2=base, R3=len
- LMG src+40(FP), R4, R5 // R4=base, R5=len
-
-continue:
- WORD $0xB93F0024 // KLMD R2, R4
- BVS continue // continue if interrupted
- MOVD $0, R0 // reset R0 for pre-go1.8 compilers
- RET
diff --git a/vendor/golang.org/x/crypto/sha3/shake.go b/vendor/golang.org/x/crypto/sha3/shake.go
deleted file mode 100644
index 1ea9275b8b7..00000000000
--- a/vendor/golang.org/x/crypto/sha3/shake.go
+++ /dev/null
@@ -1,174 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sha3
-
-// This file defines the ShakeHash interface, and provides
-// functions for creating SHAKE and cSHAKE instances, as well as utility
-// functions for hashing bytes to arbitrary-length output.
-//
-//
-// SHAKE implementation is based on FIPS PUB 202 [1]
-// cSHAKE implementations is based on NIST SP 800-185 [2]
-//
-// [1] https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf
-// [2] https://doi.org/10.6028/NIST.SP.800-185
-
-import (
- "encoding/binary"
- "hash"
- "io"
-)
-
-// ShakeHash defines the interface to hash functions that support
-// arbitrary-length output. When used as a plain [hash.Hash], it
-// produces minimum-length outputs that provide full-strength generic
-// security.
-type ShakeHash interface {
- hash.Hash
-
- // Read reads more output from the hash; reading affects the hash's
- // state. (ShakeHash.Read is thus very different from Hash.Sum)
- // It never returns an error, but subsequent calls to Write or Sum
- // will panic.
- io.Reader
-
- // Clone returns a copy of the ShakeHash in its current state.
- Clone() ShakeHash
-}
-
-// cSHAKE specific context
-type cshakeState struct {
- *state // SHA-3 state context and Read/Write operations
-
- // initBlock is the cSHAKE specific initialization set of bytes. It is initialized
- // by newCShake function and stores concatenation of N followed by S, encoded
- // by the method specified in 3.3 of [1].
- // It is stored here in order for Reset() to be able to put context into
- // initial state.
- initBlock []byte
-}
-
-// Consts for configuring initial SHA-3 state
-const (
- dsbyteShake = 0x1f
- dsbyteCShake = 0x04
- rate128 = 168
- rate256 = 136
-)
-
-func bytepad(input []byte, w int) []byte {
- // leftEncode always returns max 9 bytes
- buf := make([]byte, 0, 9+len(input)+w)
- buf = append(buf, leftEncode(uint64(w))...)
- buf = append(buf, input...)
- padlen := w - (len(buf) % w)
- return append(buf, make([]byte, padlen)...)
-}
-
-func leftEncode(value uint64) []byte {
- var b [9]byte
- binary.BigEndian.PutUint64(b[1:], value)
- // Trim all but last leading zero bytes
- i := byte(1)
- for i < 8 && b[i] == 0 {
- i++
- }
- // Prepend number of encoded bytes
- b[i-1] = 9 - i
- return b[i-1:]
-}
-
-func newCShake(N, S []byte, rate, outputLen int, dsbyte byte) ShakeHash {
- c := cshakeState{state: &state{rate: rate, outputLen: outputLen, dsbyte: dsbyte}}
-
- // leftEncode returns max 9 bytes
- c.initBlock = make([]byte, 0, 9*2+len(N)+len(S))
- c.initBlock = append(c.initBlock, leftEncode(uint64(len(N)*8))...)
- c.initBlock = append(c.initBlock, N...)
- c.initBlock = append(c.initBlock, leftEncode(uint64(len(S)*8))...)
- c.initBlock = append(c.initBlock, S...)
- c.Write(bytepad(c.initBlock, c.rate))
- return &c
-}
-
-// Reset resets the hash to initial state.
-func (c *cshakeState) Reset() {
- c.state.Reset()
- c.Write(bytepad(c.initBlock, c.rate))
-}
-
-// Clone returns copy of a cSHAKE context within its current state.
-func (c *cshakeState) Clone() ShakeHash {
- b := make([]byte, len(c.initBlock))
- copy(b, c.initBlock)
- return &cshakeState{state: c.clone(), initBlock: b}
-}
-
-// Clone returns copy of SHAKE context within its current state.
-func (c *state) Clone() ShakeHash {
- return c.clone()
-}
-
-// NewShake128 creates a new SHAKE128 variable-output-length ShakeHash.
-// Its generic security strength is 128 bits against all attacks if at
-// least 32 bytes of its output are used.
-func NewShake128() ShakeHash {
- return newShake128()
-}
-
-// NewShake256 creates a new SHAKE256 variable-output-length ShakeHash.
-// Its generic security strength is 256 bits against all attacks if
-// at least 64 bytes of its output are used.
-func NewShake256() ShakeHash {
- return newShake256()
-}
-
-func newShake128Generic() *state {
- return &state{rate: rate128, outputLen: 32, dsbyte: dsbyteShake}
-}
-
-func newShake256Generic() *state {
- return &state{rate: rate256, outputLen: 64, dsbyte: dsbyteShake}
-}
-
-// NewCShake128 creates a new instance of cSHAKE128 variable-output-length ShakeHash,
-// a customizable variant of SHAKE128.
-// N is used to define functions based on cSHAKE, it can be empty when plain cSHAKE is
-// desired. S is a customization byte string used for domain separation - two cSHAKE
-// computations on same input with different S yield unrelated outputs.
-// When N and S are both empty, this is equivalent to NewShake128.
-func NewCShake128(N, S []byte) ShakeHash {
- if len(N) == 0 && len(S) == 0 {
- return NewShake128()
- }
- return newCShake(N, S, rate128, 32, dsbyteCShake)
-}
-
-// NewCShake256 creates a new instance of cSHAKE256 variable-output-length ShakeHash,
-// a customizable variant of SHAKE256.
-// N is used to define functions based on cSHAKE, it can be empty when plain cSHAKE is
-// desired. S is a customization byte string used for domain separation - two cSHAKE
-// computations on same input with different S yield unrelated outputs.
-// When N and S are both empty, this is equivalent to NewShake256.
-func NewCShake256(N, S []byte) ShakeHash {
- if len(N) == 0 && len(S) == 0 {
- return NewShake256()
- }
- return newCShake(N, S, rate256, 64, dsbyteCShake)
-}
-
-// ShakeSum128 writes an arbitrary-length digest of data into hash.
-func ShakeSum128(hash, data []byte) {
- h := NewShake128()
- h.Write(data)
- h.Read(hash)
-}
-
-// ShakeSum256 writes an arbitrary-length digest of data into hash.
-func ShakeSum256(hash, data []byte) {
- h := NewShake256()
- h.Write(data)
- h.Read(hash)
-}
diff --git a/vendor/golang.org/x/crypto/sha3/shake_noasm.go b/vendor/golang.org/x/crypto/sha3/shake_noasm.go
deleted file mode 100644
index 4276ba4ab2c..00000000000
--- a/vendor/golang.org/x/crypto/sha3/shake_noasm.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !gc || purego || !s390x
-
-package sha3
-
-func newShake128() *state {
- return newShake128Generic()
-}
-
-func newShake256() *state {
- return newShake256Generic()
-}
diff --git a/vendor/golang.org/x/crypto/sha3/xor.go b/vendor/golang.org/x/crypto/sha3/xor.go
deleted file mode 100644
index 6ada5c9574e..00000000000
--- a/vendor/golang.org/x/crypto/sha3/xor.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sha3
-
-import (
- "crypto/subtle"
- "encoding/binary"
- "unsafe"
-
- "golang.org/x/sys/cpu"
-)
-
-// xorIn xors the bytes in buf into the state.
-func xorIn(d *state, buf []byte) {
- if cpu.IsBigEndian {
- for i := 0; len(buf) >= 8; i++ {
- a := binary.LittleEndian.Uint64(buf)
- d.a[i] ^= a
- buf = buf[8:]
- }
- } else {
- ab := (*[25 * 64 / 8]byte)(unsafe.Pointer(&d.a))
- subtle.XORBytes(ab[:], ab[:], buf)
- }
-}
-
-// copyOut copies uint64s to a byte buffer.
-func copyOut(d *state, b []byte) {
- if cpu.IsBigEndian {
- for i := 0; len(b) >= 8; i++ {
- binary.LittleEndian.PutUint64(b, d.a[i])
- b = b[8:]
- }
- } else {
- ab := (*[25 * 64 / 8]byte)(unsafe.Pointer(&d.a))
- copy(b, ab[:])
- }
-}
diff --git a/vendor/golang.org/x/crypto/ssh/agent/client.go b/vendor/golang.org/x/crypto/ssh/agent/client.go
deleted file mode 100644
index 106708d289e..00000000000
--- a/vendor/golang.org/x/crypto/ssh/agent/client.go
+++ /dev/null
@@ -1,854 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package agent implements the ssh-agent protocol, and provides both
-// a client and a server. The client can talk to a standard ssh-agent
-// that uses UNIX sockets, and one could implement an alternative
-// ssh-agent process using the sample server.
-//
-// References:
-//
-// [PROTOCOL.agent]: https://tools.ietf.org/html/draft-miller-ssh-agent-00
-package agent
-
-import (
- "bytes"
- "crypto/dsa"
- "crypto/ecdsa"
- "crypto/ed25519"
- "crypto/elliptic"
- "crypto/rsa"
- "encoding/base64"
- "encoding/binary"
- "errors"
- "fmt"
- "io"
- "math/big"
- "sync"
-
- "golang.org/x/crypto/ssh"
-)
-
-// SignatureFlags represent additional flags that can be passed to the signature
-// requests an defined in [PROTOCOL.agent] section 4.5.1.
-type SignatureFlags uint32
-
-// SignatureFlag values as defined in [PROTOCOL.agent] section 5.3.
-const (
- SignatureFlagReserved SignatureFlags = 1 << iota
- SignatureFlagRsaSha256
- SignatureFlagRsaSha512
-)
-
-// Agent represents the capabilities of an ssh-agent.
-type Agent interface {
- // List returns the identities known to the agent.
- List() ([]*Key, error)
-
- // Sign has the agent sign the data using a protocol 2 key as defined
- // in [PROTOCOL.agent] section 2.6.2.
- Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error)
-
- // Add adds a private key to the agent.
- Add(key AddedKey) error
-
- // Remove removes all identities with the given public key.
- Remove(key ssh.PublicKey) error
-
- // RemoveAll removes all identities.
- RemoveAll() error
-
- // Lock locks the agent. Sign and Remove will fail, and List will empty an empty list.
- Lock(passphrase []byte) error
-
- // Unlock undoes the effect of Lock
- Unlock(passphrase []byte) error
-
- // Signers returns signers for all the known keys.
- Signers() ([]ssh.Signer, error)
-}
-
-type ExtendedAgent interface {
- Agent
-
- // SignWithFlags signs like Sign, but allows for additional flags to be sent/received
- SignWithFlags(key ssh.PublicKey, data []byte, flags SignatureFlags) (*ssh.Signature, error)
-
- // Extension processes a custom extension request. Standard-compliant agents are not
- // required to support any extensions, but this method allows agents to implement
- // vendor-specific methods or add experimental features. See [PROTOCOL.agent] section 4.7.
- // If agent extensions are unsupported entirely this method MUST return an
- // ErrExtensionUnsupported error. Similarly, if just the specific extensionType in
- // the request is unsupported by the agent then ErrExtensionUnsupported MUST be
- // returned.
- //
- // In the case of success, since [PROTOCOL.agent] section 4.7 specifies that the contents
- // of the response are unspecified (including the type of the message), the complete
- // response will be returned as a []byte slice, including the "type" byte of the message.
- Extension(extensionType string, contents []byte) ([]byte, error)
-}
-
-// ConstraintExtension describes an optional constraint defined by users.
-type ConstraintExtension struct {
- // ExtensionName consist of a UTF-8 string suffixed by the
- // implementation domain following the naming scheme defined
- // in Section 4.2 of RFC 4251, e.g. "foo@example.com".
- ExtensionName string
- // ExtensionDetails contains the actual content of the extended
- // constraint.
- ExtensionDetails []byte
-}
-
-// AddedKey describes an SSH key to be added to an Agent.
-type AddedKey struct {
- // PrivateKey must be a *rsa.PrivateKey, *dsa.PrivateKey,
- // ed25519.PrivateKey or *ecdsa.PrivateKey, which will be inserted into the
- // agent.
- PrivateKey interface{}
- // Certificate, if not nil, is communicated to the agent and will be
- // stored with the key.
- Certificate *ssh.Certificate
- // Comment is an optional, free-form string.
- Comment string
- // LifetimeSecs, if not zero, is the number of seconds that the
- // agent will store the key for.
- LifetimeSecs uint32
- // ConfirmBeforeUse, if true, requests that the agent confirm with the
- // user before each use of this key.
- ConfirmBeforeUse bool
- // ConstraintExtensions are the experimental or private-use constraints
- // defined by users.
- ConstraintExtensions []ConstraintExtension
-}
-
-// See [PROTOCOL.agent], section 3.
-const (
- agentRequestV1Identities = 1
- agentRemoveAllV1Identities = 9
-
- // 3.2 Requests from client to agent for protocol 2 key operations
- agentAddIdentity = 17
- agentRemoveIdentity = 18
- agentRemoveAllIdentities = 19
- agentAddIDConstrained = 25
-
- // 3.3 Key-type independent requests from client to agent
- agentAddSmartcardKey = 20
- agentRemoveSmartcardKey = 21
- agentLock = 22
- agentUnlock = 23
- agentAddSmartcardKeyConstrained = 26
-
- // 3.7 Key constraint identifiers
- agentConstrainLifetime = 1
- agentConstrainConfirm = 2
- // Constraint extension identifier up to version 2 of the protocol. A
- // backward incompatible change will be required if we want to add support
- // for SSH_AGENT_CONSTRAIN_MAXSIGN which uses the same ID.
- agentConstrainExtensionV00 = 3
- // Constraint extension identifier in version 3 and later of the protocol.
- agentConstrainExtension = 255
-)
-
-// maxAgentResponseBytes is the maximum agent reply size that is accepted. This
-// is a sanity check, not a limit in the spec.
-const maxAgentResponseBytes = 16 << 20
-
-// Agent messages:
-// These structures mirror the wire format of the corresponding ssh agent
-// messages found in [PROTOCOL.agent].
-
-// 3.4 Generic replies from agent to client
-const agentFailure = 5
-
-type failureAgentMsg struct{}
-
-const agentSuccess = 6
-
-type successAgentMsg struct{}
-
-// See [PROTOCOL.agent], section 2.5.2.
-const agentRequestIdentities = 11
-
-type requestIdentitiesAgentMsg struct{}
-
-// See [PROTOCOL.agent], section 2.5.2.
-const agentIdentitiesAnswer = 12
-
-type identitiesAnswerAgentMsg struct {
- NumKeys uint32 `sshtype:"12"`
- Keys []byte `ssh:"rest"`
-}
-
-// See [PROTOCOL.agent], section 2.6.2.
-const agentSignRequest = 13
-
-type signRequestAgentMsg struct {
- KeyBlob []byte `sshtype:"13"`
- Data []byte
- Flags uint32
-}
-
-// See [PROTOCOL.agent], section 2.6.2.
-
-// 3.6 Replies from agent to client for protocol 2 key operations
-const agentSignResponse = 14
-
-type signResponseAgentMsg struct {
- SigBlob []byte `sshtype:"14"`
-}
-
-type publicKey struct {
- Format string
- Rest []byte `ssh:"rest"`
-}
-
-// 3.7 Key constraint identifiers
-type constrainLifetimeAgentMsg struct {
- LifetimeSecs uint32 `sshtype:"1"`
-}
-
-type constrainExtensionAgentMsg struct {
- ExtensionName string `sshtype:"255|3"`
- ExtensionDetails []byte
-
- // Rest is a field used for parsing, not part of message
- Rest []byte `ssh:"rest"`
-}
-
-// See [PROTOCOL.agent], section 4.7
-const agentExtension = 27
-const agentExtensionFailure = 28
-
-// ErrExtensionUnsupported indicates that an extension defined in
-// [PROTOCOL.agent] section 4.7 is unsupported by the agent. Specifically this
-// error indicates that the agent returned a standard SSH_AGENT_FAILURE message
-// as the result of a SSH_AGENTC_EXTENSION request. Note that the protocol
-// specification (and therefore this error) does not distinguish between a
-// specific extension being unsupported and extensions being unsupported entirely.
-var ErrExtensionUnsupported = errors.New("agent: extension unsupported")
-
-type extensionAgentMsg struct {
- ExtensionType string `sshtype:"27"`
- // NOTE: this matches OpenSSH's PROTOCOL.agent, not the IETF draft [PROTOCOL.agent],
- // so that it matches what OpenSSH actually implements in the wild.
- Contents []byte `ssh:"rest"`
-}
-
-// Key represents a protocol 2 public key as defined in
-// [PROTOCOL.agent], section 2.5.2.
-type Key struct {
- Format string
- Blob []byte
- Comment string
-}
-
-func clientErr(err error) error {
- return fmt.Errorf("agent: client error: %v", err)
-}
-
-// String returns the storage form of an agent key with the format, base64
-// encoded serialized key, and the comment if it is not empty.
-func (k *Key) String() string {
- s := string(k.Format) + " " + base64.StdEncoding.EncodeToString(k.Blob)
-
- if k.Comment != "" {
- s += " " + k.Comment
- }
-
- return s
-}
-
-// Type returns the public key type.
-func (k *Key) Type() string {
- return k.Format
-}
-
-// Marshal returns key blob to satisfy the ssh.PublicKey interface.
-func (k *Key) Marshal() []byte {
- return k.Blob
-}
-
-// Verify satisfies the ssh.PublicKey interface.
-func (k *Key) Verify(data []byte, sig *ssh.Signature) error {
- pubKey, err := ssh.ParsePublicKey(k.Blob)
- if err != nil {
- return fmt.Errorf("agent: bad public key: %v", err)
- }
- return pubKey.Verify(data, sig)
-}
-
-type wireKey struct {
- Format string
- Rest []byte `ssh:"rest"`
-}
-
-func parseKey(in []byte) (out *Key, rest []byte, err error) {
- var record struct {
- Blob []byte
- Comment string
- Rest []byte `ssh:"rest"`
- }
-
- if err := ssh.Unmarshal(in, &record); err != nil {
- return nil, nil, err
- }
-
- var wk wireKey
- if err := ssh.Unmarshal(record.Blob, &wk); err != nil {
- return nil, nil, err
- }
-
- return &Key{
- Format: wk.Format,
- Blob: record.Blob,
- Comment: record.Comment,
- }, record.Rest, nil
-}
-
-// client is a client for an ssh-agent process.
-type client struct {
- // conn is typically a *net.UnixConn
- conn io.ReadWriter
- // mu is used to prevent concurrent access to the agent
- mu sync.Mutex
-}
-
-// NewClient returns an Agent that talks to an ssh-agent process over
-// the given connection.
-func NewClient(rw io.ReadWriter) ExtendedAgent {
- return &client{conn: rw}
-}
-
-// call sends an RPC to the agent. On success, the reply is
-// unmarshaled into reply and replyType is set to the first byte of
-// the reply, which contains the type of the message.
-func (c *client) call(req []byte) (reply interface{}, err error) {
- buf, err := c.callRaw(req)
- if err != nil {
- return nil, err
- }
- reply, err = unmarshal(buf)
- if err != nil {
- return nil, clientErr(err)
- }
- return reply, nil
-}
-
-// callRaw sends an RPC to the agent. On success, the raw
-// bytes of the response are returned; no unmarshalling is
-// performed on the response.
-func (c *client) callRaw(req []byte) (reply []byte, err error) {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- msg := make([]byte, 4+len(req))
- binary.BigEndian.PutUint32(msg, uint32(len(req)))
- copy(msg[4:], req)
- if _, err = c.conn.Write(msg); err != nil {
- return nil, clientErr(err)
- }
-
- var respSizeBuf [4]byte
- if _, err = io.ReadFull(c.conn, respSizeBuf[:]); err != nil {
- return nil, clientErr(err)
- }
- respSize := binary.BigEndian.Uint32(respSizeBuf[:])
- if respSize > maxAgentResponseBytes {
- return nil, clientErr(errors.New("response too large"))
- }
-
- buf := make([]byte, respSize)
- if _, err = io.ReadFull(c.conn, buf); err != nil {
- return nil, clientErr(err)
- }
- return buf, nil
-}
-
-func (c *client) simpleCall(req []byte) error {
- resp, err := c.call(req)
- if err != nil {
- return err
- }
- if _, ok := resp.(*successAgentMsg); ok {
- return nil
- }
- return errors.New("agent: failure")
-}
-
-func (c *client) RemoveAll() error {
- return c.simpleCall([]byte{agentRemoveAllIdentities})
-}
-
-func (c *client) Remove(key ssh.PublicKey) error {
- req := ssh.Marshal(&agentRemoveIdentityMsg{
- KeyBlob: key.Marshal(),
- })
- return c.simpleCall(req)
-}
-
-func (c *client) Lock(passphrase []byte) error {
- req := ssh.Marshal(&agentLockMsg{
- Passphrase: passphrase,
- })
- return c.simpleCall(req)
-}
-
-func (c *client) Unlock(passphrase []byte) error {
- req := ssh.Marshal(&agentUnlockMsg{
- Passphrase: passphrase,
- })
- return c.simpleCall(req)
-}
-
-// List returns the identities known to the agent.
-func (c *client) List() ([]*Key, error) {
- // see [PROTOCOL.agent] section 2.5.2.
- req := []byte{agentRequestIdentities}
-
- msg, err := c.call(req)
- if err != nil {
- return nil, err
- }
-
- switch msg := msg.(type) {
- case *identitiesAnswerAgentMsg:
- if msg.NumKeys > maxAgentResponseBytes/8 {
- return nil, errors.New("agent: too many keys in agent reply")
- }
- keys := make([]*Key, msg.NumKeys)
- data := msg.Keys
- for i := uint32(0); i < msg.NumKeys; i++ {
- var key *Key
- var err error
- if key, data, err = parseKey(data); err != nil {
- return nil, err
- }
- keys[i] = key
- }
- return keys, nil
- case *failureAgentMsg:
- return nil, errors.New("agent: failed to list keys")
- }
- panic("unreachable")
-}
-
-// Sign has the agent sign the data using a protocol 2 key as defined
-// in [PROTOCOL.agent] section 2.6.2.
-func (c *client) Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error) {
- return c.SignWithFlags(key, data, 0)
-}
-
-func (c *client) SignWithFlags(key ssh.PublicKey, data []byte, flags SignatureFlags) (*ssh.Signature, error) {
- req := ssh.Marshal(signRequestAgentMsg{
- KeyBlob: key.Marshal(),
- Data: data,
- Flags: uint32(flags),
- })
-
- msg, err := c.call(req)
- if err != nil {
- return nil, err
- }
-
- switch msg := msg.(type) {
- case *signResponseAgentMsg:
- var sig ssh.Signature
- if err := ssh.Unmarshal(msg.SigBlob, &sig); err != nil {
- return nil, err
- }
-
- return &sig, nil
- case *failureAgentMsg:
- return nil, errors.New("agent: failed to sign challenge")
- }
- panic("unreachable")
-}
-
-// unmarshal parses an agent message in packet, returning the parsed
-// form and the message type of packet.
-func unmarshal(packet []byte) (interface{}, error) {
- if len(packet) < 1 {
- return nil, errors.New("agent: empty packet")
- }
- var msg interface{}
- switch packet[0] {
- case agentFailure:
- return new(failureAgentMsg), nil
- case agentSuccess:
- return new(successAgentMsg), nil
- case agentIdentitiesAnswer:
- msg = new(identitiesAnswerAgentMsg)
- case agentSignResponse:
- msg = new(signResponseAgentMsg)
- case agentV1IdentitiesAnswer:
- msg = new(agentV1IdentityMsg)
- default:
- return nil, fmt.Errorf("agent: unknown type tag %d", packet[0])
- }
- if err := ssh.Unmarshal(packet, msg); err != nil {
- return nil, err
- }
- return msg, nil
-}
-
-type rsaKeyMsg struct {
- Type string `sshtype:"17|25"`
- N *big.Int
- E *big.Int
- D *big.Int
- Iqmp *big.Int // IQMP = Inverse Q Mod P
- P *big.Int
- Q *big.Int
- Comments string
- Constraints []byte `ssh:"rest"`
-}
-
-type dsaKeyMsg struct {
- Type string `sshtype:"17|25"`
- P *big.Int
- Q *big.Int
- G *big.Int
- Y *big.Int
- X *big.Int
- Comments string
- Constraints []byte `ssh:"rest"`
-}
-
-type ecdsaKeyMsg struct {
- Type string `sshtype:"17|25"`
- Curve string
- KeyBytes []byte
- D *big.Int
- Comments string
- Constraints []byte `ssh:"rest"`
-}
-
-type ed25519KeyMsg struct {
- Type string `sshtype:"17|25"`
- Pub []byte
- Priv []byte
- Comments string
- Constraints []byte `ssh:"rest"`
-}
-
-// Insert adds a private key to the agent.
-func (c *client) insertKey(s interface{}, comment string, constraints []byte) error {
- var req []byte
- switch k := s.(type) {
- case *rsa.PrivateKey:
- if len(k.Primes) != 2 {
- return fmt.Errorf("agent: unsupported RSA key with %d primes", len(k.Primes))
- }
- k.Precompute()
- req = ssh.Marshal(rsaKeyMsg{
- Type: ssh.KeyAlgoRSA,
- N: k.N,
- E: big.NewInt(int64(k.E)),
- D: k.D,
- Iqmp: k.Precomputed.Qinv,
- P: k.Primes[0],
- Q: k.Primes[1],
- Comments: comment,
- Constraints: constraints,
- })
- case *dsa.PrivateKey:
- req = ssh.Marshal(dsaKeyMsg{
- Type: ssh.KeyAlgoDSA,
- P: k.P,
- Q: k.Q,
- G: k.G,
- Y: k.Y,
- X: k.X,
- Comments: comment,
- Constraints: constraints,
- })
- case *ecdsa.PrivateKey:
- nistID := fmt.Sprintf("nistp%d", k.Params().BitSize)
- req = ssh.Marshal(ecdsaKeyMsg{
- Type: "ecdsa-sha2-" + nistID,
- Curve: nistID,
- KeyBytes: elliptic.Marshal(k.Curve, k.X, k.Y),
- D: k.D,
- Comments: comment,
- Constraints: constraints,
- })
- case ed25519.PrivateKey:
- req = ssh.Marshal(ed25519KeyMsg{
- Type: ssh.KeyAlgoED25519,
- Pub: []byte(k)[32:],
- Priv: []byte(k),
- Comments: comment,
- Constraints: constraints,
- })
- // This function originally supported only *ed25519.PrivateKey, however the
- // general idiom is to pass ed25519.PrivateKey by value, not by pointer.
- // We still support the pointer variant for backwards compatibility.
- case *ed25519.PrivateKey:
- req = ssh.Marshal(ed25519KeyMsg{
- Type: ssh.KeyAlgoED25519,
- Pub: []byte(*k)[32:],
- Priv: []byte(*k),
- Comments: comment,
- Constraints: constraints,
- })
- default:
- return fmt.Errorf("agent: unsupported key type %T", s)
- }
-
- // if constraints are present then the message type needs to be changed.
- if len(constraints) != 0 {
- req[0] = agentAddIDConstrained
- }
-
- resp, err := c.call(req)
- if err != nil {
- return err
- }
- if _, ok := resp.(*successAgentMsg); ok {
- return nil
- }
- return errors.New("agent: failure")
-}
-
-type rsaCertMsg struct {
- Type string `sshtype:"17|25"`
- CertBytes []byte
- D *big.Int
- Iqmp *big.Int // IQMP = Inverse Q Mod P
- P *big.Int
- Q *big.Int
- Comments string
- Constraints []byte `ssh:"rest"`
-}
-
-type dsaCertMsg struct {
- Type string `sshtype:"17|25"`
- CertBytes []byte
- X *big.Int
- Comments string
- Constraints []byte `ssh:"rest"`
-}
-
-type ecdsaCertMsg struct {
- Type string `sshtype:"17|25"`
- CertBytes []byte
- D *big.Int
- Comments string
- Constraints []byte `ssh:"rest"`
-}
-
-type ed25519CertMsg struct {
- Type string `sshtype:"17|25"`
- CertBytes []byte
- Pub []byte
- Priv []byte
- Comments string
- Constraints []byte `ssh:"rest"`
-}
-
-// Add adds a private key to the agent. If a certificate is given,
-// that certificate is added instead as public key.
-func (c *client) Add(key AddedKey) error {
- var constraints []byte
-
- if secs := key.LifetimeSecs; secs != 0 {
- constraints = append(constraints, ssh.Marshal(constrainLifetimeAgentMsg{secs})...)
- }
-
- if key.ConfirmBeforeUse {
- constraints = append(constraints, agentConstrainConfirm)
- }
-
- cert := key.Certificate
- if cert == nil {
- return c.insertKey(key.PrivateKey, key.Comment, constraints)
- }
- return c.insertCert(key.PrivateKey, cert, key.Comment, constraints)
-}
-
-func (c *client) insertCert(s interface{}, cert *ssh.Certificate, comment string, constraints []byte) error {
- var req []byte
- switch k := s.(type) {
- case *rsa.PrivateKey:
- if len(k.Primes) != 2 {
- return fmt.Errorf("agent: unsupported RSA key with %d primes", len(k.Primes))
- }
- k.Precompute()
- req = ssh.Marshal(rsaCertMsg{
- Type: cert.Type(),
- CertBytes: cert.Marshal(),
- D: k.D,
- Iqmp: k.Precomputed.Qinv,
- P: k.Primes[0],
- Q: k.Primes[1],
- Comments: comment,
- Constraints: constraints,
- })
- case *dsa.PrivateKey:
- req = ssh.Marshal(dsaCertMsg{
- Type: cert.Type(),
- CertBytes: cert.Marshal(),
- X: k.X,
- Comments: comment,
- Constraints: constraints,
- })
- case *ecdsa.PrivateKey:
- req = ssh.Marshal(ecdsaCertMsg{
- Type: cert.Type(),
- CertBytes: cert.Marshal(),
- D: k.D,
- Comments: comment,
- Constraints: constraints,
- })
- case ed25519.PrivateKey:
- req = ssh.Marshal(ed25519CertMsg{
- Type: cert.Type(),
- CertBytes: cert.Marshal(),
- Pub: []byte(k)[32:],
- Priv: []byte(k),
- Comments: comment,
- Constraints: constraints,
- })
- // This function originally supported only *ed25519.PrivateKey, however the
- // general idiom is to pass ed25519.PrivateKey by value, not by pointer.
- // We still support the pointer variant for backwards compatibility.
- case *ed25519.PrivateKey:
- req = ssh.Marshal(ed25519CertMsg{
- Type: cert.Type(),
- CertBytes: cert.Marshal(),
- Pub: []byte(*k)[32:],
- Priv: []byte(*k),
- Comments: comment,
- Constraints: constraints,
- })
- default:
- return fmt.Errorf("agent: unsupported key type %T", s)
- }
-
- // if constraints are present then the message type needs to be changed.
- if len(constraints) != 0 {
- req[0] = agentAddIDConstrained
- }
-
- signer, err := ssh.NewSignerFromKey(s)
- if err != nil {
- return err
- }
- if !bytes.Equal(cert.Key.Marshal(), signer.PublicKey().Marshal()) {
- return errors.New("agent: signer and cert have different public key")
- }
-
- resp, err := c.call(req)
- if err != nil {
- return err
- }
- if _, ok := resp.(*successAgentMsg); ok {
- return nil
- }
- return errors.New("agent: failure")
-}
-
-// Signers provides a callback for client authentication.
-func (c *client) Signers() ([]ssh.Signer, error) {
- keys, err := c.List()
- if err != nil {
- return nil, err
- }
-
- var result []ssh.Signer
- for _, k := range keys {
- result = append(result, &agentKeyringSigner{c, k})
- }
- return result, nil
-}
-
-type agentKeyringSigner struct {
- agent *client
- pub ssh.PublicKey
-}
-
-func (s *agentKeyringSigner) PublicKey() ssh.PublicKey {
- return s.pub
-}
-
-func (s *agentKeyringSigner) Sign(rand io.Reader, data []byte) (*ssh.Signature, error) {
- // The agent has its own entropy source, so the rand argument is ignored.
- return s.agent.Sign(s.pub, data)
-}
-
-func (s *agentKeyringSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*ssh.Signature, error) {
- if algorithm == "" || algorithm == underlyingAlgo(s.pub.Type()) {
- return s.Sign(rand, data)
- }
-
- var flags SignatureFlags
- switch algorithm {
- case ssh.KeyAlgoRSASHA256:
- flags = SignatureFlagRsaSha256
- case ssh.KeyAlgoRSASHA512:
- flags = SignatureFlagRsaSha512
- default:
- return nil, fmt.Errorf("agent: unsupported algorithm %q", algorithm)
- }
-
- return s.agent.SignWithFlags(s.pub, data, flags)
-}
-
-var _ ssh.AlgorithmSigner = &agentKeyringSigner{}
-
-// certKeyAlgoNames is a mapping from known certificate algorithm names to the
-// corresponding public key signature algorithm.
-//
-// This map must be kept in sync with the one in certs.go.
-var certKeyAlgoNames = map[string]string{
- ssh.CertAlgoRSAv01: ssh.KeyAlgoRSA,
- ssh.CertAlgoRSASHA256v01: ssh.KeyAlgoRSASHA256,
- ssh.CertAlgoRSASHA512v01: ssh.KeyAlgoRSASHA512,
- ssh.CertAlgoDSAv01: ssh.KeyAlgoDSA,
- ssh.CertAlgoECDSA256v01: ssh.KeyAlgoECDSA256,
- ssh.CertAlgoECDSA384v01: ssh.KeyAlgoECDSA384,
- ssh.CertAlgoECDSA521v01: ssh.KeyAlgoECDSA521,
- ssh.CertAlgoSKECDSA256v01: ssh.KeyAlgoSKECDSA256,
- ssh.CertAlgoED25519v01: ssh.KeyAlgoED25519,
- ssh.CertAlgoSKED25519v01: ssh.KeyAlgoSKED25519,
-}
-
-// underlyingAlgo returns the signature algorithm associated with algo (which is
-// an advertised or negotiated public key or host key algorithm). These are
-// usually the same, except for certificate algorithms.
-func underlyingAlgo(algo string) string {
- if a, ok := certKeyAlgoNames[algo]; ok {
- return a
- }
- return algo
-}
-
-// Calls an extension method. It is up to the agent implementation as to whether or not
-// any particular extension is supported and may always return an error. Because the
-// type of the response is up to the implementation, this returns the bytes of the
-// response and does not attempt any type of unmarshalling.
-func (c *client) Extension(extensionType string, contents []byte) ([]byte, error) {
- req := ssh.Marshal(extensionAgentMsg{
- ExtensionType: extensionType,
- Contents: contents,
- })
- buf, err := c.callRaw(req)
- if err != nil {
- return nil, err
- }
- if len(buf) == 0 {
- return nil, errors.New("agent: failure; empty response")
- }
- // [PROTOCOL.agent] section 4.7 indicates that an SSH_AGENT_FAILURE message
- // represents an agent that does not support the extension
- if buf[0] == agentFailure {
- return nil, ErrExtensionUnsupported
- }
- if buf[0] == agentExtensionFailure {
- return nil, errors.New("agent: generic extension failure")
- }
-
- return buf, nil
-}
diff --git a/vendor/golang.org/x/crypto/ssh/agent/forward.go b/vendor/golang.org/x/crypto/ssh/agent/forward.go
deleted file mode 100644
index fd24ba900d2..00000000000
--- a/vendor/golang.org/x/crypto/ssh/agent/forward.go
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package agent
-
-import (
- "errors"
- "io"
- "net"
- "sync"
-
- "golang.org/x/crypto/ssh"
-)
-
-// RequestAgentForwarding sets up agent forwarding for the session.
-// ForwardToAgent or ForwardToRemote should be called to route
-// the authentication requests.
-func RequestAgentForwarding(session *ssh.Session) error {
- ok, err := session.SendRequest("auth-agent-req@openssh.com", true, nil)
- if err != nil {
- return err
- }
- if !ok {
- return errors.New("forwarding request denied")
- }
- return nil
-}
-
-// ForwardToAgent routes authentication requests to the given keyring.
-func ForwardToAgent(client *ssh.Client, keyring Agent) error {
- channels := client.HandleChannelOpen(channelType)
- if channels == nil {
- return errors.New("agent: already have handler for " + channelType)
- }
-
- go func() {
- for ch := range channels {
- channel, reqs, err := ch.Accept()
- if err != nil {
- continue
- }
- go ssh.DiscardRequests(reqs)
- go func() {
- ServeAgent(keyring, channel)
- channel.Close()
- }()
- }
- }()
- return nil
-}
-
-const channelType = "auth-agent@openssh.com"
-
-// ForwardToRemote routes authentication requests to the ssh-agent
-// process serving on the given unix socket.
-func ForwardToRemote(client *ssh.Client, addr string) error {
- channels := client.HandleChannelOpen(channelType)
- if channels == nil {
- return errors.New("agent: already have handler for " + channelType)
- }
- conn, err := net.Dial("unix", addr)
- if err != nil {
- return err
- }
- conn.Close()
-
- go func() {
- for ch := range channels {
- channel, reqs, err := ch.Accept()
- if err != nil {
- continue
- }
- go ssh.DiscardRequests(reqs)
- go forwardUnixSocket(channel, addr)
- }
- }()
- return nil
-}
-
-func forwardUnixSocket(channel ssh.Channel, addr string) {
- conn, err := net.Dial("unix", addr)
- if err != nil {
- return
- }
-
- var wg sync.WaitGroup
- wg.Add(2)
- go func() {
- io.Copy(conn, channel)
- conn.(*net.UnixConn).CloseWrite()
- wg.Done()
- }()
- go func() {
- io.Copy(channel, conn)
- channel.CloseWrite()
- wg.Done()
- }()
-
- wg.Wait()
- conn.Close()
- channel.Close()
-}
diff --git a/vendor/golang.org/x/crypto/ssh/agent/keyring.go b/vendor/golang.org/x/crypto/ssh/agent/keyring.go
deleted file mode 100644
index 21bfa870fa4..00000000000
--- a/vendor/golang.org/x/crypto/ssh/agent/keyring.go
+++ /dev/null
@@ -1,241 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package agent
-
-import (
- "bytes"
- "crypto/rand"
- "crypto/subtle"
- "errors"
- "fmt"
- "sync"
- "time"
-
- "golang.org/x/crypto/ssh"
-)
-
-type privKey struct {
- signer ssh.Signer
- comment string
- expire *time.Time
-}
-
-type keyring struct {
- mu sync.Mutex
- keys []privKey
-
- locked bool
- passphrase []byte
-}
-
-var errLocked = errors.New("agent: locked")
-
-// NewKeyring returns an Agent that holds keys in memory. It is safe
-// for concurrent use by multiple goroutines.
-func NewKeyring() Agent {
- return &keyring{}
-}
-
-// RemoveAll removes all identities.
-func (r *keyring) RemoveAll() error {
- r.mu.Lock()
- defer r.mu.Unlock()
- if r.locked {
- return errLocked
- }
-
- r.keys = nil
- return nil
-}
-
-// removeLocked does the actual key removal. The caller must already be holding the
-// keyring mutex.
-func (r *keyring) removeLocked(want []byte) error {
- found := false
- for i := 0; i < len(r.keys); {
- if bytes.Equal(r.keys[i].signer.PublicKey().Marshal(), want) {
- found = true
- r.keys[i] = r.keys[len(r.keys)-1]
- r.keys = r.keys[:len(r.keys)-1]
- continue
- } else {
- i++
- }
- }
-
- if !found {
- return errors.New("agent: key not found")
- }
- return nil
-}
-
-// Remove removes all identities with the given public key.
-func (r *keyring) Remove(key ssh.PublicKey) error {
- r.mu.Lock()
- defer r.mu.Unlock()
- if r.locked {
- return errLocked
- }
-
- return r.removeLocked(key.Marshal())
-}
-
-// Lock locks the agent. Sign and Remove will fail, and List will return an empty list.
-func (r *keyring) Lock(passphrase []byte) error {
- r.mu.Lock()
- defer r.mu.Unlock()
- if r.locked {
- return errLocked
- }
-
- r.locked = true
- r.passphrase = passphrase
- return nil
-}
-
-// Unlock undoes the effect of Lock
-func (r *keyring) Unlock(passphrase []byte) error {
- r.mu.Lock()
- defer r.mu.Unlock()
- if !r.locked {
- return errors.New("agent: not locked")
- }
- if 1 != subtle.ConstantTimeCompare(passphrase, r.passphrase) {
- return fmt.Errorf("agent: incorrect passphrase")
- }
-
- r.locked = false
- r.passphrase = nil
- return nil
-}
-
-// expireKeysLocked removes expired keys from the keyring. If a key was added
-// with a lifetimesecs contraint and seconds >= lifetimesecs seconds have
-// elapsed, it is removed. The caller *must* be holding the keyring mutex.
-func (r *keyring) expireKeysLocked() {
- for _, k := range r.keys {
- if k.expire != nil && time.Now().After(*k.expire) {
- r.removeLocked(k.signer.PublicKey().Marshal())
- }
- }
-}
-
-// List returns the identities known to the agent.
-func (r *keyring) List() ([]*Key, error) {
- r.mu.Lock()
- defer r.mu.Unlock()
- if r.locked {
- // section 2.7: locked agents return empty.
- return nil, nil
- }
-
- r.expireKeysLocked()
- var ids []*Key
- for _, k := range r.keys {
- pub := k.signer.PublicKey()
- ids = append(ids, &Key{
- Format: pub.Type(),
- Blob: pub.Marshal(),
- Comment: k.comment})
- }
- return ids, nil
-}
-
-// Insert adds a private key to the keyring. If a certificate
-// is given, that certificate is added as public key. Note that
-// any constraints given are ignored.
-func (r *keyring) Add(key AddedKey) error {
- r.mu.Lock()
- defer r.mu.Unlock()
- if r.locked {
- return errLocked
- }
- signer, err := ssh.NewSignerFromKey(key.PrivateKey)
-
- if err != nil {
- return err
- }
-
- if cert := key.Certificate; cert != nil {
- signer, err = ssh.NewCertSigner(cert, signer)
- if err != nil {
- return err
- }
- }
-
- p := privKey{
- signer: signer,
- comment: key.Comment,
- }
-
- if key.LifetimeSecs > 0 {
- t := time.Now().Add(time.Duration(key.LifetimeSecs) * time.Second)
- p.expire = &t
- }
-
- r.keys = append(r.keys, p)
-
- return nil
-}
-
-// Sign returns a signature for the data.
-func (r *keyring) Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error) {
- return r.SignWithFlags(key, data, 0)
-}
-
-func (r *keyring) SignWithFlags(key ssh.PublicKey, data []byte, flags SignatureFlags) (*ssh.Signature, error) {
- r.mu.Lock()
- defer r.mu.Unlock()
- if r.locked {
- return nil, errLocked
- }
-
- r.expireKeysLocked()
- wanted := key.Marshal()
- for _, k := range r.keys {
- if bytes.Equal(k.signer.PublicKey().Marshal(), wanted) {
- if flags == 0 {
- return k.signer.Sign(rand.Reader, data)
- } else {
- if algorithmSigner, ok := k.signer.(ssh.AlgorithmSigner); !ok {
- return nil, fmt.Errorf("agent: signature does not support non-default signature algorithm: %T", k.signer)
- } else {
- var algorithm string
- switch flags {
- case SignatureFlagRsaSha256:
- algorithm = ssh.KeyAlgoRSASHA256
- case SignatureFlagRsaSha512:
- algorithm = ssh.KeyAlgoRSASHA512
- default:
- return nil, fmt.Errorf("agent: unsupported signature flags: %d", flags)
- }
- return algorithmSigner.SignWithAlgorithm(rand.Reader, data, algorithm)
- }
- }
- }
- }
- return nil, errors.New("not found")
-}
-
-// Signers returns signers for all the known keys.
-func (r *keyring) Signers() ([]ssh.Signer, error) {
- r.mu.Lock()
- defer r.mu.Unlock()
- if r.locked {
- return nil, errLocked
- }
-
- r.expireKeysLocked()
- s := make([]ssh.Signer, 0, len(r.keys))
- for _, k := range r.keys {
- s = append(s, k.signer)
- }
- return s, nil
-}
-
-// The keyring does not support any extensions
-func (r *keyring) Extension(extensionType string, contents []byte) ([]byte, error) {
- return nil, ErrExtensionUnsupported
-}
diff --git a/vendor/golang.org/x/crypto/ssh/agent/server.go b/vendor/golang.org/x/crypto/ssh/agent/server.go
deleted file mode 100644
index e35ca7ce318..00000000000
--- a/vendor/golang.org/x/crypto/ssh/agent/server.go
+++ /dev/null
@@ -1,570 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package agent
-
-import (
- "crypto/dsa"
- "crypto/ecdsa"
- "crypto/ed25519"
- "crypto/elliptic"
- "crypto/rsa"
- "encoding/binary"
- "errors"
- "fmt"
- "io"
- "log"
- "math/big"
-
- "golang.org/x/crypto/ssh"
-)
-
-// server wraps an Agent and uses it to implement the agent side of
-// the SSH-agent, wire protocol.
-type server struct {
- agent Agent
-}
-
-func (s *server) processRequestBytes(reqData []byte) []byte {
- rep, err := s.processRequest(reqData)
- if err != nil {
- if err != errLocked {
- // TODO(hanwen): provide better logging interface?
- log.Printf("agent %d: %v", reqData[0], err)
- }
- return []byte{agentFailure}
- }
-
- if err == nil && rep == nil {
- return []byte{agentSuccess}
- }
-
- return ssh.Marshal(rep)
-}
-
-func marshalKey(k *Key) []byte {
- var record struct {
- Blob []byte
- Comment string
- }
- record.Blob = k.Marshal()
- record.Comment = k.Comment
-
- return ssh.Marshal(&record)
-}
-
-// See [PROTOCOL.agent], section 2.5.1.
-const agentV1IdentitiesAnswer = 2
-
-type agentV1IdentityMsg struct {
- Numkeys uint32 `sshtype:"2"`
-}
-
-type agentRemoveIdentityMsg struct {
- KeyBlob []byte `sshtype:"18"`
-}
-
-type agentLockMsg struct {
- Passphrase []byte `sshtype:"22"`
-}
-
-type agentUnlockMsg struct {
- Passphrase []byte `sshtype:"23"`
-}
-
-func (s *server) processRequest(data []byte) (interface{}, error) {
- switch data[0] {
- case agentRequestV1Identities:
- return &agentV1IdentityMsg{0}, nil
-
- case agentRemoveAllV1Identities:
- return nil, nil
-
- case agentRemoveIdentity:
- var req agentRemoveIdentityMsg
- if err := ssh.Unmarshal(data, &req); err != nil {
- return nil, err
- }
-
- var wk wireKey
- if err := ssh.Unmarshal(req.KeyBlob, &wk); err != nil {
- return nil, err
- }
-
- return nil, s.agent.Remove(&Key{Format: wk.Format, Blob: req.KeyBlob})
-
- case agentRemoveAllIdentities:
- return nil, s.agent.RemoveAll()
-
- case agentLock:
- var req agentLockMsg
- if err := ssh.Unmarshal(data, &req); err != nil {
- return nil, err
- }
-
- return nil, s.agent.Lock(req.Passphrase)
-
- case agentUnlock:
- var req agentUnlockMsg
- if err := ssh.Unmarshal(data, &req); err != nil {
- return nil, err
- }
- return nil, s.agent.Unlock(req.Passphrase)
-
- case agentSignRequest:
- var req signRequestAgentMsg
- if err := ssh.Unmarshal(data, &req); err != nil {
- return nil, err
- }
-
- var wk wireKey
- if err := ssh.Unmarshal(req.KeyBlob, &wk); err != nil {
- return nil, err
- }
-
- k := &Key{
- Format: wk.Format,
- Blob: req.KeyBlob,
- }
-
- var sig *ssh.Signature
- var err error
- if extendedAgent, ok := s.agent.(ExtendedAgent); ok {
- sig, err = extendedAgent.SignWithFlags(k, req.Data, SignatureFlags(req.Flags))
- } else {
- sig, err = s.agent.Sign(k, req.Data)
- }
-
- if err != nil {
- return nil, err
- }
- return &signResponseAgentMsg{SigBlob: ssh.Marshal(sig)}, nil
-
- case agentRequestIdentities:
- keys, err := s.agent.List()
- if err != nil {
- return nil, err
- }
-
- rep := identitiesAnswerAgentMsg{
- NumKeys: uint32(len(keys)),
- }
- for _, k := range keys {
- rep.Keys = append(rep.Keys, marshalKey(k)...)
- }
- return rep, nil
-
- case agentAddIDConstrained, agentAddIdentity:
- return nil, s.insertIdentity(data)
-
- case agentExtension:
- // Return a stub object where the whole contents of the response gets marshaled.
- var responseStub struct {
- Rest []byte `ssh:"rest"`
- }
-
- if extendedAgent, ok := s.agent.(ExtendedAgent); !ok {
- // If this agent doesn't implement extensions, [PROTOCOL.agent] section 4.7
- // requires that we return a standard SSH_AGENT_FAILURE message.
- responseStub.Rest = []byte{agentFailure}
- } else {
- var req extensionAgentMsg
- if err := ssh.Unmarshal(data, &req); err != nil {
- return nil, err
- }
- res, err := extendedAgent.Extension(req.ExtensionType, req.Contents)
- if err != nil {
- // If agent extensions are unsupported, return a standard SSH_AGENT_FAILURE
- // message as required by [PROTOCOL.agent] section 4.7.
- if err == ErrExtensionUnsupported {
- responseStub.Rest = []byte{agentFailure}
- } else {
- // As the result of any other error processing an extension request,
- // [PROTOCOL.agent] section 4.7 requires that we return a
- // SSH_AGENT_EXTENSION_FAILURE code.
- responseStub.Rest = []byte{agentExtensionFailure}
- }
- } else {
- if len(res) == 0 {
- return nil, nil
- }
- responseStub.Rest = res
- }
- }
-
- return responseStub, nil
- }
-
- return nil, fmt.Errorf("unknown opcode %d", data[0])
-}
-
-func parseConstraints(constraints []byte) (lifetimeSecs uint32, confirmBeforeUse bool, extensions []ConstraintExtension, err error) {
- for len(constraints) != 0 {
- switch constraints[0] {
- case agentConstrainLifetime:
- lifetimeSecs = binary.BigEndian.Uint32(constraints[1:5])
- constraints = constraints[5:]
- case agentConstrainConfirm:
- confirmBeforeUse = true
- constraints = constraints[1:]
- case agentConstrainExtension, agentConstrainExtensionV00:
- var msg constrainExtensionAgentMsg
- if err = ssh.Unmarshal(constraints, &msg); err != nil {
- return 0, false, nil, err
- }
- extensions = append(extensions, ConstraintExtension{
- ExtensionName: msg.ExtensionName,
- ExtensionDetails: msg.ExtensionDetails,
- })
- constraints = msg.Rest
- default:
- return 0, false, nil, fmt.Errorf("unknown constraint type: %d", constraints[0])
- }
- }
- return
-}
-
-func setConstraints(key *AddedKey, constraintBytes []byte) error {
- lifetimeSecs, confirmBeforeUse, constraintExtensions, err := parseConstraints(constraintBytes)
- if err != nil {
- return err
- }
-
- key.LifetimeSecs = lifetimeSecs
- key.ConfirmBeforeUse = confirmBeforeUse
- key.ConstraintExtensions = constraintExtensions
- return nil
-}
-
-func parseRSAKey(req []byte) (*AddedKey, error) {
- var k rsaKeyMsg
- if err := ssh.Unmarshal(req, &k); err != nil {
- return nil, err
- }
- if k.E.BitLen() > 30 {
- return nil, errors.New("agent: RSA public exponent too large")
- }
- priv := &rsa.PrivateKey{
- PublicKey: rsa.PublicKey{
- E: int(k.E.Int64()),
- N: k.N,
- },
- D: k.D,
- Primes: []*big.Int{k.P, k.Q},
- }
- priv.Precompute()
-
- addedKey := &AddedKey{PrivateKey: priv, Comment: k.Comments}
- if err := setConstraints(addedKey, k.Constraints); err != nil {
- return nil, err
- }
- return addedKey, nil
-}
-
-func parseEd25519Key(req []byte) (*AddedKey, error) {
- var k ed25519KeyMsg
- if err := ssh.Unmarshal(req, &k); err != nil {
- return nil, err
- }
- priv := ed25519.PrivateKey(k.Priv)
-
- addedKey := &AddedKey{PrivateKey: &priv, Comment: k.Comments}
- if err := setConstraints(addedKey, k.Constraints); err != nil {
- return nil, err
- }
- return addedKey, nil
-}
-
-func parseDSAKey(req []byte) (*AddedKey, error) {
- var k dsaKeyMsg
- if err := ssh.Unmarshal(req, &k); err != nil {
- return nil, err
- }
- priv := &dsa.PrivateKey{
- PublicKey: dsa.PublicKey{
- Parameters: dsa.Parameters{
- P: k.P,
- Q: k.Q,
- G: k.G,
- },
- Y: k.Y,
- },
- X: k.X,
- }
-
- addedKey := &AddedKey{PrivateKey: priv, Comment: k.Comments}
- if err := setConstraints(addedKey, k.Constraints); err != nil {
- return nil, err
- }
- return addedKey, nil
-}
-
-func unmarshalECDSA(curveName string, keyBytes []byte, privScalar *big.Int) (priv *ecdsa.PrivateKey, err error) {
- priv = &ecdsa.PrivateKey{
- D: privScalar,
- }
-
- switch curveName {
- case "nistp256":
- priv.Curve = elliptic.P256()
- case "nistp384":
- priv.Curve = elliptic.P384()
- case "nistp521":
- priv.Curve = elliptic.P521()
- default:
- return nil, fmt.Errorf("agent: unknown curve %q", curveName)
- }
-
- priv.X, priv.Y = elliptic.Unmarshal(priv.Curve, keyBytes)
- if priv.X == nil || priv.Y == nil {
- return nil, errors.New("agent: point not on curve")
- }
-
- return priv, nil
-}
-
-func parseEd25519Cert(req []byte) (*AddedKey, error) {
- var k ed25519CertMsg
- if err := ssh.Unmarshal(req, &k); err != nil {
- return nil, err
- }
- pubKey, err := ssh.ParsePublicKey(k.CertBytes)
- if err != nil {
- return nil, err
- }
- priv := ed25519.PrivateKey(k.Priv)
- cert, ok := pubKey.(*ssh.Certificate)
- if !ok {
- return nil, errors.New("agent: bad ED25519 certificate")
- }
-
- addedKey := &AddedKey{PrivateKey: &priv, Certificate: cert, Comment: k.Comments}
- if err := setConstraints(addedKey, k.Constraints); err != nil {
- return nil, err
- }
- return addedKey, nil
-}
-
-func parseECDSAKey(req []byte) (*AddedKey, error) {
- var k ecdsaKeyMsg
- if err := ssh.Unmarshal(req, &k); err != nil {
- return nil, err
- }
-
- priv, err := unmarshalECDSA(k.Curve, k.KeyBytes, k.D)
- if err != nil {
- return nil, err
- }
-
- addedKey := &AddedKey{PrivateKey: priv, Comment: k.Comments}
- if err := setConstraints(addedKey, k.Constraints); err != nil {
- return nil, err
- }
- return addedKey, nil
-}
-
-func parseRSACert(req []byte) (*AddedKey, error) {
- var k rsaCertMsg
- if err := ssh.Unmarshal(req, &k); err != nil {
- return nil, err
- }
-
- pubKey, err := ssh.ParsePublicKey(k.CertBytes)
- if err != nil {
- return nil, err
- }
-
- cert, ok := pubKey.(*ssh.Certificate)
- if !ok {
- return nil, errors.New("agent: bad RSA certificate")
- }
-
- // An RSA publickey as marshaled by rsaPublicKey.Marshal() in keys.go
- var rsaPub struct {
- Name string
- E *big.Int
- N *big.Int
- }
- if err := ssh.Unmarshal(cert.Key.Marshal(), &rsaPub); err != nil {
- return nil, fmt.Errorf("agent: Unmarshal failed to parse public key: %v", err)
- }
-
- if rsaPub.E.BitLen() > 30 {
- return nil, errors.New("agent: RSA public exponent too large")
- }
-
- priv := rsa.PrivateKey{
- PublicKey: rsa.PublicKey{
- E: int(rsaPub.E.Int64()),
- N: rsaPub.N,
- },
- D: k.D,
- Primes: []*big.Int{k.Q, k.P},
- }
- priv.Precompute()
-
- addedKey := &AddedKey{PrivateKey: &priv, Certificate: cert, Comment: k.Comments}
- if err := setConstraints(addedKey, k.Constraints); err != nil {
- return nil, err
- }
- return addedKey, nil
-}
-
-func parseDSACert(req []byte) (*AddedKey, error) {
- var k dsaCertMsg
- if err := ssh.Unmarshal(req, &k); err != nil {
- return nil, err
- }
- pubKey, err := ssh.ParsePublicKey(k.CertBytes)
- if err != nil {
- return nil, err
- }
- cert, ok := pubKey.(*ssh.Certificate)
- if !ok {
- return nil, errors.New("agent: bad DSA certificate")
- }
-
- // A DSA publickey as marshaled by dsaPublicKey.Marshal() in keys.go
- var w struct {
- Name string
- P, Q, G, Y *big.Int
- }
- if err := ssh.Unmarshal(cert.Key.Marshal(), &w); err != nil {
- return nil, fmt.Errorf("agent: Unmarshal failed to parse public key: %v", err)
- }
-
- priv := &dsa.PrivateKey{
- PublicKey: dsa.PublicKey{
- Parameters: dsa.Parameters{
- P: w.P,
- Q: w.Q,
- G: w.G,
- },
- Y: w.Y,
- },
- X: k.X,
- }
-
- addedKey := &AddedKey{PrivateKey: priv, Certificate: cert, Comment: k.Comments}
- if err := setConstraints(addedKey, k.Constraints); err != nil {
- return nil, err
- }
- return addedKey, nil
-}
-
-func parseECDSACert(req []byte) (*AddedKey, error) {
- var k ecdsaCertMsg
- if err := ssh.Unmarshal(req, &k); err != nil {
- return nil, err
- }
-
- pubKey, err := ssh.ParsePublicKey(k.CertBytes)
- if err != nil {
- return nil, err
- }
- cert, ok := pubKey.(*ssh.Certificate)
- if !ok {
- return nil, errors.New("agent: bad ECDSA certificate")
- }
-
- // An ECDSA publickey as marshaled by ecdsaPublicKey.Marshal() in keys.go
- var ecdsaPub struct {
- Name string
- ID string
- Key []byte
- }
- if err := ssh.Unmarshal(cert.Key.Marshal(), &ecdsaPub); err != nil {
- return nil, err
- }
-
- priv, err := unmarshalECDSA(ecdsaPub.ID, ecdsaPub.Key, k.D)
- if err != nil {
- return nil, err
- }
-
- addedKey := &AddedKey{PrivateKey: priv, Certificate: cert, Comment: k.Comments}
- if err := setConstraints(addedKey, k.Constraints); err != nil {
- return nil, err
- }
- return addedKey, nil
-}
-
-func (s *server) insertIdentity(req []byte) error {
- var record struct {
- Type string `sshtype:"17|25"`
- Rest []byte `ssh:"rest"`
- }
-
- if err := ssh.Unmarshal(req, &record); err != nil {
- return err
- }
-
- var addedKey *AddedKey
- var err error
-
- switch record.Type {
- case ssh.KeyAlgoRSA:
- addedKey, err = parseRSAKey(req)
- case ssh.KeyAlgoDSA:
- addedKey, err = parseDSAKey(req)
- case ssh.KeyAlgoECDSA256, ssh.KeyAlgoECDSA384, ssh.KeyAlgoECDSA521:
- addedKey, err = parseECDSAKey(req)
- case ssh.KeyAlgoED25519:
- addedKey, err = parseEd25519Key(req)
- case ssh.CertAlgoRSAv01:
- addedKey, err = parseRSACert(req)
- case ssh.CertAlgoDSAv01:
- addedKey, err = parseDSACert(req)
- case ssh.CertAlgoECDSA256v01, ssh.CertAlgoECDSA384v01, ssh.CertAlgoECDSA521v01:
- addedKey, err = parseECDSACert(req)
- case ssh.CertAlgoED25519v01:
- addedKey, err = parseEd25519Cert(req)
- default:
- return fmt.Errorf("agent: not implemented: %q", record.Type)
- }
-
- if err != nil {
- return err
- }
- return s.agent.Add(*addedKey)
-}
-
-// ServeAgent serves the agent protocol on the given connection. It
-// returns when an I/O error occurs.
-func ServeAgent(agent Agent, c io.ReadWriter) error {
- s := &server{agent}
-
- var length [4]byte
- for {
- if _, err := io.ReadFull(c, length[:]); err != nil {
- return err
- }
- l := binary.BigEndian.Uint32(length[:])
- if l == 0 {
- return fmt.Errorf("agent: request size is 0")
- }
- if l > maxAgentResponseBytes {
- // We also cap requests.
- return fmt.Errorf("agent: request too large: %d", l)
- }
-
- req := make([]byte, l)
- if _, err := io.ReadFull(c, req); err != nil {
- return err
- }
-
- repData := s.processRequestBytes(req)
- if len(repData) > maxAgentResponseBytes {
- return fmt.Errorf("agent: reply too large: %d bytes", len(repData))
- }
-
- binary.BigEndian.PutUint32(length[:], uint32(len(repData)))
- if _, err := c.Write(length[:]); err != nil {
- return err
- }
- if _, err := c.Write(repData); err != nil {
- return err
- }
- }
-}
diff --git a/vendor/golang.org/x/crypto/ssh/knownhosts/knownhosts.go b/vendor/golang.org/x/crypto/ssh/knownhosts/knownhosts.go
deleted file mode 100644
index 7376a8dff23..00000000000
--- a/vendor/golang.org/x/crypto/ssh/knownhosts/knownhosts.go
+++ /dev/null
@@ -1,540 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package knownhosts implements a parser for the OpenSSH known_hosts
-// host key database, and provides utility functions for writing
-// OpenSSH compliant known_hosts files.
-package knownhosts
-
-import (
- "bufio"
- "bytes"
- "crypto/hmac"
- "crypto/rand"
- "crypto/sha1"
- "encoding/base64"
- "errors"
- "fmt"
- "io"
- "net"
- "os"
- "strings"
-
- "golang.org/x/crypto/ssh"
-)
-
-// See the sshd manpage
-// (http://man.openbsd.org/sshd#SSH_KNOWN_HOSTS_FILE_FORMAT) for
-// background.
-
-type addr struct{ host, port string }
-
-func (a *addr) String() string {
- h := a.host
- if strings.Contains(h, ":") {
- h = "[" + h + "]"
- }
- return h + ":" + a.port
-}
-
-type matcher interface {
- match(addr) bool
-}
-
-type hostPattern struct {
- negate bool
- addr addr
-}
-
-func (p *hostPattern) String() string {
- n := ""
- if p.negate {
- n = "!"
- }
-
- return n + p.addr.String()
-}
-
-type hostPatterns []hostPattern
-
-func (ps hostPatterns) match(a addr) bool {
- matched := false
- for _, p := range ps {
- if !p.match(a) {
- continue
- }
- if p.negate {
- return false
- }
- matched = true
- }
- return matched
-}
-
-// See
-// https://android.googlesource.com/platform/external/openssh/+/ab28f5495c85297e7a597c1ba62e996416da7c7e/addrmatch.c
-// The matching of * has no regard for separators, unlike filesystem globs
-func wildcardMatch(pat []byte, str []byte) bool {
- for {
- if len(pat) == 0 {
- return len(str) == 0
- }
- if len(str) == 0 {
- return false
- }
-
- if pat[0] == '*' {
- if len(pat) == 1 {
- return true
- }
-
- for j := range str {
- if wildcardMatch(pat[1:], str[j:]) {
- return true
- }
- }
- return false
- }
-
- if pat[0] == '?' || pat[0] == str[0] {
- pat = pat[1:]
- str = str[1:]
- } else {
- return false
- }
- }
-}
-
-func (p *hostPattern) match(a addr) bool {
- return wildcardMatch([]byte(p.addr.host), []byte(a.host)) && p.addr.port == a.port
-}
-
-type keyDBLine struct {
- cert bool
- matcher matcher
- knownKey KnownKey
-}
-
-func serialize(k ssh.PublicKey) string {
- return k.Type() + " " + base64.StdEncoding.EncodeToString(k.Marshal())
-}
-
-func (l *keyDBLine) match(a addr) bool {
- return l.matcher.match(a)
-}
-
-type hostKeyDB struct {
- // Serialized version of revoked keys
- revoked map[string]*KnownKey
- lines []keyDBLine
-}
-
-func newHostKeyDB() *hostKeyDB {
- db := &hostKeyDB{
- revoked: make(map[string]*KnownKey),
- }
-
- return db
-}
-
-func keyEq(a, b ssh.PublicKey) bool {
- return bytes.Equal(a.Marshal(), b.Marshal())
-}
-
-// IsHostAuthority can be used as a callback in ssh.CertChecker
-func (db *hostKeyDB) IsHostAuthority(remote ssh.PublicKey, address string) bool {
- h, p, err := net.SplitHostPort(address)
- if err != nil {
- return false
- }
- a := addr{host: h, port: p}
-
- for _, l := range db.lines {
- if l.cert && keyEq(l.knownKey.Key, remote) && l.match(a) {
- return true
- }
- }
- return false
-}
-
-// IsRevoked can be used as a callback in ssh.CertChecker
-func (db *hostKeyDB) IsRevoked(key *ssh.Certificate) bool {
- _, ok := db.revoked[string(key.Marshal())]
- return ok
-}
-
-const markerCert = "@cert-authority"
-const markerRevoked = "@revoked"
-
-func nextWord(line []byte) (string, []byte) {
- i := bytes.IndexAny(line, "\t ")
- if i == -1 {
- return string(line), nil
- }
-
- return string(line[:i]), bytes.TrimSpace(line[i:])
-}
-
-func parseLine(line []byte) (marker, host string, key ssh.PublicKey, err error) {
- if w, next := nextWord(line); w == markerCert || w == markerRevoked {
- marker = w
- line = next
- }
-
- host, line = nextWord(line)
- if len(line) == 0 {
- return "", "", nil, errors.New("knownhosts: missing host pattern")
- }
-
- // ignore the keytype as it's in the key blob anyway.
- _, line = nextWord(line)
- if len(line) == 0 {
- return "", "", nil, errors.New("knownhosts: missing key type pattern")
- }
-
- keyBlob, _ := nextWord(line)
-
- keyBytes, err := base64.StdEncoding.DecodeString(keyBlob)
- if err != nil {
- return "", "", nil, err
- }
- key, err = ssh.ParsePublicKey(keyBytes)
- if err != nil {
- return "", "", nil, err
- }
-
- return marker, host, key, nil
-}
-
-func (db *hostKeyDB) parseLine(line []byte, filename string, linenum int) error {
- marker, pattern, key, err := parseLine(line)
- if err != nil {
- return err
- }
-
- if marker == markerRevoked {
- db.revoked[string(key.Marshal())] = &KnownKey{
- Key: key,
- Filename: filename,
- Line: linenum,
- }
-
- return nil
- }
-
- entry := keyDBLine{
- cert: marker == markerCert,
- knownKey: KnownKey{
- Filename: filename,
- Line: linenum,
- Key: key,
- },
- }
-
- if pattern[0] == '|' {
- entry.matcher, err = newHashedHost(pattern)
- } else {
- entry.matcher, err = newHostnameMatcher(pattern)
- }
-
- if err != nil {
- return err
- }
-
- db.lines = append(db.lines, entry)
- return nil
-}
-
-func newHostnameMatcher(pattern string) (matcher, error) {
- var hps hostPatterns
- for _, p := range strings.Split(pattern, ",") {
- if len(p) == 0 {
- continue
- }
-
- var a addr
- var negate bool
- if p[0] == '!' {
- negate = true
- p = p[1:]
- }
-
- if len(p) == 0 {
- return nil, errors.New("knownhosts: negation without following hostname")
- }
-
- var err error
- if p[0] == '[' {
- a.host, a.port, err = net.SplitHostPort(p)
- if err != nil {
- return nil, err
- }
- } else {
- a.host, a.port, err = net.SplitHostPort(p)
- if err != nil {
- a.host = p
- a.port = "22"
- }
- }
- hps = append(hps, hostPattern{
- negate: negate,
- addr: a,
- })
- }
- return hps, nil
-}
-
-// KnownKey represents a key declared in a known_hosts file.
-type KnownKey struct {
- Key ssh.PublicKey
- Filename string
- Line int
-}
-
-func (k *KnownKey) String() string {
- return fmt.Sprintf("%s:%d: %s", k.Filename, k.Line, serialize(k.Key))
-}
-
-// KeyError is returned if we did not find the key in the host key
-// database, or there was a mismatch. Typically, in batch
-// applications, this should be interpreted as failure. Interactive
-// applications can offer an interactive prompt to the user.
-type KeyError struct {
- // Want holds the accepted host keys. For each key algorithm,
- // there can be one hostkey. If Want is empty, the host is
- // unknown. If Want is non-empty, there was a mismatch, which
- // can signify a MITM attack.
- Want []KnownKey
-}
-
-func (u *KeyError) Error() string {
- if len(u.Want) == 0 {
- return "knownhosts: key is unknown"
- }
- return "knownhosts: key mismatch"
-}
-
-// RevokedError is returned if we found a key that was revoked.
-type RevokedError struct {
- Revoked KnownKey
-}
-
-func (r *RevokedError) Error() string {
- return "knownhosts: key is revoked"
-}
-
-// check checks a key against the host database. This should not be
-// used for verifying certificates.
-func (db *hostKeyDB) check(address string, remote net.Addr, remoteKey ssh.PublicKey) error {
- if revoked := db.revoked[string(remoteKey.Marshal())]; revoked != nil {
- return &RevokedError{Revoked: *revoked}
- }
-
- host, port, err := net.SplitHostPort(remote.String())
- if err != nil {
- return fmt.Errorf("knownhosts: SplitHostPort(%s): %v", remote, err)
- }
-
- hostToCheck := addr{host, port}
- if address != "" {
- // Give preference to the hostname if available.
- host, port, err := net.SplitHostPort(address)
- if err != nil {
- return fmt.Errorf("knownhosts: SplitHostPort(%s): %v", address, err)
- }
-
- hostToCheck = addr{host, port}
- }
-
- return db.checkAddr(hostToCheck, remoteKey)
-}
-
-// checkAddr checks if we can find the given public key for the
-// given address. If we only find an entry for the IP address,
-// or only the hostname, then this still succeeds.
-func (db *hostKeyDB) checkAddr(a addr, remoteKey ssh.PublicKey) error {
- // TODO(hanwen): are these the right semantics? What if there
- // is just a key for the IP address, but not for the
- // hostname?
-
- // Algorithm => key.
- knownKeys := map[string]KnownKey{}
- for _, l := range db.lines {
- if l.match(a) {
- typ := l.knownKey.Key.Type()
- if _, ok := knownKeys[typ]; !ok {
- knownKeys[typ] = l.knownKey
- }
- }
- }
-
- keyErr := &KeyError{}
- for _, v := range knownKeys {
- keyErr.Want = append(keyErr.Want, v)
- }
-
- // Unknown remote host.
- if len(knownKeys) == 0 {
- return keyErr
- }
-
- // If the remote host starts using a different, unknown key type, we
- // also interpret that as a mismatch.
- if known, ok := knownKeys[remoteKey.Type()]; !ok || !keyEq(known.Key, remoteKey) {
- return keyErr
- }
-
- return nil
-}
-
-// The Read function parses file contents.
-func (db *hostKeyDB) Read(r io.Reader, filename string) error {
- scanner := bufio.NewScanner(r)
-
- lineNum := 0
- for scanner.Scan() {
- lineNum++
- line := scanner.Bytes()
- line = bytes.TrimSpace(line)
- if len(line) == 0 || line[0] == '#' {
- continue
- }
-
- if err := db.parseLine(line, filename, lineNum); err != nil {
- return fmt.Errorf("knownhosts: %s:%d: %v", filename, lineNum, err)
- }
- }
- return scanner.Err()
-}
-
-// New creates a host key callback from the given OpenSSH host key
-// files. The returned callback is for use in
-// ssh.ClientConfig.HostKeyCallback. By preference, the key check
-// operates on the hostname if available, i.e. if a server changes its
-// IP address, the host key check will still succeed, even though a
-// record of the new IP address is not available.
-func New(files ...string) (ssh.HostKeyCallback, error) {
- db := newHostKeyDB()
- for _, fn := range files {
- f, err := os.Open(fn)
- if err != nil {
- return nil, err
- }
- defer f.Close()
- if err := db.Read(f, fn); err != nil {
- return nil, err
- }
- }
-
- var certChecker ssh.CertChecker
- certChecker.IsHostAuthority = db.IsHostAuthority
- certChecker.IsRevoked = db.IsRevoked
- certChecker.HostKeyFallback = db.check
-
- return certChecker.CheckHostKey, nil
-}
-
-// Normalize normalizes an address into the form used in known_hosts
-func Normalize(address string) string {
- host, port, err := net.SplitHostPort(address)
- if err != nil {
- host = address
- port = "22"
- }
- entry := host
- if port != "22" {
- entry = "[" + entry + "]:" + port
- } else if strings.Contains(host, ":") && !strings.HasPrefix(host, "[") {
- entry = "[" + entry + "]"
- }
- return entry
-}
-
-// Line returns a line to add append to the known_hosts files.
-func Line(addresses []string, key ssh.PublicKey) string {
- var trimmed []string
- for _, a := range addresses {
- trimmed = append(trimmed, Normalize(a))
- }
-
- return strings.Join(trimmed, ",") + " " + serialize(key)
-}
-
-// HashHostname hashes the given hostname. The hostname is not
-// normalized before hashing.
-func HashHostname(hostname string) string {
- // TODO(hanwen): check if we can safely normalize this always.
- salt := make([]byte, sha1.Size)
-
- _, err := rand.Read(salt)
- if err != nil {
- panic(fmt.Sprintf("crypto/rand failure %v", err))
- }
-
- hash := hashHost(hostname, salt)
- return encodeHash(sha1HashType, salt, hash)
-}
-
-func decodeHash(encoded string) (hashType string, salt, hash []byte, err error) {
- if len(encoded) == 0 || encoded[0] != '|' {
- err = errors.New("knownhosts: hashed host must start with '|'")
- return
- }
- components := strings.Split(encoded, "|")
- if len(components) != 4 {
- err = fmt.Errorf("knownhosts: got %d components, want 3", len(components))
- return
- }
-
- hashType = components[1]
- if salt, err = base64.StdEncoding.DecodeString(components[2]); err != nil {
- return
- }
- if hash, err = base64.StdEncoding.DecodeString(components[3]); err != nil {
- return
- }
- return
-}
-
-func encodeHash(typ string, salt []byte, hash []byte) string {
- return strings.Join([]string{"",
- typ,
- base64.StdEncoding.EncodeToString(salt),
- base64.StdEncoding.EncodeToString(hash),
- }, "|")
-}
-
-// See https://android.googlesource.com/platform/external/openssh/+/ab28f5495c85297e7a597c1ba62e996416da7c7e/hostfile.c#120
-func hashHost(hostname string, salt []byte) []byte {
- mac := hmac.New(sha1.New, salt)
- mac.Write([]byte(hostname))
- return mac.Sum(nil)
-}
-
-type hashedHost struct {
- salt []byte
- hash []byte
-}
-
-const sha1HashType = "1"
-
-func newHashedHost(encoded string) (*hashedHost, error) {
- typ, salt, hash, err := decodeHash(encoded)
- if err != nil {
- return nil, err
- }
-
- // The type field seems for future algorithm agility, but it's
- // actually hardcoded in openssh currently, see
- // https://android.googlesource.com/platform/external/openssh/+/ab28f5495c85297e7a597c1ba62e996416da7c7e/hostfile.c#120
- if typ != sha1HashType {
- return nil, fmt.Errorf("knownhosts: got hash type %s, must be '1'", typ)
- }
-
- return &hashedHost{salt: salt, hash: hash}, nil
-}
-
-func (h *hashedHost) match(a addr) bool {
- return bytes.Equal(hashHost(Normalize(a.String()), h.salt), h.hash)
-}
diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go
index ec07aab0578..02609d5b21d 100644
--- a/vendor/golang.org/x/sys/cpu/cpu.go
+++ b/vendor/golang.org/x/sys/cpu/cpu.go
@@ -201,6 +201,25 @@ var S390X struct {
_ CacheLinePad
}
+// RISCV64 contains the supported CPU features and performance characteristics for riscv64
+// platforms. The booleans in RISCV64, with the exception of HasFastMisaligned, indicate
+// the presence of RISC-V extensions.
+//
+// It is safe to assume that all the RV64G extensions are supported and so they are omitted from
+// this structure. As riscv64 Go programs require at least RV64G, the code that populates
+// this structure cannot run successfully if some of the RV64G extensions are missing.
+// The struct is padded to avoid false sharing.
+var RISCV64 struct {
+ _ CacheLinePad
+ HasFastMisaligned bool // Fast misaligned accesses
+ HasC bool // Compressed instruction-set extension
+ HasV bool // Vector extension compatible with RVV 1.0
+ HasZba bool // Address generation instructions extension
+ HasZbb bool // Basic bit-manipulation extension
+ HasZbs bool // Single-bit instructions extension
+ _ CacheLinePad
+}
+
func init() {
archInit()
initOptions()
diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go
index cd63e733557..7d902b6847b 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x
+//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x && !riscv64
package cpu
diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go
new file mode 100644
index 00000000000..cb4a0c57280
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go
@@ -0,0 +1,137 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// RISC-V extension discovery code for Linux. The approach here is to first try the riscv_hwprobe
+// syscall falling back to HWCAP to check for the C extension if riscv_hwprobe is not available.
+//
+// A note on detection of the Vector extension using HWCAP.
+//
+// Support for the Vector extension version 1.0 was added to the Linux kernel in release 6.5.
+// Support for the riscv_hwprobe syscall was added in 6.4. It follows that if the riscv_hwprobe
+// syscall is not available then neither is the Vector extension (which needs kernel support).
+// The riscv_hwprobe syscall should then be all we need to detect the Vector extension.
+// However, some RISC-V board manufacturers ship boards with an older kernel on top of which
+// they have back-ported various versions of the Vector extension patches but not the riscv_hwprobe
+// patches. These kernels advertise support for the Vector extension using HWCAP. Falling
+// back to HWCAP to detect the Vector extension, if riscv_hwprobe is not available, or simply not
+// bothering with riscv_hwprobe at all and just using HWCAP may then seem like an attractive option.
+//
+// Unfortunately, simply checking the 'V' bit in AT_HWCAP will not work as this bit is used by
+// RISC-V board and cloud instance providers to mean different things. The Lichee Pi 4A board
+// and the Scaleway RV1 cloud instances use the 'V' bit to advertise their support for the unratified
+// 0.7.1 version of the Vector Specification. The Banana Pi BPI-F3 and the CanMV-K230 board use
+// it to advertise support for 1.0 of the Vector extension. Versions 0.7.1 and 1.0 of the Vector
+// extension are binary incompatible. HWCAP can then not be used in isolation to populate the
+// HasV field as this field indicates that the underlying CPU is compatible with RVV 1.0.
+//
+// There is a way at runtime to distinguish between versions 0.7.1 and 1.0 of the Vector
+// specification by issuing a RVV 1.0 vsetvli instruction and checking the vill bit of the vtype
+// register. This check would allow us to safely detect version 1.0 of the Vector extension
+// with HWCAP, if riscv_hwprobe were not available. However, the check cannot
+// be added until the assembler supports the Vector instructions.
+//
+// Note the riscv_hwprobe syscall does not suffer from these ambiguities by design as all of the
+// extensions it advertises support for are explicitly versioned. It's also worth noting that
+// the riscv_hwprobe syscall is the only way to detect multi-letter RISC-V extensions, e.g., Zba.
+// These cannot be detected using HWCAP and so riscv_hwprobe must be used to detect the majority
+// of RISC-V extensions.
+//
+// Please see https://docs.kernel.org/arch/riscv/hwprobe.html for more information.
+
+// golang.org/x/sys/cpu is not allowed to depend on golang.org/x/sys/unix so we must
+// reproduce the constants, types and functions needed to make the riscv_hwprobe syscall
+// here.
+
+const (
+ // Copied from golang.org/x/sys/unix/ztypes_linux_riscv64.go.
+ riscv_HWPROBE_KEY_IMA_EXT_0 = 0x4
+ riscv_HWPROBE_IMA_C = 0x2
+ riscv_HWPROBE_IMA_V = 0x4
+ riscv_HWPROBE_EXT_ZBA = 0x8
+ riscv_HWPROBE_EXT_ZBB = 0x10
+ riscv_HWPROBE_EXT_ZBS = 0x20
+ riscv_HWPROBE_KEY_CPUPERF_0 = 0x5
+ riscv_HWPROBE_MISALIGNED_FAST = 0x3
+ riscv_HWPROBE_MISALIGNED_MASK = 0x7
+)
+
+const (
+ // sys_RISCV_HWPROBE is copied from golang.org/x/sys/unix/zsysnum_linux_riscv64.go.
+ sys_RISCV_HWPROBE = 258
+)
+
+// riscvHWProbePairs is copied from golang.org/x/sys/unix/ztypes_linux_riscv64.go.
+type riscvHWProbePairs struct {
+ key int64
+ value uint64
+}
+
+const (
+ // CPU features
+ hwcap_RISCV_ISA_C = 1 << ('C' - 'A')
+)
+
+func doinit() {
+ // A slice of key/value pair structures is passed to the RISCVHWProbe syscall. The key
+ // field should be initialised with one of the key constants defined above, e.g.,
+ // RISCV_HWPROBE_KEY_IMA_EXT_0. The syscall will set the value field to the appropriate value.
+ // If the kernel does not recognise a key it will set the key field to -1 and the value field to 0.
+
+ pairs := []riscvHWProbePairs{
+ {riscv_HWPROBE_KEY_IMA_EXT_0, 0},
+ {riscv_HWPROBE_KEY_CPUPERF_0, 0},
+ }
+
+ // This call only indicates that extensions are supported if they are implemented on all cores.
+ if riscvHWProbe(pairs, 0) {
+ if pairs[0].key != -1 {
+ v := uint(pairs[0].value)
+ RISCV64.HasC = isSet(v, riscv_HWPROBE_IMA_C)
+ RISCV64.HasV = isSet(v, riscv_HWPROBE_IMA_V)
+ RISCV64.HasZba = isSet(v, riscv_HWPROBE_EXT_ZBA)
+ RISCV64.HasZbb = isSet(v, riscv_HWPROBE_EXT_ZBB)
+ RISCV64.HasZbs = isSet(v, riscv_HWPROBE_EXT_ZBS)
+ }
+ if pairs[1].key != -1 {
+ v := pairs[1].value & riscv_HWPROBE_MISALIGNED_MASK
+ RISCV64.HasFastMisaligned = v == riscv_HWPROBE_MISALIGNED_FAST
+ }
+ }
+
+ // Let's double check with HWCAP if the C extension does not appear to be supported.
+ // This may happen if we're running on a kernel older than 6.4.
+
+ if !RISCV64.HasC {
+ RISCV64.HasC = isSet(hwCap, hwcap_RISCV_ISA_C)
+ }
+}
+
+func isSet(hwc uint, value uint) bool {
+ return hwc&value != 0
+}
+
+// riscvHWProbe is a simplified version of the generated wrapper function found in
+// golang.org/x/sys/unix/zsyscall_linux_riscv64.go. We simplify it by removing the
+// cpuCount and cpus parameters which we do not need. We always want to pass 0 for
+// these parameters here so the kernel only reports the extensions that are present
+// on all cores.
+func riscvHWProbe(pairs []riscvHWProbePairs, flags uint) bool {
+ var _zero uintptr
+ var p0 unsafe.Pointer
+ if len(pairs) > 0 {
+ p0 = unsafe.Pointer(&pairs[0])
+ } else {
+ p0 = unsafe.Pointer(&_zero)
+ }
+
+ _, _, e1 := syscall.Syscall6(sys_RISCV_HWPROBE, uintptr(p0), uintptr(len(pairs)), uintptr(0), uintptr(0), uintptr(flags), 0)
+ return e1 == 0
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go
index 7f0c79c004b..aca3199c911 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go
@@ -8,4 +8,13 @@ package cpu
const cacheLineSize = 64
-func initOptions() {}
+func initOptions() {
+ options = []option{
+ {Name: "fastmisaligned", Feature: &RISCV64.HasFastMisaligned},
+ {Name: "c", Feature: &RISCV64.HasC},
+ {Name: "v", Feature: &RISCV64.HasV},
+ {Name: "zba", Feature: &RISCV64.HasZba},
+ {Name: "zbb", Feature: &RISCV64.HasZbb},
+ {Name: "zbs", Feature: &RISCV64.HasZbs},
+ }
+}
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh
index d07dd09eb50..e14b766a32c 100644
--- a/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ b/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -552,6 +552,7 @@ ccflags="$@"
$2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ &&
$2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ ||
$2 ~ /^SOCK_|SK_DIAG_|SKNLGRP_$/ ||
+ $2 ~ /^(CONNECT|SAE)_/ ||
$2 ~ /^FIORDCHK$/ ||
$2 ~ /^SIOC/ ||
$2 ~ /^TIOC/ ||
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go
index 2d15200adb4..099867deede 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go
@@ -566,6 +566,43 @@ func PthreadFchdir(fd int) (err error) {
return pthread_fchdir_np(fd)
}
+// Connectx calls connectx(2) to initiate a connection on a socket.
+//
+// srcIf, srcAddr, and dstAddr are filled into a [SaEndpoints] struct and passed as the endpoints argument.
+//
+// - srcIf is the optional source interface index. 0 means unspecified.
+// - srcAddr is the optional source address. nil means unspecified.
+// - dstAddr is the destination address.
+//
+// On success, Connectx returns the number of bytes enqueued for transmission.
+func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocID, flags uint32, iov []Iovec, connid *SaeConnID) (n uintptr, err error) {
+ endpoints := SaEndpoints{
+ Srcif: srcIf,
+ }
+
+ if srcAddr != nil {
+ addrp, addrlen, err := srcAddr.sockaddr()
+ if err != nil {
+ return 0, err
+ }
+ endpoints.Srcaddr = (*RawSockaddr)(addrp)
+ endpoints.Srcaddrlen = uint32(addrlen)
+ }
+
+ if dstAddr != nil {
+ addrp, addrlen, err := dstAddr.sockaddr()
+ if err != nil {
+ return 0, err
+ }
+ endpoints.Dstaddr = (*RawSockaddr)(addrp)
+ endpoints.Dstaddrlen = uint32(addrlen)
+ }
+
+ err = connectx(fd, &endpoints, associd, flags, iov, &n, connid)
+ return
+}
+
+//sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error)
//sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error)
//sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd.go b/vendor/golang.org/x/sys/unix/syscall_hurd.go
index ba46651f8e3..a6a2d2fc2b9 100644
--- a/vendor/golang.org/x/sys/unix/syscall_hurd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_hurd.go
@@ -11,6 +11,7 @@ package unix
int ioctl(int, unsigned long int, uintptr_t);
*/
import "C"
+import "unsafe"
func ioctl(fd int, req uint, arg uintptr) (err error) {
r0, er := C.ioctl(C.int(fd), C.ulong(req), C.uintptr_t(arg))
diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go
index 4308ac1772b..d73c4652e6c 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go
@@ -237,6 +237,9 @@ const (
CLOCK_UPTIME_RAW_APPROX = 0x9
CLONE_NOFOLLOW = 0x1
CLONE_NOOWNERCOPY = 0x2
+ CONNECT_DATA_AUTHENTICATED = 0x4
+ CONNECT_DATA_IDEMPOTENT = 0x2
+ CONNECT_RESUME_ON_READ_WRITE = 0x1
CR0 = 0x0
CR1 = 0x1000
CR2 = 0x2000
@@ -1265,6 +1268,10 @@ const (
RTV_SSTHRESH = 0x20
RUSAGE_CHILDREN = -0x1
RUSAGE_SELF = 0x0
+ SAE_ASSOCID_ALL = 0xffffffff
+ SAE_ASSOCID_ANY = 0x0
+ SAE_CONNID_ALL = 0xffffffff
+ SAE_CONNID_ANY = 0x0
SCM_CREDS = 0x3
SCM_RIGHTS = 0x1
SCM_TIMESTAMP = 0x2
diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go
index c8068a7a169..4a55a400588 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go
@@ -237,6 +237,9 @@ const (
CLOCK_UPTIME_RAW_APPROX = 0x9
CLONE_NOFOLLOW = 0x1
CLONE_NOOWNERCOPY = 0x2
+ CONNECT_DATA_AUTHENTICATED = 0x4
+ CONNECT_DATA_IDEMPOTENT = 0x2
+ CONNECT_RESUME_ON_READ_WRITE = 0x1
CR0 = 0x0
CR1 = 0x1000
CR2 = 0x2000
@@ -1265,6 +1268,10 @@ const (
RTV_SSTHRESH = 0x20
RUSAGE_CHILDREN = -0x1
RUSAGE_SELF = 0x0
+ SAE_ASSOCID_ALL = 0xffffffff
+ SAE_ASSOCID_ANY = 0x0
+ SAE_CONNID_ALL = 0xffffffff
+ SAE_CONNID_ANY = 0x0
SCM_CREDS = 0x3
SCM_RIGHTS = 0x1
SCM_TIMESTAMP = 0x2
diff --git a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go
index da08b2ab3d9..1ec2b1407b1 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go
@@ -581,6 +581,8 @@ const (
AT_EMPTY_PATH = 0x1000
AT_REMOVEDIR = 0x200
RENAME_NOREPLACE = 1 << 0
+ ST_RDONLY = 1
+ ST_NOSUID = 2
)
const (
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
index b622533ef2c..24b346e1a35 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
@@ -841,6 +841,26 @@ var libc_pthread_fchdir_np_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) {
+ var _p0 unsafe.Pointer
+ if len(iov) > 0 {
+ _p0 = unsafe.Pointer(&iov[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_connectx_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) {
_, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags))
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
index cfe6646baf2..ebd213100b3 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
@@ -248,6 +248,11 @@ TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8
DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB)
+TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_connectx(SB)
+GLOBL ·libc_connectx_trampoline_addr(SB), RODATA, $8
+DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB)
+
TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sendfile(SB)
GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
index 13f624f69f1..824b9c2d5e0 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
@@ -841,6 +841,26 @@ var libc_pthread_fchdir_np_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) {
+ var _p0 unsafe.Pointer
+ if len(iov) > 0 {
+ _p0 = unsafe.Pointer(&iov[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_connectx_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) {
_, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags))
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
index fe222b75df0..4f178a22934 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
@@ -248,6 +248,11 @@ TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8
DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB)
+TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_connectx(SB)
+GLOBL ·libc_connectx_trampoline_addr(SB), RODATA, $8
+DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB)
+
TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sendfile(SB)
GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8
diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go
index 091d107f3a5..d003c3d4378 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go
@@ -306,6 +306,19 @@ type XVSockPgen struct {
type _Socklen uint32
+type SaeAssocID uint32
+
+type SaeConnID uint32
+
+type SaEndpoints struct {
+ Srcif uint32
+ Srcaddr *RawSockaddr
+ Srcaddrlen uint32
+ Dstaddr *RawSockaddr
+ Dstaddrlen uint32
+ _ [4]byte
+}
+
type Xucred struct {
Version uint32
Uid uint32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go
index 28ff4ef74d0..0d45a941aae 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go
@@ -306,6 +306,19 @@ type XVSockPgen struct {
type _Socklen uint32
+type SaeAssocID uint32
+
+type SaeConnID uint32
+
+type SaEndpoints struct {
+ Srcif uint32
+ Srcaddr *RawSockaddr
+ Srcaddrlen uint32
+ Dstaddr *RawSockaddr
+ Dstaddrlen uint32
+ _ [4]byte
+}
+
type Xucred struct {
Version uint32
Uid uint32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go
index 6cbd094a3aa..51e13eb055f 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go
@@ -625,6 +625,7 @@ const (
POLLRDNORM = 0x40
POLLWRBAND = 0x100
POLLWRNORM = 0x4
+ POLLRDHUP = 0x4000
)
type CapRights struct {
diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go
index 7c03b6ee77f..d002d8ef3cc 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go
@@ -630,6 +630,7 @@ const (
POLLRDNORM = 0x40
POLLWRBAND = 0x100
POLLWRNORM = 0x4
+ POLLRDHUP = 0x4000
)
type CapRights struct {
diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go
index 422107ee8b1..3f863d898dd 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go
@@ -616,6 +616,7 @@ const (
POLLRDNORM = 0x40
POLLWRBAND = 0x100
POLLWRNORM = 0x4
+ POLLRDHUP = 0x4000
)
type CapRights struct {
diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go
index 505a12acfd9..61c72931066 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go
@@ -610,6 +610,7 @@ const (
POLLRDNORM = 0x40
POLLWRBAND = 0x100
POLLWRNORM = 0x4
+ POLLRDHUP = 0x4000
)
type CapRights struct {
diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go
index cc986c79006..b5d17414f03 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go
@@ -612,6 +612,7 @@ const (
POLLRDNORM = 0x40
POLLWRBAND = 0x100
POLLWRNORM = 0x4
+ POLLRDHUP = 0x4000
)
type CapRights struct {
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go
index 7f1961b907a..9f2550dc312 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go
@@ -2486,7 +2486,7 @@ type XDPMmapOffsets struct {
type XDPUmemReg struct {
Addr uint64
Len uint64
- Chunk_size uint32
+ Size uint32
Headroom uint32
Flags uint32
Tx_metadata_len uint32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
index 15adc04142f..ad05b51a603 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
@@ -727,6 +727,37 @@ const (
RISCV_HWPROBE_EXT_ZBA = 0x8
RISCV_HWPROBE_EXT_ZBB = 0x10
RISCV_HWPROBE_EXT_ZBS = 0x20
+ RISCV_HWPROBE_EXT_ZICBOZ = 0x40
+ RISCV_HWPROBE_EXT_ZBC = 0x80
+ RISCV_HWPROBE_EXT_ZBKB = 0x100
+ RISCV_HWPROBE_EXT_ZBKC = 0x200
+ RISCV_HWPROBE_EXT_ZBKX = 0x400
+ RISCV_HWPROBE_EXT_ZKND = 0x800
+ RISCV_HWPROBE_EXT_ZKNE = 0x1000
+ RISCV_HWPROBE_EXT_ZKNH = 0x2000
+ RISCV_HWPROBE_EXT_ZKSED = 0x4000
+ RISCV_HWPROBE_EXT_ZKSH = 0x8000
+ RISCV_HWPROBE_EXT_ZKT = 0x10000
+ RISCV_HWPROBE_EXT_ZVBB = 0x20000
+ RISCV_HWPROBE_EXT_ZVBC = 0x40000
+ RISCV_HWPROBE_EXT_ZVKB = 0x80000
+ RISCV_HWPROBE_EXT_ZVKG = 0x100000
+ RISCV_HWPROBE_EXT_ZVKNED = 0x200000
+ RISCV_HWPROBE_EXT_ZVKNHA = 0x400000
+ RISCV_HWPROBE_EXT_ZVKNHB = 0x800000
+ RISCV_HWPROBE_EXT_ZVKSED = 0x1000000
+ RISCV_HWPROBE_EXT_ZVKSH = 0x2000000
+ RISCV_HWPROBE_EXT_ZVKT = 0x4000000
+ RISCV_HWPROBE_EXT_ZFH = 0x8000000
+ RISCV_HWPROBE_EXT_ZFHMIN = 0x10000000
+ RISCV_HWPROBE_EXT_ZIHINTNTL = 0x20000000
+ RISCV_HWPROBE_EXT_ZVFH = 0x40000000
+ RISCV_HWPROBE_EXT_ZVFHMIN = 0x80000000
+ RISCV_HWPROBE_EXT_ZFA = 0x100000000
+ RISCV_HWPROBE_EXT_ZTSO = 0x200000000
+ RISCV_HWPROBE_EXT_ZACAS = 0x400000000
+ RISCV_HWPROBE_EXT_ZICOND = 0x800000000
+ RISCV_HWPROBE_EXT_ZIHINTPAUSE = 0x1000000000
RISCV_HWPROBE_KEY_CPUPERF_0 = 0x5
RISCV_HWPROBE_MISALIGNED_UNKNOWN = 0x0
RISCV_HWPROBE_MISALIGNED_EMULATED = 0x1
@@ -734,4 +765,6 @@ const (
RISCV_HWPROBE_MISALIGNED_FAST = 0x3
RISCV_HWPROBE_MISALIGNED_UNSUPPORTED = 0x4
RISCV_HWPROBE_MISALIGNED_MASK = 0x7
+ RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE = 0x6
+ RISCV_HWPROBE_WHICH_CPUS = 0x1
)
diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go
index 1fa34fd17c5..5cee9a3143f 100644
--- a/vendor/golang.org/x/sys/windows/syscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/syscall_windows.go
@@ -313,6 +313,10 @@ func NewCallbackCDecl(fn interface{}) uintptr {
//sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode
//sys GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo
//sys setConsoleCursorPosition(console Handle, position uint32) (err error) = kernel32.SetConsoleCursorPosition
+//sys GetConsoleCP() (cp uint32, err error) = kernel32.GetConsoleCP
+//sys GetConsoleOutputCP() (cp uint32, err error) = kernel32.GetConsoleOutputCP
+//sys SetConsoleCP(cp uint32) (err error) = kernel32.SetConsoleCP
+//sys SetConsoleOutputCP(cp uint32) (err error) = kernel32.SetConsoleOutputCP
//sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW
//sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW
//sys resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole
diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go
index 3f03b3d57cc..7b97a154c95 100644
--- a/vendor/golang.org/x/sys/windows/types_windows.go
+++ b/vendor/golang.org/x/sys/windows/types_windows.go
@@ -1060,6 +1060,7 @@ const (
SIO_GET_EXTENSION_FUNCTION_POINTER = IOC_INOUT | IOC_WS2 | 6
SIO_KEEPALIVE_VALS = IOC_IN | IOC_VENDOR | 4
SIO_UDP_CONNRESET = IOC_IN | IOC_VENDOR | 12
+ SIO_UDP_NETRESET = IOC_IN | IOC_VENDOR | 15
// cf. http://support.microsoft.com/default.aspx?scid=kb;en-us;257460
diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
index 9bb979a3e47..4c2e1bdc01e 100644
--- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
@@ -247,7 +247,9 @@ var (
procGetCommandLineW = modkernel32.NewProc("GetCommandLineW")
procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW")
procGetComputerNameW = modkernel32.NewProc("GetComputerNameW")
+ procGetConsoleCP = modkernel32.NewProc("GetConsoleCP")
procGetConsoleMode = modkernel32.NewProc("GetConsoleMode")
+ procGetConsoleOutputCP = modkernel32.NewProc("GetConsoleOutputCP")
procGetConsoleScreenBufferInfo = modkernel32.NewProc("GetConsoleScreenBufferInfo")
procGetCurrentDirectoryW = modkernel32.NewProc("GetCurrentDirectoryW")
procGetCurrentProcessId = modkernel32.NewProc("GetCurrentProcessId")
@@ -347,8 +349,10 @@ var (
procSetCommMask = modkernel32.NewProc("SetCommMask")
procSetCommState = modkernel32.NewProc("SetCommState")
procSetCommTimeouts = modkernel32.NewProc("SetCommTimeouts")
+ procSetConsoleCP = modkernel32.NewProc("SetConsoleCP")
procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition")
procSetConsoleMode = modkernel32.NewProc("SetConsoleMode")
+ procSetConsoleOutputCP = modkernel32.NewProc("SetConsoleOutputCP")
procSetCurrentDirectoryW = modkernel32.NewProc("SetCurrentDirectoryW")
procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories")
procSetDllDirectoryW = modkernel32.NewProc("SetDllDirectoryW")
@@ -2162,6 +2166,15 @@ func GetComputerName(buf *uint16, n *uint32) (err error) {
return
}
+func GetConsoleCP() (cp uint32, err error) {
+ r0, _, e1 := syscall.Syscall(procGetConsoleCP.Addr(), 0, 0, 0, 0)
+ cp = uint32(r0)
+ if cp == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
func GetConsoleMode(console Handle, mode *uint32) (err error) {
r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0)
if r1 == 0 {
@@ -2170,6 +2183,15 @@ func GetConsoleMode(console Handle, mode *uint32) (err error) {
return
}
+func GetConsoleOutputCP() (cp uint32, err error) {
+ r0, _, e1 := syscall.Syscall(procGetConsoleOutputCP.Addr(), 0, 0, 0, 0)
+ cp = uint32(r0)
+ if cp == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) {
r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0)
if r1 == 0 {
@@ -3038,6 +3060,14 @@ func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) {
return
}
+func SetConsoleCP(cp uint32) (err error) {
+ r1, _, e1 := syscall.Syscall(procSetConsoleCP.Addr(), 1, uintptr(cp), 0, 0)
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
func setConsoleCursorPosition(console Handle, position uint32) (err error) {
r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(position), 0)
if r1 == 0 {
@@ -3054,6 +3084,14 @@ func SetConsoleMode(console Handle, mode uint32) (err error) {
return
}
+func SetConsoleOutputCP(cp uint32) (err error) {
+ r1, _, e1 := syscall.Syscall(procSetConsoleOutputCP.Addr(), 1, uintptr(cp), 0, 0)
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
func SetCurrentDirectory(path *uint16) (err error) {
r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
if r1 == 0 {
diff --git a/vendor/gopkg.in/djherbis/times.v1/.travis.sh b/vendor/gopkg.in/djherbis/times.v1/.travis.sh
deleted file mode 100644
index c59e062f715..00000000000
--- a/vendor/gopkg.in/djherbis/times.v1/.travis.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/bash
-set -e
-
-script() {
- if [ "${TRAVIS_PULL_REQUEST}" == "false" ];
- then
- COVERALLS_PARALLEL=true
-
- if [ ! -z "$JS" ];
- then
- bash js.cover.sh
- else
- go test -covermode=count -coverprofile=profile.cov
- fi
-
- go get github.com/axw/gocov/gocov github.com/mattn/goveralls golang.org/x/tools/cmd/cover
- $HOME/gopath/bin/goveralls --coverprofile=profile.cov -service=travis-ci
- fi
-
- if [ -z "$JS" ];
- then
- go get golang.org/x/lint/golint && golint ./...
- go vet
- go test -bench=.* -v ./...
- fi
-}
-
-"$@"
\ No newline at end of file
diff --git a/vendor/gopkg.in/djherbis/times.v1/.travis.yml b/vendor/gopkg.in/djherbis/times.v1/.travis.yml
deleted file mode 100644
index b3cda1363d8..00000000000
--- a/vendor/gopkg.in/djherbis/times.v1/.travis.yml
+++ /dev/null
@@ -1,28 +0,0 @@
-language: go
-matrix:
- include:
- - os: linux
- go: tip
- - os: linux
- go: tip
- env:
- - JS=1
- - os: osx
- go: tip
- - os: windows
- go: 1.x
-#Added power jobs
- - os: linux
- go: tip
- arch: ppc64le
- - os: linux
- go: tip
- arch: ppc64le
- env:
- - JS=1
-script: bash .travis.sh script
-notifications:
- webhooks: https://coveralls.io/webhook
- email:
- on_success: never
- on_failure: change
diff --git a/vendor/gopkg.in/warnings.v0/LICENSE b/vendor/gopkg.in/warnings.v0/LICENSE
deleted file mode 100644
index d65f7e9d8cd..00000000000
--- a/vendor/gopkg.in/warnings.v0/LICENSE
+++ /dev/null
@@ -1,24 +0,0 @@
-Copyright (c) 2016 Péter Surányi.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/gopkg.in/warnings.v0/README b/vendor/gopkg.in/warnings.v0/README
deleted file mode 100644
index 974212ba1b9..00000000000
--- a/vendor/gopkg.in/warnings.v0/README
+++ /dev/null
@@ -1,77 +0,0 @@
-Package warnings implements error handling with non-fatal errors (warnings).
-
-import path: "gopkg.in/warnings.v0"
-package docs: https://godoc.org/gopkg.in/warnings.v0
-issues: https://github.com/go-warnings/warnings/issues
-pull requests: https://github.com/go-warnings/warnings/pulls
-
-A recurring pattern in Go programming is the following:
-
- func myfunc(params) error {
- if err := doSomething(...); err != nil {
- return err
- }
- if err := doSomethingElse(...); err != nil {
- return err
- }
- if ok := doAnotherThing(...); !ok {
- return errors.New("my error")
- }
- ...
- return nil
- }
-
-This pattern allows interrupting the flow on any received error. But what if
-there are errors that should be noted but still not fatal, for which the flow
-should not be interrupted? Implementing such logic at each if statement would
-make the code complex and the flow much harder to follow.
-
-Package warnings provides the Collector type and a clean and simple pattern
-for achieving such logic. The Collector takes care of deciding when to break
-the flow and when to continue, collecting any non-fatal errors (warnings)
-along the way. The only requirement is that fatal and non-fatal errors can be
-distinguished programmatically; that is a function such as
-
- IsFatal(error) bool
-
-must be implemented. The following is an example of what the above snippet
-could look like using the warnings package:
-
- import "gopkg.in/warnings.v0"
-
- func isFatal(err error) bool {
- _, ok := err.(WarningType)
- return !ok
- }
-
- func myfunc(params) error {
- c := warnings.NewCollector(isFatal)
- c.FatalWithWarnings = true
- if err := c.Collect(doSomething()); err != nil {
- return err
- }
- if err := c.Collect(doSomethingElse(...)); err != nil {
- return err
- }
- if ok := doAnotherThing(...); !ok {
- if err := c.Collect(errors.New("my error")); err != nil {
- return err
- }
- }
- ...
- return c.Done()
- }
-
-For an example of a non-trivial code base using this library, see
-gopkg.in/gcfg.v1
-
-Rules for using warnings
-
- - ensure that warnings are programmatically distinguishable from fatal
- errors (i.e. implement an isFatal function and any necessary error types)
- - ensure that there is a single Collector instance for a call of each
- exported function
- - ensure that all errors (fatal or warning) are fed through Collect
- - ensure that every time an error is returned, it is one returned by a
- Collector (from Collect or Done)
- - ensure that Collect is never called after Done
diff --git a/vendor/gopkg.in/warnings.v0/warnings.go b/vendor/gopkg.in/warnings.v0/warnings.go
deleted file mode 100644
index b849d1e3d9a..00000000000
--- a/vendor/gopkg.in/warnings.v0/warnings.go
+++ /dev/null
@@ -1,194 +0,0 @@
-// Package warnings implements error handling with non-fatal errors (warnings).
-//
-// A recurring pattern in Go programming is the following:
-//
-// func myfunc(params) error {
-// if err := doSomething(...); err != nil {
-// return err
-// }
-// if err := doSomethingElse(...); err != nil {
-// return err
-// }
-// if ok := doAnotherThing(...); !ok {
-// return errors.New("my error")
-// }
-// ...
-// return nil
-// }
-//
-// This pattern allows interrupting the flow on any received error. But what if
-// there are errors that should be noted but still not fatal, for which the flow
-// should not be interrupted? Implementing such logic at each if statement would
-// make the code complex and the flow much harder to follow.
-//
-// Package warnings provides the Collector type and a clean and simple pattern
-// for achieving such logic. The Collector takes care of deciding when to break
-// the flow and when to continue, collecting any non-fatal errors (warnings)
-// along the way. The only requirement is that fatal and non-fatal errors can be
-// distinguished programmatically; that is a function such as
-//
-// IsFatal(error) bool
-//
-// must be implemented. The following is an example of what the above snippet
-// could look like using the warnings package:
-//
-// import "gopkg.in/warnings.v0"
-//
-// func isFatal(err error) bool {
-// _, ok := err.(WarningType)
-// return !ok
-// }
-//
-// func myfunc(params) error {
-// c := warnings.NewCollector(isFatal)
-// c.FatalWithWarnings = true
-// if err := c.Collect(doSomething()); err != nil {
-// return err
-// }
-// if err := c.Collect(doSomethingElse(...)); err != nil {
-// return err
-// }
-// if ok := doAnotherThing(...); !ok {
-// if err := c.Collect(errors.New("my error")); err != nil {
-// return err
-// }
-// }
-// ...
-// return c.Done()
-// }
-//
-// For an example of a non-trivial code base using this library, see
-// gopkg.in/gcfg.v1
-//
-// Rules for using warnings
-//
-// - ensure that warnings are programmatically distinguishable from fatal
-// errors (i.e. implement an isFatal function and any necessary error types)
-// - ensure that there is a single Collector instance for a call of each
-// exported function
-// - ensure that all errors (fatal or warning) are fed through Collect
-// - ensure that every time an error is returned, it is one returned by a
-// Collector (from Collect or Done)
-// - ensure that Collect is never called after Done
-//
-// TODO
-//
-// - optionally limit the number of warnings (e.g. stop after 20 warnings) (?)
-// - consider interaction with contexts
-// - go vet-style invocations verifier
-// - semi-automatic code converter
-//
-package warnings // import "gopkg.in/warnings.v0"
-
-import (
- "bytes"
- "fmt"
-)
-
-// List holds a collection of warnings and optionally one fatal error.
-type List struct {
- Warnings []error
- Fatal error
-}
-
-// Error implements the error interface.
-func (l List) Error() string {
- b := bytes.NewBuffer(nil)
- if l.Fatal != nil {
- fmt.Fprintln(b, "fatal:")
- fmt.Fprintln(b, l.Fatal)
- }
- switch len(l.Warnings) {
- case 0:
- // nop
- case 1:
- fmt.Fprintln(b, "warning:")
- default:
- fmt.Fprintln(b, "warnings:")
- }
- for _, err := range l.Warnings {
- fmt.Fprintln(b, err)
- }
- return b.String()
-}
-
-// A Collector collects errors up to the first fatal error.
-type Collector struct {
- // IsFatal distinguishes between warnings and fatal errors.
- IsFatal func(error) bool
- // FatalWithWarnings set to true means that a fatal error is returned as
- // a List together with all warnings so far. The default behavior is to
- // only return the fatal error and discard any warnings that have been
- // collected.
- FatalWithWarnings bool
-
- l List
- done bool
-}
-
-// NewCollector returns a new Collector; it uses isFatal to distinguish between
-// warnings and fatal errors.
-func NewCollector(isFatal func(error) bool) *Collector {
- return &Collector{IsFatal: isFatal}
-}
-
-// Collect collects a single error (warning or fatal). It returns nil if
-// collection can continue (only warnings so far), or otherwise the errors
-// collected. Collect mustn't be called after the first fatal error or after
-// Done has been called.
-func (c *Collector) Collect(err error) error {
- if c.done {
- panic("warnings.Collector already done")
- }
- if err == nil {
- return nil
- }
- if c.IsFatal(err) {
- c.done = true
- c.l.Fatal = err
- } else {
- c.l.Warnings = append(c.l.Warnings, err)
- }
- if c.l.Fatal != nil {
- return c.erorr()
- }
- return nil
-}
-
-// Done ends collection and returns the collected error(s).
-func (c *Collector) Done() error {
- c.done = true
- return c.erorr()
-}
-
-func (c *Collector) erorr() error {
- if !c.FatalWithWarnings && c.l.Fatal != nil {
- return c.l.Fatal
- }
- if c.l.Fatal == nil && len(c.l.Warnings) == 0 {
- return nil
- }
- // Note that a single warning is also returned as a List. This is to make it
- // easier to determine fatal-ness of the returned error.
- return c.l
-}
-
-// FatalOnly returns the fatal error, if any, **in an error returned by a
-// Collector**. It returns nil if and only if err is nil or err is a List
-// with err.Fatal == nil.
-func FatalOnly(err error) error {
- l, ok := err.(List)
- if !ok {
- return err
- }
- return l.Fatal
-}
-
-// WarningsOnly returns the warnings **in an error returned by a Collector**.
-func WarningsOnly(err error) []error {
- l, ok := err.(List)
- if !ok {
- return nil
- }
- return l.Warnings
-}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index a3d5c882764..117c5ffa20e 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,4 +1,4 @@
-# dario.cat/mergo v1.0.0
+# dario.cat/mergo v1.0.1
## explicit; go 1.13
dario.cat/mergo
# github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1
@@ -6,11 +6,11 @@ dario.cat/mergo
# github.com/Masterminds/goutils v1.1.1
## explicit
github.com/Masterminds/goutils
-# github.com/Masterminds/semver/v3 v3.2.1
-## explicit; go 1.18
+# github.com/Masterminds/semver/v3 v3.3.0
+## explicit; go 1.21
github.com/Masterminds/semver/v3
-# github.com/Masterminds/sprig/v3 v3.2.3
-## explicit; go 1.13
+# github.com/Masterminds/sprig/v3 v3.3.0
+## explicit; go 1.21
github.com/Masterminds/sprig/v3
# github.com/Microsoft/go-winio v0.6.2
## explicit; go 1.21
@@ -35,30 +35,6 @@ github.com/Microsoft/hcsshim/internal/wclayer
github.com/Microsoft/hcsshim/internal/winapi
github.com/Microsoft/hcsshim/osversion
github.com/Microsoft/hcsshim/pkg/ociwclayer
-# github.com/ProtonMail/go-crypto v1.1.0-alpha.2
-## explicit; go 1.17
-github.com/ProtonMail/go-crypto/bitcurves
-github.com/ProtonMail/go-crypto/brainpool
-github.com/ProtonMail/go-crypto/eax
-github.com/ProtonMail/go-crypto/internal/byteutil
-github.com/ProtonMail/go-crypto/ocb
-github.com/ProtonMail/go-crypto/openpgp
-github.com/ProtonMail/go-crypto/openpgp/aes/keywrap
-github.com/ProtonMail/go-crypto/openpgp/armor
-github.com/ProtonMail/go-crypto/openpgp/ecdh
-github.com/ProtonMail/go-crypto/openpgp/ecdsa
-github.com/ProtonMail/go-crypto/openpgp/ed25519
-github.com/ProtonMail/go-crypto/openpgp/ed448
-github.com/ProtonMail/go-crypto/openpgp/eddsa
-github.com/ProtonMail/go-crypto/openpgp/elgamal
-github.com/ProtonMail/go-crypto/openpgp/errors
-github.com/ProtonMail/go-crypto/openpgp/internal/algorithm
-github.com/ProtonMail/go-crypto/openpgp/internal/ecc
-github.com/ProtonMail/go-crypto/openpgp/internal/encoding
-github.com/ProtonMail/go-crypto/openpgp/packet
-github.com/ProtonMail/go-crypto/openpgp/s2k
-github.com/ProtonMail/go-crypto/openpgp/x25519
-github.com/ProtonMail/go-crypto/openpgp/x448
# github.com/StackExchange/wmi v1.2.1
## explicit; go 1.13
github.com/StackExchange/wmi
@@ -81,20 +57,6 @@ github.com/cavaliergopher/grab/v3/pkg/bps
# github.com/cenkalti/backoff/v4 v4.3.0
## explicit; go 1.18
github.com/cenkalti/backoff/v4
-# github.com/cloudflare/circl v1.3.8
-## explicit; go 1.21
-github.com/cloudflare/circl/dh/x25519
-github.com/cloudflare/circl/dh/x448
-github.com/cloudflare/circl/ecc/goldilocks
-github.com/cloudflare/circl/internal/conv
-github.com/cloudflare/circl/internal/sha3
-github.com/cloudflare/circl/math
-github.com/cloudflare/circl/math/fp25519
-github.com/cloudflare/circl/math/fp448
-github.com/cloudflare/circl/math/mlsbset
-github.com/cloudflare/circl/sign
-github.com/cloudflare/circl/sign/ed25519
-github.com/cloudflare/circl/sign/ed448
# github.com/containerd/cgroups v1.1.0
## explicit; go 1.17
github.com/containerd/cgroups/stats/v1
@@ -122,17 +84,17 @@ github.com/containerd/stargz-snapshotter/estargz/errorutil
# github.com/cpuguy83/go-md2man/v2 v2.0.4
## explicit; go 1.11
github.com/cpuguy83/go-md2man/v2/md2man
-# github.com/cyphar/filepath-securejoin v0.2.5
-## explicit; go 1.13
-github.com/cyphar/filepath-securejoin
# github.com/denisbrodbeck/machineid v1.0.1
## explicit
github.com/denisbrodbeck/machineid
-# github.com/diskfs/go-diskfs v1.4.0
-## explicit; go 1.19
+# github.com/diskfs/go-diskfs v1.4.1
+## explicit; go 1.21
github.com/diskfs/go-diskfs
github.com/diskfs/go-diskfs/disk
github.com/diskfs/go-diskfs/filesystem
+github.com/diskfs/go-diskfs/filesystem/ext4
+github.com/diskfs/go-diskfs/filesystem/ext4/crc
+github.com/diskfs/go-diskfs/filesystem/ext4/md4
github.com/diskfs/go-diskfs/filesystem/fat32
github.com/diskfs/go-diskfs/filesystem/iso9660
github.com/diskfs/go-diskfs/filesystem/squashfs
@@ -145,6 +107,9 @@ github.com/diskfs/go-diskfs/util
## explicit
github.com/distribution/distribution/reference
github.com/distribution/distribution/uuid
+# github.com/djherbis/times v1.6.0
+## explicit; go 1.16
+github.com/djherbis/times
# github.com/docker/cli v27.1.1+incompatible
## explicit
github.com/docker/cli/cli/config
@@ -188,85 +153,15 @@ github.com/docker/go-connections/tlsconfig
# github.com/docker/go-units v0.5.0
## explicit
github.com/docker/go-units
-# github.com/elliotwutingfeng/asciiset v0.0.0-20230602022725-51bbb787efab
+# github.com/elliotwutingfeng/asciiset v0.0.0-20240214025120-24af97c84155
## explicit; go 1.11
github.com/elliotwutingfeng/asciiset
-# github.com/emirpasic/gods v1.18.1
-## explicit; go 1.2
-github.com/emirpasic/gods/containers
-github.com/emirpasic/gods/lists
-github.com/emirpasic/gods/lists/arraylist
-github.com/emirpasic/gods/trees
-github.com/emirpasic/gods/trees/binaryheap
-github.com/emirpasic/gods/utils
# github.com/fsnotify/fsnotify v1.7.0
## explicit; go 1.17
github.com/fsnotify/fsnotify
# github.com/ghodss/yaml v1.0.0
## explicit
github.com/ghodss/yaml
-# github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376
-## explicit; go 1.13
-github.com/go-git/gcfg
-github.com/go-git/gcfg/scanner
-github.com/go-git/gcfg/token
-github.com/go-git/gcfg/types
-# github.com/go-git/go-billy/v5 v5.5.0
-## explicit; go 1.19
-github.com/go-git/go-billy/v5
-github.com/go-git/go-billy/v5/helper/chroot
-github.com/go-git/go-billy/v5/helper/polyfill
-github.com/go-git/go-billy/v5/memfs
-github.com/go-git/go-billy/v5/osfs
-github.com/go-git/go-billy/v5/util
-# github.com/go-git/go-git/v5 v5.12.0
-## explicit; go 1.19
-github.com/go-git/go-git/v5
-github.com/go-git/go-git/v5/config
-github.com/go-git/go-git/v5/internal/path_util
-github.com/go-git/go-git/v5/internal/revision
-github.com/go-git/go-git/v5/internal/url
-github.com/go-git/go-git/v5/plumbing
-github.com/go-git/go-git/v5/plumbing/cache
-github.com/go-git/go-git/v5/plumbing/color
-github.com/go-git/go-git/v5/plumbing/filemode
-github.com/go-git/go-git/v5/plumbing/format/config
-github.com/go-git/go-git/v5/plumbing/format/diff
-github.com/go-git/go-git/v5/plumbing/format/gitignore
-github.com/go-git/go-git/v5/plumbing/format/idxfile
-github.com/go-git/go-git/v5/plumbing/format/index
-github.com/go-git/go-git/v5/plumbing/format/objfile
-github.com/go-git/go-git/v5/plumbing/format/packfile
-github.com/go-git/go-git/v5/plumbing/format/pktline
-github.com/go-git/go-git/v5/plumbing/hash
-github.com/go-git/go-git/v5/plumbing/object
-github.com/go-git/go-git/v5/plumbing/protocol/packp
-github.com/go-git/go-git/v5/plumbing/protocol/packp/capability
-github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband
-github.com/go-git/go-git/v5/plumbing/revlist
-github.com/go-git/go-git/v5/plumbing/storer
-github.com/go-git/go-git/v5/plumbing/transport
-github.com/go-git/go-git/v5/plumbing/transport/client
-github.com/go-git/go-git/v5/plumbing/transport/file
-github.com/go-git/go-git/v5/plumbing/transport/git
-github.com/go-git/go-git/v5/plumbing/transport/http
-github.com/go-git/go-git/v5/plumbing/transport/internal/common
-github.com/go-git/go-git/v5/plumbing/transport/server
-github.com/go-git/go-git/v5/plumbing/transport/ssh
-github.com/go-git/go-git/v5/storage
-github.com/go-git/go-git/v5/storage/filesystem
-github.com/go-git/go-git/v5/storage/filesystem/dotgit
-github.com/go-git/go-git/v5/storage/memory
-github.com/go-git/go-git/v5/utils/binary
-github.com/go-git/go-git/v5/utils/diff
-github.com/go-git/go-git/v5/utils/ioutil
-github.com/go-git/go-git/v5/utils/merkletrie
-github.com/go-git/go-git/v5/utils/merkletrie/filesystem
-github.com/go-git/go-git/v5/utils/merkletrie/index
-github.com/go-git/go-git/v5/utils/merkletrie/internal/frame
-github.com/go-git/go-git/v5/utils/merkletrie/noder
-github.com/go-git/go-git/v5/utils/sync
-github.com/go-git/go-git/v5/utils/trace
# github.com/go-logr/logr v1.4.2
## explicit; go 1.18
github.com/go-logr/logr
@@ -278,8 +173,8 @@ github.com/go-ole/go-ole/oleutil
# github.com/go-task/slim-sprig/v3 v3.0.0
## explicit; go 1.20
github.com/go-task/slim-sprig/v3
-# github.com/gofrs/flock v0.8.1
-## explicit
+# github.com/gofrs/flock v0.12.1
+## explicit; go 1.21.0
github.com/gofrs/flock
# github.com/gogo/protobuf v1.3.2
## explicit; go 1.15
@@ -352,12 +247,9 @@ github.com/hashicorp/hcl/hcl/token
github.com/hashicorp/hcl/json/parser
github.com/hashicorp/hcl/json/scanner
github.com/hashicorp/hcl/json/token
-# github.com/huandu/xstrings v1.4.0
+# github.com/huandu/xstrings v1.5.0
## explicit; go 1.12
github.com/huandu/xstrings
-# github.com/imdario/mergo v0.3.13
-## explicit; go 1.13
-github.com/imdario/mergo
# github.com/inconshreveable/mousetrap v1.1.0
## explicit; go 1.18
github.com/inconshreveable/mousetrap
@@ -393,20 +285,14 @@ github.com/jaypipes/ghw/pkg/util
# github.com/jaypipes/pcidb v1.0.0
## explicit; go 1.17
github.com/jaypipes/pcidb
-# github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99
-## explicit
-github.com/jbenet/go-context/io
# github.com/joho/godotenv v1.5.1
## explicit; go 1.12
github.com/joho/godotenv
-# github.com/kendru/darwin/go/depgraph v0.0.0-20221105232959-877d6a81060c
+# github.com/kendru/darwin/go/depgraph v0.0.0-20230809052043-4d1c7e9d1767
## explicit; go 1.16
github.com/kendru/darwin/go/depgraph
-# github.com/kevinburke/ssh_config v1.2.0
-## explicit
-github.com/kevinburke/ssh_config
-# github.com/klauspost/compress v1.17.4
-## explicit; go 1.19
+# github.com/klauspost/compress v1.17.9
+## explicit; go 1.20
github.com/klauspost/compress
github.com/klauspost/compress/fse
github.com/klauspost/compress/huff0
@@ -438,7 +324,7 @@ github.com/moby/sys/mountinfo
# github.com/moby/sys/sequential v0.5.0
## explicit; go 1.17
github.com/moby/sys/sequential
-# github.com/mudler/entities v0.8.0
+# github.com/mudler/entities v0.8.1
## explicit; go 1.21
github.com/mudler/entities/pkg/entities
# github.com/onsi/ginkgo/v2 v2.20.2
@@ -502,21 +388,16 @@ github.com/pierrec/lz4/v4/internal/lz4block
github.com/pierrec/lz4/v4/internal/lz4errors
github.com/pierrec/lz4/v4/internal/lz4stream
github.com/pierrec/lz4/v4/internal/xxh32
-# github.com/pjbgf/sha1cd v0.3.0
-## explicit; go 1.19
-github.com/pjbgf/sha1cd
-github.com/pjbgf/sha1cd/internal
-github.com/pjbgf/sha1cd/ubc
# github.com/pkg/errors v0.9.1
## explicit
github.com/pkg/errors
-# github.com/pkg/xattr v0.4.9
+# github.com/pkg/xattr v0.4.10
## explicit; go 1.14
github.com/pkg/xattr
# github.com/rancher-sandbox/linuxkit v1.0.2
## explicit; go 1.18
github.com/rancher-sandbox/linuxkit/providers
-# github.com/rancher/yip v1.9.2
+# github.com/rancher/yip v1.9.3
## explicit; go 1.21
github.com/rancher/yip/pkg/dag
github.com/rancher/yip/pkg/executor
@@ -540,18 +421,12 @@ github.com/sanity-io/litter
# github.com/satori/go.uuid v1.2.0
## explicit
github.com/satori/go.uuid
-# github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3
-## explicit; go 1.13
-github.com/sergi/go-diff/diffmatchpatch
# github.com/shopspring/decimal v1.4.0
## explicit; go 1.10
github.com/shopspring/decimal
-# github.com/sirupsen/logrus v1.9.3
+# github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af
## explicit; go 1.13
github.com/sirupsen/logrus
-# github.com/skeema/knownhosts v1.2.2
-## explicit; go 1.17
-github.com/skeema/knownhosts
# github.com/sourcegraph/conc v0.3.0
## explicit; go 1.19
github.com/sourcegraph/conc
@@ -563,7 +438,7 @@ github.com/sourcegraph/conc/panics
github.com/spf13/afero
github.com/spf13/afero/internal/common
github.com/spf13/afero/mem
-# github.com/spf13/cast v1.6.0
+# github.com/spf13/cast v1.7.0
## explicit; go 1.19
github.com/spf13/cast
# github.com/spf13/cobra v1.8.1
@@ -606,7 +481,7 @@ github.com/ulikunitz/xz/lzma
# github.com/vbatts/tar-split v0.11.3
## explicit; go 1.15
github.com/vbatts/tar-split/archive/tar
-# github.com/vishvananda/netlink v1.2.1-beta.2
+# github.com/vishvananda/netlink v1.3.0
## explicit; go 1.12
github.com/vishvananda/netlink
github.com/vishvananda/netlink/nl
@@ -620,11 +495,8 @@ github.com/vmware/vmw-guestinfo/message
github.com/vmware/vmw-guestinfo/rpcout
github.com/vmware/vmw-guestinfo/rpcvmx
github.com/vmware/vmw-guestinfo/vmcheck
-# github.com/xanzy/ssh-agent v0.3.3
-## explicit; go 1.16
-github.com/xanzy/ssh-agent
-# github.com/zcalusic/sysinfo v1.0.2
-## explicit; go 1.21
+# github.com/zcalusic/sysinfo v1.1.0
+## explicit; go 1.22
github.com/zcalusic/sysinfo
github.com/zcalusic/sysinfo/cpuid
# go.opencensus.io v0.24.0
@@ -642,25 +514,18 @@ go.uber.org/atomic
go.uber.org/multierr
# golang.org/x/crypto v0.26.0
## explicit; go 1.20
-golang.org/x/crypto/argon2
golang.org/x/crypto/bcrypt
-golang.org/x/crypto/blake2b
golang.org/x/crypto/blowfish
-golang.org/x/crypto/cast5
golang.org/x/crypto/chacha20
golang.org/x/crypto/cryptobyte
golang.org/x/crypto/cryptobyte/asn1
golang.org/x/crypto/curve25519
-golang.org/x/crypto/hkdf
golang.org/x/crypto/internal/alias
golang.org/x/crypto/internal/poly1305
golang.org/x/crypto/pbkdf2
golang.org/x/crypto/scrypt
-golang.org/x/crypto/sha3
golang.org/x/crypto/ssh
-golang.org/x/crypto/ssh/agent
golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
-golang.org/x/crypto/ssh/knownhosts
# golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
## explicit; go 1.20
golang.org/x/exp/constraints
@@ -679,7 +544,7 @@ golang.org/x/net/proxy
# golang.org/x/sync v0.8.0
## explicit; go 1.18
golang.org/x/sync/errgroup
-# golang.org/x/sys v0.24.0
+# golang.org/x/sys v0.25.0
## explicit; go 1.18
golang.org/x/sys/cpu
golang.org/x/sys/execabs
@@ -761,15 +626,9 @@ google.golang.org/protobuf/runtime/protoimpl
google.golang.org/protobuf/types/descriptorpb
google.golang.org/protobuf/types/gofeaturespb
google.golang.org/protobuf/types/known/anypb
-# gopkg.in/djherbis/times.v1 v1.3.0
-## explicit
-gopkg.in/djherbis/times.v1
# gopkg.in/ini.v1 v1.67.0
## explicit
gopkg.in/ini.v1
-# gopkg.in/warnings.v0 v0.1.2
-## explicit
-gopkg.in/warnings.v0
# gopkg.in/yaml.v2 v2.4.0
## explicit; go 1.15
gopkg.in/yaml.v2