diff --git a/.github/workflows/builder-arm64.yaml b/.github/workflows/builder-arm64.yaml index 48b6c2f89..bfb723e1b 100644 --- a/.github/workflows/builder-arm64.yaml +++ b/.github/workflows/builder-arm64.yaml @@ -18,9 +18,14 @@ jobs: GOARCH: ${{ matrix.goarch }} steps: - uses: actions/checkout@master - - uses: actions/setup-go@v2 + - uses: actions/setup-go@v3 with: - go-version: 1.19 + go-version: '1.19.1' + check-latest: true + cache: true + cache-dependency-path: | + **/go.sum + **/go.mod - name: generate resources run: mkdir -p {build/data,build/static} - name: package bin diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 4df8bbd77..9ec479851 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -18,9 +18,14 @@ jobs: GOARCH: ${{ matrix.goarch }} steps: - uses: actions/checkout@master - - uses: actions/setup-go@v2 + - uses: actions/setup-go@v3 with: - go-version: 1.19 + go-version: '1.19.1' + check-latest: true + cache: true + cache-dependency-path: | + **/go.sum + **/go.mod - name: check depends run: sudo apt-get install -y libseccomp-dev - name: generate resources diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index 6ad300ddc..f693a75ef 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -19,9 +19,14 @@ jobs: - uses: actions/checkout@v2 with: ref: 'dev' - - uses: actions/setup-go@v2 + - uses: actions/setup-go@v3 with: - go-version: 1.19 + go-version: '1.19.1' + check-latest: true + cache: true + cache-dependency-path: | + **/go.sum + **/go.mod - name: generate resources run: | mkdir -p {build/data,build/static} diff --git a/Dockerfile.dapper b/Dockerfile.dapper index 5357fc757..6b190ca8d 100644 --- a/Dockerfile.dapper +++ b/Dockerfile.dapper @@ -1,4 +1,4 @@ -ARG GOLANG=golang:1.19.1-alpine3.15 +ARG GOLANG=golang:1.19.2-alpine3.15 FROM ${GOLANG} ARG http_proxy=$http_proxy diff --git a/go.mod b/go.mod index 724283c6f..2faae426c 100644 --- a/go.mod +++ b/go.mod @@ -31,37 +31,37 @@ replace ( google.golang.org/genproto => google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368 google.golang.org/grpc => google.golang.org/grpc v1.40.0 gopkg.in/square/go-jose.v2 => gopkg.in/square/go-jose.v2 v2.2.2 - k8s.io/api => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.25.2-k3s1 - k8s.io/apiextensions-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.25.2-k3s1 - k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.25.2-k3s1 - k8s.io/apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.25.2-k3s1 - k8s.io/cli-runtime => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.25.2-k3s1 - k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.25.2-k3s1 - k8s.io/cloud-provider => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.25.2-k3s1 - k8s.io/cluster-bootstrap => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.25.2-k3s1 - k8s.io/code-generator => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.25.2-k3s1 - k8s.io/component-base => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.25.2-k3s1 - k8s.io/component-helpers => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.25.2-k3s1 - k8s.io/controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.25.2-k3s1 - k8s.io/cri-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api 
v1.25.2-k3s1 - k8s.io/csi-translation-lib => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.25.2-k3s1 + k8s.io/api => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.25.3-k3s1 + k8s.io/apiextensions-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.25.3-k3s1 + k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.25.3-k3s1 + k8s.io/apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.25.3-k3s1 + k8s.io/cli-runtime => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.25.3-k3s1 + k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.25.3-k3s1 + k8s.io/cloud-provider => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.25.3-k3s1 + k8s.io/cluster-bootstrap => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.25.3-k3s1 + k8s.io/code-generator => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.25.3-k3s1 + k8s.io/component-base => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.25.3-k3s1 + k8s.io/component-helpers => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.25.3-k3s1 + k8s.io/controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.25.3-k3s1 + k8s.io/cri-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.25.3-k3s1 + k8s.io/csi-translation-lib => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.25.3-k3s1 k8s.io/klog => github.com/k3s-io/klog v1.0.0-k3s2 // k3s-release-1.x k8s.io/klog/v2 => github.com/k3s-io/klog/v2 v2.60.1-k3s1 // k3s-main - k8s.io/kube-aggregator => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.25.2-k3s1 - k8s.io/kube-controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.25.2-k3s1 - k8s.io/kube-proxy => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.25.2-k3s1 - k8s.io/kube-scheduler => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.25.2-k3s1 - k8s.io/kubectl => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.25.2-k3s1 - k8s.io/kubelet => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.25.2-k3s1 - k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.25.2-k3s1 - k8s.io/legacy-cloud-providers => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.25.2-k3s1 - k8s.io/metrics => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.25.2-k3s1 - k8s.io/mount-utils => github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.25.2-k3s1 - k8s.io/node-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/node-api v1.25.2-k3s1 - k8s.io/pod-security-admission => github.com/k3s-io/kubernetes/staging/src/k8s.io/pod-security-admission v1.25.2-k3s1 - k8s.io/sample-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.25.2-k3s1 - k8s.io/sample-cli-plugin => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.25.2-k3s1 - k8s.io/sample-controller => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-controller v1.25.2-k3s1 + k8s.io/kube-aggregator => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.25.3-k3s1 + k8s.io/kube-controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.25.3-k3s1 + k8s.io/kube-proxy => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.25.3-k3s1 + k8s.io/kube-scheduler => 
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.25.3-k3s1 + k8s.io/kubectl => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.25.3-k3s1 + k8s.io/kubelet => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.25.3-k3s1 + k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.25.3-k3s1 + k8s.io/legacy-cloud-providers => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.25.3-k3s1 + k8s.io/metrics => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.25.3-k3s1 + k8s.io/mount-utils => github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.25.3-k3s1 + k8s.io/node-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/node-api v1.25.3-k3s1 + k8s.io/pod-security-admission => github.com/k3s-io/kubernetes/staging/src/k8s.io/pod-security-admission v1.25.3-k3s1 + k8s.io/sample-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.25.3-k3s1 + k8s.io/sample-cli-plugin => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.25.3-k3s1 + k8s.io/sample-controller => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-controller v1.25.3-k3s1 mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7 ) @@ -102,7 +102,7 @@ require ( github.com/robfig/cron/v3 v3.0.1 github.com/rootless-containers/rootlesskit v1.0.1 github.com/sirupsen/logrus v1.9.0 - github.com/stretchr/testify v1.7.1 + github.com/stretchr/testify v1.8.0 github.com/urfave/cli v1.22.9 github.com/yl2chen/cidranger v1.0.2 go.etcd.io/etcd/api/v3 v3.5.4 @@ -117,17 +117,17 @@ require ( google.golang.org/grpc v1.47.0 gopkg.in/yaml.v2 v2.4.0 inet.af/tcpproxy v0.0.0-20200125044825-b6bb9b5b8252 - k8s.io/api v0.25.2 - k8s.io/apimachinery v0.25.2 - k8s.io/apiserver v0.25.2 + k8s.io/api v0.25.3 + k8s.io/apimachinery v0.25.3 + k8s.io/apiserver v0.25.3 k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible - k8s.io/cloud-provider v0.25.2 - k8s.io/component-base v0.25.2 - k8s.io/component-helpers v0.25.2 - k8s.io/cri-api v0.25.2 + k8s.io/cloud-provider v0.25.3 + k8s.io/component-base v0.25.3 + k8s.io/component-helpers v0.25.3 + k8s.io/cri-api v0.25.3 k8s.io/klog v1.0.0 - k8s.io/kubectl v0.25.2 - k8s.io/kubernetes v1.25.2 + k8s.io/kubectl v0.25.3 + k8s.io/kubernetes v1.25.3 k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed sigs.k8s.io/yaml v1.3.0 ) @@ -300,7 +300,7 @@ require ( github.com/spf13/pflag v1.0.5 // indirect github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect github.com/stoewer/go-strcase v1.2.0 // indirect - github.com/stretchr/objx v0.2.0 // indirect + github.com/stretchr/objx v0.4.0 // indirect github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect github.com/tchap/go-patricia v2.3.0+incompatible // indirect github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 // indirect @@ -370,7 +370,7 @@ require ( k8s.io/metrics v0.0.0 // indirect k8s.io/mount-utils v0.25.2 // indirect k8s.io/pod-security-admission v0.0.0 // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.32 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.33 // indirect sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect sigs.k8s.io/kustomize/api v0.12.1 // indirect sigs.k8s.io/kustomize/kustomize/v4 v4.5.7 // indirect diff --git a/go.sum b/go.sum index 47906eaec..f3718c2a4 100644 --- a/go.sum +++ b/go.sum @@ -626,57 +626,57 @@ github.com/k3s-io/klog v1.0.0-k3s2 h1:yyvD2bQbxG7m85/pvNctLX2bUDmva5kOBvuZ77tTGB 
github.com/k3s-io/klog v1.0.0-k3s2/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= github.com/k3s-io/klog/v2 v2.60.1-k3s1 h1:C1hsMF1Eo6heGVQzts6cZ+rDZAReSiOBUxsYMuUkkZI= github.com/k3s-io/klog/v2 v2.60.1-k3s1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -github.com/k3s-io/kubernetes v1.25.2-k3s1 h1:ps0qXdKdQmxJ1Y3T6Ykm3EsfdG3LhoZXB61t4LPkcvA= -github.com/k3s-io/kubernetes v1.25.2-k3s1/go.mod h1:OlhMy4mww2SLr1KtdV1vC014/MwfYlts/C66gtnDkKE= -github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.25.2-k3s1 h1:/J6MWo5W0jTSMjcuySJohGoj6A3m2fO9UOdqRahSRDE= -github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.25.2-k3s1/go.mod h1:wvMgOM5Av+A4jB2697Nd/g3aPCgj8YEVsmJpGmfopZk= -github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.25.2-k3s1 h1:loGGtOvOEm6X7GFjR4rqb3XiSYI+q1LobZuFDI7QGwg= -github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.25.2-k3s1/go.mod h1:wvUiiJ1cSEmgkaIPwANpAy/opBm7PU8KndyDyjrJ0KQ= -github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.25.2-k3s1 h1:K8/ezFxiJTyWBKnFMC05CtK2GbsXBFUM7Xt7byqh3Fo= -github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.25.2-k3s1/go.mod h1:vM6raI5G7nvbTN88iIRp57Iyr+JqMS3EdMZR+hmvI74= -github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.25.2-k3s1 h1:Vf6QsUaRPM91mimbzkpJ4Fn1llmJMV+dpfiWaoju0mI= -github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.25.2-k3s1/go.mod h1:IEYLN7rCUDUYW+KNk6COLKJ7ZmRptpWSzvvlZilPuIA= -github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.25.2-k3s1 h1:SUPQxUpgBZSGOHAXYQBinyyiFSjQIYKkZLMLzM9mEp0= -github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.25.2-k3s1/go.mod h1:inWX+hRBFEmY9BWjjz3258LXo0sFhq3mlMS/zLqosAc= -github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.25.2-k3s1 h1:bPo4sIbzHpOt0y5R1Qw/OFEaQU1EZww2hFP+PoIzu/8= -github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.25.2-k3s1/go.mod h1:WPKhWQ8eR2VxuPXxJqSQLDwWi0gh5mHw7BxxvP+dhFU= -github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.25.2-k3s1 h1:F5IsEqHS+qfeVQArUqL9I8J4IJQfRFBXF8kVYMYMeI4= -github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.25.2-k3s1/go.mod h1:bjCFUvxddDW+FeuAol8sOtXK02etI6NYHdbmfrGpbDI= -github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.25.2-k3s1 h1:evPMcxQkHI4nWhDPBKjI+x/RMb3076E7w9+xvApibx0= -github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.25.2-k3s1/go.mod h1:kQtEocKbT8p/hQHM9UeC7PRY5Nqsnkg0VeQTg/B/qgU= -github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.25.2-k3s1 h1:RYISVBsJGMPI/kZ6q0VZ3ARDmwyErAuFRnmuvtk75xM= -github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.25.2-k3s1/go.mod h1:Vu0aQHjOEoSgpw8/vZZWv8bBnYFiTypdvFZC0iB8rxI= -github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.25.2-k3s1 h1:qhRwqF9mKJnGdGh+Ho879aVrbff/bJzWSjOO/xbjoh4= -github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.25.2-k3s1/go.mod h1:jkPkm8xMLsXfuFcsom7s/2j/8Wd1+c8k0CaNm0m/xGw= -github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.25.2-k3s1 h1:AS3LIJB7nDnY963V06faSTSP74U/RCLfixL6s0tzQPg= -github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.25.2-k3s1/go.mod h1:4UjptCN6e4loBUfs0h1R7kevZfGg1cMW1DiMdHNv7oY= -github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.25.2-k3s1 h1:+waSuBTDWYRPrGGHt6ljEYed91gqshsD854JnnBDETA= -github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.25.2-k3s1/go.mod h1:sU/Wg23n3eEY+zH1l7uKk6i/XG07FddteJQmX/v6mi4= 
-github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.25.2-k3s1 h1:yL0HoPTig5BaSzNwz8RJSssp1Q3CHE4I5C9r/fOcuc8= -github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.25.2-k3s1/go.mod h1:2LFpaWXEd8PJYF1xnz46lCU4xfbg3O3YHgkLU+ZPIZ0= -github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.25.2-k3s1 h1:TXTi0271UgTECf2pv14BfcGRPf64W7q5FJdD7xAgr78= -github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.25.2-k3s1/go.mod h1:aFc9omOWDcTcj1O7UMNVO1M2ncZ/962NOz1fey7Kz/Y= -github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.25.2-k3s1 h1:I8nziwNhPO51EfZdPIvQ+moIjP8anlhhvYHmBDPPNTY= -github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.25.2-k3s1/go.mod h1:0aKg89lhJONBYW7H3kHg4kjn85W/awEO97OKAt9LJTM= -github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.25.2-k3s1 h1:iVmmXoqVvtL8/CGBJZinmyA+vi0ueoa+wnk7ZA4iPyE= -github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.25.2-k3s1/go.mod h1:XkGYt1v3TsRhw2YhJCDINgdiOHpr1wW1HcXhLGwfSx4= -github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.25.2-k3s1 h1:S2nuxGcuNTAjiCdbCj5V5PDrez9qZg1jm306U0/z55w= -github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.25.2-k3s1/go.mod h1:5GZH00/YrMKONtqNvexlj7ziy2bp8x3khtUdGZvXwUw= -github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.25.2-k3s1 h1:9NpkXCaY8sBR8qprkV9M0yzAhC0y1DxJ1O4dHgfNw0Y= -github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.25.2-k3s1/go.mod h1:pbpOZITqscSg+osCNe6WnboJFzEEWZsG9jh1qzjluDg= -github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.25.2-k3s1 h1:1wgtWb/7jNW7wtJKRT+JSPJiA1GoSS0CoX8JvDXL9x8= -github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.25.2-k3s1/go.mod h1:049xURDos4SNMOjwu5DtpYHDAbIULQFkQJB5SxwOW/c= -github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.25.2-k3s1 h1:z2tcGR9RQOutSlDB7AW1PR8yWJHtJlKqOv7t7jCZkYU= -github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.25.2-k3s1/go.mod h1:G0TM5PU+ALqSGPjSXZB5wOpOWApdNkCBwMD3r1v0z0o= -github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.25.2-k3s1 h1:kRphBF4w2XiEcw5n+Oy8zTTFoIEORkz1UgL7xaLfcwI= -github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.25.2-k3s1/go.mod h1:FO33IO/BAupssNt4gSsA6SlL7c7ZAxx6Zzjox8wkI2I= -github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.25.2-k3s1 h1:QUPbKqqeH09vfuF/EVrSi2Gp1zq8Sm6j4jfL0g5ubH0= -github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.25.2-k3s1/go.mod h1:bT9UtMfmBcP6wNpelKpMzUrjRB07e6hX5BHt6B6HF/E= -github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.25.2-k3s1 h1:5rHeZ9OMKGH7QXO0i8kv3QWRDTdm63OLdhl8WJIU1YE= -github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.25.2-k3s1/go.mod h1:YxtPi+a+eiz9JvIICzyaK9r9FfPb1BIx/rowusM2tGU= -github.com/k3s-io/kubernetes/staging/src/k8s.io/pod-security-admission v1.25.2-k3s1 h1:6hQVaWCJz19RCT0jDzNt28cU+M+mP+yIuqb6inCQCRc= -github.com/k3s-io/kubernetes/staging/src/k8s.io/pod-security-admission v1.25.2-k3s1/go.mod h1:a7g+y/kbzRIjglPHuoC4Kl3ME4JLpSeZ3X15nF3egSM= -github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.25.2-k3s1/go.mod h1:Ve5QCbplxzZPn8/7SPTpcGqFpk28UfMQgfbp1AM19ZI= +github.com/k3s-io/kubernetes v1.25.3-k3s1 h1:8f7YI1oy1SfUgLChqLrdzg1SrqS8Oh9K3Bm9JgOpB9A= +github.com/k3s-io/kubernetes v1.25.3-k3s1/go.mod h1:lvEY+3iJhh+sGIK1LorGkI56rW0eLGsfalnp68wQwYU= +github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.25.3-k3s1 h1:UQi6J9KXQgx0c3WqvJ3wqU8XIgpORzaK269oWbV782g= 
+github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.25.3-k3s1/go.mod h1:wjRFTXXZymwTTM1+dzztXh20Ujp21kJSfdUIz7N5IKY= +github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.25.3-k3s1 h1:3Le71FCvRqYxqmiZXM2pCRSmxvXc+j91HB73iCbnmiQ= +github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.25.3-k3s1/go.mod h1:rlHPlyHmZ+P2BeqS3Zod24q+zS2JVZ4KcRi4wRjfR4g= +github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.25.3-k3s1 h1:+QGSogtFxBTYZMONTPSxZSjlINUh0scPgV8MPmg/Gi4= +github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.25.3-k3s1/go.mod h1:H4xpF1hftG+e4p/Gh9Z3Z+lgRIfWU+ixFP2Mqd6ZqJw= +github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.25.3-k3s1 h1:hDWHHqQUISkxDv34HkfohYiOdFpK8ukiVmV1iPB9r/s= +github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.25.3-k3s1/go.mod h1:gpLtc297xJg44+C/jttW5ZDNnCTp+JiVmeFCKjxKk/8= +github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.25.3-k3s1 h1:5oS4fUN+C41z+pXnOevKGxYTokiqz8YEPdKYzCtES3I= +github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.25.3-k3s1/go.mod h1:JV7sp4PCFf8PRZ6aJc67/aiQyhF3DggqJsq5az5ZMO4= +github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.25.3-k3s1 h1:X850ybfskUAglx7vHfWWeb2JrJI+qZi0IqAKZsSzcNQ= +github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.25.3-k3s1/go.mod h1:pyRKsq9uUmpchY0VDoMChkIHcTU2PWz2L9SW7ZbIxaI= +github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.25.3-k3s1 h1:3jc1nHYafdkTPHjZa78OOCdwsP148ECqYtsNlmwu3FM= +github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.25.3-k3s1/go.mod h1:NPc228CI/24ePftLz0NA7HPkc3lus5T99BwYHPXiTho= +github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.25.3-k3s1 h1:d/Aqb0Eroh8W7JMk5uh04EobaRsvW7I2Em47SfjJ3N0= +github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.25.3-k3s1/go.mod h1:XoaROISKxlHYNLfzGtFkEizsn1BiUynQU7fj+RYD+1c= +github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.25.3-k3s1 h1:NgE373nmrMOzsziHffn0SLlK1xFsFM38fQ4OUDWIWs8= +github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.25.3-k3s1/go.mod h1:5bEIk1BBY5UHGsZjHTNAZdtM1+4twQb/B2O8B7vqJTI= +github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.25.3-k3s1 h1:tHFrC8ETq+o/Tfe7CH1ivTV/Zz3WpFPaB7iYVxuR6fE= +github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.25.3-k3s1/go.mod h1:hDStPwnclwHN5HEz0o8Zf8daFpZGE2ZsnUIgx5e5Z4M= +github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.25.3-k3s1 h1:WPn+xTfakDJxGvweL4pT8BE+ywg1SCWpaMdRZX4cT0U= +github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.25.3-k3s1/go.mod h1:4UjptCN6e4loBUfs0h1R7kevZfGg1cMW1DiMdHNv7oY= +github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.25.3-k3s1 h1:h/dVlH2EFtMFgHiLZ5KPnMyklB29Wcg+hRxcmmmCeL0= +github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.25.3-k3s1/go.mod h1:AwNQ21OAxFMkw57sbLhIefr7uhRCgoscLoQQembtQ7k= +github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.25.3-k3s1 h1:2HVc+dAi3CANIzqGxWyLQaEfEx09Lm0x11L4G5fyo54= +github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.25.3-k3s1/go.mod h1:ttX089LC/ZXNRGycQWkSdAD7iUMfC7M/HAtUocN0JXU= +github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.25.3-k3s1 h1:+GgzPyhyGAUV6zlR71ip7iudHoAeIH2CoIbKi/qaX4g= +github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.25.3-k3s1/go.mod h1:MckRs2LMf6uVe5Dc/VHn2pojefo8K0VvsnFHYaIQlpg= +github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.25.3-k3s1 
h1:k6Cuk4zCga2V8jgPK+wsU7ETfjo2j7w6ObLHWAAW9w8= +github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.25.3-k3s1/go.mod h1:7BlImxwC/exXwYlzdFcDrU5971WbLqPkxrLdCi0Wrbg= +github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.25.3-k3s1 h1:niRhtf04m0MFNANIfPgzxpv/VN3odiwPyZ31/rXgVBg= +github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.25.3-k3s1/go.mod h1:XkGYt1v3TsRhw2YhJCDINgdiOHpr1wW1HcXhLGwfSx4= +github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.25.3-k3s1 h1:OScmatISYAp9mZmI11Xvk1y1gww6iqnpD9l0AldPeLM= +github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.25.3-k3s1/go.mod h1:5GZH00/YrMKONtqNvexlj7ziy2bp8x3khtUdGZvXwUw= +github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.25.3-k3s1 h1:0Z/lPtkfXB7dYEVwN5PJaUrnsn08s/K3z1mPjUf7DEY= +github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.25.3-k3s1/go.mod h1:pbpOZITqscSg+osCNe6WnboJFzEEWZsG9jh1qzjluDg= +github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.25.3-k3s1 h1:eulTBSSZizpJi41cg3tWwJTjvCvSH+L/x8sMzuHK0k0= +github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.25.3-k3s1/go.mod h1:yXeGpCulm686/gh5Kn8fTeZ1Y6Ee4XSsVLB3X+tudpQ= +github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.25.3-k3s1 h1:uSSQ3LkRfplIlrrouEnH5mZzs1EAOZd3+cIKt4ljDDo= +github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.25.3-k3s1/go.mod h1:G0TM5PU+ALqSGPjSXZB5wOpOWApdNkCBwMD3r1v0z0o= +github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.25.3-k3s1 h1:YYNvk4HmaEPvamJKIoeE9hXWkOyebvuiN4Y3E68Sbws= +github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.25.3-k3s1/go.mod h1:/D56oDpSC+5GMgC5U451GWPmaO7K+elp47XlXkf7DO8= +github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.25.3-k3s1 h1:9JQR9GmgMYs9IWiSbLGhZgyYxEH8g6D6gRJSDpeiFpY= +github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.25.3-k3s1/go.mod h1:+8o4FADtQ3BMWV/P5C0365mucwzzf78/LOPsrVmQXGw= +github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.25.3-k3s1 h1:zsEb6BH3UmED/O6fkWafp12T5duLANKZvplYJIIuFp0= +github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.25.3-k3s1/go.mod h1:RdND6kf45dn+BhLsd5rFKLA5kchAyjaL9Ag/kY3PWbc= +github.com/k3s-io/kubernetes/staging/src/k8s.io/pod-security-admission v1.25.3-k3s1 h1:zRreQ6DqreN9TvbD2zTtzdclf+sjLoJH0L1rekOmMuc= +github.com/k3s-io/kubernetes/staging/src/k8s.io/pod-security-admission v1.25.3-k3s1/go.mod h1:UXa1SCtlt+sdjF4COcwEsWS4SA2pqktxmZKHQRqyy+c= +github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.25.3-k3s1/go.mod h1:Uf/5aiSpuf/56f2iVq/HsQlBGerTbxYkf5k3XiRMK+E= github.com/karrick/godirwalk v1.16.1 h1:DynhcF+bztK8gooS0+NDJFrdNZjJ3gzVzC545UNA9iw= github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= @@ -1016,8 +1016,9 @@ github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ai github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= +github.com/stretchr/objx v0.4.0/go.mod 
h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.3-0.20181224173747-660f15d67dbb/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -1025,8 +1026,9 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= @@ -1138,8 +1140,9 @@ go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= +go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= @@ -1178,7 +1181,6 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= @@ -1477,8 +1479,8 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8 rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.32 
h1:2WjukG7txtEsbXsSKWtTibCdsyYAhcu6KFnttyDdZOQ= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.32/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.33 h1:LYqFq+6Cj2D0gFfrJvL7iElD4ET6ir3VDdhDdTK7rgc= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.33/go.mod h1:soWkSNf2tZC7aMibXEqVhCd73GOY5fJikn8qbdzemB0= sigs.k8s.io/cli-utils v0.16.0/go.mod h1:9Jqm9K2W6ShhCxsEuaz6HSRKKOXigPUx3ZfypGgxBLY= sigs.k8s.io/cli-utils v0.27.0/go.mod h1:8ll2fyx+bzjbwmwUnKBQU+2LDbMDsxy44DiDZ+drALg= sigs.k8s.io/controller-runtime v0.4.0/go.mod h1:ApC79lpY3PHW9xj/w9pj+lYkLgwAAUZwfXkME1Lajns= diff --git a/hack/airgap/image-list.txt b/hack/airgap/image-list.txt index ebfea487a..11f016b6f 100644 --- a/hack/airgap/image-list.txt +++ b/hack/airgap/image-list.txt @@ -1,7 +1,7 @@ docker.io/rancher/local-path-provisioner:v0.0.21 docker.io/rancher/mirrored-coredns-coredns:1.9.1 docker.io/rancher/mirrored-library-busybox:1.34.1 -docker.io/rancher/mirrored-metrics-server:v0.5.2 +docker.io/rancher/mirrored-metrics-server:v0.6.1 docker.io/rancher/mirrored-pause:3.6 quay.io/cilium/cilium:v1.12.2 quay.io/cilium/operator-generic:v1.12.2 diff --git a/manifests/ccm.yaml b/manifests/ccm.yaml index 815f28335..bbd7d2d04 100644 --- a/manifests/ccm.yaml +++ b/manifests/ccm.yaml @@ -24,22 +24,30 @@ rules: resources: - nodes verbs: - - '*' + - "*" - apiGroups: - "" resources: - nodes/status + - services/status verbs: - patch - apiGroups: - "" resources: - services + - pods verbs: + - get - list + - watch +- apiGroups: + - "" + resources: + - services + verbs: - patch - update - - watch - apiGroups: - "" resources: @@ -49,22 +57,16 @@ rules: - apiGroups: - "" resources: - - persistentvolumes + - namespaces verbs: + - create - get - - list - - update - - watch - apiGroups: - - "" + - apps resources: - - endpoints + - daemonsets verbs: - - create - - get - - list - - watch - - update + - "*" --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding diff --git a/manifests/metrics-server/metrics-server-deployment.yaml b/manifests/metrics-server/metrics-server-deployment.yaml index 07c2823fa..4887a5450 100644 --- a/manifests/metrics-server/metrics-server-deployment.yaml +++ b/manifests/metrics-server/metrics-server-deployment.yaml @@ -39,10 +39,10 @@ spec: emptyDir: {} containers: - name: metrics-server - image: %{SYSTEM_DEFAULT_REGISTRY}%rancher/mirrored-metrics-server:v0.5.2 + image: %{SYSTEM_DEFAULT_REGISTRY}%rancher/mirrored-metrics-server:v0.6.1 args: - --cert-dir=/tmp - - --secure-port=4443 + - --secure-port=10250 - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname - --kubelet-use-node-status-port - --metric-resolution=15s @@ -52,7 +52,7 @@ spec: memory: 70Mi ports: - name: https - containerPort: 4443 + containerPort: 10250 protocol: TCP readinessProbe: httpGet: diff --git a/manifests/metrics-server/resource-reader.yaml b/manifests/metrics-server/resource-reader.yaml index b12b4905b..acc8ef16e 100644 --- a/manifests/metrics-server/resource-reader.yaml +++ b/manifests/metrics-server/resource-reader.yaml @@ -4,13 +4,17 @@ kind: ClusterRole metadata: name: system:metrics-server rules: +- apiGroups: + - "" + resources: + - nodes/metrics + verbs: + - get - apiGroups: - "" resources: - pods - nodes - - nodes/stats - - namespaces verbs: - get - list diff --git a/pkg/agent/config/config.go b/pkg/agent/config/config.go index 3273879f0..39dbacb7a 100644 --- a/pkg/agent/config/config.go +++ b/pkg/agent/config/config.go @@ -8,7 +8,7 @@ 
import ( "encoding/hex" "encoding/pem" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/url" @@ -144,13 +144,13 @@ func getNodeNamedCrt(nodeName string, nodeIPs []net.IP, nodePasswordFile string) return nil, fmt.Errorf("%s: %s", u, resp.Status) } - return ioutil.ReadAll(resp.Body) + return io.ReadAll(resp.Body) } } func ensureNodeID(nodeIDFile string) (string, error) { if _, err := os.Stat(nodeIDFile); err == nil { - id, err := ioutil.ReadFile(nodeIDFile) + id, err := os.ReadFile(nodeIDFile) return strings.TrimSpace(string(id)), err } id := make([]byte, 4, 4) @@ -159,12 +159,12 @@ func ensureNodeID(nodeIDFile string) (string, error) { return "", err } nodeID := hex.EncodeToString(id) - return nodeID, ioutil.WriteFile(nodeIDFile, []byte(nodeID+"\n"), 0644) + return nodeID, os.WriteFile(nodeIDFile, []byte(nodeID+"\n"), 0644) } func ensureNodePassword(nodePasswordFile string) (string, error) { if _, err := os.Stat(nodePasswordFile); err == nil { - password, err := ioutil.ReadFile(nodePasswordFile) + password, err := os.ReadFile(nodePasswordFile) return strings.TrimSpace(string(password)), err } password := make([]byte, 16, 16) @@ -173,15 +173,15 @@ func ensureNodePassword(nodePasswordFile string) (string, error) { return "", err } nodePassword := hex.EncodeToString(password) - return nodePassword, ioutil.WriteFile(nodePasswordFile, []byte(nodePassword+"\n"), 0600) + return nodePassword, os.WriteFile(nodePasswordFile, []byte(nodePassword+"\n"), 0600) } func upgradeOldNodePasswordPath(oldNodePasswordFile, newNodePasswordFile string) { - password, err := ioutil.ReadFile(oldNodePasswordFile) + password, err := os.ReadFile(oldNodePasswordFile) if err != nil { return } - if err := ioutil.WriteFile(newNodePasswordFile, password, 0600); err != nil { + if err := os.WriteFile(newNodePasswordFile, password, 0600); err != nil { logrus.Warnf("Unable to write password file: %v", err) return } @@ -199,11 +199,11 @@ func getServingCert(nodeName string, nodeIPs []net.IP, servingCertFile, servingK servingCert, servingKey := splitCertKeyPEM(servingCert) - if err := ioutil.WriteFile(servingCertFile, servingCert, 0600); err != nil { + if err := os.WriteFile(servingCertFile, servingCert, 0600); err != nil { return nil, errors.Wrapf(err, "failed to write node cert") } - if err := ioutil.WriteFile(servingKeyFile, servingKey, 0600); err != nil { + if err := os.WriteFile(servingKeyFile, servingKey, 0600); err != nil { return nil, errors.Wrapf(err, "failed to write node key") } @@ -221,15 +221,15 @@ func getHostFile(filename, keyFile string, info *clientaccess.Info) error { return err } if keyFile == "" { - if err := ioutil.WriteFile(filename, fileBytes, 0600); err != nil { + if err := os.WriteFile(filename, fileBytes, 0600); err != nil { return errors.Wrapf(err, "failed to write cert %s", filename) } } else { fileBytes, keyBytes := splitCertKeyPEM(fileBytes) - if err := ioutil.WriteFile(filename, fileBytes, 0600); err != nil { + if err := os.WriteFile(filename, fileBytes, 0600); err != nil { return errors.Wrapf(err, "failed to write cert %s", filename) } - if err := ioutil.WriteFile(keyFile, keyBytes, 0600); err != nil { + if err := os.WriteFile(keyFile, keyBytes, 0600); err != nil { return errors.Wrapf(err, "failed to write key %s", filename) } } @@ -262,10 +262,10 @@ func getNodeNamedHostFile(filename, keyFile, nodeName string, nodeIPs []net.IP, } fileBytes, keyBytes := splitCertKeyPEM(fileBytes) - if err := ioutil.WriteFile(filename, fileBytes, 0600); err != nil { + if err := os.WriteFile(filename, fileBytes, 
0600); err != nil { return errors.Wrapf(err, "failed to write cert %s", filename) } - if err := ioutil.WriteFile(keyFile, keyBytes, 0600); err != nil { + if err := os.WriteFile(keyFile, keyBytes, 0600); err != nil { return errors.Wrapf(err, "failed to write key %s", filename) } return nil @@ -355,7 +355,7 @@ func get(ctx context.Context, envInfo *cmds.Agent, proxy proxy.Proxy) (*config.N if envInfo.Rootless { nodePasswordRoot = filepath.Join(envInfo.DataDir, "agent") } - nodeConfigPath := filepath.Join(nodePasswordRoot, "etc", "k8e", "node") + nodeConfigPath := filepath.Join(nodePasswordRoot, "etc", "rancher", "node") if err := os.MkdirAll(nodeConfigPath, 0755); err != nil { return nil, err } @@ -562,6 +562,7 @@ func get(ctx context.Context, envInfo *cmds.Agent, proxy proxy.Proxy) (*config.N nodeConfig.AgentConfig.Rootless = envInfo.Rootless nodeConfig.AgentConfig.PodManifests = filepath.Join(envInfo.DataDir, "agent", DefaultPodManifestPath) nodeConfig.AgentConfig.ProtectKernelDefaults = envInfo.ProtectKernelDefaults + nodeConfig.AgentConfig.DisableServiceLB = envInfo.DisableServiceLB if err := validateNetworkConfig(nodeConfig); err != nil { return nil, err @@ -634,7 +635,7 @@ func validateNetworkConfig(nodeConfig *config.Node) error { // need to upgrade the server to at least the same version as the agent, or disable the NPC // cluster-wide. if nodeConfig.AgentConfig.ServiceCIDR == nil || nodeConfig.AgentConfig.ServiceNodePortRange.Size == 0 { - return fmt.Errorf("incompatible down-level server detected; servers must be upgraded to at least %s", version.Version) + return fmt.Errorf("incompatible down-level server detected; servers must be upgraded to at least %s, or restarted with --disable-network-policy", version.Version) } return nil diff --git a/pkg/agent/containerd/config_linux.go b/pkg/agent/containerd/config_linux.go index c929a0741..281c9a764 100644 --- a/pkg/agent/containerd/config_linux.go +++ b/pkg/agent/containerd/config_linux.go @@ -5,21 +5,20 @@ package containerd import ( "context" - "io/ioutil" "os" "time" "github.com/containerd/containerd" "github.com/docker/docker/pkg/parsers/kernel" + "github.com/opencontainers/runc/libcontainer/userns" + "github.com/pkg/errors" + "github.com/rancher/wharfie/pkg/registries" + "github.com/sirupsen/logrus" "github.com/xiaods/k8e/pkg/agent/templates" util2 "github.com/xiaods/k8e/pkg/agent/util" "github.com/xiaods/k8e/pkg/cgroups" "github.com/xiaods/k8e/pkg/daemons/config" "github.com/xiaods/k8e/pkg/version" - "github.com/opencontainers/runc/libcontainer/userns" - "github.com/pkg/errors" - "github.com/rancher/wharfie/pkg/registries" - "github.com/sirupsen/logrus" "golang.org/x/sys/unix" "google.golang.org/grpc" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" @@ -83,7 +82,7 @@ func setupContainerdConfig(ctx context.Context, cfg *config.Node) error { logrus.Warnf("SELinux is enabled for "+version.Program+" but process is not running in context '%s', "+version.Program+"-selinux policy may need to be applied", SELinuxContextType) } - containerdTemplateBytes, err := ioutil.ReadFile(cfg.Containerd.Template) + containerdTemplateBytes, err := os.ReadFile(cfg.Containerd.Template) if err == nil { logrus.Infof("Using containerd template at %s", cfg.Containerd.Template) containerdTemplate = string(containerdTemplateBytes) @@ -131,4 +130,4 @@ func Client(address string) (*containerd.Client, error) { } return containerd.New(addr) -} \ No newline at end of file +} diff --git a/pkg/agent/containerd/containerd.go b/pkg/agent/containerd/containerd.go 
index 71a808864..6130c4a3c 100644 --- a/pkg/agent/containerd/containerd.go +++ b/pkg/agent/containerd/containerd.go @@ -5,7 +5,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -144,7 +143,7 @@ func preloadImages(ctx context.Context, cfg *config.Node) error { return nil } - fileInfos, err := ioutil.ReadDir(cfg.Images) + fileInfos, err := os.ReadDir(cfg.Images) if err != nil { logrus.Errorf("Unable to read images in %s: %v", cfg.Images, err) return nil diff --git a/pkg/agent/loadbalancer/config.go b/pkg/agent/loadbalancer/config.go index 8a4c3ecd6..878f4e255 100644 --- a/pkg/agent/loadbalancer/config.go +++ b/pkg/agent/loadbalancer/config.go @@ -2,7 +2,7 @@ package loadbalancer import ( "encoding/json" - "io/ioutil" + "os" "github.com/xiaods/k8e/pkg/agent/util" ) @@ -17,7 +17,7 @@ func (lb *LoadBalancer) writeConfig() error { func (lb *LoadBalancer) updateConfig() error { writeConfig := true - if configBytes, err := ioutil.ReadFile(lb.configFile); err == nil { + if configBytes, err := os.ReadFile(lb.configFile); err == nil { config := &LoadBalancer{} if err := json.Unmarshal(configBytes, config); err == nil { if config.ServerURL == lb.ServerURL { diff --git a/pkg/agent/loadbalancer/loadbalancer_test.go b/pkg/agent/loadbalancer/loadbalancer_test.go index 61b214603..cc9405f8b 100644 --- a/pkg/agent/loadbalancer/loadbalancer_test.go +++ b/pkg/agent/loadbalancer/loadbalancer_test.go @@ -5,7 +5,6 @@ import ( "context" "errors" "fmt" - "io/ioutil" "net" "net/url" "os" @@ -85,7 +84,7 @@ func assertNotEqual(t *testing.T, a interface{}, b interface{}) { } func Test_UnitFailOver(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "lb-test") + tmpDir, err := os.MkdirTemp("", "lb-test") if err != nil { assertEqual(t, err, nil) } @@ -146,7 +145,7 @@ func Test_UnitFailOver(t *testing.T) { } func Test_UnitFailFast(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "lb-test") + tmpDir, err := os.MkdirTemp("", "lb-test") if err != nil { assertEqual(t, err, nil) } diff --git a/pkg/agent/run_linux.go b/pkg/agent/run_linux.go index 623896fb5..7d29b2307 100644 --- a/pkg/agent/run_linux.go +++ b/pkg/agent/run_linux.go @@ -4,7 +4,6 @@ package agent import ( - "io/ioutil" "os" "path/filepath" @@ -38,5 +37,5 @@ func setupCriCtlConfig(cfg cmds.Agent, nodeConfig *config.Node) error { } crp := "runtime-endpoint: " + cre + "\n" - return ioutil.WriteFile(agentConfDir+"/crictl.yaml", []byte(crp), 0600) + return os.WriteFile(agentConfDir+"/crictl.yaml", []byte(crp), 0600) } diff --git a/pkg/agent/run_windows.go b/pkg/agent/run_windows.go index acc9d2991..cff0c7dc9 100644 --- a/pkg/agent/run_windows.go +++ b/pkg/agent/run_windows.go @@ -4,7 +4,6 @@ package agent import ( - "io/ioutil" "os" "path/filepath" "strings" @@ -40,5 +39,5 @@ func setupCriCtlConfig(cfg cmds.Agent, nodeConfig *config.Node) error { } crp := "runtime-endpoint: " + cre + "\n" - return ioutil.WriteFile(filepath.Join(agentConfDir, "crictl.yaml"), []byte(crp), 0600) + return os.WriteFile(filepath.Join(agentConfDir, "crictl.yaml"), []byte(crp), 0600) } diff --git a/pkg/agent/tunnel/tunnel.go b/pkg/agent/tunnel/tunnel.go index eacd3b46a..af5af46e4 100644 --- a/pkg/agent/tunnel/tunnel.go +++ b/pkg/agent/tunnel/tunnel.go @@ -7,7 +7,9 @@ import ( "net" "os" "reflect" + "strconv" "sync" + "time" "github.com/gorilla/websocket" "github.com/rancher/remotedialer" @@ -22,6 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" + 
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" @@ -31,10 +34,11 @@ import ( ) type agentTunnel struct { - client kubernetes.Interface - cidrs cidranger.Ranger - ports map[string]bool - mode string + client kubernetes.Interface + cidrs cidranger.Ranger + ports map[string]bool + mode string + kubeletPort string } // explicit interface check @@ -85,6 +89,9 @@ func Setup(ctx context.Context, config *daemonconfig.Node, proxy proxy.Proxy) er close(apiServerReady) }() + // Allow the kubelet port, as published via our node object + go tunnel.setKubeletPort(ctx, apiServerReady) + switch tunnel.mode { case daemonconfig.EgressSelectorModeCluster: // In Cluster mode, we allow the cluster CIDRs, and any connections to the node's IPs for pods using host network. @@ -135,6 +142,23 @@ func Setup(ctx context.Context, config *daemonconfig.Node, proxy proxy.Proxy) er return nil } +// setKubeletPort retrieves the configured kubelet port from our node object +func (a *agentTunnel) setKubeletPort(ctx context.Context, apiServerReady <-chan struct{}) { + <-apiServerReady + + wait.PollImmediateWithContext(ctx, time.Second, util.DefaultAPIServerReadyTimeout, func(ctx context.Context) (bool, error) { + nodeName := os.Getenv("NODE_NAME") + node, err := a.client.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) + if err != nil { + logrus.Debugf("Tunnel authorizer failed to get Kubelet Port: %v", err) + return false, nil + } + a.kubeletPort = strconv.FormatInt(int64(node.Status.DaemonEndpoints.KubeletEndpoint.Port), 10) + logrus.Infof("Tunnel authorizer set Kubelet Port %s", a.kubeletPort) + return true, nil + }) +} + func (a *agentTunnel) clusterAuth(config *daemonconfig.Node) { // In Cluster mode, we add static entries for the Node IPs and Cluster CIDRs for _, ip := range config.AgentConfig.NodeIPs { @@ -304,7 +328,7 @@ func (a *agentTunnel) authorized(ctx context.Context, proto, address string) boo logrus.Debugf("Tunnel authorizer checking dial request for %s", address) host, port, err := net.SplitHostPort(address) if err == nil { - if proto == "tcp" && daemonconfig.KubeletReservedPorts[port] && (host == "127.0.0.1" || host == "::1") { + if a.isKubeletPort(proto, host, port) { return true } if ip := net.ParseIP(host); ip != nil { @@ -359,3 +383,8 @@ func (a *agentTunnel) connect(rootCtx context.Context, waitGroup *sync.WaitGroup return cancel } + +// isKubeletPort returns true if the connection is to a reserved TCP port on a loopback address. 
+func (a *agentTunnel) isKubeletPort(proto, host, port string) bool { + return proto == "tcp" && (host == "127.0.0.1" || host == "::1") && (port == a.kubeletPort || port == daemonconfig.StreamServerPort) +} diff --git a/pkg/agent/util/file.go b/pkg/agent/util/file.go index 17c263c14..ae4633e7a 100644 --- a/pkg/agent/util/file.go +++ b/pkg/agent/util/file.go @@ -1,7 +1,6 @@ package util import ( - "io/ioutil" "os" "path/filepath" @@ -10,7 +9,7 @@ import ( func WriteFile(name string, content string) error { os.MkdirAll(filepath.Dir(name), 0755) - err := ioutil.WriteFile(name, []byte(content), 0644) + err := os.WriteFile(name, []byte(content), 0644) if err != nil { return errors.Wrapf(err, "writing %s", name) } @@ -19,11 +18,11 @@ func WriteFile(name string, content string) error { func CopyFile(sourceFile string, destinationFile string) error { os.MkdirAll(filepath.Dir(destinationFile), 0755) - input, err := ioutil.ReadFile(sourceFile) + input, err := os.ReadFile(sourceFile) if err != nil { return errors.Wrapf(err, "copying %s to %s", sourceFile, destinationFile) } - err = ioutil.WriteFile(destinationFile, input, 0644) + err = os.WriteFile(destinationFile, input, 0644) if err != nil { return errors.Wrapf(err, "copying %s to %s", sourceFile, destinationFile) } diff --git a/pkg/authenticator/passwordfile/passwordfile_test.go b/pkg/authenticator/passwordfile/passwordfile_test.go index e9f43d6dc..0ebbfae8c 100644 --- a/pkg/authenticator/passwordfile/passwordfile_test.go +++ b/pkg/authenticator/passwordfile/passwordfile_test.go @@ -18,7 +18,6 @@ package passwordfile import ( "context" - "io/ioutil" "os" "reflect" "testing" @@ -146,14 +145,14 @@ func TestInsufficientColumnsPasswordFile(t *testing.T) { } func newWithContents(t *testing.T, contents string) (auth *PasswordAuthenticator, err error) { - f, err := ioutil.TempFile("", "passwordfile_test") + f, err := os.CreateTemp("", "passwordfile_test") if err != nil { t.Fatalf("unexpected error creating passwordfile: %v", err) } f.Close() defer os.Remove(f.Name()) - if err := ioutil.WriteFile(f.Name(), []byte(contents), 0700); err != nil { + if err := os.WriteFile(f.Name(), []byte(contents), 0700); err != nil { t.Fatalf("unexpected error writing passwordfile: %v", err) } diff --git a/pkg/bootstrap/bootstrap.go b/pkg/bootstrap/bootstrap.go index 9262178e1..16da2dee6 100644 --- a/pkg/bootstrap/bootstrap.go +++ b/pkg/bootstrap/bootstrap.go @@ -3,7 +3,6 @@ package bootstrap import ( "encoding/json" "io" - "io/ioutil" "net/http" "os" "path/filepath" @@ -34,7 +33,7 @@ func ReadFromDisk(w io.Writer, bootstrap *config.ControlRuntimeBootstrap) error if path == "" { continue } - data, err := ioutil.ReadFile(path) + data, err := os.ReadFile(path) if err != nil { logrus.Warnf("failed to read %s", path) continue diff --git a/pkg/cgroups/cgroups_linux.go b/pkg/cgroups/cgroups_linux.go index f96b8c9cf..27a4fd09d 100644 --- a/pkg/cgroups/cgroups_linux.go +++ b/pkg/cgroups/cgroups_linux.go @@ -7,7 +7,6 @@ import ( "bufio" "errors" "fmt" - "io/ioutil" "os" "path/filepath" "strings" @@ -26,7 +25,7 @@ func Validate() error { } func validateCgroupsV1() error { - cgroups, err := ioutil.ReadFile("/proc/self/cgroup") + cgroups, err := os.ReadFile("/proc/self/cgroup") if err != nil { return err } diff --git a/pkg/cli/cert/cert.go b/pkg/cli/cert/cert.go index 69bc62494..d5732c077 100644 --- a/pkg/cli/cert/cert.go +++ b/pkg/cli/cert/cert.go @@ -2,7 +2,6 @@ package cert import ( "errors" - "io/ioutil" "os" "path/filepath" "strconv" @@ -154,7 +153,7 @@ func rotate(app 
*cli.Context, cfg *cmds.Server) error { serverConfig.ControlConfig.Runtime.ClientCloudControllerKey) case version.Program + k8eServerService: dynamicListenerRegenFilePath := filepath.Join(serverDataDir, "tls", "dynamic-cert-regenerate") - if err := ioutil.WriteFile(dynamicListenerRegenFilePath, []byte{}, 0600); err != nil { + if err := os.WriteFile(dynamicListenerRegenFilePath, []byte{}, 0600); err != nil { return err } logrus.Infof("Rotating dynamic listener certificate") @@ -199,11 +198,11 @@ func rotate(app *cli.Context, cfg *cmds.Server) error { func copyFile(src, destDir string) error { _, err := os.Stat(src) if err == nil { - input, err := ioutil.ReadFile(src) + input, err := os.ReadFile(src) if err != nil { return err } - return ioutil.WriteFile(filepath.Join(destDir, filepath.Base(src)), input, 0644) + return os.WriteFile(filepath.Join(destDir, filepath.Base(src)), input, 0644) } else if errors.Is(err, os.ErrNotExist) { return nil } diff --git a/pkg/cli/cmds/agent.go b/pkg/cli/cmds/agent.go index 33cd41906..7ed3ed628 100644 --- a/pkg/cli/cmds/agent.go +++ b/pkg/cli/cmds/agent.go @@ -16,6 +16,7 @@ type Agent struct { ServerURL string APIAddressCh chan []string DisableLoadBalancer bool + DisableServiceLB bool ETCDAgent bool LBServerPort int ResolvConf string diff --git a/pkg/cli/cmds/server.go b/pkg/cli/cmds/server.go index e548e2b20..0f962089c 100644 --- a/pkg/cli/cmds/server.go +++ b/pkg/cli/cmds/server.go @@ -98,6 +98,7 @@ type Server struct { EtcdS3Folder string EtcdS3Timeout time.Duration EtcdS3Insecure bool + ServiceLBNamespace string } var ( @@ -207,6 +208,12 @@ var ServerFlags = []cli.Flag{ Destination: &ServerConfig.EgressSelectorMode, Value: "agent", }, + cli.StringFlag{ + Name: "servicelb-namespace", + Usage: "(networking) Namespace of the pods for the servicelb component", + Destination: &ServerConfig.ServiceLBNamespace, + Value: "kube-system", + }, cli.StringFlag{ Name: "write-kubeconfig,o", Usage: "(client) Write kubeconfig for admin client to this file", diff --git a/pkg/cli/secretsencrypt/secrets_encrypt.go b/pkg/cli/secretsencrypt/secrets_encrypt.go index 87878a495..c3ae9c73c 100644 --- a/pkg/cli/secretsencrypt/secrets_encrypt.go +++ b/pkg/cli/secretsencrypt/secrets_encrypt.go @@ -4,7 +4,6 @@ import ( "bytes" "encoding/json" "fmt" - "io/ioutil" "os" "path/filepath" "strings" @@ -33,7 +32,7 @@ func commandPrep(app *cli.Context, cfg *cmds.Server) (*clientaccess.Info, error) if cfg.Token == "" { fp := filepath.Join(dataDir, "token") - tokenByte, err := ioutil.ReadFile(fp) + tokenByte, err := os.ReadFile(fp) if err != nil { return nil, err } diff --git a/pkg/cli/server/server.go b/pkg/cli/server/server.go index 71a4a3a28..b03ef7beb 100644 --- a/pkg/cli/server/server.go +++ b/pkg/cli/server/server.go @@ -113,7 +113,8 @@ func run(app *cli.Context, cfg *cmds.Server, leaderControllers server.CustomCont serverConfig.ControlConfig.DataDir = cfg.DataDir serverConfig.ControlConfig.KubeConfigOutput = cfg.KubeConfigOutput serverConfig.ControlConfig.KubeConfigMode = cfg.KubeConfigMode - serverConfig.Rootless = cfg.Rootless + serverConfig.ControlConfig.Rootless = cfg.Rootless + serverConfig.ControlConfig.ServiceLBNamespace = cfg.ServiceLBNamespace serverConfig.ControlConfig.SANs = cfg.TLSSan serverConfig.ControlConfig.BindAddress = cfg.BindAddress serverConfig.ControlConfig.SupervisorPort = cfg.SupervisorPort @@ -349,8 +350,11 @@ func run(app *cli.Context, cfg *cmds.Server, leaderControllers server.CustomCont serverConfig.ControlConfig.Disables[v] = true } } + if 
serverConfig.ControlConfig.Skips["servicelb"] { + serverConfig.ControlConfig.DisableServiceLB = true + } - if serverConfig.ControlConfig.DisableCCM { + if serverConfig.ControlConfig.DisableCCM && serverConfig.ControlConfig.DisableServiceLB { serverConfig.ControlConfig.Skips["ccm"] = true serverConfig.ControlConfig.Disables["ccm"] = true } diff --git a/pkg/clientaccess/kubeconfig.go b/pkg/clientaccess/kubeconfig.go index bcdb66f3f..2c9d75388 100644 --- a/pkg/clientaccess/kubeconfig.go +++ b/pkg/clientaccess/kubeconfig.go @@ -1,7 +1,7 @@ package clientaccess import ( - "io/ioutil" + "os" "github.com/pkg/errors" "k8s.io/client-go/tools/clientcmd" @@ -10,17 +10,17 @@ import ( // WriteClientKubeConfig generates a kubeconfig at destFile that can be used to connect to a server at url with the given certs and keys func WriteClientKubeConfig(destFile, url, serverCAFile, clientCertFile, clientKeyFile string) error { - serverCA, err := ioutil.ReadFile(serverCAFile) + serverCA, err := os.ReadFile(serverCAFile) if err != nil { return errors.Wrapf(err, "failed to read %s", serverCAFile) } - clientCert, err := ioutil.ReadFile(clientCertFile) + clientCert, err := os.ReadFile(clientCertFile) if err != nil { return errors.Wrapf(err, "failed to read %s", clientCertFile) } - clientKey, err := ioutil.ReadFile(clientKeyFile) + clientKey, err := os.ReadFile(clientKeyFile) if err != nil { return errors.Wrapf(err, "failed to read %s", clientKeyFile) } diff --git a/pkg/clientaccess/token.go b/pkg/clientaccess/token.go index 57192fc56..cfad23b74 100644 --- a/pkg/clientaccess/token.go +++ b/pkg/clientaccess/token.go @@ -7,9 +7,10 @@ import ( "crypto/x509" "encoding/hex" "fmt" - "io/ioutil" + "io" "net/http" "net/url" + "os" "strings" "time" @@ -296,7 +297,7 @@ func get(u string, client *http.Client, username, password string) ([]byte, erro return nil, fmt.Errorf("%s: %s", u, resp.Status) } - return ioutil.ReadAll(resp.Body) + return io.ReadAll(resp.Body) } // put makes a request to a url using a provided client, username, and password @@ -317,7 +318,7 @@ func put(u string, body []byte, client *http.Client, username, password string) } defer resp.Body.Close() - respBody, _ := ioutil.ReadAll(resp.Body) + respBody, _ := io.ReadAll(resp.Body) if resp.StatusCode != http.StatusOK { return fmt.Errorf("%s: %s %s", u, resp.Status, string(respBody)) } @@ -332,7 +333,7 @@ func FormatToken(token, certFile string) (string, error) { certHash := "" if len(certFile) > 0 { - b, err := ioutil.ReadFile(certFile) + b, err := os.ReadFile(certFile) if err != nil { return "", nil } diff --git a/pkg/cloudprovider/cloudprovider.go b/pkg/cloudprovider/cloudprovider.go index 35cdd2c90..43094185e 100644 --- a/pkg/cloudprovider/cloudprovider.go +++ b/pkg/cloudprovider/cloudprovider.go @@ -1,47 +1,130 @@ package cloudprovider import ( + "encoding/json" + "fmt" "io" + "github.com/rancher/wrangler/pkg/apply" + "github.com/rancher/wrangler/pkg/generated/controllers/apps" + appsclient "github.com/rancher/wrangler/pkg/generated/controllers/apps/v1" + "github.com/rancher/wrangler/pkg/generated/controllers/core" + coreclient "github.com/rancher/wrangler/pkg/generated/controllers/core/v1" + "github.com/rancher/wrangler/pkg/generic" + "github.com/rancher/wrangler/pkg/start" + "github.com/sirupsen/logrus" + "github.com/xiaods/k8e/pkg/util" "github.com/xiaods/k8e/pkg/version" - "k8s.io/client-go/informers" - informercorev1 "k8s.io/client-go/informers/core/v1" - "k8s.io/client-go/tools/cache" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + 
"k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" cloudprovider "k8s.io/cloud-provider" ) +// Config describes externally-configurable cloud provider configuration. +// This is normally unmarshalled from a JSON config file. +type Config struct { + LBEnabled bool `json:"lbEnabled"` + LBImage string `json:"lbImage"` + LBNamespace string `json:"lbNamespace"` + NodeEnabled bool `json:"nodeEnabled"` + Rootless bool `json:"rootless"` +} + type k8e struct { - nodeInformer informercorev1.NodeInformer - nodeInformerHasSynced cache.InformerSynced + Config + + client kubernetes.Interface + recorder record.EventRecorder + + processor apply.Apply + daemonsetCache appsclient.DaemonSetCache + nodeCache coreclient.NodeCache + podCache coreclient.PodCache + workqueue workqueue.RateLimitingInterface } var _ cloudprovider.Interface = &k8e{} -var _ cloudprovider.InformerUser = &k8e{} func init() { cloudprovider.RegisterCloudProvider(version.Program, func(config io.Reader) (cloudprovider.Interface, error) { - return &k8e{}, nil + var err error + k := k8e{ + Config: Config{ + LBEnabled: true, + LBImage: DefaultLBImage, + LBNamespace: DefaultLBNS, + NodeEnabled: true, + }, + } + + if config != nil { + var bytes []byte + bytes, err = io.ReadAll(config) + if err == nil { + err = json.Unmarshal(bytes, &k.Config) + } + } + + if !k.LBEnabled && !k.NodeEnabled { + return nil, fmt.Errorf("all cloud-provider functionality disabled by config") + } + + return &k, err }) } func (k *k8e) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) { -} + ctx, _ := wait.ContextForChannel(stop) + config := clientBuilder.ConfigOrDie(controllerName) + k.client = kubernetes.NewForConfigOrDie(config) -func (k *k8e) SetInformers(informerFactory informers.SharedInformerFactory) { - k.nodeInformer = informerFactory.Core().V1().Nodes() - k.nodeInformerHasSynced = k.nodeInformer.Informer().HasSynced + if k.LBEnabled { + // Wrangler controller and caches are only needed if the load balancer controller is enabled. + k.recorder = util.BuildControllerEventRecorder(k.client, controllerName, meta.NamespaceAll) + coreFactory := core.NewFactoryFromConfigOrDie(config) + k.nodeCache = coreFactory.Core().V1().Node().Cache() + + lbCoreFactory := core.NewFactoryFromConfigWithOptionsOrDie(config, &generic.FactoryOptions{Namespace: k.LBNamespace}) + lbAppsFactory := apps.NewFactoryFromConfigWithOptionsOrDie(config, &generic.FactoryOptions{Namespace: k.LBNamespace}) + + processor, err := apply.NewForConfig(config) + if err != nil { + logrus.Fatalf("Failed to create apply processor for %s: %v", controllerName, err) + } + k.processor = processor.WithDynamicLookup().WithCacheTypes(lbAppsFactory.Apps().V1().DaemonSet()) + k.daemonsetCache = lbAppsFactory.Apps().V1().DaemonSet().Cache() + k.podCache = lbCoreFactory.Core().V1().Pod().Cache() + k.workqueue = workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()) + + if err := k.Register(ctx, coreFactory.Core().V1().Node(), lbCoreFactory.Core().V1().Pod()); err != nil { + logrus.Fatalf("Failed to register %s handlers: %v", controllerName, err) + } + + if err := start.All(ctx, 1, coreFactory, lbCoreFactory, lbAppsFactory); err != nil { + logrus.Fatalf("Failed to start %s controllers: %v", controllerName, err) + } + } else { + // If load-balancer functionality has not been enabled, delete managed daemonsets. 
+ // This uses the raw kubernetes client, as the controllers are not started when the load balancer controller is disabled. + if err := k.deleteAllDaemonsets(ctx); err != nil { + logrus.Fatalf("Failed to clean up %s daemonsets: %v", controllerName, err) + } + } } func (k *k8e) Instances() (cloudprovider.Instances, bool) { - return k, true + return nil, false } func (k *k8e) InstancesV2() (cloudprovider.InstancesV2, bool) { - return nil, false + return k, k.NodeEnabled } func (k *k8e) LoadBalancer() (cloudprovider.LoadBalancer, bool) { - return nil, false + return k, k.LBEnabled } func (k *k8e) Zones() (cloudprovider.Zones, bool) { @@ -61,5 +144,5 @@ func (k *k8e) ProviderName() string { } func (k *k8e) HasClusterID() bool { - return true + return false } diff --git a/pkg/cloudprovider/instances.go b/pkg/cloudprovider/instances.go index 26e13906b..cc7de43e9 100644 --- a/pkg/cloudprovider/instances.go +++ b/pkg/cloudprovider/instances.go @@ -9,7 +9,7 @@ import ( "github.com/sirupsen/logrus" "github.com/xiaods/k8e/pkg/version" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" + v1 "k8s.io/api/core/v1" cloudprovider "k8s.io/cloud-provider" ) @@ -19,56 +19,26 @@ var ( HostnameKey = version.Program + ".io/hostname" ) -func (k *k8e) AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error { - return cloudprovider.NotImplemented -} - -func (k *k8e) CurrentNodeName(ctx context.Context, hostname string) (types.NodeName, error) { - return types.NodeName(hostname), nil -} +var _ cloudprovider.InstancesV2 = &k8e{} -func (k *k8e) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) { +// InstanceExists returns true if the instance for the given node exists according to the cloud provider. +// K8e nodes always exist. +func (k *k8e) InstanceExists(ctx context.Context, node *corev1.Node) (bool, error) { return true, nil } -func (k *k8e) InstanceID(ctx context.Context, nodeName types.NodeName) (string, error) { - if k.nodeInformerHasSynced == nil || !k.nodeInformerHasSynced() { - return "", errors.New("Node informer has not synced yet") - } - - _, err := k.nodeInformer.Lister().Get(string(nodeName)) - if err != nil { - return "", fmt.Errorf("Failed to find node %s: %v", nodeName, err) - } - return string(nodeName), nil -} - -func (k *k8e) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) { - return true, cloudprovider.NotImplemented +// InstanceShutdown returns true if the instance is shutdown according to the cloud provider. +// K8e nodes are never shutdown. +func (k *k8e) InstanceShutdown(ctx context.Context, node *v1.Node) (bool, error) { + return false, nil } -func (k *k8e) InstanceType(ctx context.Context, name types.NodeName) (string, error) { - _, err := k.InstanceID(ctx, name) - if err != nil { - return "", err +// InstanceMetadata returns the instance's metadata. 
+func (k *k8e) InstanceMetadata(ctx context.Context, node *v1.Node) (*cloudprovider.InstanceMetadata, error) { + if (node.Annotations[InternalIPKey] == "") && (node.Labels[InternalIPKey] == "") { + return nil, errors.New("address annotations not yet set") } - return version.Program, nil -} - -func (k *k8e) InstanceTypeByProviderID(ctx context.Context, providerID string) (string, error) { - return "", cloudprovider.NotImplemented -} - -func (k *k8e) NodeAddresses(ctx context.Context, name types.NodeName) ([]corev1.NodeAddress, error) { addresses := []corev1.NodeAddress{} - if k.nodeInformerHasSynced == nil || !k.nodeInformerHasSynced() { - return nil, errors.New("Node informer has not synced yet") - } - - node, err := k.nodeInformer.Lister().Get(string(name)) - if err != nil { - return nil, fmt.Errorf("Failed to find node %s: %v", name, err) - } // check internal address if address := node.Annotations[InternalIPKey]; address != "" { for _, v := range strings.Split(address, ",") { @@ -77,7 +47,7 @@ func (k *k8e) NodeAddresses(ctx context.Context, name types.NodeName) ([]corev1. } else if address = node.Labels[InternalIPKey]; address != "" { addresses = append(addresses, corev1.NodeAddress{Type: corev1.NodeInternalIP, Address: address}) } else { - logrus.Infof("Couldn't find node internal ip annotation or label on node %s", name) + logrus.Infof("Couldn't find node internal ip annotation or label on node %s", node.Name) } // check external address @@ -95,12 +65,14 @@ func (k *k8e) NodeAddresses(ctx context.Context, name types.NodeName) ([]corev1. } else if address = node.Labels[HostnameKey]; address != "" { addresses = append(addresses, corev1.NodeAddress{Type: corev1.NodeHostName, Address: address}) } else { - logrus.Infof("Couldn't find node hostname annotation or label on node %s", name) + logrus.Infof("Couldn't find node hostname annotation or label on node %s", node.Name) } - return addresses, nil -} - -func (k *k8e) NodeAddressesByProviderID(ctx context.Context, providerID string) ([]corev1.NodeAddress, error) { - return nil, cloudprovider.NotImplemented + return &cloudprovider.InstanceMetadata{ + ProviderID: fmt.Sprintf("%s://%s", version.Program, node.Name), + InstanceType: version.Program, + NodeAddresses: addresses, + Zone: "", + Region: "", + }, nil } diff --git a/pkg/cloudprovider/loadbalancer.go b/pkg/cloudprovider/loadbalancer.go new file mode 100644 index 000000000..c0cb80826 --- /dev/null +++ b/pkg/cloudprovider/loadbalancer.go @@ -0,0 +1,53 @@ +package cloudprovider + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + cloudprovider "k8s.io/cloud-provider" +) + +var _ cloudprovider.LoadBalancer = &k8e{} + +// GetLoadBalancer returns whether the specified load balancer exists, and if so, what its status is. +func (k *k8e) GetLoadBalancer(ctx context.Context, clusterName string, service *corev1.Service) (*corev1.LoadBalancerStatus, bool, error) { + if _, err := k.getDaemonSet(service); err != nil { + if apierrors.IsNotFound(err) { + return nil, false, nil + } + return nil, false, err + } + + status, err := k.getStatus(service) + return status, true, err +} + +// GetLoadBalancerName returns the name of the load balancer. +func (k *k8e) GetLoadBalancerName(ctx context.Context, clusterName string, service *corev1.Service) string { + return generateName(service) +} + +// EnsureLoadBalancer creates a new load balancer 'name', or updates the existing one. Returns the status of the balancer. 
+// The node list is unused; see the comment on UpdateLoadBalancer for information on why. +// This is called when the Service is created or changes. +func (k *k8e) EnsureLoadBalancer(ctx context.Context, clusterName string, service *corev1.Service, nodes []*corev1.Node) (*corev1.LoadBalancerStatus, error) { + if err := k.deployDaemonSet(ctx, service); err != nil { + return nil, err + } + return nil, cloudprovider.ImplementedElsewhere +} + +// UpdateLoadBalancer updates hosts under the specified load balancer. +// This is not used, as it filters node updates based on criteria not compatible with how our DaemonSet selects +// nodes for inclusion. It also does not provide any opportunity to update the load balancer status. +// https://github.com/kubernetes/kubernetes/blob/v1.25.0/staging/src/k8s.io/cloud-provider/controllers/service/controller.go#L985-L993 +func (k *k8e) UpdateLoadBalancer(ctx context.Context, clusterName string, service *corev1.Service, nodes []*corev1.Node) error { + return cloudprovider.ImplementedElsewhere +} + +// EnsureLoadBalancerDeleted deletes the specified load balancer if it exists, +// returning nil if the load balancer specified either didn't exist or was successfully deleted. +func (k *k8e) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *corev1.Service) error { + return k.deleteDaemonSet(ctx, service) +} diff --git a/pkg/cloudprovider/servicelb.go b/pkg/cloudprovider/servicelb.go new file mode 100644 index 000000000..6e5de4c10 --- /dev/null +++ b/pkg/cloudprovider/servicelb.go @@ -0,0 +1,661 @@ +package cloudprovider + +import ( + "context" + "errors" + "fmt" + "sort" + "strconv" + "strings" + "time" + + "github.com/rancher/wrangler/pkg/condition" + coreclient "github.com/rancher/wrangler/pkg/generated/controllers/core/v1" + "github.com/rancher/wrangler/pkg/merr" + "github.com/rancher/wrangler/pkg/objectset" + "github.com/sirupsen/logrus" + "github.com/xiaods/k8e/pkg/version" + apps "k8s.io/api/apps/v1" + core "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/retry" + ccmapp "k8s.io/cloud-provider/app" + servicehelper "k8s.io/cloud-provider/service/helpers" + utilsnet "k8s.io/utils/net" + utilpointer "k8s.io/utils/pointer" +) + +var ( + finalizerName = "svccontroller." + version.Program + ".cattle.io/daemonset" + svcNameLabel = "svccontroller." + version.Program + ".cattle.io/svcname" + svcNamespaceLabel = "svccontroller." + version.Program + ".cattle.io/svcnamespace" + daemonsetNodeLabel = "svccontroller." + version.Program + ".cattle.io/enablelb" + daemonsetNodePoolLabel = "svccontroller." + version.Program + ".cattle.io/lbpool" + nodeSelectorLabel = "svccontroller." 
+ version.Program + ".cattle.io/nodeselector" + controllerName = ccmapp.DefaultInitFuncConstructors["service"].InitContext.ClientName +) + +const ( + Ready = condition.Cond("Ready") + DefaultLBNS = meta.NamespaceSystem + DefaultLBImage = "rancher/klipper-lb:v0.3.5" +) + +func (k *k8e) Register(ctx context.Context, + nodes coreclient.NodeController, + pods coreclient.PodController, +) error { + nodes.OnChange(ctx, controllerName, k.onChangeNode) + pods.OnChange(ctx, controllerName, k.onChangePod) + + if err := k.createServiceLBNamespace(ctx); err != nil { + return err + } + + if err := k.createServiceLBServiceAccount(ctx); err != nil { + return err + } + + go wait.Until(k.runWorker, time.Second, ctx.Done()) + + return k.removeServiceFinalizers(ctx) +} + +// createServiceLBNamespace ensures that the configured namespace exists. +func (k *k8e) createServiceLBNamespace(ctx context.Context) error { + _, err := k.client.CoreV1().Namespaces().Create(ctx, &core.Namespace{ + ObjectMeta: meta.ObjectMeta{ + Name: k.LBNamespace, + }, + }, meta.CreateOptions{}) + if apierrors.IsAlreadyExists(err) { + return nil + } + return err +} + +// createServiceLBServiceAccount ensures that the ServiceAccount used by pods exists +func (k *k8e) createServiceLBServiceAccount(ctx context.Context) error { + _, err := k.client.CoreV1().ServiceAccounts(k.LBNamespace).Create(ctx, &core.ServiceAccount{ + ObjectMeta: meta.ObjectMeta{ + Name: "svclb", + Namespace: k.LBNamespace, + }, + }, meta.CreateOptions{}) + if apierrors.IsAlreadyExists(err) { + return nil + } + return err +} + +// onChangePod handles changes to Pods. +// If the pod has labels that tie it to a service, and the pod has an IP assigned, +// enqueue an update to the service's status. +func (k *k8e) onChangePod(key string, pod *core.Pod) (*core.Pod, error) { + if pod == nil { + return nil, nil + } + + serviceName := pod.Labels[svcNameLabel] + if serviceName == "" { + return pod, nil + } + + serviceNamespace := pod.Labels[svcNamespaceLabel] + if serviceNamespace == "" { + return pod, nil + } + + if pod.Status.PodIP == "" { + return pod, nil + } + + k.workqueue.Add(serviceNamespace + "/" + serviceName) + return pod, nil +} + +// onChangeNode handles changes to Nodes. We need to handle this as we may need to kick the DaemonSet +// to add or remove pods from nodes if labels have changed. +func (k *k8e) onChangeNode(key string, node *core.Node) (*core.Node, error) { + if node == nil { + return nil, nil + } + if _, ok := node.Labels[daemonsetNodeLabel]; !ok { + return node, nil + } + + if err := k.updateDaemonSets(); err != nil { + return node, err + } + + return node, nil +} + +// runWorker dequeues Service changes from the work queue +// We run a lightweight work queue to handle service updates. We don't need the full overhead +// of a wrangler service controller and shared informer cache, but we do want to run changes +// through a keyed queue to reduce thrashing when pods are updated. Much of this is cribbed from +// https://github.com/rancher/lasso/blob/release/v2.5/pkg/controller/controller.go#L173-L215 +func (k *k8e) runWorker() { + for k.processNextWorkItem() { + } +} + +// processNextWorkItem does work for a single item in the queue, +// returning a boolean that indicates if the queue should continue +// to be serviced. 
+func (k *k8e) processNextWorkItem() bool { + obj, shutdown := k.workqueue.Get() + + if shutdown { + return false + } + + if err := k.processSingleItem(obj); err != nil && !apierrors.IsConflict(err) { + logrus.Errorf("%s: %v", controllerName, err) + } + return true +} + +// processSingleItem processes a single item from the work queue, +// requeueing it if the handler fails. +func (k *k8e) processSingleItem(obj interface{}) error { + var ( + key string + ok bool + ) + + defer k.workqueue.Done(obj) + + if key, ok = obj.(string); !ok { + logrus.Errorf("expected string in workqueue but got %#v", obj) + k.workqueue.Forget(obj) + return nil + } + keyParts := strings.SplitN(key, "/", 2) + if err := k.updateStatus(keyParts[0], keyParts[1]); err != nil { + k.workqueue.AddRateLimited(key) + return fmt.Errorf("error updating LoadBalancer Status for %s: %v, requeueing", key, err) + } + + k.workqueue.Forget(obj) + return nil + +} + +// updateStatus updates the load balancer status for the matching service, if it exists and is a +// LoadBalancer service. The patchStatus function handles checking to see if status needs updating. +func (k *k8e) updateStatus(namespace, name string) error { + svc, err := k.client.CoreV1().Services(namespace).Get(context.TODO(), name, meta.GetOptions{}) + if err != nil { + if apierrors.IsNotFound(err) { + return nil + } + return err + } + + if svc.Spec.Type != core.ServiceTypeLoadBalancer { + return nil + } + + previousStatus := svc.Status.LoadBalancer.DeepCopy() + newStatus, err := k.getStatus(svc) + if err != nil { + return err + } + + return k.patchStatus(svc, previousStatus, newStatus) +} + +// getDaemonSet returns the DaemonSet that should exist for the Service. +func (k *k8e) getDaemonSet(svc *core.Service) (*apps.DaemonSet, error) { + return k.daemonsetCache.Get(k.LBNamespace, generateName(svc)) +} + +// getStatus returns a LoadBalancerStatus listing ingress IPs for all ready pods +// matching the selected service. +func (k *k8e) getStatus(svc *core.Service) (*core.LoadBalancerStatus, error) { + pods, err := k.podCache.List(k.LBNamespace, labels.SelectorFromSet(map[string]string{ + svcNameLabel: svc.Name, + svcNamespaceLabel: svc.Namespace, + })) + + if err != nil { + return nil, err + } + + expectedIPs, err := k.podIPs(pods, svc) + if err != nil { + return nil, err + } + + sort.Strings(expectedIPs) + + loadbalancer := &core.LoadBalancerStatus{} + for _, ip := range expectedIPs { + loadbalancer.Ingress = append(loadbalancer.Ingress, core.LoadBalancerIngress{ + IP: ip, + }) + } + + return loadbalancer, nil +} + +// patchStatus patches the service status. If the status has not changed, this function is a no-op. +func (k *k8e) patchStatus(svc *core.Service, previousStatus, newStatus *core.LoadBalancerStatus) error { + if servicehelper.LoadBalancerStatusEqual(previousStatus, newStatus) { + return nil + } + + updated := svc.DeepCopy() + updated.Status.LoadBalancer = *newStatus + _, err := servicehelper.PatchService(k.client.CoreV1(), svc, updated) + if err == nil { + if len(newStatus.Ingress) == 0 { + k.recorder.Event(svc, core.EventTypeWarning, "UnAvailableLoadBalancer", "There are no available nodes for LoadBalancer") + } else { + k.recorder.Eventf(svc, core.EventTypeNormal, "UpdatedLoadBalancer", "Updated LoadBalancer with new IPs: %v -> %v", ingressToString(previousStatus.Ingress), ingressToString(newStatus.Ingress)) + } + } + return err +} + +// podIPs returns a list of IPs for Nodes hosting ServiceLB Pods.
+// If at least one node has External IPs available, only external IPs are returned. +// If no nodes have External IPs set, the Internal IPs of all nodes running pods are returned. +func (k *k8e) podIPs(pods []*core.Pod, svc *core.Service) ([]string, error) { + // Go doesn't have sets so we stuff things into a map of bools and then get lists of keys + // to determine the unique set of IPs in use by pods. + extIPs := map[string]bool{} + intIPs := map[string]bool{} + + for _, pod := range pods { + if pod.Spec.NodeName == "" || pod.Status.PodIP == "" { + continue + } + if !Ready.IsTrue(pod) { + continue + } + + node, err := k.nodeCache.Get(pod.Spec.NodeName) + if apierrors.IsNotFound(err) { + continue + } else if err != nil { + return nil, err + } + + for _, addr := range node.Status.Addresses { + if addr.Type == core.NodeExternalIP { + extIPs[addr.Address] = true + } else if addr.Type == core.NodeInternalIP { + intIPs[addr.Address] = true + } + } + } + + keys := func(addrs map[string]bool) (ips []string) { + for k := range addrs { + ips = append(ips, k) + } + return ips + } + + var ips []string + if len(extIPs) > 0 { + ips = keys(extIPs) + } else { + ips = keys(intIPs) + } + + ips, err := filterByIPFamily(ips, svc) + if err != nil { + return nil, err + } + + if len(ips) > 0 && k.Rootless { + return []string{"127.0.0.1"}, nil + } + + return ips, nil +} + +// filterByIPFamily filters ips based on dual-stack parameters of the service +func filterByIPFamily(ips []string, svc *core.Service) ([]string, error) { + var ipFamilyPolicy core.IPFamilyPolicyType + var ipv4Addresses []string + var ipv6Addresses []string + + for _, ip := range ips { + if utilsnet.IsIPv4String(ip) { + ipv4Addresses = append(ipv4Addresses, ip) + } + if utilsnet.IsIPv6String(ip) { + ipv6Addresses = append(ipv6Addresses, ip) + } + } + + if svc.Spec.IPFamilyPolicy != nil { + ipFamilyPolicy = *svc.Spec.IPFamilyPolicy + } + + switch ipFamilyPolicy { + case core.IPFamilyPolicySingleStack: + if svc.Spec.IPFamilies[0] == core.IPv4Protocol { + return ipv4Addresses, nil + } + if svc.Spec.IPFamilies[0] == core.IPv6Protocol { + return ipv6Addresses, nil + } + case core.IPFamilyPolicyPreferDualStack: + if svc.Spec.IPFamilies[0] == core.IPv4Protocol { + ipAddresses := append(ipv4Addresses, ipv6Addresses...) + return ipAddresses, nil + } + if svc.Spec.IPFamilies[0] == core.IPv6Protocol { + ipAddresses := append(ipv6Addresses, ipv4Addresses...) + return ipAddresses, nil + } + case core.IPFamilyPolicyRequireDualStack: + if (len(ipv4Addresses) == 0) || (len(ipv6Addresses) == 0) { + return nil, errors.New("one or more IP families did not have addresses available for service with ipFamilyPolicy=RequireDualStack") + } + if svc.Spec.IPFamilies[0] == core.IPv4Protocol { + ipAddresses := append(ipv4Addresses, ipv6Addresses...) + return ipAddresses, nil + } + if svc.Spec.IPFamilies[0] == core.IPv6Protocol { + ipAddresses := append(ipv6Addresses, ipv4Addresses...) + return ipAddresses, nil + } + } + + return nil, errors.New("unhandled ipFamilyPolicy") +} + +// deployDaemonSet ensures that there is a DaemonSet for the service. 
+func (k *k8e) deployDaemonSet(ctx context.Context, svc *core.Service) error { + ds, err := k.newDaemonSet(svc) + if err != nil { + return err + } + + defer k.recorder.Eventf(svc, core.EventTypeNormal, "AppliedDaemonSet", "Applied LoadBalancer DaemonSet %s/%s", ds.Namespace, ds.Name) + return k.processor.WithContext(ctx).WithOwner(svc).Apply(objectset.NewObjectSet(ds)) +} + +// deleteDaemonSet ensures that there are no DaemonSets for the given service. +func (k *k8e) deleteDaemonSet(ctx context.Context, svc *core.Service) error { + name := generateName(svc) + if err := k.client.AppsV1().DaemonSets(k.LBNamespace).Delete(ctx, name, meta.DeleteOptions{}); err != nil { + if apierrors.IsNotFound(err) { + return nil + } + return err + } + defer k.recorder.Eventf(svc, core.EventTypeNormal, "DeletedDaemonSet", "Deleted LoadBalancer DaemonSet %s/%s", k.LBNamespace, name) + return nil +} + +// newDaemonSet creates a DaemonSet to ensure that ServiceLB pods are run on +// each eligible node. +func (k *k8e) newDaemonSet(svc *core.Service) (*apps.DaemonSet, error) { + name := generateName(svc) + oneInt := intstr.FromInt(1) + + sourceRanges, err := servicehelper.GetLoadBalancerSourceRanges(svc) + if err != nil { + return nil, err + } + + ds := &apps.DaemonSet{ + ObjectMeta: meta.ObjectMeta{ + Name: name, + Namespace: k.LBNamespace, + Labels: map[string]string{ + nodeSelectorLabel: "false", + svcNameLabel: svc.Name, + svcNamespaceLabel: svc.Namespace, + }, + }, + TypeMeta: meta.TypeMeta{ + Kind: "DaemonSet", + APIVersion: "apps/v1", + }, + Spec: apps.DaemonSetSpec{ + Selector: &meta.LabelSelector{ + MatchLabels: map[string]string{ + "app": name, + }, + }, + Template: core.PodTemplateSpec{ + ObjectMeta: meta.ObjectMeta{ + Labels: map[string]string{ + "app": name, + svcNameLabel: svc.Name, + svcNamespaceLabel: svc.Namespace, + }, + }, + Spec: core.PodSpec{ + ServiceAccountName: "svclb", + AutomountServiceAccountToken: utilpointer.Bool(false), + }, + }, + UpdateStrategy: apps.DaemonSetUpdateStrategy{ + Type: apps.RollingUpdateDaemonSetStrategyType, + RollingUpdate: &apps.RollingUpdateDaemonSet{ + MaxUnavailable: &oneInt, + }, + }, + }, + } + + var sysctls []core.Sysctl + for _, ipFamily := range svc.Spec.IPFamilies { + switch ipFamily { + case core.IPv4Protocol: + sysctls = append(sysctls, core.Sysctl{Name: "net.ipv4.ip_forward", Value: "1"}) + case core.IPv6Protocol: + sysctls = append(sysctls, core.Sysctl{Name: "net.ipv6.conf.all.forwarding", Value: "1"}) + } + } + + ds.Spec.Template.Spec.SecurityContext = &core.PodSecurityContext{Sysctls: sysctls} + + for _, port := range svc.Spec.Ports { + portName := fmt.Sprintf("lb-%s-%d", strings.ToLower(string(port.Protocol)), port.Port) + container := core.Container{ + Name: portName, + Image: k.LBImage, + ImagePullPolicy: core.PullIfNotPresent, + Ports: []core.ContainerPort{ + { + Name: portName, + ContainerPort: port.Port, + HostPort: port.Port, + Protocol: port.Protocol, + }, + }, + Env: []core.EnvVar{ + { + Name: "SRC_PORT", + Value: strconv.Itoa(int(port.Port)), + }, + { + Name: "SRC_RANGES", + Value: strings.Join(sourceRanges.StringSlice(), " "), + }, + { + Name: "DEST_PROTO", + Value: string(port.Protocol), + }, + { + Name: "DEST_PORT", + Value: strconv.Itoa(int(port.Port)), + }, + { + Name: "DEST_IPS", + Value: strings.Join(svc.Spec.ClusterIPs, " "), + }, + }, + SecurityContext: &core.SecurityContext{ + Capabilities: &core.Capabilities{ + Add: []core.Capability{ + "NET_ADMIN", + }, + }, + }, + } + + ds.Spec.Template.Spec.Containers = 
append(ds.Spec.Template.Spec.Containers, container) + } + + // Add toleration to node-role.kubernetes.io/master=*:NoSchedule + masterToleration := core.Toleration{ + Key: "node-role.kubernetes.io/master", + Operator: "Exists", + Effect: "NoSchedule", + } + ds.Spec.Template.Spec.Tolerations = append(ds.Spec.Template.Spec.Tolerations, masterToleration) + + // Add toleration to node-role.kubernetes.io/control-plane=*:NoSchedule + controlPlaneToleration := core.Toleration{ + Key: "node-role.kubernetes.io/control-plane", + Operator: "Exists", + Effect: "NoSchedule", + } + ds.Spec.Template.Spec.Tolerations = append(ds.Spec.Template.Spec.Tolerations, controlPlaneToleration) + + // Add toleration to CriticalAddonsOnly + criticalAddonsOnlyToleration := core.Toleration{ + Key: "CriticalAddonsOnly", + Operator: "Exists", + } + ds.Spec.Template.Spec.Tolerations = append(ds.Spec.Template.Spec.Tolerations, criticalAddonsOnlyToleration) + + // Add node selector only if label "svccontroller.k3s.cattle.io/enablelb" exists on the nodes + enableNodeSelector, err := k.nodeHasDaemonSetLabel() + if err != nil { + return nil, err + } + if enableNodeSelector { + ds.Spec.Template.Spec.NodeSelector = map[string]string{ + daemonsetNodeLabel: "true", + } + // Add node selector for "svccontroller.k3s.cattle.io/lbpool=" if service has lbpool label + if svc.Labels[daemonsetNodePoolLabel] != "" { + ds.Spec.Template.Spec.NodeSelector[daemonsetNodePoolLabel] = svc.Labels[daemonsetNodePoolLabel] + } + ds.Labels[nodeSelectorLabel] = "true" + } + return ds, nil +} + +// updateDaemonSets ensures that our DaemonSets have a NodeSelector present if one is enabled, +// and do not have one if it is not. Nodes are checked for this label when the DaemonSet is generated, +// but node labels may change between Service updates and the NodeSelector needs to be updated appropriately. +func (k *k8e) updateDaemonSets() error { + enableNodeSelector, err := k.nodeHasDaemonSetLabel() + if err != nil { + return err + } + + nodeSelector := labels.SelectorFromSet(map[string]string{nodeSelectorLabel: fmt.Sprintf("%t", !enableNodeSelector)}) + daemonsets, err := k.daemonsetCache.List(k.LBNamespace, nodeSelector) + if err != nil { + return err + } + + for _, ds := range daemonsets { + ds.Labels[nodeSelectorLabel] = fmt.Sprintf("%t", enableNodeSelector) + ds.Spec.Template.Spec.NodeSelector = map[string]string{} + if enableNodeSelector { + ds.Spec.Template.Spec.NodeSelector[daemonsetNodeLabel] = "true" + } + if _, err := k.client.AppsV1().DaemonSets(ds.Namespace).Update(context.TODO(), ds, meta.UpdateOptions{}); err != nil { + return err + } + } + return nil +} + +// nodeHasDaemonSetLabel returns true if any node is labeled for inclusion or exclusion +// from use by ServiceLB. If any node is labeled, only nodes with a label value of "true" +// will be used. +func (k *k8e) nodeHasDaemonSetLabel() (bool, error) { + selector, err := labels.Parse(daemonsetNodeLabel) + if err != nil { + return false, err + } + nodesWithLabel, err := k.nodeCache.List(selector) + return len(nodesWithLabel) > 0, err +} + +// deleteAllDaemonsets deletes all daemonsets created by this controller +func (k *k8e) deleteAllDaemonsets(ctx context.Context) error { + return k.client.AppsV1().DaemonSets(k.LBNamespace).DeleteCollection(ctx, meta.DeleteOptions{}, meta.ListOptions{LabelSelector: nodeSelectorLabel}) +} + +// removeServiceFinalizers ensures that there are no finalizers left on any services.
+// Previous implementations of the servicelb controller manually added finalizers to services it managed; +// these need to be removed in order to release ownership to the cloud provider implementation. +func (k *k8e) removeServiceFinalizers(ctx context.Context) error { + services, err := k.client.CoreV1().Services(meta.NamespaceAll).List(ctx, meta.ListOptions{}) + if err != nil { + return err + } + + var errs merr.Errors + for _, svc := range services.Items { + if err := retry.RetryOnConflict(retry.DefaultRetry, func() error { + s, err := k.removeFinalizer(ctx, &svc) + svc = *s + return err + }); err != nil { + errs = append(errs, err) + } + } + + if len(errs) > 0 { + return errs + } + return nil +} + +// removeFinalizer ensures that there is not a finalizer for this controller on the Service +func (k *k8e) removeFinalizer(ctx context.Context, svc *core.Service) (*core.Service, error) { + var found bool + for k, v := range svc.Finalizers { + if v != finalizerName { + continue + } + found = true + svc.Finalizers = append(svc.Finalizers[:k], svc.Finalizers[k+1:]...) + } + + if found { + return k.client.CoreV1().Services(svc.Namespace).Update(ctx, svc, meta.UpdateOptions{}) + } + return svc, nil +} + +// generateName generates a distinct name for the DaemonSet based on the service name and UID +func generateName(svc *core.Service) string { + return fmt.Sprintf("svclb-%s-%s", svc.Name, svc.UID[:8]) +} + +// ingressToString converts a list of LoadBalancerIngress entries to strings +func ingressToString(ingresses []core.LoadBalancerIngress) []string { + parts := make([]string, len(ingresses)) + for i, ingress := range ingresses { + if ingress.IP != "" { + parts[i] = ingress.IP + } else { + parts[i] = ingress.Hostname + } + } + return parts +} diff --git a/pkg/cluster/https.go b/pkg/cluster/https.go index 72f83ee96..502f0d011 100644 --- a/pkg/cluster/https.go +++ b/pkg/cluster/https.go @@ -5,7 +5,7 @@ import ( "crypto/tls" "errors" "fmt" - "io/ioutil" + "io" "log" "net" "net/http" @@ -115,7 +115,7 @@ func (c *Cluster) initClusterAndHTTPS(ctx context.Context) error { if logrus.IsLevelEnabled(logrus.DebugLevel) { server.ErrorLog = log.New(logrus.StandardLogger().Writer(), "Cluster-Http-Server ", log.LstdFlags) } else { - server.ErrorLog = log.New(ioutil.Discard, "Cluster-Http-Server", 0) + server.ErrorLog = log.New(io.Discard, "Cluster-Http-Server", 0) } // Start the supervisor http server on the tls listener diff --git a/pkg/cluster/storage.go b/pkg/cluster/storage.go index a56f909f9..ee8afb050 100644 --- a/pkg/cluster/storage.go +++ b/pkg/cluster/storage.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "errors" - "io/ioutil" "os" "path/filepath" "strings" @@ -183,7 +182,7 @@ func getBootstrapKeyFromStorage(ctx context.Context, storageClient client.Client func readTokenFromFile(serverToken, certs, dataDir string) (string, error) { tokenFile := filepath.Join(dataDir, "token") - b, err := ioutil.ReadFile(tokenFile) + b, err := os.ReadFile(tokenFile) if err != nil { if os.IsNotExist(err) { token, err := clientaccess.FormatToken(serverToken, certs) diff --git a/pkg/configfilearg/parser.go b/pkg/configfilearg/parser.go index 7fa18eb11..3848c26f2 100644 --- a/pkg/configfilearg/parser.go +++ b/pkg/configfilearg/parser.go @@ -2,7 +2,7 @@ package configfilearg import ( "fmt" - "io/ioutil" + "io" "net/http" "net/url" "os" @@ -183,7 +183,7 @@ func (p *Parser) findStart(args []string) ([]string, []string, bool) { } func dotDFiles(basefile string) (result []string, _ error) { - files, err := 
ioutil.ReadDir(basefile + ".d") + files, err := os.ReadDir(basefile + ".d") if os.IsNotExist(err) { return nil, nil } else if err != nil { @@ -295,8 +295,8 @@ func readConfigFileData(file string) ([]byte, error) { return nil, fmt.Errorf("failed to read http config %s: %w", file, err) } defer resp.Body.Close() - return ioutil.ReadAll(resp.Body) + return io.ReadAll(resp.Body) default: - return ioutil.ReadFile(file) + return os.ReadFile(file) } } diff --git a/pkg/daemons/agent/agent_linux.go b/pkg/daemons/agent/agent_linux.go index 141c9b3d3..6f6df67aa 100644 --- a/pkg/daemons/agent/agent_linux.go +++ b/pkg/daemons/agent/agent_linux.go @@ -173,5 +173,9 @@ func kubeletArgs(cfg *config.Agent) map[string]string { argsMap["protect-kernel-defaults"] = "true" } + if !cfg.DisableServiceLB { + argsMap["allowed-unsafe-sysctls"] = "net.ipv4.ip_forward,net.ipv6.conf.all.forwarding" + } + return argsMap } diff --git a/pkg/daemons/config/types.go b/pkg/daemons/config/types.go index bc3cc93c8..1c898006d 100644 --- a/pkg/daemons/config/types.go +++ b/pkg/daemons/config/types.go @@ -26,16 +26,8 @@ const ( EgressSelectorModePod = "pod" CertificateRenewDays = 90 StreamServerPort = "10010" - KubeletPort = "10250" ) -// These ports can always be accessed via the tunnel server, at the loopback address. -// Other addresses and ports are only accessible via the tunnel on newer agents, when used by a pod. -var KubeletReservedPorts = map[string]bool{ - StreamServerPort: true, - KubeletPort: true, -} - type Node struct { Docker bool ContainerRuntimeEndpoint string @@ -113,6 +105,7 @@ type Agent struct { DisableCCM bool Rootless bool ProtectKernelDefaults bool + DisableServiceLB bool EnableIPv4 bool EnableIPv6 bool } @@ -156,6 +149,9 @@ type Control struct { DisableETCD bool DisableKubeProxy bool DisableScheduler bool + DisableServiceLB bool + Rootless bool + ServiceLBNamespace string EnablePProf bool ExtraAPIArgs []string ExtraControllerArgs []string @@ -286,7 +282,8 @@ type ControlRuntime struct { Tunnel http.Handler Authenticator authenticator.Request - EgressSelectorConfig string + EgressSelectorConfig string + CloudControllerConfig string ClientAuthProxyCert string ClientAuthProxyKey string diff --git a/pkg/daemons/control/deps/deps.go b/pkg/daemons/control/deps/deps.go index 1b774ce2a..aa35a11fd 100644 --- a/pkg/daemons/control/deps/deps.go +++ b/pkg/daemons/control/deps/deps.go @@ -10,7 +10,6 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" "net" "os" "path/filepath" @@ -21,6 +20,7 @@ import ( certutil "github.com/rancher/dynamiclistener/cert" "github.com/sirupsen/logrus" "github.com/xiaods/k8e/pkg/clientaccess" + "github.com/xiaods/k8e/pkg/cloudprovider" "github.com/xiaods/k8e/pkg/daemons/config" "github.com/xiaods/k8e/pkg/passwd" "github.com/xiaods/k8e/pkg/token" @@ -139,6 +139,7 @@ func CreateRuntimeCertFiles(config *config.Control) { runtime.ServingKubeletKey = filepath.Join(config.DataDir, "tls", "serving-kubelet.key") runtime.EgressSelectorConfig = filepath.Join(config.DataDir, "etc", "egress-selector-config.yaml") + runtime.CloudControllerConfig = filepath.Join(config.DataDir, "etc", "cloud-config.yaml") runtime.ClientAuthProxyCert = filepath.Join(config.DataDir, "tls", "client-auth-proxy.crt") runtime.ClientAuthProxyKey = filepath.Join(config.DataDir, "tls", "client-auth-proxy.key") @@ -188,6 +189,10 @@ func GenServerDeps(config *config.Control) error { return err } + if err := genCloudConfig(config); err != nil { + return err + } + return readTokens(runtime) } @@ -249,7 +254,7 @@ func 
genUsers(config *config.Control) error { func genEncryptedNetworkInfo(controlConfig *config.Control) error { runtime := controlConfig.Runtime if s, err := os.Stat(runtime.IPSECKey); err == nil && s.Size() > 0 { - psk, err := ioutil.ReadFile(runtime.IPSECKey) + psk, err := os.ReadFile(runtime.IPSECKey) if err != nil { return err } @@ -263,7 +268,7 @@ func genEncryptedNetworkInfo(controlConfig *config.Control) error { } controlConfig.IPSECPSK = psk - return ioutil.WriteFile(runtime.IPSECKey, []byte(psk+"\n"), 0600) + return os.WriteFile(runtime.IPSECKey, []byte(psk+"\n"), 0600) } func getServerPass(passwd *passwd.Passwd, config *config.Control) (string, error) { @@ -668,13 +673,13 @@ func genEncryptionConfigAndState(controlConfig *config.Control) error { if s, err := os.Stat(runtime.EncryptionConfig); err == nil && s.Size() > 0 { // On upgrade from older versions, the encryption hash may not exist, create it if _, err := os.Stat(runtime.EncryptionHash); errors.Is(err, os.ErrNotExist) { - curEncryptionByte, err := ioutil.ReadFile(runtime.EncryptionConfig) + curEncryptionByte, err := os.ReadFile(runtime.EncryptionConfig) if err != nil { return err } encryptionConfigHash := sha256.Sum256(curEncryptionByte) ann := "start-" + hex.EncodeToString(encryptionConfigHash[:]) - return ioutil.WriteFile(controlConfig.Runtime.EncryptionHash, []byte(ann), 0600) + return os.WriteFile(controlConfig.Runtime.EncryptionHash, []byte(ann), 0600) } return nil } @@ -716,12 +721,12 @@ func genEncryptionConfigAndState(controlConfig *config.Control) error { if err != nil { return err } - if err := ioutil.WriteFile(runtime.EncryptionConfig, b, 0600); err != nil { + if err := os.WriteFile(runtime.EncryptionConfig, b, 0600); err != nil { return err } encryptionConfigHash := sha256.Sum256(b) ann := "start-" + hex.EncodeToString(encryptionConfigHash[:]) - return ioutil.WriteFile(controlConfig.Runtime.EncryptionHash, []byte(ann), 0600) + return os.WriteFile(controlConfig.Runtime.EncryptionHash, []byte(ann), 0600) } func genEgressSelectorConfig(controlConfig *config.Control) error { @@ -764,5 +769,24 @@ func genEgressSelectorConfig(controlConfig *config.Control) error { if err != nil { return err } - return ioutil.WriteFile(controlConfig.Runtime.EgressSelectorConfig, b, 0600) + return os.WriteFile(controlConfig.Runtime.EgressSelectorConfig, b, 0600) +} + +func genCloudConfig(controlConfig *config.Control) error { + cloudConfig := cloudprovider.Config{ + LBEnabled: !controlConfig.DisableServiceLB, + LBNamespace: controlConfig.ServiceLBNamespace, + LBImage: cloudprovider.DefaultLBImage, + Rootless: controlConfig.Rootless, + NodeEnabled: !controlConfig.DisableCCM, + } + if controlConfig.SystemDefaultRegistry != "" { + cloudConfig.LBImage = controlConfig.SystemDefaultRegistry + "/" + cloudConfig.LBImage + } + b, err := json.Marshal(cloudConfig) + if err != nil { + return err + } + return os.WriteFile(controlConfig.Runtime.CloudControllerConfig, b, 0600) + } diff --git a/pkg/daemons/control/proxy/proxy.go b/pkg/daemons/control/proxy/proxy.go index 426d3e81d..455534302 100644 --- a/pkg/daemons/control/proxy/proxy.go +++ b/pkg/daemons/control/proxy/proxy.go @@ -2,7 +2,6 @@ package proxy import ( "io" - "net" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -14,7 +13,7 @@ type proxy struct { errc chan error } -func Proxy(lconn, rconn net.Conn) error { +func Proxy(lconn, rconn io.ReadWriteCloser) error { p := &proxy{ lconn: lconn, rconn: rconn, diff --git a/pkg/daemons/control/server.go b/pkg/daemons/control/server.go index 
c33da0c1a..8319d1aca 100644 --- a/pkg/daemons/control/server.go +++ b/pkg/daemons/control/server.go @@ -81,7 +81,7 @@ func Server(ctx context.Context, cfg *config.Control) error { } } - if !cfg.DisableCCM { + if !cfg.DisableCCM || !cfg.DisableServiceLB { if err := cloudControllerManager(ctx, cfg); err != nil { return err } @@ -301,9 +301,12 @@ func cloudControllerManager(ctx context.Context, cfg *config.Control) error { argsMap := map[string]string{ "profiling": "false", "allocate-node-cidrs": "true", + "leader-elect-resource-name": version.Program + "-cloud-controller-manager", "cloud-provider": version.Program, + "cloud-config": runtime.CloudControllerConfig, "cluster-cidr": util.JoinIPNets(cfg.ClusterIPRanges), "configure-cloud-routes": "false", + "controllers": "*,-route", "kubeconfig": runtime.KubeConfigCloudController, "authorization-kubeconfig": runtime.KubeConfigCloudController, "authentication-kubeconfig": runtime.KubeConfigCloudController, @@ -313,6 +316,12 @@ func cloudControllerManager(ctx context.Context, cfg *config.Control) error { if cfg.NoLeaderElect { argsMap["leader-elect"] = "false" } + if cfg.DisableCCM { + argsMap["controllers"] = argsMap["controllers"] + ",-cloud-node,-cloud-node-lifecycle" + } + if cfg.DisableServiceLB { + argsMap["controllers"] = argsMap["controllers"] + ",-service" + } args := config.GetArgs(argsMap, cfg.ExtraCloudControllerArgs) logrus.Infof("Running cloud-controller-manager %s", config.ArgString(args)) @@ -371,9 +380,9 @@ func checkForCloudControllerPrivileges(ctx context.Context, runtime *config.Cont User: version.Program + "-cloud-controller-manager", ResourceAttributes: &authorizationv1.ResourceAttributes{ Namespace: metav1.NamespaceSystem, - Verb: "get", - Resource: "configmaps", - Name: "extension-apiserver-authentication", + Verb: "*", + Resource: "daemonsets", + Group: "apps", }, }, } diff --git a/pkg/daemons/control/tunnel.go b/pkg/daemons/control/tunnel.go index a2eba978c..d7cae80d2 100644 --- a/pkg/daemons/control/tunnel.go +++ b/pkg/daemons/control/tunnel.go @@ -1,10 +1,13 @@ package control import ( + "bufio" "context" "fmt" + "io" "net" "net/http" + "strconv" "strings" "sync" "time" @@ -75,15 +78,21 @@ type TunnelServer struct { var _ cidranger.RangerEntry = &tunnelEntry{} type tunnelEntry struct { - cidr net.IPNet - nodeName string - node bool + kubeletPort string + nodeName string + cidr net.IPNet } func (n *tunnelEntry) Network() net.IPNet { return n.cidr } +// Some ports can always be accessed via the tunnel server, at the loopback address. +// Other addresses and ports are only accessible via the tunnel on newer agents, when used by a pod. 
+func (n *tunnelEntry) IsReservedPort(port string) bool { + return n.kubeletPort != "" && (port == n.kubeletPort || port == config.StreamServerPort) +} + // ServeHTTP handles either CONNECT requests, or websocket requests to the remotedialer server func (t *TunnelServer) ServeHTTP(resp http.ResponseWriter, req *http.Request) { logrus.Debugf("Tunnel server handing %s %s request for %s from %s", req.Proto, req.Method, req.URL, req.RemoteAddr) @@ -132,7 +141,8 @@ func (t *TunnelServer) onChangeNode(nodeName string, node *v1.Node) (*v1.Node, e t.cidrs.Remove(*n) } else { logrus.Debugf("Tunnel server egress proxy updating Node %s IP %v", nodeName, n) - t.cidrs.Insert(&tunnelEntry{cidr: *n, nodeName: nodeName, node: true}) + kubeletPort := strconv.FormatInt(int64(node.Status.DaemonEndpoints.KubeletEndpoint.Port), 10) + t.cidrs.Insert(&tunnelEntry{cidr: *n, nodeName: nodeName, kubeletPort: kubeletPort}) } } } @@ -188,7 +198,7 @@ func (t *TunnelServer) serveConnect(resp http.ResponseWriter, req *http.Request) } resp.WriteHeader(http.StatusOK) - rconn, _, err := hijacker.Hijack() + rconn, bufrw, err := hijacker.Hijack() if err != nil { responsewriters.ErrorNegotiated( apierrors.NewInternalError(err), @@ -197,7 +207,7 @@ func (t *TunnelServer) serveConnect(resp http.ResponseWriter, req *http.Request) return } - proxy.Proxy(rconn, bconn) + proxy.Proxy(newConnReadWriteCloser(rconn, bufrw), bconn) } // dialBackend determines where to route the connection request to, and returns @@ -220,7 +230,7 @@ func (t *TunnelServer) dialBackend(ctx context.Context, addr string) (net.Conn, if nets, err := t.cidrs.ContainingNetworks(ip); err == nil && len(nets) > 0 { if n, ok := nets[0].(*tunnelEntry); ok { nodeName = n.nodeName - if n.node && config.KubeletReservedPorts[port] { + if n.IsReservedPort(port) { toKubelet = true useTunnel = true } else { @@ -270,3 +280,32 @@ func (t *TunnelServer) dialBackend(ctx context.Context, addr string) (net.Conn, logrus.Debugf("Tunnel server egress proxy dialing %s directly", addr) return defaultDialer.DialContext(ctx, "tcp", addr) } + +// connReadWriteCloser bundles a net.Conn and a wrapping bufio.ReadWriter together into a type that +// meets the ReadWriteCloser interface. The http.Hijacker interface returns such a pair, and reads +// need to go through the buffered reader (because the http handler may have already read from the +// underlying connection), but writes and closes need to hit the connection directly. 
+type connReadWriteCloser struct { + conn net.Conn + once sync.Once + rw *bufio.ReadWriter +} + +var _ io.ReadWriteCloser = &connReadWriteCloser{} + +func newConnReadWriteCloser(conn net.Conn, rw *bufio.ReadWriter) *connReadWriteCloser { + return &connReadWriteCloser{conn: conn, rw: rw} +} + +func (crw *connReadWriteCloser) Read(p []byte) (n int, err error) { + return crw.rw.Read(p) +} + +func (crw *connReadWriteCloser) Write(b []byte) (n int, err error) { + return crw.conn.Write(b) +} + +func (crw *connReadWriteCloser) Close() (err error) { + crw.once.Do(func() { err = crw.conn.Close() }) + return +} diff --git a/pkg/daemons/executor/embed.go b/pkg/daemons/executor/embed.go index 7cd3cb894..8e31c1cf7 100644 --- a/pkg/daemons/executor/embed.go +++ b/pkg/daemons/executor/embed.go @@ -18,7 +18,6 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" k8sruntime "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" "k8s.io/apiserver/pkg/authentication/authenticator" typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" @@ -164,7 +163,7 @@ func (*Embedded) CloudControllerManager(ctx context.Context, ccmRBACReady <-chan } cloudInitializer := func(config *cloudcontrollerconfig.CompletedConfig) cloudprovider.Interface { - cloud, err := ccm.InitCloudProvider(version.Program, "") + cloud, err := ccm.InitCloudProvider(version.Program, config.ComponentConfig.KubeCloudShared.CloudProvider.CloudConfigFile) if err != nil { logrus.Fatalf("Cloud provider could not be initialized: %v", err) } @@ -172,19 +171,10 @@ func (*Embedded) CloudControllerManager(ctx context.Context, ccmRBACReady <-chan logrus.Fatalf("Cloud provider is nil") } - cloud.Initialize(config.ClientBuilder, make(chan struct{})) - if informerUserCloud, ok := cloud.(ccm.InformerUser); ok { - informerUserCloud.SetInformers(config.SharedInformers) - } - return cloud } - controllerInitializers := ccmapp.DefaultInitFuncConstructors - delete(controllerInitializers, "service") - delete(controllerInitializers, "route") - - command := ccmapp.NewCloudControllerManagerCommand(ccmOptions, cloudInitializer, controllerInitializers, cliflag.NamedFlagSets{}, wait.NeverStop) + command := ccmapp.NewCloudControllerManagerCommand(ccmOptions, cloudInitializer, ccmapp.DefaultInitFuncConstructors, cliflag.NamedFlagSets{}, ctx.Done()) command.SetArgs(args) go func() { diff --git a/pkg/daemons/executor/etcd.go b/pkg/daemons/executor/etcd.go index 7fe47389b..a65c2dec7 100644 --- a/pkg/daemons/executor/etcd.go +++ b/pkg/daemons/executor/etcd.go @@ -3,7 +3,7 @@ package executor import ( "context" "errors" - "io/ioutil" + "os" "path/filepath" "github.com/sirupsen/logrus" @@ -37,7 +37,7 @@ func (e *Embedded) ETCD(ctx context.Context, args ETCDConfig, extraArgs []string case err := <-etcd.Server.ErrNotify(): if errors.Is(err, rafthttp.ErrMemberRemoved) { tombstoneFile := filepath.Join(args.DataDir, "tombstone") - if err := ioutil.WriteFile(tombstoneFile, []byte{}, 0600); err != nil { + if err := os.WriteFile(tombstoneFile, []byte{}, 0600); err != nil { logrus.Fatalf("failed to write tombstone file to %s", tombstoneFile) } logrus.Infof("this node has been removed from the cluster please restart %s to rejoin the cluster", version.Program) diff --git a/pkg/daemons/executor/executor.go b/pkg/daemons/executor/executor.go index 530ff86f5..cb6cb6f4c 100644 --- a/pkg/daemons/executor/executor.go +++ b/pkg/daemons/executor/executor.go @@ -2,7 +2,6 @@ package executor import ( "context" - "io/ioutil" 
"net/http" "os" "path/filepath" @@ -123,7 +122,7 @@ func (e ETCDConfig) ToConfigFile(extraArgs []string) (string, error) { if err := os.MkdirAll(e.DataDir, 0700); err != nil { return "", err } - return confFile, ioutil.WriteFile(confFile, bytes, 0600) + return confFile, os.WriteFile(confFile, bytes, 0600) } func Set(driver Executor) { diff --git a/pkg/deploy/controller.go b/pkg/deploy/controller.go index b18bacc47..b40d8f663 100644 --- a/pkg/deploy/controller.go +++ b/pkg/deploy/controller.go @@ -7,7 +7,6 @@ import ( "crypto/sha256" "encoding/hex" "io" - "io/ioutil" "os" "path/filepath" "sort" @@ -176,7 +175,7 @@ func (w *watcher) deploy(path string, compareChecksum bool) error { addon = *newAddon } - content, err := ioutil.ReadFile(path) + content, err := os.ReadFile(path) if err != nil { w.recorder.Eventf(&addon, corev1.EventTypeWarning, "ReadManifestFailed", "Read manifest at %q failed: %v", path, err) return err @@ -224,7 +223,7 @@ func (w *watcher) delete(path string) error { return err } - content, err := ioutil.ReadFile(path) + content, err := os.ReadFile(path) if err != nil { w.recorder.Eventf(&addon, corev1.EventTypeWarning, "ReadManifestFailed", "Read manifest at %q failed: %v", path, err) } else { diff --git a/pkg/deploy/stage.go b/pkg/deploy/stage.go index 4101ca2fd..b4be10cc2 100644 --- a/pkg/deploy/stage.go +++ b/pkg/deploy/stage.go @@ -4,7 +4,6 @@ package deploy import ( "bytes" - "io/ioutil" "os" "path/filepath" "strings" @@ -38,7 +37,7 @@ staging: p := filepath.Join(dataDir, name) os.MkdirAll(filepath.Dir(p), 0700) logrus.Info("Writing manifest: ", p) - if err := ioutil.WriteFile(p, content, 0600); err != nil { + if err := os.WriteFile(p, content, 0600); err != nil { return errors.Wrapf(err, "failed to write to %s", name) } } diff --git a/pkg/deploy/zz_generated_bindata.go b/pkg/deploy/zz_generated_bindata.go index 71cb79d1a..793fb5cdd 100644 --- a/pkg/deploy/zz_generated_bindata.go +++ b/pkg/deploy/zz_generated_bindata.go @@ -111,7 +111,7 @@ func Ds_store() (*asset, error) { return a, nil } -var _ccmYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x94\x41\x6f\x13\x31\x10\x85\xef\xfe\x15\x56\x2f\x95\x90\x9c\x8a\x5b\xb5\x47\x38\x70\xaf\x04\xf7\x89\xfd\x48\x4d\xbc\x1e\xcb\x33\xde\x02\xbf\x1e\x39\x5b\x50\xd9\x25\x51\x12\x40\x70\xb3\xa2\x99\xef\xbd\x79\x93\x59\x2a\xf1\x03\xaa\x44\xce\x83\xad\x5b\xf2\x1b\x6a\xfa\xc8\x35\x7e\x25\x8d\x9c\x37\xfb\x7b\xd9\x44\xbe\x9b\x5e\x9b\x7d\xcc\x61\xb0\x6f\x53\x13\x45\x7d\xe0\x04\x33\x42\x29\x90\xd2\x60\xac\xcd\x34\x62\xb0\xfb\x7b\x38\x9f\xb8\x05\xe7\x39\x6b\xe5\x94\x50\xdd\x48\x99\x76\xa8\xa6\xb6\x04\x19\x8c\xb3\x54\xe2\xbb\xca\xad\x48\x6f\x74\xd6\x33\xd7\x10\xf3\x4b\x3d\x63\x6d\x85\x70\xab\x1e\xcf\x45\x09\x24\x10\x63\xed\x84\xba\x7d\xfe\x6d\x07\x9d\x01\x15\xa4\x38\x3c\x5b\x09\xfd\xb9\xd2\xb8\xb9\x59\x23\x31\x21\xeb\x02\xf9\x02\x55\x48\xfd\xe3\xc5\xd0\xcc\x61\x69\xf3\xf6\xd5\xed\x05\xbd\x77\xa2\xa4\x6d\x81\x98\xbd\x9c\x05\x11\xd4\x29\xfa\xa5\x87\x14\x45\x7f\x3d\x55\x7f\x3e\x5d\x8c\x27\xef\xb9\x1d\x4b\xef\x2c\x50\xe9\x7f\x3a\x51\x64\x9d\x38\xb5\xf1\xd8\x6e\x7f\x18\xbf\xce\x2e\x72\x28\x1c\x4f\xad\x79\x25\xf4\xb4\xda\xbb\x73\xe6\xfa\x2b\x79\x13\x73\x88\x79\x77\xf1\xb1\x70\xc2\x03\x3e\xf6\xea\xef\x63\x9e\x50\x36\xd6\xae\xcf\xf3\x2c\x1d\x69\xdb\x4f\xf0\x7a\xb8\xcb\x19\xf1\x5e\x50\xcf\xeb\x9d\x8b\xa4\x90\xef\x95\x6d\x0b\x27\x5f\x44\x31\xfe\x93\xc4\x5c\xe7\xbb\x80\x84\x1d\x29\xff\xd1\x00\xe7\xa9\x86\x85\xc0\xff\x92\xdc\x6f\x46\x86\xac\xd1\x1f\xc8\xae\x82\xc2\x29\x73\x57\x46\xfa\x53\x96\xf8\xac\xc8\x7d\x36\x47\x25\xf6\x8f\xc9\x51\x1b\x7f\x23\xdf\x6f\x01\x00\x00\xff\xff\xb7\xfc\x51\x87\xed\x06\x00\x00") +var _ccmYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x94\x4f\x8f\x13\x31\x0c\xc5\xef\xf9\x14\x51\x8f\x48\xe9\x8a\xdb\x6a\x8e\x70\xe0\xbe\x12\xdc\xdd\xe4\xd1\x0d\xcd\xc4\x51\xec\x94\x3f\x9f\x1e\xcd\x4c\x57\x0c\x1d\xb5\xea\x14\x10\x7b\xb3\xac\xf8\xe7\xe7\x67\x39\x54\xe2\x27\x54\x89\x9c\x3b\x5b\x77\xe4\xb7\xd4\xf4\x99\x6b\xfc\x41\x1a\x39\x6f\x0f\x8f\xb2\x8d\xfc\x70\x7c\x6b\x0e\x31\x87\xce\xbe\x4f\x4d\x14\xf5\x89\x13\x4c\x0f\xa5\x40\x4a\x9d\xb1\x36\x53\x8f\xce\x1e\x1e\xe1\x7c\xe2\x16\x9c\xe7\xac\x95\x53\x42\x75\x3d\x65\xda\xa3\x9a\xda\x12\xa4\x33\xce\x52\x89\x1f\x2a\xb7\x22\x43\xa1\xb3\x9e\xb9\x86\x98\xe7\xfd\x8c\xb5\x15\xc2\xad\x7a\x9c\x1e\x25\x90\x40\x8c\xb5\x47\xd4\xdd\x29\xb7\x87\x4e\x80\x0a\x52\x8c\x61\x2b\x61\x08\x17\x3d\x36\x9b\x25\x12\x47\x64\x3d\x43\xce\x50\x85\xd4\x3f\xaf\x86\x66\x0e\xe7\x32\x37\x6f\x36\x2b\x6a\x1f\x44\x49\x9b\x8c\x09\x41\x3d\x46\x3f\xcf\xcd\xb0\x93\xbe\x9b\xc0\x2f\x9c\xa9\x8e\xc3\x05\x1f\x53\x94\x29\xf8\x7a\x17\x7a\xa1\x6d\xad\x77\x27\x16\x79\xcf\xed\xd2\x66\x6e\x33\x92\x7a\x48\xa1\x85\xac\xd9\x76\x87\x99\x17\x2c\x2a\x45\x96\xb4\x40\xe8\x39\x0b\xce\x15\x8d\x7b\x75\xce\xdc\x7f\x41\xef\x62\x0e\x31\xef\x57\x1f\x12\x27\x3c\xe1\xf3\xf0\xfa\x65\x80\x2b\x9d\x8d\xb5\xcb\xd3\xbd\xa9\x8f\xb4\xdd\x17\x78\x1d\x6f\x76\x42\x7c\x14\xd4\xdb\x6a\xed\xaf\x25\x74\xf6\xd0\x76\x70\xf2\x5d\x14\xfd\x7f\x71\xcc\x0d\x7c\x17\x90\xb0\x27\xe5\xbf\x6a\xe0\x34\x55\x77\xd6\xe0\xb5\x38\xf7\x87\x96\x21\x6b\xf4\x23\xd9\x55\x50\xb8\x26\xee\x4e\x4b\x7f\xf3\x12\xdf\x14\x79\x98\xcd\x51\x89\xc3\x67\x70\x51\xc6\xbf\xf0\xf7\x67\x00\x00\x00\xff\xff\x80\x6c\x3c\x5c\x09\x07\x00\x00") func ccmYamlBytes() ([]byte, error) { return bindataRead( @@ -251,7 +251,7 @@ func metricsServerMetricsApiserviceYaml() (*asset, error) { return a, nil } -var _metricsServerMetricsServerDeploymentYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x55\xdf\x6f\x1b\x37\x0c\x7e\xf7\x5f\x41\x78\xe8\xdb\x2e\x76\xba\x76\x1b\x04\xe4\x21\x48\xdc\x36\x40\x93\x19\xb1\x3b\xa0\x4f\x85\xa2\xa3\x63\x21\x92\xa8\x91\x3c\x37\xb7\xa2\xff\xfb\x20\x5f\x7a\xb9\x4b\x93\xa2\xc3\xd6\x7b\x3a\xf0\xe3\x8f\x8f\x1f\x25\xaa\xaa\xaa\x89\xcd\xfe\x4f\x64\xf1\x94\x0c\xec\x0e\x27\x37\x3e\xd5\x06\x56\xc8\x3b\xef\xf0\xd8\x39\x6a\x92\x4e\x22\xaa\xad\xad\x5a\x33\x01\x48\x36\xa2\x81\x88\xca\xde\x49\x25\xc8\x3b\xe4\x3b\xb3\x64\xeb\xd0\xc0\x4d\x73\x85\x95\xb4\xa2\x18\x27\x0f\x2b\xd8\x9c\x65\xd6\x97\x39\xc5\x1c\xa8\x8d\xf8\x9f\x4a\x00\x04\x7b\x85\x41\x4a\x24\xc0\xcd\xef\x52\xd9\x9c\xbf\x0a\x97\x8c\xae\x78\x08\x06\x74\x4a\xdc\x79\x47\xab\x6e\xfb\x76\x10\xfe\x74\x02\x00\xc5\x98\x83\x55\xbc\x0b\x1d\x10\x2e\xdf\x13\xa4\xcb\x17\x46\x05\xbe\x55\x02\xe0\x0b\xcf\xf2\x65\xf6\xc4\x5e\xdb\x93\x60\x45\x2e\xf6\xf9\xa7\x5d\xd3\x55\xa2\x1a\x2b\xc7\x5e\xbd\xb3\x61\x7a\xe7\x2f\xa3\xa9\x5d\x3c\x4d\x48\x29\x20\x5b\xf5\x94\x06\xac\x2a\xb8\xc1\xd6\xc0\xf4\xe4\x2e\xeb\x71\x5d\x53\x92\x3f\x52\x68\xa7\xbd\x0f\x00\xe5\x12\x49\x6c\x60\xba\xb8\xf5\xa2\x32\xfd\x2a\xc1\x9e\x1b\x53\xc0\x83\x32\x26\x4e\xa8\x28\x07\x9e\x66\x8e\x92\x32\x85\x2a\x07\x9b\xf0\x3b\x73\x02\xe0\x66\x83\x4e\x0d\x4c\x2f\x68\xe5\xb6\x58\x37\x01\xbf\xbf\x64\xb4\xa2\xc8\xff\x47\xad\x1d\x85\x26\x62\x2f\xd7\x4f\x10\x8b\xc6\xe0\x13\x68\xcc\x20\x04\x1f\x11\x9c\x4d\x20\x76\x83\xa1\x85\x46\x10\x36\x4c\xb1\x12\xc7\xe5\x8c\x81\x8f\xf6\x1a\x05\x6c\xaa\x67\xc4\xc0\x68\xeb\x8a\x52\x68\xa1\x88\x62\x7d\x42\x96\xc9\x97\x96\xba\x93\xa4\x31\x57\xb5\xe7\x9e\x1d\xc6\xac\xed\xa9\x67\x03\x9f\x3e\xdf\x19\xef\x63\xcd\x83\xe0\x47\xa7\x0e\x1d\x09\x03\xcf\x3e\xad\xde\xaf\xd6\x8b\xf3\x0f\xa7\x8b\x57\xc7\xef\xde\xae\x3f\x5c\x2e\x5e\x9f\xad\xd6\x97\xef\x3f\x3f\x63\x9b\xdc\x16\x79\x16\x3d\x33\x31\xd6\xd5\x38\x93\xd9\xcd\x0f\x5e\x1e\x3c\xef\x13\x5a\xbe\x1e\x9d\xa0\xaa\x72\xc8\x5a\x78\x1f\xcd\x34\xe6\x11\x22\xe8\x1a\xc6\x2a\x13\xeb\xd1\x8b\x17\x2f\x7e\x19\x81\x65\x6c\x01\xb5\xca\x8c\x1b\xe4\x52\xd8\xd6\x35\xa3\x48\xa5\x6d\x46\x39\x3a\x4b\x8a\x9c\x6c\x38\x5b\xfe\xbc\xb8\xed\x7f\xdf\x90\x68\x69\xf8\xd1\x54\x8d\x60\x77\x4d\x44\xad\x36\xb2\x2f\x3c\x72\xec\x5a\xab\x18\x85\x42\x53\x2e\xc3\xd1\xe1\x4b\xe9\x3d\x8a\xb9\x61\x87\x83\xfe\x8a\xf1\xaf\x06\x45\x47\x36\x00\x97\x1b\x03\x87\xf3\x79\x1c\x59\x23\x46\xe2\xd6\xc0\x6f\xf3\x73\xdf\x03\x85\xc4\x48\xb1\x6e\x5e\x5b\xd5\x2c\x83\xe8\x7e\xb2\x4b\x62\x35\x30\x92\xab\xec\x05\x52\x72\x14\x0c\xac\x4f\x96\x03\xc2\xb6\xf6\x09\x45\x96\x4c\x57\x38\x64\x58\xb2\xbf\x46\x1d\x93\xce\x56\xb7\x06\x66\x25\xaa\xfd\x7b\x8c\xec\x6b\x3e\xa4\x04\x20\x6e\x8b\x85\xec\x9b\xf5\x7a\xb9\x1a\x20\x3e\x79\xf5\x36\x9c\x62\xb0\xed\x0a\x1d\xa5\x5a\x0c\xcc\x87\x7c\x91\x3d\xd5\x3d\xf4\x7c\x00\xa9\x8f\x48\x8d\xf6\xd8\xe1\x00\x93\xc6\x39\x14\x59\x6f\x19\x65\x4b\xa1\x1e\xa3\x1b\xeb\x43\xc3\x38\x40\xef\x25\x0a\x7e\x87\xff\x5a\x89\x12\xf4\x03\x84\xf8\xf5\x1b\x4a\x1c\xce\x7f\xb8\x14\xfb\x5b\x57\xde\x10\x4a\x8a\xb7\x3a\x3e\xcc\xb6\x2e\xeb\xfd\x92\x48\x5f\xf9\x80\xdd\xd3\x62\x40\xb9\xc1\xa1\x5b\x93\x8e\xe5\x82\x52\x71\x7b\x1c\x7c\x27\xc8\xfb\x0b\x30\x6c\xc7\x86\x40\x1f\x97\xec\x77\x3e\xe0\x35\x2e\xc4\xd9\xb0\x7f\x71\x0c\x6c\x6c\x90\xfb\x1c\xdd\x62\x3d\x2f\xdb\xf4\x91\x8b\xf1\x70\x0b\x42\xb7\x77\x97\xdd\xc8\x34\xe6\x7f\x02\x00\x00\xff\xff\x5e\xc9\x64\x8d\xc3\x08\x00\x00") +var _metricsServerMetricsServerDeploymentYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x55\x41\x6f\x1b\x39\x0f\xbd\xfb\x57\x10\xfe\xd0\xdb\xa7\xd8\xee\xa2\xdd\xc5\x00\x39\x04\x89\xdb\x06\x68\xb2\x46\xec\x2e\xd0\x53\xa1\x68\xe8\x58\x88\x24\x6a\x49\x8e\x9b\xd9\xa2\xff\x7d\x21\x4f\x3a\x9d\x49\x93\xa2\x8b\xdd\xce\x69\xc0\x47\x3e\x3e\x92\x12\x65\x8c\x99\xd8\xec\xff\x40\x16\x4f\xa9\x82\xfd\x62\x72\xeb\x53\x5d\xc1\x1a\x79\xef\x1d\x9e\x38\x47\x4d\xd2\x49\x44\xb5\xb5\x55\x5b\x4d\x00\x92\x8d\x58\x41\x44\x65\xef\xc4\x08\xf2\x1e\xf9\xde\x2c\xd9\x3a\xac\xe0\xb6\xb9\x46\x23\xad\x28\xc6\xc9\xc3\x0c\x36\x67\x99\xf5\x69\xce\x30\x07\x6a\x23\xfe\xab\x14\x00\xc1\x5e\x63\x90\x12\x09\x70\xfb\x9b\x18\x9b\xf3\x37\xe1\x92\xd1\x15\x0f\xc1\x80\x4e\x89\x3b\xef\x68\xd5\xed\xde\x0e\xc2\x9f\x26\x00\x50\x8c\x39\x58\xc5\xfb\xd0\x81\xe0\xf2\x3d\x21\xba\x7c\x61\x94\xe0\x7b\x29\x00\xbe\xe8\x2c\x5f\x66\x4f\xec\xb5\x3d\x0d\x56\xe4\xf2\xc0\x3f\xed\x8a\x36\x89\x6a\x34\x8e\xbd\x7a\x67\xc3\xf4\xde\x5f\x46\x53\xbb\x7c\x5a\x90\x52\x40\xb6\xea\x29\x0d\x54\x19\xb8\xc5\xb6\x82\xe9\xe9\x3d\xeb\x49\x5d\x53\x92\xdf\x53\x68\xa7\xbd\x0f\x00\xe5\x12\x49\x5c\xc1\x74\x79\xe7\x45\x65\xfa\x0d\xc1\x41\x1b\x53\xc0\xa3\x32\x26\x4e\xa8\x28\x47\x9e\x66\x8e\x92\x32\x05\x93\x83\x4d\xf8\x83\x9c\x00\xb8\xdd\xa2\xd3\x0a\xa6\x97\xb4\x76\x3b\xac\x9b\x80\x3f\x9e\x32\x5a\x51\xe4\xff\x22\xd7\x9e\x42\x13\xb1\x6f\xd7\xff\x20\x96\x1e\x83\x4f\xa0\x31\x83\x10\x7c\x44\x70\x36\x81\xd8\x2d\x86\x16\x1a\x41\xd8\x32\x45\x23\x8e\xcb\x19\x03\x1f\xed\x0d\x0a\xd8\x54\xcf\x88\x81\xd1\xd6\x86\x52\x68\xa1\x34\xc5\xfa\x84\x2c\x93\x2f\x25\x75\x27\x49\x63\x36\xb5\xe7\x5e\x1d\xc6\xac\xed\x99\xe7\x0a\x3e\x7d\xbe\x37\x7e\x8d\xad\x1e\x04\x3f\x3a\x75\xe8\x44\x54\xf0\xec\xd3\xfa\xfd\x7a\xb3\xbc\xf8\x70\xb6\x7c\x75\xf2\xee\xed\xe6\xc3\xd5\xf2\xf5\xf9\x7a\x73\xf5\xfe\xf3\x33\xb6\xc9\xed\x90\x67\xd1\x33\x13\x63\x6d\xc6\x4c\xd5\x7e\x7e\xf4\xf2\x68\xd1\x13\x5a\xbe\x19\x9d\x20\x63\x1c\xb2\x16\xdd\xc7\x33\x8d\x79\x84\x08\xba\x86\xd1\x64\x62\x3d\x5e\xcc\x9f\xbf\x98\x8f\xd0\x32\xb7\x80\x6a\x32\xe3\x16\xb9\x64\xb6\x75\xcd\x28\x62\xb4\xcd\x28\xc7\xe7\x49\x91\x93\x0d\xe7\xab\xff\x2f\xef\xfa\xdf\x37\x24\x5a\x2a\x7e\x94\xaa\x11\xec\xee\x89\xa8\xd5\x46\x0e\x99\x47\x8e\x5d\x6d\x86\x51\x28\x34\xe5\x36\x1c\x2f\x5e\x48\xef\x51\xcc\x0d\x3b\x1c\x14\x58\x8c\x7f\x36\x28\x3a\xb2\x01\xb8\xdc\x54\xb0\x98\xcf\xe3\xc8\x1a\x31\x12\xb7\x15\xfc\x3a\xbf\xf0\x3d\x50\x44\x8c\x5a\xd6\x0d\x6c\xa7\x9a\x65\x10\xdd\x8f\x76\x45\xac\x85\x7b\xd8\xaf\xb2\x19\x48\xc9\x51\xa8\x60\x73\xba\x1a\x28\xb6\xb5\x4f\x28\xb2\x62\xba\xc6\xa1\xc4\x42\xff\x1a\x75\xac\x3a\x5b\xdd\x55\x30\x2b\x51\xed\x5f\x63\xe4\x90\xf4\xa1\x26\x00\x71\x3b\x2c\x6a\xdf\x6c\x36\xab\xf5\x00\xf1\xc9\xab\xb7\xe1\x0c\x83\x6d\xd7\xe8\x28\xd5\x52\xc1\x48\x2f\xb2\xa7\xba\x87\x9e\x0f\x20\xf5\x11\xa9\xd1\x1e\x5b\x0c\x30\x69\x9c\x43\x91\xcd\x8e\x51\x76\x14\xea\x31\xba\xb5\x3e\x34\x8c\x03\xf4\x97\x1e\x0d\x7e\x8f\xff\xb8\x13\x25\xe8\x27\x34\xe2\xe5\x77\x3a\xb1\x98\xff\xf4\x56\x1c\xee\x5d\x79\x45\x28\x29\xde\xe9\xf8\x34\xdb\xba\x2c\xf8\x2b\x22\x7d\xe5\x03\x76\x8f\x4b\x05\xca\x0d\x0e\xdd\x9a\x74\x22\x97\x94\x8a\xdb\xe3\xe0\x3b\x41\x3e\xdc\x80\x61\x39\x36\x04\xfa\xb8\x62\xbf\xf7\x01\x6f\x70\x29\xce\x86\xc3\x9b\x53\xc1\xd6\x06\xf9\xca\xd1\xad\xd6\x8b\xb2\x4f\x1f\xb9\x19\x0f\xf7\x20\x74\x9b\x77\xd5\x8d\x4c\x63\xfe\x3b\x00\x00\xff\xff\xf7\x58\x0d\x6a\xc5\x08\x00\x00") func metricsServerMetricsServerDeploymentYamlBytes() ([]byte, error) { return bindataRead( @@ -291,7 +291,7 @@ func metricsServerMetricsServerServiceYaml() (*asset, error) { return a, nil } -var _metricsServerResourceReaderYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xa4\x90\x3f\x4f\xc4\x30\x0c\xc5\xf7\x7c\x0a\xeb\xf6\xf4\xc4\x86\xb2\x01\x03\xfb\x21\xb1\xbb\xa9\xb9\x33\x6d\xe3\xca\x76\x8a\xe0\xd3\xa3\x6b\xcb\x1f\x81\x74\x42\x62\xca\x4b\xe2\x9f\x9f\xde\x8b\x31\x06\x9c\xf8\x91\xd4\x58\x4a\x02\x6d\x31\x37\x58\xfd\x24\xca\x6f\xe8\x2c\xa5\xe9\xaf\xad\x61\xd9\xcf\x57\xa1\xe7\xd2\x25\xb8\x1b\xaa\x39\xe9\x41\x06\x0a\x23\x39\x76\xe8\x98\x02\x40\xc1\x91\x12\xd8\xab\x39\x8d\x69\x24\x57\xce\x16\x8d\x74\x26\x0d\x5a\x07\xb2\x14\x22\xe0\xc4\xf7\x2a\x75\xb2\x33\x11\x61\xb7\x0b\x00\x4a\x26\x55\x33\x6d\x6f\x93\x74\xb6\x88\x22\x1d\x7d\x53\x7b\x73\xf4\xed\x8e\x23\xd9\x84\x79\xf9\x9e\x49\xdb\x0d\x3d\x92\x2f\xe7\xc0\xb6\x8a\x17\xf4\x7c\x0a\xff\x0b\x79\xcb\xa5\xe3\x72\xfc\x7b\x56\x19\xe8\x40\x4f\xe7\xb1\x8f\xb4\x17\x2c\x03\xc0\xef\x5a\x2f\x1b\x58\x6d\x9f\x29\xfb\xd2\xe7\xca\x3e\x90\xce\x9c\xe9\x26\x67\xa9\xc5\x3f\xf1\x1f\x1c\x7c\xf5\x96\xa0\xaf\x2d\xc5\x75\xff\x7b\x00\x00\x00\xff\xff\x83\x30\x56\xaa\x04\x02\x00\x00") +var _metricsServerResourceReaderYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xa4\x90\x41\x4b\xc4\x30\x10\x85\xef\xf9\x15\xc3\xde\xd3\xc5\x9b\xe4\xa6\x1e\xbc\xaf\xe0\x3d\x4d\x9f\xbb\x63\xdb\xa4\xcc\x4c\x2a\xfa\xeb\xa5\xb6\x2a\xb8\xb0\x2c\x78\x4a\x98\xe4\x7d\x8f\xf9\xbc\xf7\x2e\x4e\xfc\x0c\x51\x2e\x39\x90\xb4\x31\x35\xb1\xda\xa9\x08\x7f\x44\xe3\x92\x9b\xfe\x56\x1b\x2e\xfb\xf9\xc6\xf5\x9c\xbb\x40\x0f\x43\x55\x83\x1c\xca\x00\x37\xc2\x62\x17\x2d\x06\x47\x94\xe3\x88\x40\xfa\xae\x86\x31\x8c\x30\xe1\xa4\x5e\x21\x33\xc4\x49\x1d\xa0\xc1\x79\x8a\x13\x3f\x4a\xa9\x93\x2e\x09\x4f\xbb\x9d\x23\x12\x68\xa9\x92\xb0\xcd\x72\xe9\xa0\xfb\x0d\xe0\x88\x66\x48\xbb\x3d\x1d\x61\xd7\x31\xa6\xd2\xe9\x2f\xec\x1c\xb2\x9c\x03\xeb\x7a\x79\x8b\x96\x4e\xee\x7f\x26\xee\x39\x77\x9c\x8f\xd7\x0b\x29\x03\x0e\x78\x59\xbe\x7d\xaf\x73\xa1\xd2\x11\x9d\xbb\xbf\x5c\xa0\xb5\x7d\x45\xb2\x2f\xe9\x6b\xf6\x09\x32\x73\xc2\x5d\x4a\xa5\x66\xfb\x89\xff\xc9\xad\x63\x9d\x62\x42\xa0\xbe\xb6\xf0\x2b\xff\x33\x00\x00\xff\xff\xf2\x78\xc0\xfa\x29\x02\x00\x00") func metricsServerResourceReaderYamlBytes() ([]byte, error) { return bindataRead( diff --git a/pkg/etcd/etcd.go b/pkg/etcd/etcd.go index 08dae27ee..fd1626db7 100644 --- a/pkg/etcd/etcd.go +++ b/pkg/etcd/etcd.go @@ -9,6 +9,7 @@ import ( "encoding/json" "fmt" "io" + "io/fs" "io/ioutil" "net" "net/http" @@ -352,7 +353,7 @@ func (e *ETCD) Reset(ctx context.Context, rebootstrap func() error) error { return err } // touch a file to avoid multiple resets - if err := ioutil.WriteFile(ResetFile(e.config), []byte{}, 0600); err != nil { + if err := os.WriteFile(ResetFile(e.config), []byte{}, 0600); err != nil { return err } return e.newCluster(ctx, true) @@ -561,7 +562,7 @@ func (e *ETCD) Register(ctx context.Context, config *config.Control, handler htt // name is used on subsequent calls. 
func (e *ETCD) setName(force bool) error { fileName := nameFile(e.config) - data, err := ioutil.ReadFile(fileName) + data, err := os.ReadFile(fileName) if os.IsNotExist(err) || force { e.name = e.config.ServerNodeName + "-" + uuid.New().String()[:8] if err := os.MkdirAll(filepath.Dir(fileName), 0700); err != nil { @@ -1488,22 +1489,26 @@ func (e *ETCD) listLocalSnapshots() (map[string]snapshotFile, error) { return snapshots, errors.Wrap(err, "failed to get the snapshot dir") } - files, err := ioutil.ReadDir(snapshotDir) + dirEntries, err := os.ReadDir(snapshotDir) if err != nil { return nil, err } nodeName := os.Getenv("NODE_NAME") - for _, f := range files { + for _, de := range dirEntries { + file, err := de.Info() + if err != nil { + return nil, err + } sf := snapshotFile{ - Name: f.Name(), - Location: "file://" + filepath.Join(snapshotDir, f.Name()), + Name: file.Name(), + Location: "file://" + filepath.Join(snapshotDir, file.Name()), NodeName: nodeName, CreatedAt: &metav1.Time{ - Time: f.ModTime(), + Time: file.ModTime(), }, - Size: f.Size(), + Size: file.Size(), Status: successfulSnapshotStatus, } sfKey := generateSnapshotConfigMapKey(sf) @@ -2024,7 +2029,18 @@ func backupDirWithRetention(dir string, maxBackupRetention int) (string, error) if _, err := os.Stat(dir); err != nil { return "", nil } - files, err := ioutil.ReadDir(filepath.Dir(dir)) + entries, err := os.ReadDir(filepath.Dir(dir)) + if err != nil { + return "", err + } + files := make([]fs.FileInfo, 0, len(entries)) + for _, entry := range entries { + info, err := entry.Info() + if err != nil { + return "", err + } + files = append(files, info) + } if err != nil { return "", err } diff --git a/pkg/etcd/s3.go b/pkg/etcd/s3.go index 0bbb217af..09f495be1 100644 --- a/pkg/etcd/s3.go +++ b/pkg/etcd/s3.go @@ -8,7 +8,6 @@ import ( "encoding/pem" "fmt" "io" - "io/ioutil" "net/http" "os" "path/filepath" @@ -269,7 +268,7 @@ func (s *S3) snapshotRetention(ctx context.Context) error { func readS3EndpointCA(endpointCA string) ([]byte, error) { ca, err := base64.StdEncoding.DecodeString(endpointCA) if err != nil { - return ioutil.ReadFile(endpointCA) + return os.ReadFile(endpointCA) } return ca, nil } diff --git a/pkg/nodepassword/nodepassword_test.go b/pkg/nodepassword/nodepassword_test.go index 4693f39b6..92bfe8e4b 100644 --- a/pkg/nodepassword/nodepassword_test.go +++ b/pkg/nodepassword/nodepassword_test.go @@ -2,7 +2,6 @@ package nodepassword import ( "fmt" - "io/ioutil" "log" "os" "runtime" @@ -209,7 +208,7 @@ func assertNotEqual(t *testing.T, a interface{}, b interface{}) { } func generateNodePasswordFile(migrateNumNodes int) string { - tempFile, err := ioutil.TempFile("", "node-password-test.*") + tempFile, err := os.CreateTemp("", "node-password-test.*") if err != nil { log.Fatal(err) } @@ -219,7 +218,7 @@ func generateNodePasswordFile(migrateNumNodes int) string { for i := 1; i <= migrateNumNodes; i++ { passwordEntries += fmt.Sprintf("node%d,node%d\n", i, i) } - if err := ioutil.WriteFile(tempFile.Name(), []byte(passwordEntries), 0600); err != nil { + if err := os.WriteFile(tempFile.Name(), []byte(passwordEntries), 0600); err != nil { log.Fatal(err) } diff --git a/pkg/rootless/rootless.go b/pkg/rootless/rootless.go index 4f2d2d8c7..4c8aeaa90 100644 --- a/pkg/rootless/rootless.go +++ b/pkg/rootless/rootless.go @@ -4,7 +4,6 @@ package rootless import ( - "io/ioutil" "net" "os" "os/exec" @@ -100,7 +99,7 @@ func validateSysctl() error { func readSysctl(key string) (string, error) { p := "/proc/sys/" + strings.ReplaceAll(key, ".", 
"/") - b, err := ioutil.ReadFile(p) + b, err := os.ReadFile(p) if err != nil { return "", err } @@ -126,7 +125,7 @@ func createParentOpt(stateDir string) (*parent.Opt, error) { return nil, errors.Wrapf(err, "failed to mkdir %s", stateDir) } - stateDir, err := ioutil.TempDir("", "rootless") + stateDir, err := os.MkdirTemp("", "rootless") if err != nil { return nil, err } diff --git a/pkg/rootlessports/controller.go b/pkg/rootlessports/controller.go index 436a4267b..4f728b309 100644 --- a/pkg/rootlessports/controller.go +++ b/pkg/rootlessports/controller.go @@ -20,7 +20,7 @@ var ( all = "_all_" ) -func Register(ctx context.Context, serviceController coreClients.ServiceController, httpsPort int) error { +func Register(ctx context.Context, serviceController coreClients.ServiceController, enabled bool, httpsPort int) error { var ( err error rootlessClient client.Client @@ -44,6 +44,7 @@ func Register(ctx context.Context, serviceController coreClients.ServiceControll } h := &handler{ + enabled: enabled, rootlessClient: rootlessClient, serviceClient: serviceController, serviceCache: serviceController.Cache(), @@ -57,6 +58,7 @@ func Register(ctx context.Context, serviceController coreClients.ServiceControll } type handler struct { + enabled bool rootlessClient client.Client serviceClient coreClients.ServiceController serviceCache coreClients.ServiceCache @@ -126,6 +128,10 @@ func (h *handler) toBindPorts() (map[int]int, error) { h.httpsPort: h.httpsPort, } + if !h.enabled { + return toBindPorts, nil + } + for _, svc := range svcs { for _, ingress := range svc.Status.LoadBalancer.Ingress { if ingress.IP == "" { diff --git a/pkg/secretsencrypt/config.go b/pkg/secretsencrypt/config.go index 8dd584b92..f7fda3cd8 100644 --- a/pkg/secretsencrypt/config.go +++ b/pkg/secretsencrypt/config.go @@ -5,7 +5,7 @@ import ( "encoding/hex" "encoding/json" "fmt" - "io/ioutil" + "os" "github.com/xiaods/k8e/pkg/daemons/config" "github.com/xiaods/k8e/pkg/version" @@ -28,7 +28,7 @@ const ( var EncryptionHashAnnotation = version.Program + ".io/encryption-config-hash" func GetEncryptionProviders(runtime *config.ControlRuntime) ([]apiserverconfigv1.ProviderConfiguration, error) { - curEncryptionByte, err := ioutil.ReadFile(runtime.EncryptionConfig) + curEncryptionByte, err := os.ReadFile(runtime.EncryptionConfig) if err != nil { return nil, err } @@ -106,11 +106,11 @@ func WriteEncryptionConfig(runtime *config.ControlRuntime, keys []apiserverconfi if err != nil { return err } - return ioutil.WriteFile(runtime.EncryptionConfig, jsonfile, 0600) + return os.WriteFile(runtime.EncryptionConfig, jsonfile, 0600) } func GenEncryptionConfigHash(runtime *config.ControlRuntime) (string, error) { - curEncryptionByte, err := ioutil.ReadFile(runtime.EncryptionConfig) + curEncryptionByte, err := os.ReadFile(runtime.EncryptionConfig) if err != nil { return "", err } @@ -140,7 +140,7 @@ func GenReencryptHash(runtime *config.ControlRuntime, keyName string) (string, e } func getEncryptionHashFile(runtime *config.ControlRuntime) (string, error) { - curEncryptionByte, err := ioutil.ReadFile(runtime.EncryptionHash) + curEncryptionByte, err := os.ReadFile(runtime.EncryptionHash) if err != nil { return "", err } @@ -170,5 +170,5 @@ func WriteEncryptionHashAnnotation(runtime *config.ControlRuntime, node *corev1. 
return err } logrus.Debugf("encryption hash annotation set successfully on node: %s\n", node.ObjectMeta.Name) - return ioutil.WriteFile(runtime.EncryptionHash, []byte(ann), 0600) + return os.WriteFile(runtime.EncryptionHash, []byte(ann), 0600) } diff --git a/pkg/server/etcd.go b/pkg/server/etcd.go index cf31f6759..397ba9660 100644 --- a/pkg/server/etcd.go +++ b/pkg/server/etcd.go @@ -2,7 +2,6 @@ package server import ( "context" - "io/ioutil" "os" "path/filepath" "time" @@ -75,7 +74,7 @@ func setETCDLabelsAndAnnotations(ctx context.Context, config *Config) error { } fileName := filepath.Join(controlConfig.DataDir, "db", "etcd", "name") - data, err := ioutil.ReadFile(fileName) + data, err := os.ReadFile(fileName) if err != nil { logrus.Infof("Waiting for etcd node name file to be available: %v", err) continue diff --git a/pkg/server/router.go b/pkg/server/router.go index 462104ef4..159fad6a0 100644 --- a/pkg/server/router.go +++ b/pkg/server/router.go @@ -5,7 +5,6 @@ import ( "crypto" "crypto/x509" "fmt" - "io/ioutil" "net" "net/http" "os" @@ -131,7 +130,7 @@ func cacerts(serverCA string) http.Handler { return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) { if ca == nil { var err error - ca, err = ioutil.ReadFile(serverCA) + ca, err = os.ReadFile(serverCA) if err != nil { sendError(err, resp) return @@ -157,7 +156,7 @@ func getNodeInfo(req *http.Request) (string, string, error) { } func getCACertAndKeys(caCertFile, caKeyFile, signingKeyFile string) ([]*x509.Certificate, crypto.Signer, crypto.Signer, error) { - keyBytes, err := ioutil.ReadFile(signingKeyFile) + keyBytes, err := os.ReadFile(signingKeyFile) if err != nil { return nil, nil, nil, err } @@ -167,7 +166,7 @@ func getCACertAndKeys(caCertFile, caKeyFile, signingKeyFile string) ([]*x509.Cer return nil, nil, nil, err } - caKeyBytes, err := ioutil.ReadFile(caKeyFile) + caKeyBytes, err := os.ReadFile(caKeyFile) if err != nil { return nil, nil, nil, err } @@ -177,7 +176,7 @@ func getCACertAndKeys(caCertFile, caKeyFile, signingKeyFile string) ([]*x509.Cer return nil, nil, nil, err } - caBytes, err := ioutil.ReadFile(caCertFile) + caBytes, err := os.ReadFile(caCertFile) if err != nil { return nil, nil, nil, err } @@ -235,7 +234,7 @@ func servingKubeletCert(server *config.Control, keyFile string, auth nodePassBoo return } - keyBytes, err := ioutil.ReadFile(keyFile) + keyBytes, err := os.ReadFile(keyFile) if err != nil { http.Error(resp, err.Error(), http.StatusInternalServerError) return @@ -275,7 +274,7 @@ func clientKubeletCert(server *config.Control, keyFile string, auth nodePassBoot return } - keyBytes, err := ioutil.ReadFile(keyFile) + keyBytes, err := os.ReadFile(keyFile) if err != nil { http.Error(resp, err.Error(), http.StatusInternalServerError) return @@ -300,7 +299,7 @@ func fileHandler(fileName ...string) http.Handler { } for _, f := range fileName { - bytes, err := ioutil.ReadFile(f) + bytes, err := os.ReadFile(f) if err != nil { logrus.Errorf("Failed to read %s: %v", f, err) resp.WriteHeader(http.StatusInternalServerError) @@ -436,13 +435,13 @@ func passwordBootstrap(ctx context.Context, config *Config) nodePassBootstrapper func verifyLocalPassword(ctx context.Context, config *Config, mu *sync.Mutex, deferredNodes map[string]bool, nodeName, nodePassword string) (string, int, error) { // use same password file location that the agent creates nodePasswordRoot := "/" - if config.Rootless { + if config.ControlConfig.Rootless { nodePasswordRoot = filepath.Join(config.ControlConfig.DataDir, "agent") } 
nodeConfigPath := filepath.Join(nodePasswordRoot, "etc", "k8e", "node") nodePasswordFile := filepath.Join(nodeConfigPath, "password") - passBytes, err := ioutil.ReadFile(nodePasswordFile) + passBytes, err := os.ReadFile(nodePasswordFile) if err != nil { return "", http.StatusInternalServerError, errors.Wrap(err, "unable to read node password file") } diff --git a/pkg/server/secrets-encrypt.go b/pkg/server/secrets-encrypt.go index 0348c978d..7aed18e17 100644 --- a/pkg/server/secrets-encrypt.go +++ b/pkg/server/secrets-encrypt.go @@ -6,7 +6,7 @@ import ( "encoding/base64" "encoding/json" "fmt" - "io/ioutil" + "io" "math/big" "net/http" "os" @@ -43,7 +43,7 @@ type EncryptionRequest struct { } func getEncryptionRequest(req *http.Request) (EncryptionRequest, error) { - b, err := ioutil.ReadAll(req.Body) + b, err := io.ReadAll(req.Body) if err != nil { return EncryptionRequest{}, err } diff --git a/pkg/server/server.go b/pkg/server/server.go index f37ce5480..1712ea2fe 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -3,7 +3,6 @@ package server import ( "context" "fmt" - "io/ioutil" "os" "path" "path/filepath" @@ -213,9 +212,10 @@ func coreControllers(ctx context.Context, sc *Context, config *Config) error { } } - if config.Rootless { + if config.ControlConfig.Rootless { return rootlessports.Register(ctx, sc.Core.Core().V1().Service(), + !config.ControlConfig.DisableServiceLB, config.ControlConfig.HTTPSPort) } @@ -253,7 +253,7 @@ func stageFiles(ctx context.Context, sc *Context, controlConfig *config.Control) dataDir) } -// registryTemplate behaves like the system_default_registry template in k8e helm charts, +// registryTemplate behaves like the system_default_registry template in Rancher helm charts, // and returns the registry value with a trailing forward slash if the registry string is not empty. // If it is empty, it is passed through as a no-op. 
func registryTemplate(registry string) string { @@ -360,7 +360,7 @@ func writeKubeConfig(certs string, config *Config) error { port = config.ControlConfig.APIServerPort } url := fmt.Sprintf("https://%s:%d", ip, port) - kubeConfig, err := HomeKubeConfig(true, config.Rootless) + kubeConfig, err := HomeKubeConfig(true, config.ControlConfig.Rootless) def := true if err != nil { kubeConfig = filepath.Join(config.ControlConfig.DataDir, "kubeconfig-"+version.Program+".yaml") @@ -445,7 +445,7 @@ func writeToken(token, file, certs string) error { if err != nil { return err } - return ioutil.WriteFile(file, []byte(token+"\n"), 0600) + return os.WriteFile(file, []byte(token+"\n"), 0600) } func setNoProxyEnv(config *config.Control) error { diff --git a/pkg/server/types.go b/pkg/server/types.go index 693070648..651641c8c 100644 --- a/pkg/server/types.go +++ b/pkg/server/types.go @@ -10,7 +10,6 @@ import ( type Config struct { DisableAgent bool ControlConfig config.Control - Rootless bool SupervisorPort int StartupHooks []cmds.StartupHook LeaderControllers CustomControllers diff --git a/pkg/static/stage.go b/pkg/static/stage.go index 6e28aec39..0d61bd91a 100644 --- a/pkg/static/stage.go +++ b/pkg/static/stage.go @@ -1,9 +1,9 @@ +//go:build !no_stage // +build !no_stage package static import ( - "io/ioutil" "os" "path/filepath" @@ -20,7 +20,7 @@ func Stage(dataDir string) error { p := filepath.Join(dataDir, name) logrus.Info("Writing static file: ", p) os.MkdirAll(filepath.Dir(p), 0700) - if err := ioutil.WriteFile(p, content, 0600); err != nil { + if err := os.WriteFile(p, content, 0600); err != nil { return errors.Wrapf(err, "failed to write to %s", name) } } diff --git a/pkg/token/read.go b/pkg/token/read.go index 1789ef80b..101046e39 100644 --- a/pkg/token/read.go +++ b/pkg/token/read.go @@ -3,7 +3,6 @@ package token import ( cryptorand "crypto/rand" "encoding/hex" - "io/ioutil" "os" "strings" "time" @@ -26,7 +25,7 @@ func ReadFile(path string) (string, error) { } for { - tokenBytes, err := ioutil.ReadFile(path) + tokenBytes, err := os.ReadFile(path) if err == nil { return strings.TrimSpace(string(tokenBytes)), nil } else if os.IsNotExist(err) { diff --git a/pkg/util/api.go b/pkg/util/api.go index 1e88d3add..1556d4c9b 100644 --- a/pkg/util/api.go +++ b/pkg/util/api.go @@ -104,7 +104,7 @@ func WaitForAPIServerReady(ctx context.Context, kubeconfigPath string, timeout t func BuildControllerEventRecorder(k8s clientset.Interface, controllerName, namespace string) record.EventRecorder { logrus.Infof("Creating %s event broadcaster", controllerName) eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(logrus.Infof) + eventBroadcaster.StartStructuredLogging(0) eventBroadcaster.StartRecordingToSink(&coregetter.EventSinkImpl{Interface: k8s.CoreV1().Events(namespace)}) nodeName := os.Getenv("NODE_NAME") return eventBroadcaster.NewRecorder(schemes.All, v1.EventSource{Component: controllerName, Host: nodeName})
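
Apart from the regenerated metrics-server bindata above, the recurring pattern in these hunks is the move off the deprecated io/ioutil package onto the os and io equivalents available since Go 1.16. Every call except ioutil.ReadDir is a drop-in rename (ioutil.ReadFile -> os.ReadFile, ioutil.WriteFile -> os.WriteFile, ioutil.TempDir -> os.MkdirTemp, ioutil.TempFile -> os.CreateTemp, ioutil.ReadAll -> io.ReadAll). os.ReadDir is the one that changes shape: it returns []fs.DirEntry rather than []fs.FileInfo, so code that needs ModTime or Size, as the snapshot listing in pkg/etcd does, has to call Info() on each entry. A minimal standalone sketch of that mapping follows; the temp-directory and file names in it are illustrative only and not taken from the patch.

	package main

	import (
		"fmt"
		"log"
		"os"
		"path/filepath"
	)

	func main() {
		// was ioutil.TempDir
		dir, err := os.MkdirTemp("", "snapshot-example")
		if err != nil {
			log.Fatal(err)
		}
		defer os.RemoveAll(dir)

		// was ioutil.WriteFile; same signature and the same 0600 permissions
		if err := os.WriteFile(filepath.Join(dir, "etcd-snapshot-1"), []byte("data"), 0600); err != nil {
			log.Fatal(err)
		}

		// was ioutil.ReadDir, which returned []fs.FileInfo directly; os.ReadDir
		// returns []fs.DirEntry, so ModTime and Size need an extra Info() call.
		entries, err := os.ReadDir(dir)
		if err != nil {
			log.Fatal(err)
		}
		for _, entry := range entries {
			info, err := entry.Info()
			if err != nil {
				log.Fatal(err)
			}
			fmt.Println(info.Name(), info.Size(), info.ModTime())
		}
	}

The changes that are not mechanical renames are the ones worth closer review: the Rootless flag moves from the server Config struct into config.Control (hence the new config.ControlConfig.Rootless references), rootlessports.Register gains an enabled parameter wired to !DisableServiceLB so toBindPorts can return early when the service load balancer is disabled, and the controller event broadcaster switches from StartLogging(logrus.Infof) to StartStructuredLogging(0).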