diff --git a/constraint/go.mod b/constraint/go.mod index 14ed61a12..55538cc9b 100644 --- a/constraint/go.mod +++ b/constraint/go.mod @@ -42,6 +42,7 @@ require ( github.com/emicklei/go-restful v2.9.5+incompatible // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/felixge/httpsnoop v1.0.2 // indirect + github.com/fsnotify/fsnotify v1.5.4 // indirect github.com/ghodss/yaml v1.0.0 // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect diff --git a/constraint/pkg/client/drivers/local/args.go b/constraint/pkg/client/drivers/local/args.go index 072687ba1..8a4d8794e 100644 --- a/constraint/pkg/client/drivers/local/args.go +++ b/constraint/pkg/client/drivers/local/args.go @@ -2,7 +2,6 @@ package local import ( "fmt" - "os" "github.com/open-policy-agent/frameworks/constraint/pkg/client/errors" "github.com/open-policy-agent/frameworks/constraint/pkg/externaldata" @@ -10,6 +9,7 @@ import ( "github.com/open-policy-agent/opa/storage" "github.com/open-policy-agent/opa/topdown/print" opatypes "github.com/open-policy-agent/opa/types" + "sigs.k8s.io/controller-runtime/pkg/certwatcher" ) type Arg func(*Driver) error @@ -48,10 +48,6 @@ func Defaults() Arg { d.sendRequestToProvider = externaldata.DefaultSendRequestToProvider } - if d.fs == nil { - d.fs = os.DirFS("/") - } - return nil } } @@ -120,10 +116,9 @@ func DisableBuiltins(builtins ...string) Arg { } } -func AddClientTLSKeyPair(certFile, keyFile string) Arg { +func AddExternalDataClientCertWatcher(clientCertWatcher *certwatcher.CertWatcher) Arg { return func(d *Driver) error { - d.clientCertFile = certFile - d.clientKeyFile = keyFile + d.clientCertWatcher = clientCertWatcher return nil } diff --git a/constraint/pkg/client/drivers/local/driver.go b/constraint/pkg/client/drivers/local/driver.go index 0b215de67..850cd05b9 100644 --- a/constraint/pkg/client/drivers/local/driver.go +++ b/constraint/pkg/client/drivers/local/driver.go @@ -6,8 +6,6 @@ import ( "crypto/tls" "encoding/json" "fmt" - "io/fs" - "io/ioutil" "sort" "strings" "sync" @@ -25,6 +23,7 @@ import ( "github.com/open-policy-agent/opa/topdown/print" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/certwatcher" ) const ( @@ -69,14 +68,8 @@ type Driver struct { // that is used to communicate with providers. enableExternalDataClientAuth bool - // fs is the filesystem to use for reading files. - fs fs.FS - - // clientCertFile is the path to the client's certificate file. - clientCertFile string - - // clientKeyFile is the path to the client's key file. - clientKeyFile string + // clientCertWatcher is a watcher for the TLS certificate used to communicate with providers. + clientCertWatcher *certwatcher.CertWatcher } // AddTemplate adds templ to Driver. Normalizes modules into usable forms for @@ -337,38 +330,16 @@ func (d *Driver) Dump(ctx context.Context) (string, error) { return string(b), nil } -// readFile reads a file from driver's filesystem and returns its contents. 
-func (d *Driver) readFile(name string) ([]byte, error) { - file, err := d.fs.Open(name) - if err != nil { - return nil, fmt.Errorf("failed to open file %s: %w", name, err) - } - defer file.Close() - - return ioutil.ReadAll(file) -} - func (d *Driver) getTLSCertificate() (*tls.Certificate, error) { if !d.enableExternalDataClientAuth { return nil, nil } - certPEM, err := d.readFile(d.clientCertFile) - if err != nil { - return nil, err - } - - keyPEM, err := d.readFile(d.clientKeyFile) - if err != nil { - return nil, err - } - - clientCert, err := tls.X509KeyPair(certPEM, keyPEM) - if err != nil { - return nil, err + if d.clientCertWatcher == nil { + return nil, fmt.Errorf("certWatcher should not be nil when enableExternalDataClientAuth is true") } - return &clientCert, nil + return d.clientCertWatcher.GetCertificate(nil) } // rewriteModulePackage rewrites the module's package path to path. diff --git a/constraint/pkg/client/drivers/local/driver_unit_test.go b/constraint/pkg/client/drivers/local/driver_unit_test.go index b46335ff2..7c1c4b503 100644 --- a/constraint/pkg/client/drivers/local/driver_unit_test.go +++ b/constraint/pkg/client/drivers/local/driver_unit_test.go @@ -5,11 +5,11 @@ import ( "crypto/tls" "errors" "fmt" - "io/fs" + "io/ioutil" "net/http" + "os" "sort" "testing" - "testing/fstest" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" @@ -24,6 +24,7 @@ import ( "github.com/open-policy-agent/opa/storage/inmem" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "sigs.k8s.io/controller-runtime/pkg/certwatcher" ) const ( @@ -177,73 +178,16 @@ func TestDriver_ExternalData(t *testing.T) { for _, tt := range []struct { name string provider *v1alpha1.Provider - fs fs.FS + clientCertContent string + clientKeyContent string sendRequestToProvider externaldata.SendRequestToProvider errorExpected bool }{ { - name: "provider not found", - errorExpected: true, - }, - { - name: "invalid client cert path", - provider: &v1alpha1.Provider{ - ObjectMeta: metav1.ObjectMeta{ - Name: "dummy-provider", - }, - Spec: v1alpha1.ProviderSpec{ - URL: "https://example.com", - Timeout: 1, - CABundle: caBundle, - }, - }, - fs: fstest.MapFS{ - "client.key": { - Data: []byte("test"), - }, - }, - errorExpected: true, - }, - { - name: "invalid client key path", - provider: &v1alpha1.Provider{ - ObjectMeta: metav1.ObjectMeta{ - Name: "dummy-provider", - }, - Spec: v1alpha1.ProviderSpec{ - URL: "https://example.com", - Timeout: 1, - CABundle: caBundle, - }, - }, - fs: fstest.MapFS{ - "client.crt": { - Data: []byte("test"), - }, - }, - errorExpected: true, - }, - { - name: "invalid client cert and key content", - provider: &v1alpha1.Provider{ - ObjectMeta: metav1.ObjectMeta{ - Name: "dummy-provider", - }, - Spec: v1alpha1.ProviderSpec{ - URL: "https://example.com", - Timeout: 1, - CABundle: caBundle, - }, - }, - fs: fstest.MapFS{ - "client.crt": { - Data: []byte("test"), - }, - "client.key": { - Data: []byte("test"), - }, - }, - errorExpected: true, + name: "provider not found", + clientCertContent: clientCert, + clientKeyContent: clientKey, + errorExpected: true, }, { name: "error from SendRequestToProvider", @@ -257,14 +201,8 @@ func TestDriver_ExternalData(t *testing.T) { CABundle: caBundle, }, }, - fs: fstest.MapFS{ - "client.crt": { - Data: []byte(clientCert), - }, - "client.key": { - Data: []byte(clientKey), - }, - }, + clientCertContent: clientCert, + clientKeyContent: clientKey, sendRequestToProvider: func(ctx context.Context, provider 
*v1alpha1.Provider, keys []string, clientCert *tls.Certificate) (*externaldata.ProviderResponse, int, error) { return nil, http.StatusBadRequest, errors.New("error from SendRequestToProvider") }, @@ -282,14 +220,8 @@ func TestDriver_ExternalData(t *testing.T) { CABundle: caBundle, }, }, - fs: fstest.MapFS{ - "client.crt": { - Data: []byte(clientCert), - }, - "client.key": { - Data: []byte(clientKey), - }, - }, + clientCertContent: clientCert, + clientKeyContent: clientKey, sendRequestToProvider: func(ctx context.Context, provider *v1alpha1.Provider, keys []string, clientCert *tls.Certificate) (*externaldata.ProviderResponse, int, error) { return &externaldata.ProviderResponse{ APIVersion: "v1alpha1", @@ -308,9 +240,37 @@ func TestDriver_ExternalData(t *testing.T) { }, } { t.Run(tt.name, func(t *testing.T) { + clientCertFile, err := ioutil.TempFile("", "client-cert") + if err != nil { + t.Fatal(err) + } + defer os.Remove(clientCertFile.Name()) + + _, _ = clientCertFile.WriteString(tt.clientCertContent) + clientCertFile.Close() + + clientKeyFile, err := ioutil.TempFile("", "client-key") + if err != nil { + t.Fatal(err) + } + defer os.Remove(clientKeyFile.Name()) + + _, _ = clientKeyFile.WriteString(tt.clientKeyContent) + clientKeyFile.Close() + + clientCertWatcher, err := certwatcher.New(clientCertFile.Name(), clientKeyFile.Name()) + if err != nil { + t.Fatal(err) + } + + go func() { + _ = clientCertWatcher.Start(context.Background()) + }() + d, err := New( AddExternalDataProviderCache(externaldata.NewCache()), EnableExternalDataClientAuth(), + AddExternalDataClientCertWatcher(clientCertWatcher), ) if err != nil { t.Fatal(err) @@ -322,12 +282,6 @@ func TestDriver_ExternalData(t *testing.T) { } } - if tt.fs != nil { - d.fs = tt.fs - d.clientCertFile = "client.crt" - d.clientKeyFile = "client.key" - } - if tt.sendRequestToProvider != nil { d.sendRequestToProvider = tt.sendRequestToProvider } diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/.editorconfig b/constraint/vendor/github.com/fsnotify/fsnotify/.editorconfig new file mode 100644 index 000000000..fad895851 --- /dev/null +++ b/constraint/vendor/github.com/fsnotify/fsnotify/.editorconfig @@ -0,0 +1,12 @@ +root = true + +[*.go] +indent_style = tab +indent_size = 4 +insert_final_newline = true + +[*.{yml,yaml}] +indent_style = space +indent_size = 2 +insert_final_newline = true +trim_trailing_whitespace = true diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/.gitattributes b/constraint/vendor/github.com/fsnotify/fsnotify/.gitattributes new file mode 100644 index 000000000..32f1001be --- /dev/null +++ b/constraint/vendor/github.com/fsnotify/fsnotify/.gitattributes @@ -0,0 +1 @@ +go.sum linguist-generated diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/.gitignore b/constraint/vendor/github.com/fsnotify/fsnotify/.gitignore new file mode 100644 index 000000000..4cd0cbaf4 --- /dev/null +++ b/constraint/vendor/github.com/fsnotify/fsnotify/.gitignore @@ -0,0 +1,6 @@ +# Setup a Global .gitignore for OS and editor generated files: +# https://help.github.com/articles/ignoring-files +# git config --global core.excludesfile ~/.gitignore_global + +.vagrant +*.sublime-project diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/.mailmap b/constraint/vendor/github.com/fsnotify/fsnotify/.mailmap new file mode 100644 index 000000000..a04f2907f --- /dev/null +++ b/constraint/vendor/github.com/fsnotify/fsnotify/.mailmap @@ -0,0 +1,2 @@ +Chris Howey +Nathan Youngman <4566+nathany@users.noreply.github.com> diff --git 
a/constraint/vendor/github.com/fsnotify/fsnotify/AUTHORS b/constraint/vendor/github.com/fsnotify/fsnotify/AUTHORS
new file mode 100644
index 000000000..6cbabe5ef
--- /dev/null
+++ b/constraint/vendor/github.com/fsnotify/fsnotify/AUTHORS
@@ -0,0 +1,62 @@
+# Names should be added to this file as
+#    Name or Organization
+# The email address is not required for organizations.
+
+# You can update this list using the following command:
+#
+# $ (head -n10 AUTHORS && git shortlog -se | sed -E 's/^\s+[0-9]+\t//') | tee AUTHORS
+
+# Please keep the list sorted.
+
+Aaron L
+Adrien Bustany
+Alexey Kazakov
+Amit Krishnan
+Anmol Sethi
+Bjørn Erik Pedersen
+Brian Goff
+Bruno Bigras
+Caleb Spare
+Case Nelson
+Chris Howey
+Christoffer Buchholz
+Daniel Wagner-Hall
+Dave Cheney
+Eric Lin
+Evan Phoenix
+Francisco Souza
+Gautam Dey
+Hari haran
+Ichinose Shogo
+Johannes Ebke
+John C Barstow
+Kelvin Fo
+Ken-ichirou MATSUZAWA
+Matt Layher
+Matthias Stone
+Nathan Youngman
+Nickolai Zeldovich
+Oliver Bristow
+Patrick
+Paul Hammond
+Pawel Knap
+Pieter Droogendijk
+Pratik Shinde
+Pursuit92
+Riku Voipio
+Rob Figueiredo
+Rodrigo Chiossi
+Slawek Ligus
+Soge Zhang
+Tiffany Jernigan
+Tilak Sharma
+Tobias Klauser
+Tom Payne
+Travis Cline
+Tudor Golubenco
+Vahe Khachikyan
+Yukang
+bronze1man
+debrando
+henrikedwards
+铁哥
diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/constraint/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
new file mode 100644
index 000000000..cc01c08f5
--- /dev/null
+++ b/constraint/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
@@ -0,0 +1,357 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [Unreleased]
+
+## [1.5.4] - 2022-04-25
+
+* Windows: add missing defer to `Watcher.WatchList` [#447](https://github.com/fsnotify/fsnotify/pull/447)
+* go.mod: use latest x/sys [#444](https://github.com/fsnotify/fsnotify/pull/444)
+* Fix compilation for OpenBSD [#443](https://github.com/fsnotify/fsnotify/pull/443)
+
+## [1.5.3] - 2022-04-22
+
+* This version is retracted.
An incorrect branch was published accidentally [#445](https://github.com/fsnotify/fsnotify/issues/445)
+
+## [1.5.2] - 2022-04-21
+
+* Add a feature to return the directories and files that are being monitored [#374](https://github.com/fsnotify/fsnotify/pull/374)
+* Fix potential crash on windows if `raw.FileNameLength` exceeds `syscall.MAX_PATH` [#361](https://github.com/fsnotify/fsnotify/pull/361)
+* Allow build on unsupported GOOS [#424](https://github.com/fsnotify/fsnotify/pull/424)
+* Don't set `poller.fd` twice in `newFdPoller` [#406](https://github.com/fsnotify/fsnotify/pull/406)
+* fix go vet warnings: call to `(*T).Fatalf` from a non-test goroutine [#416](https://github.com/fsnotify/fsnotify/pull/416)
+
+## [1.5.1] - 2021-08-24
+
+* Revert Add AddRaw to not follow symlinks [#394](https://github.com/fsnotify/fsnotify/pull/394)
+
+## [1.5.0] - 2021-08-20
+
+* Go: Increase minimum required version to Go 1.12 [#381](https://github.com/fsnotify/fsnotify/pull/381)
+* Feature: Add AddRaw method which does not follow symlinks when adding a watch [#289](https://github.com/fsnotify/fsnotify/pull/298)
+* Windows: Follow symlinks by default like on all other systems [#289](https://github.com/fsnotify/fsnotify/pull/289)
+* CI: Use GitHub Actions for CI and cover go 1.12-1.17
+  [#378](https://github.com/fsnotify/fsnotify/pull/378)
+  [#381](https://github.com/fsnotify/fsnotify/pull/381)
+  [#385](https://github.com/fsnotify/fsnotify/pull/385)
+* Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325)
+
+## [1.4.7] - 2018-01-09
+
+* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine)
+* Tests: Fix missing verb on format string (thanks @rchiossi)
+* Linux: Fix deadlock in Remove (thanks @aarondl)
+* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne)
+* Docs: Moved FAQ into the README (thanks @vahe)
+* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich)
+* Docs: replace references to OS X with macOS
+
+## [1.4.2] - 2016-10-10
+
+* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)
+
+## [1.4.1] - 2016-10-04
+
+* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack)
+
+## [1.4.0] - 2016-10-01
+
+* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie)
+
+## [1.3.1] - 2016-06-28
+
+* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
+
+## [1.3.0] - 2016-04-19
+
+* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135)
+
+## [1.2.10] - 2016-03-02
+
+* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj)
+
+## [1.2.9] - 2016-01-13
+
+kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep)
+
+## [1.2.8] - 2015-12-17
+
+* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui
for reporting the issue and @ppknap for writing a failing test)
+* inotify: fix race in test
+* enable race detection for continuous integration (Linux, Mac, Windows)
+
+## [1.2.5] - 2015-10-17
+
+* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki)
+* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken)
+* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie)
+* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion)
+
+## [1.2.1] - 2015-10-14
+
+* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx)
+
+## [1.2.0] - 2015-02-08
+
+* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD)
+* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD)
+* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59)
+
+## [1.1.1] - 2015-02-05
+
+* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD)
+
+## [1.1.0] - 2014-12-12
+
+* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43)
+  * add low-level functions
+  * only need to store flags on directories
+  * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13)
+  * done can be an unbuffered channel
+  * remove calls to os.NewSyscallError
+* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher)
+* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48)
+* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
+
+## [1.0.4] - 2014-09-07
+
+* kqueue: add dragonfly to the build tags.
+* Rename source code files, rearrange code so exported APIs are at the top.
+* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang)
+
+## [1.0.3] - 2014-08-19
+
+* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36)
+
+## [1.0.2] - 2014-08-17
+
+* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
+* [Fix] Make ./path and path equivalent. (thanks @zhsso)
+
+## [1.0.0] - 2014-08-15
+
+* [API] Remove AddWatch on Windows, use Add.
+* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30)
+* Minor updates based on feedback from golint.
+
+## dev / 2014-07-09
+
+* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify).
+* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
+
+## dev / 2014-07-04
+
+* kqueue: fix incorrect mutex used in Close()
+* Update example to demonstrate usage of Op.
+
+## dev / 2014-06-28
+
+* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4)
+* Fix for String() method on Event (thanks Alex Brainman)
+* Don't build on Plan 9 or Solaris (thanks @4ad)
+
+## dev / 2014-06-21
+
+* Events channel of type Event rather than *Event.
+* [internal] use syscall constants directly for inotify and kqueue.
+* [internal] kqueue: rename events to kevents and fileEvent to event.
+
+## dev / 2014-06-19
+
+* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
+* [internal] remove cookie from Event struct (unused).
+* [internal] Event struct has the same definition across every OS.
+* [internal] remove internal watch and removeWatch methods.
+
+## dev / 2014-06-12
+
+* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
+* [API] Pluralized channel names: Events and Errors.
+* [API] Renamed FileEvent struct to Event.
+* [API] Op constants replace methods like IsCreate().
+
+## dev / 2014-06-12
+
+* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
+
+## dev / 2014-05-23
+
+* [API] Remove current implementation of WatchFlags.
+  * current implementation doesn't take advantage of OS for efficiency
+  * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes
+  * no tests for the current implementation
+  * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
+
+## [0.9.3] - 2014-12-31
+
+* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
+
+## [0.9.2] - 2014-08-17
+
+* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
+
+## [0.9.1] - 2014-06-12
+
+* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
+
+## [0.9.0] - 2014-01-17
+
+* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
+* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
+* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
+
+## [0.8.12] - 2013-11-13
+
+* [API] Remove FD_SET and friends from Linux adapter
+
+## [0.8.11] - 2013-11-02
+
+* [Doc] Add Changelog [#72][] (thanks @nathany)
+* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond)
+
+## [0.8.10] - 2013-10-19
+
+* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
+* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
+* [Doc] specify OS-specific limits in README (thanks @debrando)
+
+## [0.8.9] - 2013-09-08
+
+* [Doc] Contributing (thanks @nathany)
+* [Doc] update package path in example code [#63][] (thanks @paulhammond)
+* [Doc] GoCI badge in README (Linux only) [#60][]
+* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
+
+## [0.8.8] - 2013-06-17
+
+* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
+
+## [0.8.7] - 2013-06-03
+
+* [API] Make syscall flags internal
+* [Fix] inotify: ignore event changes
+* [Fix] race in symlink test [#45][] (reported by @srid)
+* [Fix] tests on Windows
+* lower case error messages
+
+## [0.8.6] - 2013-05-23
+
+* kqueue: Use EVT_ONLY flag on Darwin
+* [Doc] Update README with full example
+
+## [0.8.5] - 2013-05-09
+
+* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
+
+## [0.8.4] - 2013-04-07
+
+* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
+
+## [0.8.3] - 2013-03-13
+
+* [Fix] inotify/kqueue memory leak [#36][] (reported by @nbkolchin)
+* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
+
+## [0.8.2] - 2013-02-07
+
+* [Doc] add Authors
+* [Fix] fix data races for map access [#29][] (thanks @fsouza)
+
+## [0.8.1] - 2013-01-09
+
+* [Fix] Windows path separators
+* [Doc] BSD License
+
+## [0.8.0] - 2012-11-09
+
+* kqueue: directory watching improvements (thanks @vmirage)
+* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
+* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
+
+## [0.7.4] - 2012-10-09
+
+* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
+* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
+* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
+* [Fix] kqueue: modify after recreation of file
+
+## [0.7.3] - 2012-09-27
+
+* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
+* [Fix] kqueue: no longer get duplicate CREATE events
+
+## [0.7.2] - 2012-09-01
+
+* kqueue: events for created directories
+
+## [0.7.1] - 2012-07-14
+
+* [Fix] for renaming files
+
+## [0.7.0] - 2012-07-02
+
+* [Feature] FSNotify flags
+* [Fix] inotify: Added file name back to event path
+
+## [0.6.0] - 2012-06-06
+
+* kqueue: watch files after directory created (thanks @tmc)
+
+## [0.5.1] - 2012-05-22
+
+* [Fix] inotify: remove all watches before Close()
+
+## [0.5.0] - 2012-05-03
+
+* [API] kqueue: return errors during watch instead of sending over channel
+* kqueue: match symlink behavior on Linux
+* inotify: add `DELETE_SELF` (requested by @taralx)
+* [Fix] kqueue: handle EINTR (reported by @robfig)
+* [Doc] Godoc example [#1][] (thanks @davecheney)
+
+## [0.4.0] - 2012-03-30
+
+* Go 1 released: build with go tool
+* [Feature] Windows support using winfsnotify
+* Windows does not have attribute change notifications
+* Roll attribute notifications into IsModify
+
+## [0.3.0] - 2012-02-19
+
+* kqueue: add files when watch directory
+
+## [0.2.0] - 2011-12-30
+
+* update to latest Go weekly code
+
+## [0.1.0] - 2011-10-19
+
+* kqueue: add watch on file creation to match inotify
+* kqueue: create file event
+* inotify: ignore `IN_IGNORED` events
+* event String()
+* linux: common FileEvent functions
+* initial commit
+
+[#79]: https://github.com/howeyc/fsnotify/pull/79
+[#77]: https://github.com/howeyc/fsnotify/pull/77
+[#72]: https://github.com/howeyc/fsnotify/issues/72
+[#71]: https://github.com/howeyc/fsnotify/issues/71
+[#70]: https://github.com/howeyc/fsnotify/issues/70
+[#63]: https://github.com/howeyc/fsnotify/issues/63
+[#62]: https://github.com/howeyc/fsnotify/issues/62
+[#60]: https://github.com/howeyc/fsnotify/issues/60
+[#59]: https://github.com/howeyc/fsnotify/issues/59
+[#49]: https://github.com/howeyc/fsnotify/issues/49
+[#45]: https://github.com/howeyc/fsnotify/issues/45
+[#40]: https://github.com/howeyc/fsnotify/issues/40
+[#36]: https://github.com/howeyc/fsnotify/issues/36
+[#33]: https://github.com/howeyc/fsnotify/issues/33
+[#29]: https://github.com/howeyc/fsnotify/issues/29
+[#25]: https://github.com/howeyc/fsnotify/issues/25
+[#24]: https://github.com/howeyc/fsnotify/issues/24
+[#21]: https://github.com/howeyc/fsnotify/issues/21
diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/constraint/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
new file mode 100644
index 000000000..8a642563d
--- /dev/null
+++ b/constraint/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
@@ -0,0 +1,60 @@
+# Contributing
+
+## Issues
+
+* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues).
+* Please indicate the platform you are using fsnotify on.
+* A code example to reproduce the problem is appreciated.
+
+## Pull Requests
+
+### Contributor License Agreement
+
+fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual).
+
+Please indicate that you have signed the CLA in your pull request.
+
+### How fsnotify is Developed
+
+* Development is done on feature branches.
+* Tests are run on BSD, Linux, macOS and Windows.
+* Pull requests are reviewed and [applied to master][am] using [hub][].
+  * Maintainers may modify or squash commits rather than asking contributors to.
+* To issue a new release, the maintainers will:
+  * Update the CHANGELOG
+  * Tag a version, which will become available through gopkg.in.
+
+### How to Fork
+
+For smooth sailing, always use the original import path. Installing with `go get` makes this easy.
+
+1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`)
+2. Create your feature branch (`git checkout -b my-new-feature`)
+3. Ensure everything works and the tests pass (see below)
+4. Commit your changes (`git commit -am 'Add some feature'`)
+
+Contribute upstream:
+
+1. Fork fsnotify on GitHub
+2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`)
+3.
Push to the branch (`git push fork my-new-feature`)
+4. Create a new Pull Request on GitHub
+
+This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/).
+
+### Testing
+
+fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows.
+
+Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.
+
+### Maintainers
+
+Help maintaining fsnotify is welcome. To be a maintainer:
+
+* Submit a pull request and sign the CLA as above.
+* You must be able to run the test suite on Mac, Windows, Linux and BSD.
+
+All code changes should be internal pull requests.
+
+Releases are tagged using [Semantic Versioning](http://semver.org/).
diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/LICENSE b/constraint/vendor/github.com/fsnotify/fsnotify/LICENSE
new file mode 100644
index 000000000..e180c8fb0
--- /dev/null
+++ b/constraint/vendor/github.com/fsnotify/fsnotify/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+Copyright (c) 2012-2019 fsnotify Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/README.md b/constraint/vendor/github.com/fsnotify/fsnotify/README.md
new file mode 100644
index 000000000..0731c5ef8
--- /dev/null
+++ b/constraint/vendor/github.com/fsnotify/fsnotify/README.md
@@ -0,0 +1,120 @@
+# File system notifications for Go
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/fsnotify/fsnotify.svg)](https://pkg.go.dev/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify) [![Maintainers Wanted](https://img.shields.io/badge/maintainers-wanted-red.svg)](https://github.com/fsnotify/fsnotify/issues/413)
+
+fsnotify utilizes [`golang.org/x/sys`](https://pkg.go.dev/golang.org/x/sys) rather than [`syscall`](https://pkg.go.dev/syscall) from the standard library.
+
+Cross platform: Windows, Linux, BSD and macOS.
+
+| Adapter               | OS                               | Status                                                       |
+| --------------------- | -------------------------------- | ------------------------------------------------------------ |
+| inotify               | Linux 2.6.27 or later, Android\* | Supported                                                    |
+| kqueue                | BSD, macOS, iOS\*                | Supported                                                    |
+| ReadDirectoryChangesW | Windows                          | Supported                                                    |
+| FSEvents              | macOS                            | [Planned](https://github.com/fsnotify/fsnotify/issues/11)    |
+| FEN                   | Solaris 11                       | [In Progress](https://github.com/fsnotify/fsnotify/pull/371) |
+| fanotify              | Linux 2.6.37+                    | [Maybe](https://github.com/fsnotify/fsnotify/issues/114)     |
+| USN Journals          | Windows                          | [Maybe](https://github.com/fsnotify/fsnotify/issues/53)      |
+| Polling               | *All*                            | [Maybe](https://github.com/fsnotify/fsnotify/issues/9)       |
+
+\* Android and iOS are untested.
+
+Please see [the documentation](https://pkg.go.dev/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information.
+
+## API stability
+
+fsnotify is a fork of [howeyc/fsnotify](https://github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA).
+
+All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/).
+
+## Usage
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/fsnotify/fsnotify"
+)
+
+func main() {
+	watcher, err := fsnotify.NewWatcher()
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer watcher.Close()
+
+	done := make(chan bool)
+	go func() {
+		for {
+			select {
+			case event, ok := <-watcher.Events:
+				if !ok {
+					return
+				}
+				log.Println("event:", event)
+				if event.Op&fsnotify.Write == fsnotify.Write {
+					log.Println("modified file:", event.Name)
+				}
+			case err, ok := <-watcher.Errors:
+				if !ok {
+					return
+				}
+				log.Println("error:", err)
+			}
+		}
+	}()
+
+	err = watcher.Add("/tmp/foo")
+	if err != nil {
+		log.Fatal(err)
+	}
+	<-done
+}
+```
+
+## Contributing
+
+Please refer to [CONTRIBUTING][] before opening an issue or pull request.
+
+## FAQ
+
+**When a file is moved to another directory, is it still being watched?**
+
+No (it shouldn't be, unless you are watching where it was moved to).
+
+**When I watch a directory, are all subdirectories watched as well?**
+
+No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]).
+
+**Do I have to watch the Error and Event channels in a separate goroutine?**
+
+As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7])
+
+**Why am I receiving multiple events for the same file on OS X?**
+
+Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]).
+
+**How many files can be watched at once?**
+
+There are OS-specific limits as to how many watches can be created:
+* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error.
+* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error.
+
+**Why don't notifications work with NFS filesystems or filesystem in userspace (FUSE)?**
+
+fsnotify requires support from underlying OS to work.
The current NFS protocol does not provide network level support for file notifications.
+
+[#62]: https://github.com/howeyc/fsnotify/issues/62
+[#18]: https://github.com/fsnotify/fsnotify/issues/18
+[#11]: https://github.com/fsnotify/fsnotify/issues/11
+[#7]: https://github.com/howeyc/fsnotify/issues/7
+
+[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md
+
+## Related Projects
+
+* [notify](https://github.com/rjeczalik/notify)
+* [fsevents](https://github.com/fsnotify/fsevents)
+
diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/fen.go b/constraint/vendor/github.com/fsnotify/fsnotify/fen.go
new file mode 100644
index 000000000..b3ac3d8f5
--- /dev/null
+++ b/constraint/vendor/github.com/fsnotify/fsnotify/fen.go
@@ -0,0 +1,38 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build solaris
+// +build solaris
+
+package fsnotify
+
+import (
+	"errors"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+	Events chan Event
+	Errors chan error
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+	return nil, errors.New("FEN based watcher not yet supported for fsnotify\n")
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+	return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+	return nil
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+	return nil
+}
diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/constraint/vendor/github.com/fsnotify/fsnotify/fsnotify.go
new file mode 100644
index 000000000..0f4ee52e8
--- /dev/null
+++ b/constraint/vendor/github.com/fsnotify/fsnotify/fsnotify.go
@@ -0,0 +1,69 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !plan9
+// +build !plan9
+
+// Package fsnotify provides a platform-independent interface for file system notifications.
+package fsnotify
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+)
+
+// Event represents a single file system notification.
+type Event struct {
+	Name string // Relative path to the file or directory.
+	Op   Op     // File operation that triggered the event.
+}
+
+// Op describes a set of file operations.
+type Op uint32
+
+// These are the generalized file operations that can trigger a notification.
+const (
+	Create Op = 1 << iota
+	Write
+	Remove
+	Rename
+	Chmod
+)
+
+func (op Op) String() string {
+	// Use a buffer for efficient string concatenation
+	var buffer bytes.Buffer
+
+	if op&Create == Create {
+		buffer.WriteString("|CREATE")
+	}
+	if op&Remove == Remove {
+		buffer.WriteString("|REMOVE")
+	}
+	if op&Write == Write {
+		buffer.WriteString("|WRITE")
+	}
+	if op&Rename == Rename {
+		buffer.WriteString("|RENAME")
+	}
+	if op&Chmod == Chmod {
+		buffer.WriteString("|CHMOD")
+	}
+	if buffer.Len() == 0 {
+		return ""
+	}
+	return buffer.String()[1:] // Strip leading pipe
+}
+
+// String returns a string representation of the event in the form
+// "file: REMOVE|WRITE|..."
+func (e Event) String() string {
+	return fmt.Sprintf("%q: %s", e.Name, e.Op.String())
+}
+
+// Common errors that can be reported by a watcher
+var (
+	ErrEventOverflow = errors.New("fsnotify queue overflow")
+)
diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go b/constraint/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go
new file mode 100644
index 000000000..596885598
--- /dev/null
+++ b/constraint/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go
@@ -0,0 +1,36 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows
+// +build !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows
+
+package fsnotify
+
+import (
+	"fmt"
+	"runtime"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct{}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+	return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS)
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+	return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+	return nil
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+	return nil
+}
diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/inotify.go b/constraint/vendor/github.com/fsnotify/fsnotify/inotify.go
new file mode 100644
index 000000000..a6d0e0ec8
--- /dev/null
+++ b/constraint/vendor/github.com/fsnotify/fsnotify/inotify.go
@@ -0,0 +1,351 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux
+// +build linux
+
+package fsnotify
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+	Events   chan Event
+	Errors   chan error
+	mu       sync.Mutex // Map access
+	fd       int
+	poller   *fdPoller
+	watches  map[string]*watch // Map of inotify watches (key: path)
+	paths    map[int]string    // Map of watched paths (key: watch descriptor)
+	done     chan struct{}     // Channel for sending a "quit message" to the reader goroutine
+	doneResp chan struct{}     // Channel to respond to Close
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+	// Create inotify fd
+	fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC)
+	if fd == -1 {
+		return nil, errno
+	}
+	// Create epoll
+	poller, err := newFdPoller(fd)
+	if err != nil {
+		unix.Close(fd)
+		return nil, err
+	}
+	w := &Watcher{
+		fd:       fd,
+		poller:   poller,
+		watches:  make(map[string]*watch),
+		paths:    make(map[int]string),
+		Events:   make(chan Event),
+		Errors:   make(chan error),
+		done:     make(chan struct{}),
+		doneResp: make(chan struct{}),
+	}
+
+	go w.readEvents()
+	return w, nil
+}
+
+func (w *Watcher) isClosed() bool {
+	select {
+	case <-w.done:
+		return true
+	default:
+		return false
+	}
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+	if w.isClosed() {
+		return nil
+	}
+
+	// Send 'close' signal to goroutine, and set the Watcher to closed.
+	close(w.done)
+
+	// Wake up goroutine
+	w.poller.wake()
+
+	// Wait for goroutine to close
+	<-w.doneResp
+
+	return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+	name = filepath.Clean(name)
+	if w.isClosed() {
+		return errors.New("inotify instance already closed")
+	}
+
+	const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
+		unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
+		unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
+
+	var flags uint32 = agnosticEvents
+
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	watchEntry := w.watches[name]
+	if watchEntry != nil {
+		flags |= watchEntry.flags | unix.IN_MASK_ADD
+	}
+	wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
+	if wd == -1 {
+		return errno
+	}
+
+	if watchEntry == nil {
+		w.watches[name] = &watch{wd: uint32(wd), flags: flags}
+		w.paths[wd] = name
+	} else {
+		watchEntry.wd = uint32(wd)
+		watchEntry.flags = flags
+	}
+
+	return nil
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+	name = filepath.Clean(name)
+
+	// Fetch the watch.
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	watch, ok := w.watches[name]
+
+	// Remove it from inotify.
+	if !ok {
+		return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
+	}
+
+	// We successfully removed the watch if InotifyRmWatch doesn't return an
+	// error; we need to clean up our internal state to ensure it matches
+	// inotify's kernel state.
+	delete(w.paths, int(watch.wd))
+	delete(w.watches, name)
+
+	// inotify_rm_watch will return EINVAL if the file has been deleted;
+	// the inotify will already have been removed.
+	// watches and paths are deleted in ignoreLinux() implicitly and asynchronously
+	// by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE
+	// so that EINVAL means that the wd is being rm_watch()ed or its file removed
+	// by another thread and we have not received IN_IGNORE event.
+	success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
+	if success == -1 {
+		// TODO: Perhaps it's not helpful to return an error here in every case.
+		// the only two possible errors are:
+		// EBADF, which happens when w.fd is not a valid file descriptor of any kind.
+		// EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor.
+		// Watch descriptors are invalidated when they are removed explicitly or implicitly;
+		// explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted.
+		return errno
+	}
+
+	return nil
+}
+
+// WatchList returns the directories and files that are being monitored.
+func (w *Watcher) WatchList() []string {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	entries := make([]string, 0, len(w.watches))
+	for pathname := range w.watches {
+		entries = append(entries, pathname)
+	}
+
+	return entries
+}
+
+type watch struct {
+	wd    uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
+	flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
+}
+
+// readEvents reads from the inotify file descriptor, converts the
+// received events into Event objects and sends them via the Events channel
+func (w *Watcher) readEvents() {
+	var (
+		buf   [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
+		n     int                                  // Number of bytes read with read()
+		errno error                                // Syscall errno
+		ok    bool                                 // For poller.wait
+	)
+
+	defer close(w.doneResp)
+	defer close(w.Errors)
+	defer close(w.Events)
+	defer unix.Close(w.fd)
+	defer w.poller.close()
+
+	for {
+		// See if we have been closed.
+		if w.isClosed() {
+			return
+		}
+
+		ok, errno = w.poller.wait()
+		if errno != nil {
+			select {
+			case w.Errors <- errno:
+			case <-w.done:
+				return
+			}
+			continue
+		}
+
+		if !ok {
+			continue
+		}
+
+		n, errno = unix.Read(w.fd, buf[:])
+		// If a signal interrupted execution, see if we've been asked to close, and try again.
+		// http://man7.org/linux/man-pages/man7/signal.7.html :
+		// "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable"
+		if errno == unix.EINTR {
+			continue
+		}
+
+		// unix.Read might have been woken up by Close. If so, we're done.
+		if w.isClosed() {
+			return
+		}
+
+		if n < unix.SizeofInotifyEvent {
+			var err error
+			if n == 0 {
+				// If EOF is received. This should really never happen.
+				err = io.EOF
+			} else if n < 0 {
+				// If an error occurred while reading.
+				err = errno
+			} else {
+				// Read was too short.
+				err = errors.New("notify: short read in readEvents()")
+			}
+			select {
+			case w.Errors <- err:
+			case <-w.done:
+				return
+			}
+			continue
+		}
+
+		var offset uint32
+		// We don't know how many events we just read into the buffer
+		// While the offset points to at least one whole event...
+		for offset <= uint32(n-unix.SizeofInotifyEvent) {
+			// Point "raw" to the event in the buffer
+			raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
+
+			mask := uint32(raw.Mask)
+			nameLen := uint32(raw.Len)
+
+			if mask&unix.IN_Q_OVERFLOW != 0 {
+				select {
+				case w.Errors <- ErrEventOverflow:
+				case <-w.done:
+					return
+				}
+			}
+
+			// If the event happened to the watched directory or the watched file, the kernel
+			// doesn't append the filename to the event, but we would like to always fill
+			// the "Name" field with a valid filename. We retrieve the path of the watch from
+			// the "paths" map.
+			w.mu.Lock()
+			name, ok := w.paths[int(raw.Wd)]
+			// IN_DELETE_SELF occurs when the file/directory being watched is removed.
+			// This is a sign to clean up the maps, otherwise we are no longer in sync
+			// with the inotify kernel state which has already deleted the watch
+			// automatically.
+			if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
+				delete(w.paths, int(raw.Wd))
+				delete(w.watches, name)
+			}
+			w.mu.Unlock()
+
+			if nameLen > 0 {
+				// Point "bytes" at the first byte of the filename
+				bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
+				// The filename is padded with NULL bytes. TrimRight() gets rid of those.
				name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
+			}
+
+			event := newEvent(name, mask)
+
+			// Send the events that are not ignored on the events channel
+			if !event.ignoreLinux(mask) {
+				select {
+				case w.Events <- event:
+				case <-w.done:
+					return
+				}
+			}
+
+			// Move to the next event in the buffer
+			offset += unix.SizeofInotifyEvent + nameLen
+		}
+	}
+}
+
+// Certain types of events can be "ignored" and not sent over the Events
+// channel. Such as events marked ignore by the kernel, or MODIFY events
+// against files that do not exist.
+func (e *Event) ignoreLinux(mask uint32) bool {
+	// Ignore anything the inotify API says to ignore
+	if mask&unix.IN_IGNORED == unix.IN_IGNORED {
+		return true
+	}
+
+	// If the event is not a DELETE or RENAME, the file must exist.
+	// Otherwise the event is ignored.
+	// *Note*: this was put in place because it was seen that a MODIFY
+	// event was sent after the DELETE. This ignores that MODIFY and
+	// assumes a DELETE will come or has come if the file doesn't exist.
+	if !(e.Op&Remove == Remove || e.Op&Rename == Rename) {
+		_, statErr := os.Lstat(e.Name)
+		return os.IsNotExist(statErr)
+	}
+	return false
+}
+
+// newEvent returns a platform-independent Event based on an inotify mask.
+func newEvent(name string, mask uint32) Event {
+	e := Event{Name: name}
+	if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
+		e.Op |= Create
+	}
+	if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
+		e.Op |= Remove
+	}
+	if mask&unix.IN_MODIFY == unix.IN_MODIFY {
+		e.Op |= Write
+	}
+	if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
+		e.Op |= Rename
+	}
+	if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
+		e.Op |= Chmod
+	}
+	return e
+}
diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/constraint/vendor/github.com/fsnotify/fsnotify/inotify_poller.go
new file mode 100644
index 000000000..b572a37c3
--- /dev/null
+++ b/constraint/vendor/github.com/fsnotify/fsnotify/inotify_poller.go
@@ -0,0 +1,187 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux
+// +build linux
+
+package fsnotify
+
+import (
+	"errors"
+
+	"golang.org/x/sys/unix"
+)
+
+type fdPoller struct {
+	fd   int    // File descriptor (as returned by the inotify_init() syscall)
+	epfd int    // Epoll file descriptor
+	pipe [2]int // Pipe for waking up
+}
+
+func emptyPoller(fd int) *fdPoller {
+	poller := new(fdPoller)
+	poller.fd = fd
+	poller.epfd = -1
+	poller.pipe[0] = -1
+	poller.pipe[1] = -1
+	return poller
+}
+
+// Create a new inotify poller.
+// This creates an inotify handler, and an epoll handler.
+func newFdPoller(fd int) (*fdPoller, error) {
+	var errno error
+	poller := emptyPoller(fd)
+	defer func() {
+		if errno != nil {
+			poller.close()
+		}
+	}()
+
+	// Create epoll fd
+	poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC)
+	if poller.epfd == -1 {
+		return nil, errno
+	}
+	// Create pipe; pipe[0] is the read end, pipe[1] the write end.
	errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC)
+	if errno != nil {
+		return nil, errno
+	}
+
+	// Register inotify fd with epoll
+	event := unix.EpollEvent{
+		Fd:     int32(poller.fd),
+		Events: unix.EPOLLIN,
+	}
+	errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event)
+	if errno != nil {
+		return nil, errno
+	}
+
+	// Register pipe fd with epoll
+	event = unix.EpollEvent{
+		Fd:     int32(poller.pipe[0]),
+		Events: unix.EPOLLIN,
+	}
+	errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event)
+	if errno != nil {
+		return nil, errno
+	}
+
+	return poller, nil
+}
+
+// Wait using epoll.
+// Returns true if something is ready to be read,
+// false if there is not.
+func (poller *fdPoller) wait() (bool, error) {
+	// 3 possible events per fd, and 2 fds, makes a maximum of 6 events.
+	// I don't know whether epoll_wait returns the number of events returned,
+	// or the total number of events ready.
+	// I decided to catch both by making the buffer one larger than the maximum.
+	events := make([]unix.EpollEvent, 7)
+	for {
+		n, errno := unix.EpollWait(poller.epfd, events, -1)
+		if n == -1 {
+			if errno == unix.EINTR {
+				continue
+			}
+			return false, errno
+		}
+		if n == 0 {
+			// If there are no events, try again.
+			continue
+		}
+		if n > 6 {
+			// This should never happen. More events were returned than should be possible.
+			return false, errors.New("epoll_wait returned more events than I know what to do with")
+		}
+		ready := events[:n]
+		epollhup := false
+		epollerr := false
+		epollin := false
+		for _, event := range ready {
+			if event.Fd == int32(poller.fd) {
+				if event.Events&unix.EPOLLHUP != 0 {
+					// This should not happen, but if it does, treat it as a wakeup.
+					epollhup = true
+				}
+				if event.Events&unix.EPOLLERR != 0 {
+					// If an error is waiting on the file descriptor, we should pretend
+					// something is ready to read, and let unix.Read pick up the error.
+					epollerr = true
+				}
+				if event.Events&unix.EPOLLIN != 0 {
+					// There is data to read.
+					epollin = true
+				}
+			}
+			if event.Fd == int32(poller.pipe[0]) {
+				if event.Events&unix.EPOLLHUP != 0 {
+					// Write pipe descriptor was closed, by us. This means we're closing down the
+					// watcher, and we should wake up.
+				}
+				if event.Events&unix.EPOLLERR != 0 {
+					// If an error is waiting on the pipe file descriptor.
+					// This is an absolute mystery, and should never ever happen.
+					return false, errors.New("Error on the pipe descriptor.")
+				}
+				if event.Events&unix.EPOLLIN != 0 {
+					// This is a regular wakeup, so we have to clear the buffer.
+					err := poller.clearWake()
+					if err != nil {
+						return false, err
+					}
+				}
+			}
+		}
+
+		if epollhup || epollerr || epollin {
+			return true, nil
+		}
+		return false, nil
+	}
+}
+
+// Wake up the poller by writing a byte to the write end of the pipe.
+func (poller *fdPoller) wake() error {
+	buf := make([]byte, 1)
+	n, errno := unix.Write(poller.pipe[1], buf)
+	if n == -1 {
+		if errno == unix.EAGAIN {
+			// Buffer is full, poller will wake.
+			return nil
+		}
+		return errno
+	}
+	return nil
+}
+
+func (poller *fdPoller) clearWake() error {
+	// You have to be woken up a LOT in order to get to 100!
+	buf := make([]byte, 100)
+	n, errno := unix.Read(poller.pipe[0], buf)
+	if n == -1 {
+		if errno == unix.EAGAIN {
+			// Buffer is empty, someone else cleared our wake.
+			return nil
+		}
+		return errno
+	}
+	return nil
+}
+
+// Close all poller file descriptors, but not the one passed to it.
+func (poller *fdPoller) close() {
+	if poller.pipe[1] != -1 {
+		unix.Close(poller.pipe[1])
+	}
+	if poller.pipe[0] != -1 {
+		unix.Close(poller.pipe[0])
+	}
+	if poller.epfd != -1 {
+		unix.Close(poller.epfd)
+	}
+}
diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/kqueue.go b/constraint/vendor/github.com/fsnotify/fsnotify/kqueue.go
new file mode 100644
index 000000000..6fb8d8532
--- /dev/null
+++ b/constraint/vendor/github.com/fsnotify/fsnotify/kqueue.go
@@ -0,0 +1,535 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build freebsd || openbsd || netbsd || dragonfly || darwin
+// +build freebsd openbsd netbsd dragonfly darwin
+
+package fsnotify
+
+import (
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sync"
+	"time"
+
+	"golang.org/x/sys/unix"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+	Events chan Event
+	Errors chan error
+	done   chan struct{} // Channel for sending a "quit message" to the reader goroutine
+
+	kq int // File descriptor (as returned by the kqueue() syscall).
+
+	mu              sync.Mutex        // Protects access to watcher data
+	watches         map[string]int    // Map of watched file descriptors (key: path).
+	externalWatches map[string]bool   // Map of watches added by user of the library.
+	dirFlags        map[string]uint32 // Map of watched directories to fflags used in kqueue.
+	paths           map[int]pathInfo  // Map file descriptors to path names for processing kqueue events.
+	fileExists      map[string]bool   // Keep track of if we know this file exists (to stop duplicate create events).
+	isClosed        bool              // Set to true when Close() is first called
+}
+
+type pathInfo struct {
+	name  string
+	isDir bool
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+	kq, err := kqueue()
+	if err != nil {
+		return nil, err
+	}
+
+	w := &Watcher{
+		kq:              kq,
+		watches:         make(map[string]int),
+		dirFlags:        make(map[string]uint32),
+		paths:           make(map[int]pathInfo),
+		fileExists:      make(map[string]bool),
+		externalWatches: make(map[string]bool),
+		Events:          make(chan Event),
+		Errors:          make(chan error),
+		done:            make(chan struct{}),
+	}
+
+	go w.readEvents()
+	return w, nil
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+	w.mu.Lock()
+	if w.isClosed {
+		w.mu.Unlock()
+		return nil
+	}
+	w.isClosed = true
+
+	// copy paths to remove while locked
+	var pathsToRemove = make([]string, 0, len(w.watches))
+	for name := range w.watches {
+		pathsToRemove = append(pathsToRemove, name)
+	}
+	w.mu.Unlock()
+	// unlock before calling Remove, which also locks
+
+	for _, name := range pathsToRemove {
+		w.Remove(name)
+	}
+
+	// send a "quit" message to the reader goroutine
+	close(w.done)
+
+	return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+	w.mu.Lock()
+	w.externalWatches[name] = true
+	w.mu.Unlock()
+	_, err := w.addWatch(name, noteAllEvents)
+	return err
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+	name = filepath.Clean(name)
+	w.mu.Lock()
+	watchfd, ok := w.watches[name]
+	w.mu.Unlock()
+	if !ok {
+		return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
+	}
+
+	const registerRemove = unix.EV_DELETE
+	if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil {
+		return err
+	}
+
+	unix.Close(watchfd)
+
+	w.mu.Lock()
+	isDir := w.paths[watchfd].isDir
+	delete(w.watches, name)
+	delete(w.paths, watchfd)
+	delete(w.dirFlags, name)
+	w.mu.Unlock()
+
+	// Find all watched paths that are in this directory that are not external.
+	if isDir {
+		var pathsToRemove []string
+		w.mu.Lock()
+		for _, path := range w.paths {
+			wdir, _ := filepath.Split(path.name)
+			if filepath.Clean(wdir) == name {
+				if !w.externalWatches[path.name] {
+					pathsToRemove = append(pathsToRemove, path.name)
+				}
+			}
+		}
+		w.mu.Unlock()
+		for _, name := range pathsToRemove {
+			// Since these are internal, not much sense in propagating error
+			// to the user, as that will just confuse them with an error about
+			// a path they did not explicitly watch themselves.
+			w.Remove(name)
+		}
+	}
+
+	return nil
+}
+
+// WatchList returns the directories and files that are being monitored.
+func (w *Watcher) WatchList() []string {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	entries := make([]string, 0, len(w.watches))
+	for pathname := range w.watches {
+		entries = append(entries, pathname)
+	}
+
+	return entries
+}
+
+// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
+const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
+
+// keventWaitTime to block on each read from kevent
+var keventWaitTime = durationToTimespec(100 * time.Millisecond)
+
+// addWatch adds name to the watched file set.
+// The flags are interpreted as described in kevent(2).
+// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks.
+func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
+	var isDir bool
+	// Make ./name and name equivalent
+	name = filepath.Clean(name)
+
+	w.mu.Lock()
+	if w.isClosed {
+		w.mu.Unlock()
+		return "", errors.New("kevent instance already closed")
+	}
+	watchfd, alreadyWatching := w.watches[name]
+	// We already have a watch, but we can still override flags.
+	if alreadyWatching {
+		isDir = w.paths[watchfd].isDir
+	}
+	w.mu.Unlock()
+
+	if !alreadyWatching {
+		fi, err := os.Lstat(name)
+		if err != nil {
+			return "", err
+		}
+
+		// Don't watch sockets.
+		if fi.Mode()&os.ModeSocket == os.ModeSocket {
+			return "", nil
+		}
+
+		// Don't watch named pipes.
+		if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe {
+			return "", nil
+		}
+
+		// Follow Symlinks
+		// Unfortunately, Linux can add bogus symlinks to watch list without
+		// issue, and Windows can't do symlinks period (AFAIK). To maintain
+		// consistency, we will act like everything is fine. There will simply
+		// be no file events for broken symlinks.
+		// Hence the returns of nil on errors.
+		if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
+			name, err = filepath.EvalSymlinks(name)
+			if err != nil {
+				return "", nil
+			}
+
+			w.mu.Lock()
+			_, alreadyWatching = w.watches[name]
+			w.mu.Unlock()
+
+			if alreadyWatching {
+				return name, nil
+			}
+
+			fi, err = os.Lstat(name)
+			if err != nil {
+				return "", nil
+			}
+		}
+
+		watchfd, err = unix.Open(name, openMode, 0700)
+		if watchfd == -1 {
+			return "", err
+		}
+
+		isDir = fi.IsDir()
+	}
+
+	const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE
+	if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil {
+		unix.Close(watchfd)
+		return "", err
+	}
+
+	if !alreadyWatching {
+		w.mu.Lock()
+		w.watches[name] = watchfd
+		w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
+		w.mu.Unlock()
+	}
+
+	if isDir {
+		// Watch the directory if it has not been watched before,
+		// or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
+		w.mu.Lock()
+
+		watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
+			(!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE)
+		// Store flags so this watch can be updated later
+		w.dirFlags[name] = flags
+		w.mu.Unlock()
+
+		if watchDir {
+			if err := w.watchDirectoryFiles(name); err != nil {
+				return "", err
+			}
+		}
+	}
+	return name, nil
+}
+
+// readEvents reads from kqueue and converts the received kevents into
+// Event values that it sends down the Events channel.
+func (w *Watcher) readEvents() {
+	eventBuffer := make([]unix.Kevent_t, 10)
+
+loop:
+	for {
+		// See if there is a message on the "done" channel
+		select {
+		case <-w.done:
+			break loop
+		default:
+		}
+
+		// Get new events
+		kevents, err := read(w.kq, eventBuffer, &keventWaitTime)
+		// EINTR is okay, the syscall was interrupted before timeout expired.
+		if err != nil && err != unix.EINTR {
+			select {
+			case w.Errors <- err:
+			case <-w.done:
+				break loop
+			}
+			continue
+		}
+
+		// Flush the events we received to the Events channel
+		for len(kevents) > 0 {
+			kevent := &kevents[0]
+			watchfd := int(kevent.Ident)
+			mask := uint32(kevent.Fflags)
+			w.mu.Lock()
+			path := w.paths[watchfd]
+			w.mu.Unlock()
+			event := newEvent(path.name, mask)
+
+			if path.isDir && !(event.Op&Remove == Remove) {
+				// Double check to make sure the directory exists. This can happen
+				// when we do a rm -fr on a recursively watched folder: we may receive
+				// the modification event first, even though the folder has been
+				// deleted, and only receive the delete event later.
+				if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
+					// mark it as a delete event
+					event.Op |= Remove
+				}
+			}
+
+			if event.Op&Rename == Rename || event.Op&Remove == Remove {
+				w.Remove(event.Name)
+				w.mu.Lock()
+				delete(w.fileExists, event.Name)
+				w.mu.Unlock()
+			}
+
+			if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
+				w.sendDirectoryChangeEvents(event.Name)
+			} else {
+				// Send the event on the Events channel.
+				select {
+				case w.Events <- event:
+				case <-w.done:
+					break loop
+				}
+			}
+
+			if event.Op&Remove == Remove {
+				// Look for a file that may have overwritten this.
+				// For example, mv f1 f2 will delete f2, then create f2.
+				if path.isDir {
+					fileDir := filepath.Clean(event.Name)
+					w.mu.Lock()
+					_, found := w.watches[fileDir]
+					w.mu.Unlock()
+					if found {
+						// make sure the directory exists before we watch for changes. When we
+						// do a recursive watch and perform rm -fr, the parent directory might
+						// have gone missing, ignore the missing directory and let the
+						// upcoming delete event remove the watch from the parent directory.
+						if _, err := os.Lstat(fileDir); err == nil {
+							w.sendDirectoryChangeEvents(fileDir)
+						}
+					}
+				} else {
+					filePath := filepath.Clean(event.Name)
+					if fileInfo, err := os.Lstat(filePath); err == nil {
+						w.sendFileCreatedEventIfNew(filePath, fileInfo)
+					}
+				}
+			}
+
+			// Move to next event
+			kevents = kevents[1:]
+		}
+	}
+
+	// cleanup
+	err := unix.Close(w.kq)
+	if err != nil {
+		// the only way the previous loop exits is if w.done was closed, so we
+		// need a non-blocking send to w.Errors.
+		select {
+		case w.Errors <- err:
+		default:
+		}
+	}
+	close(w.Events)
+	close(w.Errors)
+}
+
+// newEvent returns a platform-independent Event based on kqueue Fflags.
+func newEvent(name string, mask uint32) Event {
+	e := Event{Name: name}
+	if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
+		e.Op |= Remove
+	}
+	if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
+		e.Op |= Write
+	}
+	if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
+		e.Op |= Rename
+	}
+	if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
+		e.Op |= Chmod
+	}
+	return e
+}
+
+func newCreateEvent(name string) Event {
+	return Event{Name: name, Op: Create}
+}
+
+// watchDirectoryFiles to mimic inotify when adding a watch on a directory
+func (w *Watcher) watchDirectoryFiles(dirPath string) error {
+	// Get all files
+	files, err := ioutil.ReadDir(dirPath)
+	if err != nil {
+		return err
+	}
+
+	for _, fileInfo := range files {
+		filePath := filepath.Join(dirPath, fileInfo.Name())
+		filePath, err = w.internalWatch(filePath, fileInfo)
+		if err != nil {
+			return err
+		}
+
+		w.mu.Lock()
+		w.fileExists[filePath] = true
+		w.mu.Unlock()
+	}
+
+	return nil
+}
+
+// sendDirectoryChangeEvents searches the directory for newly created files
+// and sends them over the event channel. This functionality is to have
+// the BSD version of fsnotify match Linux inotify which provides a
+// create event for files created in a watched directory.
+func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
+	// Get all files
+	files, err := ioutil.ReadDir(dirPath)
+	if err != nil {
+		select {
+		case w.Errors <- err:
+		case <-w.done:
+			return
+		}
+	}
+
+	// Search for new files
+	for _, fileInfo := range files {
+		filePath := filepath.Join(dirPath, fileInfo.Name())
+		err := w.sendFileCreatedEventIfNew(filePath, fileInfo)
+
+		if err != nil {
+			return
+		}
+	}
+}
+
+// sendFileCreatedEventIfNew sends a create event if the file isn't already being tracked.
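+// (Editor's note: because sendDirectoryChangeEvents re-lists the whole
+// directory, the fileExists map above is what keeps an unchanged file from
+// producing a duplicate Create event on every pass; readEvents drops the map
+// entry again once a Remove or Rename for that path is observed.)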
+func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { + w.mu.Lock() + _, doesExist := w.fileExists[filePath] + w.mu.Unlock() + if !doesExist { + // Send create event + select { + case w.Events <- newCreateEvent(filePath): + case <-w.done: + return + } + } + + // like watchDirectoryFiles (but without doing another ReadDir) + filePath, err = w.internalWatch(filePath, fileInfo) + if err != nil { + return err + } + + w.mu.Lock() + w.fileExists[filePath] = true + w.mu.Unlock() + + return nil +} + +func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { + if fileInfo.IsDir() { + // mimic Linux providing delete events for subdirectories + // but preserve the flags used if currently watching subdirectory + w.mu.Lock() + flags := w.dirFlags[name] + w.mu.Unlock() + + flags |= unix.NOTE_DELETE | unix.NOTE_RENAME + return w.addWatch(name, flags) + } + + // watch file to mimic Linux inotify + return w.addWatch(name, noteAllEvents) +} + +// kqueue creates a new kernel event queue and returns a descriptor. +func kqueue() (kq int, err error) { + kq, err = unix.Kqueue() + if kq == -1 { + return kq, err + } + return kq, nil +} + +// register events with the queue +func register(kq int, fds []int, flags int, fflags uint32) error { + changes := make([]unix.Kevent_t, len(fds)) + + for i, fd := range fds { + // SetKevent converts int to the platform-specific types: + unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) + changes[i].Fflags = fflags + } + + // register the events + success, err := unix.Kevent(kq, changes, nil, nil) + if success == -1 { + return err + } + return nil +} + +// read retrieves pending events, or waits until an event occurs. +// A timeout of nil blocks indefinitely, while 0 polls the queue. +func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) { + n, err := unix.Kevent(kq, nil, events, timeout) + if err != nil { + return nil, err + } + return events[0:n], nil +} + +// durationToTimespec prepares a timeout value +func durationToTimespec(d time.Duration) unix.Timespec { + return unix.NsecToTimespec(d.Nanoseconds()) +} diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/constraint/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go new file mode 100644 index 000000000..36cc3845b --- /dev/null +++ b/constraint/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go @@ -0,0 +1,12 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build freebsd || openbsd || netbsd || dragonfly +// +build freebsd openbsd netbsd dragonfly + +package fsnotify + +import "golang.org/x/sys/unix" + +const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/constraint/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go new file mode 100644 index 000000000..98cd8476f --- /dev/null +++ b/constraint/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go @@ -0,0 +1,13 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+//go:build darwin
+// +build darwin
+
+package fsnotify
+
+import "golang.org/x/sys/unix"
+
+// note: this constant is not defined on BSD
+const openMode = unix.O_EVTONLY | unix.O_CLOEXEC
diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/windows.go b/constraint/vendor/github.com/fsnotify/fsnotify/windows.go
new file mode 100644
index 000000000..02ce7deb0
--- /dev/null
+++ b/constraint/vendor/github.com/fsnotify/fsnotify/windows.go
@@ -0,0 +1,586 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+// +build windows
+
+package fsnotify
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"reflect"
+	"runtime"
+	"sync"
+	"syscall"
+	"unsafe"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+	Events   chan Event
+	Errors   chan error
+	isClosed bool           // Set to true when Close() is first called
+	mu       sync.Mutex     // Map access
+	port     syscall.Handle // Handle to completion port
+	watches  watchMap       // Map of watches (key: i-number)
+	input    chan *input    // Inputs to the reader are sent on this channel
+	quit     chan chan<- error
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+	port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
+	if e != nil {
+		return nil, os.NewSyscallError("CreateIoCompletionPort", e)
+	}
+	w := &Watcher{
+		port:    port,
+		watches: make(watchMap),
+		input:   make(chan *input, 1),
+		Events:  make(chan Event, 50),
+		Errors:  make(chan error),
+		quit:    make(chan chan<- error, 1),
+	}
+	go w.readEvents()
+	return w, nil
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+	if w.isClosed {
+		return nil
+	}
+	w.isClosed = true
+
+	// Send "quit" message to the reader goroutine
+	ch := make(chan error)
+	w.quit <- ch
+	if err := w.wakeupReader(); err != nil {
+		return err
+	}
+	return <-ch
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+	if w.isClosed {
+		return errors.New("watcher already closed")
+	}
+	in := &input{
+		op:    opAddWatch,
+		path:  filepath.Clean(name),
+		flags: sysFSALLEVENTS,
+		reply: make(chan error),
+	}
+	w.input <- in
+	if err := w.wakeupReader(); err != nil {
+		return err
+	}
+	return <-in.reply
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+	in := &input{
+		op:    opRemoveWatch,
+		path:  filepath.Clean(name),
+		reply: make(chan error),
+	}
+	w.input <- in
+	if err := w.wakeupReader(); err != nil {
+		return err
+	}
+	return <-in.reply
+}
+
+// WatchList returns the directories and files that are being monitored.
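+// Illustrative usage (editor's note, not part of the vendored source):
+//
+//	w, _ := fsnotify.NewWatcher()
+//	_ = w.Add(`C:\logs`)
+//	fmt.Println(w.WatchList()) // [C:\logs]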
+func (w *Watcher) WatchList() []string { + w.mu.Lock() + defer w.mu.Unlock() + + entries := make([]string, 0, len(w.watches)) + for _, entry := range w.watches { + for _, watchEntry := range entry { + entries = append(entries, watchEntry.path) + } + } + + return entries +} + +const ( + // Options for AddWatch + sysFSONESHOT = 0x80000000 + sysFSONLYDIR = 0x1000000 + + // Events + sysFSACCESS = 0x1 + sysFSALLEVENTS = 0xfff + sysFSATTRIB = 0x4 + sysFSCLOSE = 0x18 + sysFSCREATE = 0x100 + sysFSDELETE = 0x200 + sysFSDELETESELF = 0x400 + sysFSMODIFY = 0x2 + sysFSMOVE = 0xc0 + sysFSMOVEDFROM = 0x40 + sysFSMOVEDTO = 0x80 + sysFSMOVESELF = 0x800 + + // Special events + sysFSIGNORED = 0x8000 + sysFSQOVERFLOW = 0x4000 +) + +func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { + e.Op |= Create + } + if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { + e.Op |= Remove + } + if mask&sysFSMODIFY == sysFSMODIFY { + e.Op |= Write + } + if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { + e.Op |= Rename + } + if mask&sysFSATTRIB == sysFSATTRIB { + e.Op |= Chmod + } + return e +} + +const ( + opAddWatch = iota + opRemoveWatch +) + +const ( + provisional uint64 = 1 << (32 + iota) +) + +type input struct { + op int + path string + flags uint32 + reply chan error +} + +type inode struct { + handle syscall.Handle + volume uint32 + index uint64 +} + +type watch struct { + ov syscall.Overlapped + ino *inode // i-number + path string // Directory path + mask uint64 // Directory itself is being watched with these notify flags + names map[string]uint64 // Map of names being watched and their notify flags + rename string // Remembers the old name while renaming a file + buf [4096]byte +} + +type indexMap map[uint64]*watch +type watchMap map[uint32]indexMap + +func (w *Watcher) wakeupReader() error { + e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil) + if e != nil { + return os.NewSyscallError("PostQueuedCompletionStatus", e) + } + return nil +} + +func getDir(pathname string) (dir string, err error) { + attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname)) + if e != nil { + return "", os.NewSyscallError("GetFileAttributes", e) + } + if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { + dir = pathname + } else { + dir, _ = filepath.Split(pathname) + dir = filepath.Clean(dir) + } + return +} + +func getIno(path string) (ino *inode, err error) { + h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path), + syscall.FILE_LIST_DIRECTORY, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + nil, syscall.OPEN_EXISTING, + syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0) + if e != nil { + return nil, os.NewSyscallError("CreateFile", e) + } + var fi syscall.ByHandleFileInformation + if e = syscall.GetFileInformationByHandle(h, &fi); e != nil { + syscall.CloseHandle(h) + return nil, os.NewSyscallError("GetFileInformationByHandle", e) + } + ino = &inode{ + handle: h, + volume: fi.VolumeSerialNumber, + index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), + } + return ino, nil +} + +// Must run within the I/O thread. +func (m watchMap) get(ino *inode) *watch { + if i := m[ino.volume]; i != nil { + return i[ino.index] + } + return nil +} + +// Must run within the I/O thread. 
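+// (Editor's note: watchMap is keyed first by the volume serial number and then
+// by the 64-bit file index, the pair that uniquely identifies a file on a
+// Windows volume, so set is effectively m[ino.volume][ino.index] = watch with
+// lazy creation of the inner map.)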
+func (m watchMap) set(ino *inode, watch *watch) { + i := m[ino.volume] + if i == nil { + i = make(indexMap) + m[ino.volume] = i + } + i[ino.index] = watch +} + +// Must run within the I/O thread. +func (w *Watcher) addWatch(pathname string, flags uint64) error { + dir, err := getDir(pathname) + if err != nil { + return err + } + if flags&sysFSONLYDIR != 0 && pathname != dir { + return nil + } + ino, err := getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watchEntry := w.watches.get(ino) + w.mu.Unlock() + if watchEntry == nil { + if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil { + syscall.CloseHandle(ino.handle) + return os.NewSyscallError("CreateIoCompletionPort", e) + } + watchEntry = &watch{ + ino: ino, + path: dir, + names: make(map[string]uint64), + } + w.mu.Lock() + w.watches.set(ino, watchEntry) + w.mu.Unlock() + flags |= provisional + } else { + syscall.CloseHandle(ino.handle) + } + if pathname == dir { + watchEntry.mask |= flags + } else { + watchEntry.names[filepath.Base(pathname)] |= flags + } + if err = w.startRead(watchEntry); err != nil { + return err + } + if pathname == dir { + watchEntry.mask &= ^provisional + } else { + watchEntry.names[filepath.Base(pathname)] &= ^provisional + } + return nil +} + +// Must run within the I/O thread. +func (w *Watcher) remWatch(pathname string) error { + dir, err := getDir(pathname) + if err != nil { + return err + } + ino, err := getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watch := w.watches.get(ino) + w.mu.Unlock() + if watch == nil { + return fmt.Errorf("can't remove non-existent watch for: %s", pathname) + } + if pathname == dir { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + watch.mask = 0 + } else { + name := filepath.Base(pathname) + w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + return w.startRead(watch) +} + +// Must run within the I/O thread. +func (w *Watcher) deleteWatch(watch *watch) { + for name, mask := range watch.names { + if mask&provisional == 0 { + w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + } + delete(watch.names, name) + } + if watch.mask != 0 { + if watch.mask&provisional == 0 { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + } + watch.mask = 0 + } +} + +// Must run within the I/O thread. 
+func (w *Watcher) startRead(watch *watch) error { + if e := syscall.CancelIo(watch.ino.handle); e != nil { + w.Errors <- os.NewSyscallError("CancelIo", e) + w.deleteWatch(watch) + } + mask := toWindowsFlags(watch.mask) + for _, m := range watch.names { + mask |= toWindowsFlags(m) + } + if mask == 0 { + if e := syscall.CloseHandle(watch.ino.handle); e != nil { + w.Errors <- os.NewSyscallError("CloseHandle", e) + } + w.mu.Lock() + delete(w.watches[watch.ino.volume], watch.ino.index) + w.mu.Unlock() + return nil + } + e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], + uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) + if e != nil { + err := os.NewSyscallError("ReadDirectoryChanges", e) + if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { + // Watched directory was probably removed + if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) { + if watch.mask&sysFSONESHOT != 0 { + watch.mask = 0 + } + } + err = nil + } + w.deleteWatch(watch) + w.startRead(watch) + return err + } + return nil +} + +// readEvents reads from the I/O completion port, converts the +// received events into Event objects and sends them via the Events channel. +// Entry point to the I/O thread. +func (w *Watcher) readEvents() { + var ( + n, key uint32 + ov *syscall.Overlapped + ) + runtime.LockOSThread() + + for { + e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE) + watch := (*watch)(unsafe.Pointer(ov)) + + if watch == nil { + select { + case ch := <-w.quit: + w.mu.Lock() + var indexes []indexMap + for _, index := range w.watches { + indexes = append(indexes, index) + } + w.mu.Unlock() + for _, index := range indexes { + for _, watch := range index { + w.deleteWatch(watch) + w.startRead(watch) + } + } + var err error + if e := syscall.CloseHandle(w.port); e != nil { + err = os.NewSyscallError("CloseHandle", e) + } + close(w.Events) + close(w.Errors) + ch <- err + return + case in := <-w.input: + switch in.op { + case opAddWatch: + in.reply <- w.addWatch(in.path, uint64(in.flags)) + case opRemoveWatch: + in.reply <- w.remWatch(in.path) + } + default: + } + continue + } + + switch e { + case syscall.ERROR_MORE_DATA: + if watch == nil { + w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer") + } else { + // The i/o succeeded but the buffer is full. + // In theory we should be building up a full packet. + // In practice we can get away with just carrying on. 
+				n = uint32(unsafe.Sizeof(watch.buf))
+			}
+		case syscall.ERROR_ACCESS_DENIED:
+			// Watched directory was probably removed
+			w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
+			w.deleteWatch(watch)
+			w.startRead(watch)
+			continue
+		case syscall.ERROR_OPERATION_ABORTED:
+			// CancelIo was called on this handle
+			continue
+		default:
+			w.Errors <- os.NewSyscallError("GetQueuedCompletionStatus", e)
+			continue
+		case nil:
+		}
+
+		var offset uint32
+		for {
+			if n == 0 {
+				w.Events <- newEvent("", sysFSQOVERFLOW)
+				w.Errors <- errors.New("short read in readEvents()")
+				break
+			}
+
+			// Point "raw" to the event in the buffer
+			raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
+			// TODO: Consider using unsafe.Slice that is available from go1.17
+			// https://stackoverflow.com/questions/51187973/how-to-create-an-array-or-a-slice-from-an-array-unsafe-pointer-in-golang
+			// instead of using a fixed syscall.MAX_PATH buf, we create a buf that is the size of the path name
+			size := int(raw.FileNameLength / 2)
+			var buf []uint16
+			sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+			sh.Data = uintptr(unsafe.Pointer(&raw.FileName))
+			sh.Len = size
+			sh.Cap = size
+			name := syscall.UTF16ToString(buf)
+			fullname := filepath.Join(watch.path, name)
+
+			var mask uint64
+			switch raw.Action {
+			case syscall.FILE_ACTION_REMOVED:
+				mask = sysFSDELETESELF
+			case syscall.FILE_ACTION_MODIFIED:
+				mask = sysFSMODIFY
+			case syscall.FILE_ACTION_RENAMED_OLD_NAME:
+				watch.rename = name
+			case syscall.FILE_ACTION_RENAMED_NEW_NAME:
+				if watch.names[watch.rename] != 0 {
+					watch.names[name] |= watch.names[watch.rename]
+					delete(watch.names, watch.rename)
+					mask = sysFSMOVESELF
+				}
+			}
+
+			sendNameEvent := func() {
+				if w.sendEvent(fullname, watch.names[name]&mask) {
+					if watch.names[name]&sysFSONESHOT != 0 {
+						delete(watch.names, name)
+					}
+				}
+			}
+			if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
+				sendNameEvent()
+			}
+			if raw.Action == syscall.FILE_ACTION_REMOVED {
+				w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
+				delete(watch.names, name)
+			}
+			if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
+				if watch.mask&sysFSONESHOT != 0 {
+					watch.mask = 0
+				}
+			}
+			if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
+				fullname = filepath.Join(watch.path, watch.rename)
+				sendNameEvent()
+			}
+
+			// Move to the next event in the buffer
+			if raw.NextEntryOffset == 0 {
+				break
+			}
+			offset += raw.NextEntryOffset
+
+			// Error!
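+			// (Editor's note: offset >= n means NextEntryOffset walked past
+			// the number of bytes the kernel actually wrote into watch.buf,
+			// so the remaining data cannot be trusted and the loop bails out.)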
+ if offset >= n { + w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.") + break + } + } + + if err := w.startRead(watch); err != nil { + w.Errors <- err + } + } +} + +func (w *Watcher) sendEvent(name string, mask uint64) bool { + if mask == 0 { + return false + } + event := newEvent(name, uint32(mask)) + select { + case ch := <-w.quit: + w.quit <- ch + case w.Events <- event: + } + return true +} + +func toWindowsFlags(mask uint64) uint32 { + var m uint32 + if mask&sysFSACCESS != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS + } + if mask&sysFSMODIFY != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE + } + if mask&sysFSATTRIB != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES + } + if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME + } + return m +} + +func toFSnotifyFlags(action uint32) uint64 { + switch action { + case syscall.FILE_ACTION_ADDED: + return sysFSCREATE + case syscall.FILE_ACTION_REMOVED: + return sysFSDELETE + case syscall.FILE_ACTION_MODIFIED: + return sysFSMODIFY + case syscall.FILE_ACTION_RENAMED_OLD_NAME: + return sysFSMOVEDFROM + case syscall.FILE_ACTION_RENAMED_NEW_NAME: + return sysFSMOVEDTO + } + return 0 +} diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go b/constraint/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go new file mode 100644 index 000000000..0d2f153bf --- /dev/null +++ b/constraint/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go @@ -0,0 +1,192 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "container/heap" + "sync" + "time" + + "k8s.io/utils/clock" +) + +// NewExpiring returns an initialized expiring cache. +func NewExpiring() *Expiring { + return NewExpiringWithClock(clock.RealClock{}) +} + +// NewExpiringWithClock is like NewExpiring but allows passing in a custom +// clock for testing. +func NewExpiringWithClock(clock clock.Clock) *Expiring { + return &Expiring{ + clock: clock, + cache: make(map[interface{}]entry), + } +} + +// Expiring is a map whose entries expire after a per-entry timeout. +type Expiring struct { + clock clock.Clock + + // mu protects the below fields + mu sync.RWMutex + // cache is the internal map that backs the cache. + cache map[interface{}]entry + // generation is used as a cheap resource version for cache entries. Cleanups + // are scheduled with a key and generation. When the cleanup runs, it first + // compares its generation with the current generation of the entry. It + // deletes the entry iff the generation matches. This prevents cleanups + // scheduled for earlier versions of an entry from deleting later versions of + // an entry when Set() is called multiple times with the same key. + // + // The integer value of the generation of an entry is meaningless. 
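+	// (Editor's note, an illustrative race this prevents: Set("k", v1, ttl)
+	// pushes a heap entry with generation 1; a later Set("k", v2, ttl) bumps
+	// the cache entry to generation 2; when the generation-1 heap entry is
+	// eventually popped, del("k", 1) no longer matches and v2 survives.)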
+ generation uint64 + + heap expiringHeap +} + +type entry struct { + val interface{} + expiry time.Time + generation uint64 +} + +// Get looks up an entry in the cache. +func (c *Expiring) Get(key interface{}) (val interface{}, ok bool) { + c.mu.RLock() + defer c.mu.RUnlock() + e, ok := c.cache[key] + if !ok || !c.clock.Now().Before(e.expiry) { + return nil, false + } + return e.val, true +} + +// Set sets a key/value/expiry entry in the map, overwriting any previous entry +// with the same key. The entry expires at the given expiry time, but its TTL +// may be lengthened or shortened by additional calls to Set(). Garbage +// collection of expired entries occurs during calls to Set(), however calls to +// Get() will not return expired entries that have not yet been garbage +// collected. +func (c *Expiring) Set(key interface{}, val interface{}, ttl time.Duration) { + now := c.clock.Now() + expiry := now.Add(ttl) + + c.mu.Lock() + defer c.mu.Unlock() + + c.generation++ + + c.cache[key] = entry{ + val: val, + expiry: expiry, + generation: c.generation, + } + + // Run GC inline before pushing the new entry. + c.gc(now) + + heap.Push(&c.heap, &expiringHeapEntry{ + key: key, + expiry: expiry, + generation: c.generation, + }) +} + +// Delete deletes an entry in the map. +func (c *Expiring) Delete(key interface{}) { + c.mu.Lock() + defer c.mu.Unlock() + c.del(key, 0) +} + +// del deletes the entry for the given key. The generation argument is the +// generation of the entry that should be deleted. If the generation has been +// changed (e.g. if a set has occurred on an existing element but the old +// cleanup still runs), this is a noop. If the generation argument is 0, the +// entry's generation is ignored and the entry is deleted. +// +// del must be called under the write lock. +func (c *Expiring) del(key interface{}, generation uint64) { + e, ok := c.cache[key] + if !ok { + return + } + if generation != 0 && generation != e.generation { + return + } + delete(c.cache, key) +} + +// Len returns the number of items in the cache. +func (c *Expiring) Len() int { + c.mu.RLock() + defer c.mu.RUnlock() + return len(c.cache) +} + +func (c *Expiring) gc(now time.Time) { + for { + // Return from gc if the heap is empty or the next element is not yet + // expired. + // + // heap[0] is a peek at the next element in the heap, which is not obvious + // from looking at the (*expiringHeap).Pop() implementation below. + // heap.Pop() swaps the first entry with the last entry of the heap, then + // calls (*expiringHeap).Pop() which returns the last element. + if len(c.heap) == 0 || now.Before(c.heap[0].expiry) { + return + } + cleanup := heap.Pop(&c.heap).(*expiringHeapEntry) + c.del(cleanup.key, cleanup.generation) + } +} + +type expiringHeapEntry struct { + key interface{} + expiry time.Time + generation uint64 +} + +// expiringHeap is a min-heap ordered by expiration time of its entries. The +// expiring cache uses this as a priority queue to efficiently organize entries +// which will be garbage collected once they expire. 
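+//
+// For context, an illustrative use of the Expiring cache that sits on top of
+// this heap (editor's note, not part of the vendored source):
+//
+//	c := cache.NewExpiring()
+//	c.Set("token", "abc", time.Minute)
+//	v, ok := c.Get("token") // "abc", true until the minute elapses
+//	c.Delete("token")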
+type expiringHeap []*expiringHeapEntry
+
+var _ heap.Interface = &expiringHeap{}
+
+func (cq expiringHeap) Len() int {
+	return len(cq)
+}
+
+func (cq expiringHeap) Less(i, j int) bool {
+	return cq[i].expiry.Before(cq[j].expiry)
+}
+
+func (cq expiringHeap) Swap(i, j int) {
+	cq[i], cq[j] = cq[j], cq[i]
+}
+
+func (cq *expiringHeap) Push(c interface{}) {
+	*cq = append(*cq, c.(*expiringHeapEntry))
+}
+
+func (cq *expiringHeap) Pop() interface{} {
+	c := (*cq)[cq.Len()-1]
+	*cq = (*cq)[:cq.Len()-1]
+	return c
+}
diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go b/constraint/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go
new file mode 100644
index 000000000..1328dd612
--- /dev/null
+++ b/constraint/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go
@@ -0,0 +1,160 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"container/list"
+	"sync"
+	"time"
+)
+
+// Clock defines an interface for obtaining the current time
+type Clock interface {
+	Now() time.Time
+}
+
+// realClock implements the Clock interface by calling time.Now()
+type realClock struct{}
+
+func (realClock) Now() time.Time { return time.Now() }
+
+// LRUExpireCache is a cache that ensures the most recently accessed keys are returned with
+// a ttl beyond which keys are forcibly expired.
+type LRUExpireCache struct {
+	// clock is used to obtain the current time
+	clock Clock
+
+	lock sync.Mutex
+
+	maxSize      int
+	evictionList list.List
+	entries      map[interface{}]*list.Element
+}
+
+// NewLRUExpireCache creates an expiring cache with the given size
+func NewLRUExpireCache(maxSize int) *LRUExpireCache {
+	return NewLRUExpireCacheWithClock(maxSize, realClock{})
+}
+
+// NewLRUExpireCacheWithClock creates an expiring cache with the given size, using the specified clock to obtain the current time.
+func NewLRUExpireCacheWithClock(maxSize int, clock Clock) *LRUExpireCache {
+	if maxSize <= 0 {
+		panic("maxSize must be > 0")
+	}
+
+	return &LRUExpireCache{
+		clock:   clock,
+		maxSize: maxSize,
+		entries: map[interface{}]*list.Element{},
+	}
+}
+
+type cacheEntry struct {
+	key        interface{}
+	value      interface{}
+	expireTime time.Time
+}
+
+// Add adds the value to the cache at key with the specified maximum duration.
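+// Illustrative usage (editor's note, not part of the vendored source):
+//
+//	c := cache.NewLRUExpireCache(128)
+//	c.Add("k", "v", 30*time.Second)
+//	v, ok := c.Get("k") // "v", true while fresh and not evicted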
+func (c *LRUExpireCache) Add(key interface{}, value interface{}, ttl time.Duration) { + c.lock.Lock() + defer c.lock.Unlock() + + // Key already exists + oldElement, ok := c.entries[key] + if ok { + c.evictionList.MoveToFront(oldElement) + oldElement.Value.(*cacheEntry).value = value + oldElement.Value.(*cacheEntry).expireTime = c.clock.Now().Add(ttl) + return + } + + // Make space if necessary + if c.evictionList.Len() >= c.maxSize { + toEvict := c.evictionList.Back() + c.evictionList.Remove(toEvict) + delete(c.entries, toEvict.Value.(*cacheEntry).key) + } + + // Add new entry + entry := &cacheEntry{ + key: key, + value: value, + expireTime: c.clock.Now().Add(ttl), + } + element := c.evictionList.PushFront(entry) + c.entries[key] = element +} + +// Get returns the value at the specified key from the cache if it exists and is not +// expired, or returns false. +func (c *LRUExpireCache) Get(key interface{}) (interface{}, bool) { + c.lock.Lock() + defer c.lock.Unlock() + + element, ok := c.entries[key] + if !ok { + return nil, false + } + + if c.clock.Now().After(element.Value.(*cacheEntry).expireTime) { + c.evictionList.Remove(element) + delete(c.entries, key) + return nil, false + } + + c.evictionList.MoveToFront(element) + + return element.Value.(*cacheEntry).value, true +} + +// Remove removes the specified key from the cache if it exists +func (c *LRUExpireCache) Remove(key interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + + element, ok := c.entries[key] + if !ok { + return + } + + c.evictionList.Remove(element) + delete(c.entries, key) +} + +// Keys returns all unexpired keys in the cache. +// +// Keep in mind that subsequent calls to Get() for any of the returned keys +// might return "not found". +// +// Keys are returned ordered from least recently used to most recently used. +func (c *LRUExpireCache) Keys() []interface{} { + c.lock.Lock() + defer c.lock.Unlock() + + now := c.clock.Now() + + val := make([]interface{}, 0, c.evictionList.Len()) + for element := c.evictionList.Back(); element != nil; element = element.Prev() { + // Only return unexpired keys + if !now.After(element.Value.(*cacheEntry).expireTime) { + val = append(val, element.Value.(*cacheEntry).key) + } + } + + return val +} diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go b/constraint/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go new file mode 100644 index 000000000..fa9ffa51b --- /dev/null +++ b/constraint/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go @@ -0,0 +1,157 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package diff + +import ( + "bytes" + "fmt" + "reflect" + "strings" + "text/tabwriter" + + "github.com/davecgh/go-spew/spew" + "github.com/google/go-cmp/cmp" +) + +// StringDiff diffs a and b and returns a human readable diff. 
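+// Illustrative output (editor's note): StringDiff("abcXdef", "abcYdef") yields
+// the common prefix followed by the two divergent tails:
+//
+//	abc
+//
+//	A: Xdef
+//
+//	B: Ydef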
+func StringDiff(a, b string) string { + ba := []byte(a) + bb := []byte(b) + out := []byte{} + i := 0 + for ; i < len(ba) && i < len(bb); i++ { + if ba[i] != bb[i] { + break + } + out = append(out, ba[i]) + } + out = append(out, []byte("\n\nA: ")...) + out = append(out, ba[i:]...) + out = append(out, []byte("\n\nB: ")...) + out = append(out, bb[i:]...) + out = append(out, []byte("\n\n")...) + return string(out) +} + +func legacyDiff(a, b interface{}) string { + return cmp.Diff(a, b) +} + +// ObjectDiff prints the diff of two go objects and fails if the objects +// contain unhandled unexported fields. +// DEPRECATED: use github.com/google/go-cmp/cmp.Diff +func ObjectDiff(a, b interface{}) string { + return legacyDiff(a, b) +} + +// ObjectGoPrintDiff prints the diff of two go objects and fails if the objects +// contain unhandled unexported fields. +// DEPRECATED: use github.com/google/go-cmp/cmp.Diff +func ObjectGoPrintDiff(a, b interface{}) string { + return legacyDiff(a, b) +} + +// ObjectReflectDiff prints the diff of two go objects and fails if the objects +// contain unhandled unexported fields. +// DEPRECATED: use github.com/google/go-cmp/cmp.Diff +func ObjectReflectDiff(a, b interface{}) string { + return legacyDiff(a, b) +} + +// ObjectGoPrintSideBySide prints a and b as textual dumps side by side, +// enabling easy visual scanning for mismatches. +func ObjectGoPrintSideBySide(a, b interface{}) string { + s := spew.ConfigState{ + Indent: " ", + // Extra deep spew. + DisableMethods: true, + } + sA := s.Sdump(a) + sB := s.Sdump(b) + + linesA := strings.Split(sA, "\n") + linesB := strings.Split(sB, "\n") + width := 0 + for _, s := range linesA { + l := len(s) + if l > width { + width = l + } + } + for _, s := range linesB { + l := len(s) + if l > width { + width = l + } + } + buf := &bytes.Buffer{} + w := tabwriter.NewWriter(buf, width, 0, 1, ' ', 0) + max := len(linesA) + if len(linesB) > max { + max = len(linesB) + } + for i := 0; i < max; i++ { + var a, b string + if i < len(linesA) { + a = linesA[i] + } + if i < len(linesB) { + b = linesB[i] + } + fmt.Fprintf(w, "%s\t%s\n", a, b) + } + w.Flush() + return buf.String() +} + +// IgnoreUnset is an option that ignores fields that are unset on the right +// hand side of a comparison. This is useful in testing to assert that an +// object is a derivative. 
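+// Illustrative usage (editor's note, not part of the vendored source; Config
+// is a hypothetical struct):
+//
+//	base := Config{Name: "a", Labels: map[string]string{"x": "1"}}
+//	derived := Config{Name: "a"} // Labels left unset
+//	diff := cmp.Diff(base, derived, IgnoreUnset()) // "": unset right-hand fields are ignored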
+func IgnoreUnset() cmp.Option { + return cmp.Options{ + // ignore unset fields in v2 + cmp.FilterPath(func(path cmp.Path) bool { + _, v2 := path.Last().Values() + switch v2.Kind() { + case reflect.Slice, reflect.Map: + if v2.IsNil() || v2.Len() == 0 { + return true + } + case reflect.String: + if v2.Len() == 0 { + return true + } + case reflect.Interface, reflect.Ptr: + if v2.IsNil() { + return true + } + } + return false + }, cmp.Ignore()), + // ignore map entries that aren't set in v2 + cmp.FilterPath(func(path cmp.Path) bool { + switch i := path.Last().(type) { + case cmp.MapIndex: + if _, v2 := i.Values(); !v2.IsValid() { + fmt.Println("E") + return true + } + } + return false + }, cmp.Ignore()), + } +} diff --git a/constraint/vendor/k8s.io/client-go/tools/cache/OWNERS b/constraint/vendor/k8s.io/client-go/tools/cache/OWNERS new file mode 100644 index 000000000..726205b3d --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/tools/cache/OWNERS @@ -0,0 +1,28 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - thockin + - lavalamp + - smarterclayton + - wojtek-t + - deads2k + - caesarxuchao + - liggitt + - ncdc +reviewers: + - thockin + - lavalamp + - smarterclayton + - wojtek-t + - deads2k + - derekwaynecarr + - caesarxuchao + - mikedanese + - liggitt + - janetkuo + - justinsb + - soltysh + - jsafrane + - dims + - ingvagabund + - ncdc diff --git a/constraint/vendor/k8s.io/client-go/tools/cache/controller.go b/constraint/vendor/k8s.io/client-go/tools/cache/controller.go new file mode 100644 index 000000000..ff4c22de0 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/tools/cache/controller.go @@ -0,0 +1,498 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "errors" + "sync" + "time" + + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/utils/clock" +) + +// This file implements a low-level controller that is used in +// sharedIndexInformer, which is an implementation of +// SharedIndexInformer. Such informers, in turn, are key components +// in the high level controllers that form the backbone of the +// Kubernetes control plane. Look at those for examples, or the +// example in +// https://github.com/kubernetes/client-go/tree/master/examples/workqueue +// . + +// Config contains all the settings for one of these low-level controllers. +type Config struct { + // The queue for your objects - has to be a DeltaFIFO due to + // assumptions in the implementation. Your Process() function + // should accept the output of this Queue's Pop() method. + Queue + + // Something that can list and watch your objects. + ListerWatcher + + // Something that can process a popped Deltas. + Process ProcessFunc + + // ObjectType is an example object of the type this controller is + // expected to handle. 
Only the type needs to be right, except + // that when that is `unstructured.Unstructured` the object's + // `"apiVersion"` and `"kind"` must also be right. + ObjectType runtime.Object + + // FullResyncPeriod is the period at which ShouldResync is considered. + FullResyncPeriod time.Duration + + // ShouldResync is periodically used by the reflector to determine + // whether to Resync the Queue. If ShouldResync is `nil` or + // returns true, it means the reflector should proceed with the + // resync. + ShouldResync ShouldResyncFunc + + // If true, when Process() returns an error, re-enqueue the object. + // TODO: add interface to let you inject a delay/backoff or drop + // the object completely if desired. Pass the object in + // question to this interface as a parameter. This is probably moot + // now that this functionality appears at a higher level. + RetryOnError bool + + // Called whenever the ListAndWatch drops the connection with an error. + WatchErrorHandler WatchErrorHandler + + // WatchListPageSize is the requested chunk size of initial and relist watch lists. + WatchListPageSize int64 +} + +// ShouldResyncFunc is a type of function that indicates if a reflector should perform a +// resync or not. It can be used by a shared informer to support multiple event handlers with custom +// resync periods. +type ShouldResyncFunc func() bool + +// ProcessFunc processes a single object. +type ProcessFunc func(obj interface{}) error + +// `*controller` implements Controller +type controller struct { + config Config + reflector *Reflector + reflectorMutex sync.RWMutex + clock clock.Clock +} + +// Controller is a low-level controller that is parameterized by a +// Config and used in sharedIndexInformer. +type Controller interface { + // Run does two things. One is to construct and run a Reflector + // to pump objects/notifications from the Config's ListerWatcher + // to the Config's Queue and possibly invoke the occasional Resync + // on that Queue. The other is to repeatedly Pop from the Queue + // and process with the Config's ProcessFunc. Both of these + // continue until `stopCh` is closed. + Run(stopCh <-chan struct{}) + + // HasSynced delegates to the Config's Queue + HasSynced() bool + + // LastSyncResourceVersion delegates to the Reflector when there + // is one, otherwise returns the empty string + LastSyncResourceVersion() string +} + +// New makes a new Controller from the given Config. +func New(c *Config) Controller { + ctlr := &controller{ + config: *c, + clock: &clock.RealClock{}, + } + return ctlr +} + +// Run begins processing items, and will continue until a value is sent down stopCh or it is closed. +// It's an error to call Run more than once. +// Run blocks; call via go. 
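+// Illustrative usage (editor's note, not part of the vendored source; fifo,
+// lw and process are assumed to be defined by the caller):
+//
+//	ctl := New(&Config{
+//		Queue:         fifo,
+//		ListerWatcher: lw,
+//		ObjectType:    &v1.Pod{},
+//		Process:       process,
+//	})
+//	stop := make(chan struct{})
+//	go ctl.Run(stop)
+//	// ...
+//	close(stop) // stops the reflector and closes the queue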
+func (c *controller) Run(stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + go func() { + <-stopCh + c.config.Queue.Close() + }() + r := NewReflector( + c.config.ListerWatcher, + c.config.ObjectType, + c.config.Queue, + c.config.FullResyncPeriod, + ) + r.ShouldResync = c.config.ShouldResync + r.WatchListPageSize = c.config.WatchListPageSize + r.clock = c.clock + if c.config.WatchErrorHandler != nil { + r.watchErrorHandler = c.config.WatchErrorHandler + } + + c.reflectorMutex.Lock() + c.reflector = r + c.reflectorMutex.Unlock() + + var wg wait.Group + + wg.StartWithChannel(stopCh, r.Run) + + wait.Until(c.processLoop, time.Second, stopCh) + wg.Wait() +} + +// Returns true once this controller has completed an initial resource listing +func (c *controller) HasSynced() bool { + return c.config.Queue.HasSynced() +} + +func (c *controller) LastSyncResourceVersion() string { + c.reflectorMutex.RLock() + defer c.reflectorMutex.RUnlock() + if c.reflector == nil { + return "" + } + return c.reflector.LastSyncResourceVersion() +} + +// processLoop drains the work queue. +// TODO: Consider doing the processing in parallel. This will require a little thought +// to make sure that we don't end up processing the same object multiple times +// concurrently. +// +// TODO: Plumb through the stopCh here (and down to the queue) so that this can +// actually exit when the controller is stopped. Or just give up on this stuff +// ever being stoppable. Converting this whole package to use Context would +// also be helpful. +func (c *controller) processLoop() { + for { + obj, err := c.config.Queue.Pop(PopProcessFunc(c.config.Process)) + if err != nil { + if err == ErrFIFOClosed { + return + } + if c.config.RetryOnError { + // This is the safe way to re-enqueue. + c.config.Queue.AddIfNotPresent(obj) + } + } + } +} + +// ResourceEventHandler can handle notifications for events that +// happen to a resource. The events are informational only, so you +// can't return an error. The handlers MUST NOT modify the objects +// received; this concerns not only the top level of structure but all +// the data structures reachable from it. +// * OnAdd is called when an object is added. +// * OnUpdate is called when an object is modified. Note that oldObj is the +// last known state of the object-- it is possible that several changes +// were combined together, so you can't use this to see every single +// change. OnUpdate is also called when a re-list happens, and it will +// get called even if nothing changed. This is useful for periodically +// evaluating or syncing something. +// * OnDelete will get the final state of the item if it is known, otherwise +// it will get an object of type DeletedFinalStateUnknown. This can +// happen if the watch is closed and misses the delete event and we don't +// notice the deletion until the subsequent re-list. +type ResourceEventHandler interface { + OnAdd(obj interface{}) + OnUpdate(oldObj, newObj interface{}) + OnDelete(obj interface{}) +} + +// ResourceEventHandlerFuncs is an adaptor to let you easily specify as many or +// as few of the notification functions as you want while still implementing +// ResourceEventHandler. This adapter does not remove the prohibition against +// modifying the objects. +type ResourceEventHandlerFuncs struct { + AddFunc func(obj interface{}) + UpdateFunc func(oldObj, newObj interface{}) + DeleteFunc func(obj interface{}) +} + +// OnAdd calls AddFunc if it's not nil. 
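+// Illustrative usage (editor's note, not part of the vendored source; lw is an
+// assumed ListerWatcher):
+//
+//	h := ResourceEventHandlerFuncs{
+//		AddFunc:    func(obj interface{}) { fmt.Println("added") },
+//		DeleteFunc: func(obj interface{}) { fmt.Println("deleted") },
+//		// UpdateFunc omitted: OnUpdate then does nothing
+//	}
+//	store, ctl := NewInformer(lw, &v1.Pod{}, 0, h)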
+func (r ResourceEventHandlerFuncs) OnAdd(obj interface{}) {
+	if r.AddFunc != nil {
+		r.AddFunc(obj)
+	}
+}
+
+// OnUpdate calls UpdateFunc if it's not nil.
+func (r ResourceEventHandlerFuncs) OnUpdate(oldObj, newObj interface{}) {
+	if r.UpdateFunc != nil {
+		r.UpdateFunc(oldObj, newObj)
+	}
+}
+
+// OnDelete calls DeleteFunc if it's not nil.
+func (r ResourceEventHandlerFuncs) OnDelete(obj interface{}) {
+	if r.DeleteFunc != nil {
+		r.DeleteFunc(obj)
+	}
+}
+
+// FilteringResourceEventHandler applies the provided filter to all events coming
+// in, ensuring the appropriate nested handler method is invoked. An object
+// that starts passing the filter after an update is considered an add, and an
+// object that stops passing the filter after an update is considered a delete.
+// Like the handlers, the filter MUST NOT modify the objects it is given.
+type FilteringResourceEventHandler struct {
+	FilterFunc func(obj interface{}) bool
+	Handler    ResourceEventHandler
+}
+
+// OnAdd calls the nested handler only if the filter succeeds
+func (r FilteringResourceEventHandler) OnAdd(obj interface{}) {
+	if !r.FilterFunc(obj) {
+		return
+	}
+	r.Handler.OnAdd(obj)
+}
+
+// OnUpdate ensures the proper handler is called depending on whether the filter matches
+func (r FilteringResourceEventHandler) OnUpdate(oldObj, newObj interface{}) {
+	newer := r.FilterFunc(newObj)
+	older := r.FilterFunc(oldObj)
+	switch {
+	case newer && older:
+		r.Handler.OnUpdate(oldObj, newObj)
+	case newer && !older:
+		r.Handler.OnAdd(newObj)
+	case !newer && older:
+		r.Handler.OnDelete(oldObj)
+	default:
+		// do nothing
+	}
+}
+
+// OnDelete calls the nested handler only if the filter succeeds
+func (r FilteringResourceEventHandler) OnDelete(obj interface{}) {
+	if !r.FilterFunc(obj) {
+		return
+	}
+	r.Handler.OnDelete(obj)
+}
+
+// DeletionHandlingMetaNamespaceKeyFunc checks for
+// DeletedFinalStateUnknown objects before calling
+// MetaNamespaceKeyFunc.
+func DeletionHandlingMetaNamespaceKeyFunc(obj interface{}) (string, error) {
+	if d, ok := obj.(DeletedFinalStateUnknown); ok {
+		return d.Key, nil
+	}
+	return MetaNamespaceKeyFunc(obj)
+}
+
+// NewInformer returns a Store and a controller for populating the store
+// while also providing event notifications. You should only use the returned
+// Store for Get/List operations; Add/Modify/Deletes will cause the event
+// notifications to be faulty.
+//
+// Parameters:
+//  * lw is list and watch functions for the source of the resource you want to
+//    be informed of.
+//  * objType is an object of the type that you expect to receive.
+//  * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
+//    calls, even if nothing changed). Otherwise, re-list will be delayed as
+//    long as possible (until the upstream source closes the watch or times out,
+//    or you stop the controller).
+//  * h is the object you want notifications sent to.
+//
+func NewInformer(
+	lw ListerWatcher,
+	objType runtime.Object,
+	resyncPeriod time.Duration,
+	h ResourceEventHandler,
+) (Store, Controller) {
+	// This will hold the client state, as we know it.
+	clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc)
+
+	return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, nil)
+}
+
+// NewIndexerInformer returns an Indexer and a Controller for populating the index
+// while also providing event notifications. You should only use the returned
+// Index for Get/List operations; Add/Modify/Deletes will cause the event
+// notifications to be faulty.
+//
+// Parameters:
+//  * lw is list and watch functions for the source of the resource you want to
+//    be informed of.
+//  * objType is an object of the type that you expect to receive.
+//  * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
+//    calls, even if nothing changed). Otherwise, re-list will be delayed as
+//    long as possible (until the upstream source closes the watch or times out,
+//    or you stop the controller).
+//  * h is the object you want notifications sent to.
+//  * indexers is the indexer for the received object type.
+//
+func NewIndexerInformer(
+	lw ListerWatcher,
+	objType runtime.Object,
+	resyncPeriod time.Duration,
+	h ResourceEventHandler,
+	indexers Indexers,
+) (Indexer, Controller) {
+	// This will hold the client state, as we know it.
+	clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers)
+
+	return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, nil)
+}
+
+// TransformFunc allows for transforming an object before it will be processed
+// and put into the controller cache and before the corresponding handlers will
+// be called on it.
+// TransformFunc (similarly to ResourceEventHandler functions) should be able
+// to correctly handle the tombstone of type cache.DeletedFinalStateUnknown.
+//
+// The most common usage pattern is to clean up some parts of the object to
+// reduce component memory usage if a given component doesn't care about them.
+type TransformFunc func(interface{}) (interface{}, error)
+
+// NewTransformingInformer returns a Store and a controller for populating
+// the store while also providing event notifications. You should only use
+// the returned Store for Get/List operations; Add/Modify/Deletes will cause
+// the event notifications to be faulty.
+// The given transform function will be called on all objects before they are
+// put into the Store and the corresponding Add/Modify/Delete handlers are
+// invoked for them.
+func NewTransformingInformer(
+	lw ListerWatcher,
+	objType runtime.Object,
+	resyncPeriod time.Duration,
+	h ResourceEventHandler,
+	transformer TransformFunc,
+) (Store, Controller) {
+	// This will hold the client state, as we know it.
+	clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc)
+
+	return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, transformer)
+}
+
+// NewTransformingIndexerInformer returns an Indexer and a controller for
+// populating the index while also providing event notifications. You should
+// only use the returned Index for Get/List operations; Add/Modify/Deletes
+// will cause the event notifications to be faulty.
+// The given transform function will be called on all objects before they are
+// put into the Index and the corresponding Add/Modify/Delete handlers are
+// invoked for them.
+func NewTransformingIndexerInformer(
+	lw ListerWatcher,
+	objType runtime.Object,
+	resyncPeriod time.Duration,
+	h ResourceEventHandler,
+	indexers Indexers,
+	transformer TransformFunc,
+) (Indexer, Controller) {
+	// This will hold the client state, as we know it.
+ clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers) + + return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, transformer) +} + +// Multiplexes updates in the form of a list of Deltas into a Store, and informs +// a given handler of events OnUpdate, OnAdd, OnDelete +func processDeltas( + // Object which receives event notifications from the given deltas + handler ResourceEventHandler, + clientState Store, + transformer TransformFunc, + deltas Deltas, +) error { + // from oldest to newest + for _, d := range deltas { + obj := d.Object + if transformer != nil { + var err error + obj, err = transformer(obj) + if err != nil { + return err + } + } + + switch d.Type { + case Sync, Replaced, Added, Updated: + if old, exists, err := clientState.Get(obj); err == nil && exists { + if err := clientState.Update(obj); err != nil { + return err + } + handler.OnUpdate(old, obj) + } else { + if err := clientState.Add(obj); err != nil { + return err + } + handler.OnAdd(obj) + } + case Deleted: + if err := clientState.Delete(obj); err != nil { + return err + } + handler.OnDelete(obj) + } + } + return nil +} + +// newInformer returns a controller for populating the store while also +// providing event notifications. +// +// Parameters +// * lw is list and watch functions for the source of the resource you want to +// be informed of. +// * objType is an object of the type that you expect to receive. +// * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate +// calls, even if nothing changed). Otherwise, re-list will be delayed as +// long as possible (until the upstream source closes the watch or times out, +// or you stop the controller). +// * h is the object you want notifications sent to. +// * clientState is the store you want to populate +// +func newInformer( + lw ListerWatcher, + objType runtime.Object, + resyncPeriod time.Duration, + h ResourceEventHandler, + clientState Store, + transformer TransformFunc, +) Controller { + // This will hold incoming changes. Note how we pass clientState in as a + // KeyLister, that way resync operations will result in the correct set + // of update/delete deltas. + fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{ + KnownObjects: clientState, + EmitDeltaTypeReplaced: true, + }) + + cfg := &Config{ + Queue: fifo, + ListerWatcher: lw, + ObjectType: objType, + FullResyncPeriod: resyncPeriod, + RetryOnError: false, + + Process: func(obj interface{}) error { + if deltas, ok := obj.(Deltas); ok { + return processDeltas(h, clientState, transformer, deltas) + } + return errors.New("object given as Process argument is not Deltas") + }, + } + return New(cfg) +} diff --git a/constraint/vendor/k8s.io/client-go/tools/cache/delta_fifo.go b/constraint/vendor/k8s.io/client-go/tools/cache/delta_fifo.go new file mode 100644 index 000000000..2da2933ab --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/tools/cache/delta_fifo.go @@ -0,0 +1,757 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cache + +import ( + "errors" + "fmt" + "sync" + "time" + + "k8s.io/apimachinery/pkg/util/sets" + + "k8s.io/klog/v2" + utiltrace "k8s.io/utils/trace" +) + +// DeltaFIFOOptions is the configuration parameters for DeltaFIFO. All are +// optional. +type DeltaFIFOOptions struct { + + // KeyFunction is used to figure out what key an object should have. (It's + // exposed in the returned DeltaFIFO's KeyOf() method, with additional + // handling around deleted objects and queue state). + // Optional, the default is MetaNamespaceKeyFunc. + KeyFunction KeyFunc + + // KnownObjects is expected to return a list of keys that the consumer of + // this queue "knows about". It is used to decide which items are missing + // when Replace() is called; 'Deleted' deltas are produced for the missing items. + // KnownObjects may be nil if you can tolerate missing deletions on Replace(). + KnownObjects KeyListerGetter + + // EmitDeltaTypeReplaced indicates that the queue consumer + // understands the Replaced DeltaType. Before the `Replaced` event type was + // added, calls to Replace() were handled the same as Sync(). For + // backwards-compatibility purposes, this is false by default. + // When true, `Replaced` events will be sent for items passed to a Replace() call. + // When false, `Sync` events will be sent instead. + EmitDeltaTypeReplaced bool +} + +// DeltaFIFO is like FIFO, but differs in two ways. One is that the +// accumulator associated with a given object's key is not that object +// but rather a Deltas, which is a slice of Delta values for that +// object. Applying an object to a Deltas means to append a Delta +// except when the potentially appended Delta is a Deleted and the +// Deltas already ends with a Deleted. In that case the Deltas does +// not grow, although the terminal Deleted will be replaced by the new +// Deleted if the older Deleted's object is a +// DeletedFinalStateUnknown. +// +// The other difference is that DeltaFIFO has two additional ways that +// an object can be applied to an accumulator: Replaced and Sync. +// If EmitDeltaTypeReplaced is not set to true, Sync will be used in +// replace events for backwards compatibility. Sync is used for periodic +// resync events. +// +// DeltaFIFO is a producer-consumer queue, where a Reflector is +// intended to be the producer, and the consumer is whatever calls +// the Pop() method. +// +// DeltaFIFO solves this use case: +// * You want to process every object change (delta) at most once. +// * When you process an object, you want to see everything +// that's happened to it since you last processed it. +// * You want to process the deletion of some of the objects. +// * You might want to periodically reprocess objects. +// +// DeltaFIFO's Pop(), Get(), and GetByKey() methods return +// interface{} to satisfy the Store/Queue interfaces, but they +// will always return an object of type Deltas. List() returns +// the newest object from each accumulator in the FIFO. +// +// A DeltaFIFO's knownObjects KeyListerGetter provides the abilities +// to list Store keys and to get objects by Store key. The objects in +// question are called "known objects" and this set of objects +// modifies the behavior of the Delete, Replace, and Resync methods +// (each in a different way). +// +// A note on threading: If you call Pop() in parallel from multiple +// threads, you could end up with multiple threads processing slightly +// different versions of the same object. 
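The producer/consumer contract described above can be exercised without a reflector at all. A minimal sketch (an editorial aside, not part of the vendored file), using plain strings and a trivial KeyFunction where a real informer would use API objects keyed by MetaNamespaceKeyFunc:

```go
package main

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

func main() {
	// Key each plain-string object by its own value.
	fifo := cache.NewDeltaFIFOWithOptions(cache.DeltaFIFOOptions{
		KeyFunction: func(obj interface{}) (string, error) { return obj.(string), nil },
	})

	// Multiple changes to one key accumulate in a single Deltas slice, so the
	// consumer sees everything that happened since it last processed the key.
	fifo.Add("pod-a")
	fifo.Update("pod-a")

	_, _ = fifo.Pop(func(obj interface{}) error {
		for _, d := range obj.(cache.Deltas) {
			fmt.Println(d.Type, d.Object) // "Added pod-a", then "Updated pod-a"
		}
		return nil
	})
}
```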
+type DeltaFIFO struct { + // lock/cond protects access to 'items' and 'queue'. + lock sync.RWMutex + cond sync.Cond + + // `items` maps a key to a Deltas. + // Each such Deltas has at least one Delta. + items map[string]Deltas + + // `queue` maintains FIFO order of keys for consumption in Pop(). + // There are no duplicates in `queue`. + // A key is in `queue` if and only if it is in `items`. + queue []string + + // populated is true if the first batch of items inserted by Replace() has been populated + // or Delete/Add/Update/AddIfNotPresent was called first. + populated bool + // initialPopulationCount is the number of items inserted by the first call of Replace() + initialPopulationCount int + + // keyFunc is used to make the key used for queued item + // insertion and retrieval, and should be deterministic. + keyFunc KeyFunc + + // knownObjects list keys that are "known" --- affecting Delete(), + // Replace(), and Resync() + knownObjects KeyListerGetter + + // Used to indicate a queue is closed so a control loop can exit when a queue is empty. + // Currently, not used to gate any of CRUD operations. + closed bool + + // emitDeltaTypeReplaced is whether to emit the Replaced or Sync + // DeltaType when Replace() is called (to preserve backwards compat). + emitDeltaTypeReplaced bool +} + +// DeltaType is the type of a change (addition, deletion, etc) +type DeltaType string + +// Change type definition +const ( + Added DeltaType = "Added" + Updated DeltaType = "Updated" + Deleted DeltaType = "Deleted" + // Replaced is emitted when we encountered watch errors and had to do a + // relist. We don't know if the replaced object has changed. + // + // NOTE: Previous versions of DeltaFIFO would use Sync for Replace events + // as well. Hence, Replaced is only emitted when the option + // EmitDeltaTypeReplaced is true. + Replaced DeltaType = "Replaced" + // Sync is for synthetic events during a periodic resync. + Sync DeltaType = "Sync" +) + +// Delta is a member of Deltas (a list of Delta objects) which +// in its turn is the type stored by a DeltaFIFO. It tells you what +// change happened, and the object's state after* that change. +// +// [*] Unless the change is a deletion, and then you'll get the final +// state of the object before it was deleted. +type Delta struct { + Type DeltaType + Object interface{} +} + +// Deltas is a list of one or more 'Delta's to an individual object. +// The oldest delta is at index 0, the newest delta is the last one. +type Deltas []Delta + +// NewDeltaFIFO returns a Queue which can be used to process changes to items. +// +// keyFunc is used to figure out what key an object should have. (It is +// exposed in the returned DeltaFIFO's KeyOf() method, with additional handling +// around deleted objects and queue state). +// +// 'knownObjects' may be supplied to modify the behavior of Delete, +// Replace, and Resync. It may be nil if you do not need those +// modifications. +// +// TODO: consider merging keyLister with this object, tracking a list of +// "known" keys when Pop() is called. Have to think about how that +// affects error retrying. +// +// NOTE: It is possible to misuse this and cause a race when using an +// external known object source. +// Whether there is a potential race depends on how the consumer +// modifies knownObjects. In Pop(), process function is called under +// lock, so it is safe to update data structures in it that need to be +// in sync with the queue (e.g. knownObjects). 
+// +// Example: +// In case of sharedIndexInformer being a consumer +// (https://github.com/kubernetes/kubernetes/blob/0cdd940f/staging/src/k8s.io/client-go/tools/cache/shared_informer.go#L192), +// there is no race as knownObjects (s.indexer) is modified safely +// under DeltaFIFO's lock. The only exceptions are GetStore() and +// GetIndexer() methods, which expose ways to modify the underlying +// storage. Currently these two methods are used for creating Lister +// and internal tests. +// +// Also see the comment on DeltaFIFO. +// +// Warning: This constructs a DeltaFIFO that does not differentiate between +// events caused by a call to Replace (e.g., from a relist, which may +// contain object updates), and synthetic events caused by a periodic resync +// (which just emit the existing object). See https://issue.k8s.io/86015 for details. +// +// Use `NewDeltaFIFOWithOptions(DeltaFIFOOptions{..., EmitDeltaTypeReplaced: true})` +// instead to receive a `Replaced` event depending on the type. +// +// Deprecated: Equivalent to NewDeltaFIFOWithOptions(DeltaFIFOOptions{KeyFunction: keyFunc, KnownObjects: knownObjects}) +func NewDeltaFIFO(keyFunc KeyFunc, knownObjects KeyListerGetter) *DeltaFIFO { + return NewDeltaFIFOWithOptions(DeltaFIFOOptions{ + KeyFunction: keyFunc, + KnownObjects: knownObjects, + }) +} + +// NewDeltaFIFOWithOptions returns a Queue which can be used to process changes to +// items. See also the comment on DeltaFIFO. +func NewDeltaFIFOWithOptions(opts DeltaFIFOOptions) *DeltaFIFO { + if opts.KeyFunction == nil { + opts.KeyFunction = MetaNamespaceKeyFunc + } + + f := &DeltaFIFO{ + items: map[string]Deltas{}, + queue: []string{}, + keyFunc: opts.KeyFunction, + knownObjects: opts.KnownObjects, + + emitDeltaTypeReplaced: opts.EmitDeltaTypeReplaced, + } + f.cond.L = &f.lock + return f +} + +var ( + _ = Queue(&DeltaFIFO{}) // DeltaFIFO is a Queue +) + +var ( + // ErrZeroLengthDeltasObject is returned in a KeyError if a Deltas + // object with zero length is encountered (should be impossible, + // but included for completeness). + ErrZeroLengthDeltasObject = errors.New("0 length Deltas object; can't get key") +) + +// Close the queue. +func (f *DeltaFIFO) Close() { + f.lock.Lock() + defer f.lock.Unlock() + f.closed = true + f.cond.Broadcast() +} + +// KeyOf exposes f's keyFunc, but also detects the key of a Deltas object or +// DeletedFinalStateUnknown objects. +func (f *DeltaFIFO) KeyOf(obj interface{}) (string, error) { + if d, ok := obj.(Deltas); ok { + if len(d) == 0 { + return "", KeyError{obj, ErrZeroLengthDeltasObject} + } + obj = d.Newest().Object + } + if d, ok := obj.(DeletedFinalStateUnknown); ok { + return d.Key, nil + } + return f.keyFunc(obj) +} + +// HasSynced returns true if an Add/Update/Delete/AddIfNotPresent are called first, +// or the first batch of items inserted by Replace() has been popped. +func (f *DeltaFIFO) HasSynced() bool { + f.lock.Lock() + defer f.lock.Unlock() + return f.populated && f.initialPopulationCount == 0 +} + +// Add inserts an item, and puts it in the queue. The item is only enqueued +// if it doesn't already exist in the set. +func (f *DeltaFIFO) Add(obj interface{}) error { + f.lock.Lock() + defer f.lock.Unlock() + f.populated = true + return f.queueActionLocked(Added, obj) +} + +// Update is just like Add, but makes an Updated Delta. 
+func (f *DeltaFIFO) Update(obj interface{}) error { + f.lock.Lock() + defer f.lock.Unlock() + f.populated = true + return f.queueActionLocked(Updated, obj) +} + +// Delete is just like Add, but makes a Deleted Delta. If the given +// object does not already exist, it will be ignored. (It may have +// already been deleted by a Replace (re-list), for example.) In this +// method `f.knownObjects`, if not nil, provides (via GetByKey) +// _additional_ objects that are considered to already exist. +func (f *DeltaFIFO) Delete(obj interface{}) error { + id, err := f.KeyOf(obj) + if err != nil { + return KeyError{obj, err} + } + f.lock.Lock() + defer f.lock.Unlock() + f.populated = true + if f.knownObjects == nil { + if _, exists := f.items[id]; !exists { + // Presumably, this was deleted when a relist happened. + // Don't provide a second report of the same deletion. + return nil + } + } else { + // We only want to skip the "deletion" action if the object doesn't + // exist in knownObjects and it doesn't have corresponding item in items. + // Note that even if there is a "deletion" action in items, we can ignore it, + // because it will be deduped automatically in "queueActionLocked" + _, exists, err := f.knownObjects.GetByKey(id) + _, itemsExist := f.items[id] + if err == nil && !exists && !itemsExist { + // Presumably, this was deleted when a relist happened. + // Don't provide a second report of the same deletion. + return nil + } + } + + // exist in items and/or KnownObjects + return f.queueActionLocked(Deleted, obj) +} + +// AddIfNotPresent inserts an item, and puts it in the queue. If the item is already +// present in the set, it is neither enqueued nor added to the set. +// +// This is useful in a single producer/consumer scenario so that the consumer can +// safely retry items without contending with the producer and potentially enqueueing +// stale items. +// +// Important: obj must be a Deltas (the output of the Pop() function). Yes, this is +// different from the Add/Update/Delete functions. +func (f *DeltaFIFO) AddIfNotPresent(obj interface{}) error { + deltas, ok := obj.(Deltas) + if !ok { + return fmt.Errorf("object must be of type deltas, but got: %#v", obj) + } + id, err := f.KeyOf(deltas) + if err != nil { + return KeyError{obj, err} + } + f.lock.Lock() + defer f.lock.Unlock() + f.addIfNotPresent(id, deltas) + return nil +} + +// addIfNotPresent inserts deltas under id if it does not exist, and assumes the caller +// already holds the fifo lock. +func (f *DeltaFIFO) addIfNotPresent(id string, deltas Deltas) { + f.populated = true + if _, exists := f.items[id]; exists { + return + } + + f.queue = append(f.queue, id) + f.items[id] = deltas + f.cond.Broadcast() +} + +// re-listing and watching can deliver the same update multiple times in any +// order. This will combine the most recent two deltas if they are the same. +func dedupDeltas(deltas Deltas) Deltas { + n := len(deltas) + if n < 2 { + return deltas + } + a := &deltas[n-1] + b := &deltas[n-2] + if out := isDup(a, b); out != nil { + deltas[n-2] = *out + return deltas[:n-1] + } + return deltas +} + +// If a & b represent the same event, returns the delta that ought to be kept. +// Otherwise, returns nil. +// TODO: is there anything other than deletions that need deduping? +func isDup(a, b *Delta) *Delta { + if out := isDeletionDup(a, b); out != nil { + return out + } + // TODO: Detect other duplicate situations? Are there any? + return nil +} + +// keep the one with the most information if both are deletions. 
+func isDeletionDup(a, b *Delta) *Delta { + if b.Type != Deleted || a.Type != Deleted { + return nil + } + // Do more sophisticated checks, or is this sufficient? + if _, ok := b.Object.(DeletedFinalStateUnknown); ok { + return a + } + return b +} + +// queueActionLocked appends to the delta list for the object. +// Caller must lock first. +func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) error { + id, err := f.KeyOf(obj) + if err != nil { + return KeyError{obj, err} + } + oldDeltas := f.items[id] + newDeltas := append(oldDeltas, Delta{actionType, obj}) + newDeltas = dedupDeltas(newDeltas) + + if len(newDeltas) > 0 { + if _, exists := f.items[id]; !exists { + f.queue = append(f.queue, id) + } + f.items[id] = newDeltas + f.cond.Broadcast() + } else { + // This never happens, because dedupDeltas never returns an empty list + // when given a non-empty list (as it is here). + // If somehow it happens anyway, deal with it but complain. + if oldDeltas == nil { + klog.Errorf("Impossible dedupDeltas for id=%q: oldDeltas=%#+v, obj=%#+v; ignoring", id, oldDeltas, obj) + return nil + } + klog.Errorf("Impossible dedupDeltas for id=%q: oldDeltas=%#+v, obj=%#+v; breaking invariant by storing empty Deltas", id, oldDeltas, obj) + f.items[id] = newDeltas + return fmt.Errorf("Impossible dedupDeltas for id=%q: oldDeltas=%#+v, obj=%#+v; broke DeltaFIFO invariant by storing empty Deltas", id, oldDeltas, obj) + } + return nil +} + +// List returns a list of all the items; it returns the object +// from the most recent Delta. +// You should treat the items returned inside the deltas as immutable. +func (f *DeltaFIFO) List() []interface{} { + f.lock.RLock() + defer f.lock.RUnlock() + return f.listLocked() +} + +func (f *DeltaFIFO) listLocked() []interface{} { + list := make([]interface{}, 0, len(f.items)) + for _, item := range f.items { + list = append(list, item.Newest().Object) + } + return list +} + +// ListKeys returns a list of all the keys of the objects currently +// in the FIFO. +func (f *DeltaFIFO) ListKeys() []string { + f.lock.RLock() + defer f.lock.RUnlock() + list := make([]string, 0, len(f.queue)) + for _, key := range f.queue { + list = append(list, key) + } + return list +} + +// Get returns the complete list of deltas for the requested item, +// or sets exists=false. +// You should treat the items returned inside the deltas as immutable. +func (f *DeltaFIFO) Get(obj interface{}) (item interface{}, exists bool, err error) { + key, err := f.KeyOf(obj) + if err != nil { + return nil, false, KeyError{obj, err} + } + return f.GetByKey(key) +} + +// GetByKey returns the complete list of deltas for the requested item, +// setting exists=false if that list is empty. +// You should treat the items returned inside the deltas as immutable. +func (f *DeltaFIFO) GetByKey(key string) (item interface{}, exists bool, err error) { + f.lock.RLock() + defer f.lock.RUnlock() + d, exists := f.items[key] + if exists { + // Copy item's slice so operations on this slice + // won't interfere with the object we return. + d = copyDeltas(d) + } + return d, exists, nil +} + +// IsClosed checks if the queue is closed +func (f *DeltaFIFO) IsClosed() bool { + f.lock.Lock() + defer f.lock.Unlock() + return f.closed +} + +// Pop blocks until the queue has some items, and then returns one. If +// multiple items are ready, they are returned in the order in which they were +// added/updated. 
The item is removed from the queue (and the store) before it
+// is returned, so if you don't successfully process it, you need to add it back
+// with AddIfNotPresent().
+// process function is called under lock, so it is safe to update data structures
+// in it that need to be in sync with the queue (e.g. knownKeys). The PopProcessFunc
+// may return an instance of ErrRequeue with a nested error to indicate the current
+// item should be requeued (equivalent to calling AddIfNotPresent under the lock).
+// process should avoid expensive I/O operations so that other queue operations, i.e.
+// Add() and Get(), won't be blocked for too long.
+//
+// Pop returns a 'Deltas', which has a complete list of all the things
+// that happened to the object (deltas) while it was sitting in the queue.
+func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	for {
+		for len(f.queue) == 0 {
+			// When the queue is empty, invocation of Pop() is blocked until a new item is enqueued.
+			// When Close() is called, the f.closed is set and the condition is broadcast,
+			// which causes this loop to continue and return from the Pop().
+			if f.closed {
+				return nil, ErrFIFOClosed
+			}
+
+			f.cond.Wait()
+		}
+		id := f.queue[0]
+		f.queue = f.queue[1:]
+		depth := len(f.queue)
+		if f.initialPopulationCount > 0 {
+			f.initialPopulationCount--
+		}
+		item, ok := f.items[id]
+		if !ok {
+			// This should never happen
+			klog.Errorf("Inconceivable! %q was in f.queue but not f.items; ignoring.", id)
+			continue
+		}
+		delete(f.items, id)
+		// Only log traces if the queue depth is greater than 10 and it takes more than
+		// 100 milliseconds to process one item from the queue.
+		// Queue depth never goes high because processing an item locks the queue,
+		// and new items can't be added until processing finishes.
+		// https://github.com/kubernetes/kubernetes/issues/103789
+		if depth > 10 {
+			trace := utiltrace.New("DeltaFIFO Pop Process",
+				utiltrace.Field{Key: "ID", Value: id},
+				utiltrace.Field{Key: "Depth", Value: depth},
+				utiltrace.Field{Key: "Reason", Value: "slow event handlers blocking the queue"})
+			defer trace.LogIfLong(100 * time.Millisecond)
+		}
+		err := process(item)
+		if e, ok := err.(ErrRequeue); ok {
+			f.addIfNotPresent(id, item)
+			err = e.Err
+		}
+		// Don't need to copyDeltas here, because we're transferring
+		// ownership to the caller.
+		return item, err
+	}
+}
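Pop's requeue contract is easiest to see from the consumer side. A sketch of a drain loop, not part of the vendored file; the handle callback is a hypothetical stand-in for real delta processing:

```go
package main

import (
	"errors"
	"fmt"

	"k8s.io/client-go/tools/cache"
)

// consume drains a DeltaFIFO until it is closed, requeueing items whose
// processing fails. handle is a hypothetical per-key callback.
func consume(fifo *cache.DeltaFIFO, handle func(cache.Deltas) error) {
	for {
		_, err := fifo.Pop(func(obj interface{}) error {
			if err := handle(obj.(cache.Deltas)); err != nil {
				// ErrRequeue puts the item back (via AddIfNotPresent) under
				// the queue lock; Pop then returns the nested error.
				return cache.ErrRequeue{Err: err}
			}
			return nil
		})
		if errors.Is(err, cache.ErrFIFOClosed) {
			return
		}
	}
}

func main() {
	fifo := cache.NewDeltaFIFOWithOptions(cache.DeltaFIFOOptions{
		KeyFunction: func(obj interface{}) (string, error) { return obj.(string), nil },
	})
	fifo.Add("pod-a")
	fifo.Close() // Pop still drains queued items, then reports ErrFIFOClosed
	consume(fifo, func(d cache.Deltas) error { fmt.Println(d.Newest().Type); return nil })
}
```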
+// Replace atomically does two things: (1) it adds the given objects
+// using the Sync or Replace DeltaType and then (2) it does some deletions.
+// In particular: for every pre-existing key K that is not the key of
+// an object in `list` there is the effect of
+// `Delete(DeletedFinalStateUnknown{K, O})` where O is the current object
+// of K. If `f.knownObjects == nil` then the pre-existing keys are
+// those in `f.items` and the current object of K is the `.Newest()`
+// of the Deltas associated with K. Otherwise the pre-existing keys
+// are those listed by `f.knownObjects` and the current object of K is
+// what `f.knownObjects.GetByKey(K)` returns.
+func (f *DeltaFIFO) Replace(list []interface{}, _ string) error {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	keys := make(sets.String, len(list))
+
+	// keep backwards compat for old clients
+	action := Sync
+	if f.emitDeltaTypeReplaced {
+		action = Replaced
+	}
+
+	// Add Sync/Replaced action for each new item.
+	for _, item := range list {
+		key, err := f.KeyOf(item)
+		if err != nil {
+			return KeyError{item, err}
+		}
+		keys.Insert(key)
+		if err := f.queueActionLocked(action, item); err != nil {
+			return fmt.Errorf("couldn't enqueue object: %v", err)
+		}
+	}
+
+	if f.knownObjects == nil {
+		// Do deletion detection against our own list.
+		queuedDeletions := 0
+		for k, oldItem := range f.items {
+			if keys.Has(k) {
+				continue
+			}
+			// Delete pre-existing items not in the new list.
+			// This could happen if watch deletion event was missed while
+			// disconnected from apiserver.
+			var deletedObj interface{}
+			if n := oldItem.Newest(); n != nil {
+				deletedObj = n.Object
+			}
+			queuedDeletions++
+			if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil {
+				return err
+			}
+		}
+
+		if !f.populated {
+			f.populated = true
+			// While there shouldn't be any queued deletions in the initial
+			// population of the queue, it's better to be on the safe side.
+			f.initialPopulationCount = keys.Len() + queuedDeletions
+		}
+
+		return nil
+	}
+
+	// Detect deletions not already in the queue.
+	knownKeys := f.knownObjects.ListKeys()
+	queuedDeletions := 0
+	for _, k := range knownKeys {
+		if keys.Has(k) {
+			continue
+		}
+
+		deletedObj, exists, err := f.knownObjects.GetByKey(k)
+		if err != nil {
+			deletedObj = nil
+			klog.Errorf("Unexpected error %v during lookup of key %v, placing DeletedFinalStateUnknown marker without object", err, k)
+		} else if !exists {
+			deletedObj = nil
+			klog.Infof("Key %v does not exist in known objects store, placing DeletedFinalStateUnknown marker without object", k)
+		}
+		queuedDeletions++
+		if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil {
+			return err
+		}
+	}
+
+	if !f.populated {
+		f.populated = true
+		f.initialPopulationCount = keys.Len() + queuedDeletions
+	}
+
+	return nil
+}
+
+// Resync adds, with a Sync type of Delta, every object listed by
+// `f.knownObjects` whose key is not already queued for processing.
+// If `f.knownObjects` is `nil` then Resync does nothing.
+func (f *DeltaFIFO) Resync() error {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	if f.knownObjects == nil {
+		return nil
+	}
+
+	keys := f.knownObjects.ListKeys()
+	for _, k := range keys {
+		if err := f.syncKeyLocked(k); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (f *DeltaFIFO) syncKeyLocked(key string) error {
+	obj, exists, err := f.knownObjects.GetByKey(key)
+	if err != nil {
+		klog.Errorf("Unexpected error %v during lookup of key %v, unable to queue object for sync", err, key)
+		return nil
+	} else if !exists {
+		klog.Infof("Key %v does not exist in known objects store, unable to queue object for sync", key)
+		return nil
+	}
+
+	// If we are doing Resync() and there is already an event queued for that object,
+	// we ignore the Resync for it. This is to avoid the race, in which the resync
+	// comes with the previous value of object (since queueing an event for the object
+	// doesn't trigger changing the underlying store).
+	id, err := f.KeyOf(obj)
+	if err != nil {
+		return KeyError{obj, err}
+	}
+	if len(f.items[id]) > 0 {
+		return nil
+	}
+
+	if err := f.queueActionLocked(Sync, obj); err != nil {
+		return fmt.Errorf("couldn't queue object: %v", err)
+	}
+	return nil
+}
+
+// A KeyListerGetter is anything that knows how to list its keys and look up by key.
+type KeyListerGetter interface {
+	KeyLister
+	KeyGetter
+}
+
+// A KeyLister is anything that knows how to list its keys.
+type KeyLister interface { + ListKeys() []string +} + +// A KeyGetter is anything that knows how to get the value stored under a given key. +type KeyGetter interface { + // GetByKey returns the value associated with the key, or sets exists=false. + GetByKey(key string) (value interface{}, exists bool, err error) +} + +// Oldest is a convenience function that returns the oldest delta, or +// nil if there are no deltas. +func (d Deltas) Oldest() *Delta { + if len(d) > 0 { + return &d[0] + } + return nil +} + +// Newest is a convenience function that returns the newest delta, or +// nil if there are no deltas. +func (d Deltas) Newest() *Delta { + if n := len(d); n > 0 { + return &d[n-1] + } + return nil +} + +// copyDeltas returns a shallow copy of d; that is, it copies the slice but not +// the objects in the slice. This allows Get/List to return an object that we +// know won't be clobbered by a subsequent modifications. +func copyDeltas(d Deltas) Deltas { + d2 := make(Deltas, len(d)) + copy(d2, d) + return d2 +} + +// DeletedFinalStateUnknown is placed into a DeltaFIFO in the case where an object +// was deleted but the watch deletion event was missed while disconnected from +// apiserver. In this case we don't know the final "resting" state of the object, so +// there's a chance the included `Obj` is stale. +type DeletedFinalStateUnknown struct { + Key string + Obj interface{} +} diff --git a/constraint/vendor/k8s.io/client-go/tools/cache/doc.go b/constraint/vendor/k8s.io/client-go/tools/cache/doc.go new file mode 100644 index 000000000..56b61d300 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/tools/cache/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package cache is a client-side caching mechanism. It is useful for +// reducing the number of server calls you'd otherwise need to make. +// Reflector watches a server and updates a Store. Two stores are provided; +// one that simply caches objects (for example, to allow a scheduler to +// list currently available nodes), and one that additionally acts as +// a FIFO queue (for example, to allow a scheduler to process incoming +// pods). +package cache // import "k8s.io/client-go/tools/cache" diff --git a/constraint/vendor/k8s.io/client-go/tools/cache/expiration_cache.go b/constraint/vendor/k8s.io/client-go/tools/cache/expiration_cache.go new file mode 100644 index 000000000..7abdae737 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/tools/cache/expiration_cache.go @@ -0,0 +1,215 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "sync" + "time" + + "k8s.io/klog/v2" + "k8s.io/utils/clock" +) + +// ExpirationCache implements the store interface +// 1. All entries are automatically time stamped on insert +// a. The key is computed based off the original item/keyFunc +// b. The value inserted under that key is the timestamped item +// 2. Expiration happens lazily on read based on the expiration policy +// a. No item can be inserted into the store while we're expiring +// *any* item in the cache. +// 3. Time-stamps are stripped off unexpired entries before return +// Note that the ExpirationCache is inherently slower than a normal +// threadSafeStore because it takes a write lock every time it checks if +// an item has expired. +type ExpirationCache struct { + cacheStorage ThreadSafeStore + keyFunc KeyFunc + clock clock.Clock + expirationPolicy ExpirationPolicy + // expirationLock is a write lock used to guarantee that we don't clobber + // newly inserted objects because of a stale expiration timestamp comparison + expirationLock sync.Mutex +} + +// ExpirationPolicy dictates when an object expires. Currently only abstracted out +// so unittests don't rely on the system clock. +type ExpirationPolicy interface { + IsExpired(obj *TimestampedEntry) bool +} + +// TTLPolicy implements a ttl based ExpirationPolicy. +type TTLPolicy struct { + // >0: Expire entries with an age > ttl + // <=0: Don't expire any entry + TTL time.Duration + + // Clock used to calculate ttl expiration + Clock clock.Clock +} + +// IsExpired returns true if the given object is older than the ttl, or it can't +// determine its age. +func (p *TTLPolicy) IsExpired(obj *TimestampedEntry) bool { + return p.TTL > 0 && p.Clock.Since(obj.Timestamp) > p.TTL +} + +// TimestampedEntry is the only type allowed in a ExpirationCache. +// Keep in mind that it is not safe to share timestamps between computers. +// Behavior may be inconsistent if you get a timestamp from the API Server and +// use it on the client machine as part of your ExpirationCache. +type TimestampedEntry struct { + Obj interface{} + Timestamp time.Time + key string +} + +// getTimestampedEntry returns the TimestampedEntry stored under the given key. +func (c *ExpirationCache) getTimestampedEntry(key string) (*TimestampedEntry, bool) { + item, _ := c.cacheStorage.Get(key) + if tsEntry, ok := item.(*TimestampedEntry); ok { + return tsEntry, true + } + return nil, false +} + +// getOrExpire retrieves the object from the TimestampedEntry if and only if it hasn't +// already expired. It holds a write lock across deletion. +func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) { + // Prevent all inserts from the time we deem an item as "expired" to when we + // delete it, so an un-expired item doesn't sneak in under the same key, just + // before the Delete. + c.expirationLock.Lock() + defer c.expirationLock.Unlock() + timestampedItem, exists := c.getTimestampedEntry(key) + if !exists { + return nil, false + } + if c.expirationPolicy.IsExpired(timestampedItem) { + klog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.Obj) + c.cacheStorage.Delete(key) + return nil, false + } + return timestampedItem.Obj, true +} + +// GetByKey returns the item stored under the key, or sets exists=false. 
+func (c *ExpirationCache) GetByKey(key string) (interface{}, bool, error) { + obj, exists := c.getOrExpire(key) + return obj, exists, nil +} + +// Get returns unexpired items. It purges the cache of expired items in the +// process. +func (c *ExpirationCache) Get(obj interface{}) (interface{}, bool, error) { + key, err := c.keyFunc(obj) + if err != nil { + return nil, false, KeyError{obj, err} + } + obj, exists := c.getOrExpire(key) + return obj, exists, nil +} + +// List retrieves a list of unexpired items. It purges the cache of expired +// items in the process. +func (c *ExpirationCache) List() []interface{} { + items := c.cacheStorage.List() + + list := make([]interface{}, 0, len(items)) + for _, item := range items { + key := item.(*TimestampedEntry).key + if obj, exists := c.getOrExpire(key); exists { + list = append(list, obj) + } + } + return list +} + +// ListKeys returns a list of all keys in the expiration cache. +func (c *ExpirationCache) ListKeys() []string { + return c.cacheStorage.ListKeys() +} + +// Add timestamps an item and inserts it into the cache, overwriting entries +// that might exist under the same key. +func (c *ExpirationCache) Add(obj interface{}) error { + key, err := c.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + c.expirationLock.Lock() + defer c.expirationLock.Unlock() + + c.cacheStorage.Add(key, &TimestampedEntry{obj, c.clock.Now(), key}) + return nil +} + +// Update has not been implemented yet for lack of a use case, so this method +// simply calls `Add`. This effectively refreshes the timestamp. +func (c *ExpirationCache) Update(obj interface{}) error { + return c.Add(obj) +} + +// Delete removes an item from the cache. +func (c *ExpirationCache) Delete(obj interface{}) error { + key, err := c.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + c.expirationLock.Lock() + defer c.expirationLock.Unlock() + c.cacheStorage.Delete(key) + return nil +} + +// Replace will convert all items in the given list to TimestampedEntries +// before attempting the replace operation. The replace operation will +// delete the contents of the ExpirationCache `c`. 
+func (c *ExpirationCache) Replace(list []interface{}, resourceVersion string) error {
+	items := make(map[string]interface{}, len(list))
+	ts := c.clock.Now()
+	for _, item := range list {
+		key, err := c.keyFunc(item)
+		if err != nil {
+			return KeyError{item, err}
+		}
+		items[key] = &TimestampedEntry{item, ts, key}
+	}
+	c.expirationLock.Lock()
+	defer c.expirationLock.Unlock()
+	c.cacheStorage.Replace(items, resourceVersion)
+	return nil
+}
+
+// Resync is a no-op for an ExpirationCache.
+func (c *ExpirationCache) Resync() error {
+	return nil
+}
+
+// NewTTLStore creates and returns an ExpirationCache with a TTLPolicy.
+func NewTTLStore(keyFunc KeyFunc, ttl time.Duration) Store {
+	return NewExpirationStore(keyFunc, &TTLPolicy{ttl, clock.RealClock{}})
+}
+
+// NewExpirationStore creates and returns an ExpirationCache for a given policy.
+func NewExpirationStore(keyFunc KeyFunc, expirationPolicy ExpirationPolicy) Store {
+	return &ExpirationCache{
+		cacheStorage:     NewThreadSafeStore(Indexers{}, Indices{}),
+		keyFunc:          keyFunc,
+		clock:            clock.RealClock{},
+		expirationPolicy: expirationPolicy,
+	}
+}
diff --git a/constraint/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go b/constraint/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go
new file mode 100644
index 000000000..a16f4735e
--- /dev/null
+++ b/constraint/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go
@@ -0,0 +1,57 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/utils/clock"
+)
+
+type fakeThreadSafeMap struct {
+	ThreadSafeStore
+	deletedKeys chan<- string
+}
+
+func (c *fakeThreadSafeMap) Delete(key string) {
+	if c.deletedKeys != nil {
+		c.ThreadSafeStore.Delete(key)
+		c.deletedKeys <- key
+	}
+}
+
+// FakeExpirationPolicy keeps a list of keys that never expire.
+type FakeExpirationPolicy struct {
+	NeverExpire     sets.String
+	RetrieveKeyFunc KeyFunc
+}
+
+// IsExpired reports whether the given object has expired.
+func (p *FakeExpirationPolicy) IsExpired(obj *TimestampedEntry) bool {
+	key, _ := p.RetrieveKeyFunc(obj)
+	return !p.NeverExpire.Has(key)
+}
+
+// NewFakeExpirationStore creates a new instance of the ExpirationCache.
+func NewFakeExpirationStore(keyFunc KeyFunc, deletedKeys chan<- string, expirationPolicy ExpirationPolicy, cacheClock clock.Clock) Store {
+	cacheStorage := NewThreadSafeStore(Indexers{}, Indices{})
+	return &ExpirationCache{
+		cacheStorage:     &fakeThreadSafeMap{cacheStorage, deletedKeys},
+		keyFunc:          keyFunc,
+		clock:            cacheClock,
+		expirationPolicy: expirationPolicy,
+	}
+}
diff --git a/constraint/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go b/constraint/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go
new file mode 100644
index 000000000..462d22660
--- /dev/null
+++ b/constraint/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go
@@ -0,0 +1,102 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +// FakeCustomStore lets you define custom functions for store operations. +type FakeCustomStore struct { + AddFunc func(obj interface{}) error + UpdateFunc func(obj interface{}) error + DeleteFunc func(obj interface{}) error + ListFunc func() []interface{} + ListKeysFunc func() []string + GetFunc func(obj interface{}) (item interface{}, exists bool, err error) + GetByKeyFunc func(key string) (item interface{}, exists bool, err error) + ReplaceFunc func(list []interface{}, resourceVersion string) error + ResyncFunc func() error +} + +// Add calls the custom Add function if defined +func (f *FakeCustomStore) Add(obj interface{}) error { + if f.AddFunc != nil { + return f.AddFunc(obj) + } + return nil +} + +// Update calls the custom Update function if defined +func (f *FakeCustomStore) Update(obj interface{}) error { + if f.UpdateFunc != nil { + return f.UpdateFunc(obj) + } + return nil +} + +// Delete calls the custom Delete function if defined +func (f *FakeCustomStore) Delete(obj interface{}) error { + if f.DeleteFunc != nil { + return f.DeleteFunc(obj) + } + return nil +} + +// List calls the custom List function if defined +func (f *FakeCustomStore) List() []interface{} { + if f.ListFunc != nil { + return f.ListFunc() + } + return nil +} + +// ListKeys calls the custom ListKeys function if defined +func (f *FakeCustomStore) ListKeys() []string { + if f.ListKeysFunc != nil { + return f.ListKeysFunc() + } + return nil +} + +// Get calls the custom Get function if defined +func (f *FakeCustomStore) Get(obj interface{}) (item interface{}, exists bool, err error) { + if f.GetFunc != nil { + return f.GetFunc(obj) + } + return nil, false, nil +} + +// GetByKey calls the custom GetByKey function if defined +func (f *FakeCustomStore) GetByKey(key string) (item interface{}, exists bool, err error) { + if f.GetByKeyFunc != nil { + return f.GetByKeyFunc(key) + } + return nil, false, nil +} + +// Replace calls the custom Replace function if defined +func (f *FakeCustomStore) Replace(list []interface{}, resourceVersion string) error { + if f.ReplaceFunc != nil { + return f.ReplaceFunc(list, resourceVersion) + } + return nil +} + +// Resync calls the custom Resync function if defined +func (f *FakeCustomStore) Resync() error { + if f.ResyncFunc != nil { + return f.ResyncFunc() + } + return nil +} diff --git a/constraint/vendor/k8s.io/client-go/tools/cache/fifo.go b/constraint/vendor/k8s.io/client-go/tools/cache/fifo.go new file mode 100644 index 000000000..5c9255027 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/tools/cache/fifo.go @@ -0,0 +1,373 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "errors" + "sync" + + "k8s.io/apimachinery/pkg/util/sets" +) + +// PopProcessFunc is passed to Pop() method of Queue interface. +// It is supposed to process the accumulator popped from the queue. +type PopProcessFunc func(interface{}) error + +// ErrRequeue may be returned by a PopProcessFunc to safely requeue +// the current item. The value of Err will be returned from Pop. +type ErrRequeue struct { + // Err is returned by the Pop function + Err error +} + +// ErrFIFOClosed used when FIFO is closed +var ErrFIFOClosed = errors.New("DeltaFIFO: manipulating with closed queue") + +func (e ErrRequeue) Error() string { + if e.Err == nil { + return "the popped item should be requeued without returning an error" + } + return e.Err.Error() +} + +// Queue extends Store with a collection of Store keys to "process". +// Every Add, Update, or Delete may put the object's key in that collection. +// A Queue has a way to derive the corresponding key given an accumulator. +// A Queue can be accessed concurrently from multiple goroutines. +// A Queue can be "closed", after which Pop operations return an error. +type Queue interface { + Store + + // Pop blocks until there is at least one key to process or the + // Queue is closed. In the latter case Pop returns with an error. + // In the former case Pop atomically picks one key to process, + // removes that (key, accumulator) association from the Store, and + // processes the accumulator. Pop returns the accumulator that + // was processed and the result of processing. The PopProcessFunc + // may return an ErrRequeue{inner} and in this case Pop will (a) + // return that (key, accumulator) association to the Queue as part + // of the atomic processing and (b) return the inner error from + // Pop. + Pop(PopProcessFunc) (interface{}, error) + + // AddIfNotPresent puts the given accumulator into the Queue (in + // association with the accumulator's key) if and only if that key + // is not already associated with a non-empty accumulator. + AddIfNotPresent(interface{}) error + + // HasSynced returns true if the first batch of keys have all been + // popped. The first batch of keys are those of the first Replace + // operation if that happened before any Add, AddIfNotPresent, + // Update, or Delete; otherwise the first batch is empty. + HasSynced() bool + + // Close the queue + Close() +} + +// Pop is helper function for popping from Queue. +// WARNING: Do NOT use this function in non-test code to avoid races +// unless you really really really really know what you are doing. +func Pop(queue Queue) interface{} { + var result interface{} + queue.Pop(func(obj interface{}) error { + result = obj + return nil + }) + return result +} + +// FIFO is a Queue in which (a) each accumulator is simply the most +// recently provided object and (b) the collection of keys to process +// is a FIFO. The accumulators all start out empty, and deleting an +// object from its accumulator empties the accumulator. The Resync +// operation is a no-op. 
+//
+// Thus: if multiple adds/updates of a single object happen while that
+// object's key is in the queue before it has been processed then it
+// will only be processed once, and when it is processed the most
+// recent version will be processed. This can't be done with a channel.
+//
+// FIFO solves this use case:
+//  * You want to process every object (exactly) once.
+//  * You want to process the most recent version of the object when you process it.
+//  * You do not want to process deleted objects, they should be removed from the queue.
+//  * You do not want to periodically reprocess objects.
+// Compare with DeltaFIFO for other use cases.
+type FIFO struct {
+	lock sync.RWMutex
+	cond sync.Cond
+	// We depend on the property that every key in `items` is also in `queue`.
+	items map[string]interface{}
+	queue []string
+
+	// populated is true if the first batch of items inserted by Replace() has been populated
+	// or Delete/Add/Update was called first.
+	populated bool
+	// initialPopulationCount is the number of items inserted by the first call of Replace()
+	initialPopulationCount int
+
+	// keyFunc is used to make the key used for queued item insertion and retrieval, and
+	// should be deterministic.
+	keyFunc KeyFunc
+
+	// closed indicates the queue is closed so a control loop can exit when the
+	// queue is empty. Currently, not used to gate any of CRUD operations.
+	closed bool
+}
+
+var (
+	_ = Queue(&FIFO{}) // FIFO is a Queue
+)
+
+// Close the queue.
+func (f *FIFO) Close() {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	f.closed = true
+	f.cond.Broadcast()
+}
+
+// HasSynced returns true if Add/Update/Delete/AddIfNotPresent was called first,
+// or the first batch of items inserted by Replace() has been popped.
+func (f *FIFO) HasSynced() bool {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	return f.populated && f.initialPopulationCount == 0
+}
+
+// Add inserts an item, and puts it in the queue. The item is only enqueued
+// if it doesn't already exist in the set.
+func (f *FIFO) Add(obj interface{}) error {
+	id, err := f.keyFunc(obj)
+	if err != nil {
+		return KeyError{obj, err}
+	}
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	f.populated = true
+	if _, exists := f.items[id]; !exists {
+		f.queue = append(f.queue, id)
+	}
+	f.items[id] = obj
+	f.cond.Broadcast()
+	return nil
+}
+
+// AddIfNotPresent inserts an item, and puts it in the queue. If the item is already
+// present in the set, it is neither enqueued nor added to the set.
+//
+// This is useful in a single producer/consumer scenario so that the consumer can
+// safely retry items without contending with the producer and potentially enqueueing
+// stale items.
+func (f *FIFO) AddIfNotPresent(obj interface{}) error {
+	id, err := f.keyFunc(obj)
+	if err != nil {
+		return KeyError{obj, err}
+	}
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	f.addIfNotPresent(id, obj)
+	return nil
+}
+
+// addIfNotPresent assumes the fifo lock is already held and adds the provided
+// item to the queue under id if it does not already exist.
+func (f *FIFO) addIfNotPresent(id string, obj interface{}) {
+	f.populated = true
+	if _, exists := f.items[id]; exists {
+		return
+	}
+
+	f.queue = append(f.queue, id)
+	f.items[id] = obj
+	f.cond.Broadcast()
+}
+
+// Update is the same as Add in this implementation.
+func (f *FIFO) Update(obj interface{}) error {
+	return f.Add(obj)
+}
+
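The coalescing behavior documented above (one key, one queue slot, newest object wins) can be demonstrated directly. A small sketch, not part of the vendored file; the note type and its values are illustrative only:

```go
package main

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

type note struct{ key, body string }

func main() {
	fifo := cache.NewFIFO(func(obj interface{}) (string, error) {
		return obj.(note).key, nil
	})

	// Two writes to the same key before the consumer runs: the key is queued
	// once, and only the most recent version is processed.
	fifo.Add(note{"n1", "draft"})
	fifo.Add(note{"n1", "final"})

	v := cache.Pop(fifo).(note) // cache.Pop is the test-oriented helper above
	fmt.Println(v.body)         // "final": the earlier "draft" was coalesced away
}
```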
+// Delete removes an item. It doesn't add it to the queue, because
+// this implementation assumes the consumer only cares about the objects,
+// not the order in which they were created/added.
+func (f *FIFO) Delete(obj interface{}) error {
+	id, err := f.keyFunc(obj)
+	if err != nil {
+		return KeyError{obj, err}
+	}
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	f.populated = true
+	delete(f.items, id)
+	return err
+}
+
+// List returns a list of all the items.
+func (f *FIFO) List() []interface{} {
+	f.lock.RLock()
+	defer f.lock.RUnlock()
+	list := make([]interface{}, 0, len(f.items))
+	for _, item := range f.items {
+		list = append(list, item)
+	}
+	return list
+}
+
+// ListKeys returns a list of all the keys of the objects currently
+// in the FIFO.
+func (f *FIFO) ListKeys() []string {
+	f.lock.RLock()
+	defer f.lock.RUnlock()
+	list := make([]string, 0, len(f.items))
+	for key := range f.items {
+		list = append(list, key)
+	}
+	return list
+}
+
+// Get returns the requested item, or sets exists=false.
+func (f *FIFO) Get(obj interface{}) (item interface{}, exists bool, err error) {
+	key, err := f.keyFunc(obj)
+	if err != nil {
+		return nil, false, KeyError{obj, err}
+	}
+	return f.GetByKey(key)
+}
+
+// GetByKey returns the requested item, or sets exists=false.
+func (f *FIFO) GetByKey(key string) (item interface{}, exists bool, err error) {
+	f.lock.RLock()
+	defer f.lock.RUnlock()
+	item, exists = f.items[key]
+	return item, exists, nil
+}
+
+// IsClosed checks if the queue is closed.
+func (f *FIFO) IsClosed() bool {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	return f.closed
+}
+
+// Pop waits until an item is ready and processes it. If multiple items are
+// ready, they are returned in the order in which they were added/updated.
+// The item is removed from the queue (and the store) before it is processed,
+// so if you don't successfully process it, it should be added back with
+// AddIfNotPresent(). The process function is called under lock, so it is safe
+// to update data structures in it that need to be in sync with the queue.
+func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	for {
+		for len(f.queue) == 0 {
+			// When the queue is empty, invocation of Pop() is blocked until a new item is enqueued.
+			// When Close() is called, the f.closed is set and the condition is broadcast,
+			// which causes this loop to continue and return from the Pop().
+			if f.closed {
+				return nil, ErrFIFOClosed
+			}
+
+			f.cond.Wait()
+		}
+		id := f.queue[0]
+		f.queue = f.queue[1:]
+		if f.initialPopulationCount > 0 {
+			f.initialPopulationCount--
+		}
+		item, ok := f.items[id]
+		if !ok {
+			// Item may have been deleted subsequently.
+			continue
+		}
+		delete(f.items, id)
+		err := process(item)
+		if e, ok := err.(ErrRequeue); ok {
+			f.addIfNotPresent(id, item)
+			err = e.Err
+		}
+		return item, err
+	}
+}
+
+// Replace will delete the contents of 'f', using instead the given list.
+// 'f' takes ownership of the list; you should not reference the list again
+// after calling this function. f's queue is reset, too; upon return, it
+// will contain the items in the list, in no particular order.
+func (f *FIFO) Replace(list []interface{}, resourceVersion string) error { + items := make(map[string]interface{}, len(list)) + for _, item := range list { + key, err := f.keyFunc(item) + if err != nil { + return KeyError{item, err} + } + items[key] = item + } + + f.lock.Lock() + defer f.lock.Unlock() + + if !f.populated { + f.populated = true + f.initialPopulationCount = len(items) + } + + f.items = items + f.queue = f.queue[:0] + for id := range items { + f.queue = append(f.queue, id) + } + if len(f.queue) > 0 { + f.cond.Broadcast() + } + return nil +} + +// Resync will ensure that every object in the Store has its key in the queue. +// This should be a no-op, because that property is maintained by all operations. +func (f *FIFO) Resync() error { + f.lock.Lock() + defer f.lock.Unlock() + + inQueue := sets.NewString() + for _, id := range f.queue { + inQueue.Insert(id) + } + for id := range f.items { + if !inQueue.Has(id) { + f.queue = append(f.queue, id) + } + } + if len(f.queue) > 0 { + f.cond.Broadcast() + } + return nil +} + +// NewFIFO returns a Store which can be used to queue up items to +// process. +func NewFIFO(keyFunc KeyFunc) *FIFO { + f := &FIFO{ + items: map[string]interface{}{}, + queue: []string{}, + keyFunc: keyFunc, + } + f.cond.L = &f.lock + return f +} diff --git a/constraint/vendor/k8s.io/client-go/tools/cache/heap.go b/constraint/vendor/k8s.io/client-go/tools/cache/heap.go new file mode 100644 index 000000000..819325e9e --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/tools/cache/heap.go @@ -0,0 +1,322 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file implements a heap data structure. + +package cache + +import ( + "container/heap" + "fmt" + "sync" +) + +const ( + closedMsg = "heap is closed" +) + +// LessFunc is used to compare two objects in the heap. +type LessFunc func(interface{}, interface{}) bool + +type heapItem struct { + obj interface{} // The object which is stored in the heap. + index int // The index of the object's key in the Heap.queue. +} + +type itemKeyValue struct { + key string + obj interface{} +} + +// heapData is an internal struct that implements the standard heap interface +// and keeps the data stored in the heap. +type heapData struct { + // items is a map from key of the objects to the objects and their index. + // We depend on the property that items in the map are in the queue and vice versa. + items map[string]*heapItem + // queue implements a heap data structure and keeps the order of elements + // according to the heap invariant. The queue keeps the keys of objects stored + // in "items". + queue []string + + // keyFunc is used to make the key used for queued item insertion and retrieval, and + // should be deterministic. + keyFunc KeyFunc + // lessFunc is used to compare two objects in the heap. 
+	lessFunc LessFunc
+}
+
+var (
+	_ = heap.Interface(&heapData{}) // heapData is a standard heap
+)
+
+// Less compares two objects and returns true if the first one should go
+// in front of the second one in the heap.
+func (h *heapData) Less(i, j int) bool {
+	if i > len(h.queue) || j > len(h.queue) {
+		return false
+	}
+	itemi, ok := h.items[h.queue[i]]
+	if !ok {
+		return false
+	}
+	itemj, ok := h.items[h.queue[j]]
+	if !ok {
+		return false
+	}
+	return h.lessFunc(itemi.obj, itemj.obj)
+}
+
+// Len returns the number of items in the Heap.
+func (h *heapData) Len() int { return len(h.queue) }
+
+// Swap implements swapping of two elements in the heap. This is a part of the
+// standard heap interface and should never be called directly.
+func (h *heapData) Swap(i, j int) {
+	h.queue[i], h.queue[j] = h.queue[j], h.queue[i]
+	item := h.items[h.queue[i]]
+	item.index = i
+	item = h.items[h.queue[j]]
+	item.index = j
+}
+
+// Push is supposed to be called by heap.Push only.
+func (h *heapData) Push(kv interface{}) {
+	keyValue := kv.(*itemKeyValue)
+	n := len(h.queue)
+	h.items[keyValue.key] = &heapItem{keyValue.obj, n}
+	h.queue = append(h.queue, keyValue.key)
+}
+
+// Pop is supposed to be called by heap.Pop only.
+func (h *heapData) Pop() interface{} {
+	key := h.queue[len(h.queue)-1]
+	h.queue = h.queue[0 : len(h.queue)-1]
+	item, ok := h.items[key]
+	if !ok {
+		// This is an error
+		return nil
+	}
+	delete(h.items, key)
+	return item.obj
+}
+
+// Heap is a thread-safe producer/consumer queue that implements a heap data structure.
+// It can be used to implement priority queues and similar data structures.
+type Heap struct {
+	lock sync.RWMutex
+	cond sync.Cond
+
+	// data stores objects and has a queue that keeps their ordering according
+	// to the heap invariant.
+	data *heapData
+
+	// closed indicates that the queue is closed.
+	// It is mainly used to let Pop() exit its control loop while waiting for an item.
+	closed bool
+}
+
+// Close closes the Heap and signals condition variables that may be waiting to pop
+// items from the heap.
+func (h *Heap) Close() {
+	h.lock.Lock()
+	defer h.lock.Unlock()
+	h.closed = true
+	h.cond.Broadcast()
+}
+
+// Add inserts an item, and puts it in the queue. The item is updated if it
+// already exists.
+func (h *Heap) Add(obj interface{}) error {
+	key, err := h.data.keyFunc(obj)
+	if err != nil {
+		return KeyError{obj, err}
+	}
+	h.lock.Lock()
+	defer h.lock.Unlock()
+	if h.closed {
+		return fmt.Errorf(closedMsg)
+	}
+	if _, exists := h.data.items[key]; exists {
+		h.data.items[key].obj = obj
+		heap.Fix(h.data, h.data.items[key].index)
+	} else {
+		h.addIfNotPresentLocked(key, obj)
+	}
+	h.cond.Broadcast()
+	return nil
+}
+
+// BulkAdd adds all the items in the list to the queue and then signals the condition
+// variable. It is useful when the caller would like to add all of the items
+// to the queue before the consumer starts processing them.
+func (h *Heap) BulkAdd(list []interface{}) error {
+	h.lock.Lock()
+	defer h.lock.Unlock()
+	if h.closed {
+		return fmt.Errorf(closedMsg)
+	}
+	for _, obj := range list {
+		key, err := h.data.keyFunc(obj)
+		if err != nil {
+			return KeyError{obj, err}
+		}
+		if _, exists := h.data.items[key]; exists {
+			h.data.items[key].obj = obj
+			heap.Fix(h.data, h.data.items[key].index)
+		} else {
+			h.addIfNotPresentLocked(key, obj)
+		}
+	}
+	h.cond.Broadcast()
+	return nil
+}
+
+// AddIfNotPresent inserts an item, and puts it in the queue. If an item with
+// the key is present in the map, no change is made to the item.
+// +// This is useful in a single producer/consumer scenario so that the consumer can +// safely retry items without contending with the producer and potentially enqueueing +// stale items. +func (h *Heap) AddIfNotPresent(obj interface{}) error { + id, err := h.data.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + h.lock.Lock() + defer h.lock.Unlock() + if h.closed { + return fmt.Errorf(closedMsg) + } + h.addIfNotPresentLocked(id, obj) + h.cond.Broadcast() + return nil +} + +// addIfNotPresentLocked assumes the lock is already held and adds the provided +// item to the queue if it does not already exist. +func (h *Heap) addIfNotPresentLocked(key string, obj interface{}) { + if _, exists := h.data.items[key]; exists { + return + } + heap.Push(h.data, &itemKeyValue{key, obj}) +} + +// Update is the same as Add in this implementation. When the item does not +// exist, it is added. +func (h *Heap) Update(obj interface{}) error { + return h.Add(obj) +} + +// Delete removes an item. +func (h *Heap) Delete(obj interface{}) error { + key, err := h.data.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + h.lock.Lock() + defer h.lock.Unlock() + if item, ok := h.data.items[key]; ok { + heap.Remove(h.data, item.index) + return nil + } + return fmt.Errorf("object not found") +} + +// Pop waits until an item is ready. If multiple items are +// ready, they are returned in the order given by Heap.data.lessFunc. +func (h *Heap) Pop() (interface{}, error) { + h.lock.Lock() + defer h.lock.Unlock() + for len(h.data.queue) == 0 { + // When the queue is empty, invocation of Pop() is blocked until new item is enqueued. + // When Close() is called, the h.closed is set and the condition is broadcast, + // which causes this loop to continue and return from the Pop(). + if h.closed { + return nil, fmt.Errorf("heap is closed") + } + h.cond.Wait() + } + obj := heap.Pop(h.data) + if obj == nil { + return nil, fmt.Errorf("object was removed from heap data") + } + + return obj, nil +} + +// List returns a list of all the items. +func (h *Heap) List() []interface{} { + h.lock.RLock() + defer h.lock.RUnlock() + list := make([]interface{}, 0, len(h.data.items)) + for _, item := range h.data.items { + list = append(list, item.obj) + } + return list +} + +// ListKeys returns a list of all the keys of the objects currently in the Heap. +func (h *Heap) ListKeys() []string { + h.lock.RLock() + defer h.lock.RUnlock() + list := make([]string, 0, len(h.data.items)) + for key := range h.data.items { + list = append(list, key) + } + return list +} + +// Get returns the requested item, or sets exists=false. +func (h *Heap) Get(obj interface{}) (interface{}, bool, error) { + key, err := h.data.keyFunc(obj) + if err != nil { + return nil, false, KeyError{obj, err} + } + return h.GetByKey(key) +} + +// GetByKey returns the requested item, or sets exists=false. +func (h *Heap) GetByKey(key string) (interface{}, bool, error) { + h.lock.RLock() + defer h.lock.RUnlock() + item, exists := h.data.items[key] + if !exists { + return nil, false, nil + } + return item.obj, true, nil +} + +// IsClosed returns true if the queue is closed. +func (h *Heap) IsClosed() bool { + h.lock.RLock() + defer h.lock.RUnlock() + return h.closed +} + +// NewHeap returns a Heap which can be used to queue up items to process. 
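As a sketch of how NewHeap, defined next, drives the Heap as a priority queue. The task type, its fields, and the ordering are hypothetical stand-ins, not part of the vendored code:

package main

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

// task is a hypothetical item type; the Heap itself is type-agnostic.
type task struct {
	name     string
	priority int
}

func main() {
	h := cache.NewHeap(
		func(obj interface{}) (string, error) { return obj.(*task).name, nil }, // keyFn
		func(a, b interface{}) bool { return a.(*task).priority < b.(*task).priority }, // lessFn
	)
	h.Add(&task{name: "low", priority: 10})
	h.Add(&task{name: "high", priority: 1})

	obj, _ := h.Pop()             // blocks until an item is available
	fmt.Println(obj.(*task).name) // "high": the smallest priority pops first
}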
+func NewHeap(keyFn KeyFunc, lessFn LessFunc) *Heap {
+	h := &Heap{
+		data: &heapData{
+			items:    map[string]*heapItem{},
+			queue:    []string{},
+			keyFunc:  keyFn,
+			lessFunc: lessFn,
+		},
+	}
+	h.cond.L = &h.lock
+	return h
+}
diff --git a/constraint/vendor/k8s.io/client-go/tools/cache/index.go b/constraint/vendor/k8s.io/client-go/tools/cache/index.go
new file mode 100644
index 000000000..1d6aae560
--- /dev/null
+++ b/constraint/vendor/k8s.io/client-go/tools/cache/index.go
@@ -0,0 +1,101 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/util/sets"
+)
+
+// Indexer extends Store with multiple indices and restricts each
+// accumulator to simply hold the current object (and be empty after
+// Delete).
+//
+// There are three kinds of strings here:
+//  1. a storage key, as defined in the Store interface,
+//  2. a name of an index, and
+//  3. an "indexed value", which is produced by an IndexFunc and
+//     can be a field value or any other string computed from the object.
+type Indexer interface {
+	Store
+	// Index returns the stored objects whose set of indexed values
+	// intersects the set of indexed values of the given object, for
+	// the named index.
+	Index(indexName string, obj interface{}) ([]interface{}, error)
+	// IndexKeys returns the storage keys of the stored objects whose
+	// set of indexed values for the named index includes the given
+	// indexed value.
+	IndexKeys(indexName, indexedValue string) ([]string, error)
+	// ListIndexFuncValues returns all the indexed values of the given index.
+	ListIndexFuncValues(indexName string) []string
+	// ByIndex returns the stored objects whose set of indexed values
+	// for the named index includes the given indexed value.
+	ByIndex(indexName, indexedValue string) ([]interface{}, error)
+	// GetIndexers returns the indexers.
+	GetIndexers() Indexers
+
+	// AddIndexers adds more indexers to this store. If you call this after you already have data
+	// in the store, the results are undefined.
+	AddIndexers(newIndexers Indexers) error
+}
+
+// IndexFunc knows how to compute the set of indexed values for an object.
+type IndexFunc func(obj interface{}) ([]string, error)
+
+// IndexFuncToKeyFuncAdapter adapts an indexFunc to a keyFunc. This is only useful if your index function returns
+// unique values for every object. This conversion can create errors when more than one key is found. You
+// should prefer to make proper key and index functions.
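Before the adapter's implementation below, a sketch of exercising the Indexer interface above with the stock namespace index. It assumes toy objects that carry only ObjectMeta, which is enough for the meta-based key and index functions:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// Index objects by namespace using the stock index function.
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
		cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
	})
	indexer.Add(&metav1.ObjectMeta{Namespace: "ns1", Name: "a"})
	indexer.Add(&metav1.ObjectMeta{Namespace: "ns2", Name: "b"})

	// All objects whose "namespace" indexed value is "ns1".
	objs, _ := indexer.ByIndex(cache.NamespaceIndex, "ns1")
	fmt.Println(len(objs)) // 1
}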
+func IndexFuncToKeyFuncAdapter(indexFunc IndexFunc) KeyFunc {
+	return func(obj interface{}) (string, error) {
+		indexKeys, err := indexFunc(obj)
+		if err != nil {
+			return "", err
+		}
+		if len(indexKeys) > 1 {
+			return "", fmt.Errorf("too many keys: %v", indexKeys)
+		}
+		if len(indexKeys) == 0 {
+			return "", fmt.Errorf("unexpected empty indexKeys")
+		}
+		return indexKeys[0], nil
+	}
+}
+
+const (
+	// NamespaceIndex is the lookup name for the most common index function, which is to index by the namespace field.
+	NamespaceIndex string = "namespace"
+)
+
+// MetaNamespaceIndexFunc is a default index function that indexes based on an object's namespace.
+func MetaNamespaceIndexFunc(obj interface{}) ([]string, error) {
+	meta, err := meta.Accessor(obj)
+	if err != nil {
+		return []string{""}, fmt.Errorf("object has no meta: %v", err)
+	}
+	return []string{meta.GetNamespace()}, nil
+}
+
+// Index maps the indexed value to a set of keys in the store that match on that value.
+type Index map[string]sets.String
+
+// Indexers maps a name to an IndexFunc.
+type Indexers map[string]IndexFunc
+
+// Indices maps a name to an Index.
+type Indices map[string]Index
diff --git a/constraint/vendor/k8s.io/client-go/tools/cache/listers.go b/constraint/vendor/k8s.io/client-go/tools/cache/listers.go
new file mode 100644
index 000000000..7885d2f76
--- /dev/null
+++ b/constraint/vendor/k8s.io/client-go/tools/cache/listers.go
@@ -0,0 +1,183 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"k8s.io/klog/v2"
+
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// AppendFunc is used to add a matching item to whatever list the caller is using.
+type AppendFunc func(interface{})
+
+// ListAll calls appendFn with each value retrieved from store which matches the selector.
+func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error {
+	selectAll := selector.Empty()
+	for _, m := range store.List() {
+		if selectAll {
+			// Avoid computing labels of the objects to speed up common flows
+			// of listing all objects.
+			appendFn(m)
+			continue
+		}
+		metadata, err := meta.Accessor(m)
+		if err != nil {
+			return err
+		}
+		if selector.Matches(labels.Set(metadata.GetLabels())) {
+			appendFn(m)
+		}
+	}
+	return nil
+}
+
+// ListAllByNamespace lists items that belong to the given namespace from the Indexer.
+func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selector, appendFn AppendFunc) error {
+	selectAll := selector.Empty()
+	if namespace == metav1.NamespaceAll {
+		for _, m := range indexer.List() {
+			if selectAll {
+				// Avoid computing labels of the objects to speed up common flows
+				// of listing all objects.
+ appendFn(m) + continue + } + metadata, err := meta.Accessor(m) + if err != nil { + return err + } + if selector.Matches(labels.Set(metadata.GetLabels())) { + appendFn(m) + } + } + return nil + } + + items, err := indexer.Index(NamespaceIndex, &metav1.ObjectMeta{Namespace: namespace}) + if err != nil { + // Ignore error; do slow search without index. + klog.Warningf("can not retrieve list of objects using index : %v", err) + for _, m := range indexer.List() { + metadata, err := meta.Accessor(m) + if err != nil { + return err + } + if metadata.GetNamespace() == namespace && selector.Matches(labels.Set(metadata.GetLabels())) { + appendFn(m) + } + + } + return nil + } + for _, m := range items { + if selectAll { + // Avoid computing labels of the objects to speed up common flows + // of listing all objects. + appendFn(m) + continue + } + metadata, err := meta.Accessor(m) + if err != nil { + return err + } + if selector.Matches(labels.Set(metadata.GetLabels())) { + appendFn(m) + } + } + + return nil +} + +// GenericLister is a lister skin on a generic Indexer +type GenericLister interface { + // List will return all objects across namespaces + List(selector labels.Selector) (ret []runtime.Object, err error) + // Get will attempt to retrieve assuming that name==key + Get(name string) (runtime.Object, error) + // ByNamespace will give you a GenericNamespaceLister for one namespace + ByNamespace(namespace string) GenericNamespaceLister +} + +// GenericNamespaceLister is a lister skin on a generic Indexer +type GenericNamespaceLister interface { + // List will return all objects in this namespace + List(selector labels.Selector) (ret []runtime.Object, err error) + // Get will attempt to retrieve by namespace and name + Get(name string) (runtime.Object, error) +} + +// NewGenericLister creates a new instance for the genericLister. 
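A sketch of the GenericLister returned by NewGenericLister, defined next. It assumes corev1 Pod types are available and reuses the namespace-indexed store from the earlier example:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/tools/cache"
)

func main() {
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
		cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
	})
	indexer.Add(&corev1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "ns1", Name: "a"}})

	lister := cache.NewGenericLister(indexer, schema.GroupResource{Resource: "pods"})
	pods, _ := lister.ByNamespace("ns1").List(labels.Everything())
	fmt.Println(len(pods)) // 1

	obj, err := lister.ByNamespace("ns1").Get("a") // looks up key "ns1/a"
	fmt.Println(obj != nil, err)
}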
+func NewGenericLister(indexer Indexer, resource schema.GroupResource) GenericLister { + return &genericLister{indexer: indexer, resource: resource} +} + +type genericLister struct { + indexer Indexer + resource schema.GroupResource +} + +func (s *genericLister) List(selector labels.Selector) (ret []runtime.Object, err error) { + err = ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(runtime.Object)) + }) + return ret, err +} + +func (s *genericLister) ByNamespace(namespace string) GenericNamespaceLister { + return &genericNamespaceLister{indexer: s.indexer, namespace: namespace, resource: s.resource} +} + +func (s *genericLister) Get(name string) (runtime.Object, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(s.resource, name) + } + return obj.(runtime.Object), nil +} + +type genericNamespaceLister struct { + indexer Indexer + namespace string + resource schema.GroupResource +} + +func (s *genericNamespaceLister) List(selector labels.Selector) (ret []runtime.Object, err error) { + err = ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(runtime.Object)) + }) + return ret, err +} + +func (s *genericNamespaceLister) Get(name string) (runtime.Object, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(s.resource, name) + } + return obj.(runtime.Object), nil +} diff --git a/constraint/vendor/k8s.io/client-go/tools/cache/listwatch.go b/constraint/vendor/k8s.io/client-go/tools/cache/listwatch.go new file mode 100644 index 000000000..10b7e6512 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/tools/cache/listwatch.go @@ -0,0 +1,112 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "context" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + restclient "k8s.io/client-go/rest" +) + +// Lister is any object that knows how to perform an initial list. +type Lister interface { + // List should return a list type object; the Items field will be extracted, and the + // ResourceVersion field will be used to start the watch in the right place. + List(options metav1.ListOptions) (runtime.Object, error) +} + +// Watcher is any object that knows how to start a watch on a resource. +type Watcher interface { + // Watch should begin a watch at the specified version. + Watch(options metav1.ListOptions) (watch.Interface, error) +} + +// ListerWatcher is any object that knows how to perform an initial list and start a watch on a resource. 
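A typical construction of the ListerWatcher defined below, sketched with a typed clientset; the newPodListWatch helper is an illustration, not part of the vendored file:

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

func newPodListWatch(cfg *rest.Config) (*cache.ListWatch, error) {
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return nil, err
	}
	// Watch pods in all namespaces; the field selector narrows by object fields.
	return cache.NewListWatchFromClient(
		clientset.CoreV1().RESTClient(),
		"pods",
		metav1.NamespaceAll,
		fields.Everything(),
	), nil
}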
+type ListerWatcher interface { + Lister + Watcher +} + +// ListFunc knows how to list resources +type ListFunc func(options metav1.ListOptions) (runtime.Object, error) + +// WatchFunc knows how to watch resources +type WatchFunc func(options metav1.ListOptions) (watch.Interface, error) + +// ListWatch knows how to list and watch a set of apiserver resources. It satisfies the ListerWatcher interface. +// It is a convenience function for users of NewReflector, etc. +// ListFunc and WatchFunc must not be nil +type ListWatch struct { + ListFunc ListFunc + WatchFunc WatchFunc + // DisableChunking requests no chunking for this list watcher. + DisableChunking bool +} + +// Getter interface knows how to access Get method from RESTClient. +type Getter interface { + Get() *restclient.Request +} + +// NewListWatchFromClient creates a new ListWatch from the specified client, resource, namespace and field selector. +func NewListWatchFromClient(c Getter, resource string, namespace string, fieldSelector fields.Selector) *ListWatch { + optionsModifier := func(options *metav1.ListOptions) { + options.FieldSelector = fieldSelector.String() + } + return NewFilteredListWatchFromClient(c, resource, namespace, optionsModifier) +} + +// NewFilteredListWatchFromClient creates a new ListWatch from the specified client, resource, namespace, and option modifier. +// Option modifier is a function takes a ListOptions and modifies the consumed ListOptions. Provide customized modifier function +// to apply modification to ListOptions with a field selector, a label selector, or any other desired options. +func NewFilteredListWatchFromClient(c Getter, resource string, namespace string, optionsModifier func(options *metav1.ListOptions)) *ListWatch { + listFunc := func(options metav1.ListOptions) (runtime.Object, error) { + optionsModifier(&options) + return c.Get(). + Namespace(namespace). + Resource(resource). + VersionedParams(&options, metav1.ParameterCodec). + Do(context.TODO()). + Get() + } + watchFunc := func(options metav1.ListOptions) (watch.Interface, error) { + options.Watch = true + optionsModifier(&options) + return c.Get(). + Namespace(namespace). + Resource(resource). + VersionedParams(&options, metav1.ParameterCodec). + Watch(context.TODO()) + } + return &ListWatch{ListFunc: listFunc, WatchFunc: watchFunc} +} + +// List a set of apiserver resources +func (lw *ListWatch) List(options metav1.ListOptions) (runtime.Object, error) { + // ListWatch is used in Reflector, which already supports pagination. + // Don't paginate here to avoid duplication. + return lw.ListFunc(options) +} + +// Watch a set of apiserver resources +func (lw *ListWatch) Watch(options metav1.ListOptions) (watch.Interface, error) { + return lw.WatchFunc(options) +} diff --git a/constraint/vendor/k8s.io/client-go/tools/cache/mutation_cache.go b/constraint/vendor/k8s.io/client-go/tools/cache/mutation_cache.go new file mode 100644 index 000000000..c6f953d8e --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/tools/cache/mutation_cache.go @@ -0,0 +1,262 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "fmt" + "strconv" + "sync" + "time" + + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + utilcache "k8s.io/apimachinery/pkg/util/cache" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" +) + +// MutationCache is able to take the result of update operations and stores them in an LRU +// that can be used to provide a more current view of a requested object. It requires interpreting +// resourceVersions for comparisons. +// Implementations must be thread-safe. +// TODO find a way to layer this into an informer/lister +type MutationCache interface { + GetByKey(key string) (interface{}, bool, error) + ByIndex(indexName, indexKey string) ([]interface{}, error) + Mutation(interface{}) +} + +// ResourceVersionComparator is able to compare object versions. +type ResourceVersionComparator interface { + CompareResourceVersion(lhs, rhs runtime.Object) int +} + +// NewIntegerResourceVersionMutationCache returns a MutationCache that understands how to +// deal with objects that have a resource version that: +// +// - is an integer +// - increases when updated +// - is comparable across the same resource in a namespace +// +// Most backends will have these semantics. Indexer may be nil. ttl controls how long an item +// remains in the mutation cache before it is removed. +// +// If includeAdds is true, objects in the mutation cache will be returned even if they don't exist +// in the underlying store. This is only safe if your use of the cache can handle mutation entries +// remaining in the cache for up to ttl when mutations and deletes occur very closely in time. +func NewIntegerResourceVersionMutationCache(backingCache Store, indexer Indexer, ttl time.Duration, includeAdds bool) MutationCache { + return &mutationCache{ + backingCache: backingCache, + indexer: indexer, + mutationCache: utilcache.NewLRUExpireCache(100), + comparator: etcdObjectVersioner{}, + ttl: ttl, + includeAdds: includeAdds, + } +} + +// mutationCache doesn't guarantee that it returns values added via Mutation since they can page out and +// since you can't distinguish between, "didn't observe create" and "was deleted after create", +// if the key is missing from the backing cache, we always return it as missing +type mutationCache struct { + lock sync.Mutex + backingCache Store + indexer Indexer + mutationCache *utilcache.LRUExpireCache + includeAdds bool + ttl time.Duration + + comparator ResourceVersionComparator +} + +// GetByKey is never guaranteed to return back the value set in Mutation. It could be paged out, it could +// be older than another copy, the backingCache may be more recent or, you might have written twice into the same key. +// You get a value that was valid at some snapshot of time and will always return the newer of backingCache and mutationCache. 
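A sketch of the read-your-writes flow the MutationCache enables, ahead of the GetByKey implementation below. The updateAndRead helper and its parameters are assumptions for illustration; in real use the MutationCache would be long-lived rather than built per call:

package example

import (
	"context"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

func updateAndRead(ctx context.Context, client kubernetes.Interface, store cache.Store, indexer cache.Indexer, pod *corev1.Pod) (interface{}, bool, error) {
	mc := cache.NewIntegerResourceVersionMutationCache(store, indexer, time.Minute, false)

	updated, err := client.CoreV1().Pods(pod.Namespace).Update(ctx, pod, metav1.UpdateOptions{})
	if err != nil {
		return nil, false, err
	}
	mc.Mutation(updated) // remember our own write for up to the TTL

	// Returns the newer of the backing store's copy and the mutation.
	return mc.GetByKey(pod.Namespace + "/" + pod.Name)
}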
+func (c *mutationCache) GetByKey(key string) (interface{}, bool, error) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	obj, exists, err := c.backingCache.GetByKey(key)
+	if err != nil {
+		return nil, false, err
+	}
+	if !exists {
+		if !c.includeAdds {
+			// we can't distinguish between, "didn't observe create" and "was deleted after create", so
+			// if the key is missing, we always return it as missing
+			return nil, false, nil
+		}
+		obj, exists = c.mutationCache.Get(key)
+		if !exists {
+			return nil, false, nil
+		}
+	}
+	objRuntime, ok := obj.(runtime.Object)
+	if !ok {
+		return obj, true, nil
+	}
+	return c.newerObject(key, objRuntime), true, nil
+}
+
+// ByIndex returns the newer objects that match the provided index and indexer key.
+// Will return an error if no indexer was provided.
+func (c *mutationCache) ByIndex(name string, indexKey string) ([]interface{}, error) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	if c.indexer == nil {
+		return nil, fmt.Errorf("no indexer has been provided to the mutation cache")
+	}
+	keys, err := c.indexer.IndexKeys(name, indexKey)
+	if err != nil {
+		return nil, err
+	}
+	var items []interface{}
+	keySet := sets.NewString()
+	for _, key := range keys {
+		keySet.Insert(key)
+		obj, exists, err := c.indexer.GetByKey(key)
+		if err != nil {
+			return nil, err
+		}
+		if !exists {
+			continue
+		}
+		if objRuntime, ok := obj.(runtime.Object); ok {
+			items = append(items, c.newerObject(key, objRuntime))
+		} else {
+			items = append(items, obj)
+		}
+	}
+
+	if c.includeAdds {
+		fn := c.indexer.GetIndexers()[name]
+		// Keys() is returned oldest to newest, so full traversal does not alter the LRU behavior
+		for _, key := range c.mutationCache.Keys() {
+			updated, ok := c.mutationCache.Get(key)
+			if !ok {
+				continue
+			}
+			if keySet.Has(key.(string)) {
+				continue
+			}
+			elements, err := fn(updated)
+			if err != nil {
+				klog.V(4).Infof("Unable to calculate an index entry for mutation cache entry %s: %v", key, err)
+				continue
+			}
+			for _, inIndex := range elements {
+				if inIndex != indexKey {
+					continue
+				}
+				items = append(items, updated)
+				break
+			}
+		}
+	}
+
+	return items, nil
+}
+
+// newerObject checks the mutation cache for a newer object and returns one if found. If the
+// mutated object is older than the backing object, it is removed from the mutation cache.
+// Must be called while the lock is held.
+func (c *mutationCache) newerObject(key string, backing runtime.Object) runtime.Object {
+	mutatedObj, exists := c.mutationCache.Get(key)
+	if !exists {
+		return backing
+	}
+	mutatedObjRuntime, ok := mutatedObj.(runtime.Object)
+	if !ok {
+		return backing
+	}
+	if c.comparator.CompareResourceVersion(backing, mutatedObjRuntime) >= 0 {
+		c.mutationCache.Remove(key)
+		return backing
+	}
+	return mutatedObjRuntime
+}
+
+// Mutation adds a change to the cache that can be returned in GetByKey if it is newer than the backingCache
+// copy. If you call Mutation twice with the same object on different threads, one will win, but it's not defined
+// which one. This doesn't affect correctness, since the GetByKey guarantee of "later of these two caches" is
+// preserved, but you may not get the version of the object you want. The object you get is only guaranteed to
+// be "one that was valid at some point in time", not "the one that I want".
+func (c *mutationCache) Mutation(obj interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + + key, err := DeletionHandlingMetaNamespaceKeyFunc(obj) + if err != nil { + // this is a "nice to have", so failures shouldn't do anything weird + utilruntime.HandleError(err) + return + } + + if objRuntime, ok := obj.(runtime.Object); ok { + if mutatedObj, exists := c.mutationCache.Get(key); exists { + if mutatedObjRuntime, ok := mutatedObj.(runtime.Object); ok { + if c.comparator.CompareResourceVersion(objRuntime, mutatedObjRuntime) < 0 { + return + } + } + } + } + c.mutationCache.Add(key, obj, c.ttl) +} + +// etcdObjectVersioner implements versioning and extracting etcd node information +// for objects that have an embedded ObjectMeta or ListMeta field. +type etcdObjectVersioner struct{} + +// ObjectResourceVersion implements Versioner +func (a etcdObjectVersioner) ObjectResourceVersion(obj runtime.Object) (uint64, error) { + accessor, err := meta.Accessor(obj) + if err != nil { + return 0, err + } + version := accessor.GetResourceVersion() + if len(version) == 0 { + return 0, nil + } + return strconv.ParseUint(version, 10, 64) +} + +// CompareResourceVersion compares etcd resource versions. Outside this API they are all strings, +// but etcd resource versions are special, they're actually ints, so we can easily compare them. +func (a etcdObjectVersioner) CompareResourceVersion(lhs, rhs runtime.Object) int { + lhsVersion, err := a.ObjectResourceVersion(lhs) + if err != nil { + // coder error + panic(err) + } + rhsVersion, err := a.ObjectResourceVersion(rhs) + if err != nil { + // coder error + panic(err) + } + + if lhsVersion == rhsVersion { + return 0 + } + if lhsVersion < rhsVersion { + return -1 + } + + return 1 +} diff --git a/constraint/vendor/k8s.io/client-go/tools/cache/mutation_detector.go b/constraint/vendor/k8s.io/client-go/tools/cache/mutation_detector.go new file mode 100644 index 000000000..b37537cbd --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/tools/cache/mutation_detector.go @@ -0,0 +1,166 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "fmt" + "os" + "reflect" + "strconv" + "sync" + "time" + + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/diff" +) + +var mutationDetectionEnabled = false + +func init() { + mutationDetectionEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_CACHE_MUTATION_DETECTOR")) +} + +// MutationDetector is able to monitor objects for mutation within a limited window of time +type MutationDetector interface { + // AddObject adds the given object to the set being monitored for a while from now + AddObject(obj interface{}) + + // Run starts the monitoring and does not return until the monitoring is stopped. + Run(stopCh <-chan struct{}) +} + +// NewCacheMutationDetector creates a new instance for the defaultCacheMutationDetector. 
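A sketch of the intended hookup for NewCacheMutationDetector, defined next. The wireDetector helper and its channel-fed shape are assumptions for illustration:

package example

import "k8s.io/client-go/tools/cache"

// wireDetector shows the intended hookup: the detector is a no-op unless the
// KUBE_CACHE_MUTATION_DETECTOR environment variable parses as true.
func wireDetector(stopCh <-chan struct{}, objs <-chan interface{}) {
	md := cache.NewCacheMutationDetector("example-informer")
	go md.Run(stopCh) // periodically compares cached objects against their copies

	for obj := range objs {
		md.AddObject(obj) // deep-copies runtime.Objects for later comparison
	}
}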
+func NewCacheMutationDetector(name string) MutationDetector { + if !mutationDetectionEnabled { + return dummyMutationDetector{} + } + klog.Warningln("Mutation detector is enabled, this will result in memory leakage.") + return &defaultCacheMutationDetector{name: name, period: 1 * time.Second, retainDuration: 2 * time.Minute} +} + +type dummyMutationDetector struct{} + +func (dummyMutationDetector) Run(stopCh <-chan struct{}) { +} +func (dummyMutationDetector) AddObject(obj interface{}) { +} + +// defaultCacheMutationDetector gives a way to detect if a cached object has been mutated +// It has a list of cached objects and their copies. I haven't thought of a way +// to see WHO is mutating it, just that it's getting mutated. +type defaultCacheMutationDetector struct { + name string + period time.Duration + + // compareLock ensures only a single call to CompareObjects runs at a time + compareObjectsLock sync.Mutex + + // addLock guards addedObjs between AddObject and CompareObjects + addedObjsLock sync.Mutex + addedObjs []cacheObj + + cachedObjs []cacheObj + + retainDuration time.Duration + lastRotated time.Time + retainedCachedObjs []cacheObj + + // failureFunc is injectable for unit testing. If you don't have it, the process will panic. + // This panic is intentional, since turning on this detection indicates you want a strong + // failure signal. This failure is effectively a p0 bug and you can't trust process results + // after a mutation anyway. + failureFunc func(message string) +} + +// cacheObj holds the actual object and a copy +type cacheObj struct { + cached interface{} + copied interface{} +} + +func (d *defaultCacheMutationDetector) Run(stopCh <-chan struct{}) { + // we DON'T want protection from panics. If we're running this code, we want to die + for { + if d.lastRotated.IsZero() { + d.lastRotated = time.Now() + } else if time.Since(d.lastRotated) > d.retainDuration { + d.retainedCachedObjs = d.cachedObjs + d.cachedObjs = nil + d.lastRotated = time.Now() + } + + d.CompareObjects() + + select { + case <-stopCh: + return + case <-time.After(d.period): + } + } +} + +// AddObject makes a deep copy of the object for later comparison. It only works on runtime.Object +// but that covers the vast majority of our cached objects +func (d *defaultCacheMutationDetector) AddObject(obj interface{}) { + if _, ok := obj.(DeletedFinalStateUnknown); ok { + return + } + if obj, ok := obj.(runtime.Object); ok { + copiedObj := obj.DeepCopyObject() + + d.addedObjsLock.Lock() + defer d.addedObjsLock.Unlock() + d.addedObjs = append(d.addedObjs, cacheObj{cached: obj, copied: copiedObj}) + } +} + +func (d *defaultCacheMutationDetector) CompareObjects() { + d.compareObjectsLock.Lock() + defer d.compareObjectsLock.Unlock() + + // move addedObjs into cachedObjs under lock + // this keeps the critical section small to avoid blocking AddObject while we compare cachedObjs + d.addedObjsLock.Lock() + d.cachedObjs = append(d.cachedObjs, d.addedObjs...) 
+	d.addedObjs = nil
+	d.addedObjsLock.Unlock()
+
+	altered := false
+	for i, obj := range d.cachedObjs {
+		if !reflect.DeepEqual(obj.cached, obj.copied) {
+			fmt.Printf("CACHE %s[%d] ALTERED!\n%v\n", d.name, i, diff.ObjectGoPrintSideBySide(obj.cached, obj.copied))
+			altered = true
+		}
+	}
+	for i, obj := range d.retainedCachedObjs {
+		if !reflect.DeepEqual(obj.cached, obj.copied) {
+			fmt.Printf("CACHE %s[%d] ALTERED!\n%v\n", d.name, i, diff.ObjectGoPrintSideBySide(obj.cached, obj.copied))
+			altered = true
+		}
+	}
+
+	if altered {
+		msg := fmt.Sprintf("cache %s modified", d.name)
+		if d.failureFunc != nil {
+			d.failureFunc(msg)
+			return
+		}
+		panic(msg)
+	}
+}
diff --git a/constraint/vendor/k8s.io/client-go/tools/cache/reflector.go b/constraint/vendor/k8s.io/client-go/tools/cache/reflector.go
new file mode 100644
index 000000000..84f242116
--- /dev/null
+++ b/constraint/vendor/k8s.io/client-go/tools/cache/reflector.go
@@ -0,0 +1,616 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"math/rand"
+	"reflect"
+	"sync"
+	"time"
+
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/util/naming"
+	utilnet "k8s.io/apimachinery/pkg/util/net"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/apimachinery/pkg/watch"
+	"k8s.io/client-go/tools/pager"
+	"k8s.io/klog/v2"
+	"k8s.io/utils/clock"
+	"k8s.io/utils/trace"
+)
+
+const defaultExpectedTypeName = ""
+
+// Reflector watches a specified resource and causes all changes to be reflected in the given store.
+type Reflector struct {
+	// name identifies this reflector. By default it will be a file:line if possible.
+	name string
+
+	// The name of the type we expect to place in the store. The name
+	// will be the stringification of expectedGVK if provided, and the
+	// stringification of expectedType otherwise. It is for display
+	// only, and should not be used for parsing or comparison.
+	expectedTypeName string
+	// An example object of the type we expect to place in the store.
+	// Only the type needs to be right, except that when that is
+	// `unstructured.Unstructured` the object's `"apiVersion"` and
+	// `"kind"` must also be right.
+	expectedType reflect.Type
+	// The GVK of the object we expect to place in the store if unstructured.
+	expectedGVK *schema.GroupVersionKind
+	// The destination to sync up with the watch source.
+	store Store
+	// listerWatcher is used to perform lists and watches.
+	listerWatcher ListerWatcher
+
+	// backoff manages backoff of ListWatch.
+	backoffManager wait.BackoffManager
+	// initConnBackoffManager manages backoff of the initial connection with the Watch call of ListAndWatch.
+ initConnBackoffManager wait.BackoffManager + + resyncPeriod time.Duration + // ShouldResync is invoked periodically and whenever it returns `true` the Store's Resync operation is invoked + ShouldResync func() bool + // clock allows tests to manipulate time + clock clock.Clock + // paginatedResult defines whether pagination should be forced for list calls. + // It is set based on the result of the initial list call. + paginatedResult bool + // lastSyncResourceVersion is the resource version token last + // observed when doing a sync with the underlying store + // it is thread safe, but not synchronized with the underlying store + lastSyncResourceVersion string + // isLastSyncResourceVersionUnavailable is true if the previous list or watch request with + // lastSyncResourceVersion failed with an "expired" or "too large resource version" error. + isLastSyncResourceVersionUnavailable bool + // lastSyncResourceVersionMutex guards read/write access to lastSyncResourceVersion + lastSyncResourceVersionMutex sync.RWMutex + // WatchListPageSize is the requested chunk size of initial and resync watch lists. + // If unset, for consistent reads (RV="") or reads that opt-into arbitrarily old data + // (RV="0") it will default to pager.PageSize, for the rest (RV != "" && RV != "0") + // it will turn off pagination to allow serving them from watch cache. + // NOTE: It should be used carefully as paginated lists are always served directly from + // etcd, which is significantly less efficient and may lead to serious performance and + // scalability problems. + WatchListPageSize int64 + // Called whenever the ListAndWatch drops the connection with an error. + watchErrorHandler WatchErrorHandler +} + +// ResourceVersionUpdater is an interface that allows store implementation to +// track the current resource version of the reflector. This is especially +// important if storage bookmarks are enabled. +type ResourceVersionUpdater interface { + // UpdateResourceVersion is called each time current resource version of the reflector + // is updated. + UpdateResourceVersion(resourceVersion string) +} + +// The WatchErrorHandler is called whenever ListAndWatch drops the +// connection with an error. After calling this handler, the informer +// will backoff and retry. +// +// The default implementation looks at the error type and tries to log +// the error message at an appropriate level. +// +// Implementations of this handler may display the error message in other +// ways. Implementations should return quickly - any expensive processing +// should be offloaded. +type WatchErrorHandler func(r *Reflector, err error) + +// DefaultWatchErrorHandler is the default implementation of WatchErrorHandler +func DefaultWatchErrorHandler(r *Reflector, err error) { + switch { + case isExpiredError(err): + // Don't set LastSyncResourceVersionUnavailable - LIST call with ResourceVersion=RV already + // has a semantic that it returns data at least as fresh as provided RV. + // So first try to LIST with setting RV to resource version of last observed object. 
+ klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.expectedTypeName, err) + case err == io.EOF: + // watch closed normally + case err == io.ErrUnexpectedEOF: + klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedTypeName, err) + default: + utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedTypeName, err)) + } +} + +var ( + // We try to spread the load on apiserver by setting timeouts for + // watch requests - it is random in [minWatchTimeout, 2*minWatchTimeout]. + minWatchTimeout = 5 * time.Minute +) + +// NewNamespaceKeyedIndexerAndReflector creates an Indexer and a Reflector +// The indexer is configured to key on namespace +func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interface{}, resyncPeriod time.Duration) (indexer Indexer, reflector *Reflector) { + indexer = NewIndexer(MetaNamespaceKeyFunc, Indexers{NamespaceIndex: MetaNamespaceIndexFunc}) + reflector = NewReflector(lw, expectedType, indexer, resyncPeriod) + return indexer, reflector +} + +// NewReflector creates a new Reflector object which will keep the +// given store up to date with the server's contents for the given +// resource. Reflector promises to only put things in the store that +// have the type of expectedType, unless expectedType is nil. If +// resyncPeriod is non-zero, then the reflector will periodically +// consult its ShouldResync function to determine whether to invoke +// the Store's Resync operation; `ShouldResync==nil` means always +// "yes". This enables you to use reflectors to periodically process +// everything as well as incrementally processing the things that +// change. +func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { + return NewNamedReflector(naming.GetNameFromCallsite(internalPackages...), lw, expectedType, store, resyncPeriod) +} + +// NewNamedReflector same as NewReflector, but with a specified name for logging +func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { + realClock := &clock.RealClock{} + r := &Reflector{ + name: name, + listerWatcher: lw, + store: store, + // We used to make the call every 1sec (1 QPS), the goal here is to achieve ~98% traffic reduction when + // API server is not healthy. With these parameters, backoff will stop at [30,60) sec interval which is + // 0.22 QPS. If we don't backoff for 2min, assume API server is healthy and we reset the backoff. + backoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, realClock), + initConnBackoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, realClock), + resyncPeriod: resyncPeriod, + clock: realClock, + watchErrorHandler: WatchErrorHandler(DefaultWatchErrorHandler), + } + r.setExpectedType(expectedType) + return r +} + +func (r *Reflector) setExpectedType(expectedType interface{}) { + r.expectedType = reflect.TypeOf(expectedType) + if r.expectedType == nil { + r.expectedTypeName = defaultExpectedTypeName + return + } + + r.expectedTypeName = r.expectedType.String() + + if obj, ok := expectedType.(*unstructured.Unstructured); ok { + // Use gvk to check that watch event objects are of the desired type. 
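+		// For example, a reflector built around an *unstructured.Unstructured
+		// whose apiVersion is "apps/v1" and kind is "Deployment" records that
+		// GVK here; watchHandler later skips watch events whose objects report
+		// a different GVK.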
+ gvk := obj.GroupVersionKind() + if gvk.Empty() { + klog.V(4).Infof("Reflector from %s configured with expectedType of *unstructured.Unstructured with empty GroupVersionKind.", r.name) + return + } + r.expectedGVK = &gvk + r.expectedTypeName = gvk.String() + } +} + +// internalPackages are packages that ignored when creating a default reflector name. These packages are in the common +// call chains to NewReflector, so they'd be low entropy names for reflectors +var internalPackages = []string{"client-go/tools/cache/"} + +// Run repeatedly uses the reflector's ListAndWatch to fetch all the +// objects and subsequent deltas. +// Run will exit when stopCh is closed. +func (r *Reflector) Run(stopCh <-chan struct{}) { + klog.V(3).Infof("Starting reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name) + wait.BackoffUntil(func() { + if err := r.ListAndWatch(stopCh); err != nil { + r.watchErrorHandler(r, err) + } + }, r.backoffManager, true, stopCh) + klog.V(3).Infof("Stopping reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name) +} + +var ( + // nothing will ever be sent down this channel + neverExitWatch <-chan time.Time = make(chan time.Time) + + // Used to indicate that watching stopped because of a signal from the stop + // channel passed in from a client of the reflector. + errorStopRequested = errors.New("stop requested") +) + +// resyncChan returns a channel which will receive something when a resync is +// required, and a cleanup function. +func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) { + if r.resyncPeriod == 0 { + return neverExitWatch, func() bool { return false } + } + // The cleanup function is required: imagine the scenario where watches + // always fail so we end up listing frequently. Then, if we don't + // manually stop the timer, we could end up with many timers active + // concurrently. + t := r.clock.NewTimer(r.resyncPeriod) + return t.C(), t.Stop +} + +// ListAndWatch first lists all items and get the resource version at the moment of call, +// and then use the resource version to watch. +// It returns error if ListAndWatch didn't even try to initialize watch. +func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { + klog.V(3).Infof("Listing and watching %v from %s", r.expectedTypeName, r.name) + var resourceVersion string + + options := metav1.ListOptions{ResourceVersion: r.relistResourceVersion()} + + if err := func() error { + initTrace := trace.New("Reflector ListAndWatch", trace.Field{Key: "name", Value: r.name}) + defer initTrace.LogIfLong(10 * time.Second) + var list runtime.Object + var paginatedResult bool + var err error + listCh := make(chan struct{}, 1) + panicCh := make(chan interface{}, 1) + go func() { + defer func() { + if r := recover(); r != nil { + panicCh <- r + } + }() + // Attempt to gather list in chunks, if supported by listerWatcher, if not, the first + // list request will return the full response. + pager := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) { + return r.listerWatcher.List(opts) + })) + switch { + case r.WatchListPageSize != 0: + pager.PageSize = r.WatchListPageSize + case r.paginatedResult: + // We got a paginated result initially. Assume this resource and server honor + // paging requests (i.e. watch cache is probably disabled) and leave the default + // pager size set. + case options.ResourceVersion != "" && options.ResourceVersion != "0": + // User didn't explicitly request pagination. 
+ // + // With ResourceVersion != "", we have a possibility to list from watch cache, + // but we do that (for ResourceVersion != "0") only if Limit is unset. + // To avoid thundering herd on etcd (e.g. on master upgrades), we explicitly + // switch off pagination to force listing from watch cache (if enabled). + // With the existing semantic of RV (result is at least as fresh as provided RV), + // this is correct and doesn't lead to going back in time. + // + // We also don't turn off pagination for ResourceVersion="0", since watch cache + // is ignoring Limit in that case anyway, and if watch cache is not enabled + // we don't introduce regression. + pager.PageSize = 0 + } + + list, paginatedResult, err = pager.List(context.Background(), options) + if isExpiredError(err) || isTooLargeResourceVersionError(err) { + r.setIsLastSyncResourceVersionUnavailable(true) + // Retry immediately if the resource version used to list is unavailable. + // The pager already falls back to full list if paginated list calls fail due to an "Expired" error on + // continuation pages, but the pager might not be enabled, the full list might fail because the + // resource version it is listing at is expired or the cache may not yet be synced to the provided + // resource version. So we need to fallback to resourceVersion="" in all to recover and ensure + // the reflector makes forward progress. + list, paginatedResult, err = pager.List(context.Background(), metav1.ListOptions{ResourceVersion: r.relistResourceVersion()}) + } + close(listCh) + }() + select { + case <-stopCh: + return nil + case r := <-panicCh: + panic(r) + case <-listCh: + } + initTrace.Step("Objects listed", trace.Field{Key: "error", Value: err}) + if err != nil { + klog.Warningf("%s: failed to list %v: %v", r.name, r.expectedTypeName, err) + return fmt.Errorf("failed to list %v: %v", r.expectedTypeName, err) + } + + // We check if the list was paginated and if so set the paginatedResult based on that. + // However, we want to do that only for the initial list (which is the only case + // when we set ResourceVersion="0"). The reasoning behind it is that later, in some + // situations we may force listing directly from etcd (by setting ResourceVersion="") + // which will return paginated result, even if watch cache is enabled. However, in + // that case, we still want to prefer sending requests to watch cache if possible. + // + // Paginated result returned for request with ResourceVersion="0" mean that watch + // cache is disabled and there are a lot of objects of a given type. In such case, + // there is no need to prefer listing from watch cache. 
+ if options.ResourceVersion == "0" && paginatedResult { + r.paginatedResult = true + } + + r.setIsLastSyncResourceVersionUnavailable(false) // list was successful + listMetaInterface, err := meta.ListAccessor(list) + if err != nil { + return fmt.Errorf("unable to understand list result %#v: %v", list, err) + } + resourceVersion = listMetaInterface.GetResourceVersion() + initTrace.Step("Resource version extracted") + items, err := meta.ExtractList(list) + if err != nil { + return fmt.Errorf("unable to understand list result %#v (%v)", list, err) + } + initTrace.Step("Objects extracted") + if err := r.syncWith(items, resourceVersion); err != nil { + return fmt.Errorf("unable to sync list result: %v", err) + } + initTrace.Step("SyncWith done") + r.setLastSyncResourceVersion(resourceVersion) + initTrace.Step("Resource version updated") + return nil + }(); err != nil { + return err + } + + resyncerrc := make(chan error, 1) + cancelCh := make(chan struct{}) + defer close(cancelCh) + go func() { + resyncCh, cleanup := r.resyncChan() + defer func() { + cleanup() // Call the last one written into cleanup + }() + for { + select { + case <-resyncCh: + case <-stopCh: + return + case <-cancelCh: + return + } + if r.ShouldResync == nil || r.ShouldResync() { + klog.V(4).Infof("%s: forcing resync", r.name) + if err := r.store.Resync(); err != nil { + resyncerrc <- err + return + } + } + cleanup() + resyncCh, cleanup = r.resyncChan() + } + }() + + for { + // give the stopCh a chance to stop the loop, even in case of continue statements further down on errors + select { + case <-stopCh: + return nil + default: + } + + timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) + options = metav1.ListOptions{ + ResourceVersion: resourceVersion, + // We want to avoid situations of hanging watchers. Stop any watchers that do not + // receive any events within the timeout window. + TimeoutSeconds: &timeoutSeconds, + // To reduce load on kube-apiserver on watch restarts, you may enable watch bookmarks. + // Reflector doesn't assume bookmarks are returned at all (if the server do not support + // watch bookmarks, it will ignore this field). + AllowWatchBookmarks: true, + } + + // start the clock before sending the request, since some proxies won't flush headers until after the first watch event is sent + start := r.clock.Now() + w, err := r.listerWatcher.Watch(options) + if err != nil { + // If this is "connection refused" error, it means that most likely apiserver is not responsive. + // It doesn't make sense to re-list all objects because most likely we will be able to restart + // watch where we ended. + // If that's the case begin exponentially backing off and resend watch request. + // Do the same for "429" errors. + if utilnet.IsConnectionRefused(err) || apierrors.IsTooManyRequests(err) { + <-r.initConnBackoffManager.Backoff().C() + continue + } + return err + } + + if err := r.watchHandler(start, w, &resourceVersion, resyncerrc, stopCh); err != nil { + if err != errorStopRequested { + switch { + case isExpiredError(err): + // Don't set LastSyncResourceVersionUnavailable - LIST call with ResourceVersion=RV already + // has a semantic that it returns data at least as fresh as provided RV. + // So first try to LIST with setting RV to resource version of last observed object. 
+ klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.expectedTypeName, err) + case apierrors.IsTooManyRequests(err): + klog.V(2).Infof("%s: watch of %v returned 429 - backing off", r.name, r.expectedTypeName) + <-r.initConnBackoffManager.Backoff().C() + continue + default: + klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedTypeName, err) + } + } + return nil + } + } +} + +// syncWith replaces the store's items with the given list. +func (r *Reflector) syncWith(items []runtime.Object, resourceVersion string) error { + found := make([]interface{}, 0, len(items)) + for _, item := range items { + found = append(found, item) + } + return r.store.Replace(found, resourceVersion) +} + +// watchHandler watches w and keeps *resourceVersion up to date. +func (r *Reflector) watchHandler(start time.Time, w watch.Interface, resourceVersion *string, errc chan error, stopCh <-chan struct{}) error { + eventCount := 0 + + // Stopping the watcher should be idempotent and if we return from this function there's no way + // we're coming back in with the same watch interface. + defer w.Stop() + +loop: + for { + select { + case <-stopCh: + return errorStopRequested + case err := <-errc: + return err + case event, ok := <-w.ResultChan(): + if !ok { + break loop + } + if event.Type == watch.Error { + return apierrors.FromObject(event.Object) + } + if r.expectedType != nil { + if e, a := r.expectedType, reflect.TypeOf(event.Object); e != a { + utilruntime.HandleError(fmt.Errorf("%s: expected type %v, but watch event object had type %v", r.name, e, a)) + continue + } + } + if r.expectedGVK != nil { + if e, a := *r.expectedGVK, event.Object.GetObjectKind().GroupVersionKind(); e != a { + utilruntime.HandleError(fmt.Errorf("%s: expected gvk %v, but watch event object had gvk %v", r.name, e, a)) + continue + } + } + meta, err := meta.Accessor(event.Object) + if err != nil { + utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event)) + continue + } + newResourceVersion := meta.GetResourceVersion() + switch event.Type { + case watch.Added: + err := r.store.Add(event.Object) + if err != nil { + utilruntime.HandleError(fmt.Errorf("%s: unable to add watch event object (%#v) to store: %v", r.name, event.Object, err)) + } + case watch.Modified: + err := r.store.Update(event.Object) + if err != nil { + utilruntime.HandleError(fmt.Errorf("%s: unable to update watch event object (%#v) to store: %v", r.name, event.Object, err)) + } + case watch.Deleted: + // TODO: Will any consumers need access to the "last known + // state", which is passed in event.Object? If so, may need + // to change this. 
+ err := r.store.Delete(event.Object) + if err != nil { + utilruntime.HandleError(fmt.Errorf("%s: unable to delete watch event object (%#v) from store: %v", r.name, event.Object, err)) + } + case watch.Bookmark: + // A `Bookmark` means watch has synced here, just update the resourceVersion + default: + utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event)) + } + *resourceVersion = newResourceVersion + r.setLastSyncResourceVersion(newResourceVersion) + if rvu, ok := r.store.(ResourceVersionUpdater); ok { + rvu.UpdateResourceVersion(newResourceVersion) + } + eventCount++ + } + } + + watchDuration := r.clock.Since(start) + if watchDuration < 1*time.Second && eventCount == 0 { + return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", r.name) + } + klog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedTypeName, eventCount) + return nil +} + +// LastSyncResourceVersion is the resource version observed when last sync with the underlying store +// The value returned is not synchronized with access to the underlying store and is not thread-safe +func (r *Reflector) LastSyncResourceVersion() string { + r.lastSyncResourceVersionMutex.RLock() + defer r.lastSyncResourceVersionMutex.RUnlock() + return r.lastSyncResourceVersion +} + +func (r *Reflector) setLastSyncResourceVersion(v string) { + r.lastSyncResourceVersionMutex.Lock() + defer r.lastSyncResourceVersionMutex.Unlock() + r.lastSyncResourceVersion = v +} + +// relistResourceVersion determines the resource version the reflector should list or relist from. +// Returns either the lastSyncResourceVersion so that this reflector will relist with a resource +// versions no older than has already been observed in relist results or watch events, or, if the last relist resulted +// in an HTTP 410 (Gone) status code, returns "" so that the relist will use the latest resource version available in +// etcd via a quorum read. +func (r *Reflector) relistResourceVersion() string { + r.lastSyncResourceVersionMutex.RLock() + defer r.lastSyncResourceVersionMutex.RUnlock() + + if r.isLastSyncResourceVersionUnavailable { + // Since this reflector makes paginated list requests, and all paginated list requests skip the watch cache + // if the lastSyncResourceVersion is unavailable, we set ResourceVersion="" and list again to re-establish reflector + // to the latest available ResourceVersion, using a consistent read from etcd. + return "" + } + if r.lastSyncResourceVersion == "" { + // For performance reasons, initial list performed by reflector uses "0" as resource version to allow it to + // be served from the watch cache if it is enabled. + return "0" + } + return r.lastSyncResourceVersion +} + +// setIsLastSyncResourceVersionUnavailable sets if the last list or watch request with lastSyncResourceVersion returned +// "expired" or "too large resource version" error. +func (r *Reflector) setIsLastSyncResourceVersionUnavailable(isUnavailable bool) { + r.lastSyncResourceVersionMutex.Lock() + defer r.lastSyncResourceVersionMutex.Unlock() + r.isLastSyncResourceVersionUnavailable = isUnavailable +} + +func isExpiredError(err error) bool { + // In Kubernetes 1.17 and earlier, the api server returns both apierrors.StatusReasonExpired and + // apierrors.StatusReasonGone for HTTP 410 (Gone) status code responses. In 1.18 the kube server is more consistent + // and always returns apierrors.StatusReasonExpired. 
For backward compatibility we can only remove the apierrors.IsGone + // check when we fully drop support for Kubernetes 1.17 servers from reflectors. + return apierrors.IsResourceExpired(err) || apierrors.IsGone(err) +} + +func isTooLargeResourceVersionError(err error) bool { + if apierrors.HasStatusCause(err, metav1.CauseTypeResourceVersionTooLarge) { + return true + } + // In Kubernetes 1.17.0-1.18.5, the api server doesn't set the error status cause to + // metav1.CauseTypeResourceVersionTooLarge to indicate that the requested minimum resource + // version is larger than the largest currently available resource version. To ensure backward + // compatibility with these server versions we also need to detect the error based on the content + // of the error message field. + if !apierrors.IsTimeout(err) { + return false + } + apierr, ok := err.(apierrors.APIStatus) + if !ok || apierr == nil || apierr.Status().Details == nil { + return false + } + for _, cause := range apierr.Status().Details.Causes { + // Matches the message returned by api server 1.17.0-1.18.5 for this error condition + if cause.Message == "Too large resource version" { + return true + } + } + return false +} diff --git a/constraint/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go b/constraint/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go new file mode 100644 index 000000000..5c00115f5 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go @@ -0,0 +1,89 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file provides abstractions for setting the provider (e.g., prometheus) +// of metrics. + +package cache + +import ( + "sync" +) + +// GaugeMetric represents a single numerical value that can arbitrarily go up +// and down. +type GaugeMetric interface { + Set(float64) +} + +// CounterMetric represents a single numerical value that only ever +// goes up. +type CounterMetric interface { + Inc() +} + +// SummaryMetric captures individual observations. +type SummaryMetric interface { + Observe(float64) +} + +type noopMetric struct{} + +func (noopMetric) Inc() {} +func (noopMetric) Dec() {} +func (noopMetric) Observe(float64) {} +func (noopMetric) Set(float64) {} + +// MetricsProvider generates various metrics used by the reflector. 
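+//
+// A provider is typically a thin adapter onto a metrics backend. A minimal,
+// hedged sketch (not part of this file; the prometheus wrapper type is an
+// assumption):
+//
+//	type promGauge struct{ g prometheus.Gauge }
+//
+//	func (p promGauge) Set(v float64) { p.g.Set(v) }
+//
+// A complete provider would return such wrappers from every New*Metric method
+// and be registered once via SetReflectorMetricsProvider (defined below).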
+type MetricsProvider interface { + NewListsMetric(name string) CounterMetric + NewListDurationMetric(name string) SummaryMetric + NewItemsInListMetric(name string) SummaryMetric + + NewWatchesMetric(name string) CounterMetric + NewShortWatchesMetric(name string) CounterMetric + NewWatchDurationMetric(name string) SummaryMetric + NewItemsInWatchMetric(name string) SummaryMetric + + NewLastResourceVersionMetric(name string) GaugeMetric +} + +type noopMetricsProvider struct{} + +func (noopMetricsProvider) NewListsMetric(name string) CounterMetric { return noopMetric{} } +func (noopMetricsProvider) NewListDurationMetric(name string) SummaryMetric { return noopMetric{} } +func (noopMetricsProvider) NewItemsInListMetric(name string) SummaryMetric { return noopMetric{} } +func (noopMetricsProvider) NewWatchesMetric(name string) CounterMetric { return noopMetric{} } +func (noopMetricsProvider) NewShortWatchesMetric(name string) CounterMetric { return noopMetric{} } +func (noopMetricsProvider) NewWatchDurationMetric(name string) SummaryMetric { return noopMetric{} } +func (noopMetricsProvider) NewItemsInWatchMetric(name string) SummaryMetric { return noopMetric{} } +func (noopMetricsProvider) NewLastResourceVersionMetric(name string) GaugeMetric { + return noopMetric{} +} + +var metricsFactory = struct { + metricsProvider MetricsProvider + setProviders sync.Once +}{ + metricsProvider: noopMetricsProvider{}, +} + +// SetReflectorMetricsProvider sets the metrics provider +func SetReflectorMetricsProvider(metricsProvider MetricsProvider) { + metricsFactory.setProviders.Do(func() { + metricsFactory.metricsProvider = metricsProvider + }) +} diff --git a/constraint/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/constraint/vendor/k8s.io/client-go/tools/cache/shared_informer.go new file mode 100644 index 000000000..9f42782d1 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/tools/cache/shared_informer.go @@ -0,0 +1,855 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "errors" + "fmt" + "sync" + "time" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/utils/buffer" + "k8s.io/utils/clock" + + "k8s.io/klog/v2" +) + +// SharedInformer provides eventually consistent linkage of its +// clients to the authoritative state of a given collection of +// objects. An object is identified by its API group, kind/resource, +// namespace (if any), and name; the `ObjectMeta.UID` is not part of +// an object's ID as far as this contract is concerned. One +// SharedInformer provides linkage to objects of a particular API +// group and kind/resource. The linked object collection of a +// SharedInformer may be further restricted to one namespace (if +// applicable) and/or by label selector and/or field selector. 
+// +// The authoritative state of an object is what apiservers provide +// access to, and an object goes through a strict sequence of states. +// An object state is either (1) present with a ResourceVersion and +// other appropriate content or (2) "absent". +// +// A SharedInformer maintains a local cache --- exposed by GetStore(), +// by GetIndexer() in the case of an indexed informer, and possibly by +// machinery involved in creating and/or accessing the informer --- of +// the state of each relevant object. This cache is eventually +// consistent with the authoritative state. This means that, unless +// prevented by persistent communication problems, if ever a +// particular object ID X is authoritatively associated with a state S +// then for every SharedInformer I whose collection includes (X, S) +// eventually either (1) I's cache associates X with S or a later +// state of X, (2) I is stopped, or (3) the authoritative state +// service for X terminates. To be formally complete, we say that the +// absent state meets any restriction by label selector or field +// selector. +// +// For a given informer and relevant object ID X, the sequence of +// states that appears in the informer's cache is a subsequence of the +// states authoritatively associated with X. That is, some states +// might never appear in the cache but ordering among the appearing +// states is correct. Note, however, that there is no promise about +// ordering between states seen for different objects. +// +// The local cache starts out empty, and gets populated and updated +// during `Run()`. +// +// As a simple example, if a collection of objects is henceforth +// unchanging, a SharedInformer is created that links to that +// collection, and that SharedInformer is `Run()` then that +// SharedInformer's cache eventually holds an exact copy of that +// collection (unless it is stopped too soon, the authoritative state +// service ends, or communication problems between the two +// persistently thwart achievement). +// +// As another simple example, if the local cache ever holds a +// non-absent state for some object ID and the object is eventually +// removed from the authoritative state then eventually the object is +// removed from the local cache (unless the SharedInformer is stopped +// too soon, the authoritative state service ends, or communication +// problems persistently thwart the desired result). +// +// The keys in the Store are of the form namespace/name for namespaced +// objects, and are simply the name for non-namespaced objects. +// Clients can use `MetaNamespaceKeyFunc(obj)` to extract the key for +// a given object, and `SplitMetaNamespaceKey(key)` to split a key +// into its constituent parts. +// +// Every query against the local cache is answered entirely from one +// snapshot of the cache's state. Thus, the result of a `List` call +// will not contain two entries with the same namespace and name. +// +// A client is identified here by a ResourceEventHandler. For every +// update to the SharedInformer's local cache and for every client +// added before `Run()`, eventually either the SharedInformer is +// stopped or the client is notified of the update. 
A client added +// after `Run()` starts gets a startup batch of notifications of +// additions of the objects existing in the cache at the time that +// client was added; also, for every update to the SharedInformer's +// local cache after that client was added, eventually either the +// SharedInformer is stopped or that client is notified of that +// update. Client notifications happen after the corresponding cache +// update and, in the case of a SharedIndexInformer, after the +// corresponding index updates. It is possible that additional cache +// and index updates happen before such a prescribed notification. +// For a given SharedInformer and client, the notifications are +// delivered sequentially. For a given SharedInformer, client, and +// object ID, the notifications are delivered in order. Because +// `ObjectMeta.UID` has no role in identifying objects, it is possible +// that when (1) object O1 with ID (e.g. namespace and name) X and +// `ObjectMeta.UID` U1 in the SharedInformer's local cache is deleted +// and later (2) another object O2 with ID X and ObjectMeta.UID U2 is +// created the informer's clients are not notified of (1) and (2) but +// rather are notified only of an update from O1 to O2. Clients that +// need to detect such cases might do so by comparing the `ObjectMeta.UID` +// field of the old and the new object in the code that handles update +// notifications (i.e. `OnUpdate` method of ResourceEventHandler). +// +// A client must process each notification promptly; a SharedInformer +// is not engineered to deal well with a large backlog of +// notifications to deliver. Lengthy processing should be passed off +// to something else, for example through a +// `client-go/util/workqueue`. +// +// A delete notification exposes the last locally known non-absent +// state, except that its ResourceVersion is replaced with a +// ResourceVersion in which the object is actually absent. +type SharedInformer interface { + // AddEventHandler adds an event handler to the shared informer using the shared informer's resync + // period. Events to a single handler are delivered sequentially, but there is no coordination + // between different handlers. + AddEventHandler(handler ResourceEventHandler) + // AddEventHandlerWithResyncPeriod adds an event handler to the + // shared informer with the requested resync period; zero means + // this handler does not care about resyncs. The resync operation + // consists of delivering to the handler an update notification + // for every object in the informer's local cache; it does not add + // any interactions with the authoritative storage. Some + // informers do no resyncs at all, not even for handlers added + // with a non-zero resyncPeriod. For an informer that does + // resyncs, and for each handler that requests resyncs, that + // informer develops a nominal resync period that is no shorter + // than the requested period but may be longer. The actual time + // between any two resyncs may be longer than the nominal period + // because the implementation takes time to do work and there may + // be competing load and scheduling noise. + AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) + // GetStore returns the informer's local cache as a Store. + GetStore() Store + // GetController is deprecated, it does nothing useful + GetController() Controller + // Run starts and runs the shared informer, returning after it stops. + // The informer will be stopped when stopCh is closed. 
+	Run(stopCh <-chan struct{})
+	// HasSynced returns true if the shared informer's store has been
+	// informed by at least one full LIST of the authoritative state
+	// of the informer's object collection. This is unrelated to "resync".
+	HasSynced() bool
+	// LastSyncResourceVersion is the resource version observed when last synced with the underlying
+	// store. The value returned is not synchronized with access to the underlying store and is not
+	// thread-safe.
+	LastSyncResourceVersion() string
+
+	// The WatchErrorHandler is called whenever ListAndWatch drops the
+	// connection with an error. After calling this handler, the informer
+	// will backoff and retry.
+	//
+	// The default implementation looks at the error type and tries to log
+	// the error message at an appropriate level.
+	//
+	// There's only one handler, so if you call this multiple times, the last
+	// one wins; calling this after the informer has started returns an error.
+	//
+	// The handler is intended for visibility, not to e.g. pause the consumers.
+	// The handler should return quickly - any expensive processing should be
+	// offloaded.
+	SetWatchErrorHandler(handler WatchErrorHandler) error
+
+	// The TransformFunc is called for each object which is about to be stored.
+	//
+	// This function is intended for you to take the opportunity to
+	// remove, transform, or normalize fields. One use case is to strip unused
+	// metadata fields out of objects to save on RAM cost.
+	//
+	// Must be set before starting the informer.
+	//
+	// Note: Since the object given to the handler may already be shared with
+	// other goroutines, it is advisable to copy the object being
+	// transformed before mutating it at all, and to return the copy to prevent
+	// data races.
+	SetTransform(handler TransformFunc) error
+}
+
+// SharedIndexInformer extends SharedInformer, adding the ability to add and
+// get Indexers.
+type SharedIndexInformer interface {
+	SharedInformer
+	// AddIndexers adds indexers to the informer before it starts.
+	AddIndexers(indexers Indexers) error
+	GetIndexer() Indexer
+}
+
+// NewSharedInformer creates a new instance for the listwatcher.
+func NewSharedInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration) SharedInformer {
+	return NewSharedIndexInformer(lw, exampleObject, defaultEventHandlerResyncPeriod, Indexers{})
+}
+
+// NewSharedIndexInformer creates a new instance for the listwatcher.
+// The created informer will not do resyncs if the given
+// defaultEventHandlerResyncPeriod is zero. Otherwise: for each
+// handler with a non-zero requested resync period, whether added
+// before or after the informer starts, the nominal resync period is
+// the requested resync period rounded up to a multiple of the
+// informer's resync checking period. Such an informer's resync
+// checking period is established when the informer starts running,
+// and is the maximum of (a) the minimum of the resync periods
+// requested before the informer starts and the
+// defaultEventHandlerResyncPeriod given here and (b) the constant
+// `minimumResyncPeriod` defined in this file.
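+//
+// Illustrative usage sketch (not part of the upstream file; the ListerWatcher
+// `lw`, the Pod example type, the handler, and the stop channel are
+// assumptions):
+//
+//	informer := NewSharedIndexInformer(lw, &v1.Pod{}, 30*time.Second,
+//		Indexers{"namespace": MetaNamespaceIndexFunc})
+//	informer.AddEventHandler(myHandler)
+//	go informer.Run(stopCh)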
+func NewSharedIndexInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration, indexers Indexers) SharedIndexInformer { + realClock := &clock.RealClock{} + sharedIndexInformer := &sharedIndexInformer{ + processor: &sharedProcessor{clock: realClock}, + indexer: NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers), + listerWatcher: lw, + objectType: exampleObject, + resyncCheckPeriod: defaultEventHandlerResyncPeriod, + defaultEventHandlerResyncPeriod: defaultEventHandlerResyncPeriod, + cacheMutationDetector: NewCacheMutationDetector(fmt.Sprintf("%T", exampleObject)), + clock: realClock, + } + return sharedIndexInformer +} + +// InformerSynced is a function that can be used to determine if an informer has synced. This is useful for determining if caches have synced. +type InformerSynced func() bool + +const ( + // syncedPollPeriod controls how often you look at the status of your sync funcs + syncedPollPeriod = 100 * time.Millisecond + + // initialBufferSize is the initial number of event notifications that can be buffered. + initialBufferSize = 1024 +) + +// WaitForNamedCacheSync is a wrapper around WaitForCacheSync that generates log messages +// indicating that the caller identified by name is waiting for syncs, followed by +// either a successful or failed sync. +func WaitForNamedCacheSync(controllerName string, stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool { + klog.Infof("Waiting for caches to sync for %s", controllerName) + + if !WaitForCacheSync(stopCh, cacheSyncs...) { + utilruntime.HandleError(fmt.Errorf("unable to sync caches for %s", controllerName)) + return false + } + + klog.Infof("Caches are synced for %s", controllerName) + return true +} + +// WaitForCacheSync waits for caches to populate. It returns true if it was successful, false +// if the controller should shutdown +// callers should prefer WaitForNamedCacheSync() +func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool { + err := wait.PollImmediateUntil(syncedPollPeriod, + func() (bool, error) { + for _, syncFunc := range cacheSyncs { + if !syncFunc() { + return false, nil + } + } + return true, nil + }, + stopCh) + if err != nil { + klog.V(2).Infof("stop requested") + return false + } + + klog.V(4).Infof("caches populated") + return true +} + +// `*sharedIndexInformer` implements SharedIndexInformer and has three +// main components. One is an indexed local cache, `indexer Indexer`. +// The second main component is a Controller that pulls +// objects/notifications using the ListerWatcher and pushes them into +// a DeltaFIFO --- whose knownObjects is the informer's local cache +// --- while concurrently Popping Deltas values from that fifo and +// processing them with `sharedIndexInformer::HandleDeltas`. Each +// invocation of HandleDeltas, which is done with the fifo's lock +// held, processes each Delta in turn. For each Delta this both +// updates the local cache and stuffs the relevant notification into +// the sharedProcessor. The third main component is that +// sharedProcessor, which is responsible for relaying those +// notifications to each of the informer's clients. +type sharedIndexInformer struct { + indexer Indexer + controller Controller + + processor *sharedProcessor + cacheMutationDetector MutationDetector + + listerWatcher ListerWatcher + + // objectType is an example object of the type this informer is + // expected to handle. 
Only the type needs to be right, except + // that when that is `unstructured.Unstructured` the object's + // `"apiVersion"` and `"kind"` must also be right. + objectType runtime.Object + + // resyncCheckPeriod is how often we want the reflector's resync timer to fire so it can call + // shouldResync to check if any of our listeners need a resync. + resyncCheckPeriod time.Duration + // defaultEventHandlerResyncPeriod is the default resync period for any handlers added via + // AddEventHandler (i.e. they don't specify one and just want to use the shared informer's default + // value). + defaultEventHandlerResyncPeriod time.Duration + // clock allows for testability + clock clock.Clock + + started, stopped bool + startedLock sync.Mutex + + // blockDeltas gives a way to stop all event distribution so that a late event handler + // can safely join the shared informer. + blockDeltas sync.Mutex + + // Called whenever the ListAndWatch drops the connection with an error. + watchErrorHandler WatchErrorHandler + + transform TransformFunc +} + +// dummyController hides the fact that a SharedInformer is different from a dedicated one +// where a caller can `Run`. The run method is disconnected in this case, because higher +// level logic will decide when to start the SharedInformer and related controller. +// Because returning information back is always asynchronous, the legacy callers shouldn't +// notice any change in behavior. +type dummyController struct { + informer *sharedIndexInformer +} + +func (v *dummyController) Run(stopCh <-chan struct{}) { +} + +func (v *dummyController) HasSynced() bool { + return v.informer.HasSynced() +} + +func (v *dummyController) LastSyncResourceVersion() string { + return "" +} + +type updateNotification struct { + oldObj interface{} + newObj interface{} +} + +type addNotification struct { + newObj interface{} +} + +type deleteNotification struct { + oldObj interface{} +} + +func (s *sharedIndexInformer) SetWatchErrorHandler(handler WatchErrorHandler) error { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + if s.started { + return fmt.Errorf("informer has already started") + } + + s.watchErrorHandler = handler + return nil +} + +func (s *sharedIndexInformer) SetTransform(handler TransformFunc) error { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + if s.started { + return fmt.Errorf("informer has already started") + } + + s.transform = handler + return nil +} + +func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + + if s.HasStarted() { + klog.Warningf("The sharedIndexInformer has started, run more than once is not allowed") + return + } + fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{ + KnownObjects: s.indexer, + EmitDeltaTypeReplaced: true, + }) + + cfg := &Config{ + Queue: fifo, + ListerWatcher: s.listerWatcher, + ObjectType: s.objectType, + FullResyncPeriod: s.resyncCheckPeriod, + RetryOnError: false, + ShouldResync: s.processor.shouldResync, + + Process: s.HandleDeltas, + WatchErrorHandler: s.watchErrorHandler, + } + + func() { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + s.controller = New(cfg) + s.controller.(*controller).clock = s.clock + s.started = true + }() + + // Separate stop channel because Processor should be stopped strictly after controller + processorStopCh := make(chan struct{}) + var wg wait.Group + defer wg.Wait() // Wait for Processor to stop + defer close(processorStopCh) // Tell Processor to stop + wg.StartWithChannel(processorStopCh, s.cacheMutationDetector.Run) + 
wg.StartWithChannel(processorStopCh, s.processor.run) + + defer func() { + s.startedLock.Lock() + defer s.startedLock.Unlock() + s.stopped = true // Don't want any new listeners + }() + s.controller.Run(stopCh) +} + +func (s *sharedIndexInformer) HasStarted() bool { + s.startedLock.Lock() + defer s.startedLock.Unlock() + return s.started +} + +func (s *sharedIndexInformer) HasSynced() bool { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + if s.controller == nil { + return false + } + return s.controller.HasSynced() +} + +func (s *sharedIndexInformer) LastSyncResourceVersion() string { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + if s.controller == nil { + return "" + } + return s.controller.LastSyncResourceVersion() +} + +func (s *sharedIndexInformer) GetStore() Store { + return s.indexer +} + +func (s *sharedIndexInformer) GetIndexer() Indexer { + return s.indexer +} + +func (s *sharedIndexInformer) AddIndexers(indexers Indexers) error { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + if s.started { + return fmt.Errorf("informer has already started") + } + + return s.indexer.AddIndexers(indexers) +} + +func (s *sharedIndexInformer) GetController() Controller { + return &dummyController{informer: s} +} + +func (s *sharedIndexInformer) AddEventHandler(handler ResourceEventHandler) { + s.AddEventHandlerWithResyncPeriod(handler, s.defaultEventHandlerResyncPeriod) +} + +func determineResyncPeriod(desired, check time.Duration) time.Duration { + if desired == 0 { + return desired + } + if check == 0 { + klog.Warningf("The specified resyncPeriod %v is invalid because this shared informer doesn't support resyncing", desired) + return 0 + } + if desired < check { + klog.Warningf("The specified resyncPeriod %v is being increased to the minimum resyncCheckPeriod %v", desired, check) + return check + } + return desired +} + +const minimumResyncPeriod = 1 * time.Second + +func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + if s.stopped { + klog.V(2).Infof("Handler %v was not added to shared informer because it has stopped already", handler) + return + } + + if resyncPeriod > 0 { + if resyncPeriod < minimumResyncPeriod { + klog.Warningf("resyncPeriod %v is too small. Changing it to the minimum allowed value of %v", resyncPeriod, minimumResyncPeriod) + resyncPeriod = minimumResyncPeriod + } + + if resyncPeriod < s.resyncCheckPeriod { + if s.started { + klog.Warningf("resyncPeriod %v is smaller than resyncCheckPeriod %v and the informer has already started. Changing it to %v", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod) + resyncPeriod = s.resyncCheckPeriod + } else { + // if the event handler's resyncPeriod is smaller than the current resyncCheckPeriod, update + // resyncCheckPeriod to match resyncPeriod and adjust the resync periods of all the listeners + // accordingly + s.resyncCheckPeriod = resyncPeriod + s.processor.resyncCheckPeriodChanged(resyncPeriod) + } + } + } + + listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now(), initialBufferSize) + + if !s.started { + s.processor.addListener(listener) + return + } + + // in order to safely join, we have to + // 1. stop sending add/update/delete notifications + // 2. do a list against the store + // 3. send synthetic "Add" events to the new handler + // 4. 
unblock
+	s.blockDeltas.Lock()
+	defer s.blockDeltas.Unlock()
+
+	s.processor.addListener(listener)
+	for _, item := range s.indexer.List() {
+		listener.add(addNotification{newObj: item})
+	}
+}
+
+func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error {
+	s.blockDeltas.Lock()
+	defer s.blockDeltas.Unlock()
+
+	if deltas, ok := obj.(Deltas); ok {
+		return processDeltas(s, s.indexer, s.transform, deltas)
+	}
+	return errors.New("object given as Process argument is not Deltas")
+}
+
+// Conforms to ResourceEventHandler
+func (s *sharedIndexInformer) OnAdd(obj interface{}) {
+	// Invocation of this function is locked under s.blockDeltas, so it is
+	// safe to distribute the notification
+	s.cacheMutationDetector.AddObject(obj)
+	s.processor.distribute(addNotification{newObj: obj}, false)
+}
+
+// Conforms to ResourceEventHandler
+func (s *sharedIndexInformer) OnUpdate(old, new interface{}) {
+	isSync := false
+
+	// A Sync event always counts as a resync. A Replaced event counts as a
+	// resync only when the resource version is unchanged.
+
+	if accessor, err := meta.Accessor(new); err == nil {
+		if oldAccessor, err := meta.Accessor(old); err == nil {
+			// Events that didn't change resourceVersion are treated as resync events
+			// and only propagated to listeners that requested resync
+			isSync = accessor.GetResourceVersion() == oldAccessor.GetResourceVersion()
+		}
+	}
+
+	// Invocation of this function is locked under s.blockDeltas, so it is
+	// safe to distribute the notification
+	s.cacheMutationDetector.AddObject(new)
+	s.processor.distribute(updateNotification{oldObj: old, newObj: new}, isSync)
+}
+
+// Conforms to ResourceEventHandler
+func (s *sharedIndexInformer) OnDelete(old interface{}) {
+	// Invocation of this function is locked under s.blockDeltas, so it is
+	// safe to distribute the notification
+	s.processor.distribute(deleteNotification{oldObj: old}, false)
+}
+
+// sharedProcessor has a collection of processorListener and can
+// distribute a notification object to its listeners. There are two
+// kinds of distribute operations. The sync distributions go to a
+// subset of the listeners that (a) is recomputed in the occasional
+// calls to shouldResync and (b) initially contains every listener.
+// The non-sync distributions go to every listener.
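+//
+// For example (illustrative only; o and n are hypothetical old/new objects):
+//
+//	p.distribute(updateNotification{oldObj: o, newObj: n}, false) // all listeners
+//	p.distribute(updateNotification{oldObj: o, newObj: n}, true)  // syncingListeners only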
+type sharedProcessor struct { + listenersStarted bool + listenersLock sync.RWMutex + listeners []*processorListener + syncingListeners []*processorListener + clock clock.Clock + wg wait.Group +} + +func (p *sharedProcessor) addListener(listener *processorListener) { + p.listenersLock.Lock() + defer p.listenersLock.Unlock() + + p.addListenerLocked(listener) + if p.listenersStarted { + p.wg.Start(listener.run) + p.wg.Start(listener.pop) + } +} + +func (p *sharedProcessor) addListenerLocked(listener *processorListener) { + p.listeners = append(p.listeners, listener) + p.syncingListeners = append(p.syncingListeners, listener) +} + +func (p *sharedProcessor) distribute(obj interface{}, sync bool) { + p.listenersLock.RLock() + defer p.listenersLock.RUnlock() + + if sync { + for _, listener := range p.syncingListeners { + listener.add(obj) + } + } else { + for _, listener := range p.listeners { + listener.add(obj) + } + } +} + +func (p *sharedProcessor) run(stopCh <-chan struct{}) { + func() { + p.listenersLock.RLock() + defer p.listenersLock.RUnlock() + for _, listener := range p.listeners { + p.wg.Start(listener.run) + p.wg.Start(listener.pop) + } + p.listenersStarted = true + }() + <-stopCh + p.listenersLock.RLock() + defer p.listenersLock.RUnlock() + for _, listener := range p.listeners { + close(listener.addCh) // Tell .pop() to stop. .pop() will tell .run() to stop + } + p.wg.Wait() // Wait for all .pop() and .run() to stop +} + +// shouldResync queries every listener to determine if any of them need a resync, based on each +// listener's resyncPeriod. +func (p *sharedProcessor) shouldResync() bool { + p.listenersLock.Lock() + defer p.listenersLock.Unlock() + + p.syncingListeners = []*processorListener{} + + resyncNeeded := false + now := p.clock.Now() + for _, listener := range p.listeners { + // need to loop through all the listeners to see if they need to resync so we can prepare any + // listeners that are going to be resyncing. + if listener.shouldResync(now) { + resyncNeeded = true + p.syncingListeners = append(p.syncingListeners, listener) + listener.determineNextResync(now) + } + } + return resyncNeeded +} + +func (p *sharedProcessor) resyncCheckPeriodChanged(resyncCheckPeriod time.Duration) { + p.listenersLock.RLock() + defer p.listenersLock.RUnlock() + + for _, listener := range p.listeners { + resyncPeriod := determineResyncPeriod(listener.requestedResyncPeriod, resyncCheckPeriod) + listener.setResyncPeriod(resyncPeriod) + } +} + +// processorListener relays notifications from a sharedProcessor to +// one ResourceEventHandler --- using two goroutines, two unbuffered +// channels, and an unbounded ring buffer. The `add(notification)` +// function sends the given notification to `addCh`. One goroutine +// runs `pop()`, which pumps notifications from `addCh` to `nextCh` +// using storage in the ring buffer while `nextCh` is not keeping up. +// Another goroutine runs `run()`, which receives notifications from +// `nextCh` and synchronously invokes the appropriate handler method. +// +// processorListener also keeps track of the adjusted requested resync +// period of the listener. +type processorListener struct { + nextCh chan interface{} + addCh chan interface{} + + handler ResourceEventHandler + + // pendingNotifications is an unbounded ring buffer that holds all notifications not yet distributed. + // There is one per listener, but a failing/stalled listener will have infinite pendingNotifications + // added until we OOM. 
+ // TODO: This is no worse than before, since reflectors were backed by unbounded DeltaFIFOs, but + // we should try to do something better. + pendingNotifications buffer.RingGrowing + + // requestedResyncPeriod is how frequently the listener wants a + // full resync from the shared informer, but modified by two + // adjustments. One is imposing a lower bound, + // `minimumResyncPeriod`. The other is another lower bound, the + // sharedIndexInformer's `resyncCheckPeriod`, that is imposed (a) only + // in AddEventHandlerWithResyncPeriod invocations made after the + // sharedIndexInformer starts and (b) only if the informer does + // resyncs at all. + requestedResyncPeriod time.Duration + // resyncPeriod is the threshold that will be used in the logic + // for this listener. This value differs from + // requestedResyncPeriod only when the sharedIndexInformer does + // not do resyncs, in which case the value here is zero. The + // actual time between resyncs depends on when the + // sharedProcessor's `shouldResync` function is invoked and when + // the sharedIndexInformer processes `Sync` type Delta objects. + resyncPeriod time.Duration + // nextResync is the earliest time the listener should get a full resync + nextResync time.Time + // resyncLock guards access to resyncPeriod and nextResync + resyncLock sync.Mutex +} + +func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, resyncPeriod time.Duration, now time.Time, bufferSize int) *processorListener { + ret := &processorListener{ + nextCh: make(chan interface{}), + addCh: make(chan interface{}), + handler: handler, + pendingNotifications: *buffer.NewRingGrowing(bufferSize), + requestedResyncPeriod: requestedResyncPeriod, + resyncPeriod: resyncPeriod, + } + + ret.determineNextResync(now) + + return ret +} + +func (p *processorListener) add(notification interface{}) { + p.addCh <- notification +} + +func (p *processorListener) pop() { + defer utilruntime.HandleCrash() + defer close(p.nextCh) // Tell .run() to stop + + var nextCh chan<- interface{} + var notification interface{} + for { + select { + case nextCh <- notification: + // Notification dispatched + var ok bool + notification, ok = p.pendingNotifications.ReadOne() + if !ok { // Nothing to pop + nextCh = nil // Disable this select case + } + case notificationToAdd, ok := <-p.addCh: + if !ok { + return + } + if notification == nil { // No notification to pop (and pendingNotifications is empty) + // Optimize the case - skip adding to pendingNotifications + notification = notificationToAdd + nextCh = p.nextCh + } else { // There is already a notification waiting to be dispatched + p.pendingNotifications.WriteOne(notificationToAdd) + } + } + } +} + +func (p *processorListener) run() { + // this call blocks until the channel is closed. When a panic happens during the notification + // we will catch it, **the offending item will be skipped!**, and after a short delay (one second) + // the next notification will be attempted. This is usually better than the alternative of never + // delivering again. 
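+	// (wait.Until recovers a panicking handler via its internal crash
+	// handling, waits the one-second period, and re-invokes this closure;
+	// the notification that caused the panic was already read from p.nextCh,
+	// so it is skipped on resume.)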
+	stopCh := make(chan struct{})
+	wait.Until(func() {
+		for next := range p.nextCh {
+			switch notification := next.(type) {
+			case updateNotification:
+				p.handler.OnUpdate(notification.oldObj, notification.newObj)
+			case addNotification:
+				p.handler.OnAdd(notification.newObj)
+			case deleteNotification:
+				p.handler.OnDelete(notification.oldObj)
+			default:
+				utilruntime.HandleError(fmt.Errorf("unrecognized notification: %T", next))
+			}
+		}
+		// the only way to get here is if the p.nextCh is empty and closed
+		close(stopCh)
+	}, 1*time.Second, stopCh)
+}
+
+// shouldResync determines if the listener needs a resync. If the listener's resyncPeriod is 0,
+// this always returns false.
+func (p *processorListener) shouldResync(now time.Time) bool {
+	p.resyncLock.Lock()
+	defer p.resyncLock.Unlock()
+
+	if p.resyncPeriod == 0 {
+		return false
+	}
+
+	return now.After(p.nextResync) || now.Equal(p.nextResync)
+}
+
+func (p *processorListener) determineNextResync(now time.Time) {
+	p.resyncLock.Lock()
+	defer p.resyncLock.Unlock()
+
+	p.nextResync = now.Add(p.resyncPeriod)
+}
+
+func (p *processorListener) setResyncPeriod(resyncPeriod time.Duration) {
+	p.resyncLock.Lock()
+	defer p.resyncLock.Unlock()
+
+	p.resyncPeriod = resyncPeriod
+}
diff --git a/constraint/vendor/k8s.io/client-go/tools/cache/store.go b/constraint/vendor/k8s.io/client-go/tools/cache/store.go
new file mode 100644
index 000000000..24ffabab7
--- /dev/null
+++ b/constraint/vendor/k8s.io/client-go/tools/cache/store.go
@@ -0,0 +1,271 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"fmt"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+)
+
+// Store is a generic object storage and processing interface. A
+// Store holds a map from string keys to accumulators, and has
+// operations to add, update, and delete a given object to/from the
+// accumulator currently associated with a given key. A Store also
+// knows how to extract the key from a given object, so many operations
+// are given only the object.
+//
+// In the simplest Store implementations each accumulator is simply
+// the last given object, or empty after Delete, and thus the Store's
+// behavior is simple storage.
+//
+// Reflector knows how to watch a server and update a Store. This
+// package provides a variety of implementations of Store.
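+//
+// A minimal usage sketch (illustrative; the `pod` object is hypothetical):
+//
+//	store := NewStore(MetaNamespaceKeyFunc)
+//	_ = store.Add(pod)                 // keyed as "<namespace>/<name>"
+//	obj, exists, err := store.Get(pod)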
+type Store interface {
+
+	// Add adds the given object to the accumulator associated with the given object's key
+	Add(obj interface{}) error
+
+	// Update updates the given object in the accumulator associated with the given object's key
+	Update(obj interface{}) error
+
+	// Delete deletes the given object from the accumulator associated with the given object's key
+	Delete(obj interface{}) error
+
+	// List returns a list of all the currently non-empty accumulators
+	List() []interface{}
+
+	// ListKeys returns a list of all the keys currently associated with non-empty accumulators
+	ListKeys() []string
+
+	// Get returns the accumulator associated with the given object's key
+	Get(obj interface{}) (item interface{}, exists bool, err error)
+
+	// GetByKey returns the accumulator associated with the given key
+	GetByKey(key string) (item interface{}, exists bool, err error)
+
+	// Replace will delete the contents of the store, using instead the
+	// given list. Store takes ownership of the list; you should not reference
+	// it after calling this function.
+	Replace([]interface{}, string) error
+
+	// Resync is meaningless in the terms appearing here but has
+	// meaning in some implementations that have non-trivial
+	// additional behavior (e.g., DeltaFIFO).
+	Resync() error
+}
+
+// KeyFunc knows how to make a key from an object. Implementations should be deterministic.
+type KeyFunc func(obj interface{}) (string, error)
+
+// KeyError will be returned any time a KeyFunc gives an error; it includes the object
+// at fault.
+type KeyError struct {
+	Obj interface{}
+	Err error
+}
+
+// Error gives a human-readable description of the error.
+func (k KeyError) Error() string {
+	return fmt.Sprintf("couldn't create key for object %+v: %v", k.Obj, k.Err)
+}
+
+// Unwrap implements errors.Unwrap
+func (k KeyError) Unwrap() error {
+	return k.Err
+}
+
+// ExplicitKey can be passed to MetaNamespaceKeyFunc if you have the key for
+// the object but not the object itself.
+type ExplicitKey string
+
+// MetaNamespaceKeyFunc is a convenient default KeyFunc which knows how to make
+// keys for API objects which implement meta.Interface.
+// The key uses the format <namespace>/<name> unless <namespace> is empty, then
+// it's just <name>.
+//
+// TODO: replace key-as-string with a key-as-struct so that this
+// packing/unpacking won't be necessary.
+func MetaNamespaceKeyFunc(obj interface{}) (string, error) {
+	if key, ok := obj.(ExplicitKey); ok {
+		return string(key), nil
+	}
+	meta, err := meta.Accessor(obj)
+	if err != nil {
+		return "", fmt.Errorf("object has no meta: %v", err)
+	}
+	if len(meta.GetNamespace()) > 0 {
+		return meta.GetNamespace() + "/" + meta.GetName(), nil
+	}
+	return meta.GetName(), nil
+}
+
+// SplitMetaNamespaceKey returns the namespace and name that
+// MetaNamespaceKeyFunc encoded into key.
+//
+// TODO: replace key-as-string with a key-as-struct so that this
+// packing/unpacking won't be necessary.
+func SplitMetaNamespaceKey(key string) (namespace, name string, err error) {
+	parts := strings.Split(key, "/")
+	switch len(parts) {
+	case 1:
+		// name only, no namespace
+		return "", parts[0], nil
+	case 2:
+		// namespace and name
+		return parts[0], parts[1], nil
+	}
+
+	return "", "", fmt.Errorf("unexpected key format: %q", key)
+}
+
+// `*cache` implements Indexer in terms of a ThreadSafeStore and an
+// associated KeyFunc.
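+//
+// For example (illustrative; `pod` is hypothetical), the default key helpers
+// round-trip:
+//
+//	key, _ := MetaNamespaceKeyFunc(pod)       // "default/my-pod"
+//	ns, name, _ := SplitMetaNamespaceKey(key) // "default", "my-pod"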
+type cache struct {
+	// cacheStorage bears the burden of thread safety for the cache
+	cacheStorage ThreadSafeStore
+	// keyFunc is used to make the key for objects stored in and retrieved from items, and
+	// should be deterministic.
+	keyFunc KeyFunc
+}
+
+var _ Store = &cache{}
+
+// Add inserts an item into the cache.
+func (c *cache) Add(obj interface{}) error {
+	key, err := c.keyFunc(obj)
+	if err != nil {
+		return KeyError{obj, err}
+	}
+	c.cacheStorage.Add(key, obj)
+	return nil
+}
+
+// Update sets an item in the cache to its updated state.
+func (c *cache) Update(obj interface{}) error {
+	key, err := c.keyFunc(obj)
+	if err != nil {
+		return KeyError{obj, err}
+	}
+	c.cacheStorage.Update(key, obj)
+	return nil
+}
+
+// Delete removes an item from the cache.
+func (c *cache) Delete(obj interface{}) error {
+	key, err := c.keyFunc(obj)
+	if err != nil {
+		return KeyError{obj, err}
+	}
+	c.cacheStorage.Delete(key)
+	return nil
+}
+
+// List returns a list of all the items.
+// List is completely threadsafe as long as you treat all items as immutable.
+func (c *cache) List() []interface{} {
+	return c.cacheStorage.List()
+}
+
+// ListKeys returns a list of all the keys of the objects currently
+// in the cache.
+func (c *cache) ListKeys() []string {
+	return c.cacheStorage.ListKeys()
+}
+
+// GetIndexers returns the indexers of the cache.
+func (c *cache) GetIndexers() Indexers {
+	return c.cacheStorage.GetIndexers()
+}
+
+// Index returns a list of items that match on the index function.
+// Index is thread-safe so long as you treat all items as immutable.
+func (c *cache) Index(indexName string, obj interface{}) ([]interface{}, error) {
+	return c.cacheStorage.Index(indexName, obj)
+}
+
+func (c *cache) IndexKeys(indexName, indexKey string) ([]string, error) {
+	return c.cacheStorage.IndexKeys(indexName, indexKey)
+}
+
+// ListIndexFuncValues returns the list of generated values of an Index func.
+func (c *cache) ListIndexFuncValues(indexName string) []string {
+	return c.cacheStorage.ListIndexFuncValues(indexName)
+}
+
+func (c *cache) ByIndex(indexName, indexKey string) ([]interface{}, error) {
+	return c.cacheStorage.ByIndex(indexName, indexKey)
+}
+
+func (c *cache) AddIndexers(newIndexers Indexers) error {
+	return c.cacheStorage.AddIndexers(newIndexers)
+}
+
+// Get returns the requested item, or sets exists=false.
+// Get is completely threadsafe as long as you treat all items as immutable.
+func (c *cache) Get(obj interface{}) (item interface{}, exists bool, err error) {
+	key, err := c.keyFunc(obj)
+	if err != nil {
+		return nil, false, KeyError{obj, err}
+	}
+	return c.GetByKey(key)
+}
+
+// GetByKey returns the requested item, or exists=false.
+// GetByKey is completely threadsafe as long as you treat all items as immutable.
+func (c *cache) GetByKey(key string) (item interface{}, exists bool, err error) {
+	item, exists = c.cacheStorage.Get(key)
+	return item, exists, nil
+}
+
+// Replace will delete the contents of 'c', using instead the given list.
+// 'c' takes ownership of the list; you should not reference the list again
+// after calling this function.
+func (c *cache) Replace(list []interface{}, resourceVersion string) error { + items := make(map[string]interface{}, len(list)) + for _, item := range list { + key, err := c.keyFunc(item) + if err != nil { + return KeyError{item, err} + } + items[key] = item + } + c.cacheStorage.Replace(items, resourceVersion) + return nil +} + +// Resync is meaningless for one of these +func (c *cache) Resync() error { + return nil +} + +// NewStore returns a Store implemented simply with a map and a lock. +func NewStore(keyFunc KeyFunc) Store { + return &cache{ + cacheStorage: NewThreadSafeStore(Indexers{}, Indices{}), + keyFunc: keyFunc, + } +} + +// NewIndexer returns an Indexer implemented simply with a map and a lock. +func NewIndexer(keyFunc KeyFunc, indexers Indexers) Indexer { + return &cache{ + cacheStorage: NewThreadSafeStore(indexers, Indices{}), + keyFunc: keyFunc, + } +} diff --git a/constraint/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go b/constraint/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go new file mode 100644 index 000000000..6d58c0d69 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go @@ -0,0 +1,332 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "fmt" + "sync" + + "k8s.io/apimachinery/pkg/util/sets" +) + +// ThreadSafeStore is an interface that allows concurrent indexed +// access to a storage backend. It is like Indexer but does not +// (necessarily) know how to extract the Store key from a given +// object. +// +// TL;DR caveats: you must not modify anything returned by Get or List as it will break +// the indexing feature in addition to not being thread safe. +// +// The guarantees of thread safety provided by List/Get are only valid if the caller +// treats returned items as read-only. For example, a pointer inserted in the store +// through `Add` will be returned as is by `Get`. Multiple clients might invoke `Get` +// on the same key and modify the pointer in a non-thread-safe way. Also note that +// modifying objects stored by the indexers (if any) will *not* automatically lead +// to a re-index. So it's not a good idea to directly modify the objects returned by +// Get/List, in general. +type ThreadSafeStore interface { + Add(key string, obj interface{}) + Update(key string, obj interface{}) + Delete(key string) + Get(key string) (item interface{}, exists bool) + List() []interface{} + ListKeys() []string + Replace(map[string]interface{}, string) + Index(indexName string, obj interface{}) ([]interface{}, error) + IndexKeys(indexName, indexKey string) ([]string, error) + ListIndexFuncValues(name string) []string + ByIndex(indexName, indexKey string) ([]interface{}, error) + GetIndexers() Indexers + + // AddIndexers adds more indexers to this store. If you call this after you already have data + // in the store, the results are undefined. 
+ AddIndexers(newIndexers Indexers) error + // Resync is a no-op and is deprecated + Resync() error +} + +// threadSafeMap implements ThreadSafeStore +type threadSafeMap struct { + lock sync.RWMutex + items map[string]interface{} + + // indexers maps a name to an IndexFunc + indexers Indexers + // indices maps a name to an Index + indices Indices +} + +func (c *threadSafeMap) Add(key string, obj interface{}) { + c.Update(key, obj) +} + +func (c *threadSafeMap) Update(key string, obj interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + oldObject := c.items[key] + c.items[key] = obj + c.updateIndices(oldObject, obj, key) +} + +func (c *threadSafeMap) Delete(key string) { + c.lock.Lock() + defer c.lock.Unlock() + if obj, exists := c.items[key]; exists { + c.updateIndices(obj, nil, key) + delete(c.items, key) + } +} + +func (c *threadSafeMap) Get(key string) (item interface{}, exists bool) { + c.lock.RLock() + defer c.lock.RUnlock() + item, exists = c.items[key] + return item, exists +} + +func (c *threadSafeMap) List() []interface{} { + c.lock.RLock() + defer c.lock.RUnlock() + list := make([]interface{}, 0, len(c.items)) + for _, item := range c.items { + list = append(list, item) + } + return list +} + +// ListKeys returns a list of all the keys of the objects currently +// in the threadSafeMap. +func (c *threadSafeMap) ListKeys() []string { + c.lock.RLock() + defer c.lock.RUnlock() + list := make([]string, 0, len(c.items)) + for key := range c.items { + list = append(list, key) + } + return list +} + +func (c *threadSafeMap) Replace(items map[string]interface{}, resourceVersion string) { + c.lock.Lock() + defer c.lock.Unlock() + c.items = items + + // rebuild any index + c.indices = Indices{} + for key, item := range c.items { + c.updateIndices(nil, item, key) + } +} + +// Index returns a list of items that match the given object on the index function. +// Index is thread-safe so long as you treat all items as immutable. +func (c *threadSafeMap) Index(indexName string, obj interface{}) ([]interface{}, error) { + c.lock.RLock() + defer c.lock.RUnlock() + + indexFunc := c.indexers[indexName] + if indexFunc == nil { + return nil, fmt.Errorf("Index with name %s does not exist", indexName) + } + + indexedValues, err := indexFunc(obj) + if err != nil { + return nil, err + } + index := c.indices[indexName] + + var storeKeySet sets.String + if len(indexedValues) == 1 { + // In majority of cases, there is exactly one value matching. + // Optimize the most common path - deduping is not needed here. + storeKeySet = index[indexedValues[0]] + } else { + // Need to de-dupe the return list. + // Since multiple keys are allowed, this can happen. 
+ storeKeySet = sets.String{} + for _, indexedValue := range indexedValues { + for key := range index[indexedValue] { + storeKeySet.Insert(key) + } + } + } + + list := make([]interface{}, 0, storeKeySet.Len()) + for storeKey := range storeKeySet { + list = append(list, c.items[storeKey]) + } + return list, nil +} + +// ByIndex returns a list of the items whose indexed values in the given index include the given indexed value +func (c *threadSafeMap) ByIndex(indexName, indexedValue string) ([]interface{}, error) { + c.lock.RLock() + defer c.lock.RUnlock() + + indexFunc := c.indexers[indexName] + if indexFunc == nil { + return nil, fmt.Errorf("Index with name %s does not exist", indexName) + } + + index := c.indices[indexName] + + set := index[indexedValue] + list := make([]interface{}, 0, set.Len()) + for key := range set { + list = append(list, c.items[key]) + } + + return list, nil +} + +// IndexKeys returns a list of the Store keys of the objects whose indexed values in the given index include the given indexed value. +// IndexKeys is thread-safe so long as you treat all items as immutable. +func (c *threadSafeMap) IndexKeys(indexName, indexedValue string) ([]string, error) { + c.lock.RLock() + defer c.lock.RUnlock() + + indexFunc := c.indexers[indexName] + if indexFunc == nil { + return nil, fmt.Errorf("Index with name %s does not exist", indexName) + } + + index := c.indices[indexName] + + set := index[indexedValue] + return set.List(), nil +} + +func (c *threadSafeMap) ListIndexFuncValues(indexName string) []string { + c.lock.RLock() + defer c.lock.RUnlock() + + index := c.indices[indexName] + names := make([]string, 0, len(index)) + for key := range index { + names = append(names, key) + } + return names +} + +func (c *threadSafeMap) GetIndexers() Indexers { + return c.indexers +} + +func (c *threadSafeMap) AddIndexers(newIndexers Indexers) error { + c.lock.Lock() + defer c.lock.Unlock() + + if len(c.items) > 0 { + return fmt.Errorf("cannot add indexers to running index") + } + + oldKeys := sets.StringKeySet(c.indexers) + newKeys := sets.StringKeySet(newIndexers) + + if oldKeys.HasAny(newKeys.List()...) 
{
+		return fmt.Errorf("indexer conflict: %v", oldKeys.Intersection(newKeys))
+	}
+
+	for k, v := range newIndexers {
+		c.indexers[k] = v
+	}
+	return nil
+}
+
+// updateIndices modifies the object's location in the managed indexes:
+// - for create you must provide only the newObj
+// - for update you must provide both the oldObj and the newObj
+// - for delete you must provide only the oldObj
+// updateIndices must be called from a function that already has a lock on the cache
+func (c *threadSafeMap) updateIndices(oldObj interface{}, newObj interface{}, key string) {
+	var oldIndexValues, indexValues []string
+	var err error
+	for name, indexFunc := range c.indexers {
+		if oldObj != nil {
+			oldIndexValues, err = indexFunc(oldObj)
+		} else {
+			oldIndexValues = oldIndexValues[:0]
+		}
+		if err != nil {
+			panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err))
+		}
+
+		if newObj != nil {
+			indexValues, err = indexFunc(newObj)
+		} else {
+			indexValues = indexValues[:0]
+		}
+		if err != nil {
+			panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err))
+		}
+
+		index := c.indices[name]
+		if index == nil {
+			index = Index{}
+			c.indices[name] = index
+		}
+
+		if len(indexValues) == 1 && len(oldIndexValues) == 1 && indexValues[0] == oldIndexValues[0] {
+			// We optimize for the most common case where indexFunc returns a single value which has not been changed
+			continue
+		}
+
+		for _, value := range oldIndexValues {
+			c.deleteKeyFromIndex(key, value, index)
+		}
+		for _, value := range indexValues {
+			c.addKeyToIndex(key, value, index)
+		}
+	}
+}
+
+func (c *threadSafeMap) addKeyToIndex(key, indexValue string, index Index) {
+	set := index[indexValue]
+	if set == nil {
+		set = sets.String{}
+		index[indexValue] = set
+	}
+	set.Insert(key)
+}
+
+func (c *threadSafeMap) deleteKeyFromIndex(key, indexValue string, index Index) {
+	set := index[indexValue]
+	if set == nil {
+		return
+	}
+	set.Delete(key)
+	// If we don't delete the set when it is empty, indices over high-cardinality,
+	// short-lived resources can cause memory to increase over time from
+	// unused empty sets. See `kubernetes/kubernetes/issues/84959`.
+	if len(set) == 0 {
+		delete(index, indexValue)
+	}
+}
+
+func (c *threadSafeMap) Resync() error {
+	// Nothing to do
+	return nil
+}
+
+// NewThreadSafeStore creates a new instance of ThreadSafeStore.
+func NewThreadSafeStore(indexers Indexers, indices Indices) ThreadSafeStore {
+	return &threadSafeMap{
+		items:    map[string]interface{}{},
+		indexers: indexers,
+		indices:  indices,
+	}
+}
diff --git a/constraint/vendor/k8s.io/client-go/tools/cache/undelta_store.go b/constraint/vendor/k8s.io/client-go/tools/cache/undelta_store.go
new file mode 100644
index 000000000..220845dd3
--- /dev/null
+++ b/constraint/vendor/k8s.io/client-go/tools/cache/undelta_store.go
@@ -0,0 +1,89 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+// UndeltaStore listens to incremental updates and sends complete state on every change.
+// It implements the Store interface so that it can receive a stream of mirrored objects
+// from Reflector. Whenever it receives any complete (Store.Replace) or incremental change
+// (Store.Add, Store.Update, Store.Delete), it sends the complete state by calling PushFunc.
+// It is thread-safe. It guarantees that every change (Add, Update, Replace, Delete) results
+// in one call to PushFunc, but sometimes PushFunc may be called twice with the same values.
+// PushFunc should be thread safe.
+type UndeltaStore struct {
+	Store
+	PushFunc func([]interface{})
+}
+
+// Assert that it implements the Store interface.
+var _ Store = &UndeltaStore{}
+
+// Add inserts an object into the store and sends complete state by calling PushFunc.
+// Note about thread safety. The Store implementation (cache.cache) uses a lock for all methods.
+// In the functions below, the lock gets released and reacquired between the {Add,Delete,etc}
+// and the List. So, the following can happen, resulting in two identical calls to PushFunc.
+//
+//	time  thread 1               thread 2
+//	0     UndeltaStore.Add(a)
+//	1                            UndeltaStore.Add(b)
+//	2     Store.Add(a)
+//	3                            Store.Add(b)
+//	4     Store.List() -> [a,b]
+//	5                            Store.List() -> [a,b]
+func (u *UndeltaStore) Add(obj interface{}) error {
+	if err := u.Store.Add(obj); err != nil {
+		return err
+	}
+	u.PushFunc(u.Store.List())
+	return nil
+}
+
+// Update sets an item in the cache to its updated state and sends complete state by calling PushFunc.
+func (u *UndeltaStore) Update(obj interface{}) error {
+	if err := u.Store.Update(obj); err != nil {
+		return err
+	}
+	u.PushFunc(u.Store.List())
+	return nil
+}
+
+// Delete removes an item from the cache and sends complete state by calling PushFunc.
+func (u *UndeltaStore) Delete(obj interface{}) error {
+	if err := u.Store.Delete(obj); err != nil {
+		return err
+	}
+	u.PushFunc(u.Store.List())
+	return nil
+}
+
+// Replace will delete the contents of the current store, using instead the given list.
+// 'u' takes ownership of the list; you should not reference the list again
+// after calling this function.
+// The complete state of the new contents will be sent by calling PushFunc after replacement.
+func (u *UndeltaStore) Replace(list []interface{}, resourceVersion string) error {
+	if err := u.Store.Replace(list, resourceVersion); err != nil {
+		return err
+	}
+	u.PushFunc(u.Store.List())
+	return nil
+}
+
+// NewUndeltaStore returns an UndeltaStore implemented with a Store.
+func NewUndeltaStore(pushFunc func([]interface{}), keyFunc KeyFunc) *UndeltaStore {
+	return &UndeltaStore{
+		Store:    NewStore(keyFunc),
+		PushFunc: pushFunc,
+	}
+}
diff --git a/constraint/vendor/k8s.io/client-go/tools/pager/pager.go b/constraint/vendor/k8s.io/client-go/tools/pager/pager.go
new file mode 100644
index 000000000..805859e09
--- /dev/null
+++ b/constraint/vendor/k8s.io/client-go/tools/pager/pager.go
@@ -0,0 +1,250 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pager
+
+import (
+	"context"
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+const defaultPageSize = 500
+const defaultPageBufferSize = 10
+
+// ListPageFunc returns a list object for the given list options.
+type ListPageFunc func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error)
+
+// SimplePageFunc adapts a context-less list function into one that accepts a context.
+func SimplePageFunc(fn func(opts metav1.ListOptions) (runtime.Object, error)) ListPageFunc {
+	return func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) {
+		return fn(opts)
+	}
+}
+
+// ListPager assists client code in breaking large list queries into multiple
+// smaller chunks of PageSize or smaller. PageFn is expected to accept a
+// metav1.ListOptions that supports paging and return a list. The pager does
+// not alter the field or label selectors on the initial options list.
+type ListPager struct {
+	PageSize int64
+	PageFn   ListPageFunc
+
+	// FullListIfExpired, when true, causes the pager to fall back to a full
+	// (unpaginated) list if a paginated request's continue token expires.
+	FullListIfExpired bool
+
+	// Number of pages to buffer
+	PageBufferSize int32
+}
+
+// New creates a new pager from the provided pager function using the default
+// options. As a last resort, it will fall back to a full list if an expiration
+// error is encountered.
+func New(fn ListPageFunc) *ListPager {
+	return &ListPager{
+		PageSize:          defaultPageSize,
+		PageFn:            fn,
+		FullListIfExpired: true,
+		PageBufferSize:    defaultPageBufferSize,
+	}
+}
+
+// TODO: introduce other types of paging functions - such as those that retrieve from a list
+// of namespaces.
+
+// List returns a single list object, but attempts to retrieve smaller chunks from the
+// server to reduce the impact on the server. If the chunk attempt fails, it will load
+// the full list instead. The Limit field on options, if unset, will default to the page size.
+func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runtime.Object, bool, error) {
+	if options.Limit == 0 {
+		options.Limit = p.PageSize
+	}
+	requestedResourceVersion := options.ResourceVersion
+	requestedResourceVersionMatch := options.ResourceVersionMatch
+	var list *metainternalversion.List
+	paginatedResult := false
+
+	for {
+		select {
+		case <-ctx.Done():
+			return nil, paginatedResult, ctx.Err()
+		default:
+		}
+
+		obj, err := p.PageFn(ctx, options)
+		if err != nil {
+			// Only fall back to a full list if an "Expired" error is returned, FullListIfExpired is true, and
+			// the "Expired" error occurred on page 2 or later (the full-list fallback is intended to prevent
+			// pager.List from failing when the resource version established by the first page request falls
+			// out of compaction during the subsequent list requests).
+			if !errors.IsResourceExpired(err) || !p.FullListIfExpired || options.Continue == "" {
+				return nil, paginatedResult, err
+			}
+			// the list expired while we were processing, fall back to a full list at
+			// the requested ResourceVersion.
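+			// Limit is reset to 0 so the server treats the request as unpaginated and
+			// returns the remaining results in one response; Continue is cleared because
+			// the server rejects requests that combine a continue token with a resource
+			// version.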
+			options.Limit = 0
+			options.Continue = ""
+			options.ResourceVersion = requestedResourceVersion
+			options.ResourceVersionMatch = requestedResourceVersionMatch
+			result, err := p.PageFn(ctx, options)
+			return result, paginatedResult, err
+		}
+		m, err := meta.ListAccessor(obj)
+		if err != nil {
+			return nil, paginatedResult, fmt.Errorf("returned object must be a list: %v", err)
+		}
+
+		// exit early and return the object we got if we haven't processed any pages
+		if len(m.GetContinue()) == 0 && list == nil {
+			return obj, paginatedResult, nil
+		}
+
+		// initialize the list and fill its contents
+		if list == nil {
+			list = &metainternalversion.List{Items: make([]runtime.Object, 0, options.Limit+1)}
+			list.ResourceVersion = m.GetResourceVersion()
+			list.SelfLink = m.GetSelfLink()
+		}
+		if err := meta.EachListItem(obj, func(obj runtime.Object) error {
+			list.Items = append(list.Items, obj)
+			return nil
+		}); err != nil {
+			return nil, paginatedResult, err
+		}
+
+		// if we have no more items, return the list
+		if len(m.GetContinue()) == 0 {
+			return list, paginatedResult, nil
+		}
+
+		// set the next loop up
+		options.Continue = m.GetContinue()
+		// Clear the ResourceVersion(Match) on the subsequent List calls to avoid the
+		// `specifying resource version is not allowed when using continue` error.
+		// See https://github.com/kubernetes/kubernetes/issues/85221#issuecomment-553748143.
+		options.ResourceVersion = ""
+		options.ResourceVersionMatch = ""
+		// At this point, the result is already paginated.
+		paginatedResult = true
+	}
+}
+
+// EachListItem fetches runtime.Object items using this ListPager and invokes fn on each item. If
+// fn returns an error, processing stops and that error is returned. If fn does not return an error,
+// any error encountered while retrieving the list from the server is returned. If the context
+// cancels or times out, the context error is returned. Since the list is retrieved in paginated
+// chunks, an "Expired" error (metav1.StatusReasonExpired) may be returned if the pagination list
+// requests exceed the expiration limit of the apiserver being called.
+//
+// Items are retrieved in chunks from the server to reduce the impact on the server with up to
+// ListPager.PageBufferSize chunks buffered concurrently in the background.
+func (p *ListPager) EachListItem(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error {
+	return p.eachListChunkBuffered(ctx, options, func(obj runtime.Object) error {
+		return meta.EachListItem(obj, fn)
+	})
+}
+
+// eachListChunkBuffered fetches runtime.Object list chunks using this ListPager and invokes fn on
+// each list chunk. If fn returns an error, processing stops and that error is returned. If fn does
+// not return an error, any error encountered while retrieving the list from the server is
+// returned. If the context cancels or times out, the context error is returned. Since the list is
+// retrieved in paginated chunks, an "Expired" error (metav1.StatusReasonExpired) may be returned if
+// the pagination list requests exceed the expiration limit of the apiserver being called.
+//
+// Up to ListPager.PageBufferSize chunks are buffered concurrently in the background.
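+//
+// A hedged usage sketch (listFn and ctx are assumed to exist; listFn wraps a
+// typed client's List call and is not part of this package):
+//
+//	pager := New(SimplePageFunc(listFn))
+//	err := pager.EachListItem(ctx, metav1.ListOptions{}, func(obj runtime.Object) error {
+//		// handle one item; returning an error stops processing
+//		return nil
+//	})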
+func (p *ListPager) eachListChunkBuffered(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error {
+	if p.PageBufferSize < 0 {
+		return fmt.Errorf("ListPager.PageBufferSize must be >= 0, got %d", p.PageBufferSize)
+	}
+
+	// Ensure the background goroutine is stopped if this call exits before all list items are
+	// processed. The cancellation error from this deferred cancel call is never returned to the
+	// caller; either the list result has already been sent to bgResultC or the fn error is
+	// returned and the cancellation error is discarded.
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	chunkC := make(chan runtime.Object, p.PageBufferSize)
+	bgResultC := make(chan error, 1)
+	go func() {
+		defer utilruntime.HandleCrash()
+
+		var err error
+		defer func() {
+			close(chunkC)
+			bgResultC <- err
+		}()
+		err = p.eachListChunk(ctx, options, func(chunk runtime.Object) error {
+			select {
+			case chunkC <- chunk: // buffer the chunk, this can block
+			case <-ctx.Done():
+				return ctx.Err()
+			}
+			return nil
+		})
+	}()
+
+	for o := range chunkC {
+		err := fn(o)
+		if err != nil {
+			return err // any fn error should be returned immediately
+		}
+	}
+	// promote the results of our background goroutine to the foreground
+	return <-bgResultC
+}
+
+// eachListChunk fetches runtime.Object list chunks using this ListPager and invokes fn on each list
+// chunk. If fn returns an error, processing stops and that error is returned. If fn does not return
+// an error, any error encountered while retrieving the list from the server is returned. If the
+// context cancels or times out, the context error is returned. Since the list is retrieved in
+// paginated chunks, an "Expired" error (metav1.StatusReasonExpired) may be returned if the
+// pagination list requests exceed the expiration limit of the apiserver being called.
+func (p *ListPager) eachListChunk(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error {
+	if options.Limit == 0 {
+		options.Limit = p.PageSize
+	}
+	for {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+		}
+
+		obj, err := p.PageFn(ctx, options)
+		if err != nil {
+			return err
+		}
+		m, err := meta.ListAccessor(obj)
+		if err != nil {
+			return fmt.Errorf("returned object must be a list: %v", err)
+		}
+		if err := fn(obj); err != nil {
+			return err
+		}
+		// if we have no more items, return.
+		if len(m.GetContinue()) == 0 {
+			return nil
+		}
+		// set the next loop up
+		options.Continue = m.GetContinue()
+	}
+}
diff --git a/constraint/vendor/k8s.io/utils/buffer/ring_growing.go b/constraint/vendor/k8s.io/utils/buffer/ring_growing.go
new file mode 100644
index 000000000..86965a513
--- /dev/null
+++ b/constraint/vendor/k8s.io/utils/buffer/ring_growing.go
@@ -0,0 +1,72 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package buffer
+
+// RingGrowing is a growing ring buffer.
+// Not thread safe.
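+//
+// A minimal usage sketch (values are illustrative):
+//
+//	r := NewRingGrowing(2)
+//	r.WriteOne("a")
+//	r.WriteOne("b")
+//	r.WriteOne("c") // full at two items, so the buffer grows to four slots
+//	v, ok := r.ReadOne() // v == "a", ok == true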
+type RingGrowing struct { + data []interface{} + n int // Size of Data + beg int // First available element + readable int // Number of data items available +} + +// NewRingGrowing constructs a new RingGrowing instance with provided parameters. +func NewRingGrowing(initialSize int) *RingGrowing { + return &RingGrowing{ + data: make([]interface{}, initialSize), + n: initialSize, + } +} + +// ReadOne reads (consumes) first item from the buffer if it is available, otherwise returns false. +func (r *RingGrowing) ReadOne() (data interface{}, ok bool) { + if r.readable == 0 { + return nil, false + } + r.readable-- + element := r.data[r.beg] + r.data[r.beg] = nil // Remove reference to the object to help GC + if r.beg == r.n-1 { + // Was the last element + r.beg = 0 + } else { + r.beg++ + } + return element, true +} + +// WriteOne adds an item to the end of the buffer, growing it if it is full. +func (r *RingGrowing) WriteOne(data interface{}) { + if r.readable == r.n { + // Time to grow + newN := r.n * 2 + newData := make([]interface{}, newN) + to := r.beg + r.readable + if to <= r.n { + copy(newData, r.data[r.beg:to]) + } else { + copied := copy(newData, r.data[r.beg:]) + copy(newData[copied:], r.data[:(to%r.n)]) + } + r.beg = 0 + r.data = newData + r.n = newN + } + r.data[(r.readable+r.beg)%r.n] = data + r.readable++ +} diff --git a/constraint/vendor/modules.txt b/constraint/vendor/modules.txt index b69934c26..2d3c18282 100644 --- a/constraint/vendor/modules.txt +++ b/constraint/vendor/modules.txt @@ -38,6 +38,9 @@ github.com/evanphx/json-patch # github.com/felixge/httpsnoop v1.0.2 ## explicit; go 1.13 github.com/felixge/httpsnoop +# github.com/fsnotify/fsnotify v1.5.4 +## explicit; go 1.16 +github.com/fsnotify/fsnotify # github.com/ghodss/yaml v1.0.0 ## explicit github.com/ghodss/yaml @@ -585,6 +588,8 @@ k8s.io/apimachinery/pkg/runtime/serializer/streaming k8s.io/apimachinery/pkg/runtime/serializer/versioning k8s.io/apimachinery/pkg/selection k8s.io/apimachinery/pkg/types +k8s.io/apimachinery/pkg/util/cache +k8s.io/apimachinery/pkg/util/diff k8s.io/apimachinery/pkg/util/errors k8s.io/apimachinery/pkg/util/framer k8s.io/apimachinery/pkg/util/intstr @@ -639,11 +644,13 @@ k8s.io/client-go/rest k8s.io/client-go/rest/watch k8s.io/client-go/restmapper k8s.io/client-go/tools/auth +k8s.io/client-go/tools/cache k8s.io/client-go/tools/clientcmd k8s.io/client-go/tools/clientcmd/api k8s.io/client-go/tools/clientcmd/api/latest k8s.io/client-go/tools/clientcmd/api/v1 k8s.io/client-go/tools/metrics +k8s.io/client-go/tools/pager k8s.io/client-go/transport k8s.io/client-go/util/cert k8s.io/client-go/util/connrotation @@ -683,6 +690,7 @@ k8s.io/kube-openapi/pkg/validation/strfmt/bson k8s.io/kube-openapi/pkg/validation/validate # k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 ## explicit; go 1.12 +k8s.io/utils/buffer k8s.io/utils/clock k8s.io/utils/clock/testing k8s.io/utils/integer @@ -700,6 +708,8 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client # sigs.k8s.io/controller-runtime v0.12.1 ## explicit; go 1.17 +sigs.k8s.io/controller-runtime/pkg/certwatcher +sigs.k8s.io/controller-runtime/pkg/certwatcher/metrics sigs.k8s.io/controller-runtime/pkg/client sigs.k8s.io/controller-runtime/pkg/client/apiutil sigs.k8s.io/controller-runtime/pkg/client/config @@ -713,6 +723,7 @@ sigs.k8s.io/controller-runtime/pkg/internal/testing/certs sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane 
sigs.k8s.io/controller-runtime/pkg/internal/testing/process
 sigs.k8s.io/controller-runtime/pkg/log
+sigs.k8s.io/controller-runtime/pkg/metrics
 sigs.k8s.io/controller-runtime/pkg/scheme
 sigs.k8s.io/controller-runtime/pkg/webhook/conversion
# sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2
diff --git a/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go
new file mode 100644
index 000000000..1030013db
--- /dev/null
+++ b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go
@@ -0,0 +1,166 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package certwatcher
+
+import (
+	"context"
+	"crypto/tls"
+	"sync"
+
+	"github.com/fsnotify/fsnotify"
+	"sigs.k8s.io/controller-runtime/pkg/certwatcher/metrics"
+	logf "sigs.k8s.io/controller-runtime/pkg/internal/log"
+)
+
+var log = logf.RuntimeLog.WithName("certwatcher")
+
+// CertWatcher watches certificate and key files for changes. When either file
+// changes, it reads and parses both and updates the certificate it serves via
+// GetCertificate.
+type CertWatcher struct {
+	sync.RWMutex
+
+	currentCert *tls.Certificate
+	watcher     *fsnotify.Watcher
+
+	certPath string
+	keyPath  string
+}
+
+// New returns a new CertWatcher watching the given certificate and key.
+func New(certPath, keyPath string) (*CertWatcher, error) {
+	var err error
+
+	cw := &CertWatcher{
+		certPath: certPath,
+		keyPath:  keyPath,
+	}
+
+	// Initial read of certificate and key.
+	if err := cw.ReadCertificate(); err != nil {
+		return nil, err
+	}
+
+	cw.watcher, err = fsnotify.NewWatcher()
+	if err != nil {
+		return nil, err
+	}
+
+	return cw, nil
+}
+
+// GetCertificate fetches the currently loaded certificate, which may be nil.
+func (cw *CertWatcher) GetCertificate(_ *tls.ClientHelloInfo) (*tls.Certificate, error) {
+	cw.RLock()
+	defer cw.RUnlock()
+	return cw.currentCert, nil
+}
+
+// Start starts the watch on the certificate and key files.
+func (cw *CertWatcher) Start(ctx context.Context) error {
+	files := []string{cw.certPath, cw.keyPath}
+
+	for _, f := range files {
+		if err := cw.watcher.Add(f); err != nil {
+			return err
+		}
+	}
+
+	go cw.Watch()
+
+	log.Info("Starting certificate watcher")
+
+	// Block until the context is done.
+	<-ctx.Done()
+
+	return cw.watcher.Close()
+}
+
+// Watch reads events from the watcher's channel and reacts to changes.
+func (cw *CertWatcher) Watch() {
+	for {
+		select {
+		case event, ok := <-cw.watcher.Events:
+			// Channel is closed.
+			if !ok {
+				return
+			}
+
+			cw.handleEvent(event)
+
+		case err, ok := <-cw.watcher.Errors:
+			// Channel is closed.
+			if !ok {
+				return
+			}
+
+			log.Error(err, "certificate watch error")
+		}
+	}
+}
+
+// ReadCertificate reads the certificate and key files from disk, parses them,
+// and updates the current certificate on the watcher so that subsequent calls
+// to GetCertificate return the new certificate.
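+//
+// For context, a typical wiring of the watcher (a sketch; the paths and ctx
+// are placeholders):
+//
+//	watcher, err := New("/path/to/tls.crt", "/path/to/tls.key")
+//	if err != nil {
+//		return err
+//	}
+//	go func() {
+//		_ = watcher.Start(ctx)
+//	}()
+//	cfg := &tls.Config{GetCertificate: watcher.GetCertificate}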
+func (cw *CertWatcher) ReadCertificate() error {
+	metrics.ReadCertificateTotal.Inc()
+	cert, err := tls.LoadX509KeyPair(cw.certPath, cw.keyPath)
+	if err != nil {
+		metrics.ReadCertificateErrors.Inc()
+		return err
+	}
+
+	cw.Lock()
+	cw.currentCert = &cert
+	cw.Unlock()
+
+	log.Info("Updated current TLS certificate")
+
+	return nil
+}
+
+func (cw *CertWatcher) handleEvent(event fsnotify.Event) {
+	// Only care about events which may modify the contents of the file.
+	if !(isWrite(event) || isRemove(event) || isCreate(event)) {
+		return
+	}
+
+	log.V(1).Info("certificate event", "event", event)
+
+	// If the file was removed, re-add the watch.
+	if isRemove(event) {
+		if err := cw.watcher.Add(event.Name); err != nil {
+			log.Error(err, "error re-watching file")
+		}
+	}
+
+	if err := cw.ReadCertificate(); err != nil {
+		log.Error(err, "error re-reading certificate")
+	}
+}
+
+func isWrite(event fsnotify.Event) bool {
+	return event.Op&fsnotify.Write == fsnotify.Write
+}
+
+func isCreate(event fsnotify.Event) bool {
+	return event.Op&fsnotify.Create == fsnotify.Create
+}
+
+func isRemove(event fsnotify.Event) bool {
+	return event.Op&fsnotify.Remove == fsnotify.Remove
+}
diff --git a/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/doc.go b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/doc.go
new file mode 100644
index 000000000..40c2fc0bf
--- /dev/null
+++ b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/doc.go
@@ -0,0 +1,23 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package certwatcher is a helper for reloading certificates from disk to be used
+with TLS servers. It provides a helper func `GetCertificate` which can be
+wired into a `tls.Config` and passed to your tls.Listener. For a detailed
+server example, see pkg/webhook/server.go.
+*/
+package certwatcher
diff --git a/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/metrics/metrics.go b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/metrics/metrics.go
new file mode 100644
index 000000000..05869eff0
--- /dev/null
+++ b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/metrics/metrics.go
@@ -0,0 +1,45 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+	"sigs.k8s.io/controller-runtime/pkg/metrics"
+)
+
+var (
+	// ReadCertificateTotal is a prometheus counter metric which holds the total
+	// number of certificate reads.
+	ReadCertificateTotal = prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "certwatcher_read_certificate_total",
+		Help: "Total number of certificate reads",
+	})
+
+	// ReadCertificateErrors is a prometheus counter metric which holds the total
+	// number of certificate read errors.
+	ReadCertificateErrors = prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "certwatcher_read_certificate_errors_total",
+		Help: "Total number of certificate read errors",
+	})
+)
+
+func init() {
+	metrics.Registry.MustRegister(
+		ReadCertificateTotal,
+		ReadCertificateErrors,
+	)
+}
diff --git a/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go
new file mode 100644
index 000000000..d32ce2534
--- /dev/null
+++ b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go
@@ -0,0 +1,231 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"context"
+	"net/url"
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+	reflectormetrics "k8s.io/client-go/tools/cache"
+	clientmetrics "k8s.io/client-go/tools/metrics"
+)
+
+// this file contains setup logic to initialize the myriad of places
+// that client-go registers metrics. We copy the names and formats
+// from Kubernetes so that we match the core controllers.
+
+// Metrics subsystem and all of the keys used by the rest client.
+const (
+	RestClientSubsystem = "rest_client"
+	LatencyKey          = "request_latency_seconds"
+	ResultKey           = "requests_total"
+)
+
+// Metrics subsystem and all keys used by the reflectors.
+const (
+	ReflectorSubsystem     = "reflector"
+	ListsTotalKey          = "lists_total"
+	ListsDurationKey       = "list_duration_seconds"
+	ItemsPerListKey        = "items_per_list"
+	WatchesTotalKey        = "watches_total"
+	ShortWatchesTotalKey   = "short_watches_total"
+	WatchDurationKey       = "watch_duration_seconds"
+	ItemsPerWatchKey       = "items_per_watch"
+	LastResourceVersionKey = "last_resource_version"
+)
+
+var (
+	// client metrics.
+
+	// RequestLatency reports the request latency in seconds per verb/URL.
+	// Deprecated: This metric is deprecated for removal in a future release: using the URL as a
+	// dimension results in cardinality explosion for some consumers. It was deprecated upstream
+	// in k8s v1.14 and hidden in v1.17 via https://github.com/kubernetes/kubernetes/pull/83836.
+	// It is not registered by default. To register:
+	//	import (
+	//		clientmetrics "k8s.io/client-go/tools/metrics"
+	//		clmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
+	//	)
+	//
+	//	func init() {
+	//		clmetrics.Registry.MustRegister(clmetrics.RequestLatency)
+	//		clientmetrics.Register(clientmetrics.RegisterOpts{
+	//			RequestLatency: clmetrics.LatencyAdapter,
+	//		})
+	//	}
+	RequestLatency = prometheus.NewHistogramVec(prometheus.HistogramOpts{
+		Subsystem: RestClientSubsystem,
+		Name:      LatencyKey,
+		Help:      "Request latency in seconds. Broken down by verb and URL.",
+		Buckets:   prometheus.ExponentialBuckets(0.001, 2, 10),
+	}, []string{"verb", "url"})
+
+	requestResult = prometheus.NewCounterVec(prometheus.CounterOpts{
+		Subsystem: RestClientSubsystem,
+		Name:      ResultKey,
+		Help:      "Number of HTTP requests, partitioned by status code, method, and host.",
+	}, []string{"code", "method", "host"})
+
+	// reflector metrics.
+
+	// TODO(directxman12): update these to be histograms once the metrics overhaul KEP
+	// PRs start landing.
+
+	listsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
+		Subsystem: ReflectorSubsystem,
+		Name:      ListsTotalKey,
+		Help:      "Total number of API lists done by the reflectors",
+	}, []string{"name"})
+
+	listsDuration = prometheus.NewSummaryVec(prometheus.SummaryOpts{
+		Subsystem: ReflectorSubsystem,
+		Name:      ListsDurationKey,
+		Help:      "How long an API list takes to return and decode for the reflectors",
+	}, []string{"name"})
+
+	itemsPerList = prometheus.NewSummaryVec(prometheus.SummaryOpts{
+		Subsystem: ReflectorSubsystem,
+		Name:      ItemsPerListKey,
+		Help:      "How many items an API list returns to the reflectors",
+	}, []string{"name"})
+
+	watchesTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
+		Subsystem: ReflectorSubsystem,
+		Name:      WatchesTotalKey,
+		Help:      "Total number of API watches done by the reflectors",
+	}, []string{"name"})
+
+	shortWatchesTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
+		Subsystem: ReflectorSubsystem,
+		Name:      ShortWatchesTotalKey,
+		Help:      "Total number of short API watches done by the reflectors",
+	}, []string{"name"})
+
+	watchDuration = prometheus.NewSummaryVec(prometheus.SummaryOpts{
+		Subsystem: ReflectorSubsystem,
+		Name:      WatchDurationKey,
+		Help:      "How long an API watch takes to return and decode for the reflectors",
+	}, []string{"name"})
+
+	itemsPerWatch = prometheus.NewSummaryVec(prometheus.SummaryOpts{
+		Subsystem: ReflectorSubsystem,
+		Name:      ItemsPerWatchKey,
+		Help:      "How many items an API watch returns to the reflectors",
+	}, []string{"name"})
+
+	lastResourceVersion = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+		Subsystem: ReflectorSubsystem,
+		Name:      LastResourceVersionKey,
+		Help:      "Last resource version seen for the reflectors",
+	}, []string{"name"})
+)
+
+func init() {
+	registerClientMetrics()
+	registerReflectorMetrics()
+}
+
+// registerClientMetrics sets up the client latency metrics from client-go.
+func registerClientMetrics() {
+	// register the metrics with our registry
+	Registry.MustRegister(requestResult)
+
+	// register the metrics with client-go
+	clientmetrics.Register(clientmetrics.RegisterOpts{
+		RequestResult: &resultAdapter{metric: requestResult},
+	})
+}
+
+// registerReflectorMetrics sets up reflector (reconcile) loop metrics.
+func registerReflectorMetrics() { + Registry.MustRegister(listsTotal) + Registry.MustRegister(listsDuration) + Registry.MustRegister(itemsPerList) + Registry.MustRegister(watchesTotal) + Registry.MustRegister(shortWatchesTotal) + Registry.MustRegister(watchDuration) + Registry.MustRegister(itemsPerWatch) + Registry.MustRegister(lastResourceVersion) + + reflectormetrics.SetReflectorMetricsProvider(reflectorMetricsProvider{}) +} + +// this section contains adapters, implementations, and other sundry organic, artisanally +// hand-crafted syntax trees required to convince client-go that it actually wants to let +// someone use its metrics. + +// Client metrics adapters (method #1 for client-go metrics), +// copied (more-or-less directly) from k8s.io/kubernetes setup code +// (which isn't anywhere in an easily-importable place). + +// LatencyAdapter implements LatencyMetric. +type LatencyAdapter struct { + metric *prometheus.HistogramVec +} + +// Observe increments the request latency metric for the given verb/URL. +func (l *LatencyAdapter) Observe(_ context.Context, verb string, u url.URL, latency time.Duration) { + l.metric.WithLabelValues(verb, u.String()).Observe(latency.Seconds()) +} + +type resultAdapter struct { + metric *prometheus.CounterVec +} + +func (r *resultAdapter) Increment(_ context.Context, code, method, host string) { + r.metric.WithLabelValues(code, method, host).Inc() +} + +// Reflector metrics provider (method #2 for client-go metrics), +// copied (more-or-less directly) from k8s.io/kubernetes setup code +// (which isn't anywhere in an easily-importable place). + +type reflectorMetricsProvider struct{} + +func (reflectorMetricsProvider) NewListsMetric(name string) reflectormetrics.CounterMetric { + return listsTotal.WithLabelValues(name) +} + +func (reflectorMetricsProvider) NewListDurationMetric(name string) reflectormetrics.SummaryMetric { + return listsDuration.WithLabelValues(name) +} + +func (reflectorMetricsProvider) NewItemsInListMetric(name string) reflectormetrics.SummaryMetric { + return itemsPerList.WithLabelValues(name) +} + +func (reflectorMetricsProvider) NewWatchesMetric(name string) reflectormetrics.CounterMetric { + return watchesTotal.WithLabelValues(name) +} + +func (reflectorMetricsProvider) NewShortWatchesMetric(name string) reflectormetrics.CounterMetric { + return shortWatchesTotal.WithLabelValues(name) +} + +func (reflectorMetricsProvider) NewWatchDurationMetric(name string) reflectormetrics.SummaryMetric { + return watchDuration.WithLabelValues(name) +} + +func (reflectorMetricsProvider) NewItemsInWatchMetric(name string) reflectormetrics.SummaryMetric { + return itemsPerWatch.WithLabelValues(name) +} + +func (reflectorMetricsProvider) NewLastResourceVersionMetric(name string) reflectormetrics.GaugeMetric { + return lastResourceVersion.WithLabelValues(name) +} diff --git a/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/doc.go b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/doc.go new file mode 100644 index 000000000..6ed9df951 --- /dev/null +++ b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package metrics contains controller-related metrics utilities.
+*/
+package metrics
diff --git a/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/listener.go b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/listener.go
new file mode 100644
index 000000000..123d8c15f
--- /dev/null
+++ b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/listener.go
@@ -0,0 +1,52 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"fmt"
+	"net"
+
+	logf "sigs.k8s.io/controller-runtime/pkg/internal/log"
+)
+
+var log = logf.RuntimeLog.WithName("metrics")
+
+// DefaultBindAddress sets the default bind address for the metrics listener.
+// Metrics are served on this address by default.
+var DefaultBindAddress = ":8080"
+
+// NewListener creates a new TCP listener bound to the given address.
+func NewListener(addr string) (net.Listener, error) {
+	if addr == "" {
+		// If the metrics bind address is empty, default to ":8080"
+		addr = DefaultBindAddress
+	}
+
+	// An addr of "0" disables serving metrics altogether.
+	if addr == "0" {
+		return nil, nil
+	}
+
+	log.Info("Metrics server is starting to listen", "addr", addr)
+	ln, err := net.Listen("tcp", addr)
+	if err != nil {
+		er := fmt.Errorf("error listening on %s: %w", addr, err)
+		log.Error(er, "metrics server failed to listen. You may want to disable the metrics server or use another port if it is due to conflicts")
+		return nil, er
+	}
+	return ln, nil
+}
diff --git a/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/registry.go b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/registry.go
new file mode 100644
index 000000000..ce17124d5
--- /dev/null
+++ b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/registry.go
@@ -0,0 +1,30 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import "github.com/prometheus/client_golang/prometheus"
+
+// RegistererGatherer combines both parts of the API of a Prometheus
+// registry: the Registerer and the Gatherer interfaces.
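+//
+// From an importing package, custom collectors can be registered with the
+// shared Registry (a sketch; the metric name is illustrative and assumes the
+// prometheus client package is imported):
+//
+//	reconciles := prometheus.NewCounter(prometheus.CounterOpts{
+//		Name: "my_controller_reconciles_total",
+//		Help: "Total number of reconciles",
+//	})
+//	metrics.Registry.MustRegister(reconciles)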
+type RegistererGatherer interface { + prometheus.Registerer + prometheus.Gatherer +} + +// Registry is a prometheus registry for storing metrics within the +// controller-runtime. +var Registry RegistererGatherer = prometheus.NewRegistry() diff --git a/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/workqueue.go b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/workqueue.go new file mode 100644 index 000000000..8ca47235d --- /dev/null +++ b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/workqueue.go @@ -0,0 +1,130 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + "k8s.io/client-go/util/workqueue" +) + +// This file is copied and adapted from k8s.io/kubernetes/pkg/util/workqueue/prometheus +// which registers metrics to the default prometheus Registry. We require very +// similar functionality, but must register metrics to a different Registry. + +// Metrics subsystem and all keys used by the workqueue. +const ( + WorkQueueSubsystem = "workqueue" + DepthKey = "depth" + AddsKey = "adds_total" + QueueLatencyKey = "queue_duration_seconds" + WorkDurationKey = "work_duration_seconds" + UnfinishedWorkKey = "unfinished_work_seconds" + LongestRunningProcessorKey = "longest_running_processor_seconds" + RetriesKey = "retries_total" +) + +var ( + depth = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Subsystem: WorkQueueSubsystem, + Name: DepthKey, + Help: "Current depth of workqueue", + }, []string{"name"}) + + adds = prometheus.NewCounterVec(prometheus.CounterOpts{ + Subsystem: WorkQueueSubsystem, + Name: AddsKey, + Help: "Total number of adds handled by workqueue", + }, []string{"name"}) + + latency = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Subsystem: WorkQueueSubsystem, + Name: QueueLatencyKey, + Help: "How long in seconds an item stays in workqueue before being requested", + Buckets: prometheus.ExponentialBuckets(10e-9, 10, 10), + }, []string{"name"}) + + workDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Subsystem: WorkQueueSubsystem, + Name: WorkDurationKey, + Help: "How long in seconds processing an item from workqueue takes.", + Buckets: prometheus.ExponentialBuckets(10e-9, 10, 10), + }, []string{"name"}) + + unfinished = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Subsystem: WorkQueueSubsystem, + Name: UnfinishedWorkKey, + Help: "How many seconds of work has been done that " + + "is in progress and hasn't been observed by work_duration. Large " + + "values indicate stuck threads. 
One can deduce the number of stuck " + + "threads by observing the rate at which this increases.", + }, []string{"name"}) + + longestRunningProcessor = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Subsystem: WorkQueueSubsystem, + Name: LongestRunningProcessorKey, + Help: "How many seconds has the longest running " + + "processor for workqueue been running.", + }, []string{"name"}) + + retries = prometheus.NewCounterVec(prometheus.CounterOpts{ + Subsystem: WorkQueueSubsystem, + Name: RetriesKey, + Help: "Total number of retries handled by workqueue", + }, []string{"name"}) +) + +func init() { + Registry.MustRegister(depth) + Registry.MustRegister(adds) + Registry.MustRegister(latency) + Registry.MustRegister(workDuration) + Registry.MustRegister(unfinished) + Registry.MustRegister(longestRunningProcessor) + Registry.MustRegister(retries) + + workqueue.SetProvider(workqueueMetricsProvider{}) +} + +type workqueueMetricsProvider struct{} + +func (workqueueMetricsProvider) NewDepthMetric(name string) workqueue.GaugeMetric { + return depth.WithLabelValues(name) +} + +func (workqueueMetricsProvider) NewAddsMetric(name string) workqueue.CounterMetric { + return adds.WithLabelValues(name) +} + +func (workqueueMetricsProvider) NewLatencyMetric(name string) workqueue.HistogramMetric { + return latency.WithLabelValues(name) +} + +func (workqueueMetricsProvider) NewWorkDurationMetric(name string) workqueue.HistogramMetric { + return workDuration.WithLabelValues(name) +} + +func (workqueueMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric { + return unfinished.WithLabelValues(name) +} + +func (workqueueMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric { + return longestRunningProcessor.WithLabelValues(name) +} + +func (workqueueMetricsProvider) NewRetriesMetric(name string) workqueue.CounterMetric { + return retries.WithLabelValues(name) +}
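+
+// A hedged usage sketch: importing this package for its side effects is enough
+// to route client-go workqueue metrics through Registry; the queue name below
+// is illustrative:
+//
+//	import (
+//		"k8s.io/client-go/util/workqueue"
+//		_ "sigs.k8s.io/controller-runtime/pkg/metrics"
+//	)
+//
+//	q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "example")
+//	defer q.ShutDown()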