From a1a9b5f8817a78bcae035b0e1173075202dbd68f Mon Sep 17 00:00:00 2001 From: Sunjay Bhatia <5337253+sunjayBhatia@users.noreply.github.com> Date: Tue, 13 Oct 2020 23:03:58 -0400 Subject: [PATCH 01/17] Add Platform Specific Feature guidance to PR template (#13547) Signed-off-by: Sunjay Bhatia --- PULL_REQUESTS.md | 10 ++++++++++ PULL_REQUEST_TEMPLATE.md | 1 + 2 files changed, 11 insertions(+) diff --git a/PULL_REQUESTS.md b/PULL_REQUESTS.md index 9b3d5cd043ba..aa8c5e750247 100644 --- a/PULL_REQUESTS.md +++ b/PULL_REQUESTS.md @@ -70,6 +70,16 @@ current version. Please include any relevant links. Each release note should be relevant subsystem in **alphabetical order** (see existing examples as a guide) and include links to relevant parts of the documentation. Thank you! Please write in N/A if there are no release notes. +### Platform Specific Features + +If this change involves any platform specific features (e.g. utilizing OS-specific socket options) +or only implements new features for a limited set of platforms (e.g. Linux amd64 only), please +include an explanation that addresses the reasoning behind this. Please also open a new tracking +issue for each platform this change is not implemented on (and link them in the PR) to enable +maintainers and contributors to triage. Reviewers will look for the change to avoid +`#ifdef ` and rather prefer feature guards to not enable the change on a given platform +using the build system. + ### Runtime guard If this PR has a user-visible behavioral change, or otherwise falls under the diff --git a/PULL_REQUEST_TEMPLATE.md b/PULL_REQUEST_TEMPLATE.md index 5a1545aacd7a..366209eed929 100644 --- a/PULL_REQUEST_TEMPLATE.md +++ b/PULL_REQUEST_TEMPLATE.md @@ -18,6 +18,7 @@ Risk Level: Testing: Docs Changes: Release Notes: +Platform Specific Features: [Optional Runtime guard:] [Optional Fixes #Issue] [Optional Deprecated:] From bc5c44a1a842379daa6f810dc3841c71a513661b Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Tue, 13 Oct 2020 23:04:36 -0400 Subject: [PATCH 02/17] [fuzz] Added validation for secrets (#13543) Signed-off-by: Zach --- api/envoy/extensions/transport_sockets/tls/v3/secret.proto | 6 +++++- .../extensions/transport_sockets/tls/v4alpha/secret.proto | 3 ++- .../envoy/extensions/transport_sockets/tls/v3/secret.proto | 6 +++++- .../extensions/transport_sockets/tls/v4alpha/secret.proto | 3 ++- .../clusterfuzz-testcase-filter_fuzz_test-5681522444861440 | 7 +++++++ 5 files changed, 21 insertions(+), 4 deletions(-) create mode 100644 test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5681522444861440 diff --git a/api/envoy/extensions/transport_sockets/tls/v3/secret.proto b/api/envoy/extensions/transport_sockets/tls/v3/secret.proto index 80c68a56f5ce..f25370c3c9f6 100644 --- a/api/envoy/extensions/transport_sockets/tls/v3/secret.proto +++ b/api/envoy/extensions/transport_sockets/tls/v3/secret.proto @@ -12,6 +12,7 @@ import "udpa/annotations/migrate.proto"; import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v3"; option java_outer_classname = "SecretProto"; @@ -33,7 +34,10 @@ message SdsSecretConfig { // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. // When both name and config are specified, then secret can be fetched and/or reloaded via // SDS. 
When only name is specified, then secret will be loaded from static resources. - string name = 1 [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; + string name = 1 [ + (validate.rules).string = {min_len: 1}, + (udpa.annotations.field_migrate).oneof_promotion = "name_specifier" + ]; // Resource locator for SDS. This is mutually exclusive to *name*. // [#not-implemented-hide:] diff --git a/api/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto b/api/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto index 11306f21415a..9848eaadef0b 100644 --- a/api/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto +++ b/api/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto @@ -11,6 +11,7 @@ import "udpa/core/v1/resource_locator.proto"; import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v4alpha"; option java_outer_classname = "SecretProto"; @@ -35,7 +36,7 @@ message SdsSecretConfig { // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. // When both name and config are specified, then secret can be fetched and/or reloaded via // SDS. When only name is specified, then secret will be loaded from static resources. - string name = 1; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Resource locator for SDS. This is mutually exclusive to *name*. // [#not-implemented-hide:] diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/secret.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/secret.proto index 80c68a56f5ce..f25370c3c9f6 100644 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/secret.proto +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v3/secret.proto @@ -12,6 +12,7 @@ import "udpa/annotations/migrate.proto"; import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v3"; option java_outer_classname = "SecretProto"; @@ -33,7 +34,10 @@ message SdsSecretConfig { // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. // When both name and config are specified, then secret can be fetched and/or reloaded via // SDS. When only name is specified, then secret will be loaded from static resources. - string name = 1 [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; + string name = 1 [ + (validate.rules).string = {min_len: 1}, + (udpa.annotations.field_migrate).oneof_promotion = "name_specifier" + ]; // Resource locator for SDS. This is mutually exclusive to *name*. 
// [#not-implemented-hide:] diff --git a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto index 11306f21415a..9848eaadef0b 100644 --- a/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto +++ b/generated_api_shadow/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto @@ -11,6 +11,7 @@ import "udpa/core/v1/resource_locator.proto"; import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v4alpha"; option java_outer_classname = "SecretProto"; @@ -35,7 +36,7 @@ message SdsSecretConfig { // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. // When both name and config are specified, then secret can be fetched and/or reloaded via // SDS. When only name is specified, then secret will be loaded from static resources. - string name = 1; + string name = 1 [(validate.rules).string = {min_len: 1}]; // Resource locator for SDS. This is mutually exclusive to *name*. // [#not-implemented-hide:] diff --git a/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5681522444861440 b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5681522444861440 new file mode 100644 index 000000000000..60ffb84c5ac3 --- /dev/null +++ b/test/extensions/filters/http/common/fuzz/filter_corpus/clusterfuzz-testcase-filter_fuzz_test-5681522444861440 @@ -0,0 +1,7 @@ +config { + name: "envoy.filters.http.oauth" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.http.oauth2.v3alpha.OAuth2" + value: "\n\306\t\022\006\022\001(\032\001r\032<\n\035envoy.filters.\360\222\213\217Qgrpc_stats\022\r\022\013\022\002\010\006\"\005\010\200\200\200\001\032\014\022\n\n\001t\"\005\010\200\200\200\001\"\006\022\001(\032\001r*\005\n\003:\001=2\351\010\n\346\010*\343\010\n\010\n\006\010\200\200\200\200\004\022\326\010^^^^^j!^^.*..............................................*............................config {\n name: \"envoy.filters.http.jwt_authn\"\n typed....._config {\n type_url: \"type.googleapis.com/envoy.extensions.filters.http.jwt_authn.v3.JwtAu........[thentication\"\n value: \"\\n=\\n\\022not_health_check_f\\022\\\'\\032\\010\\n\\006\\n\\004\\177\\177\\177\\177B\\033envoyype/matcher/v3/number.\\n1\\n\\0A_]^06\\000\\000\\000\\000\\000\\002\\022\\\'\\032\\010\\n\\006\\n\\004\\177\\177\\177\\177B\\033envoyype/matche!^^.*..............................................*............................config {\n name: \"envoy.filters.http.jwt_authn\"\n typed....._config {\n type_url: \"type.googleapis.com/envoy.extensions.filters.http.jwt_authn.v3.JwtAu........[thentication\"\n value: \"\\n=\\n\\022not_health_check_f\\022\\\'\\032\\010\\n\\006\\n\\004\\177\\177\\177\\177B\\033envoyype/matcher/v3/number.\\n1\\n\\0A_]^06\\000\\000\\000\\000\\000\\002\\022\\\'\\032\\010\\n\\006\\n\\004\\177\\177\\177\\177B\\033envoyype/matcher/v3/number.\\n+\\n\\000\\022\\\'\\032\\010\\n\\006\\n\\004\\177\\177\\177\\177B\\r/v3/number.\\n+\\n\\000\\022\\\'\\032\\010\\n\\006\\n\\004\\177\\177\\177\\177B\\033envoyype/matcher/v3/number.\"\n }\n}\nB\003\n\001A" + } +} From 78dfea2cc80da8a89d868dc87a4b14f48d141500 Mon Sep 17 00:00:00 2001 From: Pengyuan Bian Date: Tue, 13 Oct 2020 20:25:58 -0700 Subject: 
[PATCH 03/17] [Wasm] Add cluster metadata fallback and upstream host metadata (#13477) Signed-off-by: Pengyuan Bian --- source/extensions/common/wasm/context.cc | 11 +++- .../filters/http/wasm/test_data/test_cpp.cc | 10 +++- .../filters/http/wasm/wasm_filter_test.cc | 50 +++++++++++++++++++ 3 files changed, 69 insertions(+), 2 deletions(-) diff --git a/source/extensions/common/wasm/context.cc b/source/extensions/common/wasm/context.cc index a064a229492f..e6e4f8ae0f05 100644 --- a/source/extensions/common/wasm/context.cc +++ b/source/extensions/common/wasm/context.cc @@ -403,7 +403,8 @@ WasmResult serializeValue(Filters::Common::Expr::CelValue value, std::string* re _f(METADATA) _f(REQUEST) _f(RESPONSE) _f(CONNECTION) _f(UPSTREAM) _f(NODE) _f(SOURCE) \ _f(DESTINATION) _f(LISTENER_DIRECTION) _f(LISTENER_METADATA) _f(CLUSTER_NAME) \ _f(CLUSTER_METADATA) _f(ROUTE_NAME) _f(ROUTE_METADATA) _f(PLUGIN_NAME) \ - _f(PLUGIN_ROOT_ID) _f(PLUGIN_VM_ID) _f(CONNECTION_ID) _f(FILTER_STATE) + _f(UPSTREAM_HOST_METADATA) _f(PLUGIN_ROOT_ID) _f(PLUGIN_VM_ID) _f(CONNECTION_ID) \ + _f(FILTER_STATE) static inline std::string downCase(std::string s) { std::transform(s.begin(), s.end(), s.begin(), [](unsigned char c) { return std::tolower(c); }); @@ -524,6 +525,14 @@ Context::findValue(absl::string_view name, Protobuf::Arena* arena, bool last) co case PropertyToken::CLUSTER_METADATA: if (info && info->upstreamHost()) { return CelValue::CreateMessage(&info->upstreamHost()->cluster().metadata(), arena); + } else if (info && info->upstreamClusterInfo().has_value() && + info->upstreamClusterInfo().value()) { + return CelValue::CreateMessage(&info->upstreamClusterInfo().value()->metadata(), arena); + } + break; + case PropertyToken::UPSTREAM_HOST_METADATA: + if (info && info->upstreamHost() && info->upstreamHost()->metadata()) { + return CelValue::CreateMessage(info->upstreamHost()->metadata().get(), arena); } break; case PropertyToken::ROUTE_NAME: diff --git a/test/extensions/filters/http/wasm/test_data/test_cpp.cc b/test/extensions/filters/http/wasm/test_data/test_cpp.cc index fe5d7722a6e3..3e009a3da8c3 100644 --- a/test/extensions/filters/http/wasm/test_data/test_cpp.cc +++ b/test/extensions/filters/http/wasm/test_data/test_cpp.cc @@ -328,6 +328,11 @@ void TestContext::onLog() { if (response_trailer && response_trailer->view() != "") { logWarn("response bogus-trailer found"); } + } else if (test == "cluster_metadata") { + std::string cluster_metadata; + if (getValue({"cluster_metadata", "filter_metadata", "namespace", "key"}, &cluster_metadata)) { + logWarn("cluster metadata: " + cluster_metadata); + } } else if (test == "property") { setFilterState("wasm_state", "wasm_value"); auto path = getRequestHeader(":path"); @@ -343,6 +348,10 @@ void TestContext::onLog() { if (getValue({"response", "code"}, &responseCode)) { logWarn("response.code: " + std::to_string(responseCode)); } + std::string upstream_host_metadata; + if (getValue({"upstream_host_metadata", "filter_metadata", "namespace", "key"}, &upstream_host_metadata)) { + logWarn("upstream host metadata: " + upstream_host_metadata); + } logWarn("state: " + getProperty({"wasm_state"}).value()->toString()); } else { logWarn("onLog " + std::to_string(id()) + " " + std::string(path->view())); @@ -541,7 +550,6 @@ void TestContext::onLog() { {{"source", "address"}, "127.0.0.1:0"}, {{"destination", "address"}, "127.0.0.2:0"}, {{"upstream", "address"}, "10.0.0.1:443"}, - {{"cluster_metadata"}, ""}, {{"route_metadata"}, ""}, }; for (const auto& property : properties) { diff 
--git a/test/extensions/filters/http/wasm/wasm_filter_test.cc b/test/extensions/filters/http/wasm/wasm_filter_test.cc index b001368763b0..538481f36f6c 100644 --- a/test/extensions/filters/http/wasm/wasm_filter_test.cc +++ b/test/extensions/filters/http/wasm/wasm_filter_test.cc @@ -1294,6 +1294,8 @@ TEST_P(WasmHttpFilterTest, Property) { log_(spdlog::level::warn, Eq(absl::string_view("metadata: wasm_request_get_value")))); EXPECT_CALL(filter(), log_(spdlog::level::warn, Eq(absl::string_view("response.code: 403")))); EXPECT_CALL(filter(), log_(spdlog::level::warn, Eq(absl::string_view("state: wasm_value")))); + EXPECT_CALL(filter(), + log_(spdlog::level::warn, Eq(absl::string_view("upstream host metadata: endpoint")))); root_context_->onTick(0); Http::TestRequestHeaderMapImpl request_headers{{":path", "/test_context"}}; @@ -1306,6 +1308,54 @@ TEST_P(WasmHttpFilterTest, Property) { EXPECT_CALL(encoder_callbacks_, connection()).WillRepeatedly(Return(&connection)); NiceMock route_entry; EXPECT_CALL(request_stream_info_, routeEntry()).WillRepeatedly(Return(&route_entry)); + std::shared_ptr> host_description( + new NiceMock()); + auto metadata = std::make_shared( + TestUtility::parseYaml( + R"EOF( + filter_metadata: + namespace: + key: endpoint + )EOF")); + EXPECT_CALL(*host_description, metadata()).WillRepeatedly(Return(metadata)); + EXPECT_CALL(request_stream_info_, upstreamHost()).WillRepeatedly(Return(host_description)); + filter().log(&request_headers, nullptr, nullptr, log_stream_info); +} + +TEST_P(WasmHttpFilterTest, ClusterMetadata) { + if (std::get<1>(GetParam()) == "rust") { + // TODO(PiotrSikora): test not yet implemented using Rust SDK. + return; + } + setupTest("", "cluster_metadata"); + setupFilter(); + EXPECT_CALL(filter(), + log_(spdlog::level::warn, Eq(absl::string_view("cluster metadata: cluster")))); + auto cluster = std::make_shared>(); + auto cluster_metadata = std::make_shared( + TestUtility::parseYaml( + R"EOF( + filter_metadata: + namespace: + key: cluster + )EOF")); + + std::shared_ptr> host_description( + new NiceMock()); + StreamInfo::MockStreamInfo log_stream_info; + Http::TestRequestHeaderMapImpl request_headers{{}}; + + EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(request_stream_info_)); + EXPECT_CALL(*cluster, metadata()).WillRepeatedly(ReturnRef(*cluster_metadata)); + EXPECT_CALL(*host_description, cluster()).WillRepeatedly(ReturnRef(*cluster)); + EXPECT_CALL(request_stream_info_, upstreamHost()).WillRepeatedly(Return(host_description)); + filter().log(&request_headers, nullptr, nullptr, log_stream_info); + + // If upstream host is empty, fallback to upstream cluster info for cluster metadata. 
+ EXPECT_CALL(request_stream_info_, upstreamHost()).WillRepeatedly(Return(nullptr)); + EXPECT_CALL(request_stream_info_, upstreamClusterInfo()).WillRepeatedly(Return(cluster)); + EXPECT_CALL(filter(), + log_(spdlog::level::warn, Eq(absl::string_view("cluster metadata: cluster")))); filter().log(&request_headers, nullptr, nullptr, log_stream_info); } From 61d1f38e0bde7039370abbebf2ffdf3c0341a650 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 14 Oct 2020 09:26:26 -0400 Subject: [PATCH 04/17] check_format: adding 2 more release note checks (#13444) Making sure reloadable flags only get one set of backticks, making sure ref links aren't missing the : Risk Level: n/a (tooling) Testing: against last release's notes Docs Changes: n/a Release Notes: n/a Signed-off-by: Alyssa Wilk --- tools/code_format/check_format.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/tools/code_format/check_format.py b/tools/code_format/check_format.py index 5fd0fa59ded3..5f17d1468162 100755 --- a/tools/code_format/check_format.py +++ b/tools/code_format/check_format.py @@ -131,7 +131,8 @@ PROTO_VALIDATION_STRING = re.compile(r'\bmin_bytes\b') VERSION_HISTORY_NEW_LINE_REGEX = re.compile("\* ([a-z \-_]+): ([a-z:`]+)") VERSION_HISTORY_SECTION_NAME = re.compile("^[A-Z][A-Za-z ]*$") -RELOADABLE_FLAG_REGEX = re.compile(".*(.)(envoy.reloadable_features.[^ ]*)\s.*") +RELOADABLE_FLAG_REGEX = re.compile(".*(..)(envoy.reloadable_features.[^ ]*)\s.*") +INVALID_REFLINK = re.compile(".* ref:.*") # Check for punctuation in a terminal ref clause, e.g. # :ref:`panic mode. ` REF_WITH_PUNCTUATION_REGEX = re.compile(".*\. <[^<]*>`\s*") @@ -467,11 +468,16 @@ def reportError(message): next_word_to_check = '' # first word after : prior_line = '' + invalid_reflink_match = INVALID_REFLINK.match(line) + if invalid_reflink_match: + reportError("Found text \" ref:\". 
This should probably be \" :ref:\"\n%s" % line) + # make sure flags are surrounded by ``s flag_match = RELOADABLE_FLAG_REGEX.match(line) if flag_match: - if not flag_match.groups()[0].startswith('`'): - reportError("Flag `%s` should be enclosed in back ticks" % flag_match.groups()[1]) + if not flag_match.groups()[0].startswith(' `'): + reportError("Flag `%s` should be enclosed in a single set of back ticks" % + flag_match.groups()[1]) if line.startswith("* "): if not endsWithPeriod(prior_line): From 5607d1bdd693c145bad019763399817a4f80a3dc Mon Sep 17 00:00:00 2001 From: phlax Date: Wed, 14 Oct 2020 16:15:25 +0100 Subject: [PATCH 05/17] docs: Remove/make generic lyft references in docs (#13559) Signed-off-by: Ryan Northey --- docs/root/configuration/other_features/rate_limit.rst | 4 ++-- .../other_features/global_rate_limiting.rst | 2 +- docs/root/intro/arch_overview/other_protocols/dynamo.rst | 2 +- docs/root/intro/arch_overview/other_protocols/mongo.rst | 4 ++-- docs/root/intro/deployment_types/double_proxy.rst | 5 ++--- docs/root/intro/deployment_types/front_proxy.rst | 5 ++--- docs/root/intro/deployment_types/service_to_service.rst | 3 +-- docs/root/start/install/ref_configs.rst | 9 ++++----- 8 files changed, 15 insertions(+), 19 deletions(-) diff --git a/docs/root/configuration/other_features/rate_limit.rst b/docs/root/configuration/other_features/rate_limit.rst index d3503a899878..4fa374cb4a69 100644 --- a/docs/root/configuration/other_features/rate_limit.rst +++ b/docs/root/configuration/other_features/rate_limit.rst @@ -14,5 +14,5 @@ gRPC service IDL Envoy expects the rate limit service to support the gRPC IDL specified in :ref:`rls.proto `. See the IDL documentation -for more information on how the API works. See Lyft's reference implementation -`here `_. +for more information on how the API works. See Envoy's reference implementation +`here `_. diff --git a/docs/root/intro/arch_overview/other_features/global_rate_limiting.rst b/docs/root/intro/arch_overview/other_features/global_rate_limiting.rst index e8dbfbc2a2bb..538c5a7a1fe7 100644 --- a/docs/root/intro/arch_overview/other_features/global_rate_limiting.rst +++ b/docs/root/intro/arch_overview/other_features/global_rate_limiting.rst @@ -14,7 +14,7 @@ normally during typical request patterns but still prevent cascading failure whe to fail. Global rate limiting is a good solution for this case. Envoy integrates directly with a global gRPC rate limiting service. Although any service that -implements the defined RPC/IDL protocol can be used, Lyft provides a `reference implementation `_ +implements the defined RPC/IDL protocol can be used, Envoy provides a `reference implementation `_ written in Go which uses a Redis backend. Envoy’s rate limit integration has the following features: * **Network level rate limit filter**: Envoy will call the rate limit service for every new diff --git a/docs/root/intro/arch_overview/other_protocols/dynamo.rst b/docs/root/intro/arch_overview/other_protocols/dynamo.rst index d757fe5aa42d..8fa20b25cc27 100644 --- a/docs/root/intro/arch_overview/other_protocols/dynamo.rst +++ b/docs/root/intro/arch_overview/other_protocols/dynamo.rst @@ -12,7 +12,7 @@ Envoy supports an HTTP level DynamoDB sniffing filter with the following feature * Batch operation partial failure statistics. The DynamoDB filter is a good example of Envoy’s extensibility and core abstractions at the HTTP -layer. At Lyft we use this filter for all application communication with DynamoDB. 
It provides an +layer, and can be used to filter all application communication with DynamoDB. It provides an invaluable source of data agnostic to the application platform and specific AWS SDK in use. DynamoDB filter :ref:`configuration `. diff --git a/docs/root/intro/arch_overview/other_protocols/mongo.rst b/docs/root/intro/arch_overview/other_protocols/mongo.rst index 6ae713ea2087..e5ccf0a49d9f 100644 --- a/docs/root/intro/arch_overview/other_protocols/mongo.rst +++ b/docs/root/intro/arch_overview/other_protocols/mongo.rst @@ -12,8 +12,8 @@ Envoy supports a network level MongoDB sniffing filter with the following featur * Per callsite statistics via the $comment query parameter. * Fault injection. -The MongoDB filter is a good example of Envoy’s extensibility and core abstractions. At Lyft we use -this filter between all applications and our databases. It provides an invaluable source of data +The MongoDB filter is a good example of Envoy’s extensibility and core abstractions, and can be used +to filter between all applications and MongoDB databases. It provides an invaluable source of data that is agnostic to the application platform and specific MongoDB driver in use. MongoDB proxy filter :ref:`configuration reference `. diff --git a/docs/root/intro/deployment_types/double_proxy.rst b/docs/root/intro/deployment_types/double_proxy.rst index fd2757747bf4..ccf622eb63ac 100644 --- a/docs/root/intro/deployment_types/double_proxy.rst +++ b/docs/root/intro/deployment_types/double_proxy.rst @@ -21,6 +21,5 @@ ordinarily would not be trustable (such as the x-forwarded-for HTTP header). Configuration template ^^^^^^^^^^^^^^^^^^^^^^ -The source distribution includes an example double proxy configuration that is very similar to -the version that Lyft runs in production. See :ref:`here ` for more -information. +The source distribution includes an example double proxy configuration. See +:ref:`here ` for more information. diff --git a/docs/root/intro/deployment_types/front_proxy.rst b/docs/root/intro/deployment_types/front_proxy.rst index f89e8cb17da5..d7e6494b9e79 100644 --- a/docs/root/intro/deployment_types/front_proxy.rst +++ b/docs/root/intro/deployment_types/front_proxy.rst @@ -21,6 +21,5 @@ reverse proxy provides the following features: Configuration template ^^^^^^^^^^^^^^^^^^^^^^ -The source distribution includes an example front proxy configuration that is very similar to -the version that Lyft runs in production. See :ref:`here ` for more -information. +The source distribution includes an example front proxy configuration. See +:ref:`here ` for more information. diff --git a/docs/root/intro/deployment_types/service_to_service.rst b/docs/root/intro/deployment_types/service_to_service.rst index a4200a607ab3..7fb891f900ac 100644 --- a/docs/root/intro/deployment_types/service_to_service.rst +++ b/docs/root/intro/deployment_types/service_to_service.rst @@ -64,5 +64,4 @@ load balancing, statistics gathering, etc. Configuration template ^^^^^^^^^^^^^^^^^^^^^^ -The source distribution includes :ref:`an example service-to-service configuration` -that is very similar to the version that Lyft runs in production. +The source distribution includes :ref:`an example service-to-service configuration`. diff --git a/docs/root/start/install/ref_configs.rst b/docs/root/start/install/ref_configs.rst index 5981498beff3..7aebc814da96 100644 --- a/docs/root/start/install/ref_configs.rst +++ b/docs/root/start/install/ref_configs.rst @@ -17,11 +17,10 @@ see the :ref:`configuration reference `. 
Configuration generator ----------------------- -Envoy configurations can become relatively complicated. At Lyft we use `jinja -`_ templating to make the configurations easier to create and manage. The -source distribution includes a version of the configuration generator that loosely approximates what -we use at Lyft. We have also included three example configuration templates for each of the above -three scenarios. +Envoy configurations can become relatively complicated. The +source distribution includes a version of the configuration generator that uses `jinja +`_ templating to make the configurations easier to create and manage. We +have also included three example configuration templates for each of the above three scenarios. * Generator script: :repo:`configs/configgen.py` * Service to service template: :repo:`configs/envoy_service_to_service_v2.template.yaml` From b7a47564ca2ee6844f6344a4f8152d40c052da86 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Wed, 14 Oct 2020 08:16:42 -0700 Subject: [PATCH 06/17] ci: use azp for api and go-control-plane sync (#13550) Signed-off-by: Lizan Zhou --- .azure-pipelines/pipelines.yml | 48 +++++++++++++++++++++++++++++++ .circleci/config.yml | 43 --------------------------- ci/api_mirror.sh | 13 ++++----- ci/do_ci.sh | 14 +++++---- ci/go_mirror.sh | 10 +++++-- ci/run_envoy_docker.sh | 1 + tools/api/generate_go_protobuf.py | 12 ++++---- tools/api_boost/api_boost.py | 1 - 8 files changed, 77 insertions(+), 65 deletions(-) delete mode 100644 .circleci/config.yml diff --git a/.azure-pipelines/pipelines.yml b/.azure-pipelines/pipelines.yml index 31fb5e06fae0..f8c5ce972174 100644 --- a/.azure-pipelines/pipelines.yml +++ b/.azure-pipelines/pipelines.yml @@ -118,6 +118,8 @@ jobs: - job: filter_example displayName: "filter-example sync" dependsOn: [] + pool: + vmImage: "ubuntu-18.04" condition: and(succeeded(), eq(variables['PostSubmit'], true)) steps: - task: InstallSSHKey@0 @@ -133,6 +135,50 @@ jobs: env: AZP_BRANCH: $(Build.SourceBranch) + - job: api + displayName: "data-plane-api sync" + dependsOn: [] + condition: and(succeeded(), eq(variables['PostSubmit'], true)) + pool: + vmImage: "ubuntu-18.04" + steps: + - task: InstallSSHKey@0 + inputs: + hostName: "github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==" + sshPublicKey: "$(DataPlaneApiPublicKey)" + sshPassphrase: "$(SshDeployKeyPassphrase)" + sshKeySecureFile: "$(DataPlaneApiPrivateKey)" + + - bash: ci/api_mirror.sh + displayName: "Sync data-plane-api" + workingDirectory: $(Build.SourcesDirectory) + env: + AZP_BRANCH: $(Build.SourceBranch) + + - job: go_control_plane + displayName: "go-control-plane sync" + dependsOn: [] + condition: and(succeeded(), eq(variables['PostSubmit'], true)) + steps: + - task: InstallSSHKey@0 + inputs: + hostName: "github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==" + sshPublicKey: 
"$(GoControlPlanePublicKey)" + sshPassphrase: "$(SshDeployKeyPassphrase)" + sshKeySecureFile: "$(GoControlPlanePrivateKey)" + + - bash: | + cp -a ~/.ssh $(Build.StagingDirectory)/ + ci/run_envoy_docker.sh 'ci/go_mirror.sh' + displayName: "Sync go-control-plane" + workingDirectory: $(Build.SourcesDirectory) + env: + ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) + BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com + BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance + GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) + AZP_BRANCH: $(Build.SourceBranch) + - job: bazel displayName: "Linux-x64" dependsOn: ["release"] @@ -141,6 +187,8 @@ jobs: strategy: maxParallel: 3 matrix: + api: + CI_TARGET: "bazel.api" gcc: CI_TARGET: "bazel.gcc" clang_tidy: diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index e04a9737d8f9..000000000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,43 +0,0 @@ -version: 2.1 - -executors: - ubuntu-build: - description: "A regular build executor based on ubuntu image" - docker: - # NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8 - - image: envoyproxy/envoy-build-ubuntu:b480535e8423b5fd7c102fd30c92f4785519e33a - resource_class: xlarge - working_directory: /source - -jobs: - api: - executor: ubuntu-build - steps: - - run: rm -rf /home/circleci/project/.git # CircleCI git caching is likely broken - - checkout - - run: ci/do_circle_ci.sh bazel.api - - add_ssh_keys: - fingerprints: - - "fb:f3:fe:be:1c:b2:ec:b6:25:f9:7b:a6:87:54:02:8c" - - run: ci/api_mirror.sh - - store_artifacts: - path: /build/envoy/generated - destination: / - - go_control_plane_mirror: - executor: ubuntu-build - steps: - - run: rm -rf /home/circleci/project/.git # CircleCI git caching is likely broken - - checkout - - run: ci/do_circle_ci.sh bazel.api - - add_ssh_keys: - fingerprints: - - "9d:3b:fe:7c:09:3b:ce:a9:6a:de:de:41:fb:6b:52:62" - - run: ci/go_mirror.sh - -workflows: - version: 2 - all: - jobs: - - api - - go_control_plane_mirror diff --git a/ci/api_mirror.sh b/ci/api_mirror.sh index 077cdd1d3cfe..03e8ab85d80c 100755 --- a/ci/api_mirror.sh +++ b/ci/api_mirror.sh @@ -3,16 +3,15 @@ set -e CHECKOUT_DIR=../data-plane-api +MAIN_BRANCH="refs/heads/master" +API_MAIN_BRANCH="master" -if [ -z "$CIRCLE_PULL_REQUEST" ] && [ "$CIRCLE_BRANCH" == "master" ] -then +if [[ "${AZP_BRANCH}" == "${MAIN_BRANCH}" ]]; then echo "Cloning..." - git clone git@github.com:envoyproxy/data-plane-api "$CHECKOUT_DIR" + git clone git@github.com:envoyproxy/data-plane-api "$CHECKOUT_DIR" -b "${API_MAIN_BRANCH}" - git -C "$CHECKOUT_DIR" config user.name "data-plane-api(CircleCI)" + git -C "$CHECKOUT_DIR" config user.name "data-plane-api(Azure Pipelines)" git -C "$CHECKOUT_DIR" config user.email data-plane-api@users.noreply.github.com - git -C "$CHECKOUT_DIR" fetch - git -C "$CHECKOUT_DIR" checkout -B master origin/master # Determine last envoyproxy/envoy SHA in envoyproxy/data-plane-api MIRROR_MSG="Mirrored from https://github.com/envoyproxy/envoy" @@ -40,6 +39,6 @@ then done echo "Pushing..." 
- git -C "$CHECKOUT_DIR" push origin master + git -C "$CHECKOUT_DIR" push origin "${API_MAIN_BRANCH}" echo "Done" fi diff --git a/ci/do_ci.sh b/ci/do_ci.sh index 4e908fd6aab4..f3958aeaedf6 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -312,19 +312,21 @@ elif [[ "$CI_TARGET" == "bazel.compile_time_options" ]]; then collect_build_profile build exit 0 elif [[ "$CI_TARGET" == "bazel.api" ]]; then + # Use libstdc++ because the API booster links to prebuilt libclang*/libLLVM* installed in /opt/llvm/lib, + # which is built with libstdc++. Using libstdc++ for whole of the API CI job to avoid unnecessary rebuild. + ENVOY_STDLIB="libstdc++" setup_clang_toolchain + export LLVM_CONFIG="${LLVM_ROOT}"/bin/llvm-config echo "Validating API structure..." ./tools/api/validate_structure.py + echo "Testing API and API Boosting..." + bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" -c fastbuild @envoy_api_canonical//test/... @envoy_api_canonical//tools/... \ + @envoy_api_canonical//tools:tap2pcap_test @envoy_dev//clang_tools/api_booster/... echo "Building API..." bazel build "${BAZEL_BUILD_OPTIONS[@]}" -c fastbuild @envoy_api_canonical//envoy/... - echo "Testing API..." - bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" -c fastbuild @envoy_api_canonical//test/... @envoy_api_canonical//tools/... \ - @envoy_api_canonical//tools:tap2pcap_test - echo "Testing API boosting (unit tests)..." - bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" -c fastbuild @envoy_dev//clang_tools/api_booster/... echo "Testing API boosting (golden C++ tests)..." # We use custom BAZEL_BUILD_OPTIONS here; the API booster isn't capable of working with libc++ yet. - LLVM_CONFIG="${LLVM_ROOT}"/bin/llvm-config BAZEL_BUILD_OPTIONS="--config=clang" python3.8 ./tools/api_boost/api_boost_test.py + BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS[*]}" python3.8 ./tools/api_boost/api_boost_test.py exit 0 elif [[ "$CI_TARGET" == "bazel.coverage" || "$CI_TARGET" == "bazel.fuzz_coverage" ]]; then setup_clang_toolchain diff --git a/ci/go_mirror.sh b/ci/go_mirror.sh index 80be4cc0b532..63f96d0d7969 100755 --- a/ci/go_mirror.sh +++ b/ci/go_mirror.sh @@ -2,7 +2,11 @@ set -e -if [ -z "$CIRCLE_PULL_REQUEST" ] && [ "$CIRCLE_BRANCH" == "master" ] -then - tools/api/generate_go_protobuf.py +MAIN_BRANCH="refs/heads/master" + +# shellcheck source=ci/setup_cache.sh +. "$(dirname "$0")"/setup_cache.sh + +if [[ "${AZP_BRANCH}" == "${MAIN_BRANCH}" ]]; then + BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_EXTRA_OPTIONS}" tools/api/generate_go_protobuf.py fi diff --git a/ci/run_envoy_docker.sh b/ci/run_envoy_docker.sh index 003a5ab28b76..842b51b6ce89 100755 --- a/ci/run_envoy_docker.sh +++ b/ci/run_envoy_docker.sh @@ -56,6 +56,7 @@ mkdir -p "${ENVOY_DOCKER_BUILD_DIR}" [[ -t 1 ]] && ENVOY_DOCKER_OPTIONS+=("-it") [[ -f .git ]] && [[ ! -d .git ]] && ENVOY_DOCKER_OPTIONS+=(-v "$(git rev-parse --git-common-dir):$(git rev-parse --git-common-dir)") +[[ -n "${SSH_AUTH_SOCK}" ]] && ENVOY_DOCKER_OPTIONS+=(-v "${SSH_AUTH_SOCK}:${SSH_AUTH_SOCK}" -e SSH_AUTH_SOCK) export ENVOY_BUILD_IMAGE="${IMAGE_NAME}:${IMAGE_ID}" diff --git a/tools/api/generate_go_protobuf.py b/tools/api/generate_go_protobuf.py index 0cd15b637449..5b25de2dbb0a 100755 --- a/tools/api/generate_go_protobuf.py +++ b/tools/api/generate_go_protobuf.py @@ -4,17 +4,21 @@ from subprocess import check_call import glob import os +import shlex import shutil import sys import re +# Needed for CI to pass down bazel options. 
+BAZEL_BUILD_OPTIONS = shlex.split(os.environ.get('BAZEL_BUILD_OPTIONS', '')) + TARGETS = '@envoy_api//...' IMPORT_BASE = 'github.com/envoyproxy/go-control-plane' OUTPUT_BASE = 'build_go' REPO_BASE = 'go-control-plane' BRANCH = 'master' MIRROR_MSG = 'Mirrored from envoyproxy/envoy @ ' -USER_NAME = 'go-control-plane(CircleCI)' +USER_NAME = 'go-control-plane(Azure Pipelines)' USER_EMAIL = 'go-control-plane@users.noreply.github.com' @@ -32,7 +36,7 @@ def generateProtobufs(output): check_call([ 'bazel', 'build', '-c', 'fastbuild', '--experimental_proto_descriptor_sets_include_source_info' - ] + go_protos) + ] + BAZEL_BUILD_OPTIONS + go_protos) for rule in go_protos: # Example rule: @@ -67,9 +71,7 @@ def git(repo, *args): def cloneGoProtobufs(repo): # Create a local clone of go-control-plane - git(None, 'clone', 'git@github.com:envoyproxy/go-control-plane', repo) - git(repo, 'fetch') - git(repo, 'checkout', '-B', BRANCH, 'origin/master') + git(None, 'clone', 'git@github.com:envoyproxy/go-control-plane', repo, '-b', BRANCH) def findLastSyncSHA(repo): diff --git a/tools/api_boost/api_boost.py b/tools/api_boost/api_boost.py index eda6eaf94088..5cd9846bcf21 100755 --- a/tools/api_boost/api_boost.py +++ b/tools/api_boost/api_boost.py @@ -132,7 +132,6 @@ def ApiBoostTree(target_paths, sp.run([ 'bazel', 'build', - '--config=libc++', '--strip=always', ] + BAZEL_BUILD_OPTIONS + dep_lib_build_targets, check=True) From e00e5561454d5d5dd8dfb03657ffd36a4ff25b53 Mon Sep 17 00:00:00 2001 From: Pengyuan Bian Date: Wed, 14 Oct 2020 08:17:24 -0700 Subject: [PATCH 07/17] Update opencensus library (#13549) Signed-off-by: Pengyuan Bian --- bazel/repository_locations.bzl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 7ab570248cd2..3e44753ab382 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -611,13 +611,13 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "OpenCensus C++", project_desc = "OpenCensus tracing library", project_url = "https://github.com/census-instrumentation/opencensus-cpp", - version = "7877337633466358ed680f9b26967da5b310d7aa", - sha256 = "12ff300fa804f97bd07e2ff071d969e09d5f3d7bbffeac438c725fa52a51a212", + version = "ba631066779a534267fdb1321b19850eb2b0c000", + sha256 = "f239a40803f6e2e42b57c9e68771b0990c4ca8b2d76b440073cdf14f4211ad26", strip_prefix = "opencensus-cpp-{version}", urls = ["https://github.com/census-instrumentation/opencensus-cpp/archive/{version}.tar.gz"], use_category = ["observability_ext"], extensions = ["envoy.tracers.opencensus"], - last_updated = "2020-06-01", + last_updated = "2020-10-13", cpe = "N/A", ), # This should be removed, see https://github.com/envoyproxy/envoy/issues/11816. 
From dc3460c247538b8f81d49bd7984f46d142f20ba9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A1s=20Czig=C3=A1ny?= Date: Wed, 14 Oct 2020 17:41:08 +0200 Subject: [PATCH 08/17] ratelimit: be able to disable x-envoy-ratelimited response header sent (#13270) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: András Czigány --- .../http/ratelimit/v3/rate_limit.proto | 8 +++- .../http/http_filters/rate_limit_filter.rst | 4 +- docs/root/version_history/current.rst | 1 + .../http/ratelimit/v3/rate_limit.proto | 8 +++- .../filters/http/ratelimit/ratelimit.cc | 10 +++-- .../filters/http/ratelimit/ratelimit.h | 3 ++ .../ratelimit/ratelimit_integration_test.cc | 41 ++++++++++++++++++ .../filters/http/ratelimit/ratelimit_test.cc | 43 +++++++++++++++++++ 8 files changed, 109 insertions(+), 9 deletions(-) diff --git a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto index 326a30ef4013..bc58e7f9b2e1 100644 --- a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto +++ b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto @@ -19,7 +19,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Rate limit :ref:`configuration overview `. // [#extension: envoy.filters.http.ratelimit] -// [#next-free-field: 9] +// [#next-free-field: 10] message RateLimit { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.rate_limit.v2.RateLimit"; @@ -60,7 +60,6 @@ message RateLimit { // The filter's behaviour in case the rate limiting service does // not respond back. When it is set to true, Envoy will not allow traffic in case of // communication failure between rate limiting service and the proxy. - // Defaults to false. bool failure_mode_deny = 5; // Specifies whether a `RESOURCE_EXHAUSTED` gRPC code must be returned instead @@ -99,6 +98,11 @@ message RateLimit { // Disabled by default. XRateLimitHeadersRFCVersion enable_x_ratelimit_headers = 8 [(validate.rules).enum = {defined_only: true}]; + + // Disables emitting the :ref:`x-envoy-ratelimited` header + // in case of rate limiting (i.e. 429 responses). + // Having this header not present potentially makes the request retriable. + bool disable_x_envoy_ratelimited_header = 9; } message RateLimitPerRoute { diff --git a/docs/root/configuration/http/http_filters/rate_limit_filter.rst b/docs/root/configuration/http/http_filters/rate_limit_filter.rst index 91ce997c72cd..0896f9a5b86d 100644 --- a/docs/root/configuration/http/http_filters/rate_limit_filter.rst +++ b/docs/root/configuration/http/http_filters/rate_limit_filter.rst @@ -14,7 +14,9 @@ can optionally include the virtual host rate limit configurations. More than one apply to a request. Each configuration results in a descriptor being sent to the rate limit service. If the rate limit service is called, and the response for any of the descriptors is over limit, a -429 response is returned. The rate limit filter also sets the :ref:`x-envoy-ratelimited` header. +429 response is returned. The rate limit filter also sets the :ref:`x-envoy-ratelimited` header, +unless :ref:`disable_x_envoy_ratelimited_header ` is +set to true. If there is an error in calling rate limit service or rate limit service returns an error and :ref:`failure_mode_deny ` is set to true, a 500 response is returned. 
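For illustration, a minimal sketch of an HTTP rate limit filter configuration using the new field. Field names and the filter/type names follow the proto change and tests in this patch; the gRPC cluster name is an assumption for the example, not taken from the patch:

    name: envoy.filters.http.ratelimit
    typed_config:
      "@type": type.googleapis.com/envoy.extensions.filters.http.ratelimit.v3.RateLimit
      domain: some_domain
      failure_mode_deny: false
      # New in this patch: omit the x-envoy-ratelimited header on 429 responses,
      # which potentially leaves rate-limited requests retriable.
      disable_x_envoy_ratelimited_header: true
      rate_limit_service:
        transport_api_version: V3
        grpc_service:
          envoy_grpc:
            cluster_name: ratelimit   # illustrative cluster name
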
diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 977e3aaa8981..237f9ed5b10a 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -29,6 +29,7 @@ New Features * grpc: implemented header value syntax support when defining :ref:`initial metadata ` for gRPC-based `ext_authz` :ref:`HTTP ` and :ref:`network ` filters, and :ref:`ratelimit ` filters. * health_check: added option to use :ref:`no_traffic_healthy_interval ` which allows a different no traffic interval when the host is healthy. * mongo_proxy: the list of commands to produce metrics for is now :ref:`configurable `. +* ratelimit: added :ref:`disable_x_envoy_ratelimited_header ` option to disable `X-Envoy-RateLimited` header. * tcp: added a new :ref:`envoy.overload_actions.reject_incoming_connections ` action to reject incoming TCP connections. Deprecated diff --git a/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto b/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto index 326a30ef4013..bc58e7f9b2e1 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto @@ -19,7 +19,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Rate limit :ref:`configuration overview `. // [#extension: envoy.filters.http.ratelimit] -// [#next-free-field: 9] +// [#next-free-field: 10] message RateLimit { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.rate_limit.v2.RateLimit"; @@ -60,7 +60,6 @@ message RateLimit { // The filter's behaviour in case the rate limiting service does // not respond back. When it is set to true, Envoy will not allow traffic in case of // communication failure between rate limiting service and the proxy. - // Defaults to false. bool failure_mode_deny = 5; // Specifies whether a `RESOURCE_EXHAUSTED` gRPC code must be returned instead @@ -99,6 +98,11 @@ message RateLimit { // Disabled by default. XRateLimitHeadersRFCVersion enable_x_ratelimit_headers = 8 [(validate.rules).enum = {defined_only: true}]; + + // Disables emitting the :ref:`x-envoy-ratelimited` header + // in case of rate limiting (i.e. 429 responses). + // Having this header not present potentially makes the request retriable. 
+ bool disable_x_envoy_ratelimited_header = 9; } message RateLimitPerRoute { diff --git a/source/extensions/filters/http/ratelimit/ratelimit.cc b/source/extensions/filters/http/ratelimit/ratelimit.cc index c5fb0d284a49..8430f47243a8 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.cc +++ b/source/extensions/filters/http/ratelimit/ratelimit.cc @@ -170,11 +170,13 @@ void Filter::complete(Filters::Common::RateLimit::LimitStatus status, empty_stat_name, false}; httpContext().codeStats().chargeResponseStat(info); - if (response_headers_to_add_ == nullptr) { - response_headers_to_add_ = Http::ResponseHeaderMapImpl::create(); + if (config_->enableXEnvoyRateLimitedHeader()) { + if (response_headers_to_add_ == nullptr) { + response_headers_to_add_ = Http::ResponseHeaderMapImpl::create(); + } + response_headers_to_add_->setReferenceEnvoyRateLimited( + Http::Headers::get().EnvoyRateLimitedValues.True); } - response_headers_to_add_->setReferenceEnvoyRateLimited( - Http::Headers::get().EnvoyRateLimitedValues.True); break; } diff --git a/source/extensions/filters/http/ratelimit/ratelimit.h b/source/extensions/filters/http/ratelimit/ratelimit.h index 22fc9ca65f48..058eb793569a 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.h +++ b/source/extensions/filters/http/ratelimit/ratelimit.h @@ -51,6 +51,7 @@ class FilterConfig { enable_x_ratelimit_headers_( config.enable_x_ratelimit_headers() == envoy::extensions::filters::http::ratelimit::v3::RateLimit::DRAFT_VERSION_03), + disable_x_envoy_ratelimited_header_(config.disable_x_envoy_ratelimited_header()), rate_limited_grpc_status_( config.rate_limited_as_resource_exhausted() ? absl::make_optional(Grpc::Status::WellKnownGrpcStatus::ResourceExhausted) @@ -64,6 +65,7 @@ class FilterConfig { FilterRequestType requestType() const { return request_type_; } bool failureModeAllow() const { return !failure_mode_deny_; } bool enableXRateLimitHeaders() const { return enable_x_ratelimit_headers_; } + bool enableXEnvoyRateLimitedHeader() const { return !disable_x_envoy_ratelimited_header_; } const absl::optional rateLimitedGrpcStatus() const { return rate_limited_grpc_status_; } @@ -90,6 +92,7 @@ class FilterConfig { Runtime::Loader& runtime_; const bool failure_mode_deny_; const bool enable_x_ratelimit_headers_; + const bool disable_x_envoy_ratelimited_header_; const absl::optional rate_limited_grpc_status_; Http::Context& http_context_; Filters::Common::RateLimit::StatNames stat_names_; diff --git a/test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc b/test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc index 56114f530aca..e20d573b4a92 100644 --- a/test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc +++ b/test/extensions/filters/http/ratelimit/ratelimit_integration_test.cc @@ -46,6 +46,7 @@ class RatelimitIntegrationTest : public Grpc::VersionedGrpcClientIntegrationPara TestUtility::loadFromYaml(base_filter_config_, proto_config_); proto_config_.set_failure_mode_deny(failure_mode_deny_); proto_config_.set_enable_x_ratelimit_headers(enable_x_ratelimit_headers_); + proto_config_.set_disable_x_envoy_ratelimited_header(disable_x_envoy_ratelimited_header_); setGrpcService(*proto_config_.mutable_rate_limit_service()->mutable_grpc_service(), "ratelimit", fake_upstreams_.back()->localAddress()); proto_config_.mutable_rate_limit_service()->set_transport_api_version(apiVersion()); @@ -192,6 +193,7 @@ class RatelimitIntegrationTest : public Grpc::VersionedGrpcClientIntegrationPara bool 
failure_mode_deny_ = false; envoy::extensions::filters::http::ratelimit::v3::RateLimit::XRateLimitHeadersRFCVersion enable_x_ratelimit_headers_ = envoy::extensions::filters::http::ratelimit::v3::RateLimit::OFF; + bool disable_x_envoy_ratelimited_header_ = false; envoy::extensions::filters::http::ratelimit::v3::RateLimit proto_config_{}; const std::string base_filter_config_ = R"EOF( domain: some_domain @@ -214,12 +216,24 @@ class RatelimitFilterHeadersEnabledIntegrationTest : public RatelimitIntegration } }; +// Test verifies that disabling X-Envoy-RateLimited response header works. +class RatelimitFilterEnvoyRatelimitedHeaderDisabledIntegrationTest + : public RatelimitIntegrationTest { +public: + RatelimitFilterEnvoyRatelimitedHeaderDisabledIntegrationTest() { + disable_x_envoy_ratelimited_header_ = true; + } +}; + INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, RatelimitIntegrationTest, VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS); INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, RatelimitFailureModeIntegrationTest, VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS); INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, RatelimitFilterHeadersEnabledIntegrationTest, VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS); +INSTANTIATE_TEST_SUITE_P(IpVersionsClientType, + RatelimitFilterEnvoyRatelimitedHeaderDisabledIntegrationTest, + VERSIONED_GRPC_CLIENT_INTEGRATION_PARAMS); TEST_P(RatelimitIntegrationTest, Ok) { basicFlow(); } @@ -261,6 +275,11 @@ TEST_P(RatelimitIntegrationTest, OverLimit) { sendRateLimitResponse(envoy::service::ratelimit::v3::RateLimitResponse::OVER_LIMIT, {}, Http::TestResponseHeaderMapImpl{}, Http::TestRequestHeaderMapImpl{}); waitForFailedUpstreamResponse(429); + + EXPECT_THAT(response_.get()->headers(), + Http::HeaderValueOf(Http::Headers::get().EnvoyRateLimited, + Http::Headers::get().EnvoyRateLimitedValues.True)); + cleanup(); EXPECT_EQ(nullptr, test_server_->counter("cluster.cluster_0.ratelimit.ok")); @@ -284,6 +303,10 @@ TEST_P(RatelimitIntegrationTest, OverLimitWithHeaders) { return Http::HeaderMap::Iterate::Continue; }); + EXPECT_THAT(response_.get()->headers(), + Http::HeaderValueOf(Http::Headers::get().EnvoyRateLimited, + Http::Headers::get().EnvoyRateLimitedValues.True)); + cleanup(); EXPECT_EQ(nullptr, test_server_->counter("cluster.cluster_0.ratelimit.ok")); @@ -436,5 +459,23 @@ TEST_P(RatelimitFilterHeadersEnabledIntegrationTest, OverLimitWithFilterHeaders) EXPECT_EQ(nullptr, test_server_->counter("cluster.cluster_0.ratelimit.error")); } +TEST_P(RatelimitFilterEnvoyRatelimitedHeaderDisabledIntegrationTest, + OverLimitWithoutEnvoyRatelimitedHeader) { + initiateClientConnection(); + waitForRatelimitRequest(); + sendRateLimitResponse(envoy::service::ratelimit::v3::RateLimitResponse::OVER_LIMIT, {}, + Http::TestResponseHeaderMapImpl{}, Http::TestRequestHeaderMapImpl{}); + waitForFailedUpstreamResponse(429); + + EXPECT_THAT(response_.get()->headers(), + ::testing::Not(Http::HeaderValueOf(Http::Headers::get().EnvoyRateLimited, _))); + + cleanup(); + + EXPECT_EQ(nullptr, test_server_->counter("cluster.cluster_0.ratelimit.ok")); + EXPECT_EQ(1, test_server_->counter("cluster.cluster_0.ratelimit.over_limit")->value()); + EXPECT_EQ(nullptr, test_server_->counter("cluster.cluster_0.ratelimit.error")); +} + } // namespace } // namespace Envoy diff --git a/test/extensions/filters/http/ratelimit/ratelimit_test.cc b/test/extensions/filters/http/ratelimit/ratelimit_test.cc index c7ab957ada5e..4eff42850721 100644 --- a/test/extensions/filters/http/ratelimit/ratelimit_test.cc +++ 
b/test/extensions/filters/http/ratelimit/ratelimit_test.cc @@ -80,6 +80,11 @@ class HttpRateLimitFilterTest : public testing::Test { enable_x_ratelimit_headers: DRAFT_VERSION_03 )EOF"; + const std::string disable_x_envoy_ratelimited_header_config_ = R"EOF( + domain: foo + disable_x_envoy_ratelimited_header: true + )EOF"; + const std::string filter_config_ = R"EOF( domain: foo )EOF"; @@ -632,6 +637,44 @@ TEST_F(HttpRateLimitFilterTest, LimitResponseWithFilterHeaders) { filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(upstream_rq_429_).value()); } +TEST_F(HttpRateLimitFilterTest, LimitResponseWithoutEnvoyRateLimitedHeader) { + SetUpTest(disable_x_envoy_ratelimited_header_config_); + InSequence s; + + EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _, _, _)) + .WillOnce(SetArgReferee<1>(descriptor_)); + EXPECT_CALL(*client_, limit(_, _, _, _, _)) + .WillOnce( + WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { + request_callbacks_ = &callbacks; + }))); + + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->decodeHeaders(request_headers_, false)); + + Http::ResponseHeaderMapPtr h{new Http::TestResponseHeaderMapImpl()}; + Http::TestResponseHeaderMapImpl response_headers{{":status", "429"}}; + EXPECT_CALL(filter_callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), true)); + EXPECT_CALL(filter_callbacks_, continueDecoding()).Times(0); + EXPECT_CALL(filter_callbacks_.stream_info_, + setResponseFlag(StreamInfo::ResponseFlag::RateLimited)); + + request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, nullptr, + std::move(h), nullptr); + + EXPECT_EQ(1U, filter_callbacks_.clusterInfo() + ->statsScope() + .counterFromStatName(ratelimit_over_limit_) + .value()); + EXPECT_EQ( + 1U, + filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(upstream_rq_4xx_).value()); + EXPECT_EQ( + 1U, + filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(upstream_rq_429_).value()); + EXPECT_EQ("request_rate_limited", filter_callbacks_.details()); +} + TEST_F(HttpRateLimitFilterTest, LimitResponseRuntimeDisabled) { SetUpTest(filter_config_); InSequence s; From 1a66324b9615ef6d80d742cb0b77383be6f35f88 Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Wed, 14 Oct 2020 13:16:55 -0700 Subject: [PATCH 09/17] ci: workaround for https://github.com/actions/virtual-environments/issues/1811 (#13577) Signed-off-by: Lizan Zhou --- ci/mac_ci_setup.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ci/mac_ci_setup.sh b/ci/mac_ci_setup.sh index 395c3297fe62..e34989cc3a78 100755 --- a/ci/mac_ci_setup.sh +++ b/ci/mac_ci_setup.sh @@ -6,6 +6,9 @@ # https://github.com/actions/virtual-environments/blob/master/images/macos/macos-10.15-Readme.md for # a list of pre-installed tools in the macOS image. +# https://github.com/actions/virtual-environments/issues/1811 +brew uninstall openssl@1.0.2t + export HOMEBREW_NO_AUTO_UPDATE=1 HOMEBREW_RETRY_ATTEMPTS=10 HOMEBREW_RETRY_INTERVAL=1 From 1ca53123c078b6545d393d2974c58ddc3bebabb4 Mon Sep 17 00:00:00 2001 From: "Drew S. Ortega" Date: Wed, 14 Oct 2020 14:22:12 -0700 Subject: [PATCH 10/17] hds: add support for delta updates in specifier (#13067) Implement incremental comparison of changes to the HealthCheckSpecifier used to configure the health discovery service. Previously all HDS Clusters would be reconstructed even if the same HealthCheckSpecifier was received, or if there was only some small change. 
This is unusable for the use case of a timed HealthCheckSpecifier update, where the HealthCheckSpecifier is update more often than a health check. Since all health checks were previously being reconstructed on each call, their results were inconsistent. This implementation allows for the re-usage of objects that do not change between calls, Specifically to an HdsCluster, to a Host/Endpoint, and to a Health Checker. Risk Level: Low Testing: All previous tests pass. Added unit tests that test for: - No changes in the HealthCheckSpecifier should not attempt to update. - Changes to only some of the overall HdsClusters should not reconstruct or update all of them. - Changes to some endpoints/hosts should not update or reconstruct all of them. - Changes to some health checkers should not update or reconstruct all of them. Docs Changes: N/A Release Notes: Included Signed-off-by: Drew S. Ortega --- docs/root/version_history/current.rst | 1 + source/common/upstream/BUILD | 11 + .../upstream/health_checker_base_impl.h | 17 + .../upstream/health_discovery_service.cc | 348 ++++++++++--- .../upstream/health_discovery_service.h | 56 +- source/common/upstream/locality_endpoint.h | 29 ++ test/common/upstream/hds_test.cc | 490 ++++++++++++++++-- test/integration/hds_integration_test.cc | 166 +++++- 8 files changed, 988 insertions(+), 130 deletions(-) create mode 100644 source/common/upstream/locality_endpoint.h diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 237f9ed5b10a..830432731df9 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -27,6 +27,7 @@ Removed Config or Runtime New Features ------------ * grpc: implemented header value syntax support when defining :ref:`initial metadata ` for gRPC-based `ext_authz` :ref:`HTTP ` and :ref:`network ` filters, and :ref:`ratelimit ` filters. +* hds: added support for delta updates in the :ref:`HealthCheckSpecifier `, making only the Endpoints and Health Checkers that changed be reconstructed on receiving a new message, rather than the entire HDS. * health_check: added option to use :ref:`no_traffic_healthy_interval ` which allows a different no traffic interval when the host is healthy. * mongo_proxy: the list of commands to produce metrics for is now :ref:`configurable `. * ratelimit: added :ref:`disable_x_envoy_ratelimited_header ` option to disable `X-Envoy-RateLimited` header. 
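As a reference point for the delta-update logic in the hunks below, a sketch of the kind of cluster_health_checks entry that is compared between successive HealthCheckSpecifier messages. Field names follow the accessors used in health_discovery_service.cc below (cluster_name, locality_endpoints, endpoints, address, health_checks); the concrete locality, address, and health check values are illustrative only:

    cluster_health_checks:
    - cluster_name: some_cluster
      locality_endpoints:
      - locality:
          region: some_region
        endpoints:
        - address:
            socket_address:
              address: 127.0.0.1
              port_value: 1234
      health_checks:
      - timeout: 1s
        interval: 1s
        unhealthy_threshold: 2
        healthy_threshold: 2
        http_health_check:
          path: /healthcheck

With the delta logic, an entry that is unchanged from the previous message compares equal, so its HdsCluster, hosts, and health checkers are reused rather than reconstructed; only entries that differ are rebuilt or updated.
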
diff --git a/source/common/upstream/BUILD b/source/common/upstream/BUILD index 2d0fb940cf00..9219e59d768d 100644 --- a/source/common/upstream/BUILD +++ b/source/common/upstream/BUILD @@ -202,12 +202,23 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "locality_endpoint_lib", + hdrs = ["locality_endpoint.h"], + deps = [ + "//source/common/protobuf:utility_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + "@envoy_api//envoy/config/endpoint/v3:pkg_cc_proto", + ], +) + envoy_cc_library( name = "health_discovery_service_lib", srcs = ["health_discovery_service.cc"], hdrs = ["health_discovery_service.h"], deps = [ ":health_checker_lib", + ":locality_endpoint_lib", ":upstream_includes", "//include/envoy/api:api_interface", "//include/envoy/event:dispatcher_interface", diff --git a/source/common/upstream/health_checker_base_impl.h b/source/common/upstream/health_checker_base_impl.h index b773ced03a37..c1e4bb7affff 100644 --- a/source/common/upstream/health_checker_base_impl.h +++ b/source/common/upstream/health_checker_base_impl.h @@ -37,6 +37,23 @@ struct HealthCheckerStats { ALL_HEALTH_CHECKER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT) }; +/** + * HealthCheckerHash and HealthCheckerEqualTo are used to allow the HealthCheck proto to be used as + * a flat_hash_map key. + */ +struct HealthCheckerHash { + size_t operator()(const envoy::config::core::v3::HealthCheck& health_check) const { + return MessageUtil::hash(health_check); + } +}; + +struct HealthCheckerEqualTo { + bool operator()(const envoy::config::core::v3::HealthCheck& lhs, + const envoy::config::core::v3::HealthCheck& rhs) const { + return Protobuf::util::MessageDifferencer::Equals(lhs, rhs); + } +}; + /** * Base implementation for all health checkers. */ diff --git a/source/common/upstream/health_discovery_service.cc b/source/common/upstream/health_discovery_service.cc index 0d0d5e600635..12c92979f418 100644 --- a/source/common/upstream/health_discovery_service.cc +++ b/source/common/upstream/health_discovery_service.cc @@ -46,8 +46,8 @@ HdsDelegate::HdsDelegate(Stats::Scope& scope, Grpc::RawAsyncClientPtr async_clie dispatcher_(dispatcher), runtime_(runtime), store_stats_(stats), ssl_context_manager_(ssl_context_manager), info_factory_(info_factory), access_log_manager_(access_log_manager), cm_(cm), local_info_(local_info), admin_(admin), - singleton_manager_(singleton_manager), tls_(tls), validation_visitor_(validation_visitor), - api_(api) { + singleton_manager_(singleton_manager), tls_(tls), specifier_hash_(0), + validation_visitor_(validation_visitor), api_(api) { health_check_request_.mutable_health_check_request()->mutable_node()->MergeFrom( local_info_.node()); backoff_strategy_ = std::make_unique( @@ -99,7 +99,6 @@ void HdsDelegate::handleFailure() { setHdsRetryTimer(); } -// TODO(lilika): Add support for the same endpoint in different clusters/ports envoy::service::health::v3::HealthCheckRequestOrEndpointHealthResponse HdsDelegate::sendResponse() { envoy::service::health::v3::HealthCheckRequestOrEndpointHealthResponse response; @@ -165,68 +164,139 @@ void HdsDelegate::onReceiveInitialMetadata(Http::ResponseHeaderMapPtr&& metadata UNREFERENCED_PARAMETER(metadata); } +envoy::config::cluster::v3::Cluster HdsDelegate::createClusterConfig( + const envoy::service::health::v3::ClusterHealthCheck& cluster_health_check) { + // Create HdsCluster config + envoy::config::cluster::v3::Cluster cluster_config; + + cluster_config.set_name(cluster_health_check.cluster_name()); + 
cluster_config.mutable_connect_timeout()->set_seconds(ClusterTimeoutSeconds); + cluster_config.mutable_per_connection_buffer_limit_bytes()->set_value( + ClusterConnectionBufferLimitBytes); + + // Add endpoints to cluster + for (const auto& locality_endpoints : cluster_health_check.locality_endpoints()) { + // add endpoint group by locality to config + auto* endpoints = cluster_config.mutable_load_assignment()->add_endpoints(); + // if this group contains locality information, save it. + if (locality_endpoints.has_locality()) { + endpoints->mutable_locality()->MergeFrom(locality_endpoints.locality()); + } + + // add all endpoints for this locality group to the config + for (const auto& endpoint : locality_endpoints.endpoints()) { + endpoints->add_lb_endpoints()->mutable_endpoint()->mutable_address()->MergeFrom( + endpoint.address()); + } + } + + // TODO(lilika): Add support for optional per-endpoint health checks + + // Add healthchecks to cluster + for (auto& health_check : cluster_health_check.health_checks()) { + cluster_config.add_health_checks()->MergeFrom(health_check); + } + + // Add transport_socket_match to cluster for use in host connections. + cluster_config.mutable_transport_socket_matches()->MergeFrom( + cluster_health_check.transport_socket_matches()); + + ENVOY_LOG(debug, "New HdsCluster config {} ", cluster_config.DebugString()); + + return cluster_config; +} + +void HdsDelegate::updateHdsCluster(HdsClusterPtr cluster, + const envoy::config::cluster::v3::Cluster& cluster_config) { + cluster->update(admin_, cluster_config, info_factory_, cm_, local_info_, dispatcher_, + singleton_manager_, tls_, validation_visitor_, api_, access_log_manager_, + runtime_); +} + +HdsClusterPtr +HdsDelegate::createHdsCluster(const envoy::config::cluster::v3::Cluster& cluster_config) { + static const envoy::config::core::v3::BindConfig bind_config; + + // Create HdsCluster. + auto new_cluster = std::make_shared( + admin_, runtime_, std::move(cluster_config), bind_config, store_stats_, ssl_context_manager_, + false, info_factory_, cm_, local_info_, dispatcher_, singleton_manager_, tls_, + validation_visitor_, api_); + + // Begin HCs in the background. + new_cluster->initialize([] {}); + new_cluster->initHealthchecks(access_log_manager_, runtime_, dispatcher_, api_); + + return new_cluster; +} + void HdsDelegate::processMessage( std::unique_ptr&& message) { ENVOY_LOG(debug, "New health check response message {} ", message->DebugString()); ASSERT(message); + std::vector hds_clusters; + // Maps to replace the current member variable versions. + absl::flat_hash_map new_hds_clusters_name_map; for (const auto& cluster_health_check : message->cluster_health_checks()) { - // Create HdsCluster config - static const envoy::config::core::v3::BindConfig bind_config; - envoy::config::cluster::v3::Cluster cluster_config; - - cluster_config.set_name(cluster_health_check.cluster_name()); - cluster_config.mutable_connect_timeout()->set_seconds(ClusterTimeoutSeconds); - cluster_config.mutable_per_connection_buffer_limit_bytes()->set_value( - ClusterConnectionBufferLimitBytes); - - // Add endpoints to cluster - for (const auto& locality_endpoints : cluster_health_check.locality_endpoints()) { - // add endpoint group by locality to config - auto* endpoints = cluster_config.mutable_load_assignment()->add_endpoints(); - // if this group contains locality information, save it. 
- if (locality_endpoints.has_locality()) { - endpoints->mutable_locality()->MergeFrom(locality_endpoints.locality()); + if (!new_hds_clusters_name_map.contains(cluster_health_check.cluster_name())) { + HdsClusterPtr cluster_ptr; + + // Create a new configuration for a cluster based on our different or new config. + auto cluster_config = createClusterConfig(cluster_health_check); + + // If this particular cluster configuration happens to have a name, then it is possible + // this particular cluster exists in the name map. We check and if we found a match, + // attempt to update this cluster. If no match was found, either the cluster name is empty + // or we have not seen a cluster by this name before. In either case, create a new cluster. + auto cluster_map_pair = hds_clusters_name_map_.find(cluster_health_check.cluster_name()); + if (cluster_map_pair != hds_clusters_name_map_.end()) { + // We have a previous cluster with this name, update. + cluster_ptr = cluster_map_pair->second; + updateHdsCluster(cluster_ptr, cluster_config); + } else { + // There is no cluster with this name previously or its an empty string, so just create a + // new cluster. + cluster_ptr = createHdsCluster(cluster_config); } - // add all endpoints for this locality group to the config - for (const auto& endpoint : locality_endpoints.endpoints()) { - endpoints->add_lb_endpoints()->mutable_endpoint()->mutable_address()->MergeFrom( - endpoint.address()); + // If this cluster does not have a name, do not add it to the name map since cluster_name is + // an optional field, and reconstruct these clusters on every update. + if (!cluster_health_check.cluster_name().empty()) { + // Since this cluster has a name, add it to our by-name map so we can update it later. + new_hds_clusters_name_map.insert({cluster_health_check.cluster_name(), cluster_ptr}); + } else { + ENVOY_LOG(warn, + "HDS Cluster has no cluster_name, it will be recreated instead of updated on " + "every reconfiguration."); } - } - - // TODO(lilika): Add support for optional per-endpoint health checks - // Add healthchecks to cluster - for (auto& health_check : cluster_health_check.health_checks()) { - cluster_config.add_health_checks()->MergeFrom(health_check); + // Add this cluster to the flat list for health checking. + hds_clusters.push_back(cluster_ptr); + } else { + ENVOY_LOG(warn, "An HDS Cluster with this cluster_name has already been added, not using."); } + } - // Add transport_socket_match to cluster for use in host connections. - cluster_config.mutable_transport_socket_matches()->MergeFrom( - cluster_health_check.transport_socket_matches()); - - ENVOY_LOG(debug, "New HdsCluster config {} ", cluster_config.DebugString()); - - // Create HdsCluster - hds_clusters_.emplace_back(new HdsCluster(admin_, runtime_, std::move(cluster_config), - bind_config, store_stats_, ssl_context_manager_, - false, info_factory_, cm_, local_info_, dispatcher_, - singleton_manager_, tls_, validation_visitor_, api_)); - hds_clusters_.back()->initialize([] {}); + // Overwrite our map data structures. + hds_clusters_name_map_ = std::move(new_hds_clusters_name_map); + hds_clusters_ = std::move(hds_clusters); - hds_clusters_.back()->startHealthchecks(access_log_manager_, runtime_, dispatcher_, api_); - } + // TODO: add stats reporting for number of clusters added, removed, and reused. 
} -// TODO(lilika): Add support for subsequent HealthCheckSpecifier messages that -// might modify the HdsClusters void HdsDelegate::onReceiveMessage( std::unique_ptr&& message) { stats_.requests_.inc(); ENVOY_LOG(debug, "New health check response message {} ", message->DebugString()); + const uint64_t hash = MessageUtil::hash(*message); + + if (hash == specifier_hash_) { + ENVOY_LOG(debug, "New health check specifier is unchanged, no action taken."); + return; + } + // Validate message fields try { MessageUtil::validate(*message, validation_visitor_); @@ -239,15 +309,17 @@ void HdsDelegate::onReceiveMessage( return; } - // Reset - hds_clusters_.clear(); - // Set response auto server_response_ms = PROTOBUF_GET_MS_OR_DEFAULT(*message, interval, 1000); // Process the HealthCheckSpecifier message. processMessage(std::move(message)); + stats_.updates_.inc(); + + // Update the stored hash. + specifier_hash_ = hash; + if (server_response_ms_ != server_response_ms) { server_response_ms_ = server_response_ms; setHdsStreamResponseTimer(); @@ -276,9 +348,12 @@ HdsCluster::HdsCluster(Server::Admin& admin, Runtime::Loader& runtime, ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api) : runtime_(runtime), cluster_(std::move(cluster)), bind_config_(bind_config), stats_(stats), ssl_context_manager_(ssl_context_manager), added_via_api_(added_via_api), - initial_hosts_(new HostVector()), validation_visitor_(validation_visitor) { + hosts_(new HostVector()), validation_visitor_(validation_visitor) { ENVOY_LOG(debug, "Creating an HdsCluster"); priority_set_.getOrCreateHostSet(0); + // Set initial hashes for possible delta updates. + config_hash_ = MessageUtil::hash(cluster_); + socket_match_hash_ = RepeatedPtrUtil::hash(cluster_.transport_socket_matches()); info_ = info_factory.createClusterInfo( {admin, runtime_, cluster_, bind_config_, stats_, ssl_context_manager_, added_via_api_, cm, @@ -296,6 +371,7 @@ HdsCluster::HdsCluster(Server::Admin& admin, Runtime::Loader& runtime, hosts_by_locality.back().reserve(locality_endpoints.lb_endpoints_size()); for (const auto& host : locality_endpoints.lb_endpoints()) { + const LocalityEndpointTuple endpoint_key = {locality_endpoints.locality(), host}; // Initialize an endpoint host object. HostSharedPtr endpoint = std::make_shared( info_, "", Network::Address::resolveProtoAddress(host.endpoint().address()), nullptr, 1, @@ -303,17 +379,154 @@ HdsCluster::HdsCluster(Server::Admin& admin, Runtime::Loader& runtime, envoy::config::endpoint::v3::Endpoint::HealthCheckConfig().default_instance(), 0, envoy::config::core::v3::UNKNOWN); // Add this host/endpoint pointer to our flat list of endpoints for health checking. - initial_hosts_->push_back(endpoint); + hosts_->push_back(endpoint); // Add this host/endpoint pointer to our structured list by locality so results can be // requested by locality. hosts_by_locality.back().push_back(endpoint); + // Add this host/endpoint pointer to our map so we can rebuild this later. + hosts_map_.insert({endpoint_key, endpoint}); } } // Create the HostsPerLocality. 
- initial_hosts_per_locality_ = + hosts_per_locality_ = std::make_shared(std::move(hosts_by_locality), false); } +void HdsCluster::update(Server::Admin& admin, envoy::config::cluster::v3::Cluster cluster, + ClusterInfoFactory& info_factory, ClusterManager& cm, + const LocalInfo::LocalInfo& local_info, Event::Dispatcher& dispatcher, + Singleton::Manager& singleton_manager, ThreadLocal::SlotAllocator& tls, + ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api, + AccessLog::AccessLogManager& access_log_manager, Runtime::Loader& runtime) { + + // check to see if the config changed. If it did, update. + const uint64_t config_hash = MessageUtil::hash(cluster); + if (config_hash_ != config_hash) { + config_hash_ = config_hash; + cluster_ = std::move(cluster); + + // Check to see if our list of socket matches have changed. If they have, create a new matcher + // in info_. + bool update_cluster_info = false; + const uint64_t socket_match_hash = RepeatedPtrUtil::hash(cluster_.transport_socket_matches()); + if (socket_match_hash_ != socket_match_hash) { + socket_match_hash_ = socket_match_hash; + update_cluster_info = true; + info_ = info_factory.createClusterInfo( + {admin, runtime_, cluster_, bind_config_, stats_, ssl_context_manager_, added_via_api_, + cm, local_info, dispatcher, singleton_manager, tls, validation_visitor, api}); + } + + // Check to see if anything in the endpoints list has changed. + updateHosts(cluster_.load_assignment().endpoints(), update_cluster_info); + + // Check to see if any of the health checkers have changed. + updateHealthchecks(cluster_.health_checks(), access_log_manager, runtime, dispatcher, api); + } +} + +void HdsCluster::updateHealthchecks( + const Protobuf::RepeatedPtrField& health_checks, + AccessLog::AccessLogManager& access_log_manager, Runtime::Loader& runtime, + Event::Dispatcher& dispatcher, Api::Api& api) { + std::vector health_checkers; + HealthCheckerMap health_checkers_map; + + for (const auto& health_check : health_checks) { + // Check to see if this exact same health_check config already has a health checker. + auto health_checker = health_checkers_map_.find(health_check); + if (health_checker != health_checkers_map_.end()) { + // If it does, use it. + health_checkers_map.insert({health_check, health_checker->second}); + health_checkers.push_back(health_checker->second); + } else { + // If it does not, create a new one. + auto new_health_checker = Upstream::HealthCheckerFactory::create( + health_check, *this, runtime, dispatcher, access_log_manager, validation_visitor_, api); + health_checkers_map.insert({health_check, new_health_checker}); + health_checkers.push_back(new_health_checker); + + // Start these health checks now because upstream assumes they already have been started. + new_health_checker->start(); + } + } + + // replace our member data structures with our newly created ones. + health_checkers_ = std::move(health_checkers); + health_checkers_map_ = std::move(health_checkers_map); + + // TODO: add stats reporting for number of health checkers added, removed, and reused. +} + +void HdsCluster::updateHosts( + const Protobuf::RepeatedPtrField& + locality_endpoints, + bool update_cluster_info) { + // Create the data structures needed for PrioritySet::update. + HostVectorSharedPtr hosts = std::make_shared>(); + std::vector hosts_added; + std::vector hosts_removed; + std::vector hosts_by_locality; + + // Use for delta update comparison. 
+ HostsMap hosts_map; + + for (auto& endpoints : locality_endpoints) { + hosts_by_locality.emplace_back(); + for (auto& endpoint : endpoints.lb_endpoints()) { + LocalityEndpointTuple endpoint_key = {endpoints.locality(), endpoint}; + + // Check to see if this exact Locality+Endpoint has been seen before. + // Also, if we made changes to our info, re-create all endpoints. + auto host_pair = hosts_map_.find(endpoint_key); + HostSharedPtr host; + if (!update_cluster_info && host_pair != hosts_map_.end()) { + // If we have this exact pair, save the shared pointer. + host = host_pair->second; + } else { + // We do not have this endpoint saved, so create a new one. + host = std::make_shared( + info_, "", Network::Address::resolveProtoAddress(endpoint.endpoint().address()), + nullptr, 1, endpoints.locality(), + envoy::config::endpoint::v3::Endpoint::HealthCheckConfig().default_instance(), 0, + envoy::config::core::v3::UNKNOWN); + + // Set the initial health status as in HdsCluster::initialize. + host->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC); + + // Add to our hosts added list and save the shared pointer. + hosts_added.push_back(host); + } + + // No matter if it is reused or new, always add to these data structures. + hosts_by_locality.back().push_back(host); + hosts->push_back(host); + hosts_map.insert({endpoint_key, host}); + } + } + + // Compare the old map to the new to find out which endpoints are going to be removed. + for (auto& host_pair : hosts_map_) { + if (!hosts_map.contains(host_pair.first)) { + hosts_removed.push_back(host_pair.second); + } + } + + // Update the member data structures. + hosts_ = std::move(hosts); + hosts_map_ = std::move(hosts_map); + + // TODO: add stats reporting for number of endpoints added, removed, and reused. + ENVOY_LOG(debug, "Hosts Added: {}, Removed: {}, Reused: {}", hosts_added.size(), + hosts_removed.size(), hosts_->size() - hosts_added.size()); + + // Update the priority set. 
+ hosts_per_locality_ = + std::make_shared(std::move(hosts_by_locality), false); + priority_set_.updateHosts(0, HostSetImpl::partitionHosts(hosts_, hosts_per_locality_), {}, + hosts_added, hosts_removed, absl::nullopt); +} + ClusterSharedPtr HdsCluster::create() { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } ClusterInfoConstSharedPtr @@ -337,25 +550,34 @@ ProdClusterInfoFactory::createClusterInfo(const CreateClusterInfoParams& params) params.added_via_api_, factory_context); } -void HdsCluster::startHealthchecks(AccessLog::AccessLogManager& access_log_manager, - Runtime::Loader& runtime, Event::Dispatcher& dispatcher, - Api::Api& api) { +void HdsCluster::initHealthchecks(AccessLog::AccessLogManager& access_log_manager, + Runtime::Loader& runtime, Event::Dispatcher& dispatcher, + Api::Api& api) { for (auto& health_check : cluster_.health_checks()) { - health_checkers_.push_back(Upstream::HealthCheckerFactory::create( - health_check, *this, runtime, dispatcher, access_log_manager, validation_visitor_, api)); - health_checkers_.back()->start(); + auto health_checker = Upstream::HealthCheckerFactory::create( + health_check, *this, runtime, dispatcher, access_log_manager, validation_visitor_, api); + + health_checkers_.push_back(health_checker); + health_checkers_map_.insert({health_check, health_checker}); + health_checker->start(); } } void HdsCluster::initialize(std::function callback) { initialization_complete_callback_ = callback; - for (const auto& host : *initial_hosts_) { - host->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC); + + // If this function gets called again we do not want to touch the priority set again with the + // initial hosts, because the hosts may have changed. + if (!initialized_) { + for (const auto& host : *hosts_) { + host->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC); + } + // Use the ungrouped and grouped hosts lists to retain locality structure in the priority set. + priority_set_.updateHosts(0, HostSetImpl::partitionHosts(hosts_, hosts_per_locality_), {}, + *hosts_, {}, absl::nullopt); + + initialized_ = true; } - // Use the ungrouped and grouped hosts lists to retain locality structure in the priority set. 
- priority_set_.updateHosts( - 0, HostSetImpl::partitionHosts(initial_hosts_, initial_hosts_per_locality_), {}, - *initial_hosts_, {}, absl::nullopt); } void HdsCluster::setOutlierDetector(const Outlier::DetectorSharedPtr&) { diff --git a/source/common/upstream/health_discovery_service.h b/source/common/upstream/health_discovery_service.h index 3818a80a3f06..a3ecbb7c428e 100644 --- a/source/common/upstream/health_discovery_service.h +++ b/source/common/upstream/health_discovery_service.h @@ -18,15 +18,24 @@ #include "common/grpc/async_client_impl.h" #include "common/network/resolver_impl.h" #include "common/upstream/health_checker_impl.h" +#include "common/upstream/locality_endpoint.h" #include "common/upstream/upstream_impl.h" #include "server/transport_socket_config_impl.h" #include "extensions/transport_sockets/well_known_names.h" +#include "absl/container/flat_hash_map.h" + namespace Envoy { namespace Upstream { +using HostsMap = absl::flat_hash_map; +using HealthCheckerMap = + absl::flat_hash_map; + class ProdClusterInfoFactory : public ClusterInfoFactory, Logger::Loggable { public: ClusterInfoConstSharedPtr createClusterInfo(const CreateClusterInfoParams& params) override; @@ -60,12 +69,19 @@ class HdsCluster : public Cluster, Logger::Loggable { Outlier::Detector* outlierDetector() override { return outlier_detector_.get(); } const Outlier::Detector* outlierDetector() const override { return outlier_detector_.get(); } void initialize(std::function callback) override; - - // Creates and starts healthcheckers to its endpoints - void startHealthchecks(AccessLog::AccessLogManager& access_log_manager, Runtime::Loader& runtime, - Event::Dispatcher& dispatcher, Api::Api& api); + // Compare changes in the cluster proto, and update parts of the cluster as needed. + void update(Server::Admin& admin, envoy::config::cluster::v3::Cluster cluster, + ClusterInfoFactory& info_factory, ClusterManager& cm, + const LocalInfo::LocalInfo& local_info, Event::Dispatcher& dispatcher, + Singleton::Manager& singleton_manager, ThreadLocal::SlotAllocator& tls, + ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api, + AccessLog::AccessLogManager& access_log_manager, Runtime::Loader& runtime); + // Creates healthcheckers and adds them to the list, then does initial start. 
+ void initHealthchecks(AccessLog::AccessLogManager& access_log_manager, Runtime::Loader& runtime, + Event::Dispatcher& dispatcher, Api::Api& api); std::vector healthCheckers() { return health_checkers_; }; + std::vector hosts() { return *hosts_; }; protected: PrioritySetImpl priority_set_; @@ -76,17 +92,31 @@ class HdsCluster : public Cluster, Logger::Loggable { std::function initialization_complete_callback_; Runtime::Loader& runtime_; - const envoy::config::cluster::v3::Cluster cluster_; + envoy::config::cluster::v3::Cluster cluster_; const envoy::config::core::v3::BindConfig& bind_config_; Stats::Store& stats_; Ssl::ContextManager& ssl_context_manager_; bool added_via_api_; + bool initialized_ = false; + uint64_t config_hash_; + uint64_t socket_match_hash_; - HostVectorSharedPtr initial_hosts_; - HostsPerLocalitySharedPtr initial_hosts_per_locality_; + HostVectorSharedPtr hosts_; + HostsPerLocalitySharedPtr hosts_per_locality_; + HostsMap hosts_map_; ClusterInfoConstSharedPtr info_; std::vector health_checkers_; + HealthCheckerMap health_checkers_map_; ProtobufMessage::ValidationVisitor& validation_visitor_; + + void updateHealthchecks( + const Protobuf::RepeatedPtrField& health_checks, + AccessLog::AccessLogManager& access_log_manager, Runtime::Loader& runtime, + Event::Dispatcher& dispatcher, Api::Api& api); + void + updateHosts(const Protobuf::RepeatedPtrField& + locality_endpoints, + bool update_socket_matches); }; using HdsClusterPtr = std::shared_ptr; @@ -97,7 +127,8 @@ using HdsClusterPtr = std::shared_ptr; #define ALL_HDS_STATS(COUNTER) \ COUNTER(requests) \ COUNTER(responses) \ - COUNTER(errors) + COUNTER(errors) \ + COUNTER(updates) /** * Struct definition for all hds stats. @see stats_macros.h @@ -145,7 +176,11 @@ class HdsDelegate : Grpc::AsyncStreamCallbacks&& message); - + envoy::config::cluster::v3::Cluster + createClusterConfig(const envoy::service::health::v3::ClusterHealthCheck& cluster_health_check); + void updateHdsCluster(HdsClusterPtr cluster, + const envoy::config::cluster::v3::Cluster& cluster_health_check); + HdsClusterPtr createHdsCluster(const envoy::config::cluster::v3::Cluster& cluster_health_check); HdsDelegateStats stats_; const Protobuf::MethodDescriptor& service_method_; @@ -168,10 +203,11 @@ class HdsDelegate : Grpc::AsyncStreamCallbacks health_check_message_; + uint64_t specifier_hash_; std::vector clusters_; std::vector hds_clusters_; + absl::flat_hash_map hds_clusters_name_map_; Event::TimerPtr hds_stream_response_timer_; Event::TimerPtr hds_retry_timer_; diff --git a/source/common/upstream/locality_endpoint.h b/source/common/upstream/locality_endpoint.h new file mode 100644 index 000000000000..a928cbde3ad0 --- /dev/null +++ b/source/common/upstream/locality_endpoint.h @@ -0,0 +1,29 @@ +#pragma once + +#include "envoy/config/core/v3/base.pb.h" +#include "envoy/config/endpoint/v3/endpoint_components.pb.h" + +#include "common/protobuf/utility.h" + +namespace Envoy { +namespace Upstream { + +using LocalityEndpointTuple = std::tuple; +struct LocalityEndpointHash { + size_t operator()(const LocalityEndpointTuple& values) const { + const auto locality_hash = MessageUtil::hash(std::get<0>(values)); + const auto endpoint_hash = MessageUtil::hash(std::get<1>(values)); + return locality_hash ^ endpoint_hash; + } +}; + +struct LocalityEndpointEqualTo { + bool operator()(const LocalityEndpointTuple& lhs, const LocalityEndpointTuple& rhs) const { + return Protobuf::util::MessageDifferencer::Equals(std::get<0>(lhs), std::get<0>(rhs)) && + 
Protobuf::util::MessageDifferencer::Equals(std::get<1>(lhs), std::get<1>(rhs)); + } +}; + +} // namespace Upstream +} // namespace Envoy diff --git a/test/common/upstream/hds_test.cc b/test/common/upstream/hds_test.cc index f2012e3d31d7..09804ccbb3ab 100644 --- a/test/common/upstream/hds_test.cc +++ b/test/common/upstream/hds_test.cc @@ -67,6 +67,15 @@ class HdsTest : public testing::Test { node_.set_id("hds-node"); } + // Checks if the cluster counters are correct + void checkHdsCounters(int requests, int responses, int errors, int updates) { + auto stats = hds_delegate_friend_.getStats(*hds_delegate_); + EXPECT_EQ(requests, stats.requests_.value()); + EXPECT_LE(responses, stats.responses_.value()); + EXPECT_EQ(errors, stats.errors_.value()); + EXPECT_EQ(updates, stats.updates_.value()); + } + // Creates an HdsDelegate void createHdsDelegate() { InSequence s; @@ -91,6 +100,23 @@ class HdsTest : public testing::Test { singleton_manager_, tls_, validation_visitor_, *api_); } + void expectCreateClientConnection() { + // Create a new mock connection for each call to createClientConnection. + EXPECT_CALL(dispatcher_, createClientConnection_(_, _, _, _)) + .WillRepeatedly(Invoke( + [](Network::Address::InstanceConstSharedPtr, Network::Address::InstanceConstSharedPtr, + Network::TransportSocketPtr&, const Network::ConnectionSocket::OptionsSharedPtr&) { + Network::MockClientConnection* connection = + new NiceMock(); + + // pretend our endpoint was connected to. + connection->raiseEvent(Network::ConnectionEvent::Connected); + + // return this new, connected endpoint. + return connection; + })); + } + // Creates a HealthCheckSpecifier message that contains one endpoint and one // healthcheck envoy::service::health::v3::HealthCheckSpecifier* createSimpleMessage() { @@ -175,6 +201,34 @@ class HdsTest : public testing::Test { return msg; } + void + addTransportSocketMatches(envoy::service::health::v3::ClusterHealthCheck* cluster_health_check, + std::string match, std::string criteria) { + // Add transport socket matches to specified cluster and its first health check. + const std::string match_yaml = absl::StrFormat( + R"EOF( +transport_socket_matches: +- name: "test_socket" + match: + %s: "true" + transport_socket: + name: "envoy.transport_sockets.raw_buffer" +)EOF", + match); + cluster_health_check->MergeFrom( + TestUtility::parseYaml(match_yaml)); + + // Add transport socket match criteria to our health check, for filtering matches. + const std::string criteria_yaml = absl::StrFormat( + R"EOF( +transport_socket_match_criteria: + %s: "true" +)EOF", + criteria); + cluster_health_check->mutable_health_checks(0)->MergeFrom( + TestUtility::parseYaml(criteria_yaml)); + } + Event::SimulatedTimeSystem time_system_; envoy::config::core::v3::Node node_; Event::MockDispatcher dispatcher_; @@ -402,19 +456,7 @@ TEST_F(HdsTest, TestSendResponseMultipleEndpoints) { // Create a new active connection on request, setting its status to connected // to mock a found endpoint. - EXPECT_CALL(dispatcher_, createClientConnection_(_, _, _, _)) - .WillRepeatedly(Invoke( - [](Network::Address::InstanceConstSharedPtr, Network::Address::InstanceConstSharedPtr, - Network::TransportSocketPtr&, const Network::ConnectionSocket::OptionsSharedPtr&) { - Network::MockClientConnection* connection = - new NiceMock(); - - // pretend our endpoint was connected to. - connection->raiseEvent(Network::ConnectionEvent::Connected); - - // return this new, connected endpoint. 
- return connection; - })); + expectCreateClientConnection(); EXPECT_CALL(*server_response_timer_, enableTimer(_, _)).Times(2); EXPECT_CALL(async_stream_, sendMessageRaw_(_, false)); @@ -494,31 +536,9 @@ TEST_F(HdsTest, TestSocketContext) { EXPECT_CALL(async_stream_, sendMessageRaw_(_, _)); createHdsDelegate(); - // Create Message. + // Create Message with transport sockets. message.reset(createSimpleMessage()); - - // Add transport socket matches to message. - const std::string match_yaml = absl::StrFormat( - R"EOF( -transport_socket_matches: -- name: "test_socket" - match: - test_match: "true" - transport_socket: - name: "envoy.transport_sockets.raw_buffer" -)EOF"); - auto* cluster_health_check = message->mutable_cluster_health_checks(0); - cluster_health_check->MergeFrom( - TestUtility::parseYaml(match_yaml)); - - // Add transport socket match criteria to our health check, for filtering matches. - const std::string criteria_yaml = absl::StrFormat( - R"EOF( -transport_socket_match_criteria: - test_match: "true" -)EOF"); - cluster_health_check->mutable_health_checks(0)->MergeFrom( - TestUtility::parseYaml(criteria_yaml)); + addTransportSocketMatches(message->mutable_cluster_health_checks(0), "test_match", "test_match"); Network::MockClientConnection* connection = new NiceMock(); EXPECT_CALL(dispatcher_, createClientConnection_(_, _, _, _)).WillRepeatedly(Return(connection)); @@ -679,5 +699,401 @@ TEST_F(HdsTest, TestSendResponseOneEndpointTimeout) { 1234); } +// Check to see if two of the same specifier does not get parsed twice in a row. +TEST_F(HdsTest, TestSameSpecifier) { + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + EXPECT_CALL(async_stream_, sendMessageRaw_(_, _)); + createHdsDelegate(); + + // Create Message + message.reset(createSimpleMessage()); + + // Create a new active connection on request, setting its status to connected + // to mock a found endpoint. + expectCreateClientConnection(); + + EXPECT_CALL(*server_response_timer_, enableTimer(_, _)).Times(AtLeast(1)); + EXPECT_CALL(async_stream_, sendMessageRaw_(_, false)); + EXPECT_CALL(test_factory_, createClusterInfo(_)).WillRepeatedly(Return(cluster_info_)); + EXPECT_CALL(dispatcher_, deferredDelete_(_)).Times(AtLeast(1)); + hds_delegate_->onReceiveMessage(std::move(message)); + hds_delegate_->sendResponse(); + + // Try to change the specifier, but it is the same. + message.reset(createSimpleMessage()); + hds_delegate_->onReceiveMessage(std::move(message)); + + // Check to see that HDS got two requests, but only used the specifier one time. + checkHdsCounters(2, 0, 0, 1); + + // Try to change the specifier, but use a new specifier this time. + message = createComplexSpecifier(1, 1, 2); + hds_delegate_->onReceiveMessage(std::move(message)); + + // Check that both requests and updates increased, meaning we did an update. + checkHdsCounters(3, 0, 0, 2); +} + +// Test to see that if a cluster is added or removed, the ones that did not change are reused. +TEST_F(HdsTest, TestClusterChange) { + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + EXPECT_CALL(async_stream_, sendMessageRaw_(_, _)); + createHdsDelegate(); + + // Create Message + message = createComplexSpecifier(2, 1, 1); + + // Create a new active connection on request, setting its status to connected + // to mock a found endpoint. 
+ expectCreateClientConnection(); + + EXPECT_CALL(*server_response_timer_, enableTimer(_, _)).Times(AtLeast(1)); + EXPECT_CALL(async_stream_, sendMessageRaw_(_, false)); + EXPECT_CALL(test_factory_, createClusterInfo(_)).WillRepeatedly(Return(cluster_info_)); + EXPECT_CALL(dispatcher_, deferredDelete_(_)).Times(AtLeast(1)); + // Process message + hds_delegate_->onReceiveMessage(std::move(message)); + hds_delegate_->sendResponse(); + + // Get cluster shared pointers to make sure they are the same memory addresses, that we reused + // them. + auto original_clusters = hds_delegate_->hdsClusters(); + ASSERT_EQ(original_clusters.size(), 2); + + // Add a third cluster to the specifier. The first two should reuse pointers. + message = createComplexSpecifier(3, 1, 1); + hds_delegate_->onReceiveMessage(std::move(message)); + + // Get the new clusters list from HDS. + auto new_clusters = hds_delegate_->hdsClusters(); + ASSERT_EQ(new_clusters.size(), 3); + + // Make sure our first two clusters are at the same address in memory as before. + for (int i = 0; i < 2; i++) { + EXPECT_EQ(new_clusters[i], original_clusters[i]); + } + + message = createComplexSpecifier(3, 1, 1); + + // Remove the first element, change the order of the last two elements. + message->mutable_cluster_health_checks()->SwapElements(0, 2); + message->mutable_cluster_health_checks()->RemoveLast(); + // Sanity check. + ASSERT_EQ(message->cluster_health_checks_size(), 2); + + // Send this new specifier. + hds_delegate_->onReceiveMessage(std::move(message)); + + // Check to see that even if we changed the order, we get the expected pointers. + auto final_clusters = hds_delegate_->hdsClusters(); + ASSERT_EQ(final_clusters.size(), 2); + + // Compare first cluster in the new list is the same as the last in the previous list, + // and that the second cluster in the new list is the same as the second in the previous. + for (int i = 0; i < 2; i++) { + EXPECT_EQ(final_clusters[i], new_clusters[2 - i]); + } + + // Check to see that HDS got three requests, and updated three times with it. + checkHdsCounters(3, 0, 0, 3); +} + +// Edit one of two cluster's endpoints by adding and removing. +TEST_F(HdsTest, TestUpdateEndpoints) { + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + EXPECT_CALL(async_stream_, sendMessageRaw_(_, _)); + createHdsDelegate(); + + // Create Message, and later add/remove endpoints from the second cluster. + message.reset(createSimpleMessage()); + message->MergeFrom(*createComplexSpecifier(1, 1, 2)); + + // Create a new active connection on request, setting its status to connected + // to mock a found endpoint. + expectCreateClientConnection(); + + EXPECT_CALL(*server_response_timer_, enableTimer(_, _)).Times(AtLeast(1)); + EXPECT_CALL(async_stream_, sendMessageRaw_(_, false)); + EXPECT_CALL(test_factory_, createClusterInfo(_)).WillRepeatedly(Return(cluster_info_)); + EXPECT_CALL(dispatcher_, deferredDelete_(_)).Times(AtLeast(1)); + // Process message + hds_delegate_->onReceiveMessage(std::move(message)); + hds_delegate_->sendResponse(); + + // Save list of hosts/endpoints for comparison later. + auto original_hosts = hds_delegate_->hdsClusters()[1]->hosts(); + ASSERT_EQ(original_hosts.size(), 2); + + // Add 3 endpoints to the specifier's second cluster. The first in the list should reuse pointers. + message.reset(createSimpleMessage()); + message->MergeFrom(*createComplexSpecifier(1, 1, 5)); + hds_delegate_->onReceiveMessage(std::move(message)); + + // Get the new clusters list from HDS. 
+ auto new_hosts = hds_delegate_->hdsClusters()[1]->hosts(); + ASSERT_EQ(new_hosts.size(), 5); + + // Make sure our first two endpoints are at the same address in memory as before. + for (int i = 0; i < 2; i++) { + EXPECT_EQ(original_hosts[i], new_hosts[i]); + } + EXPECT_TRUE(original_hosts[0] != new_hosts[2]); + + // This time, have 4 endpoints, 2 each under 2 localities. + // The first locality will be reused, so its 2 endpoints will be as well. + // The second locality is new so we should be getting 2 new endpoints. + // Since the first locality had 5 but now has 2, we are removing 3. + // 2 ADDED, 3 REMOVED, 2 REUSED. + message.reset(createSimpleMessage()); + message->MergeFrom(*createComplexSpecifier(1, 2, 2)); + hds_delegate_->onReceiveMessage(std::move(message)); + + // Get this new list of hosts. + auto final_hosts = hds_delegate_->hdsClusters()[1]->hosts(); + ASSERT_EQ(final_hosts.size(), 4); + + // Ensure the first two elements in the new list are reused. + for (int i = 0; i < 2; i++) { + EXPECT_EQ(new_hosts[i], final_hosts[i]); + } + + // Ensure the first last two elements in the new list are different then the previous list. + for (int i = 2; i < 4; i++) { + EXPECT_TRUE(new_hosts[i] != final_hosts[i]); + } + + // Check to see that HDS got three requests, and updated three times with it. + checkHdsCounters(3, 0, 0, 3); +} + +// Test adding, reusing, and removing health checks. +TEST_F(HdsTest, TestUpdateHealthCheckers) { + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + EXPECT_CALL(async_stream_, sendMessageRaw_(_, _)); + createHdsDelegate(); + + // Create Message with two different health checkers. + message.reset(createSimpleMessage()); + auto new_hc = message->mutable_cluster_health_checks(0)->add_health_checks(); + new_hc->MergeFrom(message->mutable_cluster_health_checks(0)->health_checks(0)); + new_hc->mutable_http_health_check()->set_path("/different_path"); + + // Create a new active connection on request, setting its status to connected + // to mock a found endpoint. + expectCreateClientConnection(); + + EXPECT_CALL(*server_response_timer_, enableTimer(_, _)).Times(AtLeast(1)); + EXPECT_CALL(async_stream_, sendMessageRaw_(_, false)); + EXPECT_CALL(test_factory_, createClusterInfo(_)).WillRepeatedly(Return(cluster_info_)); + EXPECT_CALL(dispatcher_, deferredDelete_(_)).Times(AtLeast(1)); + // Process message + hds_delegate_->onReceiveMessage(std::move(message)); + hds_delegate_->sendResponse(); + + // Save list of health checkers for use later. + auto original_hcs = hds_delegate_->hdsClusters()[0]->healthCheckers(); + ASSERT_EQ(original_hcs.size(), 2); + + // Create a new specifier, but make the second health checker different and add a third. + // Then reverse the order so the first one is at the end, testing the hashing works as expected. + message.reset(createSimpleMessage()); + auto new_hc0 = message->mutable_cluster_health_checks(0)->add_health_checks(); + new_hc0->MergeFrom(message->mutable_cluster_health_checks(0)->health_checks(0)); + new_hc0->mutable_http_health_check()->set_path("/path0"); + auto new_hc1 = message->mutable_cluster_health_checks(0)->add_health_checks(); + new_hc1->MergeFrom(message->mutable_cluster_health_checks(0)->health_checks(0)); + new_hc1->mutable_http_health_check()->set_path("/path1"); + message->mutable_cluster_health_checks(0)->mutable_health_checks()->SwapElements(0, 2); + hds_delegate_->onReceiveMessage(std::move(message)); + + // Get the new health check list from HDS. 
+ auto new_hcs = hds_delegate_->hdsClusters()[0]->healthCheckers(); + ASSERT_EQ(new_hcs.size(), 3); + + // Make sure our first hc from the original list is the same as the third in the new list. + EXPECT_EQ(original_hcs[0], new_hcs[2]); + EXPECT_TRUE(original_hcs[1] != new_hcs[1]); + + // Check to see that HDS got two requests, and updated two times with it. + checkHdsCounters(2, 0, 0, 2); +} + +// Test to see that if clusters with an empty name get used, there are two clusters. +// Also test to see that if two clusters with the same non-empty name are used, only have +// One cluster. +TEST_F(HdsTest, TestClusterSameName) { + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + EXPECT_CALL(async_stream_, sendMessageRaw_(_, _)); + createHdsDelegate(); + + // Create Message + message = createComplexSpecifier(2, 1, 1); + // Set both clusters to have an empty name. + message->mutable_cluster_health_checks(0)->set_cluster_name(""); + message->mutable_cluster_health_checks(1)->set_cluster_name(""); + + // Create a new active connection on request, setting its status to connected + // to mock a found endpoint. + expectCreateClientConnection(); + + EXPECT_CALL(*server_response_timer_, enableTimer(_, _)).Times(AtLeast(1)); + EXPECT_CALL(async_stream_, sendMessageRaw_(_, false)); + EXPECT_CALL(test_factory_, createClusterInfo(_)).WillRepeatedly(Return(cluster_info_)); + EXPECT_CALL(dispatcher_, deferredDelete_(_)).Times(AtLeast(1)); + // Process message + hds_delegate_->onReceiveMessage(std::move(message)); + hds_delegate_->sendResponse(); + + // Get the clusters from HDS + auto original_clusters = hds_delegate_->hdsClusters(); + + // Make sure that even though they have the same name, since they are empty there are two and they + // do not point to the same thing. + ASSERT_EQ(original_clusters.size(), 2); + ASSERT_TRUE(original_clusters[0] != original_clusters[1]); + + // Create message with 3 clusters this time so we force an update. + message = createComplexSpecifier(3, 1, 1); + // Set both clusters to have empty names empty name. + message->mutable_cluster_health_checks(0)->set_cluster_name(""); + message->mutable_cluster_health_checks(1)->set_cluster_name(""); + + // Test that we still get requested number of clusters, even with repeated names on update since + // they are empty. + hds_delegate_->onReceiveMessage(std::move(message)); + auto new_clusters = hds_delegate_->hdsClusters(); + + // Check that since the names are empty, we do not reuse and just reconstruct. + ASSERT_EQ(new_clusters.size(), 3); + ASSERT_TRUE(original_clusters[0] != new_clusters[0]); + ASSERT_TRUE(original_clusters[1] != new_clusters[1]); + + // Create a new message. + message = createComplexSpecifier(2, 1, 1); + // Set both clusters to have the same, non-empty name. + message->mutable_cluster_health_checks(0)->set_cluster_name("anna"); + message->mutable_cluster_health_checks(1)->set_cluster_name("anna"); + + hds_delegate_->onReceiveMessage(std::move(message)); + + // Check that since they both have the same name, only one of them gets used. + auto final_clusters = hds_delegate_->hdsClusters(); + ASSERT_EQ(final_clusters.size(), 1); + + // Check to see that HDS got three requests, and updated three times with it. + checkHdsCounters(3, 0, 0, 3); +} + +// Test that a transport_socket_matches and transport_socket_match_criteria filter fail when not +// matching, and then after an update the same cluster is used but now matches. 
+TEST_F(HdsTest, TestUpdateSocketContext) { + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + EXPECT_CALL(async_stream_, sendMessageRaw_(_, _)); + createHdsDelegate(); + + // Create a new active connection on request, setting its status to connected + // to mock a found endpoint. + expectCreateClientConnection(); + + // Pull out socket_matcher object normally internal to createClusterInfo, to test that a matcher + // would match the expected socket. + std::vector> socket_matchers; + EXPECT_CALL(test_factory_, createClusterInfo(_)) + .WillRepeatedly(Invoke([&](const ClusterInfoFactory::CreateClusterInfoParams& params) { + // Build scope, factory_context as does ProdClusterInfoFactory. + Envoy::Stats::ScopePtr scope = + params.stats_.createScope(fmt::format("cluster.{}.", params.cluster_.name())); + Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( + params.admin_, params.ssl_context_manager_, *scope, params.cm_, params.local_info_, + params.dispatcher_, params.stats_, params.singleton_manager_, params.tls_, + params.validation_visitor_, params.api_); + + // Create a mock socket_factory for the scope of this unit test. + std::unique_ptr socket_factory = + std::make_unique(); + + // set socket_matcher object in test scope. + socket_matchers.push_back(std::make_unique( + params.cluster_.transport_socket_matches(), factory_context, socket_factory, *scope)); + + // But still use the fake cluster_info_. + return cluster_info_; + })); + EXPECT_CALL(dispatcher_, deferredDelete_(_)).Times(AtLeast(1)); + EXPECT_CALL(*server_response_timer_, enableTimer(_, _)).Times(AtLeast(1)); + + // Create Message, with a non-valid match and process. + message.reset(createSimpleMessage()); + addTransportSocketMatches(message->mutable_cluster_health_checks(0), "bad_match", "test_match"); + hds_delegate_->onReceiveMessage(std::move(message)); + + // Get our health checker to match against. + const auto first_clusters = hds_delegate_->hdsClusters(); + ASSERT_EQ(first_clusters.size(), 1); + const auto first_hcs = first_clusters[0]->healthCheckers(); + ASSERT_EQ(first_hcs.size(), 1); + + // Check that our fails so it uses default. + HealthCheckerImplBase* first_health_checker_base = + dynamic_cast(first_hcs[0].get()); + const auto first_match = + socket_matchers[0]->resolve(first_health_checker_base->transportSocketMatchMetadata().get()); + EXPECT_EQ(first_match.name_, "default"); + + // Create a new Message, this time with a good match. + message.reset(createSimpleMessage()); + addTransportSocketMatches(message->mutable_cluster_health_checks(0), "test_match", "test_match"); + hds_delegate_->onReceiveMessage(std::move(message)); + + // Get our new health checker to match against. + const auto second_clusters = hds_delegate_->hdsClusters(); + ASSERT_EQ(second_clusters.size(), 1); + // Check that this new pointer is actually the same pointer to the first cluster. + ASSERT_EQ(second_clusters[0], first_clusters[0]); + const auto second_hcs = second_clusters[0]->healthCheckers(); + ASSERT_EQ(second_hcs.size(), 1); + + // Check that since we made no change to our health checkers, the pointer was reused. + EXPECT_EQ(first_hcs[0], second_hcs[0]); + + // Check that our match hits. 
+ HealthCheckerImplBase* second_health_checker_base = + dynamic_cast(second_hcs[0].get()); + ASSERT_EQ(socket_matchers.size(), 2); + const auto second_match = + socket_matchers[1]->resolve(second_health_checker_base->transportSocketMatchMetadata().get()); + EXPECT_EQ(second_match.name_, "test_socket"); + + // Create a new Message, this we leave the transport socket the same but change the health check's + // filter. This means that the health checker changes but the transport_socket_matches in the + // ClusterHealthCheck does not. + message.reset(createSimpleMessage()); + addTransportSocketMatches(message->mutable_cluster_health_checks(0), "test_match", + "something_new"); + + hds_delegate_->onReceiveMessage(std::move(message)); + // Get our new health checker to match against. + const auto third_clusters = hds_delegate_->hdsClusters(); + ASSERT_EQ(third_clusters.size(), 1); + // Check that this new pointer is actually the same pointer to the first cluster. + ASSERT_EQ(third_clusters[0], first_clusters[0]); + const auto third_hcs = third_clusters[0]->healthCheckers(); + ASSERT_EQ(third_hcs.size(), 1); + + // Check that since we made a change to our HC, it is a new pointer. + EXPECT_TRUE(first_hcs[0] != third_hcs[0]); + + HealthCheckerImplBase* third_health_checker_base = + dynamic_cast(third_hcs[0].get()); + + // Check that our socket matchers is still a size 2. This is because createClusterInfo(_) is never + // called again since there was no update to transportSocketMatches. + ASSERT_EQ(socket_matchers.size(), 2); + const auto third_match = + socket_matchers[1]->resolve(third_health_checker_base->transportSocketMatchMetadata().get()); + // Since this again does not match, it uses default. + EXPECT_EQ(third_match.name_, "default"); +} + } // namespace Upstream } // namespace Envoy diff --git a/test/integration/hds_integration_test.cc b/test/integration/hds_integration_test.cc index effe4a86bb1b..9f49ed87f0b2 100644 --- a/test/integration/hds_integration_test.cc +++ b/test/integration/hds_integration_test.cc @@ -185,6 +185,31 @@ class HdsIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest, return server_health_check_specifier_; } + envoy::service::health::v3::ClusterHealthCheck createSecondCluster(std::string name) { + // Add endpoint + envoy::service::health::v3::ClusterHealthCheck health_check; + + health_check.set_cluster_name(name); + Network::Utility::addressToProtobufAddress( + *host2_upstream_->localAddress(), + *health_check.add_locality_endpoints()->add_endpoints()->mutable_address()); + health_check.mutable_locality_endpoints(0)->mutable_locality()->set_region("kounopetra"); + health_check.mutable_locality_endpoints(0)->mutable_locality()->set_zone("emplisi"); + health_check.mutable_locality_endpoints(0)->mutable_locality()->set_sub_zone("paris"); + + health_check.add_health_checks()->mutable_timeout()->set_seconds(MaxTimeout); + health_check.mutable_health_checks(0)->mutable_interval()->set_seconds(MaxTimeout); + health_check.mutable_health_checks(0)->mutable_unhealthy_threshold()->set_value(2); + health_check.mutable_health_checks(0)->mutable_healthy_threshold()->set_value(2); + health_check.mutable_health_checks(0)->mutable_grpc_health_check(); + health_check.mutable_health_checks(0) + ->mutable_http_health_check() + ->set_hidden_envoy_deprecated_use_http2(false); + health_check.mutable_health_checks(0)->mutable_http_health_check()->set_path("/healthcheck"); + + return health_check; + } + // Creates a basic HealthCheckSpecifier message containing one 
endpoint and // one TCP health_check envoy::service::health::v3::HealthCheckSpecifier makeTcpHealthCheckSpecifier() { @@ -694,26 +719,8 @@ TEST_P(HdsIntegrationTest, TwoEndpointsDifferentClusters) { server_health_check_specifier_ = makeHttpHealthCheckSpecifier(envoy::type::v3::CodecClientType::HTTP1, false); - // Add endpoint - auto* health_check = server_health_check_specifier_.add_cluster_health_checks(); - - health_check->set_cluster_name("cat"); - Network::Utility::addressToProtobufAddress( - *host2_upstream_->localAddress(), - *health_check->add_locality_endpoints()->add_endpoints()->mutable_address()); - health_check->mutable_locality_endpoints(0)->mutable_locality()->set_region("kounopetra"); - health_check->mutable_locality_endpoints(0)->mutable_locality()->set_zone("emplisi"); - health_check->mutable_locality_endpoints(0)->mutable_locality()->set_sub_zone("paris"); - - health_check->add_health_checks()->mutable_timeout()->set_seconds(MaxTimeout); - health_check->mutable_health_checks(0)->mutable_interval()->set_seconds(MaxTimeout); - health_check->mutable_health_checks(0)->mutable_unhealthy_threshold()->set_value(2); - health_check->mutable_health_checks(0)->mutable_healthy_threshold()->set_value(2); - health_check->mutable_health_checks(0)->mutable_grpc_health_check(); - health_check->mutable_health_checks(0) - ->mutable_http_health_check() - ->set_hidden_envoy_deprecated_use_http2(false); - health_check->mutable_health_checks(0)->mutable_http_health_check()->set_path("/healthcheck"); + // Add Second Cluster + server_health_check_specifier_.add_cluster_health_checks()->MergeFrom(createSecondCluster("cat")); // Server <--> Envoy waitForHdsStream(); @@ -1040,5 +1047,124 @@ TEST_P(HdsIntegrationTest, SingleEndpointUnhealthyTlsMissingSocketMatch) { cleanupHdsConnection(); } +TEST_P(HdsIntegrationTest, UpdateEndpoints) { + initialize(); + server_health_check_specifier_ = + makeHttpHealthCheckSpecifier(envoy::type::v3::CodecClientType::HTTP1, false); + + // Add Second Cluster. + server_health_check_specifier_.add_cluster_health_checks()->MergeFrom(createSecondCluster("cat")); + + // Server <--> Envoy + waitForHdsStream(); + ASSERT_TRUE(hds_stream_->waitForGrpcMessage(*dispatcher_, envoy_msg_)); + + // Server asks for health checking + hds_stream_->startGrpcStream(); + hds_stream_->sendGrpcMessage(server_health_check_specifier_); + test_server_->waitForCounterGe("hds_delegate.requests", ++hds_requests_); + + // Envoy sends health check messages to two endpoints + healthcheckEndpoints("cat"); + + // Endpoint responds to the health check + host_stream_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "404"}}, false); + host_stream_->encodeData(1024, true); + host2_stream_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); + host2_stream_->encodeData(1024, true); + + // Receive updates until the one we expect arrives + ASSERT_TRUE(waitForClusterHealthResponse(envoy::config::core::v3::HEALTHY, + host2_upstream_->localAddress(), 1, 0, 0)); + + ASSERT_EQ(response_.endpoint_health_response().cluster_endpoints_health_size(), 2); + + // store cluster response info for easier reference. + const auto& cluster_resp0 = response_.endpoint_health_response().cluster_endpoints_health(0); + const auto& cluster_resp1 = response_.endpoint_health_response().cluster_endpoints_health(1); + + // check cluster info and sizes. 
+ EXPECT_EQ(cluster_resp0.cluster_name(), "anna"); + ASSERT_EQ(cluster_resp0.locality_endpoints_health_size(), 1); + EXPECT_EQ(cluster_resp1.cluster_name(), "cat"); + ASSERT_EQ(cluster_resp1.locality_endpoints_health_size(), 1); + + // store locality response info for easier reference. + const auto& locality_resp0 = cluster_resp0.locality_endpoints_health(0); + const auto& locality_resp1 = cluster_resp1.locality_endpoints_health(0); + + // check locality info and sizes. + EXPECT_EQ(locality_resp0.locality().sub_zone(), "hobbiton"); + ASSERT_EQ(locality_resp0.endpoints_health_size(), 1); + EXPECT_EQ(locality_resp1.locality().sub_zone(), "paris"); + ASSERT_EQ(locality_resp1.endpoints_health_size(), 1); + + // Check endpoints. + EXPECT_TRUE(checkEndpointHealthResponse(locality_resp0.endpoints_health(0), + envoy::config::core::v3::UNHEALTHY, + host_upstream_->localAddress())); + + checkCounters(1, 2, 0, 1); + EXPECT_EQ(1, test_server_->counter("cluster.cat.health_check.success")->value()); + EXPECT_EQ(0, test_server_->counter("cluster.cat.health_check.failure")->value()); + + // Create new specifier that removes the second cluster, and adds an endpoint to the first. + server_health_check_specifier_ = + makeHttpHealthCheckSpecifier(envoy::type::v3::CodecClientType::HTTP1, false); + Network::Utility::addressToProtobufAddress( + *host2_upstream_->localAddress(), + *server_health_check_specifier_.mutable_cluster_health_checks(0) + ->mutable_locality_endpoints(0) + ->add_endpoints() + ->mutable_address()); + + // Reset second endpoint for usage in our cluster. + ASSERT_TRUE(host2_fake_connection_->close()); + ASSERT_TRUE(host2_fake_connection_->waitForDisconnect()); + + // Send new specifier. + hds_stream_->sendGrpcMessage(server_health_check_specifier_); + // TODO: add stats reporting and verification for Clusters added/removed/reused and Endpoints + // added/removed/reused. + test_server_->waitForCounterGe("hds_delegate.requests", ++hds_requests_); + + // Set up second endpoint again. + ASSERT_TRUE(host2_upstream_->waitForHttpConnection(*dispatcher_, host2_fake_connection_)); + ASSERT_TRUE(host2_fake_connection_->waitForNewStream(*dispatcher_, host2_stream_)); + ASSERT_TRUE(host2_stream_->waitForEndStream(*dispatcher_)); + EXPECT_EQ(host2_stream_->headers().getPathValue(), "/healthcheck"); + EXPECT_EQ(host2_stream_->headers().getMethodValue(), "GET"); + EXPECT_EQ(host2_stream_->headers().getHostValue(), "anna"); + + // Endpoints respond to the health check + host2_stream_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); + host2_stream_->encodeData(1024, true); + + // Receive updates until the one we expect arrives + ASSERT_TRUE(waitForClusterHealthResponse(envoy::config::core::v3::HEALTHY, + host2_upstream_->localAddress(), 0, 0, 1)); + + // Ensure we have at least one cluster before trying to read it. + ASSERT_EQ(response_.endpoint_health_response().cluster_endpoints_health_size(), 1); + + // store cluster response info for easier reference. + const auto& cluster_response = response_.endpoint_health_response().cluster_endpoints_health(0); + + // Check cluster has correct name and number of localities (1) + EXPECT_EQ(cluster_response.cluster_name(), "anna"); + ASSERT_EQ(cluster_response.locality_endpoints_health_size(), 1); + + // check the only locality and its endpoints. 
+ const auto& locality_response = cluster_response.locality_endpoints_health(0); + EXPECT_EQ(locality_response.locality().sub_zone(), "hobbiton"); + ASSERT_EQ(locality_response.endpoints_health_size(), 2); + EXPECT_TRUE(checkEndpointHealthResponse(locality_response.endpoints_health(0), + envoy::config::core::v3::UNHEALTHY, + host_upstream_->localAddress())); + + cleanupHostConnections(); + cleanupHdsConnection(); +} + } // namespace } // namespace Envoy From 63c94a4f957c10e66d3e8ef2c03a1cb64be2218a Mon Sep 17 00:00:00 2001 From: htuch Date: Wed, 14 Oct 2020 20:40:05 -0400 Subject: [PATCH 11/17] dependencies: fix some of the fallout from Wasm merge. (#13569) * Move use of URLs/sha256 in repositories.bzl to repository_locations.bzl. * Add a check_format rule to validate we don't add URLs outside of repository_locations.bzl in the future. * Extend build graph validator (validate.py) to catch any test deps not covered by "test_only". Exceptions made for Rust/Java/Pip3. Risk level: Low (build only) Testing: Additional validate.py and check_format unit tests. Signed-off-by: Harvey Tuch --- bazel/repositories.bzl | 26 +-------------- bazel/repository_locations.bzl | 8 ++--- .../arch_overview/security/external_deps.rst | 13 +++++--- tools/code_format/check_format.py | 21 ++++++++++++ tools/code_format/check_format_test_helper.py | 4 +++ tools/dependency/validate.py | 33 +++++++++++++++++-- tools/dependency/validate_test.py | 4 +++ .../testdata/check_format/repository_url.bzl | 5 +++ .../testdata/check_format/repository_urls.bzl | 5 +++ 9 files changed, 83 insertions(+), 36 deletions(-) create mode 100644 tools/testdata/check_format/repository_url.bzl create mode 100644 tools/testdata/check_format/repository_urls.bzl diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index 12825602848c..1ba42eaa6ae2 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -351,31 +351,10 @@ def _com_github_zlib_ng_zlib_ng(): def _com_google_cel_cpp(): external_http_archive("com_google_cel_cpp") external_http_archive("rules_antlr") - external_http_archive( - name = "antlr4_runtimes", - build_file_content = """ -package(default_visibility = ["//visibility:public"]) -cc_library( - name = "cpp", - srcs = glob(["runtime/Cpp/runtime/src/**/*.cpp"]), - hdrs = glob(["runtime/Cpp/runtime/src/**/*.h"]), - includes = ["runtime/Cpp/runtime/src"], -) -""", - patch_args = ["-p1"], - # Patches ASAN violation of initialization fiasco - patches = ["@envoy//bazel:antlr.patch"], - ) # Parser dependencies # TODO: upgrade this when cel is upgraded to use the latest version - external_http_archive( - name = "rules_antlr", - sha256 = "7249d1569293d9b239e23c65f6b4c81a07da921738bde0dfeb231ed98be40429", - strip_prefix = "rules_antlr-3cc2f9502a54ceb7b79b37383316b23c4da66f9a", - urls = ["https://github.com/marcohu/rules_antlr/archive/3cc2f9502a54ceb7b79b37383316b23c4da66f9a.tar.gz"], - ) - + external_http_archive(name = "rules_antlr") external_http_archive( name = "antlr4_runtimes", build_file_content = """ @@ -387,12 +366,9 @@ cc_library( includes = ["runtime/Cpp/runtime/src"], ) """, - sha256 = "46f5e1af5f4bd28ade55cb632f9a069656b31fc8c2408f9aa045f9b5f5caad64", patch_args = ["-p1"], # Patches ASAN violation of initialization fiasco patches = ["@envoy//bazel:antlr.patch"], - strip_prefix = "antlr4-4.7.2", - urls = ["https://github.com/antlr/antlr4/archive/4.7.2.tar.gz"], ) def _com_github_nghttp2_nghttp2(): diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 
3e44753ab382..fba8cfebd16f 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -859,7 +859,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( # See: https://github.com/bazelbuild/rules_rust/issues/386 strip_prefix = "rules_rust-{version}", urls = ["https://github.com/bazelbuild/rules_rust/archive/{version}.tar.gz"], - use_category = ["build"], + use_category = ["test_only"], last_updated = "2020-10-09", ), rules_antlr = dict( @@ -886,8 +886,8 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "ANTLR v4", project_desc = "ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing, or translating structured text or binary files", project_url = "https://github.com/antlr/antlr4", - version = "4.7.1", - sha256 = "4d0714f441333a63e50031c9e8e4890c78f3d21e053d46416949803e122a6574", + version = "4.7.2", + sha256 = "46f5e1af5f4bd28ade55cb632f9a069656b31fc8c2408f9aa045f9b5f5caad64", strip_prefix = "antlr4-{version}", urls = ["https://github.com/antlr/antlr4/archive/{version}.tar.gz"], use_category = ["dataplane_ext"], @@ -898,7 +898,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( "envoy.filters.network.wasm", "envoy.stat_sinks.wasm", ], - last_updated = "2020-07-29", + last_updated = "2020-10-09", cpe = "N/A", ), ) diff --git a/docs/root/intro/arch_overview/security/external_deps.rst b/docs/root/intro/arch_overview/security/external_deps.rst index 5a0fb0ff5d78..42a54fe3911f 100644 --- a/docs/root/intro/arch_overview/security/external_deps.rst +++ b/docs/root/intro/arch_overview/security/external_deps.rst @@ -36,11 +36,6 @@ Observability (extensions) .. include:: external_dep_observability_ext.rst -Test only ---------- - -.. include:: external_dep_test_only.rst - Build ----- @@ -50,3 +45,11 @@ Miscellaneous ------------- .. include:: external_dep_other.rst + +Test only +--------- + +Below we provide the status of the C/C++ dependencies that are only used in tests. Tests also +include additional Java, Rust and Python dependencies that are not tracked below. + +.. include:: external_dep_test_only.rst diff --git a/tools/code_format/check_format.py b/tools/code_format/check_format.py index 5f17d1468162..7b3e1cac97e0 100755 --- a/tools/code_format/check_format.py +++ b/tools/code_format/check_format.py @@ -110,6 +110,22 @@ EXCEPTION_DENYLIST = ("./source/common/http/http2/codec_impl.h", "./source/common/http/http2/codec_impl.cc") +# We want all URL references to exist in repository_locations.bzl files and have +# metadata that conforms to the schema in ./api/bazel/external_deps.bzl. Below +# we have some exceptions for either infrastructure files or places we fall +# short today (Rust). +# +# Please DO NOT extend this allow list without consulting +# @envoyproxy/dependency-shepherds. 
+BUILD_URLS_ALLOWLIST = ( + "./generated_api_shadow/bazel/repository_locations.bzl", + "./generated_api_shadow/bazel/envoy_http_archive.bzl", + "./bazel/repository_locations.bzl", + "./bazel/crates.bzl", + "./api/bazel/repository_locations.bzl", + "./api/bazel/envoy_http_archive.bzl", +) + CLANG_FORMAT_PATH = os.getenv("CLANG_FORMAT", "clang-format-10") BUILDIFIER_PATH = paths.getBuildifier() BUILDOZER_PATH = paths.getBuildozer() @@ -404,6 +420,9 @@ def denylistedForExceptions(self, file_path): return (file_path.endswith('.h') and not file_path.startswith("./test/")) or file_path in EXCEPTION_DENYLIST \ or self.isInSubdir(file_path, 'tools/testdata') + def allowlistedForBuildUrls(self, file_path): + return file_path in BUILD_URLS_ALLOWLIST + def isApiFile(self, file_path): return file_path.startswith(self.api_prefix) or file_path.startswith(self.api_shadow_root) @@ -806,6 +825,8 @@ def checkBuildLine(self, line, file_path, reportError): not self.isWorkspaceFile(file_path) and not self.isExternalBuildFile(file_path) and "@envoy//" in line): reportError("Superfluous '@envoy//' prefix") + if not self.allowlistedForBuildUrls(file_path) and (" urls = " in line or " url = " in line): + reportError("Only repository_locations.bzl may contain URL references") def fixBuildLine(self, file_path, line, line_number): if (self.envoy_build_rule_check and not self.isStarlarkFile(file_path) and diff --git a/tools/code_format/check_format_test_helper.py b/tools/code_format/check_format_test_helper.py index 1d98ca7ba17e..aa90d12848ec 100755 --- a/tools/code_format/check_format_test_helper.py +++ b/tools/code_format/check_format_test_helper.py @@ -262,6 +262,10 @@ def runChecks(): "throw.cc", "Don't introduce throws into exception-free files, use error statuses instead.") errors += checkUnfixableError("pgv_string.proto", "min_bytes is DEPRECATED, Use min_len.") errors += checkFileExpectingOK("commented_throw.cc") + errors += checkUnfixableError("repository_url.bzl", + "Only repository_locations.bzl may contain URL references") + errors += checkUnfixableError("repository_urls.bzl", + "Only repository_locations.bzl may contain URL references") # The following files have errors that can be automatically fixed. errors += checkAndFixError("over_enthusiastic_spaces.cc", diff --git a/tools/dependency/validate.py b/tools/dependency/validate.py index 813d9bd90787..1e6dc88fd343 100755 --- a/tools/dependency/validate.py +++ b/tools/dependency/validate.py @@ -38,8 +38,27 @@ 'io_bazel_rules_go', 'foreign_cc_platform_utils', 'com_github_golang_protobuf', 'com_google_googleapis' ] -IGNORE_DEPS = set(['envoy', 'envoy_api', 'platforms', 'bazel_tools', 'local_config_cc'] + - UNKNOWN_DEPS) +IGNORE_DEPS = set([ + 'envoy', 'envoy_api', 'envoy_api_canonical', 'platforms', 'bazel_tools', 'local_config_cc', + 'remote_coverage_tools' +] + UNKNOWN_DEPS) + + +# Should a dependency be ignored if it's only used in test? Any changes to this +# allowlist method should be accompanied by an update to the explanation in the +# "Test only" section of +# docs/root/intro/arch_overview/security/external_deps.rst. +def TestOnlyIgnore(dep): + # Rust + if dep.startswith('raze__'): + return True + # Java + if dep.startswith('remotejdk'): + return True + # Python (pip3) + if '_pip3_' in dep: + return True + return False class DependencyError(Exception): @@ -139,11 +158,21 @@ def ValidateTestOnlyDeps(self): DependencyError: on a dependency validation error.
""" print('Validating test-only dependencies...') + # Validate that //source doesn't depend on test_only queried_source_deps = self._build_graph.QueryExternalDeps('//source/...') expected_test_only_deps = self._dep_info.DepsByUseCategory('test_only') bad_test_only_deps = expected_test_only_deps.intersection(queried_source_deps) if len(bad_test_only_deps) > 0: raise DependencyError(f'//source depends on test-only dependencies: {bad_test_only_deps}') + # Validate that //test deps additional to those of //source are captured in + # test_only. + test_only_deps = self._build_graph.QueryExternalDeps('//test/...') + source_deps = self._build_graph.QueryExternalDeps('//source/...') + marginal_test_deps = test_only_deps.difference(source_deps) + bad_test_deps = marginal_test_deps.difference(expected_test_only_deps) + unknown_bad_test_deps = [dep for dep in bad_test_deps if not TestOnlyIgnore(dep)] + if len(unknown_bad_test_deps) > 0: + raise DependencyError(f'Missing deps in test_only "use_category": {unknown_bad_test_deps}') def ValidateDataPlaneCoreDeps(self): """Validate dataplane_core dependencies. diff --git a/tools/dependency/validate_test.py b/tools/dependency/validate_test.py index 539504ba02f4..4474c6442760 100755 --- a/tools/dependency/validate_test.py +++ b/tools/dependency/validate_test.py @@ -61,10 +61,14 @@ def test_invalid_build_graph_structure(self): def test_valid_test_only_deps(self): validator = self.BuildValidator({'a': FakeDep('dataplane_core')}, {'//source/...': ['a']}) validator.ValidateTestOnlyDeps() + validator = self.BuildValidator({'a': FakeDep('test_only')}, {'//test/...': ['a', 'b__pip3_']}) + validator.ValidateTestOnlyDeps() def test_invalid_test_only_deps(self): validator = self.BuildValidator({'a': FakeDep('test_only')}, {'//source/...': ['a']}) self.assertRaises(validate.DependencyError, lambda: validator.ValidateTestOnlyDeps()) + validator = self.BuildValidator({'a': FakeDep('test_only')}, {'//test/...': ['b']}) + self.assertRaises(validate.DependencyError, lambda: validator.ValidateTestOnlyDeps()) def test_valid_dataplane_core_deps(self): validator = self.BuildValidator({'a': FakeDep('dataplane_core')}, diff --git a/tools/testdata/check_format/repository_url.bzl b/tools/testdata/check_format/repository_url.bzl new file mode 100644 index 000000000000..8450c6aa3161 --- /dev/null +++ b/tools/testdata/check_format/repository_url.bzl @@ -0,0 +1,5 @@ +http_archive( + name = "foo", + url = "http://foo.com", + sha256 = "blah", +) diff --git a/tools/testdata/check_format/repository_urls.bzl b/tools/testdata/check_format/repository_urls.bzl new file mode 100644 index 000000000000..c67406076ab3 --- /dev/null +++ b/tools/testdata/check_format/repository_urls.bzl @@ -0,0 +1,5 @@ +http_archive( + name = "foo", + urls = ["http://foo.com"] + sha256 = "blah", +) From 04ec855995a438fec630db89a961e49476505b97 Mon Sep 17 00:00:00 2001 From: phlax Date: Thu, 15 Oct 2020 04:47:53 +0100 Subject: [PATCH 12/17] ci: Increate brew retry interval (#13565) Signed-off-by: Ryan Northey --- ci/mac_ci_setup.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci/mac_ci_setup.sh b/ci/mac_ci_setup.sh index e34989cc3a78..755852b4ffa0 100755 --- a/ci/mac_ci_setup.sh +++ b/ci/mac_ci_setup.sh @@ -11,7 +11,7 @@ brew uninstall openssl@1.0.2t export HOMEBREW_NO_AUTO_UPDATE=1 HOMEBREW_RETRY_ATTEMPTS=10 -HOMEBREW_RETRY_INTERVAL=1 +HOMEBREW_RETRY_INTERVAL=3 function is_installed { From 7d64bb9068344d6f2a36ffa37023203ec82db6bd Mon Sep 17 00:00:00 2001 From: phlax Date: Thu, 15 Oct 2020 
04:48:24 +0100 Subject: [PATCH 13/17] ci: Remove shellcheck diff (#13560) Signed-off-by: Ryan Northey --- tools/code_format/check_shellcheck_format.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/code_format/check_shellcheck_format.sh b/tools/code_format/check_shellcheck_format.sh index 3edab648625f..3a3a424d4af4 100755 --- a/tools/code_format/check_shellcheck_format.sh +++ b/tools/code_format/check_shellcheck_format.sh @@ -19,7 +19,7 @@ run_shellcheck_on () { local file file="$1" echo "Shellcheck: ${file}" - shellcheck -f diff -x "$file" + shellcheck -x "$file" } run_shellchecks () { From 0d512b31d39a29439c1a24ce452d0252a58b1553 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Wed, 14 Oct 2020 23:49:13 -0400 Subject: [PATCH 14/17] Fixed Health Check Fuzz corpus syntax (#13576) Signed-off-by: Zach --- test/common/upstream/BUILD | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/common/upstream/BUILD b/test/common/upstream/BUILD index b6b19139a7b5..54bc7d6a5375 100644 --- a/test/common/upstream/BUILD +++ b/test/common/upstream/BUILD @@ -680,7 +680,7 @@ envoy_proto_library( envoy_cc_fuzz_test( name = "health_check_fuzz_test", srcs = ["health_check_fuzz_test.cc"], - corpus = "//test/common/upstream:health_check_corpus", + corpus = "health_check_corpus", deps = [ ":health_check_fuzz_lib", ":health_check_fuzz_proto_cc_proto", From f62d51268075f0388cb361de05eaa6024ec07baf Mon Sep 17 00:00:00 2001 From: Rama Chavali Date: Thu, 15 Oct 2020 09:20:02 +0530 Subject: [PATCH 15/17] fix macos v8 build (#13572) Signed-off-by: Rama Chavali --- bazel/external/wee8.BUILD | 3 +++ bazel/external/wee8.genrule_cmd | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/bazel/external/wee8.BUILD b/bazel/external/wee8.BUILD index 2cc17dcb2374..3a62ecd9ebf4 100644 --- a/bazel/external/wee8.BUILD +++ b/bazel/external/wee8.BUILD @@ -13,6 +13,9 @@ cc_library( "wee8/include/v8-version.h", "wee8/third_party/wasm-api/wasm.hh", ], + copts = [ + "-Wno-range-loop-analysis", + ], defines = ["ENVOY_WASM_V8"], includes = [ "wee8/include", diff --git a/bazel/external/wee8.genrule_cmd b/bazel/external/wee8.genrule_cmd index cb688bcf45f9..d8cbd1981a64 100644 --- a/bazel/external/wee8.genrule_cmd +++ b/bazel/external/wee8.genrule_cmd @@ -19,7 +19,7 @@ pushd $$ROOT/wee8 rm -rf out/wee8 # Export compiler configuration. -export CXXFLAGS="$${CXXFLAGS-} -Wno-sign-compare -Wno-deprecated-copy -Wno-unknown-warning-option" +export CXXFLAGS="$${CXXFLAGS-} -Wno-sign-compare -Wno-deprecated-copy -Wno-unknown-warning-option -Wno-range-loop-analysis" if [[ ( `uname` == "Darwin" && $${CXX-} == "" ) || $${CXX-} == *"clang"* ]]; then export IS_CLANG=true export CC=$${CC:-clang} From 8f20f978a2e57662b0c78f9c9b9843a288d1342b Mon Sep 17 00:00:00 2001 From: Piotr Sikora Date: Thu, 15 Oct 2020 05:17:35 -0700 Subject: [PATCH 16/17] tls: update BoringSSL to 2192bbc8 (4240). 
(#13567) Signed-off-by: Piotr Sikora --- bazel/repository_locations.bzl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index fba8cfebd16f..da172933473c 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -60,18 +60,18 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "BoringSSL", project_desc = "Minimal OpenSSL fork", project_url = "https://github.com/google/boringssl", - version = "597b810379e126ae05d32c1d94b1a9464385acd0", - sha256 = "1ea42456c020daf0a9b0f9e8d8bc3a403c9314f4f54230c617257af996cd5fa6", + version = "2192bbc878822cf6ab5977d4257a1339453d9d39", + sha256 = "bb55b0ed2f0cb548b5dce6a6b8307ce37f7f748eb9f1be6bfe2d266ff2b4d52b", strip_prefix = "boringssl-{version}", # To update BoringSSL, which tracks Chromium releases: # 1. Open https://omahaproxy.appspot.com/ and note of linux/stable release. # 2. Open https://chromium.googlesource.com/chromium/src/+/refs/tags//DEPS and note . # 3. Find a commit in BoringSSL's "master-with-bazel" branch that merges . # - # chromium-85.0.4183.83 + # chromium-86.0.4240.80 urls = ["https://github.com/google/boringssl/archive/{version}.tar.gz"], use_category = ["controlplane", "dataplane_core"], - last_updated = "2020-06-23", + last_updated = "2020-07-30", cpe = "cpe:2.3:a:google:boringssl:*", ), boringssl_fips = dict( From 73d78f8d6a7d031ec7a0e06365e729e75f1b677d Mon Sep 17 00:00:00 2001 From: Lizan Zhou Date: Thu, 15 Oct 2020 08:15:53 -0700 Subject: [PATCH 17/17] ci: use multiple stage (#13557) Signed-off-by: Lizan Zhou --- .azure-pipelines/pipelines.yml | 688 +++++++++++++++++---------------- 1 file changed, 349 insertions(+), 339 deletions(-) diff --git a/.azure-pipelines/pipelines.yml b/.azure-pipelines/pipelines.yml index f8c5ce972174..151736a8f335 100644 --- a/.azure-pipelines/pipelines.yml +++ b/.azure-pipelines/pipelines.yml @@ -10,370 +10,380 @@ trigger: # PR build config is manually overridden in Azure pipelines UI with different secrets pr: none -jobs: - - job: format - dependsOn: [] # this removes the implicit dependency on previous stage and causes this to run in parallel. - pool: - vmImage: "ubuntu-18.04" - steps: - - task: Cache@2 - inputs: - key: "format | ./WORKSPACE | **/*.bzl" - path: $(Build.StagingDirectory)/repository_cache - continueOnError: true +stages: + - stage: precheck + jobs: + - job: format + dependsOn: [] # this removes the implicit dependency on previous stage and causes this to run in parallel. 
+ pool: + vmImage: "ubuntu-18.04" + steps: + - task: Cache@2 + inputs: + key: "format | ./WORKSPACE | **/*.bzl" + path: $(Build.StagingDirectory)/repository_cache + continueOnError: true - - script: ci/run_envoy_docker.sh 'ci/check_and_fix_format.sh' - workingDirectory: $(Build.SourcesDirectory) - env: - ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) - BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com - BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance - GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) - displayName: "Run check format scripts" + - script: ci/run_envoy_docker.sh 'ci/check_and_fix_format.sh' + workingDirectory: $(Build.SourcesDirectory) + env: + ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) + BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com + BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance + GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) + displayName: "Run check format scripts" - - task: PublishBuildArtifacts@1 - inputs: - pathtoPublish: "$(Build.StagingDirectory)/fix_format.diff" - artifactName: format - condition: failed() + - task: PublishBuildArtifacts@1 + inputs: + pathtoPublish: "$(Build.StagingDirectory)/fix_format.diff" + artifactName: format + condition: failed() - - job: docs - dependsOn: [] # this removes the implicit dependency on previous stage and causes this to run in parallel. - pool: - vmImage: "ubuntu-18.04" - steps: - - task: Cache@2 - inputs: - key: "docs | ./WORKSPACE | **/*.bzl" - path: $(Build.StagingDirectory)/repository_cache - continueOnError: true + - job: docs + dependsOn: [] # this removes the implicit dependency on previous stage and causes this to run in parallel. + pool: + vmImage: "ubuntu-18.04" + steps: + - task: Cache@2 + inputs: + key: "docs | ./WORKSPACE | **/*.bzl" + path: $(Build.StagingDirectory)/repository_cache + continueOnError: true - - script: ci/run_envoy_docker.sh 'ci/do_ci.sh docs' - workingDirectory: $(Build.SourcesDirectory) - env: - ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) - BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com - BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance - GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) - displayName: "Generate docs" + - script: ci/run_envoy_docker.sh 'ci/do_ci.sh docs' + workingDirectory: $(Build.SourcesDirectory) + env: + ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) + BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com + BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance + GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) + displayName: "Generate docs" - - script: ci/run_envoy_docker.sh 'ci/upload_gcs_artifact.sh /source/generated/docs docs' - displayName: "Upload Docs to GCS" - env: - ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) - GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) - GCS_ARTIFACT_BUCKET: $(GcsArtifactBucket) + - script: ci/run_envoy_docker.sh 'ci/upload_gcs_artifact.sh /source/generated/docs docs' + displayName: "Upload Docs to GCS" + env: + ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) + GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) + GCS_ARTIFACT_BUCKET: $(GcsArtifactBucket) - - task: PublishBuildArtifacts@1 - inputs: - pathtoPublish: "$(Build.SourcesDirectory)/generated/docs" - artifactName: docs - condition: and(succeeded(), eq(variables['Build.Reason'], 'PullRequest')) + - task: PublishBuildArtifacts@1 + inputs: + pathtoPublish: "$(Build.SourcesDirectory)/generated/docs" + artifactName: docs + condition: 
and(succeeded(), eq(variables['Build.Reason'], 'PullRequest')) - - task: InstallSSHKey@0 - inputs: - hostName: "github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==" - sshPublicKey: "$(DocsPublicKey)" - sshPassphrase: "$(SshDeployKeyPassphrase)" - sshKeySecureFile: "$(DocsPrivateKey)" - condition: and(succeeded(), ne(variables['Build.Reason'], 'PullRequest'), eq(variables['PostSubmit'], true)) + - task: InstallSSHKey@0 + inputs: + hostName: "github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==" + sshPublicKey: "$(DocsPublicKey)" + sshPassphrase: "$(SshDeployKeyPassphrase)" + sshKeySecureFile: "$(DocsPrivateKey)" + condition: and(succeeded(), ne(variables['Build.Reason'], 'PullRequest'), eq(variables['PostSubmit'], true)) - - script: docs/publish.sh - displayName: "Publish to GitHub" - workingDirectory: $(Build.SourcesDirectory) - env: - AZP_BRANCH: $(Build.SourceBranch) - AZP_SHA1: $(Build.SourceVersion) - condition: and(succeeded(), ne(variables['Build.Reason'], 'PullRequest'), eq(variables['PostSubmit'], true)) + - script: docs/publish.sh + displayName: "Publish to GitHub" + workingDirectory: $(Build.SourcesDirectory) + env: + AZP_BRANCH: $(Build.SourceBranch) + AZP_SHA1: $(Build.SourceVersion) + condition: and(succeeded(), ne(variables['Build.Reason'], 'PullRequest'), eq(variables['PostSubmit'], true)) - - job: release - displayName: "Linux-x64 release" - dependsOn: ["format"] - # For master builds, continue even if format fails - condition: and(not(canceled()), or(succeeded(), ne(variables['Build.Reason'], 'PullRequest'))) - timeoutInMinutes: 360 - pool: - vmImage: "ubuntu-18.04" - steps: - - template: bazel.yml - parameters: - ciTarget: bazel.release - - - job: release_arm64 - displayName: "Linux-arm64 release" - dependsOn: ["format"] - # For master builds, continue even if format fails - condition: and(not(canceled()), or(succeeded(), ne(variables['Build.Reason'], 'PullRequest'))) - timeoutInMinutes: 360 - pool: "arm-large" - steps: - - template: bazel.yml - parameters: - managedAgent: false - ciTarget: bazel.release - rbe: false - artifactSuffix: ".arm64" - bazelBuildExtraOptions: "--sandbox_base=/tmp/sandbox_base" - - - job: filter_example - displayName: "filter-example sync" - dependsOn: [] - pool: - vmImage: "ubuntu-18.04" + - stage: sync condition: and(succeeded(), eq(variables['PostSubmit'], true)) - steps: - - task: InstallSSHKey@0 - inputs: - hostName: "github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==" - sshPublicKey: "$(FilterExamplePublicKey)" - sshPassphrase: 
"$(SshDeployKeyPassphrase)" - sshKeySecureFile: "$(FilterExamplePrivateKey)" + dependsOn: [] + jobs: + - job: filter_example + dependsOn: [] + pool: + vmImage: "ubuntu-18.04" + steps: + - task: InstallSSHKey@0 + inputs: + hostName: "github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==" + sshPublicKey: "$(FilterExamplePublicKey)" + sshPassphrase: "$(SshDeployKeyPassphrase)" + sshKeySecureFile: "$(FilterExamplePrivateKey)" - - bash: ci/filter_example_mirror.sh - displayName: "Sync envoy-filter-example" - workingDirectory: $(Build.SourcesDirectory) - env: - AZP_BRANCH: $(Build.SourceBranch) + - bash: ci/filter_example_mirror.sh + displayName: "Sync envoy-filter-example" + workingDirectory: $(Build.SourcesDirectory) + env: + AZP_BRANCH: $(Build.SourceBranch) - - job: api - displayName: "data-plane-api sync" - dependsOn: [] - condition: and(succeeded(), eq(variables['PostSubmit'], true)) - pool: - vmImage: "ubuntu-18.04" - steps: - - task: InstallSSHKey@0 - inputs: - hostName: "github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==" - sshPublicKey: "$(DataPlaneApiPublicKey)" - sshPassphrase: "$(SshDeployKeyPassphrase)" - sshKeySecureFile: "$(DataPlaneApiPrivateKey)" + - job: data_plane_api + dependsOn: [] + pool: + vmImage: "ubuntu-18.04" + steps: + - task: InstallSSHKey@0 + inputs: + hostName: "github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==" + sshPublicKey: "$(DataPlaneApiPublicKey)" + sshPassphrase: "$(SshDeployKeyPassphrase)" + sshKeySecureFile: "$(DataPlaneApiPrivateKey)" - - bash: ci/api_mirror.sh - displayName: "Sync data-plane-api" - workingDirectory: $(Build.SourcesDirectory) - env: - AZP_BRANCH: $(Build.SourceBranch) + - bash: ci/api_mirror.sh + displayName: "Sync data-plane-api" + workingDirectory: $(Build.SourcesDirectory) + env: + AZP_BRANCH: $(Build.SourceBranch) - - job: go_control_plane - displayName: "go-control-plane sync" - dependsOn: [] - condition: and(succeeded(), eq(variables['PostSubmit'], true)) - steps: - - task: InstallSSHKey@0 - inputs: - hostName: "github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==" - sshPublicKey: "$(GoControlPlanePublicKey)" - sshPassphrase: "$(SshDeployKeyPassphrase)" - sshKeySecureFile: 
"$(GoControlPlanePrivateKey)" + - job: go_control_plane + dependsOn: [] + steps: + - task: InstallSSHKey@0 + inputs: + hostName: "github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==" + sshPublicKey: "$(GoControlPlanePublicKey)" + sshPassphrase: "$(SshDeployKeyPassphrase)" + sshKeySecureFile: "$(GoControlPlanePrivateKey)" - - bash: | - cp -a ~/.ssh $(Build.StagingDirectory)/ - ci/run_envoy_docker.sh 'ci/go_mirror.sh' - displayName: "Sync go-control-plane" - workingDirectory: $(Build.SourcesDirectory) - env: - ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) - BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com - BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance - GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) - AZP_BRANCH: $(Build.SourceBranch) + - bash: | + cp -a ~/.ssh $(Build.StagingDirectory)/ + ci/run_envoy_docker.sh 'ci/go_mirror.sh' + displayName: "Sync go-control-plane" + workingDirectory: $(Build.SourcesDirectory) + env: + ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) + BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com + BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance + GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) + AZP_BRANCH: $(Build.SourceBranch) - - job: bazel - displayName: "Linux-x64" - dependsOn: ["release"] - # For master builds, continue even if format fails + - stage: linux_x64 + dependsOn: ["precheck"] + # For master builds, continue even if precheck fails condition: and(not(canceled()), or(succeeded(), ne(variables['Build.Reason'], 'PullRequest'))) - strategy: - maxParallel: 3 - matrix: - api: - CI_TARGET: "bazel.api" - gcc: - CI_TARGET: "bazel.gcc" - clang_tidy: - CI_TARGET: "bazel.clang_tidy" - asan: - CI_TARGET: "bazel.asan" - tsan: - CI_TARGET: "bazel.tsan" - compile_time_options: - CI_TARGET: "bazel.compile_time_options" - timeoutInMinutes: 360 - pool: - vmImage: "ubuntu-18.04" - steps: - - template: bazel.yml - parameters: - ciTarget: $(CI_TARGET) + jobs: + - job: release + # For master builds, continue even if format fails + timeoutInMinutes: 360 + pool: + vmImage: "ubuntu-18.04" + steps: + - template: bazel.yml + parameters: + ciTarget: bazel.release + + - stage: linux_arm64 + dependsOn: ["precheck"] + # For master builds, continue even if precheck fails + condition: and(not(canceled()), or(succeeded(), ne(variables['Build.Reason'], 'PullRequest'))) + jobs: + - job: release + timeoutInMinutes: 360 + pool: "arm-large" + steps: + - template: bazel.yml + parameters: + managedAgent: false + ciTarget: bazel.release + rbe: false + artifactSuffix: ".arm64" + bazelBuildExtraOptions: "--sandbox_base=/tmp/sandbox_base" + + - stage: check + dependsOn: ["linux_x64"] + jobs: + - job: bazel + displayName: "linux_x64" + dependsOn: [] + strategy: + maxParallel: 3 + matrix: + api: + CI_TARGET: "bazel.api" + gcc: + CI_TARGET: "bazel.gcc" + clang_tidy: + CI_TARGET: "bazel.clang_tidy" + asan: + CI_TARGET: "bazel.asan" + tsan: + CI_TARGET: "bazel.tsan" + compile_time_options: + CI_TARGET: "bazel.compile_time_options" + timeoutInMinutes: 360 + pool: + vmImage: "ubuntu-18.04" + steps: + - template: bazel.yml + parameters: + ciTarget: $(CI_TARGET) - - job: coverage - 
displayName: "Linux-x64" - dependsOn: ["release"] - timeoutInMinutes: 360 - pool: "x64-large" - strategy: - maxParallel: 2 - matrix: - coverage: - CI_TARGET: "coverage" - fuzz_coverage: - CI_TARGET: "fuzz_coverage" - steps: - - template: bazel.yml - parameters: - managedAgent: false - ciTarget: bazel.$(CI_TARGET) - rbe: false - # /tmp/sandbox_base is a tmpfs in CI environment to optimize large I/O for coverage traces - bazelBuildExtraOptions: "--define=no_debug_info=1 --linkopt=-Wl,-s --test_env=ENVOY_IP_TEST_VERSIONS=v4only --sandbox_base=/tmp/sandbox_base" + - job: coverage + displayName: "linux_x64" + dependsOn: [] + timeoutInMinutes: 360 + pool: "x64-large" + strategy: + maxParallel: 2 + matrix: + coverage: + CI_TARGET: "coverage" + fuzz_coverage: + CI_TARGET: "fuzz_coverage" + steps: + - template: bazel.yml + parameters: + managedAgent: false + ciTarget: bazel.$(CI_TARGET) + rbe: false + # /tmp/sandbox_base is a tmpfs in CI environment to optimize large I/O for coverage traces + bazelBuildExtraOptions: "--define=no_debug_info=1 --linkopt=-Wl,-s --test_env=ENVOY_IP_TEST_VERSIONS=v4only --sandbox_base=/tmp/sandbox_base" - - script: ci/run_envoy_docker.sh 'ci/upload_gcs_artifact.sh /source/generated/$(CI_TARGET) $(CI_TARGET)' - displayName: "Upload $(CI_TARGET) Report to GCS" - env: - ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) - GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) - GCS_ARTIFACT_BUCKET: $(GcsArtifactBucket) - condition: always() + - script: ci/run_envoy_docker.sh 'ci/upload_gcs_artifact.sh /source/generated/$(CI_TARGET) $(CI_TARGET)' + displayName: "Upload $(CI_TARGET) Report to GCS" + env: + ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) + GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) + GCS_ARTIFACT_BUCKET: $(GcsArtifactBucket) + condition: always() - - job: docker - displayName: "Linux multi-arch docker" - dependsOn: ["release", "release_arm64"] - pool: - vmImage: "ubuntu-18.04" - steps: - - bash: .azure-pipelines/cleanup.sh - displayName: "Removing tools from agent" - - task: DownloadBuildArtifacts@0 - inputs: - buildType: current - artifactName: "bazel.release" - itemPattern: "bazel.release/envoy_binary.tar.gz" - downloadType: single - targetPath: $(Build.StagingDirectory) - - task: DownloadBuildArtifacts@0 - inputs: - buildType: current - artifactName: "bazel.release.arm64" - itemPattern: "bazel.release.arm64/envoy_binary.tar.gz" - downloadType: single - targetPath: $(Build.StagingDirectory) - - bash: | - set -e - mkdir -p linux/amd64 && tar zxf $(Build.StagingDirectory)/bazel.release/envoy_binary.tar.gz -C ./linux/amd64 - mkdir -p linux/arm64 && tar zxf $(Build.StagingDirectory)/bazel.release.arm64/envoy_binary.tar.gz -C ./linux/arm64 - ci/docker_ci.sh - workingDirectory: $(Build.SourcesDirectory) - env: - AZP_BRANCH: $(Build.SourceBranch) - AZP_SHA1: $(Build.SourceVersion) - DOCKERHUB_USERNAME: $(DockerUsername) - DOCKERHUB_PASSWORD: $(DockerPassword) - - task: PublishBuildArtifacts@1 - inputs: - pathtoPublish: "$(Build.StagingDirectory)/build_images" - artifactName: docker - condition: always() + - stage: docker + dependsOn: ["linux_x64", "linux_arm64"] + jobs: + - job: docker + displayName: "linux multiarch" + pool: + vmImage: "ubuntu-18.04" + steps: + - bash: .azure-pipelines/cleanup.sh + displayName: "Removing tools from agent" + - task: DownloadBuildArtifacts@0 + inputs: + buildType: current + artifactName: "bazel.release" + itemPattern: "bazel.release/envoy_binary.tar.gz" + downloadType: single + targetPath: $(Build.StagingDirectory) + - task: 
DownloadBuildArtifacts@0 + inputs: + buildType: current + artifactName: "bazel.release.arm64" + itemPattern: "bazel.release.arm64/envoy_binary.tar.gz" + downloadType: single + targetPath: $(Build.StagingDirectory) + - bash: | + set -e + mkdir -p linux/amd64 && tar zxf $(Build.StagingDirectory)/bazel.release/envoy_binary.tar.gz -C ./linux/amd64 + mkdir -p linux/arm64 && tar zxf $(Build.StagingDirectory)/bazel.release.arm64/envoy_binary.tar.gz -C ./linux/arm64 + ci/docker_ci.sh + workingDirectory: $(Build.SourcesDirectory) + env: + AZP_BRANCH: $(Build.SourceBranch) + AZP_SHA1: $(Build.SourceVersion) + DOCKERHUB_USERNAME: $(DockerUsername) + DOCKERHUB_PASSWORD: $(DockerPassword) + - task: PublishBuildArtifacts@1 + inputs: + pathtoPublish: "$(Build.StagingDirectory)/build_images" + artifactName: docker + condition: always() - - job: examples + - stage: verify dependsOn: ["docker"] - displayName: "Verify examples run as documented" - pool: - vmImage: "ubuntu-18.04" - steps: - - task: DownloadBuildArtifacts@0 - inputs: - buildType: current - artifactName: "docker" - itemPattern: "docker/envoy-docker-images.tar.xz" - downloadType: single - targetPath: $(Build.StagingDirectory) - - bash: ./ci/do_ci.sh verify_examples - env: - ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) - NO_BUILD_SETUP: 1 + jobs: + - job: examples + pool: + vmImage: "ubuntu-18.04" + steps: + - task: DownloadBuildArtifacts@0 + inputs: + buildType: current + artifactName: "docker" + itemPattern: "docker/envoy-docker-images.tar.xz" + downloadType: single + targetPath: $(Build.StagingDirectory) + - bash: ./ci/do_ci.sh verify_examples + env: + ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) + NO_BUILD_SETUP: 1 - - job: macOS - dependsOn: ["format"] - timeoutInMinutes: 360 - pool: - vmImage: "macos-latest" - steps: - - script: ./ci/mac_ci_setup.sh - displayName: "Install dependencies" + - stage: macos + dependsOn: ["precheck"] + jobs: + - job: test + timeoutInMinutes: 360 + pool: + vmImage: "macos-latest" + steps: + - script: ./ci/mac_ci_setup.sh + displayName: "Install dependencies" - - script: ./ci/mac_ci_steps.sh - displayName: "Run Mac CI" - env: - BAZEL_BUILD_EXTRA_OPTIONS: "--remote_download_toplevel --flaky_test_attempts=2" - BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com - BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance - GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) + - script: ./ci/mac_ci_steps.sh + displayName: "Run Mac CI" + env: + BAZEL_BUILD_EXTRA_OPTIONS: "--remote_download_toplevel --flaky_test_attempts=2" + BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com + BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance + GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) - - task: PublishTestResults@2 - inputs: - testResultsFiles: "**/bazel-testlogs/**/test.xml" - testRunTitle: "macOS" - condition: always() + - task: PublishTestResults@2 + inputs: + testResultsFiles: "**/bazel-testlogs/**/test.xml" + testRunTitle: "macOS" + condition: always() - - script: ./ci/flaky_test/run_process_xml.sh - displayName: "Process Test Results" - env: - TEST_TMPDIR: $(Build.SourcesDirectory) - SLACK_TOKEN: $(SLACK_TOKEN) - CI_TARGET: "MacOS" - REPO_URI: $(Build.Repository.Uri) - BUILD_URI: $(Build.BuildUri) + - script: ./ci/flaky_test/run_process_xml.sh + displayName: "Process Test Results" + env: + TEST_TMPDIR: $(Build.SourcesDirectory) + SLACK_TOKEN: $(SLACK_TOKEN) + CI_TARGET: "MacOS" + REPO_URI: $(Build.Repository.Uri) + BUILD_URI: $(Build.BuildUri) - - job: Windows - 
dependsOn: ["format"] - timeoutInMinutes: 360 - pool: - vmImage: "windows-latest" - steps: - - bash: ci/run_envoy_docker.sh ci/windows_ci_steps.sh - displayName: "Run Windows CI" - env: - ENVOY_DOCKER_BUILD_DIR: "$(Build.StagingDirectory)" - ENVOY_RBE: "true" - BAZEL_BUILD_EXTRA_OPTIONS: "--config=remote-ci --config=remote-msvc-cl --jobs=$(RbeJobs)" - BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com - BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance - GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) - - task: PublishBuildArtifacts@1 - inputs: - pathtoPublish: "$(Build.StagingDirectory)/envoy" - artifactName: windows.release - condition: always() + - stage: windows + dependsOn: ["precheck"] + jobs: + - job: release + timeoutInMinutes: 360 + pool: + vmImage: "windows-latest" + steps: + - bash: ci/run_envoy_docker.sh ci/windows_ci_steps.sh + displayName: "Run Windows CI" + env: + ENVOY_DOCKER_BUILD_DIR: "$(Build.StagingDirectory)" + ENVOY_RBE: "true" + BAZEL_BUILD_EXTRA_OPTIONS: "--config=remote-ci --config=remote-msvc-cl --jobs=$(RbeJobs)" + BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com + BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance + GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) + - task: PublishBuildArtifacts@1 + inputs: + pathtoPublish: "$(Build.StagingDirectory)/envoy" + artifactName: windows.release + condition: always() - - job: docker_windows - displayName: "Windows docker" - dependsOn: ["Windows"] - timeoutInMinutes: 360 - pool: - vmImage: "windows-latest" - steps: - - task: DownloadBuildArtifacts@0 - inputs: - buildType: current - artifactName: "windows.release" - itemPattern: "windows.release/envoy_binary.tar.gz" - downloadType: single - targetPath: $(Build.StagingDirectory) - - bash: | - set -e - # Convert to Unix-style path so tar doesn't think drive letter is a hostname - STAGING_DIR="/$(echo '$(Build.StagingDirectory)' | tr -d ':' | tr '\\' '/')" - mkdir -p windows/amd64 && tar zxf "${STAGING_DIR}/windows.release/envoy_binary.tar.gz" -C ./windows/amd64 - ci/docker_ci.sh - workingDirectory: $(Build.SourcesDirectory) - env: - AZP_BRANCH: $(Build.SourceBranch) - AZP_SHA1: $(Build.SourceVersion) - DOCKERHUB_USERNAME: $(DockerUsername) - DOCKERHUB_PASSWORD: $(DockerPassword) - - task: PublishBuildArtifacts@1 - inputs: - pathtoPublish: "$(Build.StagingDirectory)/build_images" - artifactName: docker_windows - condition: always() + - job: docker + dependsOn: ["release"] + timeoutInMinutes: 360 + pool: + vmImage: "windows-latest" + steps: + - task: DownloadBuildArtifacts@0 + inputs: + buildType: current + artifactName: "windows.release" + itemPattern: "windows.release/envoy_binary.tar.gz" + downloadType: single + targetPath: $(Build.StagingDirectory) + - bash: | + set -e + # Convert to Unix-style path so tar doesn't think drive letter is a hostname + STAGING_DIR="/$(echo '$(Build.StagingDirectory)' | tr -d ':' | tr '\\' '/')" + mkdir -p windows/amd64 && tar zxf "${STAGING_DIR}/windows.release/envoy_binary.tar.gz" -C ./windows/amd64 + ci/docker_ci.sh + workingDirectory: $(Build.SourcesDirectory) + env: + AZP_BRANCH: $(Build.SourceBranch) + AZP_SHA1: $(Build.SourceVersion) + DOCKERHUB_USERNAME: $(DockerUsername) + DOCKERHUB_PASSWORD: $(DockerPassword) + - task: PublishBuildArtifacts@1 + inputs: + pathtoPublish: "$(Build.StagingDirectory)/build_images" + artifactName: docker_windows + condition: always()